未验证 提交 f98a0f88 编写于 作者: B Bernard Xiong 提交者: GitHub

Merge pull request #3928 from thread-liu/stm32mp157a-ev1

[add] openamp for stm32mp157a-ev1
...@@ -422,12 +422,14 @@ CONFIG_SOC_STM32MP157A=y ...@@ -422,12 +422,14 @@ CONFIG_SOC_STM32MP157A=y
# Onboard Peripheral Drivers # Onboard Peripheral Drivers
# #
CONFIG_BSP_USING_STLINK_TO_USART=y CONFIG_BSP_USING_STLINK_TO_USART=y
# CONFIG_BSP_USING_PMIC is not set
# CONFIG_BSP_USING_NAND is not set
# CONFIG_BSP_USING_OPENAMP is not set
# #
# On-chip Peripheral Drivers # On-chip Peripheral Drivers
# #
CONFIG_BSP_USING_GPIO=y CONFIG_BSP_USING_GPIO=y
# CONFIG_BSP_USING_WWDG is not set
CONFIG_BSP_USING_UART=y CONFIG_BSP_USING_UART=y
# CONFIG_BSP_USING_UART3 is not set # CONFIG_BSP_USING_UART3 is not set
# CONFIG_BSP_UART3_RX_USING_DMA is not set # CONFIG_BSP_UART3_RX_USING_DMA is not set
...@@ -435,11 +437,10 @@ CONFIG_BSP_USING_UART4=y ...@@ -435,11 +437,10 @@ CONFIG_BSP_USING_UART4=y
# CONFIG_BSP_UART4_RX_USING_DMA is not set # CONFIG_BSP_UART4_RX_USING_DMA is not set
# CONFIG_BSP_UART4_TX_USING_DMA is not set # CONFIG_BSP_UART4_TX_USING_DMA is not set
# CONFIG_BSP_USING_TIM is not set # CONFIG_BSP_USING_TIM is not set
# CONFIG_BSP_USING_LPTIM is not set
# CONFIG_BSP_USING_PWM is not set # CONFIG_BSP_USING_PWM is not set
# CONFIG_BSP_USING_ADC is not set # CONFIG_BSP_USING_ADC is not set
# CONFIG_BSP_USING_DAC is not set # CONFIG_BSP_USING_DAC is not set
# CONFIG_BSP_USING_I2C1 is not set # CONFIG_BSP_USING_I2C is not set
# CONFIG_BSP_USING_SPI is not set # CONFIG_BSP_USING_SPI is not set
# CONFIG_BSP_USING_CRC is not set # CONFIG_BSP_USING_CRC is not set
# CONFIG_BSP_USING_RNG is not set # CONFIG_BSP_USING_RNG is not set
......
...@@ -67,15 +67,14 @@ ...@@ -67,15 +67,14 @@
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//board/CubeMX_Config/CM4/Inc}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//board/CubeMX_Config/CM4/Inc}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//board/ports}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//board/ports}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//board}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//board}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//libraries/HAL_Drivers/config}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/bsp/stm32/libraries/HAL_Drivers/config}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//libraries/HAL_Drivers}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/bsp/stm32/libraries/HAL_Drivers}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//libraries/STM32MPxx_HAL/CMSIS/Core/Include}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/CMSIS/Core/Include}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//libraries/STM32MPxx_HAL/CMSIS/Device/ST/STM32MP1xx/Include}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/CMSIS/Device/ST/STM32MP1xx/Include}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//libraries/STM32MPxx_HAL/CMSIS/Include}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/CMSIS/Include}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Inc}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Inc}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/components/drivers/include}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/components/drivers/include}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/components/finsh}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/components/finsh}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/components/libc/compilers/newlib}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/include}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/include}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/libcpu/arm/common}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/libcpu/arm/common}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/libcpu/arm/cortex-m4}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/libcpu/arm/cortex-m4}&quot;" />
...@@ -159,15 +158,14 @@ ...@@ -159,15 +158,14 @@
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//board/CubeMX_Config/CM4/Inc}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//board/CubeMX_Config/CM4/Inc}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//board/ports}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//board/ports}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//board}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//board}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//libraries/HAL_Drivers/config}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/bsp/stm32/libraries/HAL_Drivers/config}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//libraries/HAL_Drivers}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/bsp/stm32/libraries/HAL_Drivers}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//libraries/STM32MPxx_HAL/CMSIS/Core/Include}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/CMSIS/Core/Include}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//libraries/STM32MPxx_HAL/CMSIS/Device/ST/STM32MP1xx/Include}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/CMSIS/Device/ST/STM32MP1xx/Include}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//libraries/STM32MPxx_HAL/CMSIS/Include}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/CMSIS/Include}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Inc}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Inc}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/components/drivers/include}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/components/drivers/include}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/components/finsh}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/components/finsh}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/components/libc/compilers/newlib}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/include}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/include}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/libcpu/arm/common}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/libcpu/arm/common}&quot;" />
<listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/libcpu/arm/cortex-m4}&quot;" /> <listOptionValue builtIn="false" value="&quot;${workspace_loc://${ProjName}//rt-thread/libcpu/arm/cortex-m4}&quot;" />
...@@ -180,7 +178,7 @@ ...@@ -180,7 +178,7 @@
</toolChain> </toolChain>
</folderInfo> </folderInfo>
<sourceEntries> <sourceEntries>
<entry excluding="//board/CubeMX_Config/CM4/Src/main.c|//board/CubeMX_Config/CM4/Src/stm32mp1xx_it.c|//board/ports|//libraries/HAL_Drivers/drv_adc.c|//libraries/HAL_Drivers/drv_can.c|//libraries/HAL_Drivers/drv_crypto.c|//libraries/HAL_Drivers/drv_dac.c|//libraries/HAL_Drivers/drv_eth.c|//libraries/HAL_Drivers/drv_flash|//libraries/HAL_Drivers/drv_hwtimer.c|//libraries/HAL_Drivers/drv_lcd.c|//libraries/HAL_Drivers/drv_lcd_mipi.c|//libraries/HAL_Drivers/drv_lptim.c|//libraries/HAL_Drivers/drv_pm.c|//libraries/HAL_Drivers/drv_pulse_encoder.c|//libraries/HAL_Drivers/drv_pwm.c|//libraries/HAL_Drivers/drv_qspi.c|//libraries/HAL_Drivers/drv_rtc.c|//libraries/HAL_Drivers/drv_sdio.c|//libraries/HAL_Drivers/drv_sdram.c|//libraries/HAL_Drivers/drv_soft_i2c.c|//libraries/HAL_Drivers/drv_spi.c|//libraries/HAL_Drivers/drv_usbd.c|//libraries/HAL_Drivers/drv_usbh.c|//libraries/HAL_Drivers/drv_wdt.c|//libraries/STM32MPxx_HAL/CMSIS/Device/ST/STM32MP1xx/Source/Templates/arm|//libraries/STM32MPxx_HAL/CMSIS/Device/ST/STM32MP1xx/Source/Templates/iar|//libraries/STM32MPxx_HAL/CMSIS/Device/ST/STM32MP1xx/Source/Templates/system_stm32mp1xx.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_cec.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_crc.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_crc_ex.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_cryp.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_cryp_ex.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_dcmi.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_dfsdm.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_dfsdm_ex.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_fdcan.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_hash.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_hash_ex.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HA
L_Driver/Src/stm32mp1xx_hal_lptim.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_mdios.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_msp_template.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_qspi.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_rng.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_rtc.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_rtc_ex.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_sai.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_sai_ex.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_sd.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_sd_ex.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_smbus.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_spdifrx.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_spi_ex.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_sram.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_timebase_tim_template.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_usart.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_usart_ex.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_wwdg.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_adc.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_delayblock.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_dma.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_exti.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_fmc.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_gpio.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_i2c.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_lptim.c|//librar
ies/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_pwr.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_rcc.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_rtc.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_sdmmc.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_spi.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_tim.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_usart.c|//libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_utils.c|//rt-thread/components/cplusplus|//rt-thread/components/dfs|//rt-thread/components/drivers/audio|//rt-thread/components/drivers/can|//rt-thread/components/drivers/cputime|//rt-thread/components/drivers/hwcrypto|//rt-thread/components/drivers/hwtimer|//rt-thread/components/drivers/i2c|//rt-thread/components/drivers/misc/adc.c|//rt-thread/components/drivers/misc/dac.c|//rt-thread/components/drivers/misc/pulse_encoder.c|//rt-thread/components/drivers/misc/rt_drv_pwm.c|//rt-thread/components/drivers/misc/rt_inputcapture.c|//rt-thread/components/drivers/mtd|//rt-thread/components/drivers/pm|//rt-thread/components/drivers/rtc|//rt-thread/components/drivers/sdio|//rt-thread/components/drivers/sensors|//rt-thread/components/drivers/spi|//rt-thread/components/drivers/touch|//rt-thread/components/drivers/usb|//rt-thread/components/drivers/watchdog|//rt-thread/components/drivers/wlan|//rt-thread/components/finsh/msh_file.c|//rt-thread/components/finsh/symbol.c|//rt-thread/components/libc/aio|//rt-thread/components/libc/compilers/armlibc|//rt-thread/components/libc/compilers/common|//rt-thread/components/libc/compilers/dlib|//rt-thread/components/libc/compilers/minilibc|//rt-thread/components/libc/libdl|//rt-thread/components/libc/mmap|//rt-thread/components/libc/pthreads|//rt-thread/components/libc/signal|//rt-thread/components/libc/termios|//rt-thread/components/libc/time|//rt-thread/components/lwp|//rt-thread/co
mponents/net|//rt-thread/components/utilities|//rt-thread/components/vbus|//rt-thread/components/vmm|//rt-thread/libcpu/arm/AT91SAM7S|//rt-thread/libcpu/arm/AT91SAM7X|//rt-thread/libcpu/arm/am335x|//rt-thread/libcpu/arm/arm926|//rt-thread/libcpu/arm/armv6|//rt-thread/libcpu/arm/common/divsi3.S|//rt-thread/libcpu/arm/cortex-a|//rt-thread/libcpu/arm/cortex-m0|//rt-thread/libcpu/arm/cortex-m23|//rt-thread/libcpu/arm/cortex-m3|//rt-thread/libcpu/arm/cortex-m33|//rt-thread/libcpu/arm/cortex-m4/context_iar.S|//rt-thread/libcpu/arm/cortex-m4/context_rvds.S|//rt-thread/libcpu/arm/cortex-m7|//rt-thread/libcpu/arm/cortex-r4|//rt-thread/libcpu/arm/dm36x|//rt-thread/libcpu/arm/lpc214x|//rt-thread/libcpu/arm/lpc24xx|//rt-thread/libcpu/arm/realview-a8-vmm|//rt-thread/libcpu/arm/s3c24x0|//rt-thread/libcpu/arm/s3c44b0|//rt-thread/libcpu/arm/sep4020|//rt-thread/libcpu/arm/zynq7000|//rt-thread/src/cpu.c|//rt-thread/src/mem.c|//rt-thread/src/slab.c|//rt-thread/tools" flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name="" /> <entry 
excluding="//board/CubeMX_Config/CM4/Src/main.c|//board/CubeMX_Config/CM4/Src/mbox_ipcc.c|//board/CubeMX_Config/CM4/Src/openamp.c|//board/CubeMX_Config/CM4/Src/openamp_log.c|//board/CubeMX_Config/CM4/Src/rsc_table.c|//board/CubeMX_Config/CM4/Src/stm32mp1xx_it.c|//board/ports|//rt-thread/bsp/CME_M7|//rt-thread/bsp/allwinner_tina|//rt-thread/bsp/amebaz|//rt-thread/bsp/apollo2|//rt-thread/bsp/asm9260t|//rt-thread/bsp/at32|//rt-thread/bsp/at91sam9260|//rt-thread/bsp/at91sam9g45|//rt-thread/bsp/avr32uc3b0|//rt-thread/bsp/beaglebone|//rt-thread/bsp/bf533|//rt-thread/bsp/ck802|//rt-thread/bsp/cypress|//rt-thread/bsp/dm365|//rt-thread/bsp/efm32|//rt-thread/bsp/essemi|//rt-thread/bsp/fh8620|//rt-thread/bsp/frdm-k64f|//rt-thread/bsp/gd32303e-eval|//rt-thread/bsp/gd32450z-eval|//rt-thread/bsp/gd32e230k-start|//rt-thread/bsp/gd32vf103v-eval|//rt-thread/bsp/gkipc|//rt-thread/bsp/hifive1|//rt-thread/bsp/imx6sx|//rt-thread/bsp/imx6ul|//rt-thread/bsp/imxrt|//rt-thread/bsp/k210|//rt-thread/bsp/lm3s8962|//rt-thread/bsp/lm3s9b9x|//rt-thread/bsp/lm4f232|//rt-thread/bsp/lpc1114|//rt-thread/bsp/lpc176x|//rt-thread/bsp/lpc178x|//rt-thread/bsp/lpc2148|//rt-thread/bsp/lpc2478|//rt-thread/bsp/lpc408x|//rt-thread/bsp/lpc43xx|//rt-thread/bsp/lpc5410x|//rt-thread/bsp/lpc54114-lite|//rt-thread/bsp/lpc54608-LPCXpresso|//rt-thread/bsp/lpc55sxx|//rt-thread/bsp/lpc824|//rt-thread/bsp/ls1bdev|//rt-thread/bsp/ls1cdev|//rt-thread/bsp/ls2kdev|//rt-thread/bsp/m16c62p|//rt-thread/bsp/mb9bf500r|//rt-thread/bsp/mb9bf506r|//rt-thread/bsp/mb9bf568r|//rt-thread/bsp/mb9bf618s|//rt-thread/bsp/microblaze|//rt-thread/bsp/mini2440|//rt-thread/bsp/mini4020|//rt-thread/bsp/mipssim|//rt-thread/bsp/mm32l07x|//rt-thread/bsp/mm32l3xx|//rt-thread/bsp/nios_ii|//rt-thread/bsp/nrf51822|//rt-thread/bsp/nrf52832|//rt-thread/bsp/nrf5x|//rt-thread/bsp/nuclei|//rt-thread/bsp/nuvoton|//rt-thread/bsp/nv32f100x|//rt-thread/bsp/pic32ethernet|//rt-thread/bsp/qemu-vexpress-a9|//rt-thread/bsp/qemu-vexpress-gemini|//rt-thread/bsp/raspber
ry-pi|//rt-thread/bsp/realview-a8|//rt-thread/bsp/rm48x50|//rt-thread/bsp/rv32m1_vega|//rt-thread/bsp/rx|//rt-thread/bsp/sam7x|//rt-thread/bsp/samd21|//rt-thread/bsp/sep6200|//rt-thread/bsp/simulator|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_adc.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_can.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_crypto.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_dac.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_eth.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_flash|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_hwtimer.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_lcd.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_lcd_mipi.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_lptim.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_pm.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_pulse_encoder.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_pwm.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_qspi.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_rtc.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_sdio.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_sdram.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_soft_i2c.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_spi.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_usbd.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_usbh.c|//rt-thread/bsp/stm32/libraries/HAL_Drivers/drv_wdt.c|//rt-thread/bsp/stm32/libraries/STM32F0xx_HAL|//rt-thread/bsp/stm32/libraries/STM32F1xx_HAL|//rt-thread/bsp/stm32/libraries/STM32F2xx_HAL|//rt-thread/bsp/stm32/libraries/STM32F4xx_HAL|//rt-thread/bsp/stm32/libraries/STM32F7xx_HAL|//rt-thread/bsp/stm32/libraries/STM32G0xx_HAL|//rt-thread/bsp/stm32/libraries/STM32G4xx_HAL|//rt-thread/bsp/stm32/libraries/STM32H7xx_HAL|//rt-thread/bsp/stm32/libraries/STM32L0xx_HAL|//rt-thread/bsp/stm32/libraries/STM32L1xx_HAL|//rt-thread/bsp/stm32/libraries/STM32L4xx_HAL|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/CMSIS/Device/ST/STM32MP1xx
/Source/Templates/arm|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/CMSIS/Device/ST/STM32MP1xx/Source/Templates/iar|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/CMSIS/Device/ST/STM32MP1xx/Source/Templates/system_stm32mp1xx.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_cec.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_crc.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_crc_ex.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_cryp.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_cryp_ex.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_dcmi.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_dfsdm.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_dfsdm_ex.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_fdcan.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_hash.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_hash_ex.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_lptim.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_mdios.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_msp_template.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_qspi.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_rng.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_rtc.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_rtc_ex.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL
_Driver/Src/stm32mp1xx_hal_sai.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_sai_ex.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_sd.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_sd_ex.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_smbus.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_spdifrx.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_spi_ex.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_sram.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_timebase_tim_template.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_usart.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_usart_ex.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_hal_wwdg.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_adc.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_delayblock.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_dma.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_exti.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_fmc.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_gpio.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_i2c.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_lptim.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_pwr.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driv
er/Src/stm32mp1xx_ll_rcc.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_rtc.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_sdmmc.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_spi.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_tim.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_usart.c|//rt-thread/bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Src/stm32mp1xx_ll_utils.c|//rt-thread/bsp/stm32/libraries/templates|//rt-thread/bsp/stm32/stm32f072-st-nucleo|//rt-thread/bsp/stm32/stm32f091-st-nucleo|//rt-thread/bsp/stm32/stm32f103-atk-nano|//rt-thread/bsp/stm32/stm32f103-atk-warshipv3|//rt-thread/bsp/stm32/stm32f103-dofly-M3S|//rt-thread/bsp/stm32/stm32f103-dofly-lyc8|//rt-thread/bsp/stm32/stm32f103-fire-arbitrary|//rt-thread/bsp/stm32/stm32f103-gizwits-gokitv21|//rt-thread/bsp/stm32/stm32f103-hw100k-ibox|//rt-thread/bsp/stm32/stm32f103-mini-system|//rt-thread/bsp/stm32/stm32f103-onenet-nbiot|//rt-thread/bsp/stm32/stm32f103-yf-ufun|//rt-thread/bsp/stm32/stm32f107-uc-eval|//rt-thread/bsp/stm32/stm32f401-st-nucleo|//rt-thread/bsp/stm32/stm32f405-smdz-breadfruit|//rt-thread/bsp/stm32/stm32f407-atk-explorer|//rt-thread/bsp/stm32/stm32f407-st-discovery|//rt-thread/bsp/stm32/stm32f410-st-nucleo|//rt-thread/bsp/stm32/stm32f411-atk-nano|//rt-thread/bsp/stm32/stm32f411-st-nucleo|//rt-thread/bsp/stm32/stm32f411-weact-MiniF4|//rt-thread/bsp/stm32/stm32f412-st-nucleo|//rt-thread/bsp/stm32/stm32f413-st-nucleo|//rt-thread/bsp/stm32/stm32f427-robomaster-a|//rt-thread/bsp/stm32/stm32f429-armfly-v6|//rt-thread/bsp/stm32/stm32f429-atk-apollo|//rt-thread/bsp/stm32/stm32f429-fire-challenger|//rt-thread/bsp/stm32/stm32f429-st-disco|//rt-thread/bsp/stm32/stm32f446-st-nucleo|//rt-thread/bsp/stm32/stm32f469-st-disco|//rt-thread/bsp/stm32/stm32f746-st-disco|//rt-thread/bsp/stm32/stm32f767
-atk-apollo|//rt-thread/bsp/stm32/stm32f767-fire-challenger|//rt-thread/bsp/stm32/stm32f767-st-nucleo|//rt-thread/bsp/stm32/stm32f769-st-disco|//rt-thread/bsp/stm32/stm32g070-st-nucleo|//rt-thread/bsp/stm32/stm32g071-st-nucleo|//rt-thread/bsp/stm32/stm32g431-st-nucleo|//rt-thread/bsp/stm32/stm32h743-atk-apollo|//rt-thread/bsp/stm32/stm32h743-st-nucleo|//rt-thread/bsp/stm32/stm32h747-st-discovery|//rt-thread/bsp/stm32/stm32h750-armfly-h7-tool|//rt-thread/bsp/stm32/stm32l010-st-nucleo|//rt-thread/bsp/stm32/stm32l053-st-nucleo|//rt-thread/bsp/stm32/stm32l412-st-nucleo|//rt-thread/bsp/stm32/stm32l432-st-nucleo|//rt-thread/bsp/stm32/stm32l433-st-nucleo|//rt-thread/bsp/stm32/stm32l452-st-nucleo|//rt-thread/bsp/stm32/stm32l475-atk-pandora|//rt-thread/bsp/stm32/stm32l475-st-discovery|//rt-thread/bsp/stm32/stm32l476-st-nucleo|//rt-thread/bsp/stm32/stm32l496-ali-developer|//rt-thread/bsp/stm32/stm32l496-st-nucleo|//rt-thread/bsp/stm32/stm32l4r5-st-nucleo|//rt-thread/bsp/stm32/stm32l4r9-st-eval|//rt-thread/bsp/stm32/stm32mp157a-st-discovery|//rt-thread/bsp/stm32f20x|//rt-thread/bsp/swm320-lq100|//rt-thread/bsp/synopsys|//rt-thread/bsp/taihu|//rt-thread/bsp/tm4c123bsp|//rt-thread/bsp/tm4c129x|//rt-thread/bsp/tms320f28379d|//rt-thread/bsp/upd70f3454|//rt-thread/bsp/w60x|//rt-thread/bsp/wh44b0|//rt-thread/bsp/x86|//rt-thread/bsp/xplorer4330|//rt-thread/bsp/zynq7000|//rt-thread/components/cplusplus|//rt-thread/components/dfs|//rt-thread/components/drivers/audio|//rt-thread/components/drivers/can|//rt-thread/components/drivers/cputime|//rt-thread/components/drivers/hwcrypto|//rt-thread/components/drivers/hwtimer|//rt-thread/components/drivers/i2c|//rt-thread/components/drivers/misc/adc.c|//rt-thread/components/drivers/misc/dac.c|//rt-thread/components/drivers/misc/pulse_encoder.c|//rt-thread/components/drivers/misc/rt_drv_pwm.c|//rt-thread/components/drivers/misc/rt_inputcapture.c|//rt-thread/components/drivers/mtd|//rt-thread/components/drivers/pm|//rt-thread/components/drivers/rt
c|//rt-thread/components/drivers/sdio|//rt-thread/components/drivers/sensors|//rt-thread/components/drivers/spi|//rt-thread/components/drivers/touch|//rt-thread/components/drivers/usb|//rt-thread/components/drivers/watchdog|//rt-thread/components/drivers/wlan|//rt-thread/components/finsh/msh_file.c|//rt-thread/components/finsh/symbol.c|//rt-thread/components/libc|//rt-thread/components/lwp|//rt-thread/components/net|//rt-thread/components/utilities|//rt-thread/components/vbus|//rt-thread/components/vmm|//rt-thread/examples|//rt-thread/libcpu/aarch64|//rt-thread/libcpu/arc|//rt-thread/libcpu/arm/AT91SAM7S|//rt-thread/libcpu/arm/AT91SAM7X|//rt-thread/libcpu/arm/am335x|//rt-thread/libcpu/arm/arm926|//rt-thread/libcpu/arm/armv6|//rt-thread/libcpu/arm/common/divsi3.S|//rt-thread/libcpu/arm/cortex-a|//rt-thread/libcpu/arm/cortex-m0|//rt-thread/libcpu/arm/cortex-m23|//rt-thread/libcpu/arm/cortex-m3|//rt-thread/libcpu/arm/cortex-m33|//rt-thread/libcpu/arm/cortex-m4/context_iar.S|//rt-thread/libcpu/arm/cortex-m4/context_rvds.S|//rt-thread/libcpu/arm/cortex-m7|//rt-thread/libcpu/arm/cortex-r4|//rt-thread/libcpu/arm/dm36x|//rt-thread/libcpu/arm/lpc214x|//rt-thread/libcpu/arm/lpc24xx|//rt-thread/libcpu/arm/realview-a8-vmm|//rt-thread/libcpu/arm/s3c24x0|//rt-thread/libcpu/arm/s3c44b0|//rt-thread/libcpu/arm/sep4020|//rt-thread/libcpu/arm/zynq7000|//rt-thread/libcpu/avr32|//rt-thread/libcpu/blackfin|//rt-thread/libcpu/c-sky|//rt-thread/libcpu/ia32|//rt-thread/libcpu/m16c|//rt-thread/libcpu/mips|//rt-thread/libcpu/nios|//rt-thread/libcpu/ppc|//rt-thread/libcpu/risc-v|//rt-thread/libcpu/rx|//rt-thread/libcpu/sim|//rt-thread/libcpu/ti-dsp|//rt-thread/libcpu/unicore32|//rt-thread/libcpu/v850|//rt-thread/libcpu/xilinx|//rt-thread/src/cpu.c|//rt-thread/src/mem.c|//rt-thread/src/slab.c|//rt-thread/tools" flags="VALUE_WORKSPACE_PATH|RESOLVED" kind="sourcePath" name="" />
</sourceEntries> </sourceEntries>
</configuration> </configuration>
</storageModule> </storageModule>
......
/**
  ******************************************************************************
  * File Name          : IPCC.h
  * Description        : This file provides code for the configuration
  *                      of the IPCC instances.
  ******************************************************************************
  * @attention
  *
  * <h2><center>&copy; Copyright (c) 2020 STMicroelectronics.
  * All rights reserved.</center></h2>
  *
  * This software component is licensed by ST under Ultimate Liberty license
  * SLA0044, the "License"; You may not use this file except in compliance with
  * the License. You may obtain a copy of the License at:
  *                             www.st.com/SLA0044
  *
  ******************************************************************************
  */
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __ipcc_H
#define __ipcc_H
#ifdef __cplusplus
extern "C" {
#endif
/* Includes ------------------------------------------------------------------*/
#include "main.h"
/* USER CODE BEGIN Includes */
/* USER CODE END Includes */
/* IPCC peripheral handle; the matching definition presumably lives in the
   CubeMX-generated ipcc.c — not in view, confirm against the source file. */
extern IPCC_HandleTypeDef hipcc;
/* USER CODE BEGIN Private defines */
/* USER CODE END Private defines */
/* CubeMX-generated entry point that configures the IPCC instance. */
void MX_IPCC_Init(void);
/* USER CODE BEGIN Prototypes */
/* USER CODE END Prototypes */
#ifdef __cplusplus
}
#endif
#endif /* __ipcc_H */
/**
  * @}
  */
/**
  * @}
  */
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
/**
  ******************************************************************************
  * @file    mbox_ipcc.h
  * @author  MCD Application Team
  * @brief   Header for mbox_ipcc.c module
  ******************************************************************************
  * @attention
  *
  * <h2><center>&copy; Copyright (c) 2019 STMicroelectronics.
  * All rights reserved.</center></h2>
  *
  * This software component is licensed by ST under BSD 3-Clause license,
  * the "License"; You may not use this file except in compliance with the
  * License. You may obtain a copy of the License at:
  *                        opensource.org/licenses/BSD-3-Clause
  *
  ******************************************************************************
  */
#ifndef MBOX_IPCC_H_
#define MBOX_IPCC_H_
/* USER CODE BEGIN firstSection */
/* can be used to modify / undefine following code or add new definitions */
/* USER CODE END firstSection */
/* Includes ------------------------------------------------------------------*/
/* Exported types ------------------------------------------------------------*/
/* Exported constants --------------------------------------------------------*/
/* Exported functions ------------------------------------------------------- */
/* NOTE(review): bodies live in mbox_ipcc.c (not in view); the comments below
   describe the declared intent and should be confirmed against that file. */
/* Presumably signals the remote processor that message `id` is pending. */
int MAILBOX_Notify(void *priv, uint32_t id);
/* One-time mailbox setup; returns an int status code. */
int MAILBOX_Init(void);
/* Polls the given virtio device for pending mailbox work. */
int MAILBOX_Poll(struct virtio_device *vdev);
/* USER CODE BEGIN lastSection */
/* can be used to modify / undefine previous code or add new definitions */
/* USER CODE END lastSection */
#endif /* MBOX_IPCC_H_ */
/**
******************************************************************************
* @file openamp.h
* @brief Header for openamp applications
* @author MCD Application Team
******************************************************************************
* @attention
*
* <h2><center>&copy; Copyright (c) 2019 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __openamp_H
#define __openamp_H
#ifdef __cplusplus
extern "C" {
#endif
#include "openamp/open_amp.h"
#include "openamp_conf.h"
#define OPENAMP_send rpmsg_send
#define OPENAMP_destroy_ept rpmsg_destroy_ept
/* Initialize the openamp framework*/
int MX_OPENAMP_Init(int RPMsgRole, rpmsg_ns_bind_cb ns_bind_cb);
/* Deinitialize the openamp framework*/
void OPENAMP_DeInit(void);
/* Initialize the endpoint struct*/
void OPENAMP_init_ept(struct rpmsg_endpoint *ept);
/* Create and register the endpoint */
int OPENAMP_create_endpoint(struct rpmsg_endpoint *ept, const char *name,
uint32_t dest, rpmsg_ept_cb cb,
rpmsg_ns_unbind_cb unbind_cb);
/* Check for new rpmsg reception */
void OPENAMP_check_for_message(void);
/* Wait loop on endpoint ready (message dest address is known) */
void OPENAMP_Wait_EndPointready(struct rpmsg_endpoint *rp_ept);
#ifdef __cplusplus
}
#endif
#endif /*__openamp_H */
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
/**
******************************************************************************
* @file openamp_conf.h
* @author MCD Application Team
* @brief Configuration file for OpenAMP MW
******************************************************************************
* @attention
*
* <h2><center>&copy; Copyright (c) 2019 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __OPENAMP_CONF__H__
#define __OPENAMP_CONF__H__
#ifdef __cplusplus
extern "C" {
#endif
/* Includes ------------------------------------------------------------------*/
#if defined (__LOG_TRACE_IO_) || defined(__LOG_UART_IO_)
#include "openamp_log.h"
#endif
/* ########################## Mailbox Interface Selection ############################## */
/**
* @brief This is the list of Mailbox interface to be used in the OpenAMP MW
* Please note that not all interfaces are supported by a STM32 device
*/
#define MAILBOX_IPCC_IF_ENABLED
//#define MAILBOX_HSEM_IF_ENABLED
/* Includes ------------------------------------------------------------------*/
/**
  * @brief Include Mailbox interface header file
  */
#ifdef MAILBOX_IPCC_IF_ENABLED
#include "mbox_ipcc.h"
#endif /* MAILBOX_IPCC_IF_ENABLED */
#ifdef MAILBOX_HSEM_IF_ENABLED
#include "mbox_hsem.h"
#endif /* MAILBOX_HSEM_IF_ENABLED */
/* ########################## Virtual Driver Module Selection ############################## */
/**
* @brief This is the list of modules to be used in the OpenAMP Virtual driver module
* Please note that virtual driver are not supported on all stm32 families
*/
//#define VIRTUAL_UART_MODULE_ENABLED
//#define VIRTUAL_I2C_MODULE_ENABLED
/* Includes ------------------------------------------------------------------*/
/**
* @brief Include Virtual Driver module's header file
*/
#ifdef VIRTUAL_UART_MODULE_ENABLED
#include "virt_uart.h"
#endif /* VIRTUAL_UART_MODULE_ENABLED */
#ifdef VIRTUAL_I2C_MODULE_ENABLED
#include "virt_i2c.h"
#endif /* VIRTUAL_I2C_MODULE_ENABLED */
/* ########################## Linux Master Selection ############################## */
/**
* @brief Due to Linux compatibility, it's important to distinguish if the MASTER is Linux or not.
* In that case, the LINUX_RPROC_MASTER define is required
*/
#define LINUX_RPROC_MASTER
/* USER CODE BEGIN INCLUDE */
/* USER CODE END INCLUDE */
/** @addtogroup OPENAMP_MW
* @{
*/
/** @defgroup OPENAMP_CONF OPENAMP_CONF
* @brief Configuration file for Openamp mw
* @{
*/
/** @defgroup OPENAMP_CONF_Exported_Variables OPENAMP_CONF_Exported_Variables
* @brief Public variables.
* @{
*/
/**
* @}
*/
/** @defgroup OPENAMP_CONF_Exported_Defines OPENAMP_CONF_Exported_Defines
* @brief Defines for configuration of the Openamp mw
* @{
*/
#if defined (__ICCARM__)
/*
* For IAR, the .icf file should contain the following lines:
* define symbol __OPENAMP_region_start__ = BASE_ADDRESS; (0x38000400 for example)
* define symbol __OPENAMP_region_size__ = MEM_SIZE; (0xB000 as example)
*
* export symbol __OPENAMP_region_start__;
* export symbol __OPENAMP_region_size__;
*/
extern const uint32_t __OPENAMP_region_start__;
extern const uint8_t __OPENAMP_region_size__;
#define SHM_START_ADDRESS ((metal_phys_addr_t)&__OPENAMP_region_start__)
#define SHM_SIZE ((size_t)&__OPENAMP_region_size__)
#elif defined(__CC_ARM)
/*
* For MDK-ARM, the scatter file .sct should contain the following line:
* LR_IROM1 .... {
* ...
* __OpenAMP_SHMEM__ 0x38000400 EMPTY 0x0000B000 {} ; Shared Memory area used by OpenAMP
* }
*
*/
extern unsigned int Image$$__OpenAMP_SHMEM__$$Base;
extern unsigned int Image$$__OpenAMP_SHMEM__$$ZI$$Length;
#define SHM_START_ADDRESS (unsigned int)&Image$$__OpenAMP_SHMEM__$$Base
#define SHM_SIZE ((size_t)&Image$$__OpenAMP_SHMEM__$$ZI$$Length)
#else
/*
* for GCC add the following content to the .ld file:
* MEMORY
* {
* ...
* OPEN_AMP_SHMEM (xrw) : ORIGIN = 0x38000400, LENGTH = 63K
* }
* __OPENAMP_region_start__ = ORIGIN(OPEN_AMP_SHMEM);
* __OPENAMP_region_end__ = ORIGIN(OPEN_AMP_SHMEM) + LENGTH(OPEN_AMP_SHMEM);
*
* using the LENGTH(OPEN_AMP_SHMEM) to set the SHM_SIZE lead to a crash thus we
* use the start and end address.
*/
extern int __OPENAMP_region_start__[]; /* defined by linker script */
extern int __OPENAMP_region_end__[]; /* defined by linker script */
#define SHM_START_ADDRESS ((metal_phys_addr_t)__OPENAMP_region_start__)
#define SHM_SIZE (size_t)((void *)__OPENAMP_region_end__ - (void *) __OPENAMP_region_start__)
#endif
#if defined LINUX_RPROC_MASTER
#define VRING_RX_ADDRESS -1 /* allocated by Master processor: CA7 */
#define VRING_TX_ADDRESS -1 /* allocated by Master processor: CA7 */
#define VRING_BUFF_ADDRESS -1 /* allocated by Master processor: CA7 */
#define VRING_ALIGNMENT 16 /* fixed to match with linux constraint */
#define VRING_NUM_BUFFS 16 /* number of rpmsg buffer */
#else
#define VRING_RX_ADDRESS 0x10040000 /* allocated by Master processor: CA7 */
#define VRING_TX_ADDRESS 0x10040400 /* allocated by Master processor: CA7 */
#define VRING_BUFF_ADDRESS 0x10040800 /* allocated by Master processor: CA7 */
#define VRING_ALIGNMENT 16 /* fixed to match the 4k page alignment requested by linux */
#define VRING_NUM_BUFFS 16 /* number of rpmsg buffer */
#endif
/* Fixed parameter */
#define NUM_RESOURCE_ENTRIES 2
#define VRING_COUNT 2
#define VDEV_ID 0xFF
#define VRING0_ID 0 /* VRING0 ID (master to remote) fixed to 0 for linux compatibility*/
#define VRING1_ID 1 /* VRING1 ID (remote to master) fixed to 1 for linux compatibility */
/**
* @}
*/
/** @defgroup OPENAMP_CONF_Exported_Macros OPENAMP_CONF_Exported_Macros
* @brief Aliases.
* @{
*/
/* DEBUG macros */
#if defined (__LOG_TRACE_IO_) || defined(__LOG_UART_IO_)
#define OPENAMP_log_dbg log_dbg
#define OPENAMP_log_info log_info
#define OPENAMP_log_warn log_warn
#define OPENAMP_log_err log_err
#else
#define OPENAMP_log_dbg(...)
#define OPENAMP_log_info(...)
#define OPENAMP_log_warn(...)
#define OPENAMP_log_err(...)
#endif
/**
* @}
*/
/** @defgroup OPENAMP_CONF_Exported_Types OPENAMP_CONF_Exported_Types
* @brief Types.
* @{
*/
/**
* @}
*/
/** @defgroup OPENAMP_CONF_Exported_FunctionsPrototype OPENAMP_CONF_Exported_FunctionsPrototype
* @brief Declaration of public functions for OpenAMP mw.
* @{
*/
/* Exported functions -------------------------------------------------------*/
/**
* @}
*/
/**
* @}
*/
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* __OPENAMP_CONF__H__ */
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
/**
******************************************************************************
* @file log.h
* @author MCD Application Team
* @brief logging services
******************************************************************************
*
* @attention
*
* <h2><center>&copy; Copyright (c) 2019 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
*
******************************************************************************
*/
/** @addtogroup LOG
* @{
*/
/** @addtogroup stm32mp1xx_Log
* @{
*/
/**
* @brief Define to prevent recursive inclusion
*/
#ifndef __LOG_STM32MP1XX_H
#define __LOG_STM32MP1XX_H
#ifdef __cplusplus
extern "C" {
#endif
/** @addtogroup STM32MP1xx_Log_Includes
* @{
*/
#include "stm32mp1xx_hal.h"
/**
* @}
*/
/** @addtogroup STM32MP1xx_Log_Exported_Constants
* @{
*/
#if defined (__LOG_TRACE_IO_)
#define SYSTEM_TRACE_BUF_SZ 2048
#endif
#define LOGQUIET 0
#define LOGERR 1
#define LOGWARN 2
#define LOGINFO 3
#define LOGDBG 4
#ifndef LOGLEVEL
#define LOGLEVEL LOGINFO
#endif
/**
* @}
*/
/** @addtogroup STM32MP1xx_Log_Exported_types
* @{
*/
#if defined (__LOG_TRACE_IO_)
extern char system_log_buf[SYSTEM_TRACE_BUF_SZ]; /*!< buffer for debug traces */
#endif /* __LOG_TRACE_IO_ */
/**
* @}
*/
/** @addtogroup STM32MP1xx_Log_Exported_Macros
* @{
*/
#if defined (__LOG_TRACE_IO_) || defined(__LOG_UART_IO_)
#if LOGLEVEL >= LOGDBG
#define log_dbg(fmt, ...) printf("[%05ld.%03ld][DBG ]" fmt, HAL_GetTick()/1000, HAL_GetTick() % 1000, ##__VA_ARGS__)
#else
#define log_dbg(fmt, ...)
#endif
#if LOGLEVEL >= LOGINFO
#define log_info(fmt, ...) printf("[%05ld.%03ld][INFO ]" fmt, HAL_GetTick()/1000, HAL_GetTick() % 1000, ##__VA_ARGS__)
#else
#define log_info(fmt, ...)
#endif
#if LOGLEVEL >= LOGWARN
#define log_warn(fmt, ...) printf("[%05ld.%03ld][WARN ]" fmt, HAL_GetTick()/1000, HAL_GetTick() % 1000, ##__VA_ARGS__)
#else
#define log_warn(fmt, ...)
#endif
#if LOGLEVEL >= LOGERR
#define log_err(fmt, ...) printf("[%05ld.%03ld][ERR ]" fmt, HAL_GetTick()/1000, HAL_GetTick() % 1000, ##__VA_ARGS__)
#else
#define log_err(fmt, ...)
#endif
#else
#define log_dbg(fmt, ...)
#define log_info(fmt, ...)
#define log_warn(fmt, ...)
#define log_err(fmt, ...)
#endif /* __LOG_TRACE_IO_ */
/**
* @}
*/
/** @addtogroup STM32MP1xx_Log_Exported_Functions
* @{
*/
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /*__LOG_STM32MP1XX_H */
/**
* @}
*/
/**
* @}
*/
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
/*
* Copyright (c) 2019 STMicroelectronics.
* All rights reserved.
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
*/
/* This file populates resource table for BM remote
* for use by the Linux Master */
#ifndef RSC_TABLE_H_
#define RSC_TABLE_H_
#include "openamp/open_amp.h"
#include "openamp_conf.h"
/* Place resource table in special ELF section */
//#define __section_t(S) __attribute__((__section__(#S)))
//#define __resource __section_t(.resource_table)
/* Resource table for the given remote.
 * Layout shared with the Linux remoteproc master: a header (version/num/
 * reserved/offset[]) followed by the entries listed in offset[] — here the
 * rpmsg vdev (with its two vrings) and a trace-buffer entry. */
struct shared_resource_table {
  unsigned int version;                      /* resource table format version (set to 1 below) */
  unsigned int num;                          /* number of valid entries in offset[] */
  unsigned int reserved[2];                  /* initialized to zero */
  unsigned int offset[NUM_RESOURCE_ENTRIES]; /* byte offsets of the entries below (offsetof) */
  /* text carveout entry */
  /* rpmsg vdev entry */
  struct fw_rsc_vdev vdev;                   /* virtio rpmsg device descriptor */
  struct fw_rsc_vdev_vring vring0;           /* vring 0: master -> remote */
  struct fw_rsc_vdev_vring vring1;           /* vring 1: remote -> master */
  struct fw_rsc_trace cm_trace;              /* CM4 log buffer trace entry */
};
void resource_table_init(int RPMsgRole, void **table_ptr, int *length);
#endif /* RSC_TABLE_H_ */
/**
******************************************************************************
* @file mbox_ipcc.c
* @author MCD Application Team
* @brief This file provides code for the configuration
* of the mailbox_ipcc_if.c MiddleWare.
******************************************************************************
* @attention
*
* <h2><center>&copy; Copyright (c) 2019 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
/*
* Channel direction and usage:
*
* ======== <-- new msg ---=============--------<------ =======
* || || || CHANNEL 1 || || ||
* || A7 || ------->-------=============--- buf free--> || M4 ||
* || || || ||
* ||master|| <-- buf free---=============--------<------ ||slave||
* || || || CHANNEL 2 || || ||
* ======== ------->-------=============----new msg --> =======
*/
/* Includes ------------------------------------------------------------------*/
#include "openamp/open_amp.h"
#include "stm32mp1xx_hal.h"
#include "openamp_conf.h"
/* Within 'USER CODE' section, code will be kept by default at each generation */
/* USER CODE BEGIN 0 */
/* USER CODE END 0 */
/* Private define ------------------------------------------------------------*/
#define MASTER_CPU_ID 0
#define REMOTE_CPU_ID 1
#define IPCC_CPU_A7 MASTER_CPU_ID
#define IPCC_CPU_M4 REMOTE_CPU_ID
#define RX_NO_MSG 0
#define RX_NEW_MSG 1
#define RX_BUF_FREE 2
/* Private variables ---------------------------------------------------------*/
extern IPCC_HandleTypeDef hipcc;
int msg_received_ch1 = RX_NO_MSG;
int msg_received_ch2 = RX_NO_MSG;
uint32_t vring0_id = 0; /* used for channel 1 */
uint32_t vring1_id = 1; /* used for channel 2 */
/* Private function prototypes -----------------------------------------------*/
void IPCC_channel1_callback(IPCC_HandleTypeDef * hipcc, uint32_t ChannelIndex, IPCC_CHANNELDirTypeDef ChannelDir);
void IPCC_channel2_callback(IPCC_HandleTypeDef * hipcc, uint32_t ChannelIndex, IPCC_CHANNELDirTypeDef ChannelDir);
/**
* @brief Initialize MAILBOX with IPCC peripheral
* @param None
* @retval : Operation result
*/
int MAILBOX_Init(void)
{
if (HAL_IPCC_ActivateNotification(&hipcc, IPCC_CHANNEL_1, IPCC_CHANNEL_DIR_RX,
IPCC_channel1_callback) != HAL_OK) {
OPENAMP_log_err("%s: ch_1 RX fail\n", __func__);
return -1;
}
if (HAL_IPCC_ActivateNotification(&hipcc, IPCC_CHANNEL_2, IPCC_CHANNEL_DIR_RX,
IPCC_channel2_callback) != HAL_OK) {
OPENAMP_log_err("%s: ch_2 RX fail\n", __func__);
return -1;
}
return 0;
}
/**
  * @brief  Poll the flags set by the IPCC RX callbacks and forward pending
  *         events to the OpenAMP framework for virtqueue processing.
  *         Channel 1 (buffer free) is serviced before channel 2 (new message);
  *         at most one event is handled per call.
  * @param  vdev: virtio device to notify
  * @retval 0 when an event was processed, -1 when nothing was pending
  */
int MAILBOX_Poll(struct virtio_device *vdev)
{
  /* If we got an interrupt, ask for the corresponding virtqueue processing */
  if (msg_received_ch1 == RX_BUF_FREE)
  {
    OPENAMP_log_dbg("Running virt0 (ch_1 buf free)\r\n");
    rproc_virtio_notified(vdev, VRING0_ID);
    msg_received_ch1 = RX_NO_MSG;
    return 0;
  }

  if (msg_received_ch2 == RX_NEW_MSG)
  {
    OPENAMP_log_dbg("Running virt1 (ch_2 new msg)\r\n");
    rproc_virtio_notified(vdev, VRING1_ID);
    msg_received_ch2 = RX_NO_MSG;

    /* The OpenAMP framework does not notify for free buf: do it here */
    rproc_virtio_notified(NULL, VRING1_ID);
    return 0;
  }

  return -1;
}
/**
  * @brief  Callback invoked by the OpenAMP MW after virtqueue processing:
  *         kicks the master processor over the IPCC channel that matches the
  *         given vring.
  * @param  priv: unused private context
  * @param  id: vring identifier (VRING0_ID or VRING1_ID)
  * @retval 0 on success, -1 for an unknown vring id
  */
int MAILBOX_Notify(void *priv, uint32_t id)
{
  uint32_t channel;

  (void)priv;

  /* Called after virtqueue processing: time to inform the remote */
  switch (id)
  {
  case VRING0_ID:
    channel = IPCC_CHANNEL_1;
    OPENAMP_log_dbg("Send msg on ch_1\r\n");
    break;
  case VRING1_ID:
    /* Note: the OpenAMP framework never notifies this */
    channel = IPCC_CHANNEL_2;
    OPENAMP_log_dbg("Send 'buff free' on ch_2\r\n");
    break;
  default:
    OPENAMP_log_err("invalid vring (%d)\r\n", (int)id);
    return -1;
  }

  /* Check that the channel is free (otherwise wait until it is) */
  if (HAL_IPCC_GetChannelStatus(&hipcc, channel, IPCC_CHANNEL_DIR_TX) == IPCC_CHANNEL_STATUS_OCCUPIED)
  {
    OPENAMP_log_dbg("Waiting for channel to be freed\r\n");
    while (HAL_IPCC_GetChannelStatus(&hipcc, channel, IPCC_CHANNEL_DIR_TX) == IPCC_CHANNEL_STATUS_OCCUPIED)
      ;
  }

  /* Inform A7 (either new message, or buf free) */
  HAL_IPCC_NotifyCPU(&hipcc, channel, IPCC_CHANNEL_DIR_TX);

  return 0;
}
/* Private function ---------------------------------------------------------*/
/* IPCC interrupt callback, channel 1: the master processor reports that some
 * TX buffers were released. Records the event for MAILBOX_Poll() and acks it. */
void IPCC_channel1_callback(IPCC_HandleTypeDef * hipcc,
                            uint32_t ChannelIndex, IPCC_CHANNELDirTypeDef ChannelDir)
{
  /* Overrun diagnostic: the previous event was not yet consumed by MAILBOX_Poll() */
  if (msg_received_ch1 != RX_NO_MSG)
  {
    OPENAMP_log_dbg("IPCC_channel1_callback: previous IRQ not treated (status = %d)\r\n", msg_received_ch1);
  }

  msg_received_ch1 = RX_BUF_FREE;

  /* Inform A7 that we have received the 'buff free' msg */
  OPENAMP_log_dbg("Ack 'buff free' message on ch1\r\n");
  HAL_IPCC_NotifyCPU(hipcc, ChannelIndex, IPCC_CHANNEL_DIR_RX);
}
/* IPCC interrupt callback, channel 2: a new message arrived from the master
 * processor. Records the event for MAILBOX_Poll() and acks it. */
void IPCC_channel2_callback(IPCC_HandleTypeDef * hipcc,
                            uint32_t ChannelIndex, IPCC_CHANNELDirTypeDef ChannelDir)
{
  /* Overrun diagnostic: the previous event was not yet consumed by MAILBOX_Poll() */
  if (msg_received_ch2 != RX_NO_MSG)
  {
    OPENAMP_log_dbg("IPCC_channel2_callback: previous IRQ not treated (status = %d)\r\n", msg_received_ch2);
  }

  msg_received_ch2 = RX_NEW_MSG;

  /* Inform A7 that we have received the new msg */
  OPENAMP_log_dbg("Ack new message on ch2\r\n");
  HAL_IPCC_NotifyCPU(hipcc, ChannelIndex, IPCC_CHANNEL_DIR_RX);
}
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
/**
******************************************************************************
* @file openamp.c
* @author MCD Application Team
* @brief Code for openamp applications
******************************************************************************
* @attention
*
* <h2><center>&copy; Copyright (c) 2019 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
#include "openamp.h"
#include "rsc_table.h"
#include "metal/sys.h"
#include "metal/device.h"
/* Private define ------------------------------------------------------------*/
#define SHM_DEVICE_NAME "STM32_SHM"
/* Globals */
static struct metal_io_region *shm_io;
static struct metal_io_region *rsc_io;
static struct shared_resource_table *rsc_table;
static struct rpmsg_virtio_shm_pool shpool;
static struct rpmsg_virtio_device rvdev;
static metal_phys_addr_t shm_physmap;
struct metal_device shm_device = {
.name = SHM_DEVICE_NAME,
.num_regions = 2,
.regions = {
{.virt = NULL}, /* shared memory */
{.virt = NULL}, /* rsc_table memory */
},
.node = { NULL },
.irq_num = 0,
.irq_info = NULL
};
/**
  * @brief  Initialize the libmetal layer and the two shared I/O regions used
  *         by OpenAMP: region 0 maps the inter-core shared memory, region 1
  *         maps the resource table.
  * @param  RPMsgRole: role forwarded to resource_table_init()
  * @retval 0 on success, a metal error code or -1 on failure
  */
static int OPENAMP_shmem_init(int RPMsgRole)
{
  int status = 0;
  struct metal_device *device;
  struct metal_init_params metal_params = METAL_INIT_DEFAULTS;
  void* rsc_tab_addr;
  int rsc_size;

  /* libmetal must be initialized before any metal_* call below */
  metal_init(&metal_params);

  status = metal_register_generic_device(&shm_device);
  if (status != 0) {
    return status;
  }

  status = metal_device_open("generic", SHM_DEVICE_NAME, &device);
  if (status != 0) {
    return status;
  }

  /* Region 0: shared memory, identity virtual->physical mapping */
  shm_physmap = SHM_START_ADDRESS;
  metal_io_init(&device->regions[0], (void *)SHM_START_ADDRESS, &shm_physmap,
                SHM_SIZE, -1, 0, NULL);

  shm_io = metal_device_io_region(device, 0);
  if (shm_io == NULL) {
    return -1;
  }

  /* Region 1: resource table, whose location/size comes from resource_table_init() */
  resource_table_init(RPMsgRole, &rsc_tab_addr, &rsc_size);
  rsc_table = (struct shared_resource_table *)rsc_tab_addr;
  if (!rsc_table)
  {
    return -1;
  }

  metal_io_init(&device->regions[1], rsc_table,
               (metal_phys_addr_t *)rsc_table, rsc_size, -1U, 0, NULL);

  rsc_io = metal_device_io_region(device, 1);
  if (rsc_io == NULL) {
    return -1;
  }

  return 0;
}
/**
  * @brief  Initialize the OpenAMP framework: IPCC mailbox, libmetal shared
  *         memory, the rpmsg virtio device and both vrings described in the
  *         resource table.
  * @param  RPMsgRole: virtio device role (master or remote) passed to
  *                    rproc_virtio_create_vdev()
  * @param  ns_bind_cb: name-service announcement callback, may be NULL
  * @retval 0 on success, a negative/non-zero status code on failure
  */
int MX_OPENAMP_Init(int RPMsgRole, rpmsg_ns_bind_cb ns_bind_cb)
{
  struct fw_rsc_vdev_vring *vring_rsc;
  struct virtio_device *vdev;
  int status = 0;

  /* The vdev notifications go through the mailbox: fail early if it cannot
   * be set up (the return value was previously ignored) */
  status = MAILBOX_Init();
  if (status)
  {
    return status;
  }

  /* Libmetal initialization */
  status = OPENAMP_shmem_init(RPMsgRole);
  if (status)
  {
    return status;
  }

  vdev = rproc_virtio_create_vdev(RPMsgRole, VDEV_ID, &rsc_table->vdev,
                                  rsc_io, NULL, MAILBOX_Notify, NULL);
  if (vdev == NULL)
  {
    return -1;
  }

  /* Block until the remote side has flagged the vdev as ready */
  rproc_virtio_wait_remote_ready(vdev);

  /* Vring 0: parameters taken from the (master-filled) resource table */
  vring_rsc = &rsc_table->vring0;
  status = rproc_virtio_init_vring(vdev, 0, vring_rsc->notifyid,
                                   (void *)vring_rsc->da, shm_io,
                                   vring_rsc->num, vring_rsc->align);
  if (status != 0)
  {
    return status;
  }

  /* Vring 1: same, for the opposite direction */
  vring_rsc = &rsc_table->vring1;
  status = rproc_virtio_init_vring(vdev, 1, vring_rsc->notifyid,
                                   (void *)vring_rsc->da, shm_io,
                                   vring_rsc->num, vring_rsc->align);
  if (status != 0)
  {
    return status;
  }

  rpmsg_virtio_init_shm_pool(&shpool, (void *)VRING_BUFF_ADDRESS,
                             (size_t)SHM_SIZE);

  /* Propagate the rpmsg layer status instead of assuming success */
  return rpmsg_init_vdev(&rvdev, vdev, ns_bind_cb, shm_io, &shpool);
}
/**
  * @brief  Deinitialize the OpenAMP framework: release the rpmsg virtio
  *         device and finish the libmetal environment.
  *         (Signature fixed to (void) to match the header prototype.)
  * @retval None
  */
void OPENAMP_DeInit(void)
{
  rpmsg_deinit_vdev(&rvdev);

  metal_finish();
}
/**
  * @brief  Reset an rpmsg endpoint structure to the unbound state: empty
  *         name, both addresses set to RPMSG_ADDR_ANY, no callbacks.
  * @param  ept: endpoint to initialize
  * @retval None
  */
void OPENAMP_init_ept(struct rpmsg_endpoint *ept)
{
  rpmsg_init_ept(ept, "", RPMSG_ADDR_ANY, RPMSG_ADDR_ANY, NULL, NULL);
}
/**
  * @brief  Create and register an rpmsg endpoint on the global rpmsg device.
  *         The local address is left to the framework (RPMSG_ADDR_ANY).
  * @param  ept: endpoint structure to fill in
  * @param  name: service name announced to the master
  * @param  dest: destination address on the remote side
  * @param  cb: message-received callback
  * @param  unbind_cb: callback invoked when the remote service goes away
  * @retval status returned by rpmsg_create_ept()
  */
int OPENAMP_create_endpoint(struct rpmsg_endpoint *ept, const char *name,
                            uint32_t dest, rpmsg_ept_cb cb,
                            rpmsg_ns_unbind_cb unbind_cb)
{
  int ret;

  ret = rpmsg_create_ept(ept, &rvdev.rdev, name, RPMSG_ADDR_ANY, dest, cb,
                         unbind_cb);

  return ret;
}
/**
  * @brief  Check once for a pending rpmsg event (non-blocking): delegates to
  *         MAILBOX_Poll() on the global rpmsg virtio device.
  * @retval None
  */
void OPENAMP_check_for_message(void)
{
  MAILBOX_Poll(rvdev.vdev);
}
/**
  * @brief  Busy-wait until the given endpoint is ready, i.e. its destination
  *         address has been learned, polling the mailbox in between.
  * @param  rp_ept: endpoint to wait on
  * @retval None
  */
void OPENAMP_Wait_EndPointready(struct rpmsg_endpoint *rp_ept)
{
  for (;;)
  {
    if (is_rpmsg_ept_ready(rp_ept))
    {
      break;
    }
    MAILBOX_Poll(rvdev.vdev);
  }
}
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
/**
******************************************************************************
* @file log.c
* @author MCD Application Team
  * @brief  Logging services
*
* This file provides services for logging
*
******************************************************************************
*
* @attention
*
* <h2><center>&copy; Copyright (c) 2019 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
*
******************************************************************************
*/
/** @addtogroup LOG
* @{
*/
/** @addtogroup STM32MP1xx_log
* @{
*/
/** @addtogroup STM32MP1xx_Log_Private_Includes
* @{
*/
#include "openamp_log.h"
/**
* @}
*/
/** @addtogroup STM32MP1xx_Log_Private_TypesDefinitions
* @{
*/
/**
* @}
*/
/** @addtogroup STM32MP1xx_Log_Private_Defines
* @{
*/
/**
* @}
*/
#if defined (__LOG_TRACE_IO_)
char system_log_buf[SYSTEM_TRACE_BUF_SZ];
/* Append one character to the in-memory trace buffer, keeping the buffer
 * NUL-terminated so it can be read as a C string at any time. The write
 * position wraps to the start when the terminator would fall past the end. */
__weak void log_buff(int ch)
{
  static int write_pos = 0;

  if (write_pos + 1 >= SYSTEM_TRACE_BUF_SZ)
  {
    write_pos = 0;
  }

  system_log_buf[write_pos] = ch;
  system_log_buf[write_pos + 1] = '\0';
  write_pos++;
}
#endif
#if defined ( __CC_ARM) || (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
#define PUTCHAR_PROTOTYPE int stdout_putchar(int ch)
#elif __GNUC__
/* With GCC/RAISONANCE, small log_info (option LD Linker->Libraries->Small log_info
set to 'Yes') calls __io_putchar() */
#define PUTCHAR_PROTOTYPE int __attribute__(( weak )) __io_putchar(int ch)
#else
#define PUTCHAR_PROTOTYPE int __attribute__(( weak )) fputc(int ch, FILE *f)
#endif /* __GNUC__ */
#if defined (__LOG_UART_IO_) || defined (__LOG_TRACE_IO_)
/* Retargeted character output (signature selected per toolchain by the
 * PUTCHAR_PROTOTYPE macro above). Sends the character over the logging UART
 * and/or appends it to the trace buffer, depending on the enabled backends. */
PUTCHAR_PROTOTYPE
{
#if defined (__LOG_UART_IO_)
  extern UART_HandleTypeDef huart;
  /* Blocking transmit of the single character on the logging UART */
  HAL_UART_Transmit(&huart, (uint8_t *)&ch, 1, HAL_MAX_DELAY);
#endif
#if defined (__LOG_TRACE_IO_)
  /* Mirror the character into the shared trace buffer */
  log_buff(ch);
#endif
  return ch;
}
#else
/* No printf output */
#endif
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
/**
******************************************************************************
* @file rsc_table.c
* @author MCD Application Team
  * @brief  Resource table
*
* This file provides a default resource table requested by remote proc to
* load the elf file. It also allows to add debug trace using a shared buffer.
*
******************************************************************************
* @attention
*
* <h2><center>&copy; Copyright (c) 2019 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
/** @addtogroup RSC_TABLE
* @{
*/
/** @addtogroup resource_table
* @{
*/
/** @addtogroup resource_table_Private_Includes
* @{
*/
#if defined(__ICCARM__) || defined (__CC_ARM)
#include <stddef.h> /* needed for offsetof definition*/
#endif
#include "rsc_table.h"
#include "openamp/open_amp.h"
/**
* @}
*/
/** @addtogroup resource_table_Private_TypesDefinitions
* @{
*/
/**
* @}
*/
/** @addtogroup resource_table_Private_Defines
* @{
*/
/* Place resource table in special ELF section */
#if defined(__GNUC__)
#define __section_t(S) __attribute__((__section__(#S)))
#define __resource __section_t(.resource_table)
#endif
#if defined (LINUX_RPROC_MASTER)
#ifdef VIRTIO_MASTER_ONLY
#define CONST
#else
#define CONST const
#endif
#else
#define CONST
#endif
#define RPMSG_IPU_C0_FEATURES 1
#define VRING_COUNT 2
/* VirtIO rpmsg device id */
#define VIRTIO_ID_RPMSG_ 7
#if defined (__LOG_TRACE_IO_)
extern char system_log_buf[];
#endif
/* Definition of the shared resource table.
 * Toolchain-dependent: with GCC (and neither Keil nor a Linux master) the
 * table is only declared here — left uninitialized and volatile — and filled
 * at runtime by resource_table_init(); with IAR/Keil or a Linux master it is
 * statically initialized below. */
#if defined(__GNUC__)
#if !defined (__CC_ARM) && !defined (LINUX_RPROC_MASTER)
/* Since GCC is not initializing the resource_table at startup, it is declared as volatile to avoid compiler optimization
 * for the CM4 (see resource_table_init() below)
 */
volatile struct shared_resource_table __resource __attribute__((used)) resource_table;
#else
CONST struct shared_resource_table __resource __attribute__((used)) resource_table = {
#endif
#elif defined(__ICCARM__)
__root CONST struct shared_resource_table resource_table @ ".resource_table" = {
#endif
#if defined(__ICCARM__) || defined (__CC_ARM) || defined (LINUX_RPROC_MASTER)
  .version = 1,
  /* One entry (vdev) normally; two when the trace buffer is exported */
#if defined (__LOG_TRACE_IO_)
  .num = 2,
#else
  .num = 1,
#endif
  .reserved = {0, 0},
  .offset = {
    offsetof(struct shared_resource_table, vdev),
    offsetof(struct shared_resource_table, cm_trace),
  },
  /* Virtio device entry */
  .vdev= {
    RSC_VDEV, VIRTIO_ID_RPMSG_, 0, RPMSG_IPU_C0_FEATURES, 0, 0, 0,
    VRING_COUNT, {0, 0},
  },
  /* Vring rsc entry - part of vdev rsc entry */
  .vring0 = {VRING_TX_ADDRESS, VRING_ALIGNMENT, VRING_NUM_BUFFS, VRING0_ID, 0},
  .vring1 = {VRING_RX_ADDRESS, VRING_ALIGNMENT, VRING_NUM_BUFFS, VRING1_ID, 0},
#if defined (__LOG_TRACE_IO_)
  /* Trace entry: lets the master dump the CM4 log buffer */
  .cm_trace = {
    RSC_TRACE,
    (uint32_t)system_log_buf, SYSTEM_TRACE_BUF_SZ, 0, "cm4_log",
  },
#endif
} ;
#endif
/**
  * @brief  Return the address and size of the resource table, initializing it
  *         at runtime where the static initializer above is not used (GCC
  *         builds without a Linux master).
  * @param  RPMsgRole: unused here (role is fixed at build time)
  * @param  table_ptr: out, receives the address of the resource table
  * @param  length: out, receives sizeof(resource_table)
  * @retval None
  */
void resource_table_init(int RPMsgRole, void **table_ptr, int *length)
{
#if !defined (LINUX_RPROC_MASTER)
#if defined (__GNUC__) && ! defined (__CC_ARM)
#ifdef VIRTIO_MASTER_ONLY
  /*
   * Currently the GCC linker doesn't initialize the resource_table global variable at startup
   * it is done here by the master application.
   */
  memset(&resource_table, '\0', sizeof(struct shared_resource_table));
  resource_table.num = 1;
  resource_table.version = 1;
  resource_table.offset[0] = offsetof(struct shared_resource_table, vdev);
  resource_table.vring0.da = VRING_TX_ADDRESS;
  resource_table.vring0.align = VRING_ALIGNMENT;
  resource_table.vring0.num = VRING_NUM_BUFFS;
  resource_table.vring0.notifyid = VRING0_ID;
  resource_table.vring1.da = VRING_RX_ADDRESS;
  resource_table.vring1.align = VRING_ALIGNMENT;
  resource_table.vring1.num = VRING_NUM_BUFFS;
  resource_table.vring1.notifyid = VRING1_ID;
  resource_table.vdev.type = RSC_VDEV;
  resource_table.vdev.id = VIRTIO_ID_RPMSG_;
  resource_table.vdev.num_of_vrings=VRING_COUNT;
  resource_table.vdev.dfeatures = RPMSG_IPU_C0_FEATURES;
#else
  /* For the slave application let's wait until the resource_table is correctly initialized */
  /* NOTE(review): busy-waits on vring1.da being written by the master — relies on
   * the table being volatile in this configuration (see declaration above) */
  while(resource_table.vring1.da != VRING_RX_ADDRESS)
  {
  }
#endif
#endif
#endif
  (void)RPMsgRole;
  *length = sizeof(resource_table);
  *table_ptr = (void *)&resource_table;
}
...@@ -73,7 +73,14 @@ void HAL_MspInit(void) ...@@ -73,7 +73,14 @@ void HAL_MspInit(void)
/* System interrupt init*/ /* System interrupt init*/
/* USER CODE BEGIN MspInit 1 */ /* USER CODE BEGIN MspInit 1 */
#if !defined(BSP_USING_OPENAMP)
__HAL_RCC_SYSRAM_CLK_ENABLE();
__HAL_RCC_RETRAM_CLK_ENABLE();
#endif
HAL_NVIC_SetPriority(RCC_WAKEUP_IRQn, 0, 0);
HAL_NVIC_EnableIRQ(RCC_WAKEUP_IRQn);
__HAL_RCC_ENABLE_IT(RCC_IT_WKUP);
/* USER CODE END MspInit 1 */ /* USER CODE END MspInit 1 */
} }
......
...@@ -28,6 +28,11 @@ menu "Onboard Peripheral Drivers" ...@@ -28,6 +28,11 @@ menu "Onboard Peripheral Drivers"
select RT_MTD_NAND_DEBUG select RT_MTD_NAND_DEBUG
default n default n
config BSP_USING_OPENAMP
bool "Enable OpenAMP"
select RT_USING_OPENAMP
default n
endmenu endmenu
menu "On-chip Peripheral Drivers" menu "On-chip Peripheral Drivers"
......
...@@ -19,10 +19,32 @@ if GetDepend(['BSP_USING_PMIC']): ...@@ -19,10 +19,32 @@ if GetDepend(['BSP_USING_PMIC']):
if GetDepend(['BSP_USING_NAND']): if GetDepend(['BSP_USING_NAND']):
src += Glob('ports/drv_nand.c') src += Glob('ports/drv_nand.c')
if GetDepend(['BSP_USING_OPENAMP']):
src += Glob('CubeMX_Config/CM4/Src/ipcc.c')
src += Glob('CubeMX_Config/CM4/Src/openamp.c')
src += Glob('CubeMX_Config/CM4/Src/openamp_log.c')
src += Glob('CubeMX_Config/CM4/Src/mbox_ipcc.c')
src += Glob('CubeMX_Config/CM4/Src/rsc_table.c')
src += Glob('ports/OpenAMP/libmetal/lib/*.c')
src += Glob('ports/OpenAMP/libmetal/lib/system/generic/*.c')
src += Glob('ports/OpenAMP/libmetal/lib/system/generic/cortexm/*.c')
src += Glob('ports/OpenAMP/open-amp/lib/rpmsg/*.c')
src += Glob('ports/OpenAMP/open-amp/lib/remoteproc/*.c')
src += Glob('ports/OpenAMP/open-amp/lib/virtio/*.c')
src += Glob('ports/OpenAMP/virtual_driver/*.c')
src += Glob('ports/OpenAMP/drv_openamp.c')
path = [cwd] path = [cwd]
path += [cwd + '/CubeMX_Config/CM4/Inc'] path += [cwd + '/CubeMX_Config/CM4/Inc']
path += [cwd + '/ports'] path += [cwd + '/ports']
if GetDepend(['BSP_USING_OPENAMP']):
path += [cwd + '/ports/OpenAMP']
path += [cwd + '/ports/OpenAMP/open-amp/lib/include']
path += [cwd + '/ports/OpenAMP/libmetal/lib/include']
path += [cwd + '/ports/OpenAMP/virtual_driver']
path += [cwd + '/CubeMX_Config/CM4/Inc']
startup_path_prefix = SDK_LIB startup_path_prefix = SDK_LIB
if rtconfig.CROSS_TOOL == 'gcc': if rtconfig.CROSS_TOOL == 'gcc':
...@@ -34,7 +56,7 @@ elif rtconfig.CROSS_TOOL == 'iar': ...@@ -34,7 +56,7 @@ elif rtconfig.CROSS_TOOL == 'iar':
src = list(set(src)) src = list(set(src))
CPPDEFINES = ['CORE_CM4', 'STM32MP157Axx', 'USE_HAL_DRIVER'] CPPDEFINES = ['CORE_CM4','NO_ATOMIC_64_SUPPORT','METAL_INTERNAL','METAL_MAX_DEVICE_REGIONS=2','VIRTIO_SLAVE_ONLY','STM32MP157Axx','__LOG_TRACE_IO_']
group = DefineGroup('Drivers', src, depend = [''], CPPPATH = path, CPPDEFINES = CPPDEFINES) group = DefineGroup('Drivers', src, depend = [''], CPPPATH = path, CPPDEFINES = CPPDEFINES)
......
...@@ -22,25 +22,25 @@ ...@@ -22,25 +22,25 @@
extern "C" { extern "C" {
#endif #endif
#define STM32_FLASH_START_ADRESS ((uint32_t)0x10000000) #define STM32_FLASH_START_ADRESS ((uint32_t)0x10000000)
#define STM32_FLASH_SIZE (256 * 1024) #if defined(BSP_USING_OPENAMP)
#define STM32_FLASH_SIZE (64 * 1024)
#else
#define STM32_FLASH_SIZE (256 * 1024)
#endif
#define STM32_FLASH_END_ADDRESS ((uint32_t)(STM32_FLASH_START_ADRESS + STM32_FLASH_SIZE)) #define STM32_FLASH_END_ADDRESS ((uint32_t)(STM32_FLASH_START_ADRESS + STM32_FLASH_SIZE))
#define STM32_SRAM_SIZE (128)
#define STM32_SRAM_END ((uint32_t)0x10040000 + (STM32_SRAM_SIZE * 1024)) #if defined(BSP_USING_OPENAMP)
#define STM32_SRAM_BEGIN (uint32_t)0x10020000
#if defined(__CC_ARM) || defined(__CLANG_ARM)
extern int Image$$RW_IRAM1$$ZI$$Limit;
#define HEAP_BEGIN (&Image$$RW_IRAM1$$ZI$$Limit)
#elif __ICCARM__
#pragma section="CSTACK"
#define HEAP_BEGIN (__segment_end("CSTACK"))
#else #else
extern int __bss_end__; #define STM32_SRAM_BEGIN (uint32_t)0x2FFF0000
#define HEAP_BEGIN (0x10040000 + 64 * 1024)
#endif #endif
#define STM32_SRAM_SIZE (64)
#define STM32_SRAM_END (STM32_SRAM_BEGIN + (STM32_SRAM_SIZE * 1024))
#define HEAP_END STM32_SRAM_END #define HEAP_BEGIN STM32_SRAM_BEGIN
#define HEAP_END STM32_SRAM_END
void SystemClock_Config(void); void SystemClock_Config(void);
......
...@@ -4,33 +4,42 @@ ...@@ -4,33 +4,42 @@
/*-Specials-*/ /*-Specials-*/
define symbol __ICFEDIT_intvec_start__ = 0x00000000; define symbol __ICFEDIT_intvec_start__ = 0x00000000;
/*-Memory Regions-*/ /*-Memory Regions-*/
define symbol __ICFEDIT_region_ROM_start__ = 0x10000000; define symbol __ICFEDIT_region_text_start__ = 0x10000000;
define symbol __ICFEDIT_region_ROM_end__ = 0x1003FFFF; define symbol __ICFEDIT_region_text_end__ = 0x1001FFFF;
define symbol __ICFEDIT_region_RAM_start__ = 0x10050000; define symbol __ICFEDIT_region_data_start__ = 0x10030000;
define symbol __ICFEDIT_region_RAM_end__ = 0x1005FFFF; define symbol __ICFEDIT_region_data_end__ = 0x1003FFFF;
/*-Sizes-*/ /*-Sizes-*/
define symbol __ICFEDIT_size_cstack__ = 0x0400; define symbol __ICFEDIT_size_cstack__ = 0x400;
define symbol __ICFEDIT_size_heap__ = 0x0000; define symbol __ICFEDIT_size_heap__ = 0x000;
/**** End of ICF editor section. ###ICF###*/ /**** End of ICF editor section. ###ICF###*/
define memory mem with size = 4G;
define region ROM_region = mem:[from __ICFEDIT_region_ROM_start__ to __ICFEDIT_region_ROM_end__];
define region RAM_region = mem:[from __ICFEDIT_region_RAM_start__ to __ICFEDIT_region_RAM_end__];
define block CSTACK with alignment = 8, size = __ICFEDIT_size_cstack__ { }; define memory mem with size = 4G;
define region text_region = mem:[from __ICFEDIT_region_text_start__ to __ICFEDIT_region_text_end__];
define region data_region = mem:[from __ICFEDIT_region_data_start__ to __ICFEDIT_region_data_end__];
initialize by copy { readwrite }; keep { section .resource_table };
do not initialize { section .noinit }; ".resource_table" : place in data_region {section .resource_table};
/* Create region for OPENAMP */ /* Create region for OPENAMP */
/* !!! These 4 lines can be commented if OPENAMP is not used !!!*/ /* !!! These 4 lines can be commented if OPENAMP is not used !!!*/
define symbol __OPENAMP_region_start__ = 0x10040000; define symbol __OPENAMP_region_start__ = 0x10040000;
define symbol __OPENAMP_region_size__ = 0x8000; define symbol __OPENAMP_region_size__ = 0x8000;
export symbol __OPENAMP_region_start__; export symbol __OPENAMP_region_start__;
export symbol __OPENAMP_region_size__; export symbol __OPENAMP_region_size__;
place at address mem:__ICFEDIT_intvec_start__ { readonly section .intvec }; define symbol __SDMMC_region_start__ = 0x10048000;
define symbol __SDMMC_region_size__ = 0x1FFFF;
export symbol __SDMMC_region_start__;
export symbol __SDMMC_region_size__;
place in ROM_region { readonly }; define block CSTACK with alignment = 8, size = __ICFEDIT_size_cstack__ { };
place in RAM_region { readwrite, last block CSTACK}; define block HEAP with alignment = 8, size = __ICFEDIT_size_heap__ { };
\ No newline at end of file
initialize by copy { readwrite };
do not initialize { section .noinit};
place at address mem:__ICFEDIT_intvec_start__ { readonly section .intvec };
place in text_region { readonly };
place in data_region { readwrite,
block CSTACK, block HEAP};
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-06-24     thread-liu   first version
 */

/*
 * OpenAMP console driver for the STM32MP157 Cortex-M4 core.
 *
 * Registers an RT-Thread character device named "openamp" backed by an
 * RPMsg virtual UART, so the RT-Thread console can be reached from the
 * Cortex-A7 side.  Compiled only when BSP_USING_OPENAMP is enabled.
 */
#include <board.h>
#ifdef BSP_USING_OPENAMP
#include <drv_openamp.h>
#include <openamp.h>
#include <virt_uart.h>
#include <openamp/rpmsg_virtio.h>
//#define DRV_DEBUG
#define LOG_TAG "drv.openamp"
#include <drv_log.h>
/* Inter-processor communication controller handle; shared with the two
   IPCC IRQ handlers below. */
IPCC_HandleTypeDef hipcc;
/* RPMsg virtual UART endpoint shared with the remote (Cortex-A) side. */
static VIRT_UART_HandleTypeDef huart0;
/* Backing storage for the receive/transmit rings referenced from
   struct rt_openamp. */
static rt_uint8_t rx_buffer[MAX_BUFFER_SIZE];
static rt_uint8_t tx_buffer[MAX_BUFFER_SIZE];
/* Device object: rt_device header + ring-buffer state + access lock. */
struct rthw_openamp
{
    struct rt_device parent;
    struct rt_openamp serial;
    struct rt_semaphore sema;
};
/* The single "openamp" device instance registered with RT-Thread. */
static struct rthw_openamp dev_openamp;
/* IPCC RX interrupt entry: wrap the HAL handler with
   rt_interrupt_enter/leave so the RT-Thread scheduler knows this code
   runs in ISR context. */
void IPCC_RX1_IRQHandler(void)
{
    rt_interrupt_enter();
    HAL_IPCC_RX_IRQHandler(&hipcc);
    rt_interrupt_leave();
}
/* IPCC TX interrupt entry: wrap the HAL handler with
   rt_interrupt_enter/leave so the RT-Thread scheduler knows this code
   runs in ISR context. */
void IPCC_TX1_IRQHandler(void)
{
    rt_interrupt_enter();
    HAL_IPCC_TX_IRQHandler(&hipcc);
    rt_interrupt_leave();
}
/**
 * RPMsg virtual UART receive-complete callback.
 *
 * Copies the frame received from the remote core into the driver's
 * circular receive buffer.  Bytes that do not fit in the ring are
 * dropped.
 *
 * Fixes vs the original: the ring state is now sampled under the
 * semaphore (it is updated by _read() under the same lock), the write
 * index wraps on every byte instead of only once before the loop, and
 * the copy stops when the ring is full instead of overrunning it.
 *
 * @param huart virtual UART handle carrying pRxBuffPtr / RxXferSize
 */
void VIRT_UART0_RxCpltCallback(VIRT_UART_HandleTypeDef *huart)
{
    rt_uint16_t rx_size, i;
    rt_size_t count, size, offset;
    rt_uint8_t *buf = RT_NULL;
    struct rthw_openamp *device;

    device = (struct rthw_openamp *)rt_device_find("openamp");
    RT_ASSERT(device != RT_NULL);

    /* clamp the incoming burst to the ring capacity */
    rx_size = (huart->RxXferSize < MAX_BUFFER_SIZE) ? huart->RxXferSize : MAX_BUFFER_SIZE;

    /* sample and update the ring state under the lock shared with _read() */
    rt_sem_take(&device->sema, RT_WAITING_FOREVER);

    buf    = device->serial.rbuf;
    count  = device->serial.rbuf_count;
    size   = device->serial.rbuf_size;
    offset = device->serial.rbuf_start + count;
    if (offset >= size)
    {
        offset -= size;
    }

    /* copy byte by byte, wrapping the write index and stopping when the
       ring is full (excess bytes are silently dropped) */
    for (i = 0; i < rx_size && count < size; i++)
    {
        buf[offset++] = huart->pRxBuffPtr[i];
        if (offset >= size)
        {
            offset = 0;
        }
        count++;
    }

    device->serial.rbuf_count = count;

    rt_sem_release(&device->sema);
}
/**
 * rt_device init hook: reset the ring-buffer bookkeeping and create the
 * semaphore that serializes access to the rings.
 *
 * @param dev device object (actually a struct rthw_openamp)
 * @return RT_EOK on success, -RT_ERROR if the semaphore cannot be
 *         created (negative per RT-Thread convention; the original
 *         returned positive RT_ERROR, inconsistent with _read/_write)
 */
static rt_err_t _init(struct rt_device *dev)
{
    struct rthw_openamp *device;

    device = (struct rthw_openamp *)dev;
    RT_ASSERT(device != RT_NULL);

    device->serial.rbuf_start = 0;
    device->serial.rbuf_count = 0;
    device->serial.tbuf_start = 0;
    device->serial.tbuf_count = 0;

    device->serial.rbuf_size = MAX_BUFFER_SIZE;
    device->serial.tbuf_size = MAX_BUFFER_SIZE;

    device->serial.rbuf = rx_buffer;
    device->serial.tbuf = tx_buffer;

    /* binary lock guarding both rings and the virtual UART */
    if (rt_sem_init(&device->sema, "openamplock", 1, RT_IPC_FLAG_FIFO) != RT_EOK)
    {
        return -RT_ERROR;
    }

    return RT_EOK;
}
/**
 * rt_device read hook: pop up to @size bytes from the circular receive
 * buffer into @buffer.
 *
 * Fixes vs the original: the wrap test used '>' instead of '>=', so
 * when the read index reached rbuf_size it read one byte past the end
 * of the ring; the ring state is also now sampled under the semaphore
 * (the receive callback updates it under the same lock).
 *
 * @param dev    device object (actually a struct rthw_openamp)
 * @param pos    unused
 * @param buffer destination buffer
 * @param size   maximum number of bytes to copy
 * @return number of bytes copied, or -RT_ERROR when the ring is empty
 */
static rt_size_t _read(struct rt_device *dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    rt_size_t count, rbsize, offset;
    rt_uint8_t *buf = RT_NULL;
    rt_uint8_t *pBuffer = RT_NULL;
    rt_uint16_t i = 0;
    struct rthw_openamp *device;

    device = (struct rthw_openamp *)dev;
    RT_ASSERT(device != RT_NULL);

    pBuffer = (unsigned char *)buffer;

    rt_sem_take(&device->sema, RT_WAITING_FOREVER);

    count = device->serial.rbuf_count;
    buf   = device->serial.rbuf;

    if (count == 0)
    {
        rt_sem_release(&device->sema);
        return -RT_ERROR;
    }

    if (count >= size)
    {
        count = size;
    }

    offset = device->serial.rbuf_start;
    rbsize = device->serial.rbuf_size;

    for (i = 0; i < count; i++)
    {
        *pBuffer++ = buf[offset++];
        /* valid indices are 0 .. rbsize-1, so wrap at rbsize */
        if (offset >= rbsize)
        {
            offset = 0;
        }
    }

    device->serial.rbuf_start  = offset;
    device->serial.rbuf_count -= count;

    rt_sem_release(&device->sema);

    return count;
}
/* rt_device write hook: transmit @size bytes to the remote core over
 * the RPMsg virtual UART.  Transmissions are serialized with the same
 * semaphore that guards the ring buffers.  Returns @size on success or
 * -RT_ERROR when the virtual UART rejects the transfer. */
static rt_size_t _write(struct rt_device *dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
    struct rthw_openamp *device = (struct rthw_openamp *)dev;
    rt_err_t status;

    RT_ASSERT(device != RT_NULL);

    rt_sem_take(&device->sema, RT_WAITING_FOREVER);
    status = VIRT_UART_Transmit(&huart0, (uint8_t *)buffer, size);
    rt_sem_release(&device->sema);

    if (result_is_error(status))
    {
        return -RT_ERROR;
    }

    return size;
}
/**
 * Fill in the rt_device vtable for the OpenAMP console device and
 * register it with the RT-Thread device framework as a character
 * device under @name.
 *
 * @param openamp device object to register
 * @param name    device name (looked up later via rt_device_find)
 * @param flag    rt_device_register open flags
 * @param data    stored in user_data, unused by this driver
 * @return result of rt_device_register
 */
static rt_err_t rt_hw_openamp_register(struct rthw_openamp *openamp, const char *name, rt_uint32_t flag, void *data)
{
    struct rt_device *device;
    RT_ASSERT(openamp != RT_NULL);
    device = &(openamp->parent);
    device->type = RT_Device_Class_Char;
    device->rx_indicate = RT_NULL;
    device->tx_complete = RT_NULL;
    /* only init/read/write are implemented; open/close/control are no-ops */
    device->init = _init;
    device->open = RT_NULL;
    device->close = RT_NULL;
    device->read = _read;
    device->write = _write;
    device->control = RT_NULL;
    device->user_data = data;
    /* register a character device */
    return rt_device_register(device, name, flag);
}
/**
 * Bring up the IPCC mailbox and the OpenAMP framework in remote
 * (slave) role, then create virtual UART0 and hook its receive
 * callback.
 *
 * @return RT_EOK on success, -RT_ERROR on any HAL/virtual-UART failure
 *         (negative per RT-Thread convention; the original returned
 *         positive RT_ERROR)
 */
static int openamp_init(void)
{
    extern int MX_OPENAMP_Init(int RPMsgRole, rpmsg_ns_bind_cb ns_bind_cb);

    /* IPCC init */
    hipcc.Instance = IPCC;
    if (HAL_IPCC_Init(&hipcc) != HAL_OK)
    {
        return -RT_ERROR;
    }

    /* openamp slave device */
    /* NOTE(review): return value is ignored as in the original;
       confirm MX_OPENAMP_Init's error convention before checking it */
    MX_OPENAMP_Init(RPMSG_REMOTE, NULL);

    if (VIRT_UART_Init(&huart0) != VIRT_UART_OK)
    {
        return -RT_ERROR;
    }

    /* deliver received frames to VIRT_UART0_RxCpltCallback */
    if (VIRT_UART_RegisterCallback(&huart0, VIRT_UART_RXCPLT_CB_ID, VIRT_UART0_RxCpltCallback) != VIRT_UART_OK)
    {
        return -RT_ERROR;
    }

    return RT_EOK;
}
int rt_hw_openamp_init(void)
{
openamp_init();
rt_hw_openamp_register(&dev_openamp, "openamp", 0, NULL);
rt_console_set_device("openamp");
return RT_EOK;
}
INIT_PREV_EXPORT(rt_hw_openamp_init);
/* Polling thread body: pump the OpenAMP message queue every 1 ms and,
   when bytes have accumulated in the receive ring, notify any reader
   via the device's rx_indicate callback.  Never returns. */
static void openamp_thread_entry(void *parameter)
{
    rt_size_t size = 0;
    struct rthw_openamp *device = RT_NULL;
    device = (struct rthw_openamp *)rt_device_find("openamp");
    RT_ASSERT(device != RT_NULL);
    for (;;)
    {
        /* processes pending RPMsg buffers; ends up invoking
           VIRT_UART0_RxCpltCallback for console traffic */
        OPENAMP_check_for_message();
        size = device->serial.rbuf_count;
        if (size > 0)
        {
            if (device->parent.rx_indicate != RT_NULL)
            {
                device->parent.rx_indicate(&device->parent, size);
            }
        }
        rt_thread_mdelay(1);
    }
}
/**
 * Application init hook (INIT_APP_EXPORT): create and start the thread
 * that pumps OpenAMP messages.
 *
 * @return RT_EOK on success, -RT_ERROR if the thread cannot be created
 *         (negative per RT-Thread convention; the original returned
 *         positive RT_ERROR)
 */
static int creat_openamp_thread(void)
{
    rt_thread_t tid = RT_NULL;

    tid = rt_thread_create("OpenAMP",
                           openamp_thread_entry,
                           RT_NULL,
                           OPENAMP_THREAD_STACK_SIZE,
                           OPENAMP_THREAD_PRIORITY,
                           OPENAMP_THREAD_TIMESLICE);
    if (tid == RT_NULL)
    {
        LOG_E("openamp thread create failed!");
        return -RT_ERROR;
    }

    rt_thread_startup(tid);

    return RT_EOK;
}
INIT_APP_EXPORT(creat_openamp_thread);
#endif
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-06-24     thread-liu   first version
 */

/* Public interface of the OpenAMP console driver: ring-buffer state
   shared between the device hooks, plus thread/buffer sizing. */
#ifndef __DRV_OPENAMP_H__
#define __DRV_OPENAMP_H__
#include "board.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Circular rx/tx buffer bookkeeping.  *_start is the read index and
   *_count the number of valid bytes; fields are volatile because they
   are updated from the OpenAMP callback path and read by device hooks. */
struct rt_openamp
{
    rt_uint8_t *rbuf;                /* receive ring storage */
    rt_uint8_t *tbuf;                /* transmit ring storage */
    volatile rt_uint16_t rbuf_size;  /* capacity of rbuf in bytes */
    volatile rt_uint16_t tbuf_size;  /* capacity of tbuf in bytes */
    volatile rt_uint16_t rbuf_start; /* read index into rbuf */
    volatile rt_uint16_t rbuf_count; /* valid bytes currently in rbuf */
    volatile rt_uint16_t tbuf_start; /* read index into tbuf */
    volatile rt_uint16_t tbuf_count; /* valid bytes currently in tbuf */
};
/* polling thread parameters */
#define OPENAMP_THREAD_STACK_SIZE 512
#define OPENAMP_THREAD_PRIORITY 5
#define OPENAMP_THREAD_TIMESLICE 10
/* size of each ring buffer in bytes */
#define MAX_BUFFER_SIZE 256
#ifdef __cplusplus
}
#endif
#endif
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <metal/errno.h>
#include <string.h>
#include <metal/device.h>
#include <metal/log.h>
#include <metal/dma.h>
#include <metal/atomic.h>
/* Map a scatter-gather list for DMA on @dev in direction @dir,
 * delegating to the bus's dev_dma_map op after issuing the fence that
 * orders CPU accesses against the device's view of memory.
 * Returns the number of output entries, -EINVAL on NULL arguments, or
 * -ENODEV when the bus has no DMA-map operation. */
int metal_dma_map(struct metal_device *dev,
    uint32_t dir,
    struct metal_sg *sg_in,
    int nents_in,
    struct metal_sg *sg_out)
{
    int nents_out;
    if (!dev || !sg_in || !sg_out)
        return -EINVAL;
    if (!dev->bus->ops.dev_dma_map)
        return -ENODEV;
    /* memory barrier */
    if (dir == METAL_DMA_DEV_R)
        /* If it is device read, apply memory write fence. */
        atomic_thread_fence(memory_order_release);
    else
        /* If it is device write or device r/w,
        apply memory r/w fence. */
        atomic_thread_fence(memory_order_acq_rel);
    nents_out = dev->bus->ops.dev_dma_map(dev->bus,
            dev, dir, sg_in, nents_in, sg_out);
    return nents_out;
}
/* Unmap a previously DMA-mapped scatter-gather list on @dev.  Issues
 * the same direction-dependent fence as metal_dma_map, then delegates
 * to the bus's dev_dma_unmap op.  Silently returns when @dev, the op,
 * or @sg is missing (best-effort cleanup path). */
void metal_dma_unmap(struct metal_device *dev,
          uint32_t dir,
          struct metal_sg *sg,
          int nents)
{
    /* memory barrier */
    if (dir == METAL_DMA_DEV_R)
        /* If it is device read, apply memory write fence. */
        atomic_thread_fence(memory_order_release);
    else
        /* If it is device write or device r/w,
        apply memory r/w fence. */
        atomic_thread_fence(memory_order_acq_rel);
    if (!dev || !dev->bus->ops.dev_dma_unmap || !sg)
        return;
    dev->bus->ops.dev_dma_unmap(dev->bus,
            dev, dir, sg, nents);
}
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file alloc.h
* @brief Memory allocation handling primitives for libmetal.
*/
#ifndef __METAL_ALLOC__H__
#define __METAL_ALLOC__H__
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup Memory Allocation Interfaces
* @{ */
/**
* @brief allocate requested memory size
* return a pointer to the allocated memory
*
* @param[in] size size in byte of requested memory
* @return memory pointer, or 0 if it failed to allocate
*/
static inline void *metal_allocate_memory(unsigned int size);
/**
* @brief free the memory previously allocated
*
* @param[in] ptr pointer to memory
*/
static inline void metal_free_memory(void *ptr);
#include <metal/system/generic/alloc.h>
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_ALLOC__H__ */
/*
* Copyright (c) 2018, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file assert.h
* @brief Assertion support.
*/
#ifndef __METAL_ASSERT__H__
#define __METAL_ASSERT__H__
#include <metal/system/generic/assert.h>
/**
* @brief Assertion macro.
* @param cond Condition to test.
*/
#define metal_assert(cond) metal_sys_assert(cond)
#endif /* __METAL_ASSERT_H__ */
/*
 * Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * @file atomic.h
 * @brief Atomic primitives for libmetal.
 *
 * Dispatch header: selects the atomic implementation in priority order
 * C11 <stdatomic.h> -> GCC __sync builtins -> bare ARM fallback.
 */

#ifndef __METAL_ATOMIC__H__
#define __METAL_ATOMIC__H__

#include <metal/config.h>

/* C11 atomics are excluded for ARMCC (__CC_ARM) and C++ translation
   units, where <stdatomic.h> is unavailable or incompatible. */
#if defined(HAVE_STDATOMIC_H) && !defined (__CC_ARM) && \
    !defined(__STDC_NO_ATOMICS__) && !defined(__cplusplus)
# include <stdatomic.h>
/* some stdatomic.h variants lack this macro; make it a no-op then */
#ifndef atomic_thread_fence
#define atomic_thread_fence(order)
#endif
#elif defined(__GNUC__)
# include <metal/compiler/gcc/atomic.h>
#else
# include <metal/processor/arm/atomic.h>
#endif

#endif /* __METAL_ATOMIC__H__ */
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file cache.h
* @brief CACHE operation primitives for libmetal.
*/
#ifndef __METAL_CACHE__H__
#define __METAL_CACHE__H__
#include <metal/system/generic/cache.h>
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup cache CACHE Interfaces
* @{ */
/**
* @brief flush specified data cache
*
* @param[in] addr start memory logical address
* @param[in] len length of memory
* If addr is NULL, and len is 0,
* It will flush the whole data cache.
*/
static inline void metal_cache_flush(void *addr, unsigned int len)
{
__metal_cache_flush(addr, len);
}
/**
* @brief invalidate specified data cache
*
* @param[in] addr start memory logical address
* @param[in] len length of memory
* If addr is NULL, and len is 0,
* It will invalidate the whole data cache.
*/
static inline void metal_cache_invalidate(void *addr, unsigned int len)
{
__metal_cache_invalidate(addr, len);
}
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_CACHE__H__ */
/*
 * Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * @file compiler.h
 * @brief Compiler specific primitives for libmetal.
 *
 * Dispatch header: pulls in the per-compiler shim (GCC or IAR) that
 * defines restrict/metal_align/metal_weak; plain ARMCC is rejected.
 */

#ifndef __METAL_COMPILER__H__
#define __METAL_COMPILER__H__

#if defined(__GNUC__)
# include <metal/compiler/gcc/compiler.h>
#elif defined(__ICCARM__)
# include <metal/compiler/iar/compiler.h>
#elif defined (__CC_ARM)
# error "MDK-ARM ARMCC compiler requires the GNU extentions to work correctly"
#else
# error "Missing compiler support"
#endif

#endif /* __METAL_COMPILER__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file gcc/atomic.h
* @brief GCC specific atomic primitives for libmetal.
*/
#ifndef __METAL_GCC_ATOMIC__H__
#define __METAL_GCC_ATOMIC__H__
#ifdef __cplusplus
extern "C" {
#endif
typedef int atomic_flag;
typedef char atomic_char;
typedef unsigned char atomic_uchar;
typedef short atomic_short;
typedef unsigned short atomic_ushort;
typedef int atomic_int;
typedef unsigned int atomic_uint;
typedef long atomic_long;
typedef unsigned long atomic_ulong;
typedef long long atomic_llong;
typedef unsigned long long atomic_ullong;
#define ATOMIC_FLAG_INIT 0
#define ATOMIC_VAR_INIT(VAL) (VAL)
typedef enum {
memory_order_relaxed,
memory_order_consume,
memory_order_acquire,
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst,
} memory_order;
#define atomic_flag_test_and_set(FLAG) \
__sync_lock_test_and_set((FLAG), 1)
#define atomic_flag_test_and_set_explicit(FLAG, MO) \
atomic_flag_test_and_set(FLAG)
#define atomic_flag_clear(FLAG) \
__sync_lock_release((FLAG))
#define atomic_flag_clear_explicit(FLAG, MO) \
atomic_flag_clear(FLAG)
#define atomic_init(OBJ, VAL) \
do { *(OBJ) = (VAL); } while (0)
#define atomic_is_lock_free(OBJ) \
(sizeof(*(OBJ)) <= sizeof(long))
#define atomic_store(OBJ, VAL) \
do { *(OBJ) = (VAL); __sync_synchronize(); } while (0)
#define atomic_store_explicit(OBJ, VAL, MO) \
atomic_store((OBJ), (VAL))
#define atomic_load(OBJ) \
({ __sync_synchronize(); *(OBJ); })
#define atomic_load_explicit(OBJ, MO) \
atomic_load(OBJ)
#define atomic_exchange(OBJ, DES) \
({ \
typeof(OBJ) obj = (OBJ); \
typeof(*obj) des = (DES); \
typeof(*obj) expval; \
typeof(*obj) oldval = atomic_load(obj); \
do { \
expval = oldval; \
oldval = __sync_val_compare_and_swap( \
obj, expval, des); \
} while (oldval != expval); \
oldval; \
})
#define atomic_exchange_explicit(OBJ, DES, MO) \
atomic_exchange((OBJ), (DES))
#define atomic_compare_exchange_strong(OBJ, EXP, DES) \
({ \
typeof(OBJ) obj = (OBJ); \
typeof(EXP) exp = (EXP); \
typeof(*obj) expval = *exp; \
typeof(*obj) oldval = __sync_val_compare_and_swap( \
obj, expval, (DES)); \
*exp = oldval; \
oldval == expval; \
})
#define atomic_compare_exchange_strong_explicit(OBJ, EXP, DES, MO) \
atomic_compare_exchange_strong((OBJ), (EXP), (DES))
#define atomic_compare_exchange_weak(OBJ, EXP, DES) \
atomic_compare_exchange_strong((OBJ), (EXP), (DES))
#define atomic_compare_exchange_weak_explicit(OBJ, EXP, DES, MO) \
atomic_compare_exchange_weak((OBJ), (EXP), (DES))
#define atomic_fetch_add(OBJ, VAL) \
__sync_fetch_and_add((OBJ), (VAL))
#define atomic_fetch_add_explicit(OBJ, VAL, MO) \
atomic_fetch_add((OBJ), (VAL))
#define atomic_fetch_sub(OBJ, VAL) \
__sync_fetch_and_sub((OBJ), (VAL))
#define atomic_fetch_sub_explicit(OBJ, VAL, MO) \
atomic_fetch_sub((OBJ), (VAL))
#define atomic_fetch_or(OBJ, VAL) \
__sync_fetch_and_or((OBJ), (VAL))
#define atomic_fetch_or_explicit(OBJ, VAL, MO) \
atomic_fetch_or((OBJ), (VAL))
#define atomic_fetch_xor(OBJ, VAL) \
__sync_fetch_and_xor((OBJ), (VAL))
#define atomic_fetch_xor_explicit(OBJ, VAL, MO) \
atomic_fetch_xor((OBJ), (VAL))
#define atomic_fetch_and(OBJ, VAL) \
__sync_fetch_and_and((OBJ), (VAL))
#define atomic_fetch_and_explicit(OBJ, VAL, MO) \
atomic_fetch_and((OBJ), (VAL))
#define atomic_thread_fence(MO) \
__sync_synchronize()
#define atomic_signal_fence(MO) \
__sync_synchronize()
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GCC_ATOMIC__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file gcc/compiler.h
* @brief GCC specific primitives for libmetal.
*/
#ifndef __METAL_GCC_COMPILER__H__
#define __METAL_GCC_COMPILER__H__
#ifdef __cplusplus
extern "C" {
#endif
#define restrict __restrict__
#define metal_align(n) __attribute__((aligned(n)))
#define metal_weak __attribute__((weak))
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GCC_COMPILER__H__ */
/*
* Copyright (c) 2018, ST Microelectronics. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file iar/compiler.h
* @brief IAR specific primitives for libmetal.
*/
#ifndef __METAL_IAR_COMPILER__H__
#define __METAL_IAR_COMPILER__H__
#ifdef __cplusplus
extern "C" {
#endif
#define restrict __restrict__
#define metal_align(n) __attribute__((aligned(n)))
#define metal_weak __attribute__((weak))
#ifdef __cplusplus
}
#endif
#endif /* __METAL_IAR_COMPILER__H__ */
/*
* * Copyright (c) 2019 STMicroelectronics. All rights reserved.
* *
* * Copyright (c) 1982, 1986, 1989, 1993
* * The Regents of the University of California. All rights reserved.
* * Copyright (c) 1982, 1986, 1989, 1993
* * The Regents of the University of California. All rights reserved.
* * (c) UNIX System Laboratories, Inc.
* * All or some portions of this file are derived from material licensed
* * to the University of California by American Telephone and Telegraph
* * Co. or Unix System Laboratories, Inc. and are reproduced herein with
* * the permission of UNIX System Laboratories, Inc.
*
* * SPDX-License-Identifier: BSD-3-Clause
* */
#ifndef __METAL_ERRNO__H__
#error "Include metal/errno.h instead of metal/iar/errno.h"
#endif
#ifndef _ERRNO_H_
#ifdef __cplusplus
extern "C" {
#endif
#define _ERRNO_H_
#ifndef EPERM
#define EPERM 1 /* Not owner */
#endif
#ifndef ENOENT
#define ENOENT 2 /* No such file or directory */
#endif
#ifndef ESRCH
#define ESRCH 3 /* No such process */
#endif
#ifndef EINTR
#define EINTR 4 /* Interrupted system call */
#endif
#ifndef EIO
#define EIO 5 /* I/O error */
#endif
#ifndef ENXIO
#define ENXIO 6 /* No such device or address */
#endif
#ifndef E2BIG
#define E2BIG 7 /* Arg list too long */
#endif
#ifndef ENOEXEC
#define ENOEXEC 8 /* Exec format error */
#endif
#ifndef EBADF
#define EBADF 9 /* Bad file number */
#endif
#ifndef ECHILD
#define ECHILD 10 /* No children */
#endif
#ifndef EAGAIN
#define EAGAIN 11 /* No more processes */
#endif
#ifndef ENOMEM
#define ENOMEM 12 /* Not enough space */
#endif
#ifndef EACCES
#define EACCES 13 /* Permission denied */
#endif
#ifndef EFAULT
#define EFAULT 14 /* Bad address */
#endif
#ifndef EBUSY
#define EBUSY 16 /* Device or resource busy */
#endif
#ifndef EEXIST
#define EEXIST 17 /* File exists */
#endif
#ifndef EXDEV
#define EXDEV 18 /* Cross-device link */
#endif
#ifndef ENODEV
#define ENODEV 19 /* No such device */
#endif
#ifndef ENOTDIR
#define ENOTDIR 20 /* Not a directory */
#endif
#ifndef EISDIR
#define EISDIR 21 /* Is a directory */
#endif
#ifndef EINVAL
#define EINVAL 22 /* Invalid argument */
#endif
#ifndef ENFILE
#define ENFILE 23 /* Too many open files in system */
#endif
#ifndef EMFILE
#define EMFILE 24 /* File descriptor value too large */
#endif
#ifndef ENOTTY
#define ENOTTY 25 /* Not a character device */
#endif
#ifndef ETXTBSY
#define ETXTBSY 26 /* Text file busy */
#endif
#ifndef EFBIG
#define EFBIG 27 /* File too large */
#endif
#ifndef ENOSPC
#define ENOSPC 28 /* No space left on device */
#endif
#ifndef ESPIPE
#define ESPIPE 29 /* Illegal seek */
#endif
#ifndef EROFS
#define EROFS 30 /* Read-only file system */
#endif
#ifndef EMLINK
#define EMLINK 31 /* Too many links */
#endif
#ifndef EPIPE
#define EPIPE 32 /* Broken pipe */
#endif
#ifndef EDOM
#define EDOM 33 /* Mathematics argument out of domain of function */
#endif
#ifndef ERANGE
#define ERANGE 34 /* Result too large */
#endif
#ifndef ENOMSG
#define ENOMSG 35 /* No message of desired type */
#endif
#ifndef EIDRM
#define EIDRM 36 /* Identifier removed */
#endif
#ifndef EDEADLK
#define EDEADLK 45 /* Deadlock */
#endif
#ifndef ENOLCK
#define ENOLCK 46 /* No lock */
#endif
#ifndef ENOSTR
#define ENOSTR 60 /* Not a stream */
#endif
#ifndef ENODATA
#define ENODATA 61 /* No data (for no delay io) */
#endif
#ifndef ETIME
#define ETIME 62 /* Stream ioctl timeout */
#endif
#ifndef ENOSR
#define ENOSR 63 /* No stream resources */
#endif
#ifndef ENOLINK
#define ENOLINK 67 /* Virtual circuit is gone */
#endif
#ifndef EPROTO
#define EPROTO 71 /* Protocol error */
#endif
#ifndef EMULTIHOP
#define EMULTIHOP 74 /* Multihop attempted */
#endif
#ifndef EBADMSG
#define EBADMSG 77 /* Bad message */
#endif
#ifndef EFTYPE
#define EFTYPE 79 /* Inappropriate file type or format */
#endif
#ifndef ENOSYS
#define ENOSYS 88 /* Function not implemented */
#endif
#ifndef ENOTEMPTY
#define ENOTEMPTY 90 /* Directory not empty */
#endif
#ifndef ENAMETOOLONG
#define ENAMETOOLONG 91 /* File or path name too long */
#endif
#ifndef ELOOP
#define ELOOP 92 /* Too many symbolic links */
#endif
#ifndef EOPNOTSUPP
#define EOPNOTSUPP 95 /* Operation not supported on socket */
#endif
#ifndef EPFNOSUPPORT
#define EPFNOSUPPORT 96 /* Protocol family not supported */
#endif
#ifndef ECONNRESET
#define ECONNRESET 104 /* Connection reset by peer */
#endif
#ifndef ENOBUFS
#define ENOBUFS 105 /* No buffer space available */
#endif
#ifndef EAFNOSUPPORT
#define EAFNOSUPPORT 106 /* Address family not supported by protocol family */
#endif
#ifndef EPROTOTYPE
#define EPROTOTYPE 107 /* Protocol wrong type for socket */
#endif
#ifndef ENOTSOCK
#define ENOTSOCK 108 /* Socket operation on non-socket */
#endif
#ifndef ENOPROTOOPT
#define ENOPROTOOPT 109 /* Protocol not available */
#endif
#ifndef ECONNREFUSED
#define ECONNREFUSED 111 /* Connection refused */
#endif
#ifndef EADDRINUSE
#define EADDRINUSE 112 /* Address already in use */
#endif
#ifndef ECONNABORTED
#define ECONNABORTED 113 /* Software caused connection abort */
#endif
#ifndef ENETUNREACH
#define ENETUNREACH 114 /* Network is unreachable */
#endif
#ifndef ENETDOWN
#define ENETDOWN 115 /* Network interface is not configured */
#endif
#ifndef ETIMEDOUT
#define ETIMEDOUT 116 /* Connection timed out */
#endif
#ifndef EHOSTDOWN
#define EHOSTDOWN 117 /* Host is down */
#endif
#ifndef EHOSTUNREACH
#define EHOSTUNREACH 118 /* Host is unreachable */
#endif
#ifndef EINPROGRESS
#define EINPROGRESS 119 /* Connection already in progress */
#endif
#ifndef EALREADY
#define EALREADY 120 /* Socket already connected */
#endif
#ifndef EDESTADDRREQ
#define EDESTADDRREQ 121 /* Destination address required */
#endif
#ifndef EMSGSIZE
#define EMSGSIZE 122 /* Message too long */
#endif
#ifndef EPROTONOSUPPORT
#define EPROTONOSUPPORT 123 /* Unknown protocol */
#endif
#ifndef EADDRNOTAVAIL
#define EADDRNOTAVAIL 125 /* Address not available */
#endif
#ifndef ENETRESET
#define ENETRESET 126 /* Connection aborted by network */
#endif
#ifndef EISCONN
#define EISCONN 127 /* Socket is already connected */
#endif
#ifndef ENOTCONN
#define ENOTCONN 128 /* Socket is not connected */
#endif
#ifndef ETOOMANYREFS
#define ETOOMANYREFS 129
#endif
#ifndef EDQUOT
#define EDQUOT 132
#endif
#ifndef ESTALE
#define ESTALE 133
#endif
#ifndef ENOTSUP
#define ENOTSUP 134 /* Not supported */
#endif
#ifndef EILSEQ
#define EILSEQ 138 /* Illegal byte sequence */
#endif
#ifndef EOVERFLOW
#define EOVERFLOW 139 /* Value too large for defined data type */
#endif
#ifndef ECANCELED
#define ECANCELED 140 /* Operation canceled */
#endif
#ifndef ENOTRECOVERABLE
#define ENOTRECOVERABLE 141 /* State not recoverable */
#endif
#ifndef EOWNERDEAD
#define EOWNERDEAD 142 /* Previous owner died */
#endif
#ifndef EWOULDBLOCK
#define EWOULDBLOCK EAGAIN /* Operation would block */
#endif
#define __ELASTERROR 2000 /* Users can add values starting here */
#ifdef __cplusplus
}
#endif
#endif /* _ERRNO_H */
/*
* * Copyright (c) 2019 STMicroelectronics. All rights reserved.
* *
* * Copyright (c) 1982, 1986, 1989, 1993
* * The Regents of the University of California. All rights reserved.
* * Copyright (c) 1982, 1986, 1989, 1993
* * The Regents of the University of California. All rights reserved.
* * (c) UNIX System Laboratories, Inc.
* * All or some portions of this file are derived from material licensed
* * to the University of California by American Telephone and Telegraph
* * Co. or Unix System Laboratories, Inc. and are reproduced herein with
* * the permission of UNIX System Laboratories, Inc.
*
* * SPDX-License-Identifier: BSD-3-Clause
* */
#ifndef __METAL_ERRNO__H__
#error "Include metal/errno.h instead of metal/mdk-arm/errno.h"
#endif
#ifndef _ERRNO_H_
#ifdef __cplusplus
extern "C" {
#endif
#define _ERRNO_H_
#define EPERM 1 /* Not owner */
#define ENOENT 2 /* No such file or directory */
#define ESRCH 3 /* No such process */
#define EINTR 4 /* Interrupted system call */
#define EIO 5 /* I/O error */
#define ENXIO 6 /* No such device or address */
#define E2BIG 7 /* Arg list too long */
#define ENOEXEC 8 /* Exec format error */
#define EBADF 9 /* Bad file number */
#define ECHILD 10 /* No children */
#define EAGAIN 11 /* No more processes */
#ifdef ENOMEM
#undef ENOMEM
#endif
#define ENOMEM 12 /* Not enough space */
#define EACCES 13 /* Permission denied */
#define EFAULT 14 /* Bad address */
#define EBUSY 16 /* Device or resource busy */
#define EEXIST 17 /* File exists */
#define EXDEV 18 /* Cross-device link */
#define ENODEV 19 /* No such device */
#define ENOTDIR 20 /* Not a directory */
#define EISDIR 21 /* Is a directory */
#ifdef EINVAL
#undef EINVAL
#endif
#define EINVAL 22 /* Invalid argument */
#define ENFILE 23 /* Too many open files in system */
#define EMFILE 24 /* File descriptor value too large */
#define ENOTTY 25 /* Not a character device */
#define ETXTBSY 26 /* Text file busy */
#define EFBIG 27 /* File too large */
#define ENOSPC 28 /* No space left on device */
#define ESPIPE 29 /* Illegal seek */
#define EROFS 30 /* Read-only file system */
#define EMLINK 31 /* Too many links */
#define EPIPE 32 /* Broken pipe */
#ifdef EDOM
#undef EDOM
#endif
#define EDOM 33 /* Mathematics argument out of domain of function */
#ifdef ERANGE
#undef ERANGE
#endif
#define ERANGE 34 /* Result too large */
#define ENOMSG 35 /* No message of desired type */
#define EIDRM 36 /* Identifier removed */
#define EDEADLK 45 /* Deadlock */
#define ENOLCK 46 /* No lock */
#define ENOSTR 60 /* Not a stream */
#define ENODATA 61 /* No data (for no delay io) */
#define ETIME 62 /* Stream ioctl timeout */
#define ENOSR 63 /* No stream resources */
#define ENOLINK 67 /* Virtual circuit is gone */
#define EPROTO 71 /* Protocol error */
#define EMULTIHOP 74 /* Multihop attempted */
#define EBADMSG 77 /* Bad message */
#define EFTYPE 79 /* Inappropriate file type or format */
#define ENOSYS 88 /* Function not implemented */
#define ENOTEMPTY 90 /* Directory not empty */
#define ENAMETOOLONG 91 /* File or path name too long */
#define ELOOP 92 /* Too many symbolic links */
#define EOPNOTSUPP 95 /* Operation not supported on socket */
#define EPFNOSUPPORT 96 /* Protocol family not supported */
#define ECONNRESET 104 /* Connection reset by peer */
#define ENOBUFS 105 /* No buffer space available */
#define EAFNOSUPPORT 106 /* Address family not supported by protocol family */
#define EPROTOTYPE 107 /* Protocol wrong type for socket */
#define ENOTSOCK 108 /* Socket operation on non-socket */
#define ENOPROTOOPT 109 /* Protocol not available */
#define ECONNREFUSED 111 /* Connection refused */
#define EADDRINUSE 112 /* Address already in use */
#define ECONNABORTED 113 /* Software caused connection abort */
#define ENETUNREACH 114 /* Network is unreachable */
#define ENETDOWN 115 /* Network interface is not configured */
#define ETIMEDOUT 116 /* Connection timed out */
#define EHOSTDOWN 117 /* Host is down */
#define EHOSTUNREACH 118 /* Host is unreachable */
#define EINPROGRESS 119 /* Connection already in progress */
#define EALREADY 120 /* Socket already connected */
#define EDESTADDRREQ 121 /* Destination address required */
#define EMSGSIZE 122 /* Message too long */
#define EPROTONOSUPPORT 123 /* Unknown protocol */
#define EADDRNOTAVAIL 125 /* Address not available */
#define ENETRESET 126 /* Connection aborted by network */
#define EISCONN 127 /* Socket is already connected */
#define ENOTCONN 128 /* Socket is not connected */
#define ETOOMANYREFS 129
#define EDQUOT 132
#define ESTALE 133
#define ENOTSUP 134 /* Not supported */
#ifdef EILSEQ
#undef EILSEQ
#endif
#define EILSEQ 138 /* Illegal byte sequence */
#define EOVERFLOW 139 /* Value too large for defined data type */
#define ECANCELED 140 /* Operation canceled */
#define ENOTRECOVERABLE 141 /* State not recoverable */
#define EOWNERDEAD 142 /* Previous owner died */
#define EWOULDBLOCK EAGAIN /* Operation would block */
#define __ELASTERROR 2000 /* Users can add values starting here */
#ifdef __cplusplus
}
#endif
#endif /* _ERRNO_H */
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file condition.h
* @brief Condition variable for libmetal.
*/
#ifndef __METAL_CONDITION__H__
#define __METAL_CONDITION__H__
#include <metal/mutex.h>
#include <metal/utilities.h>
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup condition Condition Variable Interfaces
* @{ */
/** Opaque libmetal condition variable data structure. */
struct metal_condition;
/**
* @brief Initialize a libmetal condition variable.
* @param[in] cv condition variable to initialize.
*/
static inline void metal_condition_init(struct metal_condition *cv);
/**
* @brief Notify one waiter.
* Before calling this function, the caller
* should have acquired the mutex.
* @param[in] cv condition variable
* @return zero on no errors, non-zero on errors
* @see metal_condition_wait, metal_condition_broadcast
*/
static inline int metal_condition_signal(struct metal_condition *cv);
/**
* @brief Notify all waiters.
* Before calling this function, the caller
* should have acquired the mutex.
* @param[in] cv condition variable
* @return zero on no errors, non-zero on errors
* @see metal_condition_wait, metal_condition_signal
*/
static inline int metal_condition_broadcast(struct metal_condition *cv);
/**
* @brief Block until the condition variable is notified.
* Before calling this function, the caller should
* have acquired the mutex.
* @param[in] cv condition variable
* @param[in] m mutex
* @return 0 on success, non-zero on failure.
* @see metal_condition_signal
*/
int metal_condition_wait(struct metal_condition *cv, metal_mutex_t *m);
#include <metal/system/generic/condition.h>
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_CONDITION__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file config.h
* @brief Generated configuration settings for libmetal.
*/
#ifndef __METAL_CONFIG__H__
#define __METAL_CONFIG__H__
#ifdef __cplusplus
extern "C" {
#endif
/** Library major version number. */
#define METAL_VER_MAJOR 0
/** Library minor version number. */
#define METAL_VER_MINOR 1
/** Library patch level. */
#define METAL_VER_PATCH 0
/** Library version string. */
#define METAL_VER "0.1.0"
/** System type (linux, generic, ...). */
#define METAL_SYSTEM "generic"
#define METAL_SYSTEM_GENERIC
/** Processor type (arm, x86_64, ...). */
#define METAL_PROCESSOR "arm"
#define METAL_PROCESSOR_ARM
/** Machine type (zynq, zynqmp, ...). */
#define METAL_MACHINE "cortexm"
#define METAL_MACHINE_CORTEXM
#define HAVE_STDATOMIC_H
/* #undef HAVE_FUTEX_H */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_CONFIG__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file cpu.h
* @brief CPU primitives for libmetal.
*/
#ifndef __METAL_CPU__H__
#define __METAL_CPU__H__
# include <metal/processor/arm/cpu.h>
#endif /* __METAL_CPU__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file device.h
* @brief Bus abstraction for libmetal.
*/
#ifndef __METAL_BUS__H__
#define __METAL_BUS__H__
#include <stdint.h>
#include <metal/io.h>
#include <metal/list.h>
#include <metal/dma.h>
#include <metal/sys.h>
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup device Bus Abstraction
* @{ */
#ifndef METAL_MAX_DEVICE_REGIONS
#define METAL_MAX_DEVICE_REGIONS 32
#endif
struct metal_bus;
struct metal_device;
/** Bus operations. */
struct metal_bus_ops {
void (*bus_close)(struct metal_bus *bus);
int (*dev_open)(struct metal_bus *bus,
const char *dev_name,
struct metal_device **device);
void (*dev_close)(struct metal_bus *bus,
struct metal_device *device);
void (*dev_irq_ack)(struct metal_bus *bus,
struct metal_device *device,
int irq);
int (*dev_dma_map)(struct metal_bus *bus,
struct metal_device *device,
uint32_t dir,
struct metal_sg *sg_in,
int nents_in,
struct metal_sg *sg_out);
void (*dev_dma_unmap)(struct metal_bus *bus,
struct metal_device *device,
uint32_t dir,
struct metal_sg *sg,
int nents);
};
/** Libmetal bus structure. */
struct metal_bus {
const char *name;
struct metal_bus_ops ops;
struct metal_list devices;
struct metal_list node;
};
/** Libmetal generic bus. */
extern struct metal_bus metal_generic_bus;
/** Libmetal device structure. */
struct metal_device {
const char *name; /**< Device name */
struct metal_bus *bus; /**< Bus that contains device */
unsigned num_regions; /**< Number of I/O regions in
device */
struct metal_io_region regions[METAL_MAX_DEVICE_REGIONS]; /**< Array of
I/O regions in device*/
struct metal_list node; /**< Node on bus' list of devices */
int irq_num; /**< Number of IRQs per device */
void *irq_info; /**< IRQ ID */
};
/**
* @brief Register a libmetal bus.
* @param[in] bus Pre-initialized bus structure.
* @return 0 on success, or -errno on failure.
*/
extern int metal_bus_register(struct metal_bus *bus);
/**
* @brief Unregister a libmetal bus.
* @param[in] bus Pre-registered bus structure.
* @return 0 on success, or -errno on failure.
*/
extern int metal_bus_unregister(struct metal_bus *bus);
/**
* @brief Find a libmetal bus by name.
* @param[in] name Bus name.
* @param[out] bus Returned bus handle.
* @return 0 on success, or -errno on failure.
*/
extern int metal_bus_find(const char *name, struct metal_bus **bus);
/**
* @brief Statically register a generic libmetal device.
*
* In non-Linux systems, devices are always required to be statically
* registered at application initialization.
* In Linux system, devices can be dynamically opened via sysfs or libfdt based
* enumeration at runtime.
* This interface is used for static registration of devices. Subsequent calls
* to metal_device_open() look up in this list of pre-registered devices on the
* "generic" bus.
* "generic" bus is used on non-Linux system to group the memory mapped devices.
*
* @param[in] device Generic device.
* @return 0 on success, or -errno on failure.
*/
extern int metal_register_generic_device(struct metal_device *device);
/**
* @brief Open a libmetal device by name.
* @param[in] bus_name Bus name.
* @param[in] dev_name Device name.
* @param[out] device Returned device handle.
* @return 0 on success, or -errno on failure.
*/
extern int metal_device_open(const char *bus_name, const char *dev_name,
struct metal_device **device);
/**
* @brief Close a libmetal device.
* @param[in] device Device handle.
*/
extern void metal_device_close(struct metal_device *device);
/**
* @brief Get an I/O region accessor for a device region.
*
* @param[in] device Device handle.
* @param[in] index Region index.
* @return I/O accessor handle, or NULL on failure.
*/
static inline struct metal_io_region *
metal_device_io_region(struct metal_device *device, unsigned index)
{
	/* Reject out-of-range indices before touching the region table. */
	if (index >= device->num_regions)
		return NULL;
	return &device->regions[index];
}
/** @} */
#ifdef METAL_INTERNAL
extern int metal_generic_dev_sys_open(struct metal_device *dev);
extern int metal_generic_dev_open(struct metal_bus *bus, const char *dev_name,
struct metal_device **device);
extern int metal_generic_dev_dma_map(struct metal_bus *bus,
struct metal_device *device,
uint32_t dir,
struct metal_sg *sg_in,
int nents_in,
struct metal_sg *sg_out);
extern void metal_generic_dev_dma_unmap(struct metal_bus *bus,
struct metal_device *device,
uint32_t dir,
struct metal_sg *sg,
int nents);
#endif /* METAL_INTERNAL */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_BUS__H__ */
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file dma.h
* @brief DMA primitives for libmetal.
*/
#ifndef __METAL_DMA__H__
#define __METAL_DMA__H__
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup dma DMA Interfaces
* @{ */
#include <stdint.h>
#include <metal/sys.h>
#define METAL_DMA_DEV_R 1 /**< DMA direction, device read */
#define METAL_DMA_DEV_W 2 /**< DMA direction, device write */
#define METAL_DMA_DEV_WR 3 /**< DMA direction, device read/write */
/**
* @brief scatter/gather list element structure
*/
struct metal_sg {
void *virt; /**< CPU virtual address */
struct metal_io_region *io; /**< IO region */
int len; /**< length */
};
struct metal_device;
/**
* @brief Map memory for DMA transaction.
* After the memory is DMA mapped, the memory should be
* accessed by the DMA device but not the CPU.
*
* @param[in] dev DMA device
* @param[in] dir DMA direction
* @param[in] sg_in sg list of memory to map
* @param[in] nents_in number of sg list entries of memory to map
* @param[out] sg_out sg list of mapped memory
* @return number of mapped sg entries, -error on failure.
*/
int metal_dma_map(struct metal_device *dev,
uint32_t dir,
struct metal_sg *sg_in,
int nents_in,
struct metal_sg *sg_out);
/**
* @brief Unmap DMA memory
* After the memory is DMA unmapped, the memory should
* be accessed by the CPU but not the DMA device.
*
* @param[in] dev DMA device
* @param[in] dir DMA direction
* @param[in] sg sg list of mapped DMA memory
* @param[in] nents number of sg list entries of DMA memory
*/
void metal_dma_unmap(struct metal_device *dev,
uint32_t dir,
struct metal_sg *sg,
int nents);
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_DMA__H__ */
/*
* * Copyright (c) 2019 STMicrolectonics , Xilinx Inc. and Contributors. All rights reserved.
* *
* * SPDX-License-Identifier: BSD-3-Clause
* */
/*
* * @file metal/errno.h
* * @brief error specific primitives for libmetal.
* */
#ifndef __METAL_ERRNO__H__
#define __METAL_ERRNO__H__
#if defined (__CC_ARM)
# include <metal/compiler/mdk-arm/errno.h>
#elif defined (__ICCARM__)
# include <metal/compiler/iar/errno.h>
#else
#include <errno.h>
#endif
#endif /* __METAL_ERRNO__H__ */
/*
* Copyright (c) 2015 - 2017, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file io.h
* @brief I/O access primitives for libmetal.
*/
#ifndef __METAL_IO__H__
#define __METAL_IO__H__
#include <limits.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <metal/assert.h>
#include <metal/compiler.h>
#include <metal/atomic.h>
#include <metal/sys.h>
#include <metal/cpu.h>
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup io IO Interfaces
* @{ */
#ifdef __MICROBLAZE__
#define NO_ATOMIC_64_SUPPORT
#endif
struct metal_io_region;
/** Generic I/O operations. */
struct metal_io_ops {
uint64_t (*read)(struct metal_io_region *io,
unsigned long offset,
memory_order order,
int width);
void (*write)(struct metal_io_region *io,
unsigned long offset,
uint64_t value,
memory_order order,
int width);
int (*block_read)(struct metal_io_region *io,
unsigned long offset,
void *restrict dst,
memory_order order,
int len);
int (*block_write)(struct metal_io_region *io,
unsigned long offset,
const void *restrict src,
memory_order order,
int len);
void (*block_set)(struct metal_io_region *io,
unsigned long offset,
unsigned char value,
memory_order order,
int len);
void (*close)(struct metal_io_region *io);
};
/** Libmetal I/O region structure. */
struct metal_io_region {
void *virt; /**< base virtual address */
const metal_phys_addr_t *physmap; /**< table of base physical address
of each of the pages in the I/O
region */
size_t size; /**< size of the I/O region */
unsigned long page_shift; /**< page shift of I/O region */
metal_phys_addr_t page_mask; /**< page mask of I/O region */
unsigned int mem_flags; /**< memory attribute of the
I/O region */
struct metal_io_ops ops; /**< I/O region operations */
};
/**
* @brief Open a libmetal I/O region.
*
* @param[in, out] io I/O region handle.
* @param[in] virt Virtual address of region.
* @param[in] physmap Array of physical addresses per page.
* @param[in] size Size of region.
* @param[in] page_shift Log2 of page size (-1 for single page).
* @param[in] mem_flags Memory flags
* @param[in] ops ops
*/
void
metal_io_init(struct metal_io_region *io, void *virt,
const metal_phys_addr_t *physmap, size_t size,
unsigned page_shift, unsigned int mem_flags,
const struct metal_io_ops *ops);
/**
* @brief Close a libmetal shared memory segment.
* @param[in] io I/O region handle.
*/
static inline void metal_io_finish(struct metal_io_region *io)
{
	/* Run the region's close hook, if one was installed at init time. */
	if (io->ops.close != NULL)
		io->ops.close(io);
	/* Scrub the handle so stale pointers cannot be reused by accident. */
	memset(io, 0, sizeof(*io));
}
/**
* @brief Get size of I/O region.
*
* @param[in] io I/O region handle.
* @return Size of I/O region.
*/
static inline size_t metal_io_region_size(struct metal_io_region *io)
{
	/* Size in bytes, as recorded when the region was initialized. */
	return io->size;
}
/**
* @brief Get virtual address for a given offset into the I/O region.
* @param[in] io I/O region handle.
* @param[in] offset Offset into shared memory segment.
* @return NULL if offset is out of range, or pointer to offset.
*/
static inline void *
metal_io_virt(struct metal_io_region *io, unsigned long offset)
{
	/* No mapping, or offset past the region end: nothing to return. */
	if (io->virt == METAL_BAD_VA || offset > io->size)
		return NULL;
	return (uint8_t *)io->virt + offset;
}
/**
* @brief Convert a virtual address to offset within I/O region.
* @param[in] io I/O region handle.
* @param[in] virt Virtual address within segment.
* @return METAL_BAD_OFFSET if out of range, or offset.
*/
static inline unsigned long
metal_io_virt_to_offset(struct metal_io_region *io, void *virt)
{
	/* Pointer difference from the region base; unsigned, so an address
	 * below the base also wraps to a huge value and fails the check. */
	size_t delta = (uint8_t *)virt - (uint8_t *)io->virt;

	if (delta >= io->size)
		return METAL_BAD_OFFSET;
	return delta;
}
/**
* @brief Get physical address for a given offset into the I/O region.
* @param[in] io I/O region handle.
* @param[in] offset Offset into shared memory segment.
* @return METAL_BAD_PHYS if offset is out of range, or physical address
* of offset.
*/
static inline metal_phys_addr_t
metal_io_phys(struct metal_io_region *io, unsigned long offset)
{
	unsigned long page;

	/* Without a physmap, or past the region end, there is no answer. */
	if (io->physmap == NULL || offset > io->size)
		return METAL_BAD_PHYS;
	/* A page_shift at least as wide as the offset type means the whole
	 * region is one page; avoid an undefined oversized shift. */
	if (io->page_shift >= sizeof(offset) * CHAR_BIT)
		page = 0;
	else
		page = offset >> io->page_shift;
	return io->physmap[page] + (offset & io->page_mask);
}
/**
* @brief Convert a physical address to offset within I/O region.
* @param[in] io I/O region handle.
* @param[in] phys Physical address within segment.
* @return METAL_BAD_OFFSET if out of range, or offset.
*/
static inline unsigned long
metal_io_phys_to_offset(struct metal_io_region *io, metal_phys_addr_t phys)
{
	/* Initial guess: for a single-page region (page_mask == -1) the
	 * offset is relative to the first physmap entry; otherwise start
	 * from the in-page offset and probe each page below. */
	unsigned long offset =
		(io->page_mask == (metal_phys_addr_t)(-1) ?
		phys - io->physmap[0] : phys & io->page_mask);
	/* Walk the region one page at a time until a page maps back to
	 * the requested physical address. */
	do {
		if (metal_io_phys(io, offset) == phys)
			return offset;
		offset += io->page_mask + 1;
	} while (offset < io->size);
	return METAL_BAD_OFFSET;
}
/**
* @brief Convert a physical address to virtual address.
* @param[in] io Shared memory segment handle.
* @param[in] phys Physical address within segment.
* @return NULL if out of range, or corresponding virtual address.
*/
static inline void *
metal_io_phys_to_virt(struct metal_io_region *io, metal_phys_addr_t phys)
{
	/* Two hops: physical -> region offset -> virtual address. */
	unsigned long offset = metal_io_phys_to_offset(io, phys);

	return metal_io_virt(io, offset);
}
/**
* @brief Convert a virtual address to physical address.
* @param[in] io Shared memory segment handle.
* @param[in] virt Virtual address within segment.
* @return METAL_BAD_PHYS if out of range, or corresponding
* physical address.
*/
static inline metal_phys_addr_t
metal_io_virt_to_phys(struct metal_io_region *io, void *virt)
{
	/* Two hops: virtual -> region offset -> physical address. */
	unsigned long offset = metal_io_virt_to_offset(io, virt);

	return metal_io_phys(io, offset);
}
/**
* @brief Read a value from an I/O region.
* @param[in] io I/O region handle.
* @param[in] offset Offset into I/O region.
* @param[in] order Memory ordering.
* @param[in] width Width in bytes of datatype to read. This must be 1, 2,
* 4, or 8, and a compile time constant for this function
* to inline cleanly.
* @return Value.
*/
static inline uint64_t
metal_io_read(struct metal_io_region *io, unsigned long offset,
	      memory_order order, int width)
{
	/* NULL if offset is out of range; checked by each branch below. */
	void *ptr = metal_io_virt(io, offset);
	/* A custom read op, when installed, overrides the default path. */
	if (io->ops.read)
		return (*io->ops.read)(io, offset, order, width);
	/* Otherwise dispatch on width to the matching C11 atomic load. */
	else if (ptr && sizeof(atomic_uchar) == width)
		return atomic_load_explicit((atomic_uchar *)ptr, order);
	else if (ptr && sizeof(atomic_ushort) == width)
		return atomic_load_explicit((atomic_ushort *)ptr, order);
	else if (ptr && sizeof(atomic_uint) == width)
		return atomic_load_explicit((atomic_uint *)ptr, order);
	else if (ptr && sizeof(atomic_ulong) == width)
		return atomic_load_explicit((atomic_ulong *)ptr, order);
#ifndef NO_ATOMIC_64_SUPPORT
	else if (ptr && sizeof(atomic_ullong) == width)
		return atomic_load_explicit((atomic_ullong *)ptr, order);
#endif
	/* Unsupported width or bad offset: assert in debug builds. */
	metal_assert(0);
	return 0; /* quiet compiler */
}
/**
* @brief Write a value into an I/O region.
* @param[in] io I/O region handle.
* @param[in] offset Offset into I/O region.
* @param[in] value Value to write.
* @param[in] order Memory ordering.
* @param[in] width Width in bytes of datatype to read. This must be 1, 2,
* 4, or 8, and a compile time constant for this function
* to inline cleanly.
*/
static inline void
metal_io_write(struct metal_io_region *io, unsigned long offset,
	       uint64_t value, memory_order order, int width)
{
	/* NULL if offset is out of range; checked by each branch below. */
	void *ptr = metal_io_virt(io, offset);
	/* A custom write op, when installed, overrides the default path. */
	if (io->ops.write)
		(*io->ops.write)(io, offset, value, order, width);
	/* Otherwise dispatch on width to the matching C11 atomic store. */
	else if (ptr && sizeof(atomic_uchar) == width)
		atomic_store_explicit((atomic_uchar *)ptr, value, order);
	else if (ptr && sizeof(atomic_ushort) == width)
		atomic_store_explicit((atomic_ushort *)ptr, value, order);
	else if (ptr && sizeof(atomic_uint) == width)
		atomic_store_explicit((atomic_uint *)ptr, value, order);
	else if (ptr && sizeof(atomic_ulong) == width)
		atomic_store_explicit((atomic_ulong *)ptr, value, order);
#ifndef NO_ATOMIC_64_SUPPORT
	else if (ptr && sizeof(atomic_ullong) == width)
		atomic_store_explicit((atomic_ullong *)ptr, value, order);
#endif
	else
		/* Unsupported width or bad offset: assert in debug builds. */
		metal_assert (0);
}
#define metal_io_read8_explicit(_io, _ofs, _order) \
metal_io_read((_io), (_ofs), (_order), 1)
#define metal_io_read8(_io, _ofs) \
metal_io_read((_io), (_ofs), memory_order_seq_cst, 1)
#define metal_io_write8_explicit(_io, _ofs, _val, _order) \
metal_io_write((_io), (_ofs), (_val), (_order), 1)
#define metal_io_write8(_io, _ofs, _val) \
metal_io_write((_io), (_ofs), (_val), memory_order_seq_cst, 1)
#define metal_io_read16_explicit(_io, _ofs, _order) \
metal_io_read((_io), (_ofs), (_order), 2)
#define metal_io_read16(_io, _ofs) \
metal_io_read((_io), (_ofs), memory_order_seq_cst, 2)
#define metal_io_write16_explicit(_io, _ofs, _val, _order) \
metal_io_write((_io), (_ofs), (_val), (_order), 2)
#define metal_io_write16(_io, _ofs, _val) \
metal_io_write((_io), (_ofs), (_val), memory_order_seq_cst, 2)
#define metal_io_read32_explicit(_io, _ofs, _order) \
metal_io_read((_io), (_ofs), (_order), 4)
#define metal_io_read32(_io, _ofs) \
metal_io_read((_io), (_ofs), memory_order_seq_cst, 4)
#define metal_io_write32_explicit(_io, _ofs, _val, _order) \
metal_io_write((_io), (_ofs), (_val), (_order), 4)
#define metal_io_write32(_io, _ofs, _val) \
metal_io_write((_io), (_ofs), (_val), memory_order_seq_cst, 4)
#define metal_io_read64_explicit(_io, _ofs, _order) \
metal_io_read((_io), (_ofs), (_order), 8)
#define metal_io_read64(_io, _ofs) \
metal_io_read((_io), (_ofs), memory_order_seq_cst, 8)
#define metal_io_write64_explicit(_io, _ofs, _val, _order) \
metal_io_write((_io), (_ofs), (_val), (_order), 8)
#define metal_io_write64(_io, _ofs, _val) \
metal_io_write((_io), (_ofs), (_val), memory_order_seq_cst, 8)
/**
* @brief Read a block from an I/O region.
* @param[in] io I/O region handle.
* @param[in] offset Offset into I/O region.
* @param[in] dst destination to store the read data.
* @param[in] len length in bytes to read.
* @return On success, number of bytes read. On failure, negative value
*/
int metal_io_block_read(struct metal_io_region *io, unsigned long offset,
void *restrict dst, int len);
/**
* @brief Write a block into an I/O region.
* @param[in] io I/O region handle.
* @param[in] offset Offset into I/O region.
* @param[in] src source to write.
* @param[in] len length in bytes to write.
* @return On success, number of bytes written. On failure, negative value
*/
int metal_io_block_write(struct metal_io_region *io, unsigned long offset,
const void *restrict src, int len);
/**
* @brief fill a block of an I/O region.
* @param[in] io I/O region handle.
* @param[in] offset Offset into I/O region.
* @param[in] value value to fill into the block
* @param[in] len length in bytes to fill.
* @return On success, number of bytes filled. On failure, negative value
*/
int metal_io_block_set(struct metal_io_region *io, unsigned long offset,
unsigned char value, int len);
#include <metal/system/generic/io.h>
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_IO__H__ */
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file irq.h
* @brief Interrupt handling primitives for libmetal.
*/
#ifndef __METAL_IRQ__H__
#define __METAL_IRQ__H__
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup irq Interrupt Handling Interfaces
* @{ */
#include <stdlib.h>
/** IRQ handled status */
#define METAL_IRQ_NOT_HANDLED 0
#define METAL_IRQ_HANDLED 1
/**
* @brief type of interrupt handler
* @param[in] irq interrupt id
* @param[in] priv private data
* @return irq handled status
*/
typedef int (*metal_irq_handler) (int irq, void *priv);
struct metal_device;
/**
* @brief Register interrupt handler for driver ID/device.
*
* @param[in] irq interrupt id
* @param[in] irq_handler interrupt handler
* @param[in] dev metal device this irq belongs to (can be NULL).
* @param[in] drv_id driver id is a unique interrupt handler identifier.
* It can also be used for driver data.
* @return 0 for success, non-zero on failure
*/
int metal_irq_register(int irq,
metal_irq_handler irq_handler,
struct metal_device *dev,
void *drv_id);
/**
* @brief Unregister interrupt handler for driver ID and/or device.
*
* If interrupt handler (hd), driver ID (drv_id) and device (dev)
* are NULL, unregister all handlers for this interrupt.
*
 * If interrupt handler (hd), device (dev) or driver ID (drv_id)
 * are not NULL, unregister the handlers matching the non-NULL criteria.
 * e.g.: when the call is made with drv_id and dev both non-NULL,
 * all handlers matching both are unregistered.
 *
 * If the interrupt is not found, or no handler matches the criteria,
 * return -ENOENT
*
* @param[in] irq interrupt id
* @param[in] irq_handler interrupt handler
* @param[in] dev metal device this irq belongs to
* @param[in] drv_id driver id. It can be used for driver data.
* @return 0 for success, non-zero on failure
*/
int metal_irq_unregister(int irq,
metal_irq_handler irq_handler,
struct metal_device *dev,
void *drv_id);
/**
* @brief disable interrupts
* @return interrupts state
*/
unsigned int metal_irq_save_disable(void);
/**
* @brief restore interrupts to their previous state
* @param[in] flags previous interrupts state
*/
void metal_irq_restore_enable(unsigned int flags);
/**
* @brief metal_irq_enable
*
* Enables the given interrupt
*
* @param vector - interrupt vector number
*/
void metal_irq_enable(unsigned int vector);
/**
* @brief metal_irq_disable
*
* Disables the given interrupt
*
* @param vector - interrupt vector number
*/
void metal_irq_disable(unsigned int vector);
#include <metal/system/generic/irq.h>
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_IRQ__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file list.h
* @brief List primitives for libmetal.
*/
#ifndef __METAL_LIST__H__
#define __METAL_LIST__H__
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup list List Primitives
* @{ */
struct metal_list {
struct metal_list *next, *prev;
};
/*
 * METAL_INIT_LIST - used for initializing a list element in a static struct
 * or global
 */
#define METAL_INIT_LIST(name) { .next = &name, .prev = &name }
/*
* METAL_DECLARE_LIST - used for defining and initializing a global or
* static singleton list
*/
#define METAL_DECLARE_LIST(name) \
struct metal_list name = METAL_INIT_LIST(name)
static inline void metal_list_init(struct metal_list *list)
{
	/* An empty list is a node whose links point back at itself. */
	list->next = list;
	list->prev = list;
}
static inline void metal_list_add_before(struct metal_list *node,
					 struct metal_list *new_node)
{
	/* Splice new_node in between node->prev and node. */
	new_node->next = node;
	new_node->prev = node->prev;
	new_node->prev->next = new_node;
	new_node->next->prev = new_node;
}
static inline void metal_list_add_after(struct metal_list *node,
					struct metal_list *new_node)
{
	/* Splice new_node in between node and node->next. */
	new_node->next = node->next;
	new_node->prev = node;
	new_node->prev->next = new_node;
	new_node->next->prev = new_node;
}
static inline void metal_list_add_head(struct metal_list *list,
struct metal_list *node)
{
metal_list_add_after(list, node);
}
static inline void metal_list_add_tail(struct metal_list *list,
struct metal_list *node)
{
metal_list_add_before(list, node);
}
static inline int metal_list_is_empty(struct metal_list *list)
{
	/* Empty means the anchor still links to itself. */
	return list == list->next;
}
static inline void metal_list_del(struct metal_list *node)
{
	struct metal_list *before = node->prev;
	struct metal_list *after = node->next;

	/* Unlink the node by joining its neighbours directly. */
	after->prev = before;
	before->next = after;
	/* Point the node at itself so a repeated delete is harmless. */
	node->next = node;
	node->prev = node;
}
static inline struct metal_list *metal_list_first(struct metal_list *list)
{
	/* The first element follows the anchor; NULL when the list is empty
	 * (i.e. the anchor links back to itself). */
	if (list->next == list)
		return NULL;
	return list->next;
}
#define metal_list_for_each(list, node) \
for ((node) = (list)->next; \
(node) != (list); \
(node) = (node)->next)
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_LIST__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file log.h
* @brief Logging support for libmetal.
*/
#ifndef __METAL_METAL_LOG__H__
#define __METAL_METAL_LOG__H__
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup logging Library Logging Interfaces
* @{ */
/** Log message priority levels for libmetal. */
enum metal_log_level {
METAL_LOG_EMERGENCY, /**< system is unusable. */
METAL_LOG_ALERT, /**< action must be taken immediately. */
METAL_LOG_CRITICAL, /**< critical conditions. */
METAL_LOG_ERROR, /**< error conditions. */
METAL_LOG_WARNING, /**< warning conditions. */
METAL_LOG_NOTICE, /**< normal but significant condition. */
METAL_LOG_INFO, /**< informational messages. */
METAL_LOG_DEBUG, /**< debug-level messages. */
};
/** Log message handler type. */
typedef void (*metal_log_handler)(enum metal_log_level level,
const char *format, ...);
/**
 * @brief Set libmetal log handler.
 * @param[in] handler log message handler.
 */
extern void metal_set_log_handler(metal_log_handler handler);
/**
* @brief Get the current libmetal log handler.
* @return Current log handler.
*/
extern metal_log_handler metal_get_log_handler(void);
/**
* @brief Set the level for libmetal logging.
* @param[in] level log message level.
*/
extern void metal_set_log_level(enum metal_log_level level);
/**
* @brief Get the current level for libmetal logging.
* @return Current log level.
*/
extern enum metal_log_level metal_get_log_level(void);
/**
 * @brief Default libmetal log handler. This handler prints libmetal log
 *        messages to stderr.
 * @param[in] level log message level.
 * @param[in] format log message format string.
 */
extern void metal_default_log_handler(enum metal_log_level level,
const char *format, ...);
/**
* Emit a log message if the log level permits.
*
* @param level Log level.
* @param ... Format string and arguments.
*/
#define metal_log(level, ...) \
((level <= _metal.common.log_level && _metal.common.log_handler) \
? (void)_metal.common.log_handler(level, __VA_ARGS__) \
: (void)0)
/** @} */
#ifdef __cplusplus
}
#endif
#include <metal/system/generic/log.h>
#endif /* __METAL_METAL_LOG__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file mutex.h
* @brief Mutex primitives for libmetal.
*/
#ifndef __METAL_MUTEX__H__
#define __METAL_MUTEX__H__
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup mutex Mutex Interfaces
* @{ */
#include <metal/system/generic/mutex.h>
/**
 * @brief Initialize a libmetal mutex.
 * @param[in] mutex Mutex to initialize.
 */
static inline void metal_mutex_init(metal_mutex_t *mutex)
{
	/* Delegate to the system-specific implementation. */
	__metal_mutex_init(mutex);
}
/**
 * @brief Deinitialize a libmetal mutex.
 * @param[in] mutex Mutex to deinitialize.
 */
static inline void metal_mutex_deinit(metal_mutex_t *mutex)
{
	/* Delegate to the system-specific implementation. */
	__metal_mutex_deinit(mutex);
}
/**
 * @brief Try to acquire a mutex without blocking.
 * @param[in] mutex Mutex to acquire.
 * @return 0 on failure to acquire, non-zero on success.
 */
static inline int metal_mutex_try_acquire(metal_mutex_t *mutex)
{
	/* Delegate to the system-specific implementation. */
	return __metal_mutex_try_acquire(mutex);
}
/**
 * @brief Acquire a mutex.
 * @param[in] mutex Mutex to acquire.
 */
static inline void metal_mutex_acquire(metal_mutex_t *mutex)
{
	/* Delegate to the system-specific implementation. */
	__metal_mutex_acquire(mutex);
}
/**
 * @brief Release a previously acquired mutex.
 * @param[in] mutex Mutex to release.
 * @see metal_mutex_try_acquire, metal_mutex_acquire
 */
static inline void metal_mutex_release(metal_mutex_t *mutex)
{
	/* Delegate to the system-specific implementation. */
	__metal_mutex_release(mutex);
}
/**
 * @brief Check whether a mutex has been acquired.
 * @param[in] mutex Mutex to check.
 * @return Result of __metal_mutex_is_acquired (presumably non-zero
 *         when the mutex is held -- depends on the system port).
 * @see metal_mutex_try_acquire, metal_mutex_acquire
 */
static inline int metal_mutex_is_acquired(metal_mutex_t *mutex)
{
	/* Delegate to the system-specific implementation. */
	return __metal_mutex_is_acquired(mutex);
}
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_MUTEX__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file arm/atomic.h
* @brief ARM specific atomic primitives for libmetal.
*/
#ifndef __METAL_ARM_ATOMIC__H__
#define __METAL_ARM_ATOMIC__H__
#endif /* __METAL_ARM_ATOMIC__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
 * @file cpu.h
 * @brief CPU specific primitives
 */
#ifndef __METAL_ARM_CPU__H__
#define __METAL_ARM_CPU__H__
#define metal_cpu_yield()
#endif /* __METAL_ARM_CPU__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file shmem.h
* @brief Shared memory primitives for libmetal.
*/
#ifndef __METAL_SHMEM__H__
#define __METAL_SHMEM__H__
#include <metal/io.h>
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup shmem Shared Memory Interfaces
* @{ */
/** Generic shared memory data structure. */
struct metal_generic_shmem {
const char *name;
struct metal_io_region io;
struct metal_list node;
};
/**
* @brief Open a libmetal shared memory segment.
*
* Open a shared memory segment.
*
* @param[in] name Name of segment to open.
* @param[in] size Size of segment.
* @param[out] io I/O region handle, if successful.
* @return 0 on success, or -errno on failure.
*
* @see metal_shmem_create
*/
extern int metal_shmem_open(const char *name, size_t size,
struct metal_io_region **io);
/**
* @brief Statically register a generic shared memory region.
*
* Shared memory regions may be statically registered at application
* initialization, or may be dynamically opened. This interface is used for
* static registration of regions. Subsequent calls to metal_shmem_open() look
* up in this list of pre-registered regions.
*
* @param[in] shmem Generic shmem structure.
* @return 0 on success, or -errno on failure.
*/
extern int metal_shmem_register_generic(struct metal_generic_shmem *shmem);
#ifdef METAL_INTERNAL
/**
* @brief Open a statically registered shmem segment.
*
* This interface is meant for internal libmetal use within system specific
* shmem implementations.
*
* @param[in] name Name of segment to open.
* @param[in] size Size of segment.
* @param[out] io I/O region handle, if successful.
* @return 0 on success, or -errno on failure.
*/
int metal_shmem_open_generic(const char *name, size_t size,
struct metal_io_region **result);
#endif
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_SHMEM__H__ */
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file sleep.h
* @brief Sleep primitives for libmetal.
*/
#ifndef __METAL_SLEEP__H__
#define __METAL_SLEEP__H__
#include <metal/system/generic/sleep.h>
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup sleep Sleep Interfaces
* @{ */
/**
* @brief delay in microseconds
* delay the next execution in the calling thread
* fo usec microseconds.
*
* @param[in] usec microsecond intervals
* @return 0 on success, non-zero for failures
*/
static inline int metal_sleep_usec(unsigned int usec)
{
	/* Delegate straight to the platform-specific sleep primitive. */
	int ret;

	ret = __metal_sleep_usec(usec);
	return ret;
}
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_SLEEP__H__ */
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file spinlock.h
* @brief Spinlock primitives for libmetal.
*/
#ifndef __METAL_SPINLOCK__H__
#define __METAL_SPINLOCK__H__
#include <metal/atomic.h>
#include <metal/cpu.h>
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup spinlock Spinlock Interfaces
* @{ */
struct metal_spinlock {
	union{
		atomic_int v; /**< lock word viewed as an atomic int (used by init). */
		atomic_flag w; /**< same storage viewed as an atomic flag
		                    (used by acquire/release). NOTE(review):
		                    the two members alias each other; confirm
		                    atomic_int 0 matches the clear flag state
		                    on this toolchain. */
	};
};
/** Static metal spinlock initialization. */
#define METAL_SPINLOCK_INIT {ATOMIC_VAR_INIT(0)}
/**
* @brief Initialize a libmetal spinlock.
* @param[in] slock Spinlock to initialize.
*/
static inline void metal_spinlock_init(struct metal_spinlock *slock)
{
	/* Zero the lock word through the atomic_int view; acquire/release
	 * operate on the aliased atomic_flag member of the union. */
	atomic_store(&slock->v, 0);
}
/**
* @brief Acquire a spinlock.
* @param[in] slock Spinlock to acquire.
* @see metal_spinlock_release
*/
static inline void metal_spinlock_acquire(struct metal_spinlock *slock)
{
	/* Spin until the test-and-set observes a clear flag (i.e. we set it
	 * ourselves), yielding the CPU between attempts. */
	while (atomic_flag_test_and_set(&slock->w)) {
		metal_cpu_yield();
	}
}
/**
* @brief Release a previously acquired spinlock.
* @param[in] slock Spinlock to release.
* @see metal_spinlock_acquire
*/
static inline void metal_spinlock_release(struct metal_spinlock *slock)
{
	/* Clearing the flag lets exactly one spinning acquirer proceed. */
	atomic_flag_clear(&slock->w);
}
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_SPINLOCK__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file sys.h
* @brief System primitives for libmetal.
* @brief Top level include internal to libmetal library code.
*/
#ifndef __METAL_SYS__H__
#define __METAL_SYS__H__
#include <stdlib.h>
#include <metal/log.h>
#include <metal/list.h>
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup system Top Level Interfaces
* @{ */
/** Physical address type. */
typedef unsigned long metal_phys_addr_t;
/** Interrupt request number. */
typedef int metal_irq_t;
/** Bad offset into shared memory or I/O region. */
#define METAL_BAD_OFFSET ((unsigned long)-1)
/** Bad physical address value. */
#define METAL_BAD_PHYS ((metal_phys_addr_t)-1)
/** Bad virtual address value. */
#define METAL_BAD_VA ((void *)-1)
/** Bad IRQ. */
#define METAL_BAD_IRQ ((metal_irq_t)-1)
/**
* Initialization configuration for libmetal.
*/
struct metal_init_params {
/** log message handler (defaults to stderr). */
metal_log_handler log_handler;
/** default log message level (defaults to emergency). */
enum metal_log_level log_level;
};
/**
* System independent runtime state for libmetal. This is part of a system
* specific singleton data structure (@see _metal).
*/
struct metal_common_state {
/** Current log level. */
enum metal_log_level log_level;
/** Current log handler (null for none). */
metal_log_handler log_handler;
/** List of registered buses. */
struct metal_list bus_list;
/** Generic statically defined shared memory segments. */
struct metal_list generic_shmem_list;
/** Generic statically defined devices. */
struct metal_list generic_device_list;
};
struct metal_state;
#include <metal/system/generic/sys.h>
#ifndef METAL_INIT_DEFAULTS
#define METAL_INIT_DEFAULTS \
{ \
.log_handler = metal_default_log_handler, \
.log_level = METAL_LOG_INFO, \
}
#endif
/** System specific runtime data. */
extern struct metal_state _metal;
/**
* @brief Initialize libmetal.
*
* Initialize the libmetal library.
*
* @param[in] params Initialization params (@see metal_init_params).
*
* @return 0 on success, or -errno on failure.
*
* @see metal_finish
*/
extern int metal_init(const struct metal_init_params *params);
/**
* @brief Shutdown libmetal.
*
* Shutdown the libmetal library, and release all reserved resources.
*
* @see metal_init
*/
extern void metal_finish(void);
#ifdef METAL_INTERNAL
/**
* @brief libmetal system initialization.
*
* This function initializes libmetal on Linux or Generic platforms. This
* involves obtaining necessary pieces of system information (sysfs mount path,
* page size, etc.).
*
* @param[in] params Initialization parameters (@see metal_init_params).
* @return 0 on success, or -errno on failure.
*/
extern int metal_sys_init(const struct metal_init_params *params);
/**
* @brief libmetal system shutdown.
*
* This function shuts down and releases resources held by libmetal Linux or
* Generic platform layers.
*
* @see metal_sys_init
*/
extern void metal_sys_finish(void);
#endif
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_SYS__H__ */
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/alloc.c
* @brief generic libmetal memory allocattion definitions.
*/
#ifndef __METAL_ALLOC__H__
#error "Include metal/alloc.h instead of metal/generic/alloc.h"
#endif
#ifndef __METAL_GENERIC_ALLOC__H__
#define __METAL_GENERIC_ALLOC__H__
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
static inline void *metal_allocate_memory(unsigned int size)
{
	/* Thin portability shim over the C library allocator;
	 * returns NULL when the allocation fails. */
	void *ptr = malloc(size);

	return ptr;
}
static inline void metal_free_memory(void *ptr)
{
	/* free() accepts NULL, so no guard is needed here. */
	free(ptr);
}
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_ALLOC__H__ */
/*
* Copyright (c) 2018, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file assert.h
* @brief Generic assertion support.
*/
#ifndef __METAL_ASSERT__H__
#error "Include metal/assert.h instead of metal/generic/assert.h"
#endif
#ifndef __METAL_GENERIC_ASSERT__H__
#define __METAL_GENERIC_ASSERT__H__
#include <assert.h>
/**
* @brief Assertion macro for bare-metal applications.
* @param cond Condition to evaluate.
*/
#define metal_sys_assert(cond) assert(cond)
#endif /* __METAL_GENERIC_ASSERT__H__ */
/*
* Copyright (c) 2018, Linaro Limited. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/cache.h
* @brief generic cache operation primitives for libmetal.
*/
#ifndef __METAL_CACHE__H__
#error "Include metal/cache.h instead of metal/generic/cache.h"
#endif
#ifndef __METAL_GENERIC_CACHE__H__
#define __METAL_GENERIC_CACHE__H__
#ifdef __cplusplus
extern "C" {
#endif
extern void metal_machine_cache_flush(void *addr, unsigned int len);
extern void metal_machine_cache_invalidate(void *addr, unsigned int len);
static inline void __metal_cache_flush(void *addr, unsigned int len)
{
	/* Forward to the machine-specific flush implementation. */
	metal_machine_cache_flush(addr, len);
}
static inline void __metal_cache_invalidate(void *addr, unsigned int len)
{
	/* Forward to the machine-specific invalidate implementation. */
	metal_machine_cache_invalidate(addr, len);
}
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_CACHE__H__ */
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/condition.h
* @brief Generic condition variable primitives for libmetal.
*/
#ifndef __METAL_CONDITION__H__
#error "Include metal/condition.h instead of metal/generic/condition.h"
#endif
#ifndef __METAL_GENERIC_CONDITION__H__
#define __METAL_GENERIC_CONDITION__H__
#if defined (__CC_ARM)
#include <stdio.h>
#endif
#include <metal/atomic.h>
#include <stdint.h>
#include <limits.h>
#include <metal/errno.h>
#ifdef __cplusplus
extern "C" {
#endif
struct metal_condition {
metal_mutex_t *m; /**< mutex.
The condition variable is attached to
this mutex when it is waiting.
It is also used to check correctness
in case there are multiple waiters. */
atomic_int v; /**< condition variable value. */
};
/** Static metal condition variable initialization. */
#define METAL_CONDITION_INIT { NULL, ATOMIC_VAR_INIT(0) }
static inline void metal_condition_init(struct metal_condition *cv)
{
	/* No mutex attached yet; counter starts at zero. */
	cv->m = NULL;
	atomic_init(&cv->v, 0);
}
static inline int metal_condition_signal(struct metal_condition *cv)
{
	if (!cv)
		return -EINVAL;
	/** wake up waiters if there are any. */
	/* Bumping the counter changes the value waiters compare against. */
	atomic_fetch_add(&cv->v, 1);
	return 0;
}
static inline int metal_condition_broadcast(struct metal_condition *cv)
{
	/* Signalling increments the shared counter, which releases every
	 * waiter, so broadcast is simply an alias for signal here. */
	int ret = metal_condition_signal(cv);

	return ret;
}
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_CONDITION__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of Xilinx nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* @file generic/mp1_m4/sys.h
* @brief generic mp1_m4 system primitives for libmetal.
*/
#ifndef __METAL_GENERIC_SYS__H__
#error "Include metal/sys.h instead of metal/generic/cortexm/sys.h"
#endif
#ifndef __METAL_GENERIC_MP1_M4_SYS__H__
#define __METAL_GENERIC_MP1_M4_SYS__H__
#ifdef __cplusplus
extern "C" {
#endif
#if !defined(MAX_IRQS)
#define MAX_IRQS 8 /**< maximum number of irqs */
#endif
static inline void sys_irq_enable(unsigned int vector)
{
	/* No-op stub for the mp1_m4 port; the vector is intentionally unused. */
	(void)vector;
}
static inline void sys_irq_disable(unsigned int vector)
{
	/* No-op stub for the mp1_m4 port; the vector is intentionally unused. */
	(void)vector;
}
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_MP1_M4_SYS__H__ */
/*
* Copyright (c) 2017, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/io.h
* @brief Generic specific io definitions.
*/
#ifndef __METAL_IO__H__
#error "Include metal/io.h instead of metal/generic/io.h"
#endif
#ifndef __METAL_GENERIC_IO__H__
#define __METAL_GENERIC_IO__H__
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef METAL_INTERNAL
/**
* @brief memory mapping for an I/O region
*/
void metal_sys_io_mem_map(struct metal_io_region *io);
/**
* @brief memory mapping
*/
void *metal_machine_io_mem_map(void *va, metal_phys_addr_t pa,
size_t size, unsigned int flags);
#endif
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_IO__H__ */
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/irq.c
* @brief Generic libmetal irq definitions.
*/
#ifndef __METAL_IRQ__H__
#error "Include metal/irq.h instead of metal/generic/irq.h"
#endif
#ifndef __METAL_GENERIC_IRQ__H__
#define __METAL_GENERIC_IRQ__H__
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief default interrupt handler
* @param[in] vector interrupt vector
*/
void metal_irq_isr(unsigned int vector);
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_IRQ__H__ */
/*
* Copyright (c) 2018, Linaro Limited. and Contributors. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of Linaro nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* @file generic/log.h
* @brief Generic libmetal log handler definition.
*/
#ifndef __METAL_METAL_LOG__H__
#error "Include metal/log.h instead of metal/generic/log.h"
#endif
#ifndef __METAL_GENERIC_LOG__H__
#define __METAL_GENERIC_LOG__H__
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_LOG__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/mutex.h
* @brief Generic mutex primitives for libmetal.
*/
#ifndef __METAL_MUTEX__H__
#error "Include metal/mutex.h instead of metal/generic/mutex.h"
#endif
#ifndef __METAL_GENERIC_MUTEX__H__
#define __METAL_GENERIC_MUTEX__H__
#include <metal/atomic.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
	union{
		atomic_int v; /**< mutex word viewed as an atomic int
		                   (used by init and is_acquired). */
		atomic_flag w; /**< same storage viewed as an atomic flag
		                    (used by acquire/try/release). */
	};
} metal_mutex_t;
/*
* METAL_MUTEX_INIT - used for initializing an mutex elmenet in a static struct
* or global
*/
#define METAL_MUTEX_INIT(m) { ATOMIC_VAR_INIT(0) }
/*
* METAL_MUTEX_DEFINE - used for defining and initializing a global or
* static singleton mutex
*/
#define METAL_MUTEX_DEFINE(m) metal_mutex_t m = METAL_MUTEX_INIT(m)
static inline void __metal_mutex_init(metal_mutex_t *mutex)
{
	/* Zero the lock word through the atomic_int view of the union. */
	atomic_store(&mutex->v, 0);
}
static inline void __metal_mutex_deinit(metal_mutex_t *mutex)
{
	/* Nothing to release for a bare atomic mutex. */
	(void)mutex;
}
static inline int __metal_mutex_try_acquire(metal_mutex_t *mutex)
{
	/* test_and_set returns the previous flag state: 0 means we just took
	 * the lock, so invert it to report success as 1. */
	return 1 - atomic_flag_test_and_set(&mutex->w);
}
static inline void __metal_mutex_acquire(metal_mutex_t *mutex)
{
	/* Busy-wait until the test-and-set observes a clear flag. */
	while (atomic_flag_test_and_set(&mutex->w)) {
		;
	}
}
static inline void __metal_mutex_release(metal_mutex_t *mutex)
{
	/* Clearing the flag unblocks exactly one spinning acquirer. */
	atomic_flag_clear(&mutex->w);
}
static inline int __metal_mutex_is_acquired(metal_mutex_t *mutex)
{
	/* Reads the lock word through the atomic_int view; acquire/release
	 * write the aliased atomic_flag member. NOTE(review): this assumes a
	 * set flag reads back as non-zero through the union — confirm for
	 * the target ABI. */
	return atomic_load(&mutex->v);
}
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_MUTEX__H__ */
/*
* Copyright (c) 2018, Linaro Limited. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/sleep.h
* @brief Generic sleep primitives for libmetal.
*/
#ifndef __METAL_SLEEP__H__
#error "Include metal/sleep.h instead of metal/generic/sleep.h"
#endif
#ifndef __METAL_GENERIC_SLEEP__H__
#define __METAL_GENERIC_SLEEP__H__
#include <metal/utilities.h>
#ifdef __cplusplus
extern "C" {
#endif
static inline int __metal_sleep_usec(unsigned int usec)
{
	/* The generic port has no delay source wired up yet, so this stub
	 * only consumes its argument and reports success. */
	metal_unused(usec);
	/* Fix me */
	return 0;
}
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_SLEEP__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/sys.h
* @brief Generic system primitives for libmetal.
*/
#ifndef __METAL_SYS__H__
#error "Include metal/sys.h instead of metal/generic/sys.h"
#endif
#ifndef __METAL_GENERIC_SYS__H__
#define __METAL_GENERIC_SYS__H__
#include <metal/errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include "./cortexm/sys.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifndef METAL_MAX_DEVICE_REGIONS
#define METAL_MAX_DEVICE_REGIONS 1
#endif
/** Structure of generic libmetal runtime state. */
struct metal_state {
/** Common (system independent) data. */
struct metal_common_state common;
};
#ifdef METAL_INTERNAL
/**
* @brief restore interrupts to state before disable_global_interrupt()
*/
void sys_irq_restore_enable(unsigned int flags);
/**
* @brief disable all interrupts
*/
unsigned int sys_irq_save_disable(void);
#endif /* METAL_INTERNAL */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_SYS__H__ */
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file time.h
* @brief Time primitives for libmetal.
*/
#ifndef __METAL_TIME__H__
#define __METAL_TIME__H__
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup time TIME Interfaces
* @{ */
#include <stdint.h>
#include <metal/sys.h>
/**
* @brief get timestamp
* This function returns the timestampe as unsigned long long
* value.
*
* @return timestamp
*/
unsigned long long metal_get_timestamp(void);
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_TIME__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file utilities.h
* @brief Utility routines for libmetal.
*/
#ifndef __METAL_UTILITIES__H__
#define __METAL_UTILITIES__H__
#include <stdint.h>
#include <metal/assert.h>
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup utilities Simple Utilities
* @{ */
/** Marker for unused function arguments/variables. */
#define metal_unused(x) do { (x) = (x); } while (0)
/** Figure out number of elements in an array. */
#define metal_dim(x) (sizeof(x) / sizeof(x[0]))
/** Minimum of two numbers (warning: multiple evaluation!). */
#define metal_min(x, y) ((x) < (y) ? (x) : (y))
/** Maximum of two numbers (warning: multiple evaluation!). */
#define metal_max(x, y) ((x) > (y) ? (x) : (y))
/** Sign of a number [-1, 0, or 1] (warning: multiple evaluation!). */
#define metal_sign(x) ((x) < 0 ? -1 : ((x) > 0 ? 1 : 0))
/** Align 'size' down to a multiple of 'align' (must be a power of two). */
#define metal_align_down(size, align) \
((size) & ~((align) - 1))
/** Align 'size' up to a multiple of 'align' (must be a power of two). */
#define metal_align_up(size, align) \
metal_align_down((size) + (align) - 1, align)
/** Divide (and round down). */
#define metal_div_round_down(num, den) \
((num) / (den))
/** Divide (and round up). */
#define metal_div_round_up(num, den) \
metal_div_round_down((num) + (den) - 1, (den))
/** Align 'ptr' down to a multiple of 'align' (must be a power of two). */
#define metal_ptr_align_down(ptr, align) \
(void *)(metal_align_down((uintptr_t)(ptr), (uintptr_t)(align)))
/** Align 'ptr' up to a multiple of 'align' (must be a power of two). */
#define metal_ptr_align_up(ptr, align) \
(void *)(metal_align_up((uintptr_t)(ptr), (uintptr_t)(align)))
/** Compute offset of a field within a structure. */
#define metal_offset_of(structure, member) \
((uintptr_t) &(((structure *) 0)->member))
/** Compute pointer to a structure given a pointer to one of its fields. */
#define metal_container_of(ptr, structure, member) \
(void *)((uintptr_t)(ptr) - metal_offset_of(structure, member))
#define METAL_BITS_PER_ULONG (8 * sizeof(unsigned long))
#define metal_bit(bit) (1UL << (bit))
#define metal_bitmap_longs(x) metal_div_round_up((x), METAL_BITS_PER_ULONG)
static inline void metal_bitmap_set_bit(unsigned long *bitmap, int bit)
{
	/* Locate the word holding the bit, then OR in its mask. */
	unsigned long mask = metal_bit(bit & (METAL_BITS_PER_ULONG - 1));

	bitmap[bit / METAL_BITS_PER_ULONG] |= mask;
}
static inline int metal_bitmap_is_bit_set(unsigned long *bitmap, int bit)
{
	/* Mask out the requested bit from its containing word. */
	unsigned long word = bitmap[bit / METAL_BITS_PER_ULONG];

	return word & metal_bit(bit & (METAL_BITS_PER_ULONG - 1));
}
static inline void metal_bitmap_clear_bit(unsigned long *bitmap, int bit)
{
	/* Locate the word holding the bit, then AND out its mask. */
	unsigned long mask = metal_bit(bit & (METAL_BITS_PER_ULONG - 1));

	bitmap[bit / METAL_BITS_PER_ULONG] &= ~mask;
}
static inline int metal_bitmap_is_bit_clear(unsigned long *bitmap, int bit)
{
	/* A bit is clear exactly when it is not set. */
	return metal_bitmap_is_bit_set(bitmap, bit) ? 0 : 1;
}
static inline unsigned int
metal_bitmap_next_set_bit(unsigned long *bitmap, unsigned int start,
			  unsigned int max)
{
	/* Linear scan from 'start'; returns 'max' when no set bit exists
	 * in [start, max). */
	unsigned int pos = start;

	while (pos < max && !metal_bitmap_is_bit_set(bitmap, pos))
		pos++;
	return pos;
}
#define metal_bitmap_for_each_set_bit(bitmap, bit, max) \
for ((bit) = metal_bitmap_next_set_bit((bitmap), 0, (max)); \
(bit) < (max); \
(bit) = metal_bitmap_next_set_bit((bitmap), (bit), (max)))
static inline unsigned int
metal_bitmap_next_clear_bit(unsigned long *bitmap, unsigned int start,
			    unsigned int max)
{
	/* Linear scan from 'start'; returns 'max' when no clear bit exists
	 * in [start, max). */
	unsigned int pos = start;

	while (pos < max && !metal_bitmap_is_bit_clear(bitmap, pos))
		pos++;
	return pos;
}
#define metal_bitmap_for_each_clear_bit(bitmap, bit, max) \
for ((bit) = metal_bitmap_next_clear_bit((bitmap), 0, (max)); \
(bit) < (max); \
(bit) = metal_bitmap_next_clear_bit((bitmap), (bit), (max)))
static inline unsigned long metal_log2(unsigned long in)
{
	/* Base-2 logarithm of an exact power of two; asserts otherwise. */
	unsigned long bits = 0;

	metal_assert((in & (in - 1)) == 0);
	while ((1UL << bits) < in)
		bits++;
	return bits;
}
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_UTILITIES__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file version.h
* @brief Library version information for libmetal.
*/
#ifndef __METAL_VERSION__H__
#define __METAL_VERSION__H__
#ifdef __cplusplus
extern "C" {
#endif
/** \defgroup versions Library Version Interfaces
* @{ */
/**
* @brief Library major version number.
*
* Return the major version number of the library linked into the application.
* This is required to match the value of METAL_VER_MAJOR, which is the major
* version of the library that the application was compiled against.
*
* @return Library major version number.
* @see METAL_VER_MAJOR
*/
extern int metal_ver_major(void);
/**
* @brief Library minor version number.
*
* Return the minor version number of the library linked into the application.
* This could differ from the value of METAL_VER_MINOR, which is the minor
* version of the library that the application was compiled against.
*
* @return Library minor version number.
* @see METAL_VER_MINOR
*/
extern int metal_ver_minor(void);
/**
* @brief Library patch level.
*
* Return the patch level of the library linked into the application. This
* could differ from the value of METAL_VER_PATCH, which is the patch level of
* the library that the application was compiled against.
*
* @return Library patch level.
* @see METAL_VER_PATCH
*/
extern int metal_ver_patch(void);
/**
* @brief Library version string.
*
* Return the version string of the library linked into the application. This
* could differ from the value of METAL_VER, which is the version string of
* the library that the application was compiled against.
*
* @return Library version string.
* @see METAL_VER
*/
extern const char *metal_ver(void);
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_VERSION__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include <metal/sys.h>
/**
 * @brief Initialize libmetal.
 *
 * Zeroes the global state, records the caller's logging configuration,
 * initializes the bus/shmem/device registration lists, and runs the
 * system-layer initialization.
 *
 * @param[in] params Initialization parameters (must be non-NULL).
 * @return 0 on success, or -errno from metal_sys_init() on failure.
 */
int metal_init(const struct metal_init_params *params)
{
	memset(&_metal, 0, sizeof(_metal));
	_metal.common.log_handler = params->log_handler;
	_metal.common.log_level = params->log_level;
	metal_list_init(&_metal.common.bus_list);
	metal_list_init(&_metal.common.generic_shmem_list);
	metal_list_init(&_metal.common.generic_device_list);
	/* Propagate the system layer's status directly; the previous
	 * if (error) return error; return error; pair was dead code. */
	return metal_sys_init(params);
}
/**
 * @brief Shut down libmetal: tear down the system layer, then wipe the
 * global state so a subsequent metal_init() starts clean.
 */
void metal_finish(void)
{
	metal_sys_finish();
	memset(&_metal, 0, sizeof(_metal));
}
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <metal/errno.h>
#include <limits.h>
#include <metal/io.h>
#include <metal/sys.h>
/**
 * @brief Initialize an I/O region descriptor.
 *
 * @param[in] io         Region to initialize.
 * @param[in] virt       Virtual base address of the region.
 * @param[in] physmap    Table of physical page addresses backing the region.
 * @param[in] size       Region size in bytes.
 * @param[in] page_shift Log2 of the page size.
 * @param[in] mem_flags  Memory attribute flags used when mapping.
 * @param[in] ops        Optional I/O operations; NULL selects direct access.
 */
void metal_io_init(struct metal_io_region *io, void *virt,
	      const metal_phys_addr_t *physmap, size_t size,
	      unsigned page_shift, unsigned int mem_flags,
	      const struct metal_io_ops *ops)
{
	/* All-NULL ops table used when the caller passes no ops. */
	const struct metal_io_ops nops = {NULL, NULL, NULL, NULL, NULL, NULL};
	io->virt = virt;
	io->physmap = physmap;
	io->size = size;
	io->page_shift = page_shift;
	if (page_shift >= sizeof(io->page_mask) * CHAR_BIT)
		/* avoid overflow */
		io->page_mask = -1UL;
	else
		io->page_mask = (1UL << page_shift) - 1UL;
	io->mem_flags = mem_flags;
	io->ops = ops ? *ops : nops;
	/* Let the platform establish the actual memory mapping. */
	metal_sys_io_mem_map(io);
}
/**
 * @brief Read a block of data from an I/O region.
 *
 * Copies up to @p len bytes starting at @p offset into @p dst, clamping
 * the length to the end of the region.
 *
 * @param[in]  io     I/O region to read from.
 * @param[in]  offset Byte offset within the region.
 * @param[out] dst    Destination buffer.
 * @param[in]  len    Number of bytes requested.
 * @return Number of bytes read, or -ERANGE when @p offset is past the end.
 */
int metal_io_block_read(struct metal_io_region *io, unsigned long offset,
		   void *restrict dst, int len)
{
	unsigned char *ptr = metal_io_virt(io, offset);
	unsigned char *dest = dst;
	int retlen;
	if (offset > io->size)
		return -ERANGE;
	if ((offset + len) > io->size)
		len = io->size - offset; /* clamp to the region end */
	retlen = len;
	if (io->ops.block_read) {
		/* Region supplies its own block-read operation. */
		retlen = (*io->ops.block_read)(
			io, offset, dst, memory_order_seq_cst, len);
	} else {
		atomic_thread_fence(memory_order_seq_cst);
		/* Copy byte-wise until both pointers are int-aligned. */
		while ( len && (
			((uintptr_t)dest % sizeof(int)) ||
			((uintptr_t)ptr % sizeof(int)))) {
			*(unsigned char *)dest =
				*(const unsigned char *)ptr;
			dest++;
			ptr++;
			len--;
		}
		/* Bulk copy one int at a time while enough bytes remain. */
		for (; len >= (int)sizeof(int); dest += sizeof(int),
					ptr += sizeof(int),
					len -= sizeof(int))
			*(unsigned int *)dest = *(const unsigned int *)ptr;
		/* Copy the unaligned tail byte-wise. */
		for (; len != 0; dest++, ptr++, len--)
			*(unsigned char *)dest =
				*(const unsigned char *)ptr;
	}
	return retlen;
}
/**
 * @brief Write a block of data to an I/O region.
 *
 * Copies up to @p len bytes from @p src into the region at @p offset,
 * clamping the length to the end of the region.
 *
 * @param[in] io     I/O region to write to.
 * @param[in] offset Byte offset within the region.
 * @param[in] src    Source buffer.
 * @param[in] len    Number of bytes requested.
 * @return Number of bytes written, or -ERANGE when @p offset is past the end.
 */
int metal_io_block_write(struct metal_io_region *io, unsigned long offset,
		    const void *restrict src, int len)
{
	unsigned char *ptr = metal_io_virt(io, offset);
	const unsigned char *source = src;
	int retlen;
	if (offset > io->size)
		return -ERANGE;
	if ((offset + len) > io->size)
		len = io->size - offset; /* clamp to the region end */
	retlen = len;
	if (io->ops.block_write) {
		/* Region supplies its own block-write operation. */
		retlen = (*io->ops.block_write)(
			io, offset, src, memory_order_seq_cst, len);
	} else {
		/* Copy byte-wise until both pointers are int-aligned. */
		while ( len && (
			((uintptr_t)ptr % sizeof(int)) ||
			((uintptr_t)source % sizeof(int)))) {
			*(unsigned char *)ptr =
				*(const unsigned char *)source;
			ptr++;
			source++;
			len--;
		}
		/* Bulk copy one int at a time while enough bytes remain. */
		for (; len >= (int)sizeof(int); ptr += sizeof(int),
					source += sizeof(int),
					len -= sizeof(int))
			*(unsigned int *)ptr = *(const unsigned int *)source;
		/* Copy the unaligned tail byte-wise. */
		for (; len != 0; ptr++, source++, len--)
			*(unsigned char *)ptr =
				*(const unsigned char *)source;
		/* Make the writes visible before returning. */
		atomic_thread_fence(memory_order_seq_cst);
	}
	return retlen;
}
/**
 * @brief Fill a block of an I/O region with a byte value.
 *
 * @param[in] io     I/O region to fill.
 * @param[in] offset Byte offset within the region.
 * @param[in] value  Byte value to store.
 * @param[in] len    Number of bytes requested.
 * @return Number of bytes set, or -ERANGE when @p offset is past the end.
 */
int metal_io_block_set(struct metal_io_region *io, unsigned long offset,
		  unsigned char value, int len)
{
	unsigned char *ptr = metal_io_virt(io, offset);
	int retlen = len;
	if (offset > io->size)
		return -ERANGE;
	if ((offset + len) > io->size)
		len = io->size - offset; /* clamp to the region end */
	retlen = len;
	if (io->ops.block_set) {
		/* Region supplies its own block-set operation. */
		(*io->ops.block_set)(
			io, offset, value, memory_order_seq_cst, len);
	} else {
		/* Replicate the byte into every byte of an int. */
		unsigned int cint = value;
		unsigned int i;
		for (i = 1; i < sizeof(int); i++)
			cint |= ((unsigned int)value << (8 * i));
		/* Byte-wise stores until the pointer is int-aligned. */
		for (; len && ((uintptr_t)ptr % sizeof(int)); ptr++, len--)
			*(unsigned char *)ptr = (unsigned char) value;
		/* Bulk stores one int at a time. */
		for (; len >= (int)sizeof(int); ptr += sizeof(int),
					len -= sizeof(int))
			*(unsigned int *)ptr = cint;
		/* Byte-wise stores for the unaligned tail. */
		for (; len != 0; ptr++, len--)
			*(unsigned char *)ptr = (unsigned char) value;
		/* Make the stores visible before returning. */
		atomic_thread_fence(memory_order_seq_cst);
	}
	return retlen;
}
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdarg.h>
#include <stdio.h>
#include <metal/log.h>
#include <metal/sys.h>
/**
 * @brief Default log handler: formats the message and prints it to stderr
 * with a severity prefix. Compiles to a no-op unless DEFAULT_LOGGER_ON
 * is defined.
 *
 * @param[in] level  Severity of the message.
 * @param[in] format printf-style format string, followed by its arguments.
 */
void metal_default_log_handler(enum metal_log_level level,
			       const char *format, ...)
{
#ifdef DEFAULT_LOGGER_ON
	char msg[1024];
	va_list args;
	static const char *level_strs[] = {
		"metal: emergency: ",
		"metal: alert:     ",
		"metal: critical:  ",
		"metal: error:     ",
		"metal: warning:   ",
		"metal: notice:    ",
		"metal: info:      ",
		"metal: debug:     ",
	};
	va_start(args, format);
	vsnprintf(msg, sizeof(msg), format, args);
	va_end(args);
	/* Out-of-range levels are reported at the highest severity. */
	if (level <= METAL_LOG_EMERGENCY || level > METAL_LOG_DEBUG)
		level = METAL_LOG_EMERGENCY;
	fprintf(stderr, "%s%s", level_strs[level], msg);
#else
	(void)level;
	(void)format;
#endif
}
/** @brief Install @p handler as the global log message handler. */
void metal_set_log_handler(metal_log_handler handler)
{
	_metal.common.log_handler = handler;
}
/** @brief Return the currently installed global log message handler. */
metal_log_handler metal_get_log_handler(void)
{
	return _metal.common.log_handler;
}
/** @brief Set the global log filtering level to @p level. */
void metal_set_log_level(enum metal_log_level level)
{
	_metal.common.log_level = level;
}
/** @brief Return the current global log filtering level. */
enum metal_log_level metal_get_log_level(void)
{
	return _metal.common.log_level;
}
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/shmem.c
* @brief Generic libmetal shared memory handling.
*/
#include <metal/errno.h>
#include <metal/assert.h>
#include <metal/shmem.h>
#include <metal/sys.h>
#include <metal/utilities.h>
/**
 * @brief Statically register a generic shared memory region so later
 * metal_shmem_open() calls can find it by name.
 *
 * @param[in] shmem Region to register; must have a non-empty name and no
 *                  close op (static regions have no destructor).
 * @return 0 (registration itself cannot fail once the asserts pass).
 */
int metal_shmem_register_generic(struct metal_generic_shmem *shmem)
{
	/* Make sure that we can be found. */
	metal_assert(shmem->name && strlen(shmem->name) != 0);
	/* Statically registered shmem regions cannot have a destructor. */
	metal_assert(!shmem->io.ops.close);
	metal_list_add_tail(&_metal.common.generic_shmem_list,
			    &shmem->node);
	return 0;
}
/**
 * @brief Look up a statically registered shmem region by name.
 *
 * The first registered region whose name matches and whose size is at
 * least @p size wins.
 *
 * @param[in]  name   Name of the segment to open.
 * @param[in]  size   Minimum required size in bytes.
 * @param[out] result Receives the matching I/O region on success.
 * @return 0 on success, -ENOENT when no suitable region is registered.
 */
int metal_shmem_open_generic(const char *name, size_t size,
			     struct metal_io_region **result)
{
	struct metal_list *entry;

	metal_list_for_each(&_metal.common.generic_shmem_list, entry) {
		struct metal_generic_shmem *region;

		region = metal_container_of(entry,
					    struct metal_generic_shmem, node);
		if (!strcmp(region->name, name) &&
		    size <= metal_io_region_size(&region->io)) {
			*result = &region->io;
			return 0;
		}
	}
	return -ENOENT;
}
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include <metal/errno.h>
#include <metal/assert.h>
#include <metal/device.h>
#include <metal/list.h>
#include <metal/log.h>
#include <metal/sys.h>
#include <metal/utilities.h>
#include <metal/dma.h>
#include <metal/cache.h>
/**
 * @brief Register a bus so devices can be opened on it by name.
 *
 * @param[in] bus Bus to register; must have a non-empty, unique name.
 * @return 0 on success, -EINVAL for a bad argument, -EEXIST when a bus
 *         with the same name is already registered.
 */
int metal_bus_register(struct metal_bus *bus)
{
	if (!bus || !bus->name || !strlen(bus->name))
		return -EINVAL;
	/* metal_bus_find() returns 0 when the name is already taken. */
	if (metal_bus_find(bus->name, NULL) == 0)
		return -EEXIST;
	metal_list_init(&bus->devices);
	metal_list_add_tail(&_metal.common.bus_list, &bus->node);
	metal_log(METAL_LOG_DEBUG, "registered %s bus\n", bus->name);
	return 0;
}
/**
 * @brief Unregister a bus and run its close hook, if any.
 *
 * @param[in] bus Bus to unregister (must have been registered).
 * @return 0 always.
 */
int metal_bus_unregister(struct metal_bus *bus)
{
	metal_list_del(&bus->node);
	if (bus->ops.bus_close)
		bus->ops.bus_close(bus);
	/* NOTE(review): bus->name is read after bus_close(); assumes the
	 * close hook does not free the bus structure — confirm. */
	metal_log(METAL_LOG_DEBUG, "unregistered %s bus\n", bus->name);
	return 0;
}
/**
 * @brief Find a registered bus by name.
 *
 * @param[in]  name   Bus name to search for.
 * @param[out] result Receives the bus on success; may be NULL to only
 *                    test for existence.
 * @return 0 on success, -ENOENT when no bus matches.
 */
int metal_bus_find(const char *name, struct metal_bus **result)
{
	struct metal_list *entry;

	metal_list_for_each(&_metal.common.bus_list, entry) {
		struct metal_bus *candidate =
			metal_container_of(entry, struct metal_bus, node);

		if (!strcmp(candidate->name, name)) {
			if (result)
				*result = candidate;
			return 0;
		}
	}
	return -ENOENT;
}
/**
 * @brief Open a device by bus name and device name.
 *
 * @param[in]  bus_name Name of the bus holding the device.
 * @param[in]  dev_name Name of the device to open.
 * @param[out] device   Receives the opened device on success.
 * @return 0 on success; -EINVAL for bad arguments, -ENODEV when the bus
 *         cannot open devices, or the error from the bus lookup/open.
 */
int metal_device_open(const char *bus_name, const char *dev_name,
		      struct metal_device **device)
{
	struct metal_bus *bus;
	int ret;

	/* Reject NULL/empty names and a NULL result pointer up front. */
	if (!bus_name || !strlen(bus_name) ||
	    !dev_name || !strlen(dev_name) ||
	    !device)
		return -EINVAL;
	ret = metal_bus_find(bus_name, &bus);
	if (ret)
		return ret;
	/* A bus without an open hook cannot hand out devices. */
	if (!bus->ops.dev_open)
		return -ENODEV;
	return (*bus->ops.dev_open)(bus, dev_name, device);
}
/**
 * @brief Close a previously opened device via its bus's close hook.
 *
 * @param[in] device Device to close; must be non-NULL with a valid bus.
 */
void metal_device_close(struct metal_device *device)
{
	metal_assert(device && device->bus);
	if (device->bus->ops.dev_close)
		device->bus->ops.dev_close(device->bus, device);
}
/**
 * @brief Statically register a device on the generic bus.
 *
 * @param[in] device Device to register; must have a non-empty name and at
 *                   most METAL_MAX_DEVICE_REGIONS regions.
 * @return 0 on success, -EINVAL for an invalid device.
 */
int metal_register_generic_device(struct metal_device *device)
{
	if (!device->name || !strlen(device->name) ||
	    device->num_regions > METAL_MAX_DEVICE_REGIONS)
		return -EINVAL;
	/* Generic devices always belong to the built-in generic bus. */
	device->bus = &metal_generic_bus;
	metal_list_add_tail(&_metal.common.generic_device_list,
			    &device->node);
	return 0;
}
/**
 * @brief Open hook for the generic bus: find a statically registered
 * device by name and run the system-specific open on it.
 *
 * @param[in]  bus      Bus (unused; generic devices live in one list).
 * @param[in]  dev_name Device name to search for.
 * @param[out] device   Receives the device on success.
 * @return 0 or the metal_generic_dev_sys_open() status on a match,
 *         -ENODEV when no device matches.
 */
int metal_generic_dev_open(struct metal_bus *bus, const char *dev_name,
			   struct metal_device **device)
{
	struct metal_list *entry;

	(void)bus;
	metal_list_for_each(&_metal.common.generic_device_list, entry) {
		struct metal_device *candidate =
			metal_container_of(entry, struct metal_device, node);

		if (!strcmp(candidate->name, dev_name)) {
			*device = candidate;
			return metal_generic_dev_sys_open(candidate);
		}
	}
	return -ENODEV;
}
/**
 * @brief Map a scatter-gather list for DMA on a generic device.
 *
 * Copies the input list to the output list (when they differ) and performs
 * the cache maintenance needed before the device touches the buffers.
 *
 * @param[in]  bus      Bus (unused).
 * @param[in]  device   Device (unused).
 * @param[in]  dir      DMA direction (METAL_DMA_*).
 * @param[in]  sg_in    Input scatter-gather list.
 * @param[in]  nents_in Number of entries in @p sg_in.
 * @param[out] sg_out   Output scatter-gather list.
 * @return Number of mapped entries (always @p nents_in).
 */
int metal_generic_dev_dma_map(struct metal_bus *bus,
			 struct metal_device *device,
			 uint32_t dir,
			 struct metal_sg *sg_in,
			 int nents_in,
			 struct metal_sg *sg_out)
{
	(void)bus;
	(void)device;
	int i;
	if (sg_out != sg_in)
		memcpy(sg_out, sg_in, nents_in*(sizeof(struct metal_sg)));
	for (i = 0; i < nents_in; i++) {
		if (dir == METAL_DMA_DEV_W) {
			/* Device will read the buffer: push CPU writes out. */
			metal_cache_flush(sg_out[i].virt, sg_out[i].len);
		}
		/* Drop stale CPU cache lines covering the DMA window. */
		metal_cache_invalidate(sg_out[i].virt, sg_out[i].len);
	}
	return nents_in;
}
/**
 * dev_dma_unmap hook for the generic bus: invalidate every scatter-list
 * entry so the CPU re-reads memory the device may have written.
 *
 * @param bus    unused
 * @param device unused
 * @param dir    unused
 * @param sg     scatter list to unmap
 * @param nents  number of entries in sg
 */
void metal_generic_dev_dma_unmap(struct metal_bus *bus,
				 struct metal_device *device,
				 uint32_t dir,
				 struct metal_sg *sg,
				 int nents)
{
	int idx;

	(void)bus;
	(void)device;
	(void)dir;

	for (idx = 0; idx < nents; idx++)
		metal_cache_invalidate(sg[idx].virt, sg[idx].len);
}
/*
 * The built-in "generic" bus: devices are registered statically via
 * metal_register_generic_device().  Optional hooks (bus_close,
 * dev_close, dev_irq_ack) are left NULL and skipped by the callers.
 * Declared metal_weak so a platform port can supply its own definition.
 */
struct metal_bus metal_weak metal_generic_bus = {
	.name = "generic",
	.ops = {
		.bus_close = NULL,
		.dev_open = metal_generic_dev_open,
		.dev_close = NULL,
		.dev_irq_ack = NULL,
		.dev_dma_map = metal_generic_dev_dma_map,
		.dev_dma_unmap = metal_generic_dev_dma_unmap,
	},
};
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
 * @file generic/alloc.h
 * @brief generic libmetal memory allocation definitions.
*/
#ifndef __METAL_ALLOC__H__
#error "Include metal/alloc.h instead of metal/generic/alloc.h"
#endif
#ifndef __METAL_GENERIC_ALLOC__H__
#define __METAL_GENERIC_ALLOC__H__
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Allocate @size bytes from the C heap.
 *
 * @param size number of bytes to allocate
 * @return pointer to the block, or NULL on failure (malloc semantics)
 */
static inline void *metal_allocate_memory(unsigned int size)
{
	return malloc(size);
}
/**
 * Release a block obtained from metal_allocate_memory().
 *
 * @param ptr block to free; NULL is allowed (free semantics)
 */
static inline void metal_free_memory(void *ptr)
{
	free(ptr);
}
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_ALLOC__H__ */
/*
* Copyright (c) 2018, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file assert.h
* @brief Generic assertion support.
*/
#ifndef __METAL_ASSERT__H__
#error "Include metal/assert.h instead of metal/generic/assert.h"
#endif
#ifndef __METAL_GENERIC_ASSERT__H__
#define __METAL_GENERIC_ASSERT__H__
#include <assert.h>
/**
* @brief Assertion macro for bare-metal applications.
* @param cond Condition to evaluate.
*/
#define metal_sys_assert(cond) assert(cond)
#endif /* __METAL_GENERIC_ASSERT__H__ */
/*
* Copyright (c) 2018, Linaro Limited. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/cache.h
* @brief generic cache operation primitives for libmetal.
*/
#ifndef __METAL_CACHE__H__
#error "Include metal/cache.h instead of metal/generic/cache.h"
#endif
#ifndef __METAL_GENERIC_CACHE__H__
#define __METAL_GENERIC_CACHE__H__
#ifdef __cplusplus
extern "C" {
#endif
extern void metal_machine_cache_flush(void *addr, unsigned int len);
extern void metal_machine_cache_invalidate(void *addr, unsigned int len);
/* Thin wrapper: forward a cache flush to the machine-specific primitive. */
static inline void __metal_cache_flush(void *addr, unsigned int len)
{
	metal_machine_cache_flush(addr, len);
}
/* Thin wrapper: forward a cache invalidate to the machine-specific primitive. */
static inline void __metal_cache_invalidate(void *addr, unsigned int len)
{
	metal_machine_cache_invalidate(addr, len);
}
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_CACHE__H__ */
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/condition.c
* @brief Generic libmetal condition variable handling.
*/
#include <metal/condition.h>
#include <metal/irq.h>
extern void metal_generic_default_poll(void);
/**
 * Block until the condition variable is signalled.
 *
 * The caller must hold @m.  The wait polls: it samples the generation
 * counter cv->v, releases the mutex, then spins -- briefly masking IRQs
 * around each check and calling metal_generic_default_poll() between
 * checks -- until the counter changes (i.e. until signal/broadcast has
 * incremented it).  The mutex is re-acquired before returning.
 *
 * @param cv condition variable to wait on
 * @param m  mutex protecting the condition; must be acquired by caller
 * @return 0 on success; -EINVAL on bad arguments, un-acquired mutex, or
 *         if the condition variable is bound to a different mutex
 */
int metal_condition_wait(struct metal_condition *cv,
			 metal_mutex_t *m)
{
	metal_mutex_t *tmpm = 0;
	int v;
	unsigned int flags;
	/* Check if the mutex has been acquired */
	if (!cv || !m || !metal_mutex_is_acquired(m))
		return -EINVAL;
	/*
	 * NOTE(review): tmpm is NULL here, so &tmpm->v (and, on the first
	 * wait, &cv->m->v while cv->m is still NULL from
	 * metal_condition_init) forms a pointer through a null pointer --
	 * undefined behavior.  Upstream libmetal instead CASes an atomic
	 * uintptr_t that holds the mutex pointer itself; confirm this
	 * vendored variant against the metal_mutex_t layout before use.
	 */
	if (!atomic_compare_exchange_strong(&cv->m->v, &tmpm->v, m->v)) {
		if (m != tmpm)
			return -EINVAL;
	}
	v = atomic_load(&cv->v);
	/* Release the mutex first. */
	metal_mutex_release(m);
	do {
		flags = metal_irq_save_disable();
		if (atomic_load(&cv->v) != v) {
			metal_irq_restore_enable(flags);
			break;
		}
		metal_generic_default_poll();
		metal_irq_restore_enable(flags);
	} while(1);
	/* Acquire the mutex again. */
	metal_mutex_acquire(m);
	return 0;
}
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/condition.h
* @brief Generic condition variable primitives for libmetal.
*/
#ifndef __METAL_CONDITION__H__
#error "Include metal/condition.h instead of metal/generic/condition.h"
#endif
#ifndef __METAL_GENERIC_CONDITION__H__
#define __METAL_GENERIC_CONDITION__H__
#include <unistd.h>
#include <metal/atomic.h>
#include <stdint.h>
#include <limits.h>
#include <metal/errno.h>
#ifdef __cplusplus
extern "C" {
#endif
struct metal_condition {
metal_mutex_t *m; /**< mutex.
The condition variable is attached to
this mutex when it is waiting.
It is also used to check correctness
in case there are multiple waiters. */
atomic_int v; /**< condition variable value. */
};
/** Static metal condition variable initialization. */
#define METAL_CONDITION_INIT { NULL, ATOMIC_VAR_INIT(0) }
/* Reset the condition variable: no attached mutex, generation counter 0. */
static inline void metal_condition_init(struct metal_condition *cv)
{
	cv->m = NULL;
	atomic_init(&cv->v, 0);
}
static inline int metal_condition_signal(struct metal_condition *cv)
{
if (!cv)
return -EINVAL;
/** wake up waiters if there are any. */
atomic_fetch_add(&cv->v, 1);
return 0;
}
/* Broadcast is identical to signal in this port: waiters poll one shared
 * generation counter, so a single increment is observed by all of them. */
static inline int metal_condition_broadcast(struct metal_condition *cv)
{
	return metal_condition_signal(cv);
}
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_CONDITION__H__ */
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of Xilinx nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* @file generic/sys.c
* @brief machine specific system primitives implementation.
*/
#include "metal/io.h"
#include "metal/sys.h"
/**
 * Restore the interrupt state saved by sys_irq_save_disable().
 *
 * This generic port never actually masks interrupts, so there is
 * nothing to restore.  The parameter is explicitly consumed to avoid an
 * unused-parameter warning, matching the other stubs in this file.
 *
 * @param flags state previously returned by sys_irq_save_disable()
 */
void sys_irq_restore_enable(unsigned int flags)
{
	(void)flags;
}
/**
 * Save and disable interrupts.
 *
 * Stub for the generic port: no global mask is touched; the returned
 * "saved state" is always zero.
 *
 * @return 0
 */
unsigned int sys_irq_save_disable(void)
{
	return 0U;
}
/* Stub cache flush: no cache maintenance is performed on this port;
 * arguments are consumed only to silence unused-parameter warnings. */
void metal_machine_cache_flush(void *addr, unsigned int len)
{
	(void)addr;
	(void)len;
}
/* Stub cache invalidate: no cache maintenance is performed on this port;
 * arguments are consumed only to silence unused-parameter warnings. */
void metal_machine_cache_invalidate(void *addr, unsigned int len)
{
	(void)addr;
	(void)len;
}
/**
* @brief poll function until some event happens
*/
/**
 * @brief poll function until some event happens
 *
 * Weak no-op default; metal_condition_wait() calls it inside its spin
 * loop, and an application may override it (e.g. with WFI or a yield).
 */
void __attribute__((weak)) metal_generic_default_poll(void)
{
}
/* Identity "mapping": no MMU/MPU setup is done on this port; the given
 * virtual address is returned unchanged and all other arguments are
 * ignored. */
void *metal_machine_io_mem_map(void *va, metal_phys_addr_t pa,
			       size_t size, unsigned int flags)
{
	(void)va;
	(void)pa;
	(void)size;
	(void)flags;
	return va;
}
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of Xilinx nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* @file generic/mp1_m4/sys.h
* @brief generic mp1_m4 system primitives for libmetal.
*/
#ifndef __METAL_GENERIC_SYS__H__
#error "Include metal/sys.h instead of metal/generic/@PROJECT_MACHINE@/sys.h"
#endif
#ifndef __METAL_GENERIC_MP1_M4_SYS__H__
#define __METAL_GENERIC_MP1_M4_SYS__H__
#ifdef __cplusplus
extern "C" {
#endif
#if !defined(MAX_IRQS)
#define MAX_IRQS 8 /**< maximum number of irqs */
#endif
/* Stub: per-vector IRQ enable is not implemented for the mp1_m4 generic
 * port; the vector argument is consumed to avoid a warning. */
static inline void sys_irq_enable(unsigned int vector)
{
	(void)vector;
}
/* Stub: per-vector IRQ disable is not implemented for the mp1_m4 generic
 * port; the vector argument is consumed to avoid a warning. */
static inline void sys_irq_disable(unsigned int vector)
{
	(void)vector;
}
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_MP1_M4_SYS__H__ */
/*
* Copyright (c) 2017, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/device.c
* @brief Generic libmetal device operations.
*/
#include <metal/device.h>
#include <metal/io.h>
#include <metal/sys.h>
#include <metal/utilities.h>
/**
 * Map the I/O regions of a generic device.
 *
 * Regions are taken in order; the first zero-size slot terminates the
 * scan (slots are assumed to be filled contiguously).
 *
 * @param dev device whose regions get mapped
 * @return always 0
 */
int metal_generic_dev_sys_open(struct metal_device *dev)
{
	unsigned idx;

	for (idx = 0; idx < dev->num_regions; idx++) {
		struct metal_io_region *region = &dev->regions[idx];

		if (region->size == 0)
			break;
		metal_sys_io_mem_map(region);
	}
	return 0;
}
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/init.c
* @brief Generic libmetal initialization.
*/
#include <metal/sys.h>
#include <metal/utilities.h>
#include <metal/device.h>
struct metal_state _metal;
/**
 * System-specific libmetal initialization for the generic port:
 * only the built-in generic bus needs registering.
 *
 * @param params initialization parameters; unused on this port
 * @return always 0
 */
int metal_sys_init(const struct metal_init_params *params)
{
	metal_unused(params);
	metal_bus_register(&metal_generic_bus);
	return 0;
}
/* System-specific libmetal teardown: undo metal_sys_init() by
 * unregistering the generic bus. */
void metal_sys_finish(void)
{
	metal_bus_unregister(&metal_generic_bus);
}
/*
* Copyright (c) 2017, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/io.c
* @brief Generic libmetal io operations
*/
#include <metal/io.h>
/**
 * Map an I/O region page by page via metal_machine_io_mem_map().
 *
 * The mapping granularity is one page (1 << io->page_shift), or the
 * whole region when it is smaller than a page.
 *
 * BUGFIX: the virtual cursor was declared `size_t *`, so `va += psize`
 * advanced by psize * sizeof(size_t) BYTES instead of psize bytes.  A
 * byte pointer gives the intended per-page stride.
 *
 * NOTE(review): the `<=` bound walks one extra physmap entry when
 * io->size is an exact multiple of the page size -- kept as-is to match
 * the original iteration; confirm against the physmap array length.
 */
void metal_sys_io_mem_map(struct metal_io_region *io)
{
	unsigned long p;
	size_t psize;
	unsigned char *va;

	va = (unsigned char *)io->virt;
	psize = io->size;
	if (psize) {
		/* Clamp the chunk size to a single page. */
		if (psize >> io->page_shift)
			psize = (size_t)1 << io->page_shift;
		for (p = 0; p <= (io->size >> io->page_shift); p++) {
			metal_machine_io_mem_map(va, io->physmap[p],
						 psize, io->mem_flags);
			va += psize; /* advance by bytes, not size_t units */
		}
	}
}
/*
* Copyright (c) 2016 - 2017, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/irq.c
* @brief generic libmetal irq definitions.
*/
#include <metal/errno.h>
#include <metal/irq.h>
#include <metal/sys.h>
#include <metal/log.h>
#include <metal/mutex.h>
#include <metal/list.h>
#include <metal/utilities.h>
#include <metal/alloc.h>
/** IRQ handlers descriptor structure */
struct metal_irq_hddesc {
metal_irq_handler hd; /**< irq handler */
void *drv_id; /**< id to identify the driver
of the irq handler */
struct metal_device *dev; /**< device identifier */
struct metal_list node; /**< node on irq handlers list */
};
/** IRQ descriptor structure */
struct metal_irq_desc {
int irq; /**< interrupt number */
struct metal_list hdls; /**< interrupt handlers */
struct metal_list node; /**< node on irqs list */
};
/** IRQ state structure */
struct metal_irqs_state {
struct metal_list irqs; /**< interrupt descriptors */
metal_mutex_t irq_lock; /**< access lock */
};
static struct metal_irqs_state _irqs = {
.irqs = METAL_INIT_LIST(_irqs.irqs),
.irq_lock = METAL_MUTEX_INIT(_irqs.irq_lock),
};
/**
 * Register an interrupt handler for an IRQ line.
 *
 * A line may carry several handlers (one per drv_id); registering the
 * same drv_id twice on one line is rejected.
 *
 * @param irq    interrupt number; must be >= 0
 * @param hd     handler to run when the IRQ fires (required)
 * @param dev    optional device associated with the handler
 * @param drv_id driver cookie identifying this registration (required)
 * @return 0 on success, -EINVAL on bad arguments or duplicate
 *         registration, -ENOMEM when allocation fails
 */
int metal_irq_register(int irq,
		       metal_irq_handler hd,
		       struct metal_device *dev,
		       void *drv_id)
{
	struct metal_irq_desc *irq_p = NULL;
	struct metal_irq_hddesc *hdl_p;
	struct metal_list *node;
	unsigned int irq_flags_save;

	if (irq < 0) {
		metal_log(METAL_LOG_ERROR,
			  "%s: irq %d need to be a positive number\n",
			  __func__, irq);
		return -EINVAL;
	}
	if ((drv_id == NULL) || (hd == NULL)) {
		metal_log(METAL_LOG_ERROR, "%s: irq %d need drv_id and hd.\n",
			  __func__, irq);
		return -EINVAL;
	}

	/* Search for irq in list */
	metal_mutex_acquire(&_irqs.irq_lock);
	metal_list_for_each(&_irqs.irqs, node) {
		irq_p = metal_container_of(node, struct metal_irq_desc, node);
		if (irq_p->irq == irq) {
			struct metal_list *h_node;

			/* Check if drv_id already exist */
			metal_list_for_each(&irq_p->hdls, h_node) {
				hdl_p = metal_container_of(h_node,
							   struct metal_irq_hddesc,
							   node);
				/* if drv_id already exist reject */
				if ((hdl_p->drv_id == drv_id) &&
				    ((dev == NULL) || (hdl_p->dev == dev))) {
					metal_log(METAL_LOG_ERROR,
						  "%s: irq %d already registered."
						  "Will not register again.\n",
						  __func__, irq);
					metal_mutex_release(&_irqs.irq_lock);
					return -EINVAL;
				}
			}
			/* irq found and drv_id not used, get out of metal_list_for_each */
			break;
		}
	}

	/* Either need to add handler to an existing list or to a new one */
	hdl_p = metal_allocate_memory(sizeof(struct metal_irq_hddesc));
	if (hdl_p == NULL) {
		/* BUGFIX: drv_id is a pointer -- log it with %p, not %d. */
		metal_log(METAL_LOG_ERROR,
			  "%s: irq %d cannot allocate mem for drv_id %p.\n",
			  __func__, irq, drv_id);
		metal_mutex_release(&_irqs.irq_lock);
		return -ENOMEM;
	}
	hdl_p->hd = hd;
	hdl_p->drv_id = drv_id;
	hdl_p->dev = dev;

	/* interrupt already registered, add handler to existing list */
	if ((irq_p != NULL) && (irq_p->irq == irq)) {
		/* List insertion is done with IRQs masked so the ISR never
		 * sees a half-linked node. */
		irq_flags_save = metal_irq_save_disable();
		metal_list_add_tail(&irq_p->hdls, &hdl_p->node);
		metal_irq_restore_enable(irq_flags_save);

		metal_log(METAL_LOG_DEBUG, "%s: success, irq %d add drv_id %p \n",
			  __func__, irq, drv_id);
		metal_mutex_release(&_irqs.irq_lock);
		return 0;
	}

	/* interrupt was not already registered, add */
	irq_p = metal_allocate_memory(sizeof(struct metal_irq_desc));
	if (irq_p == NULL) {
		metal_log(METAL_LOG_ERROR, "%s: irq %d cannot allocate mem.\n",
			  __func__, irq);
		metal_mutex_release(&_irqs.irq_lock);
		return -ENOMEM;
	}
	irq_p->irq = irq;
	metal_list_init(&irq_p->hdls);
	metal_list_add_tail(&irq_p->hdls, &hdl_p->node);

	irq_flags_save = metal_irq_save_disable();
	metal_list_add_tail(&_irqs.irqs, &irq_p->node);
	metal_irq_restore_enable(irq_flags_save);

	metal_log(METAL_LOG_DEBUG, "%s: success, added irq %d\n", __func__, irq);
	metal_mutex_release(&_irqs.irq_lock);
	return 0;
}
/* Helper for metal_irq_unregister(): unlink a list node with interrupts
 * masked (so the ISR never walks a half-unlinked list), then free the
 * descriptor that owns it. */
static void metal_irq_delete_node(struct metal_list *node, void *p_to_free)
{
	unsigned int saved_state;

	saved_state = metal_irq_save_disable();
	metal_list_del(node);
	metal_irq_restore_enable(saved_state);

	metal_free_memory(p_to_free);
}
/**
 * Unregister interrupt handler(s) for an IRQ line.
 *
 * NULL acts as a wildcard for @hd, @dev and @drv_id: every handler on
 * the line matching all non-NULL criteria is removed.  When the line's
 * handler list becomes empty, the IRQ descriptor itself is removed too.
 *
 * @param irq    interrupt number; must be >= 0
 * @param hd     handler to match, or NULL for any
 * @param dev    device to match, or NULL for any
 * @param drv_id driver cookie to match, or NULL for any
 * @return 0 on success, -EINVAL on a negative irq, -ENOENT when no
 *         matching IRQ or handler entry exists
 */
int metal_irq_unregister(int irq,
			 metal_irq_handler hd,
			 struct metal_device *dev,
			 void *drv_id)
{
	struct metal_irq_desc *irq_p;
	struct metal_list *node;
	if (irq < 0) {
		metal_log(METAL_LOG_ERROR, "%s: irq %d need to be a positive number\n",
			  __func__, irq);
		return -EINVAL;
	}
	/* Search for irq in list */
	metal_mutex_acquire(&_irqs.irq_lock);
	metal_list_for_each(&_irqs.irqs, node) {
		irq_p = metal_container_of(node, struct metal_irq_desc, node);
		if (irq_p->irq == irq) {
			struct metal_list *h_node, *h_prenode;
			struct metal_irq_hddesc *hdl_p;
			unsigned int delete_count = 0;
			metal_log(METAL_LOG_DEBUG, "%s: found irq %d\n",
				  __func__, irq);
			/* Search through handlers */
			metal_list_for_each(&irq_p->hdls, h_node) {
				hdl_p = metal_container_of(h_node,
							   struct metal_irq_hddesc,
							   node);
				if (((hd == NULL) || (hdl_p->hd == hd)) &&
				    ((drv_id == NULL) || (hdl_p->drv_id == drv_id)) &&
				    ((dev == NULL) || (hdl_p->dev == dev))) {
					metal_log(METAL_LOG_DEBUG,
						  "%s: unregister hd=%p drv_id=%p dev=%p\n",
						  __func__, hdl_p->hd, hdl_p->drv_id, hdl_p->dev);
					/* Deleting while iterating: remember the
					 * predecessor and step back to it so the
					 * loop's advance lands on the next node. */
					h_prenode = h_node->prev;
					metal_irq_delete_node(h_node, hdl_p);
					h_node = h_prenode;
					delete_count++;
				}
			}
			/* we did not find any handler to delete */
			if (!delete_count) {
				metal_log(METAL_LOG_DEBUG, "%s: No matching entry\n",
					  __func__);
				metal_mutex_release(&_irqs.irq_lock);
				return -ENOENT;
			}
			/* if interrupt handlers list is empty, unregister interrupt */
			if (metal_list_is_empty(&irq_p->hdls)) {
				metal_log(METAL_LOG_DEBUG,
					  "%s: handlers list empty, unregister interrupt\n",
					  __func__);
				metal_irq_delete_node(node, irq_p);
			}
			metal_log(METAL_LOG_DEBUG, "%s: success\n", __func__);
			metal_mutex_release(&_irqs.irq_lock);
			return 0;
		}
	}
	metal_log(METAL_LOG_DEBUG, "%s: No matching IRQ entry\n", __func__);
	metal_mutex_release(&_irqs.irq_lock);
	return -ENOENT;
}
/* Public wrapper: disable interrupts and return the previous state
 * (delegates to the system layer). */
unsigned int metal_irq_save_disable(void)
{
	return sys_irq_save_disable();
}
/* Public wrapper: restore the interrupt state previously returned by
 * metal_irq_save_disable() (delegates to the system layer). */
void metal_irq_restore_enable(unsigned int flags)
{
	sys_irq_restore_enable(flags);
}
/* Public wrapper: enable one interrupt vector via the system layer. */
void metal_irq_enable(unsigned int vector)
{
	sys_irq_enable(vector);
}
/* Public wrapper: disable one interrupt vector via the system layer. */
void metal_irq_disable(unsigned int vector)
{
	sys_irq_disable(vector);
}
/**
* @brief default handler
*/
void metal_irq_isr(unsigned int vector)
{
struct metal_list *node;
struct metal_irq_desc *irq_p;
metal_list_for_each(&_irqs.irqs, node) {
irq_p = metal_container_of(node, struct metal_irq_desc, node);
if ((unsigned int)irq_p->irq == vector) {
struct metal_list *h_node;
struct metal_irq_hddesc *hdl_p;
metal_list_for_each(&irq_p->hdls, h_node) {
hdl_p = metal_container_of(h_node,
struct metal_irq_hddesc,
node);
(hdl_p->hd)(vector, hdl_p->drv_id);
}
}
}
}
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
 * @file generic/irq.h
* @brief Generic libmetal irq definitions.
*/
#ifndef __METAL_IRQ__H__
#error "Include metal/irq.h instead of metal/generic/irq.h"
#endif
#ifndef __METAL_GENERIC_IRQ__H__
#define __METAL_GENERIC_IRQ__H__
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief default interrupt handler
* @param[in] vector interrupt vector
*/
void metal_irq_isr(unsigned int vector);
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_IRQ__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/shmem.c
* @brief Generic libmetal shared memory handling.
*/
#include <metal/shmem.h>
/* System hook for shared-memory open: this port simply delegates to the
 * platform-independent metal_shmem_open_generic() and returns its status. */
int metal_shmem_open(const char *name, size_t size,
		     struct metal_io_region **io)
{
	return metal_shmem_open_generic(name, size, io);
}
/*
* Copyright (c) 2016, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/time.c
* @brief Generic libmetal time handling.
*/
#include <metal/time.h>
/* Timestamp source for the generic port: not implemented yet, so every
 * call returns 0 (callers cannot measure elapsed time on this port). */
unsigned long long metal_get_timestamp(void)
{
	/* TODO: Implement timestamp for generic system */
	return 0;
}
/*
* Copyright (c) 2017, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/io.h
* @brief Generic specific io definitions.
*/
#ifndef __METAL_IO__H__
#error "Include metal/io.h instead of metal/generic/io.h"
#endif
#ifndef __METAL_GENERIC_IO__H__
#define __METAL_GENERIC_IO__H__
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef METAL_INTERNAL
/**
* @brief memory mapping for an I/O region
*/
void metal_sys_io_mem_map(struct metal_io_region *io);
/**
* @brief memory mapping
*/
void *metal_machine_io_mem_map(void *va, metal_phys_addr_t pa,
size_t size, unsigned int flags);
#endif
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_IO__H__ */
/*
* Copyright (c) 2018, Linaro Limited. and Contributors. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of Linaro nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* @file generic/log.h
* @brief Generic libmetal log handler definition.
*/
#ifndef __METAL_METAL_LOG__H__
#error "Include metal/log.h instead of metal/generic/log.h"
#endif
#ifndef __METAL_GENERIC_LOG__H__
#define __METAL_GENERIC_LOG__H__
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_LOG__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/mutex.h
* @brief Generic mutex primitives for libmetal.
*/
#ifndef __METAL_MUTEX__H__
#error "Include metal/mutex.h instead of metal/generic/mutex.h"
#endif
#ifndef __METAL_GENERIC_MUTEX__H__
#define __METAL_GENERIC_MUTEX__H__
#include <metal/atomic.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
union{
atomic_int v;
atomic_flag w;
};
} metal_mutex_t;
/*
 * METAL_MUTEX_INIT - used for initializing a mutex element in a static struct
* or global
*/
#define METAL_MUTEX_INIT(m) { ATOMIC_VAR_INIT(0) }
/*
* METAL_MUTEX_DEFINE - used for defining and initializing a global or
* static singleton mutex
*/
#define METAL_MUTEX_DEFINE(m) metal_mutex_t m = METAL_MUTEX_INIT(m)
/* Initialize the mutex to the released state (counter value 0). */
static inline void __metal_mutex_init(metal_mutex_t *mutex)
{
	atomic_store(&mutex->v, 0);
}
/* Nothing to release for a spin mutex; parameter kept for API symmetry. */
static inline void __metal_mutex_deinit(metal_mutex_t *mutex)
{
	(void)mutex;
}
/* Single acquisition attempt: returns 1 when the lock was taken, 0 when
 * it was already held (atomic_flag_test_and_set yields the old flag). */
static inline int __metal_mutex_try_acquire(metal_mutex_t *mutex)
{
	return 1 - atomic_flag_test_and_set(&mutex->w);
}
/* Busy-wait until the lock flag can be taken (spinlock acquire). */
static inline void __metal_mutex_acquire(metal_mutex_t *mutex)
{
	for (;;) {
		if (!atomic_flag_test_and_set(&mutex->w))
			return;
	}
}
/* Release the lock by clearing the flag. */
static inline void __metal_mutex_release(metal_mutex_t *mutex)
{
	atomic_flag_clear(&mutex->w);
}
/* Report whether the mutex is currently held.
 *
 * NOTE(review): this reads the atomic_int member `v` while acquire and
 * release operate on the union's atomic_flag member `w`; it relies on
 * the two sharing a representation where "set" reads as non-zero --
 * confirm for the target toolchain/ABI. */
static inline int __metal_mutex_is_acquired(metal_mutex_t *mutex)
{
	return atomic_load(&mutex->v);
}
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_MUTEX__H__ */
/*
* Copyright (c) 2018, Linaro Limited. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/sleep.h
* @brief Generic sleep primitives for libmetal.
*/
#ifndef __METAL_SLEEP__H__
#error "Include metal/sleep.h instead of metal/generic/sleep.h"
#endif
#ifndef __METAL_GENERIC_SLEEP__H__
#define __METAL_GENERIC_SLEEP__H__
#include <metal/utilities.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Placeholder sleep: the generic port has no timebase, so this returns
 * immediately without delaying (see the "Fix me" marker below). */
static inline int __metal_sleep_usec(unsigned int usec)
{
	metal_unused(usec);
	/* Fix me */
	return 0;
}
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_SLEEP__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* @file generic/sys.h
* @brief Generic system primitives for libmetal.
*/
#ifndef __METAL_SYS__H__
#error "Include metal/sys.h instead of metal/generic/sys.h"
#endif
#ifndef __METAL_GENERIC_SYS__H__
#define __METAL_GENERIC_SYS__H__
#include <metal/errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include "./@PROJECT_MACHINE@/sys.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifndef METAL_MAX_DEVICE_REGIONS
#define METAL_MAX_DEVICE_REGIONS 1
#endif
/** Structure of generic libmetal runtime state. */
struct metal_state {
/** Common (system independent) data. */
struct metal_common_state common;
};
#ifdef METAL_INTERNAL
/**
* @brief restore interrupts to state before disable_global_interrupt()
*/
void sys_irq_restore_enable(unsigned int flags);
/**
* @brief disable all interrupts
*/
unsigned int sys_irq_save_disable(void);
#endif /* METAL_INTERNAL */
#ifdef __cplusplus
}
#endif
#endif /* __METAL_GENERIC_SYS__H__ */
/*
* Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <metal/config.h>
/* Return the libmetal major version number (METAL_VER_MAJOR from metal/config.h). */
int metal_ver_major(void)
{
	return METAL_VER_MAJOR;
}
/* Return the libmetal minor version number (METAL_VER_MINOR from metal/config.h). */
int metal_ver_minor(void)
{
	return METAL_VER_MINOR;
}
/* Return the libmetal patch version number (METAL_VER_PATCH from metal/config.h). */
int metal_ver_patch(void)
{
	return METAL_VER_PATCH;
}
/* Return the full libmetal version string (METAL_VER from metal/config.h). */
const char *metal_ver(void)
{
	return METAL_VER;
}
#ifndef _COMPILER_H_
#define _COMPILER_H_
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* compiler.h
*
* DESCRIPTION
*
* This file defines compiler-specific macros.
*
***************************************************************************/
#if defined __cplusplus
extern "C" {
#endif
/* IAR ARM build tools */
#if defined(__ICCARM__)
#ifndef OPENAMP_PACKED_BEGIN
#define OPENAMP_PACKED_BEGIN __packed
#endif
#ifndef OPENAMP_PACKED_END
#define OPENAMP_PACKED_END
#endif
/* GNUC */
#elif defined(__GNUC__)
#ifndef OPENAMP_PACKED_BEGIN
#define OPENAMP_PACKED_BEGIN
#endif
#ifndef OPENAMP_PACKED_END
#define OPENAMP_PACKED_END __attribute__((__packed__))
#endif
/* ARM GCC */
#elif defined(__CC_ARM)
#ifndef OPENAMP_PACKED_BEGIN
#define OPENAMP_PACKED_BEGIN _Pragma("pack(1U)")
#endif
#ifndef OPENAMP_PACKED_END
#define OPENAMP_PACKED_END _Pragma("pack()")
#endif
#else
/*
* There is no default definition here to avoid wrong structures packing in case
* of not supported compiler
*/
#error Please implement the structure packing macros for your compiler here!
#endif
#if defined __cplusplus
}
#endif
#endif /* _COMPILER_H_ */
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef ELF_LOADER_H_
#define ELF_LOADER_H_
#include <openamp/remoteproc.h>
#include <openamp/remoteproc_loader.h>
#if defined __cplusplus
extern "C" {
#endif
/* ELF32 base types - 32-bit. */
typedef uint32_t Elf32_Addr;
typedef uint16_t Elf32_Half;
typedef uint32_t Elf32_Off;
typedef int32_t Elf32_Sword;
typedef uint32_t Elf32_Word;
/* ELF64 base types - 64-bit. */
typedef uint64_t Elf64_Addr;
typedef uint16_t Elf64_Half;
typedef uint64_t Elf64_Off;
typedef int32_t Elf64_Sword;
typedef uint32_t Elf64_Word;
typedef uint64_t Elf64_Xword;
typedef int64_t Elf64_Sxword;
/* Size of ELF identifier field in the ELF file header. */
#define EI_NIDENT 16
/* ELF32 file header */
typedef struct {
unsigned char e_ident[EI_NIDENT];
Elf32_Half e_type;
Elf32_Half e_machine;
Elf32_Word e_version;
Elf32_Addr e_entry;
Elf32_Off e_phoff;
Elf32_Off e_shoff;
Elf32_Word e_flags;
Elf32_Half e_ehsize;
Elf32_Half e_phentsize;
Elf32_Half e_phnum;
Elf32_Half e_shentsize;
Elf32_Half e_shnum;
Elf32_Half e_shstrndx;
} Elf32_Ehdr;
/* ELF64 file header */
typedef struct {
unsigned char e_ident[EI_NIDENT];
Elf64_Half e_type;
Elf64_Half e_machine;
Elf64_Word e_version;
Elf64_Addr e_entry;
Elf64_Off e_phoff;
Elf64_Off e_shoff;
Elf64_Word e_flags;
Elf64_Half e_ehsize;
Elf64_Half e_phentsize;
Elf64_Half e_phnum;
Elf64_Half e_shentsize;
Elf64_Half e_shnum;
Elf64_Half e_shstrndx;
} Elf64_Ehdr;
/* e_ident */
#define ET_NONE 0
#define ET_REL 1 /* Re-locatable file */
#define ET_EXEC 2 /* Executable file */
#define ET_DYN 3 /* Shared object file */
#define ET_CORE 4 /* Core file */
#define ET_LOOS 0xfe00 /* Operating system-specific */
#define ET_HIOS 0xfeff /* Operating system-specific */
#define ET_LOPROC 0xff00 /* remote_proc-specific */
#define ET_HIPROC 0xffff /* remote_proc-specific */
/* e_machine */
#define EM_ARM 40 /* ARM/Thumb Architecture */
/* e_version */
#define EV_CURRENT 1 /* Current version */
/* e_ident[] Identification Indexes */
#define EI_MAG0 0 /* File identification */
#define EI_MAG1 1 /* File identification */
#define EI_MAG2 2 /* File identification */
#define EI_MAG3 3 /* File identification */
#define EI_CLASS 4 /* File class */
#define EI_DATA 5 /* Data encoding */
#define EI_VERSION 6 /* File version */
#define EI_OSABI 7 /* Operating system/ABI identification */
#define EI_ABIVERSION 8 /* ABI version */
#define EI_PAD 9 /* Start of padding bytes */
#define EI_NIDENT 16 /* Size of e_ident[] */
/*
 * EI_MAG0 to EI_MAG3 - A file's first 4 bytes hold a magic number, identifying
* the file as an ELF object file
*/
#define ELFMAG0 0x7f /* e_ident[EI_MAG0] */
#define ELFMAG1 'E' /* e_ident[EI_MAG1] */
#define ELFMAG2 'L' /* e_ident[EI_MAG2] */
#define ELFMAG3 'F' /* e_ident[EI_MAG3] */
#define ELFMAG "\177ELF"
#define SELFMAG 4
/*
* EI_CLASS - The next byte, e_ident[EI_CLASS], identifies the file's class, or
* capacity.
*/
#define ELFCLASSNONE 0 /* Invalid class */
#define ELFCLASS32 1 /* 32-bit objects */
#define ELFCLASS64 2 /* 64-bit objects */
/*
* EI_DATA - Byte e_ident[EI_DATA] specifies the data encoding of the
* remote_proc-specific data in the object file. The following encodings are
* currently defined.
*/
#define ELFDATANONE 0 /* Invalid data encoding */
#define ELFDATA2LSB 1 /* See Data encodings, below */
#define ELFDATA2MSB 2 /* See Data encodings, below */
/* EI_OSABI - We do not define an OS specific ABI */
#define ELFOSABI_NONE 0
/* ELF32 program header */
typedef struct elf32_phdr{
Elf32_Word p_type;
Elf32_Off p_offset;
Elf32_Addr p_vaddr;
Elf32_Addr p_paddr;
Elf32_Word p_filesz;
Elf32_Word p_memsz;
Elf32_Word p_flags;
Elf32_Word p_align;
} Elf32_Phdr;
/* ELF64 program header */
typedef struct elf64_phdr {
Elf64_Word p_type;
Elf64_Word p_flags;
Elf64_Off p_offset;
Elf64_Addr p_vaddr;
Elf64_Addr p_paddr;
Elf64_Xword p_filesz;
Elf64_Xword p_memsz;
Elf64_Xword p_align;
} Elf64_Phdr;
/* segment types */
#define PT_NULL 0
#define PT_LOAD 1
#define PT_DYNAMIC 2
#define PT_INTERP 3
#define PT_NOTE 4
#define PT_SHLIB 5
#define PT_PHDR 6
#define PT_TLS 7 /* Thread local storage segment */
#define PT_LOOS 0x60000000 /* OS-specific */
#define PT_HIOS 0x6fffffff /* OS-specific */
#define PT_LOPROC 0x70000000
#define PT_HIPROC 0x7fffffff
/* ELF32 section header. */
typedef struct {
Elf32_Word sh_name;
Elf32_Word sh_type;
Elf32_Word sh_flags;
Elf32_Addr sh_addr;
Elf32_Off sh_offset;
Elf32_Word sh_size;
Elf32_Word sh_link;
Elf32_Word sh_info;
Elf32_Word sh_addralign;
Elf32_Word sh_entsize;
} Elf32_Shdr;
/* ELF64 section header. */
typedef struct {
Elf64_Word sh_name;
Elf64_Word sh_type;
Elf64_Xword sh_flags;
Elf64_Addr sh_addr;
Elf64_Off sh_offset;
Elf64_Xword sh_size;
Elf64_Word sh_link;
Elf64_Word sh_info;
Elf64_Xword sh_addralign;
Elf64_Xword sh_entsize;
} Elf64_Shdr;
/* sh_type */
#define SHT_NULL 0
#define SHT_PROGBITS 1
#define SHT_SYMTAB 2
#define SHT_STRTAB 3
#define SHT_RELA 4
#define SHT_HASH 5
#define SHT_DYNAMIC 6
#define SHT_NOTE 7
#define SHT_NOBITS 8
#define SHT_REL 9
#define SHT_SHLIB 10
#define SHT_DYNSYM 11
#define SHT_INIT_ARRAY 14
#define SHT_FINI_ARRAY 15
#define SHT_PREINIT_ARRAY 16
#define SHT_GROUP 17
#define SHT_SYMTAB_SHNDX 18
#define SHT_LOOS 0x60000000
#define SHT_HIOS 0x6fffffff
#define SHT_LOPROC 0x70000000
#define SHT_HIPROC 0x7fffffff
#define SHT_LOUSER 0x80000000
#define SHT_HIUSER 0xffffffff
/* sh_flags */
#define SHF_WRITE 0x1
#define SHF_ALLOC 0x2
#define SHF_EXECINSTR 0x4
#define SHF_MASKPROC 0xf0000000
/* Relocation entry (without addend) */
typedef struct {
	Elf32_Addr r_offset;	/* location to apply the relocation to */
	Elf32_Word r_info;	/* symbol index and relocation type (see ELF32_R_* macros) */
} Elf32_Rel;
typedef struct {
	Elf64_Addr r_offset;	/* location to apply the relocation to */
	Elf64_Xword r_info;	/* symbol index and relocation type (see ELF64_R_* macros) */
} Elf64_Rel;
/* Relocation entry with addend */
typedef struct {
	Elf32_Addr r_offset;	/* location to apply the relocation to */
	Elf32_Word r_info;	/* symbol index and relocation type (see ELF32_R_* macros) */
	Elf32_Sword r_addend;	/* constant addend used to compute the value */
} Elf32_Rela;
typedef struct elf64_rela {
	Elf64_Addr r_offset;	/* location to apply the relocation to */
	Elf64_Xword r_info;	/* symbol index and relocation type (see ELF64_R_* macros) */
	Elf64_Sxword r_addend;	/* constant addend used to compute the value */
} Elf64_Rela;
/* Macros to extract information from 'r_info' field of relocation entries */
#define ELF32_R_SYM(i) ((i) >> 8)
#define ELF32_R_TYPE(i) ((unsigned char)(i))
#define ELF64_R_SYM(i) ((i) >> 32)
#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
/* Symbol table entry */
typedef struct {
	Elf32_Word st_name;	/* symbol name: offset into string table */
	Elf32_Addr st_value;	/* symbol value (address or offset) */
	Elf32_Word st_size;	/* size of the object the symbol refers to */
	unsigned char st_info;	/* symbol binding and type */
	unsigned char st_other;	/* symbol visibility */
	Elf32_Half st_shndx;	/* index of section the symbol is defined in */
} Elf32_Sym;
typedef struct elf64_sym {
	Elf64_Word st_name;	/* symbol name: offset into string table */
	unsigned char st_info;	/* symbol binding and type */
	unsigned char st_other;	/* symbol visibility */
	Elf64_Half st_shndx;	/* index of section the symbol is defined in */
	Elf64_Addr st_value;	/* symbol value (address or offset) */
	Elf64_Xword st_size;	/* size of the object the symbol refers to */
} Elf64_Sym;
/* ARM specific dynamic relocation codes */
#define R_ARM_GLOB_DAT 21 /* 0x15 */
#define R_ARM_JUMP_SLOT 22 /* 0x16 */
#define R_ARM_RELATIVE 23 /* 0x17 */
#define R_ARM_ABS32 2 /* 0x02 */
/* ELF decoding information */
struct elf32_info {
	Elf32_Ehdr ehdr;	/* copy of the ELF file header */
	unsigned int load_state;	/* loader progress flags (ELF_STATE_* below) */
	Elf32_Phdr *phdrs;	/* program header table */
	Elf32_Shdr *shdrs;	/* section header table */
	void *shstrtab;		/* section header string table data */
};
struct elf64_info {
	Elf64_Ehdr ehdr;	/* copy of the ELF file header */
	unsigned int load_state;	/* loader progress flags (ELF_STATE_* below) */
	Elf64_Phdr *phdrs;	/* program header table */
	Elf64_Shdr *shdrs;	/* section header table */
	void *shstrtab;		/* section header string table data */
};
/* ELF loader state flags stored in elf32_info/elf64_info load_state */
#define ELF_STATE_INIT 0x0UL	/* initial state */
#define ELF_STATE_WAIT_FOR_PHDRS 0x100UL	/* waiting for program headers */
#define ELF_STATE_WAIT_FOR_SHDRS 0x200UL	/* waiting for section headers */
#define ELF_STATE_WAIT_FOR_SHSTRTAB 0x400UL	/* waiting for section header string table */
#define ELF_STATE_HDRS_COMPLETE 0x800UL	/* all headers have been loaded */
#define ELF_STATE_MASK 0xFF00UL	/* mask of the state bits above */
#define ELF_NEXT_SEGMENT_MASK 0x00FFUL	/* low bits: index of next segment to load */
extern struct loader_ops elf_ops;
/**
* elf_identify - check if it is an ELF file
*
* It will check if the input image header is an ELF header.
*
* @img_data: firmware private data which will be passed to user defined loader
* operations
* @len: firmware header length
*
* return 0 for success or negative value for failure.
*/
int elf_identify(const void *img_data, size_t len);
/**
* elf_load_header - Load ELF headers
*
* It will get the ELF header, the program header, and the section header.
*
* @img_data: image data
* @offset: input image data offset to the start of image file
* @len: input image data length
* @img_info: pointer to store image information data
* @last_load_state: last state return by this function
* @noffset: pointer to next offset required by loading ELF header
* @nlen: pointer to next data length required by loading ELF header
*
* return ELF loading header state, or negative value for failure
*/
int elf_load_header(const void *img_data, size_t offset, size_t len,
void **img_info, int last_load_state,
size_t *noffset, size_t *nlen);
/**
* elf_load - load ELF data
*
* It will parse the ELF image and return the target device address,
* offset to the start of the ELF image of the data to load and the
* length of the data to load.
*
* @rproc: pointer to remoteproc instance
* @img_data: image data which will passed to the function.
* it can be NULL, if image data doesn't need to be handled
* by the load function. E.g. binary data which was
* loaded to the target memory.
* @offset: last loaded image data offset to the start of image file
* @len: last loaded image data length
* @img_info: pointer to store image information data
* @last_load_state: the returned state of the last function call.
* @da: target device address, if the data to load is not for target memory
* the da will be set to ANY.
* @noffset: pointer to next offset required by loading ELF header
* @nlen: pointer to next data length required by loading ELF header
* @padding: value to pad it is possible that a size of a segment in memory
* is larger than what it is in the ELF image. e.g. a segment
* can have stack section .bss. It doesn't need to copy image file
* space, in this case, it will be packed with 0.
* @nmemsize: pointer to next data target memory size. The size of a segment
* in the target memory can be larger than the its size in the
* image file.
*
* return 0 for success, otherwise negative value for failure
*/
int elf_load(struct remoteproc *rproc, const void *img_data,
size_t offset, size_t len,
void **img_info, int last_load_state,
metal_phys_addr_t *da,
size_t *noffset, size_t *nlen,
unsigned char *padding, size_t *nmemsize);
/**
* elf_release - Release ELF image information
*
* It will release ELF image information data.
*
* @img_info: pointer to ELF image information
*/
void elf_release(void *img_info);
/**
* elf_get_entry - Get entry point
*
* It will return entry point specified in the ELF file.
*
* @img_info: pointer to ELF image information
*
* return entry address
*/
metal_phys_addr_t elf_get_entry(void *img_info);
/**
* elf_locate_rsc_table - locate the resource table information
*
* It will return the length of the resource table, and the device address of
* the resource table.
*
* @img_info: pointer to ELF image information
* @da: pointer to the device address
* @offset: pointer to the offset to in the ELF image of the resource
* table section.
* @size: pointer to the size of the resource table section.
*
* return 0 if successfully locate the resource table, negative value for
* failure.
*/
int elf_locate_rsc_table(void *img_info, metal_phys_addr_t *da,
size_t *offset, size_t *size);
#if defined __cplusplus
}
#endif
#endif /* ELF_LOADER_H_ */
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef OPEN_AMP_H_
#define OPEN_AMP_H_
#include <openamp/rpmsg.h>
#include <openamp/rpmsg_virtio.h>
#include <openamp/remoteproc.h>
#include <openamp/remoteproc_virtio.h>
#endif /* OPEN_AMP_H_ */
/*
* Remoteproc Framework
*
* Copyright(c) 2018 Xilinx Ltd.
* Copyright(c) 2011 Texas Instruments, Inc.
* Copyright(c) 2011 Google, Inc.
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef REMOTEPROC_H
#define REMOTEPROC_H
#include <metal/io.h>
#include <metal/mutex.h>
#include <openamp/compiler.h>
#if defined __cplusplus
extern "C" {
#endif
#define RSC_NOTIFY_ID_ANY 0xFFFFFFFFUL
/**
* struct resource_table - firmware resource table header
* @ver: version number
* @num: number of resource entries
* @reserved: reserved (must be zero)
* @offset: array of offsets pointing at the various resource entries
*
* A resource table is essentially a list of system resources required
* by the remote remote_proc. It may also include configuration entries.
* If needed, the remote remote_proc firmware should contain this table
* as a dedicated ".resource_table" ELF section.
*
* Some resources entries are mere announcements, where the host is informed
* of specific remoteproc configuration. Other entries require the host to
* do something (e.g. allocate a system resource). Sometimes a negotiation
* is expected, where the firmware requests a resource, and once allocated,
* the host should provide back its details (e.g. address of an allocated
* memory region).
*
* The header of the resource table, as expressed by this structure,
* contains a version number (should we need to change this format in the
* future), the number of available resource entries, and their offsets
* in the table.
*
* Immediately following this header are the resource entries themselves,
* each of which begins with a resource entry header (as described below).
*/
OPENAMP_PACKED_BEGIN
struct resource_table {
	uint32_t ver;		/* version number */
	uint32_t num;		/* number of resource entries */
	uint32_t reserved[2];	/* reserved (must be zero) */
	uint32_t offset[0];	/* offsets of the resource entries, from table start */
} OPENAMP_PACKED_END;
/**
* struct fw_rsc_hdr - firmware resource entry header
* @type: resource type
* @data: resource data
*
* Every resource entry begins with a 'struct fw_rsc_hdr' header providing
* its @type. The content of the entry itself will immediately follow
* this header, and it should be parsed according to the resource type.
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_hdr {
uint32_t type;
uint8_t data[0];
} OPENAMP_PACKED_END;
/**
* enum fw_resource_type - types of resource entries
*
* @RSC_CARVEOUT: request for allocation of a physically contiguous
* memory region.
* @RSC_DEVMEM: request to iommu_map a memory-based peripheral.
* @RSC_TRACE: announces the availability of a trace buffer into which
* the remote remote_proc will be writing logs.
* @RSC_VDEV: declare support for a virtio device, and serve as its
* virtio header.
* @RSC_VENDOR_START: start of the vendor specific resource types range
* @RSC_VENDOR_END : end of the vendor specific resource types range
* @RSC_LAST: just keep this one at the end
*
* For more details regarding a specific resource type, please see its
* dedicated structure below.
*
* Please note that these values are used as indices to the rproc_handle_rsc
* lookup table, so please keep them sane. Moreover, @RSC_LAST is used to
* check the validity of an index before the lookup table is accessed, so
* please update it as needed.
*/
enum fw_resource_type {
	RSC_CARVEOUT = 0,	/* physically contiguous memory request */
	RSC_DEVMEM = 1,		/* iommu mapping request */
	RSC_TRACE = 2,		/* trace buffer declaration */
	RSC_VDEV = 3,		/* virtio device header */
	RSC_RPROC_MEM = 4,	/* remote processor memory usable as shared memory */
	RSC_FW_CHKSUM = 5,	/* checksum of firmware loadable sections */
	RSC_LAST = 6,		/* end marker of the standard resource types */
	RSC_VENDOR_START = 128,	/* first vendor specific resource type */
	RSC_VENDOR_END = 512,	/* last vendor specific resource type */
};
#define FW_RSC_ADDR_ANY (0xFFFFFFFFFFFFFFFF)
#define FW_RSC_U32_ADDR_ANY (0xFFFFFFFF)
/**
* struct fw_rsc_carveout - physically contiguous memory request
* @da: device address
* @pa: physical address
* @len: length (in bytes)
* @flags: iommu protection flags
* @reserved: reserved (must be zero)
* @name: human-readable name of the requested memory region
*
* This resource entry requests the host to allocate a physically contiguous
* memory region.
*
* These request entries should precede other firmware resource entries,
* as other entries might request placing other data objects inside
* these memory regions (e.g. data/code segments, trace resource entries, ...).
*
* Allocating memory this way helps utilizing the reserved physical memory
* (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
* needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
* pressure is important; it may have a substantial impact on performance.
*
* If the firmware is compiled with static addresses, then @da should specify
* the expected device address of this memory region. If @da is set to
* FW_RSC_ADDR_ANY, then the host will dynamically allocate it, and then
* overwrite @da with the dynamically allocated address.
*
* We will always use @da to negotiate the device addresses, even if it
* isn't using an iommu. In that case, though, it will obviously contain
* physical addresses.
*
* Some remote remote_procs needs to know the allocated physical address
* even if they do use an iommu. This is needed, e.g., if they control
* hardware accelerators which access the physical memory directly (this
* is the case with OMAP4 for instance). In that case, the host will
* overwrite @pa with the dynamically allocated physical address.
* Generally we don't want to expose physical addresses if we don't have to
* (remote remote_procs are generally _not_ trusted), so we might want to
* change this to happen _only_ when explicitly required by the hardware.
*
* @flags is used to provide IOMMU protection flags, and @name should
* (optionally) contain a human readable name of this carveout region
* (mainly for debugging purposes).
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_carveout {
	uint32_t type;		/* resource type (RSC_CARVEOUT) */
	uint32_t da;		/* device address */
	uint32_t pa;		/* physical address */
	uint32_t len;		/* length in bytes */
	uint32_t flags;		/* iommu protection flags */
	uint32_t reserved;	/* reserved (must be zero) */
	uint8_t name[32];	/* human-readable name of the region */
} OPENAMP_PACKED_END;
/**
* struct fw_rsc_devmem - iommu mapping request
* @da: device address
* @pa: physical address
* @len: length (in bytes)
* @flags: iommu protection flags
* @reserved: reserved (must be zero)
* @name: human-readable name of the requested region to be mapped
*
* This resource entry requests the host to iommu map a physically contiguous
* memory region. This is needed in case the remote remote_proc requires
* access to certain memory-based peripherals; _never_ use it to access
* regular memory.
*
* This is obviously only needed if the remote remote_proc is accessing memory
* via an iommu.
*
* @da should specify the required device address, @pa should specify
* the physical address we want to map, @len should specify the size of
* the mapping and @flags is the IOMMU protection flags. As always, @name may
* (optionally) contain a human readable name of this mapping (mainly for
* debugging purposes).
*
* Note: at this point we just "trust" those devmem entries to contain valid
* physical addresses, but this isn't safe and will be changed: eventually we
* want remoteproc implementations to provide us ranges of physical addresses
* the firmware is allowed to request, and not allow firmwares to request
* access to physical addresses that are outside those ranges.
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_devmem {
	uint32_t type;		/* resource type (RSC_DEVMEM) */
	uint32_t da;		/* device address */
	uint32_t pa;		/* physical address */
	uint32_t len;		/* length in bytes */
	uint32_t flags;		/* iommu protection flags */
	uint32_t reserved;	/* reserved (must be zero) */
	uint8_t name[32];	/* human-readable name of the mapping */
} OPENAMP_PACKED_END;
/**
* struct fw_rsc_trace - trace buffer declaration
* @da: device address
* @len: length (in bytes)
* @reserved: reserved (must be zero)
* @name: human-readable name of the trace buffer
*
* This resource entry provides the host information about a trace buffer
* into which the remote remote_proc will write log messages.
*
* @da specifies the device address of the buffer, @len specifies
* its size, and @name may contain a human readable name of the trace buffer.
*
* After booting the remote remote_proc, the trace buffers are exposed to the
* user via debugfs entries (called trace0, trace1, etc..).
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_trace {
	uint32_t type;		/* resource type (RSC_TRACE) */
	uint32_t da;		/* device address of the trace buffer */
	uint32_t len;		/* buffer length in bytes */
	uint32_t reserved;	/* reserved (must be zero) */
	uint8_t name[32];	/* human-readable name of the trace buffer */
} OPENAMP_PACKED_END;
/**
* struct fw_rsc_vdev_vring - vring descriptor entry
* @da: device address
* @align: the alignment between the consumer and producer parts of the vring
* @num: num of buffers supported by this vring (must be power of two)
* @notifyid is a unique rproc-wide notify index for this vring. This notify
* index is used when kicking a remote remote_proc, to let it know that this
* vring is triggered.
* @reserved: reserved (must be zero)
*
* This descriptor is not a resource entry by itself; it is part of the
* vdev resource type (see below).
*
* Note that @da should either contain the device address where
* the remote remote_proc is expecting the vring, or indicate that
* dynamically allocation of the vring's device address is supported.
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_vdev_vring {
	uint32_t da;		/* device address, or indicates dynamic allocation */
	uint32_t align;		/* alignment between consumer and producer parts */
	uint32_t num;		/* number of buffers (must be power of two) */
	uint32_t notifyid;	/* unique rproc-wide notify index for this vring */
	uint32_t reserved;	/* reserved (must be zero) */
} OPENAMP_PACKED_END;
/**
* struct fw_rsc_vdev - virtio device header
* @id: virtio device id (as in virtio_ids.h)
* @notifyid is a unique rproc-wide notify index for this vdev. This notify
* index is used when kicking a remote remote_proc, to let it know that the
* status/features of this vdev have changes.
* @dfeatures specifies the virtio device features supported by the firmware
* @gfeatures is a place holder used by the host to write back the
* negotiated features that are supported by both sides.
* @config_len is the size of the virtio config space of this vdev. The config
* space lies in the resource table immediate after this vdev header.
* @status is a place holder where the host will indicate its virtio progress.
* @num_of_vrings indicates how many vrings are described in this vdev header
* @reserved: reserved (must be zero)
* @vring is an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'.
*
* This resource is a virtio device header: it provides information about
* the vdev, and is then used by the host and its peer remote remote_procs
* to negotiate and share certain virtio properties.
*
* By providing this resource entry, the firmware essentially asks remoteproc
* to statically allocate a vdev upon registration of the rproc (dynamic vdev
* allocation is not yet supported).
*
* Note: unlike virtualization systems, the term 'host' here means
* the Linux side which is running remoteproc to control the remote
* remote_procs. We use the name 'gfeatures' to comply with virtio's terms,
* though there isn't really any virtualized guest OS here: it's the host
* which is responsible for negotiating the final features.
* Yeah, it's a bit confusing.
*
* Note: immediately following this structure is the virtio config space for
* this vdev (which is specific to the vdev; for more info, read the virtio
* spec). the size of the config space is specified by @config_len.
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_vdev {
	uint32_t type;		/* resource type (RSC_VDEV) */
	uint32_t id;		/* virtio device id (as in virtio_ids.h) */
	uint32_t notifyid;	/* unique rproc-wide notify index for this vdev */
	uint32_t dfeatures;	/* device features supported by the firmware */
	uint32_t gfeatures;	/* negotiated features, written back by the host */
	uint32_t config_len;	/* size of the virtio config space that follows */
	uint8_t status;		/* virtio progress indicated by the host */
	uint8_t num_of_vrings;	/* number of vring entries in vring[] */
	uint8_t reserved[2];	/* reserved (must be zero) */
	struct fw_rsc_vdev_vring vring[0];	/* num_of_vrings vring descriptors */
} OPENAMP_PACKED_END;
/**
* struct fw_rsc_vendor - remote processor vendor specific resource
* @len: length of the resource
*
* This resource entry tells the host the vendor specific resource
* required by the remote.
*
* These request entries should precede other shared resource entries
* such as vdevs, vrings.
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_vendor {
	uint32_t type;		/* vendor specific resource type (RSC_VENDOR_START..RSC_VENDOR_END) */
	uint32_t len;		/* length of the resource */
} OPENAMP_PACKED_END;
/**
* struct fw_rsc_rproc_mem - remote processor memory
* @da: device address
* @pa: physical address
* @len: length (in bytes)
* @reserved: reserved (must be zero)
*
* This resource entry tells the host to the remote processor
* memory that the host can be used as shared memory.
*
* These request entries should precede other shared resource entries
* such as vdevs, vrings.
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_rproc_mem {
	uint32_t type;		/* resource type (RSC_RPROC_MEM) */
	uint32_t da;		/* device address */
	uint32_t pa;		/* physical address */
	uint32_t len;		/* length in bytes */
	uint32_t reserved;	/* reserved (must be zero) */
} OPENAMP_PACKED_END;
/*
* struct fw_rsc_fw_chksum - firmware checksum
 * @algo: algorithm to generate the checksum
* @chksum: checksum of the firmware loadable sections.
*
* This resource entry provides checksum for the firmware loadable sections.
* It is used to check if the remote already runs with the expected firmware to
* decide if it needs to start the remote if the remote is already running.
*/
OPENAMP_PACKED_BEGIN
struct fw_rsc_fw_chksum {
	uint32_t type;		/* resource type (RSC_FW_CHKSUM) */
	uint8_t algo[16];	/* algorithm used to generate the checksum */
	uint8_t chksum[64];	/* checksum of the firmware loadable sections */
} OPENAMP_PACKED_END;
struct loader_ops;
struct image_store_ops;
struct remoteproc_ops;
/**
* struct remoteproc_mem
*
* This structure presents the memory used by the remote processor
*
* @da: device memory
* @pa: physical memory
* @size: size of the memory
* @io: pointer to the I/O region
* @node: list node
*/
struct remoteproc_mem {
	metal_phys_addr_t da;	/* device address */
	metal_phys_addr_t pa;	/* physical address */
	size_t size;		/* size of the memory */
	char name[32];		/* name of the memory region (may be empty) */
	struct metal_io_region *io;	/* pointer to the I/O region */
	struct metal_list node;	/* list node for the rproc->mems list */
};
/**
* struct remoteproc
*
* This structure is maintained by the remoteproc to represent the remote
* processor instance. This structure acts as a prime parameter to use
* the remoteproc APIs.
*
* @bootadd: boot address
* @loader: executable loader
* @lock: mutext lock
* @ops: remoteproc operations
* @rsc_table: pointer to resource table
* @rsc_len: length of resource table
* @rsc_io: metal I/O region of resource table
* @mems: remoteproc memories
* @vdevs: remoteproc virtio devices
* @bitmap: bitmap for notify IDs for remoteproc subdevices
* @state: remote processor state
* @priv: private data
*/
struct remoteproc {
	metal_mutex_t lock;	/* mutex lock protecting this instance */
	void *rsc_table;	/* pointer to resource table */
	size_t rsc_len;		/* length of resource table */
	struct metal_io_region *rsc_io;	/* metal I/O region of resource table */
	struct metal_list mems;	/* list of remoteproc memories */
	struct metal_list vdevs;	/* list of remoteproc virtio devices */
	unsigned long bitmap;	/* bitmap of notify IDs for subdevices */
	struct remoteproc_ops *ops;	/* remoteproc operations */
	metal_phys_addr_t bootaddr;	/* boot address */
	struct loader_ops *loader;	/* executable loader */
	unsigned int state;	/* remote processor state (enum remoteproc_state) */
	void *priv;		/* private data */
};
/**
* struct remoteproc_ops
*
* remoteproc operations needs to be implemented by each remoteproc driver
*
* @init: initialize the remoteproc instance
* @remove: remove the remoteproc instance
* @mmap: memory mapped the mempory with physical address or destination
* address as input.
* @handle_rsc: handle the vendor specific resource
* @config: configure the remoteproc to make it ready to load and run
* executable
* @start: kick the remoteproc to run application
* @stop: stop the remoteproc from running application, the resource such as
* memory may not be off.
* @shutdown: shutdown the remoteproc and release its resources.
* @notify: notify the remote
*/
struct remoteproc_ops {
	/* initialize the remoteproc instance */
	struct remoteproc *(*init)(struct remoteproc *rproc,
				   struct remoteproc_ops *ops, void *arg);
	/* remove the remoteproc instance */
	void (*remove)(struct remoteproc *rproc);
	/* map memory, taking physical or device address as input */
	void *(*mmap)(struct remoteproc *rproc,
		      metal_phys_addr_t *pa, metal_phys_addr_t *da,
		      size_t size, unsigned int attribute,
		      struct metal_io_region **io);
	/* handle a vendor specific resource entry */
	int (*handle_rsc)(struct remoteproc *rproc, void *rsc, size_t len);
	/* configure the remoteproc so it is ready to load and run */
	int (*config)(struct remoteproc *rproc, void *data);
	/* kick the remoteproc to run the application */
	int (*start)(struct remoteproc *rproc);
	/* stop the application without releasing resources */
	int (*stop)(struct remoteproc *rproc);
	/* shutdown the remoteproc and release its resources */
	int (*shutdown)(struct remoteproc *rproc);
	/* notify the remote */
	int (*notify)(struct remoteproc *rproc, uint32_t id);
};
/* Remoteproc error codes */
#define RPROC_EBASE 0
#define RPROC_ENOMEM (RPROC_EBASE + 1)
#define RPROC_EINVAL (RPROC_EBASE + 2)
#define RPROC_ENODEV (RPROC_EBASE + 3)
#define RPROC_EAGAIN (RPROC_EBASE + 4)
#define RPROC_ERR_RSC_TAB_TRUNC (RPROC_EBASE + 5)
#define RPROC_ERR_RSC_TAB_VER (RPROC_EBASE + 6)
#define RPROC_ERR_RSC_TAB_RSVD (RPROC_EBASE + 7)
#define RPROC_ERR_RSC_TAB_VDEV_NRINGS (RPROC_EBASE + 9)
#define RPROC_ERR_RSC_TAB_NP (RPROC_EBASE + 10)
#define RPROC_ERR_RSC_TAB_NS (RPROC_EBASE + 11)
#define RPROC_ERR_LOADER_STATE (RPROC_EBASE + 12)
#define RPROC_EMAX (RPROC_EBASE + 16)
#define RPROC_EPTR (void *)(-1)
#define RPROC_EOF (void *)(-1)
/* Decode an error-encoding pointer back into its integer error value. */
static inline long RPROC_PTR_ERR(const void *ptr)
{
	long err = (long)ptr;

	return err;
}
/* Check whether a returned pointer encodes an error value
 * (i.e. lies in the top RPROC_EMAX addresses). */
static inline int RPROC_IS_ERR(const void *ptr)
{
	return ((unsigned long)ptr >= (unsigned long)(-RPROC_EMAX)) ? 1 : 0;
}
/* Encode an integer error value as a pointer-sized return value. */
static inline void *RPROC_ERR_PTR(long error)
{
	void *encoded = (void *)error;

	return encoded;
}
/**
 * enum rproc_state - remote processor states
 * @RPROC_OFFLINE: remote is offline
 * @RPROC_CONFIGURED: remote is configured, ready to load the executable
 * @RPROC_READY: remote is ready to start
 * @RPROC_RUNNING: remote is up and running
 * @RPROC_SUSPENDED: remote is suspended
 * @RPROC_ERROR: remote has error; need to recover
 * @RPROC_STOPPED: remote is stopped
 * @RPROC_LAST: just keep this one at the end
 */
enum remoteproc_state {
	RPROC_OFFLINE = 0,	/* remote is offline */
	RPROC_CONFIGURED = 1,	/* remote is configured, ready to load executable */
	RPROC_READY = 2,	/* remote is ready to start */
	RPROC_RUNNING = 3,	/* remote is up and running */
	RPROC_SUSPENDED = 4,	/* remote is suspended */
	RPROC_ERROR = 5,	/* remote has error; needs recovery */
	RPROC_STOPPED = 6,	/* remote is stopped */
	RPROC_LAST = 7,		/* just keep this one at the end */
};
/**
* remoteproc_init
*
* Initializes remoteproc resource.
*
* @rproc - pointer to remoteproc instance
* @ops - pointer to remoteproc operations
* @priv - pointer to private data
*
* @returns created remoteproc pointer
*/
struct remoteproc *remoteproc_init(struct remoteproc *rproc,
struct remoteproc_ops *ops, void *priv);
/**
* remoteproc_remove
*
* Remove remoteproc resource
*
* @rproc - pointer to remoteproc instance
*
* returns 0 for success, negative value for failure
*/
int remoteproc_remove(struct remoteproc *rproc);
/**
* remoteproc_init_mem
*
* Initialize remoteproc memory
*
 * @mem - pointer to remoteproc memory
 * @name - memory name
 * @pa - physical address
* @da - device address
* @size - memory size
* @io - pointer to the I/O region
*/
static inline void
remoteproc_init_mem(struct remoteproc_mem *mem, const char *name,
		    metal_phys_addr_t pa, metal_phys_addr_t da,
		    size_t size, struct metal_io_region *io)
{
	if (!mem)
		return;
	if (name) {
		/* strncpy() leaves the destination unterminated when the
		 * source is as long as the buffer; copy at most size-1
		 * bytes and force NUL termination so mem->name is always
		 * a valid C string. */
		strncpy(mem->name, name, sizeof(mem->name) - 1);
		mem->name[sizeof(mem->name) - 1] = '\0';
	} else {
		mem->name[0] = 0;
	}
	mem->pa = pa;
	mem->da = da;
	mem->io = io;
	mem->size = size;
}
/**
* remoteproc_add_mem
*
* Add remoteproc memory
*
* @rproc - pointer to remoteproc
* @mem - pointer to remoteproc memory
*/
static inline void
remoteproc_add_mem(struct remoteproc *rproc, struct remoteproc_mem *mem)
{
	/* Append the memory descriptor only when both arguments are valid. */
	if (rproc && mem)
		metal_list_add_tail(&rproc->mems, &mem->node);
}
/**
* remoteproc_get_io_with_name
*
* get remoteproc memory I/O region with name
*
* @rproc - pointer to the remote processor
* @name - name of the shared memory
* @io - pointer to the pointer of the I/O region
*
* returns metal I/O region pointer, NULL for failure
*/
struct metal_io_region *
remoteproc_get_io_with_name(struct remoteproc *rproc,
const char *name);
/**
* remoteproc_get_io_with_pa
*
* get remoteproc memory I/O region with physical address
*
* @rproc - pointer to the remote processor
* @pa - physical address
*
* returns metal I/O region pointer, NULL for failure
*/
struct metal_io_region *
remoteproc_get_io_with_pa(struct remoteproc *rproc,
metal_phys_addr_t pa);
/**
* remoteproc_get_io_with_da
*
* get remoteproc memory I/O region with device address
*
* @rproc - pointer to the remote processor
* @da - device address
* @offset - I/O region offset of the device address
*
* returns metal I/O region pointer, NULL for failure
*/
struct metal_io_region *
remoteproc_get_io_with_da(struct remoteproc *rproc,
metal_phys_addr_t da,
unsigned long *offset);
/**
* remoteproc_get_io_with_va
*
* get remoteproc memory I/O region with virtual address
*
* @rproc - pointer to the remote processor
* @va - virtual address
*
* returns metal I/O region pointer, NULL for failure
*/
struct metal_io_region *
remoteproc_get_io_with_va(struct remoteproc *rproc,
void *va);
/**
* remoteproc_mmap
*
* remoteproc mmap memory
*
* @rproc - pointer to the remote processor
* @pa - physical address pointer
* @da - device address pointer
* @size - size of the memory
* @attribute - memory attribute
* @io - pointer to the I/O region
*
* returns pointer to the memory
*/
void *remoteproc_mmap(struct remoteproc *rproc,
metal_phys_addr_t *pa, metal_phys_addr_t *da,
size_t size, unsigned int attribute,
struct metal_io_region **io);
/**
* remoteproc_parse_rsc_table
*
* Parse resource table of remoteproc
*
* @rproc - pointer to remoteproc instance
* @rsc_table - pointer to resource table
* @rsc_size - resource table size
*
* returns 0 for success and negative value for errors
*/
int remoteproc_parse_rsc_table(struct remoteproc *rproc,
struct resource_table *rsc_table,
size_t rsc_size);
/**
* remoteproc_set_rsc_table
*
* Parse and set resource table of remoteproc
*
* @rproc - pointer to remoteproc instance
* @rsc_table - pointer to resource table
* @rsc_size - resource table size
*
* returns 0 for success and negative value for errors
*/
int remoteproc_set_rsc_table(struct remoteproc *rproc,
struct resource_table *rsc_table,
size_t rsc_size);
/**
* remoteproc_config
*
* This function configures the remote processor to get it
* ready to load and run executable.
*
* @rproc - pointer to remoteproc instance to start
* @data - configuration data
*
* returns 0 for success and negative value for errors
*/
int remoteproc_config(struct remoteproc *rproc, void *data);
/**
* remoteproc_start
*
* This function starts the remote processor.
* It assumes the firmware is already loaded,
*
* @rproc - pointer to remoteproc instance to start
*
* returns 0 for success and negative value for errors
*/
int remoteproc_start(struct remoteproc *rproc);
/**
* remoteproc_stop
*
* This function stops the remote processor but it
* will not release its resource.
*
* @rproc - pointer to remoteproc instance
*
* returns 0 for success and negative value for errors
*/
int remoteproc_stop(struct remoteproc *rproc);
/**
* remoteproc_shutdown
*
* This function shutdown the remote processor and
* release its resources.
*
* @rproc - pointer to remoteproc instance
*
* returns 0 for success and negative value for errors
*/
int remoteproc_shutdown(struct remoteproc *rproc);
/**
* remoteproc_load
*
* load executable, it expects the user application defines how to
* open the executable file and how to get data from the executable file
* and how to load data to the target memory.
*
* @rproc: pointer to the remoteproc instance
* @path: optional path to the image file
* @store: pointer to user defined image store argument
* @store_ops: pointer to image store operations
* @image_info: pointer to memory which stores image information used
* by remoteproc loader
*
* return 0 for success and negative value for failure
*/
int remoteproc_load(struct remoteproc *rproc, const char *path,
void *store, struct image_store_ops *store_ops,
void **img_info);
/**
* remoteproc_load_noblock
*
 * load executable, it expects the caller has loaded image data to local
 * memory and passed it to this function. If the function needs more
 * image data it will return the next expected image data offset and
* the next expected image data length. If the function requires the
* caller to download image data to the target memory, it will also
* return the target physical address besides the offset and length.
* This function can be used to load firmware in stream mode. In this
* mode, you cannot do seek to the executable file. If the executable
* is ELF, it cannot get the resource table section before it loads
* the full ELF file. Furthermore, application usually don't store
 * the data which is loaded to local memory in streaming mode, and
 * thus, in this mode, it will load the binary to the target memory
 * before it gets the resource table. And thus, when calling this function
 * don't put the target executable memory in the resource table, as
 * this function will parse the resource table after it loads the binary
 * to target memory.
*
* @rproc: pointer to the remoteproc instance
* @img_data: pointer to image data for remoteproc loader to parse
* @offset: image data offset to the beginning of the image file
* @len: image data length
* @image_info: pointer to memory which stores image information used
* by remoteproc loader
* @pa: pointer to the target memory physical address. If the next expected
* data doesn't need to load to the target memory, the function will
* set it to ANY.
 * @io: pointer to the pointer of the target memory I/O region. If the next
 *      expected data doesn't need to load to the target memory, the function
 *      will set it to ANY.
* @noffset: pointer to the next image data offset to the beginning of
* the image file needs to load to local or to the target
* memory.
* @nlen: pointer to the next image data length needs to load to local
* or to the target memory.
* @nmlen: pointer to the memory size. It is only used when the next
* expected data is going to be loaded to the target memory. E.g.
* in ELF, it is possible that loadable segment in memory is
* larger that the segment data in the ELF file. In this case,
* application will need to pad the rest of the memory with
* padding.
* @padding: pointer to the padding value. It is only used when the next
* expected data is going to be loaded to the target memory.
* and the target memory size is larger than the segment data in
* the executable file.
*
* return 0 for success and negative value for failure
*/
int remoteproc_load_noblock(struct remoteproc *rproc,
const void *img_data, size_t offset, size_t len,
void **img_info,
metal_phys_addr_t *pa, struct metal_io_region **io,
size_t *noffset, size_t *nlen,
size_t *nmlen, unsigned char *padding);
/**
 * remoteproc_allocate_id
 *
 * Allocate a notify id for a resource.
 *
 * @rproc - pointer to the remoteproc instance
 * @start - start of the id range (inclusive)
 * @end - end of the id range (exclusive)
 *
 * return the allocated notify id
 */
unsigned int remoteproc_allocate_id(struct remoteproc *rproc,
                                    unsigned int start,
                                    unsigned int end);
/* remoteproc_create_virtio
 *
 * Create a virtio device; returns a pointer to the created virtio device.
 *
 * @rproc: pointer to the remoteproc instance
 * @vdev_id: virtio device ID
 * @role: virtio device role
 * @rst_cb: virtio device reset callback
 *
 * return pointer to the created virtio device, NULL for failure.
 */
struct virtio_device *
remoteproc_create_virtio(struct remoteproc *rproc,
                         int vdev_id, unsigned int role,
                         void (*rst_cb)(struct virtio_device *vdev));
/* remoteproc_remove_virtio
 *
 * Remove a virtio device previously created with remoteproc_create_virtio.
 *
 * @rproc: pointer to the remoteproc instance
 * @vdev: pointer to the virtio device
 *
 */
void remoteproc_remove_virtio(struct remoteproc *rproc,
                              struct virtio_device *vdev);
/* remoteproc_get_notification
 *
 * Called when the remoteproc gets notified; it will check its
 * subdevices for the notification.
 *
 * @rproc - pointer to the remoteproc instance
 * @notifyid - notification id
 *
 * return 0 for success, negative value for failure
 */
int remoteproc_get_notification(struct remoteproc *rproc,
                                uint32_t notifyid);
#if defined __cplusplus
}
#endif
#endif /* REMOTEPROC_H_ */
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**************************************************************************
* FILE NAME
*
* remoteproc_loader.h
*
* COMPONENT
*
* OpenAMP stack.
*
* DESCRIPTION
*
* This file provides definitions for remoteproc loader
*
*
**************************************************************************/
#ifndef REMOTEPROC_LOADER_H_
#define REMOTEPROC_LOADER_H_
#include <metal/io.h>
#include <metal/list.h>
#include <metal/sys.h>
#include <openamp/remoteproc.h>
#if defined __cplusplus
extern "C" {
#endif
/* Loader feature macros */
#define SUPPORT_SEEK 1UL
/* Remoteproc loader any address */
#define RPROC_LOAD_ANYADDR ((metal_phys_addr_t)-1)
/* Remoteproc loader Exectuable Image Parsing States */
/* Remoteproc loader parser intial state */
#define RPROC_LOADER_NOT_READY 0x0UL
/* Remoteproc loader ready to load, even it can be not finish parsing */
#define RPROC_LOADER_READY_TO_LOAD 0x10000UL
/* Remoteproc loader post data load */
#define RPROC_LOADER_POST_DATA_LOAD 0x20000UL
/* Remoteproc loader finished loading */
#define RPROC_LOADER_LOAD_COMPLETE 0x40000UL
/* Remoteproc loader state mask */
#define RPROC_LOADER_MASK 0x00FF0000UL
/* Remoteproc loader private mask */
#define RPROC_LOADER_PRIVATE_MASK 0x0000FFFFUL
/* Remoteproc loader reserved mask */
#define RPROC_LOADER_RESERVED_MASK 0x0F000000UL
/**
 * struct image_store_ops - user defined image store operations
 * @open: user defined callback to open the "firmware" to prepare loading
 * @close: user defined callback to close the "firmware" to clean up
 *         after loading
 * @load: user defined callback to load the firmware contents to target
 *        memory or local memory
 * @features: loader supported features, e.g. seek (SUPPORT_SEEK)
 */
struct image_store_ops {
    int (*open)(void *store, const char *path, const void **img_data);
    void (*close)(void *store);
    int (*load)(void *store, size_t offset, size_t size,
                const void **data,
                metal_phys_addr_t pa,
                struct metal_io_region *io, char is_blocking);
    unsigned int features;
};
/**
 * struct loader_ops - loader operations
 * @load_header: define how to get the executable headers
 * @load_data: define how to load the target data
 * @locate_rsc_table: define how to get the resource table target address,
 *                    offset to the ELF image file and size of the resource
 *                    table.
 * @release: define how to release the loader
 * @get_entry: get entry address
 * @get_load_state: get load state from the image information
 */
struct loader_ops {
    int (*load_header)(const void *img_data, size_t offset, size_t len,
                       void **img_info, int last_state,
                       size_t *noffset, size_t *nlen);
    int (*load_data)(struct remoteproc *rproc,
                     const void *img_data, size_t offset, size_t len,
                     void **img_info, int last_load_state,
                     metal_phys_addr_t *da,
                     size_t *noffset, size_t *nlen,
                     unsigned char *padding, size_t *nmemsize);
    int (*locate_rsc_table)(void *img_info, metal_phys_addr_t *da,
                            size_t *offset, size_t *size);
    void (*release)(void *img_info);
    metal_phys_addr_t (*get_entry)(void *img_info);
    int (*get_load_state)(void *img_info);
};
#if defined __cplusplus
}
#endif
#endif /* REMOTEPROC_LOADER_H_ */
/*
* Remoteproc Virtio Framework
*
* Copyright(c) 2018 Xilinx Ltd.
* Copyright(c) 2011 Texas Instruments, Inc.
* Copyright(c) 2011 Google, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Texas Instruments nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef REMOTEPROC_VIRTIO_H
#define REMOTEPROC_VIRTIO_H
#include <metal/io.h>
#include <metal/list.h>
#include <openamp/virtio.h>
#if defined __cplusplus
extern "C" {
#endif
/* Type of the vdev notification function the user should implement */
typedef int (*rpvdev_notify_func)(void *priv, uint32_t id);
/**
 * struct remoteproc_virtio
 * @priv pointer to private data
 * @notify_id notification id
 * @vdev_rsc address of vdev resource
 * @vdev_rsc_io metal I/O region of vdev_info, can be NULL
 * @notify notification function
 * @vdev virtio device
 * @node list node
 */
struct remoteproc_virtio {
    void *priv;
    uint32_t notify_id;
    void *vdev_rsc;
    struct metal_io_region *vdev_rsc_io;
    rpvdev_notify_func notify;
    struct virtio_device vdev;
    struct metal_list node;
};
/**
 * rproc_virtio_create_vdev
 *
 * Create a rproc virtio vdev.
 *
 * @role: 0 - virtio master, 1 - virtio slave
 * @notifyid: virtio device notification id
 * @rsc: pointer to the virtio device resource
 * @rsc_io: pointer to the virtio device resource I/O region
 * @priv: pointer to the private data
 * @notify: vdev and virtqueue notification function
 * @rst_cb: reset virtio device callback
 *
 * return pointer to the created virtio device for success,
 * NULL for failure.
 */
struct virtio_device *
rproc_virtio_create_vdev(unsigned int role, unsigned int notifyid,
                         void *rsc, struct metal_io_region *rsc_io,
                         void *priv,
                         rpvdev_notify_func notify,
                         virtio_dev_reset_cb rst_cb);
/**
 * rproc_virtio_remove_vdev
 *
 * Remove a rproc virtio vdev.
 *
 * @vdev - pointer to the virtio device
 */
void rproc_virtio_remove_vdev(struct virtio_device *vdev);
/**
 * rproc_virtio_init_vring
 *
 * Initialize a rproc virtio vring.
 *
 * @vdev: pointer to the virtio device
 * @index: vring index in the virtio device
 * @notifyid: remoteproc vring notification id
 * @va: vring virtual address
 * @io: pointer to vring I/O region
 * @num_descs: number of descriptors
 * @align: vring alignment
 *
 * return 0 for success, negative value for failure.
 */
int rproc_virtio_init_vring(struct virtio_device *vdev, unsigned int index,
                            unsigned int notifyid, void *va,
                            struct metal_io_region *io,
                            unsigned int num_descs, unsigned int align);
/**
 * rproc_virtio_notified
 *
 * Called when the remoteproc virtio gets notified.
 *
 * @vdev - pointer to the virtio device
 * @notifyid - notify id
 *
 * return 0 for success, negative value for failure
 */
int rproc_virtio_notified(struct virtio_device *vdev, uint32_t notifyid);
/**
 * rproc_virtio_wait_remote_ready
 *
 * Blocking function, waiting until the remote core is ready to start
 * communications.
 *
 * @vdev - pointer to the virtio device
 *
 * Returns only once the remote processor is ready.
 */
void rproc_virtio_wait_remote_ready(struct virtio_device *vdev);
#if defined __cplusplus
}
#endif
#endif /* REMOTEPROC_VIRTIO_H */
/*
* Remote processor messaging
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
* All rights reserved.
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _RPMSG_H_
#define _RPMSG_H_
#include <openamp/compiler.h>
#include <metal/mutex.h>
#include <metal/list.h>
#include <metal/utilities.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#if defined __cplusplus
extern "C" {
#endif
/* Configurable parameters */
#define RPMSG_NAME_SIZE (32)
#define RPMSG_ADDR_BMP_SIZE (128)
/* Fixed endpoint address of the name service */
#define RPMSG_NS_EPT_ADDR (0x35)
/* Wildcard endpoint address: not yet bound */
#define RPMSG_ADDR_ANY 0xFFFFFFFF
/* Error macros. */
#define RPMSG_SUCCESS 0
#define RPMSG_ERROR_BASE -2000
#define RPMSG_ERR_NO_MEM (RPMSG_ERROR_BASE - 1)
#define RPMSG_ERR_NO_BUFF (RPMSG_ERROR_BASE - 2)
#define RPMSG_ERR_PARAM (RPMSG_ERROR_BASE - 3)
#define RPMSG_ERR_DEV_STATE (RPMSG_ERROR_BASE - 4)
#define RPMSG_ERR_BUFF_SIZE (RPMSG_ERROR_BASE - 5)
#define RPMSG_ERR_INIT (RPMSG_ERROR_BASE - 6)
#define RPMSG_ERR_ADDR (RPMSG_ERROR_BASE - 7)
struct rpmsg_endpoint;
struct rpmsg_device;
/* Endpoint receive callback; return value reserved (use RPMSG_SUCCESS) */
typedef int (*rpmsg_ept_cb)(struct rpmsg_endpoint *ept, void *data,
                            size_t len, uint32_t src, void *priv);
/* Called when the remote endpoint bound to a local endpoint is destroyed */
typedef void (*rpmsg_ns_unbind_cb)(struct rpmsg_endpoint *ept);
/* Called on a name service announcement with no local endpoint bound yet */
typedef void (*rpmsg_ns_bind_cb)(struct rpmsg_device *rdev,
                                 const char *name, uint32_t dest);
/**
 * struct rpmsg_endpoint - binds a local rpmsg address to its user
 * @name: name of the service supported
 * @rdev: pointer to the rpmsg device
 * @addr: local address of the endpoint
 * @dest_addr: address of the default remote endpoint bound
 * @cb: user rx callback; the return value of this callback is reserved
 *      for future use, for now only RPMSG_SUCCESS is allowed as return value
 * @ns_unbind_cb: endpoint service unbind callback, called when the remote
 *                ept is destroyed
 * @node: endpoint node
 * @priv: private data for the driver's use
 *
 * In essence, an rpmsg endpoint represents a listener on the rpmsg bus, as
 * it binds an rpmsg address with an rx callback handler.
 */
struct rpmsg_endpoint {
    char name[RPMSG_NAME_SIZE];
    struct rpmsg_device *rdev;
    uint32_t addr;
    uint32_t dest_addr;
    rpmsg_ept_cb cb;
    rpmsg_ns_unbind_cb ns_unbind_cb;
    struct metal_list node;
    void *priv;
};
/**
 * struct rpmsg_device_ops - RPMsg device operations
 * @send_offchannel_raw: send RPMsg data
 */
struct rpmsg_device_ops {
    int (*send_offchannel_raw)(struct rpmsg_device *rdev,
                               uint32_t src, uint32_t dst,
                               const void *data, int size, int wait);
};
/**
 * struct rpmsg_device - representation of a RPMsg device
 * @endpoints: list of endpoints
 * @ns_ept: name service endpoint
 * @bitmap: bitmap used for endpoint address allocation
 * @lock: mutex lock for rpmsg management
 * @ns_bind_cb: callback handler for name service announcement without local
 *              endpoints waiting to bind
 * @ops: RPMsg device operations
 */
struct rpmsg_device {
    struct metal_list endpoints;
    struct rpmsg_endpoint ns_ept;
    unsigned long bitmap[metal_bitmap_longs(RPMSG_ADDR_BMP_SIZE)];
    metal_mutex_t lock;
    rpmsg_ns_bind_cb ns_bind_cb;
    struct rpmsg_device_ops ops;
};
/**
 * rpmsg_send_offchannel_raw() - send a message across to the remote processor,
 * specifying source and destination address.
 * @ept: the rpmsg endpoint
 * @src: source endpoint address
 * @dst: destination endpoint address
 * @data: payload of the message
 * @size: length of the payload
 * @wait: whether to block when no TX buffer is available
 *
 * This function sends @data of length @size to the remote @dst address from
 * the source @src address.
 * The message will be sent to the remote processor which the channel belongs
 * to.
 * If @wait is set and there are no TX buffers available, the function will
 * block until one becomes available, or a timeout of 15 seconds elapses.
 * When the latter happens, -ERESTARTSYS is returned.
 *
 * Returns number of bytes it has sent or negative error value on failure.
 */
int rpmsg_send_offchannel_raw(struct rpmsg_endpoint *ept, uint32_t src,
                              uint32_t dst, const void *data, int size,
                              int wait);
/**
 * rpmsg_send() - send a message across to the remote processor
 * @ept: the rpmsg endpoint
 * @data: payload of the message
 * @len: length of the payload
 *
 * Transmit @data (@len bytes) over @ept, using the endpoint's own source
 * and destination addresses. If no TX buffer is available, the call blocks
 * until one frees up or a 15 second timeout expires, in which case
 * -ERESTARTSYS is returned.
 *
 * Returns number of bytes it has sent or negative error value on failure.
 */
static inline int rpmsg_send(struct rpmsg_endpoint *ept, const void *data,
                             int len)
{
    uint32_t dst = ept->dest_addr;

    /* The endpoint must already be bound to a remote address. */
    if (dst == RPMSG_ADDR_ANY)
        return RPMSG_ERR_ADDR;

    return rpmsg_send_offchannel_raw(ept, ept->addr, dst, data, len, true);
}
/**
 * rpmsg_sendto() - send a message across to the remote processor, specify dst
 * @ept: the rpmsg endpoint
 * @data: payload of message
 * @len: length of payload
 * @dst: destination address
 *
 * Transmit @data (@len bytes) to the remote address @dst, using @ept's own
 * source address. If no TX buffer is available, the call blocks until one
 * frees up or a 15 second timeout expires, in which case -ERESTARTSYS is
 * returned.
 *
 * Returns number of bytes it has sent or negative error value on failure.
 */
static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, const void *data,
                               int len, uint32_t dst)
{
    uint32_t src = ept->addr;

    return rpmsg_send_offchannel_raw(ept, src, dst, data, len, true);
}
/**
 * rpmsg_send_offchannel() - send a message using explicit src/dst addresses
 * @ept: the rpmsg endpoint
 * @src: source address
 * @dst: destination address
 * @data: payload of message
 * @len: length of payload
 *
 * Transmit @data (@len bytes) from the explicit source address @src to the
 * remote address @dst over the channel @ept belongs to. If no TX buffer is
 * available, the call blocks until one frees up or a 15 second timeout
 * expires, in which case -ERESTARTSYS is returned.
 *
 * Returns number of bytes it has sent or negative error value on failure.
 */
static inline int rpmsg_send_offchannel(struct rpmsg_endpoint *ept,
                                        uint32_t src, uint32_t dst,
                                        const void *data, int len)
{
    const int wait_for_buffer = true;

    return rpmsg_send_offchannel_raw(ept, src, dst, data, len,
                                     wait_for_buffer);
}
/**
 * rpmsg_trysend() - send a message across to the remote processor
 * @ept: the rpmsg endpoint
 * @data: payload of message
 * @len: length of payload
 *
 * Non-blocking variant of rpmsg_send(): transmit @data (@len bytes) using
 * @ept's source and destination addresses, but if no TX buffer is available
 * return -ENOMEM immediately instead of waiting.
 *
 * Returns number of bytes it has sent or negative error value on failure.
 */
static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, const void *data,
                                int len)
{
    uint32_t dst = ept->dest_addr;

    return (dst == RPMSG_ADDR_ANY) ?
           RPMSG_ERR_ADDR :
           rpmsg_send_offchannel_raw(ept, ept->addr, dst, data, len, false);
}
/**
 * rpmsg_trysendto() - send a message across to the remote processor,
 * specify dst
 * @ept: the rpmsg endpoint
 * @data: payload of message
 * @len: length of payload
 * @dst: destination address
 *
 * Non-blocking variant of rpmsg_sendto(): transmit @data (@len bytes) to the
 * remote address @dst using @ept's source address, but if no TX buffer is
 * available return -ENOMEM immediately instead of waiting.
 *
 * Returns number of bytes it has sent or negative error value on failure.
 */
static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, const void *data,
                                  int len, uint32_t dst)
{
    uint32_t src = ept->addr;

    return rpmsg_send_offchannel_raw(ept, src, dst, data, len, false);
}
/**
 * rpmsg_trysend_offchannel() - send a message using explicit src/dst addresses
 * @ept: the rpmsg endpoint
 * @src: source address
 * @dst: destination address
 * @data: payload of message
 * @len: length of payload
 *
 * Non-blocking variant of rpmsg_send_offchannel(): transmit @data (@len
 * bytes) from the explicit source address @src to the remote address @dst,
 * but if no TX buffer is available return -ENOMEM immediately instead of
 * waiting.
 *
 * Returns number of bytes it has sent or negative error value on failure.
 */
static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept,
                                           uint32_t src, uint32_t dst,
                                           const void *data, int len)
{
    const int wait_for_buffer = false;

    return rpmsg_send_offchannel_raw(ept, src, dst, data, len,
                                     wait_for_buffer);
}
/**
 * rpmsg_init_ept - initialize rpmsg endpoint
 *
 * Initialize an RPMsg endpoint with a name, source address,
 * remoteproc address, endpoint callback, and destroy endpoint callback.
 *
 * @ept: pointer to rpmsg endpoint
 * @name: service name associated to the endpoint (truncated to
 *        RPMSG_NAME_SIZE - 1 characters if longer)
 * @src: local address of the endpoint
 * @dest: target address of the endpoint
 * @cb: endpoint callback
 * @ns_unbind_cb: endpoint service unbind callback, called when remote ept is
 *                destroyed.
 */
static inline void rpmsg_init_ept(struct rpmsg_endpoint *ept,
                                  const char *name,
                                  uint32_t src, uint32_t dest,
                                  rpmsg_ept_cb cb,
                                  rpmsg_ns_unbind_cb ns_unbind_cb)
{
    /*
     * BUGFIX: a plain strncpy(ept->name, name, sizeof(ept->name)) leaves
     * ept->name unterminated when the service name is RPMSG_NAME_SIZE
     * characters or longer. Copy at most size - 1 bytes and always
     * NUL-terminate the (possibly truncated) name.
     */
    strncpy(ept->name, name, sizeof(ept->name) - 1);
    ept->name[sizeof(ept->name) - 1] = '\0';
    ept->addr = src;
    ept->dest_addr = dest;
    ept->cb = cb;
    ept->ns_unbind_cb = ns_unbind_cb;
}
/**
 * rpmsg_create_ept - create rpmsg endpoint and register it to rpmsg device
 *
 * Create an RPMsg endpoint, initialize it with a name, source address,
 * remoteproc address, endpoint callback, and destroy endpoint callback,
 * and register it to the RPMsg device.
 *
 * @ept: pointer to rpmsg endpoint
 * @rdev: pointer to the rpmsg device
 * @name: service name associated to the endpoint
 * @src: local address of the endpoint
 * @dest: target address of the endpoint
 * @cb: endpoint callback
 * @ns_unbind_cb: endpoint service unbind callback, called when remote ept is
 *                destroyed.
 *
 * In essence, an rpmsg endpoint represents a listener on the rpmsg bus, as
 * it binds an rpmsg address with an rx callback handler.
 *
 * An rpmsg client should create an endpoint to discuss with the remote. The
 * client provides at least a channel name and a callback for message
 * notification; by default the endpoint source address should be set to
 * RPMSG_ADDR_ANY.
 *
 * As an option, some rpmsg clients can specify an endpoint with a specific
 * source address.
 */
int rpmsg_create_ept(struct rpmsg_endpoint *ept, struct rpmsg_device *rdev,
                     const char *name, uint32_t src, uint32_t dest,
                     rpmsg_ept_cb cb, rpmsg_ns_unbind_cb ns_unbind_cb);
/**
 * rpmsg_destroy_ept - destroy rpmsg endpoint and unregister it from rpmsg
 * device
 *
 * @ept: pointer to the rpmsg endpoint
 *
 * It unregisters the rpmsg endpoint from the rpmsg device and calls the
 * destroy endpoint callback if it is provided.
 */
void rpmsg_destroy_ept(struct rpmsg_endpoint *ept);
/**
 * is_rpmsg_ept_ready - check if the rpmsg endpoint is ready to send
 *
 * @ept: pointer to rpmsg endpoint
 *
 * Returns 1 when both the local and the destination addresses of @ept are
 * bound (i.e. neither is RPMSG_ADDR_ANY), 0 otherwise.
 */
static inline unsigned int is_rpmsg_ept_ready(struct rpmsg_endpoint *ept)
{
    if (ept->addr == RPMSG_ADDR_ANY || ept->dest_addr == RPMSG_ADDR_ANY)
        return 0;

    return 1;
}
#if defined __cplusplus
}
#endif
#endif /* _RPMSG_H_ */
#ifndef RPMSG_RETARGET_H
#define RPMSG_RETARGET_H
#include <metal/mutex.h>
#include <openamp/open_amp.h>
#include <stdint.h>
#if defined __cplusplus
extern "C" {
#endif
/* File operations system call identifiers */
#define OPEN_SYSCALL_ID 0x1UL
#define CLOSE_SYSCALL_ID 0x2UL
#define WRITE_SYSCALL_ID 0x3UL
#define READ_SYSCALL_ID 0x4UL
#define ACK_STATUS_ID 0x5UL
#define TERM_SYSCALL_ID 0x6UL
/* Default endpoint address used by the RPC proxy */
#define DEFAULT_PROXY_ENDPOINT 0xFFUL
struct rpmsg_rpc_data;
/* User-supplied poll function type (see rpmsg_rpc_init) */
typedef int (*rpmsg_rpc_poll)(void *arg);
/* User-supplied shutdown callback type (see rpmsg_rpc_init) */
typedef void (*rpmsg_rpc_shutdown_cb)(struct rpmsg_rpc_data *rpc);
/* Header common to every RPC system call message */
struct rpmsg_rpc_syscall_header {
    int32_t int_field1;
    int32_t int_field2;
    uint32_t data_len;
};
/* An RPC system call: identifier plus argument header */
struct rpmsg_rpc_syscall {
    uint32_t id;
    struct rpmsg_rpc_syscall_header args;
};
/* Global state for one RPMsg remote-procedure-call instance */
struct rpmsg_rpc_data {
    struct rpmsg_endpoint ept;
    int ept_destroyed;
    atomic_int nacked;
    void *respbuf;
    size_t respbuf_len;
    rpmsg_rpc_poll poll;
    void *poll_arg;
    rpmsg_rpc_shutdown_cb shutdown_cb;
    metal_mutex_t lock;
    struct metal_spinlock buflock;
};
/**
 * rpmsg_rpc_init - initialize RPMsg remote procedure call
 *
 * This function initializes the remote procedure call
 * global data. RPMsg RPC will send a request to the remote and
 * wait for the callback.
 *
 * @rpc: pointer to the global remote procedure call data
 * @rdev: pointer to the rpmsg device
 * @ept_name: name of the endpoint used by RPC
 * @ept_addr: address of the endpoint used by RPC
 * @ept_raddr: remote address of the endpoint used by RPC
 * @poll_arg: pointer to poll function argument
 * @poll: poll function
 * @shutdown_cb: shutdown callback function
 *
 * return 0 for success, and negative value for failure.
 */
int rpmsg_rpc_init(struct rpmsg_rpc_data *rpc,
                   struct rpmsg_device *rdev,
                   const char *ept_name, uint32_t ept_addr,
                   uint32_t ept_raddr,
                   void *poll_arg, rpmsg_rpc_poll poll,
                   rpmsg_rpc_shutdown_cb shutdown_cb);
/**
 * rpmsg_rpc_release - release RPMsg remote procedure call
 *
 * This function releases the remote procedure call
 * global data.
 *
 * @rpc: pointer to the global remote procedure call
 */
void rpmsg_rpc_release(struct rpmsg_rpc_data *rpc);
/**
 * rpmsg_rpc_send - request an RPMsg RPC call
 *
 * This function sends an RPC request; it returns with the length
 * of the received data and fills the response buffer.
 *
 * @rpc: pointer to remote procedure call data struct
 * @req: pointer to request buffer
 * @len: length of the request data
 * @resp: pointer to where to store the response buffer
 * @resp_len: length of the response buffer
 *
 * return length of the received response, negative value for failure.
 */
int rpmsg_rpc_send(struct rpmsg_rpc_data *rpc,
                   void *req, size_t len,
                   void *resp, size_t resp_len);
/**
 * rpmsg_set_default_rpc - set default RPMsg RPC data
 *
 * The default RPC data is used to redirect standard C file operations
 * to RPMsg channels.
 *
 * @rpc: pointer to remote procedure call data struct
 */
void rpmsg_set_default_rpc(struct rpmsg_rpc_data *rpc);
#if defined __cplusplus
}
#endif
#endif /* RPMSG_RETARGET_H */
/*
* rpmsg based on virtio
*
* Copyright (C) 2018 Linaro, Inc.
*
* All rights reserved.
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef _RPMSG_VIRTIO_H_
#define _RPMSG_VIRTIO_H_
#include <metal/io.h>
#include <metal/mutex.h>
#include <openamp/rpmsg.h>
#include <openamp/virtio.h>
#if defined __cplusplus
extern "C" {
#endif
/* Configurable parameters */
/* Shared buffer size used by RPMsg virtio (overridable at build time) */
#ifndef RPMSG_BUFFER_SIZE
#define RPMSG_BUFFER_SIZE (512)
#endif
/* The feature bitmap for virtio rpmsg */
#define VIRTIO_RPMSG_F_NS 0 /* RP supports name service notifications */
struct rpmsg_virtio_shm_pool;
/**
 * struct rpmsg_virtio_shm_pool - shared memory pool used for rpmsg buffers
 * @base: base address of the memory pool
 * @avail: available memory size
 * @size: total pool size
 */
struct rpmsg_virtio_shm_pool {
    void *base;
    size_t avail;
    size_t size;
};
/**
 * struct rpmsg_virtio_device - representation of a rpmsg device based on virtio
 * @rdev: rpmsg device, first member of the struct
 * @vdev: pointer to the virtio device
 * @rvq: pointer to receive virtqueue
 * @svq: pointer to send virtqueue
 * @shbuf_io: pointer to the shared buffer I/O region
 * @shpool: pointer to the shared buffers pool
 */
struct rpmsg_virtio_device {
    struct rpmsg_device rdev;
    struct virtio_device *vdev;
    struct virtqueue *rvq;
    struct virtqueue *svq;
    struct metal_io_region *shbuf_io;
    struct rpmsg_virtio_shm_pool *shpool;
};
/* RPMsg device roles, mapped onto the underlying virtio roles */
#define RPMSG_REMOTE VIRTIO_DEV_SLAVE
#define RPMSG_MASTER VIRTIO_DEV_MASTER
static inline unsigned int
rpmsg_virtio_get_role(struct rpmsg_virtio_device *rvdev)
{
return rvdev->vdev->role;
}
static inline void rpmsg_virtio_set_status(struct rpmsg_virtio_device *rvdev,
uint8_t status)
{
rvdev->vdev->func->set_status(rvdev->vdev, status);
}
static inline uint8_t rpmsg_virtio_get_status(struct rpmsg_virtio_device *rvdev)
{
return rvdev->vdev->func->get_status(rvdev->vdev);
}
static inline uint32_t
rpmsg_virtio_get_features(struct rpmsg_virtio_device *rvdev)
{
return rvdev->vdev->func->get_features(rvdev->vdev);
}
/*
 * Create the virtqueues of the underlying virtio device.
 * Thin wrapper around virtio_create_virtqueues(); returns its status.
 */
static inline int
rpmsg_virtio_create_virtqueues(struct rpmsg_virtio_device *rvdev,
                               int flags, unsigned int nvqs,
                               const char *names[],
                               vq_callback * callbacks[])
{
    struct virtio_device *vdev = rvdev->vdev;

    return virtio_create_virtqueues(vdev, flags, nvqs, names, callbacks);
}
/**
 * rpmsg_virtio_get_buffer_size - get rpmsg virtio buffer size
 *
 * @rdev - pointer to the rpmsg device
 *
 * @return - next available buffer size, negative value for failure
 */
int rpmsg_virtio_get_buffer_size(struct rpmsg_device *rdev);
/**
 * rpmsg_init_vdev - initialize rpmsg virtio device
 * Master side:
 * Initialize RPMsg virtio queues and shared buffers; the address of shm can be
 * ANY. In this case, the function will get shared memory from the system
 * shared memory pools. If the vdev has the RPMsg name service feature, this
 * API will create a name service endpoint.
 *
 * Slave side:
 * This API will not return until the driver ready status is set by the master
 * side.
 *
 * @param rvdev - pointer to the rpmsg virtio device
 * @param vdev - pointer to the virtio device
 * @param ns_bind_cb - callback handler for name service announcement without
 *                     local endpoints waiting to bind
 * @param shm_io - pointer to the shared memory I/O region
 * @param shpool - pointer to shared memory pool. rpmsg_virtio_init_shm_pool
 *                 has to be called first to fill this structure.
 *
 * @return - status of function execution
 */
int rpmsg_init_vdev(struct rpmsg_virtio_device *rvdev,
                    struct virtio_device *vdev,
                    rpmsg_ns_bind_cb ns_bind_cb,
                    struct metal_io_region *shm_io,
                    struct rpmsg_virtio_shm_pool *shpool);
/**
 * rpmsg_deinit_vdev - deinitialize rpmsg virtio device
 *
 * @param rvdev - pointer to the rpmsg virtio device
 */
void rpmsg_deinit_vdev(struct rpmsg_virtio_device *rvdev);
/**
 * rpmsg_virtio_init_shm_pool - initialize default shared buffers pool
 *
 * RPMsg virtio has a default shared buffers pool implementation.
 * The memory assigned to this pool will be dedicated to the RPMsg
 * virtio. This function has to be called before calling rpmsg_init_vdev,
 * to initialize the rpmsg_virtio_shm_pool structure.
 *
 * @param shpool - pointer to the shared buffers pool structure
 * @param shbuf - pointer to the beginning of shared buffers
 * @param size - shared buffers total size
 */
void rpmsg_virtio_init_shm_pool(struct rpmsg_virtio_shm_pool *shpool,
                                void *shbuf, size_t size);
/**
 * rpmsg_virtio_get_rpmsg_device - get RPMsg device from RPMsg virtio device
 *
 * @param rvdev - pointer to RPMsg virtio device
 * @return - the RPMsg device embedded in the RPMsg virtio device
 */
static inline struct rpmsg_device *
rpmsg_virtio_get_rpmsg_device(struct rpmsg_virtio_device *rvdev)
{
    struct rpmsg_device *rdev = &rvdev->rdev;

    return rdev;
}
/**
 * rpmsg_virtio_shm_pool_get_buffer - get buffer in the shared memory pool
 *
 * RPMsg virtio has a default shared buffers pool implementation.
 * The memory assigned to this pool will be dedicated to the RPMsg
 * virtio. If you prefer another shared buffers allocation scheme,
 * you can implement your own rpmsg_virtio_shm_pool_get_buffer function.
 *
 * @param shpool - pointer to the shared buffers pool
 * @param size - size of the requested buffer
 * @return - buffer pointer if a free buffer is available, NULL otherwise.
 */
metal_weak void *
rpmsg_virtio_shm_pool_get_buffer(struct rpmsg_virtio_shm_pool *shpool,
                                 size_t size);
#if defined __cplusplus
}
#endif
#endif /* _RPMSG_VIRTIO_H_ */
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef RSC_TABLE_PARSER_H
#define RSC_TABLE_PARSER_H
#include <openamp/remoteproc.h>
#if defined __cplusplus
extern "C" {
#endif
/* Supported resource table version */
#define RSC_TAB_SUPPORTED_VERSION 1
#define RSC_TAB_HEADER_SIZE 12
#define RSC_TAB_MAX_VRINGS 2
/* Standard control request handling. */
typedef int (*rsc_handler) (struct remoteproc *rproc, void *rsc);
/**
 * handle_rsc_table
 *
 * This function parses the resource table.
 *
 * @param rproc - pointer to remote remoteproc
 * @param rsc_table - resource table to parse
 * @param len - size of rsc table
 * @param io - pointer to the resource table I/O region
 *             It can be NULL if the resource table
 *             is in the local memory.
 *
 * @returns - execution status
 *
 */
int handle_rsc_table(struct remoteproc *rproc,
                     struct resource_table *rsc_table, int len,
                     struct metal_io_region *io);
/* Per-type resource handlers used while parsing the resource table */
int handle_carve_out_rsc(struct remoteproc *rproc, void *rsc);
int handle_trace_rsc(struct remoteproc *rproc, void *rsc);
int handle_vdev_rsc(struct remoteproc *rproc, void *rsc);
int handle_vendor_rsc(struct remoteproc *rproc, void *rsc);
/**
 * find_rsc
 *
 * Find the location of a resource of the given type in the resource table.
 *
 * @rsc_table - pointer to the resource table
 * @rsc_type - type of the resource
 * @index - index of the resource of the specified type
 *
 * return the offset to the resource on success, or 0 on failure
 */
size_t find_rsc(void *rsc_table, unsigned int rsc_type, unsigned int index);
#if defined __cplusplus
}
#endif
#endif /* RSC_TABLE_PARSER_H */
/*
* SPDX-License-Identifier: BSD-3-Clause
*
* $FreeBSD$
*/
#ifndef _VIRTIO_H_
#define _VIRTIO_H_
#include <openamp/virtqueue.h>
#include <metal/spinlock.h>
#if defined __cplusplus
extern "C" {
#endif
/* TODO: define this as compiler flags */
/* Maximum number of vrings per virtio device */
#ifndef VIRTIO_MAX_NUM_VRINGS
#define VIRTIO_MAX_NUM_VRINGS 2
#endif
/* VirtIO device IDs. */
#define VIRTIO_ID_NETWORK 0x01UL
#define VIRTIO_ID_BLOCK 0x02UL
#define VIRTIO_ID_CONSOLE 0x03UL
#define VIRTIO_ID_ENTROPY 0x04UL
#define VIRTIO_ID_BALLOON 0x05UL
#define VIRTIO_ID_IOMEMORY 0x06UL
#define VIRTIO_ID_RPMSG 0x07UL /* remote processor messaging */
#define VIRTIO_ID_SCSI 0x08UL
#define VIRTIO_ID_9P 0x09UL
/*
 * Wildcard device ID: matches any virtio device.
 * BUGFIX: was "(-1)UL", which is not valid C -- an integer suffix cannot
 * be applied to a parenthesized expression, so the macro failed to compile
 * wherever it was expanded. "(-1UL)" keeps the intended all-ones value.
 */
#define VIRTIO_DEV_ANY_ID (-1UL)
/* Status byte for guest to report progress. */
#define VIRTIO_CONFIG_STATUS_ACK 0x01
#define VIRTIO_CONFIG_STATUS_DRIVER 0x02
#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
#define VIRTIO_CONFIG_STATUS_NEEDS_RESET 0x40
#define VIRTIO_CONFIG_STATUS_FAILED 0x80
/* Virtio device role */
#define VIRTIO_DEV_MASTER 0UL
#define VIRTIO_DEV_SLAVE 1UL
/* Device/vendor identification pair for a virtio device */
struct virtio_device_id {
    uint32_t device;
    uint32_t vendor;
};
/*
 * Generate interrupt when the virtqueue ring is
 * completely used, even if we've suppressed them.
 */
#define VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24)
/*
 * The guest should never negotiate this feature; it
 * is used to detect faulty drivers.
 */
#define VIRTIO_F_BAD_FEATURE (1 << 30)
/*
 * Some VirtIO feature bits (currently bits 28 through 31) are
 * reserved for the transport being used (eg. virtio_ring), the
 * rest are per-device feature bits.
 */
#define VIRTIO_TRANSPORT_F_START 28
#define VIRTIO_TRANSPORT_F_END 32
/* User-registered callback invoked when the virtio device is reset */
typedef void (*virtio_dev_reset_cb)(struct virtio_device *vdev);
struct virtio_dispatch;
/* Maps a feature bit value to a human-readable name */
struct virtio_feature_desc {
    uint32_t vfd_val;
    const char *vfd_str;
};
/**
 * struct virtio_buffer_info
 *
 * This structure is maintained by the hardware interface layer for
 * shared memory information. The shared memory provides buffers
 * for use by the vring to exchange messages between the cores.
 *
 */
struct virtio_buffer_info {
    /* Start address of shared memory used for buffers. */
    void *vaddr;
    /* Start physical address of shared memory used for buffers. */
    metal_phys_addr_t paddr;
    /* Shared memory I/O region. */
    struct metal_io_region *io;
    /* Size of shared memory. */
    unsigned long size;
};
/**
 * struct virtio_vring_info - virtio vring information
 * @vq virtio queue
 * @info vring allocation information
 * @notifyid vring notify id
 * @io metal I/O region of the vring memory, can be NULL
 */
struct virtio_vring_info {
    struct virtqueue *vq;
    struct vring_alloc_info info;
    uint32_t notifyid;
    struct metal_io_region *io;
};
/*
 * Structure definition for virtio devices for use by the
 * applications/drivers
 */
struct virtio_device {
    uint32_t index; /**< unique position on the virtio bus */
    struct virtio_device_id id; /**< the device type identification
                     * (used to match it with a driver)
                     */
    uint64_t features; /**< the features supported by both ends. */
    unsigned int role; /**< if it is virtio backend or front end. */
    virtio_dev_reset_cb reset_cb; /**< user registered device callback */
    const struct virtio_dispatch *func; /**< Virtio dispatch table */
    void *priv; /**< TODO: remove pointer to virtio_device private data */
    unsigned int vrings_num; /**< number of vrings */
    struct virtio_vring_info *vrings_info; /**< array of vrings_num entries */
};
/*
 * Helper functions.
 */
/* Return a printable name for a virtio device type id. */
const char *virtio_dev_name(uint16_t devid);
/* Log @features using the names supplied in @feature_desc. */
void virtio_describe(struct virtio_device *dev, const char *msg,
             uint32_t features,
             struct virtio_feature_desc *feature_desc);
/*
 * Functions for virtio device configuration as defined in Rusty Russell's
 * paper.
 * Drivers are expected to implement these functions in their respective codes.
 */
struct virtio_dispatch {
    uint8_t (*get_status)(struct virtio_device *dev);
    void (*set_status)(struct virtio_device *dev, uint8_t status);
    uint32_t (*get_features)(struct virtio_device *dev);
    void (*set_features)(struct virtio_device *dev, uint32_t feature);
    uint32_t (*negotiate_features)(struct virtio_device *dev,
                       uint32_t features);
    /*
     * Read/write a variable amount from the device specific (ie, network)
     * configuration region. This region is encoded in the same endian as
     * the guest.
     */
    void (*read_config)(struct virtio_device *dev, uint32_t offset,
                void *dst, int length);
    void (*write_config)(struct virtio_device *dev, uint32_t offset,
                 void *src, int length);
    void (*reset_device)(struct virtio_device *dev);
    /* Notify the other side that a virtqueue was updated. */
    void (*notify)(struct virtqueue *vq);
};
/* Create @nvqs virtqueues for @vdev, one per name/callback pair. */
int virtio_create_virtqueues(struct virtio_device *vdev, unsigned int flags,
                 unsigned int nvqs, const char *names[],
                 vq_callback *callbacks[]);
#if defined __cplusplus
}
#endif
#endif /* _VIRTIO_H_ */
/*
* Copyright Rusty Russell IBM Corporation 2007.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* $FreeBSD$
*/
#ifndef VIRTIO_RING_H
#define VIRTIO_RING_H
#if defined __cplusplus
extern "C" {
#endif
/* Descriptor flags (vring_desc.flags). */
/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4
/* The Host uses this in used->flags to advise the Guest: don't kick me
 * when you add a buffer. It's unreliable, so it's simply an
 * optimization. Guest will still kick if it's out of buffers.
 */
#define VRING_USED_F_NO_NOTIFY 1
/* The Guest uses this in avail->flags to advise the Host: don't
 * interrupt me when you consume a buffer. It's unreliable, so it's
 * simply an optimization.
 */
#define VRING_AVAIL_F_NO_INTERRUPT 1
/* VirtIO ring descriptors: 16 bytes.
 * These can chain together via "next".
 */
struct vring_desc {
    /* Address (guest-physical). */
    uint64_t addr;
    /* Length. */
    uint32_t len;
    /* The flags as indicated above. */
    uint16_t flags;
    /* We chain unused descriptors via this, too. */
    uint16_t next;
};
/* Ring of descriptor heads offered by the driver; idx free-runs. */
struct vring_avail {
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0]; /* zero-length tail; real size fixed by vring_init() */
};
/* uint32_t is used here for ids for padding reasons. */
struct vring_used_elem {
    /* Index of start of used descriptor chain. */
    uint32_t id;
    /* Total length of the descriptor chain which was written to. */
    uint32_t len;
};
/* Ring of consumed descriptor chains; idx free-runs. */
struct vring_used {
    uint16_t flags;
    uint16_t idx;
    struct vring_used_elem ring[0];
};
/* Pointers into one contiguous vring memory block (layout described below). */
struct vring {
    unsigned int num;
    struct vring_desc *desc;
    struct vring_avail *avail;
    struct vring_used *used;
};
/* The standard layout for the ring is a continuous chunk of memory which
* looks like this. We assume num is a power of 2.
*
* struct vring {
* // The actual descriptors (16 bytes each)
* struct vring_desc desc[num];
*
* // A ring of available descriptor heads with free-running index.
* __u16 avail_flags;
* __u16 avail_idx;
* __u16 available[num];
* __u16 used_event_idx;
*
* // Padding to the next align boundary.
* char pad[];
*
* // A ring of used descriptor heads with free-running index.
* __u16 used_flags;
* __u16 used_idx;
* struct vring_used_elem used[num];
* __u16 avail_event_idx;
* };
*
* NOTE: for VirtIO PCI, align is 4096.
*/
/*
 * We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility.
 */
/* Event-index slots (used with VIRTIO_RING_F_EVENT_IDX), stored past ring[num]. */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) ((vr)->used->ring[(vr)->num].id & 0xFFFF)
/* Total bytes needed for a vring of @num descriptors, with the used
 * ring rounded up to an @align boundary (align must be a power of two).
 */
static inline int vring_size(unsigned int num, unsigned long align)
{
    /* Descriptor table + avail ring (flags, idx, ring[num], used_event). */
    int total = num * sizeof(struct vring_desc)
            + sizeof(struct vring_avail)
            + num * sizeof(uint16_t)
            + sizeof(uint16_t);

    /* Round up so the used ring starts aligned. */
    total = (total + align - 1) & ~(align - 1);
    /* Used ring (flags, idx, ring[num], avail_event). */
    total += sizeof(struct vring_used)
         + num * sizeof(struct vring_used_elem)
         + sizeof(uint16_t);
    return total;
}
/* Point @vr's desc/avail/used members into the single memory block @p,
 * using the same layout vring_size() accounts for.
 */
static inline void
vring_init(struct vring *vr, unsigned int num, uint8_t *p, unsigned long align)
{
    unsigned long used_addr;

    vr->num = num;
    vr->desc = (struct vring_desc *)p;
    vr->avail = (struct vring_avail *)(p + num * sizeof(struct vring_desc));
    /* Used ring follows avail->ring[num] plus the used_event slot,
     * rounded up to the next align boundary.
     */
    used_addr = (unsigned long)&vr->avail->ring[num] + sizeof(uint16_t);
    used_addr = (used_addr + align - 1) & ~(align - 1);
    vr->used = (struct vring_used *)used_addr;
}
/*
* The following is used with VIRTIO_RING_F_EVENT_IDX.
*
* Assuming a given event_idx value from the other size, if we have
* just incremented index from old to new_idx, should we trigger an
* event?
*/
/* Given the peer's event_idx and our move from @old to @new_idx, decide
 * whether an event must be raised: true iff event_idx lies in the
 * half-open window (old, new_idx], computed modulo 2^16.
 */
static inline int
vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
    uint16_t advanced = new_idx - old;
    uint16_t since_event = new_idx - event_idx - 1;

    return since_event < advanced;
}
#if defined __cplusplus
}
#endif
#endif /* VIRTIO_RING_H */
#ifndef VIRTQUEUE_H_
#define VIRTQUEUE_H_
/*-
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* SPDX-License-Identifier: BSD-2-Clause
*
* $FreeBSD$
*/
#include <stdbool.h>
#include <stdint.h>
#if defined __cplusplus
extern "C" {
#endif
/* Legacy boolean type used by the debug bookkeeping below. */
typedef uint8_t boolean;
#include <openamp/virtio_ring.h>
#include <metal/alloc.h>
#include <metal/io.h>
/* Error Codes */
#define VQ_ERROR_BASE -3000
#define ERROR_VRING_FULL (VQ_ERROR_BASE - 1)
#define ERROR_INVLD_DESC_IDX (VQ_ERROR_BASE - 2)
#define ERROR_EMPTY_RING (VQ_ERROR_BASE - 3)
#define ERROR_NO_MEM (VQ_ERROR_BASE - 4)
#define ERROR_VRING_MAX_DESC (VQ_ERROR_BASE - 5)
#define ERROR_VRING_ALIGN (VQ_ERROR_BASE - 6)
#define ERROR_VRING_NO_BUFF (VQ_ERROR_BASE - 7)
#define ERROR_VQUEUE_INVLD_PARAM (VQ_ERROR_BASE - 8)
#define VQUEUE_SUCCESS 0
/* The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768
#define VIRTQUEUE_FLAG_INDIRECT 0x0001
#define VIRTQUEUE_FLAG_EVENT_IDX 0x0002
#define VIRTQUEUE_MAX_NAME_SZ 32
/* Support for indirect buffer descriptors. */
#define VIRTIO_RING_F_INDIRECT_DESC (1 << 28)
/* Support to suppress interrupt until specific index is reached. */
#define VIRTIO_RING_F_EVENT_IDX (1 << 29)
/* One scatter/gather element passed to virtqueue_add_buffer(). */
struct virtqueue_buf {
    void *buf;
    int len;
};
/* Run-time state of one virtqueue. */
struct virtqueue {
    struct virtio_device *vq_dev; /* owning virtio device */
    const char *vq_name;
    uint16_t vq_queue_index;
    uint16_t vq_nentries;         /* number of ring descriptors */
    uint32_t vq_flags;            /* VIRTQUEUE_FLAG_* bits */
    void (*callback)(struct virtqueue *vq);
    void (*notify)(struct virtqueue *vq);
    struct vring vq_ring;
    uint16_t vq_free_cnt;
    uint16_t vq_queued_cnt;
    void *shm_io; /* opaque pointer to data needed to allow v2p & p2v */
    /*
     * Head of the free chain in the descriptor table. If
     * there are no free descriptors, this will be set to
     * VQ_RING_DESC_CHAIN_END.
     */
    uint16_t vq_desc_head_idx;
    /*
     * Last consumed descriptor in the used table,
     * trails vq_ring.used->idx.
     */
    uint16_t vq_used_cons_idx;
    /*
     * Last consumed descriptor in the available table -
     * used by the consumer side.
     */
    uint16_t vq_available_idx;
#ifdef VQUEUE_DEBUG
    boolean vq_inuse; /* re-entrancy guard, see VQUEUE_BUSY/VQUEUE_IDLE */
#endif
    /*
     * Used by the host side during callback. Cookie
     * holds the address of buffer received from other side.
     * Other fields in this structure are not used currently.
     */
    struct vq_desc_extra {
        void *cookie;
        uint16_t ndescs;
    } vq_descx[0]; /* flexible tail sized by virtqueue_allocate() */
};
/* struct to hold vring specific information */
struct vring_alloc_info {
    void *vaddr;        /* base address of the vring memory */
    uint32_t align;     /* ring alignment */
    uint16_t num_descs; /* number of descriptors */
    uint16_t pad;       /* padding for alignment */
};
typedef void vq_callback(struct virtqueue *);
typedef void vq_notify(struct virtqueue *);
#ifdef VQUEUE_DEBUG
#include <metal/log.h>
#include <metal/assert.h>
/* Log and assert when _exp is false.
 * Fix: "_msg" was written inside the format string literal, where macro
 * parameters are not substituted, so the actual message never printed.
 * Pass it as a "%s" argument instead.
 */
#define VQASSERT(_vq, _exp, _msg) \
    do { \
        if (!(_exp)) { \
            metal_log(METAL_LOG_EMERGENCY, \
                  "%s: %s - %s", __func__, (_vq)->vq_name, (_msg)); \
            metal_assert(_exp); \
        } \
    } while (0)
#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx) \
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries, "invalid ring index")
#define VQ_RING_ASSERT_CHAIN_TERM(_vq) \
    VQASSERT((_vq), (_vq)->vq_desc_head_idx == \
        VQ_RING_DESC_CHAIN_END, \
        "full ring terminated incorrectly: invalid head")
/* Record status_err into status_var when condition holds and no earlier
 * error has been recorded.
 */
#define VQ_PARAM_CHK(condition, status_var, status_err) \
    do { \
        if (((status_var) == 0) && (condition)) { \
            status_var = status_err; \
        } \
    } while (0)
#define VQUEUE_BUSY(vq) \
    do { \
        if (!(vq)->vq_inuse) \
            (vq)->vq_inuse = true; \
        else \
            VQASSERT(vq, !(vq)->vq_inuse,\
                 "VirtQueue already in use") \
    } while (0)
#define VQUEUE_IDLE(vq) ((vq)->vq_inuse = false)
#else
/* Debugging disabled: all checks compile to nothing. */
#define KASSERT(cond, str)
#define VQASSERT(_vq, _exp, _msg)
#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)
#define VQ_RING_ASSERT_CHAIN_TERM(_vq)
#define VQ_PARAM_CHK(condition, status_var, status_err)
#define VQUEUE_BUSY(vq)
#define VQUEUE_IDLE(vq)
#endif
/* Initialize @v_queue for queue @id of @device over the vring described
 * by @ring, storing the @callback and @notify handlers.
 */
int virtqueue_create(struct virtio_device *device, unsigned short id,
             const char *name, struct vring_alloc_info *ring,
             void (*callback)(struct virtqueue *vq),
             void (*notify)(struct virtqueue *vq),
             struct virtqueue *v_queue);
/*
 * virtqueue_set_shmem_io
 *
 * set virtqueue shared memory I/O region
 *
 * @vq - virt queue
 * @io - pointer to the shared memory I/O region
 */
static inline void virtqueue_set_shmem_io(struct virtqueue *vq,
                      struct metal_io_region *io)
{
    vq->shm_io = io;
}
/* Enqueue the buf_list chain (readable then writable entries) tagged with
 * @cookie.
 */
int virtqueue_add_buffer(struct virtqueue *vq, struct virtqueue_buf *buf_list,
             int readable, int writable, void *cookie);
/* Dequeue the next used buffer; returns its cookie, or NULL when empty. */
void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx);
/* Peek the next available buffer without consuming it. */
void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx,
                     uint32_t *len);
/* Return a buffer chain starting at @head_idx to the used ring. */
int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx,
                  uint32_t len);
void virtqueue_disable_cb(struct virtqueue *vq);
int virtqueue_enable_cb(struct virtqueue *vq);
/* Notify the other side that buffers were added. */
void virtqueue_kick(struct virtqueue *vq);
/* Allocate a zeroed virtqueue with room for @num_desc_extra trailing
 * vq_desc_extra entries; NULL on allocation failure.
 */
static inline struct virtqueue *virtqueue_allocate(unsigned int num_desc_extra)
{
    uint32_t total = sizeof(struct virtqueue) +
             num_desc_extra * sizeof(struct vq_desc_extra);
    struct virtqueue *vq = (struct virtqueue *)metal_allocate_memory(total);

    if (vq)
        memset(vq, 0x00, total);
    return vq;
}
/* Release a virtqueue obtained from virtqueue_allocate(). */
void virtqueue_free(struct virtqueue *vq);
/* Dump virtqueue state for debugging. */
void virtqueue_dump(struct virtqueue *vq);
/* Handle a notification/interrupt for this queue. */
void virtqueue_notification(struct virtqueue *vq);
uint32_t virtqueue_get_desc_size(struct virtqueue *vq);
uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx);
#if defined __cplusplus
}
#endif
#endif /* VIRTQUEUE_H_ */
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <string.h>
#include <metal/alloc.h>
#include <metal/log.h>
#include <openamp/elf_loader.h>
#include <openamp/remoteproc.h>
/* Return 1 for a 64-bit ELF image, 0 for 32-bit, based on EI_CLASS. */
static int elf_is_64(const void *elf_info)
{
    const unsigned char *ident = elf_info;

    return ident[EI_CLASS] == ELFCLASS64;
}
/* Size of the ELF header for this image; with no image yet (NULL),
 * assume the larger 64-bit header so enough bytes are requested.
 */
static size_t elf_ehdr_size(const void *elf_info)
{
    if (elf_info == NULL || elf_is_64(elf_info))
        return sizeof(Elf64_Ehdr);
    return sizeof(Elf32_Ehdr);
}
/* File offset of the program-header table, for either ELF class. */
static size_t elf_phoff(const void *elf_info)
{
    if (elf_is_64(elf_info))
        return ((const Elf64_Ehdr *)elf_info)->e_phoff;
    return ((const Elf32_Ehdr *)elf_info)->e_phoff;
}
/* Size of one program-header entry, for either ELF class. */
static size_t elf_phentsize(const void *elf_info)
{
    if (elf_is_64(elf_info))
        return ((const Elf64_Ehdr *)elf_info)->e_phentsize;
    return ((const Elf32_Ehdr *)elf_info)->e_phentsize;
}
/* Number of program-header entries, for either ELF class. */
static int elf_phnum(const void *elf_info)
{
    if (elf_is_64(elf_info))
        return ((const Elf64_Ehdr *)elf_info)->e_phnum;
    return ((const Elf32_Ehdr *)elf_info)->e_phnum;
}
/* File offset of the section-header table, for either ELF class. */
static size_t elf_shoff(const void *elf_info)
{
    if (elf_is_64(elf_info))
        return ((const Elf64_Ehdr *)elf_info)->e_shoff;
    return ((const Elf32_Ehdr *)elf_info)->e_shoff;
}
/* Size of one section-header entry, for either ELF class. */
static size_t elf_shentsize(const void *elf_info)
{
    if (elf_is_64(elf_info))
        return ((const Elf64_Ehdr *)elf_info)->e_shentsize;
    return ((const Elf32_Ehdr *)elf_info)->e_shentsize;
}
/* Number of section-header entries, for either ELF class. */
static int elf_shnum(const void *elf_info)
{
    if (elf_is_64(elf_info))
        return ((const Elf64_Ehdr *)elf_info)->e_shnum;
    return ((const Elf32_Ehdr *)elf_info)->e_shnum;
}
/* Index of the section-name string table section, for either ELF class. */
static int elf_shstrndx(const void *elf_info)
{
    if (elf_is_64(elf_info))
        return ((const Elf64_Ehdr *)elf_info)->e_shstrndx;
    return ((const Elf32_Ehdr *)elf_info)->e_shstrndx;
}
/* Address of the cached program-header table pointer inside the
 * loader-private info structure.
 */
static void *elf_phtable_ptr(void *elf_info)
{
    if (elf_is_64(elf_info))
        return &((struct elf64_info *)elf_info)->phdrs;
    return &((struct elf32_info *)elf_info)->phdrs;
}
/* Address of the cached section-header table pointer inside the
 * loader-private info structure.
 */
static void *elf_shtable_ptr(void *elf_info)
{
    if (elf_is_64(elf_info))
        return &((struct elf64_info *)elf_info)->shdrs;
    return &((struct elf32_info *)elf_info)->shdrs;
}
/* Address of the cached section-name string table pointer. */
static void **elf_shstrtab_ptr(void *elf_info)
{
    if (elf_is_64(elf_info))
        return &((struct elf64_info *)elf_info)->shstrtab;
    return &((struct elf32_info *)elf_info)->shstrtab;
}
/* Address of the loader state word inside the info structure. */
static unsigned int *elf_load_state(void *elf_info)
{
    if (elf_is_64(elf_info))
        return &((struct elf64_info *)elf_info)->load_state;
    return &((struct elf32_info *)elf_info)->load_state;
}
/* Copy the requested fields out of one program header (32- or 64-bit).
 * Any out pointer may be NULL to skip that field.
 * Fix: the 64-bit branch had the p_vaddr NULL check duplicated.
 */
static void elf_parse_segment(void *elf_info, const void *elf_phdr,
                  unsigned int *p_type, size_t *p_offset,
                  metal_phys_addr_t *p_vaddr,
                  metal_phys_addr_t *p_paddr,
                  size_t *p_filesz, size_t *p_memsz)
{
    if (elf_is_64(elf_info) == 0) {
        const Elf32_Phdr *phdr = elf_phdr;

        if (p_type != NULL)
            *p_type = (unsigned int)phdr->p_type;
        if (p_offset != NULL)
            *p_offset = (size_t)phdr->p_offset;
        if (p_vaddr != NULL)
            *p_vaddr = (metal_phys_addr_t)phdr->p_vaddr;
        if (p_paddr != NULL)
            *p_paddr = (metal_phys_addr_t)phdr->p_paddr;
        if (p_filesz != NULL)
            *p_filesz = (size_t)phdr->p_filesz;
        if (p_memsz != NULL)
            *p_memsz = (size_t)phdr->p_memsz;
    } else {
        const Elf64_Phdr *phdr = elf_phdr;

        if (p_type != NULL)
            *p_type = (unsigned int)phdr->p_type;
        if (p_offset != NULL)
            *p_offset = (size_t)phdr->p_offset;
        if (p_vaddr != NULL)
            *p_vaddr = (metal_phys_addr_t)phdr->p_vaddr;
        if (p_paddr != NULL)
            *p_paddr = (metal_phys_addr_t)phdr->p_paddr;
        if (p_filesz != NULL)
            *p_filesz = (size_t)phdr->p_filesz;
        if (p_memsz != NULL)
            *p_memsz = (size_t)phdr->p_memsz;
    }
}
/* Return program header @index, or NULL if the table is absent or the
 * index is out of range.
 * Fix: the bound check was `index > e_phnum`, which let index == e_phnum
 * through and returned a pointer one entry past the end of the table.
 */
static const void *elf_get_segment_from_index(void *elf_info, int index)
{
    if (elf_is_64(elf_info) == 0) {
        const struct elf32_info *einfo = elf_info;
        const Elf32_Ehdr *ehdr = &einfo->ehdr;
        const Elf32_Phdr *phdrs = einfo->phdrs;

        if (phdrs == NULL)
            return NULL;
        if (index < 0 || index >= ehdr->e_phnum)
            return NULL;
        return &phdrs[index];
    } else {
        const struct elf64_info *einfo = elf_info;
        const Elf64_Ehdr *ehdr = &einfo->ehdr;
        const Elf64_Phdr *phdrs = einfo->phdrs;

        if (phdrs == NULL)
            return NULL;
        if (index < 0 || index >= ehdr->e_phnum)
            return NULL;
        return &phdrs[index];
    }
}
/* Look up a section header by name via the shstrtab; NULL when the
 * headers/string table are not loaded or no section matches.
 */
static void *elf_get_section_from_name(void *elf_info, const char *name)
{
    unsigned int i;
    const char *strtab;

    if (elf_is_64(elf_info)) {
        struct elf64_info *einfo = elf_info;
        Elf64_Shdr *shdr = einfo->shdrs;

        strtab = einfo->shstrtab;
        if (shdr == NULL || strtab == NULL)
            return NULL;
        for (i = 0; i < einfo->ehdr.e_shnum; i++, shdr++)
            if (strcmp(name, strtab + shdr->sh_name) == 0)
                return shdr;
    } else {
        struct elf32_info *einfo = elf_info;
        Elf32_Shdr *shdr = einfo->shdrs;

        strtab = einfo->shstrtab;
        if (shdr == NULL || strtab == NULL)
            return NULL;
        for (i = 0; i < einfo->ehdr.e_shnum; i++, shdr++)
            if (strcmp(name, strtab + shdr->sh_name) == 0)
                return shdr;
    }
    return NULL;
}
/* Return section header @index, or NULL if the table is absent or the
 * index is out of range.
 * Fix: the bound check was `index > e_shnum` (off by one) and a negative
 * index was never rejected; both produced out-of-bounds pointers.
 */
static void *elf_get_section_from_index(void *elf_info, int index)
{
    if (elf_is_64(elf_info) == 0) {
        struct elf32_info *einfo = elf_info;

        if (einfo->shdrs == NULL)
            return NULL;
        if (index < 0 || index >= einfo->ehdr.e_shnum)
            return NULL;
        return &einfo->shdrs[index];
    } else {
        struct elf64_info *einfo = elf_info;

        if (einfo->shdrs == NULL)
            return NULL;
        if (index < 0 || index >= einfo->ehdr.e_shnum)
            return NULL;
        return &einfo->shdrs[index];
    }
}
/* Copy the requested fields out of one section header (32- or 64-bit).
 * Any out pointer may be NULL to skip that field. For 64-bit images the
 * address is masked down to the platform physical-address width.
 */
static void elf_parse_section(void *elf_info, void *elf_shdr,
                  unsigned int *sh_type, unsigned int *sh_flags,
                  metal_phys_addr_t *sh_addr,
                  size_t *sh_offset, size_t *sh_size,
                  unsigned int *sh_link, unsigned int *sh_info,
                  unsigned int *sh_addralign,
                  size_t *sh_entsize)
{
    if (elf_is_64(elf_info) == 0) {
        Elf32_Shdr *shdr = elf_shdr;

        if (sh_type != NULL)
            *sh_type = shdr->sh_type;
        if (sh_flags != NULL)
            *sh_flags = shdr->sh_flags;
        if (sh_addr != NULL)
            *sh_addr = (metal_phys_addr_t)shdr->sh_addr;
        if (sh_offset != NULL)
            *sh_offset = shdr->sh_offset;
        if (sh_size != NULL)
            *sh_size = shdr->sh_size;
        if (sh_link != NULL)
            *sh_link = shdr->sh_link;
        if (sh_info != NULL)
            *sh_info = shdr->sh_info;
        if (sh_addralign != NULL)
            *sh_addralign = shdr->sh_addralign;
        if (sh_entsize != NULL)
            *sh_entsize = shdr->sh_entsize;
    } else {
        Elf64_Shdr *shdr = elf_shdr;

        if (sh_type != NULL)
            *sh_type = shdr->sh_type;
        if (sh_flags != NULL)
            *sh_flags = shdr->sh_flags;
        if (sh_addr != NULL)
            /* truncate to metal_phys_addr_t width */
            *sh_addr = (metal_phys_addr_t)(shdr->sh_addr &
                    (metal_phys_addr_t)(-1));
        if (sh_offset != NULL)
            *sh_offset = shdr->sh_offset;
        if (sh_size != NULL)
            *sh_size = shdr->sh_size;
        if (sh_link != NULL)
            *sh_link = shdr->sh_link;
        if (sh_info != NULL)
            *sh_info = shdr->sh_info;
        if (sh_addralign != NULL)
            *sh_addralign = shdr->sh_addralign;
        if (sh_entsize != NULL)
            *sh_entsize = shdr->sh_entsize;
    }
}
/* Find the next PT_LOAD program header at or after *nseg, leaving *nseg
 * one past the returned segment. Returns NULL when no further loadable
 * segment exists. On success the segment's device address, file offset
 * and file/memory sizes are stored through the out pointers.
 */
static const void *elf_next_load_segment(void *elf_info, int *nseg,
                     metal_phys_addr_t *da,
                     size_t *noffset, size_t *nfsize,
                     size_t *nmsize)
{
    const void *phdr;
    unsigned int p_type = PT_NULL;

    if (elf_info == NULL || nseg == NULL)
        return NULL;
    /* Skip non-PT_LOAD entries; stop at table end (NULL). */
    while(p_type != PT_LOAD) {
        phdr = elf_get_segment_from_index(elf_info, *nseg);
        if (phdr == NULL)
            return NULL;
        elf_parse_segment(elf_info, phdr, &p_type, noffset,
                  da, NULL, nfsize, nmsize);
        *nseg = *nseg + 1;
    }
    return phdr;
}
/* Size of the loader-private info structure for this image's ELF class. */
static size_t elf_info_size(const void *img_data)
{
    return elf_is_64(img_data) ? sizeof(struct elf64_info)
                   : sizeof(struct elf32_info);
}
/* Return 0 when img_data starts with a valid ELF magic, else -RPROC_EINVAL.
 * NULL data or fewer than SELFMAG bytes is rejected up front.
 */
int elf_identify(const void *img_data, size_t len)
{
    if (img_data == NULL || len < SELFMAG)
        return -RPROC_EINVAL;
    return memcmp(img_data, ELFMAG, SELFMAG) ? -RPROC_EINVAL : 0;
}
/* Incrementally parse the ELF file header, program headers, section
 * headers and the section-name string table.
 *
 * The caller supplies a window of the image (img_data = bytes at file
 * offset `offset`, length `len`). Whenever more bytes are needed, the
 * wanted window is reported through *noffset/*nlen and the current state
 * is returned so the caller can come back with that data.
 *
 * @img_data: current image window
 * @offset: file offset of img_data
 * @len: length of img_data
 * @img_info: in/out loader-private state (allocated here on first call)
 * @last_load_state: state returned by the previous call (ELF_STATE_INIT
 *                   to start)
 * @noffset/@nlen: out, next file window wanted
 *
 * Returns the new state (>= 0) or a negative error (-ENOMEM,
 * -RPROC_EINVAL).
 */
int elf_load_header(const void *img_data, size_t offset, size_t len,
            void **img_info, int last_load_state,
            size_t *noffset, size_t *nlen)
{
    unsigned int *load_state;

    metal_assert(noffset != NULL);
    metal_assert(nlen != NULL);
    /* Get ELF header */
    if (last_load_state == ELF_STATE_INIT) {
        size_t tmpsize;

        metal_log(METAL_LOG_DEBUG, "Loading ELF headering\r\n");
        tmpsize = elf_ehdr_size(img_data);
        if (len < tmpsize) {
            /* Not enough bytes yet: ask for the full header. */
            *noffset = 0;
            *nlen = tmpsize;
            return ELF_STATE_INIT;
        } else {
            size_t infosize = elf_info_size(img_data);

            if (*img_info == NULL) {
                *img_info = metal_allocate_memory(infosize);
                if (*img_info == NULL)
                    return -ENOMEM;
                memset(*img_info, 0, infosize);
            }
            /* The ehdr is the first member of elf32/64_info. */
            memcpy(*img_info, img_data, tmpsize);
            load_state = elf_load_state(*img_info);
            *load_state = ELF_STATE_WAIT_FOR_PHDRS;
            last_load_state = ELF_STATE_WAIT_FOR_PHDRS;
        }
    }
    metal_assert(*img_info != NULL);
    load_state = elf_load_state(*img_info);
    if (last_load_state != (int)*load_state)
        return -RPROC_EINVAL;
    /* Get ELF program headers */
    if (*load_state == ELF_STATE_WAIT_FOR_PHDRS) {
        size_t phdrs_size;
        size_t phdrs_offset;
        char **phdrs;
        const void *img_phdrs;

        metal_log(METAL_LOG_DEBUG, "Loading ELF program header.\r\n");
        phdrs_offset = elf_phoff(*img_info);
        phdrs_size = elf_phnum(*img_info) * elf_phentsize(*img_info);
        if (offset > phdrs_offset ||
            offset + len < phdrs_offset + phdrs_size) {
            /* Table not fully inside this window: request it. */
            *noffset = phdrs_offset;
            *nlen = phdrs_size;
            return (int)*load_state;
        }
        /* calculate the program headers offset to the image_data */
        phdrs_offset -= offset;
        img_phdrs = (const void *)
            ((const char *)img_data + phdrs_offset);
        phdrs = (char **)elf_phtable_ptr(*img_info);
        (*phdrs) = metal_allocate_memory(phdrs_size);
        if (*phdrs == NULL)
            return -ENOMEM;
        memcpy((void *)(*phdrs), img_phdrs, phdrs_size);
        /* Segments can be loaded from here on. */
        *load_state = ELF_STATE_WAIT_FOR_SHDRS |
                  RPROC_LOADER_READY_TO_LOAD;
    }
    /* Get ELF Section Headers */
    if ((*load_state & ELF_STATE_WAIT_FOR_SHDRS) != 0) {
        size_t shdrs_size;
        size_t shdrs_offset;
        char **shdrs;
        const void *img_shdrs;

        metal_log(METAL_LOG_DEBUG, "Loading ELF section header.\r\n");
        shdrs_offset = elf_shoff(*img_info);
        if (elf_shnum(*img_info) == 0) {
            /* No sections at all: headers are complete. */
            *load_state = (*load_state & (~ELF_STATE_MASK)) |
                      ELF_STATE_HDRS_COMPLETE;
            *nlen = 0;
            return (int)*load_state;
        }
        shdrs_size = elf_shnum(*img_info) * elf_shentsize(*img_info);
        if (offset > shdrs_offset ||
            offset + len < shdrs_offset + shdrs_size) {
            *noffset = shdrs_offset;
            *nlen = shdrs_size;
            return (int)*load_state;
        }
        /* calculate the section headers offset to the image_data */
        shdrs_offset -= offset;
        img_shdrs = (const void *)
            ((const char *)img_data + shdrs_offset);
        shdrs = (char **)elf_shtable_ptr(*img_info);
        (*shdrs) = metal_allocate_memory(shdrs_size);
        if (*shdrs == NULL)
            return -ENOMEM;
        memcpy((void *)*shdrs, img_shdrs, shdrs_size);
        *load_state = (*load_state & (~ELF_STATE_MASK)) |
                  ELF_STATE_WAIT_FOR_SHSTRTAB;
        metal_log(METAL_LOG_DEBUG,
              "Loading ELF section header complete.\r\n");
    }
    /* Get ELF SHSTRTAB section */
    if ((*load_state & ELF_STATE_WAIT_FOR_SHSTRTAB) != 0) {
        size_t shstrtab_size;
        size_t shstrtab_offset;
        int shstrndx;
        void *shdr;
        void **shstrtab;

        metal_log(METAL_LOG_DEBUG, "Loading ELF shstrtab.\r\n");
        shstrndx = elf_shstrndx(*img_info);
        shdr = elf_get_section_from_index(*img_info, shstrndx);
        if (shdr == NULL)
            return -RPROC_EINVAL;
        elf_parse_section(*img_info, shdr, NULL, NULL,
                  NULL, &shstrtab_offset,
                  &shstrtab_size, NULL, NULL,
                  NULL, NULL);
        if (offset > shstrtab_offset ||
            offset + len < shstrtab_offset + shstrtab_size) {
            *noffset = shstrtab_offset;
            *nlen = shstrtab_size;
            return (int)*load_state;
        }
        /* Calculate shstrtab section offset to the input image data */
        shstrtab_offset -= offset;
        shstrtab = elf_shstrtab_ptr(*img_info);
        *shstrtab = metal_allocate_memory(shstrtab_size);
        if (*shstrtab == NULL)
            return -ENOMEM;
        memcpy(*shstrtab,
               (const void *)((const char *)img_data + shstrtab_offset),
               shstrtab_size);
        /* All headers are now in memory. */
        *load_state = (*load_state & (~ELF_STATE_MASK)) |
                  ELF_STATE_HDRS_COMPLETE;
        *nlen = 0;
        return *load_state;
    }
    return last_load_state;
}
/* Drive the ELF loader state machine: ensure headers are parsed (via
 * elf_load_header), then hand out one PT_LOAD segment per call.
 *
 * @rproc: remoteproc instance (unused)
 * @img_data/@offset/@len: current window of the image
 * @img_info: loader-private state
 * @last_load_state: state returned by the previous call
 * @da: out, device address for the next chunk (RPROC_LOAD_ANYADDR when
 *      header data is wanted instead)
 * @noffset/@nlen: out, next file window wanted
 * @padding: out, segment padding byte (always 0 for ELF)
 * @nmemsize: out, memory size of the next segment
 *
 * Returns the new load state, or a negative error.
 *
 * Fixes: the first metal_log() used a "%s" conversion with no matching
 * argument (undefined behavior) — __func__ is now passed; "segement"
 * typo in the second log message corrected.
 */
int elf_load(struct remoteproc *rproc,
         const void *img_data, size_t offset, size_t len,
         void **img_info, int last_load_state,
         metal_phys_addr_t *da,
         size_t *noffset, size_t *nlen,
         unsigned char *padding, size_t *nmemsize)
{
    unsigned int *load_state;
    const void *phdr;

    (void)rproc;
    metal_assert(da != NULL);
    metal_assert(noffset != NULL);
    metal_assert(nlen != NULL);
    if ((last_load_state & RPROC_LOADER_MASK) == RPROC_LOADER_NOT_READY) {
        metal_log(METAL_LOG_DEBUG,
              "%s, needs to load header first\r\n", __func__);
        last_load_state = elf_load_header(img_data, offset, len,
                          img_info, last_load_state,
                          noffset, nlen);
        if ((last_load_state & RPROC_LOADER_MASK) ==
            RPROC_LOADER_NOT_READY) {
            *da = RPROC_LOAD_ANYADDR;
            return last_load_state;
        }
    }
    metal_assert(img_info != NULL && *img_info != NULL);
    load_state = elf_load_state(*img_info);
    /* For ELF, segment padding value is 0 */
    if (padding != NULL)
        *padding = 0;
    if ((*load_state & RPROC_LOADER_READY_TO_LOAD) != 0) {
        int nsegment;
        size_t nsegmsize = 0;
        size_t nsize = 0;
        int phnums = 0;

        nsegment = (int)(*load_state & ELF_NEXT_SEGMENT_MASK);
        phdr = elf_next_load_segment(*img_info, &nsegment, da,
                         noffset, &nsize, &nsegmsize);
        if (phdr == NULL) {
            metal_log(METAL_LOG_DEBUG, "cannot find more segment\r\n");
            *load_state = (*load_state & (~ELF_NEXT_SEGMENT_MASK)) |
                      (unsigned int)(nsegment & ELF_NEXT_SEGMENT_MASK);
            return *load_state;
        }
        *nlen = nsize;
        *nmemsize = nsegmsize;
        phnums = elf_phnum(*img_info);
        metal_log(METAL_LOG_DEBUG, "segment: %d, total segs %d\r\n",
              nsegment, phnums);
        if (nsegment == elf_phnum(*img_info)) {
            /* Last segment handed out: switch to post-load phase. */
            *load_state = (*load_state & (~RPROC_LOADER_MASK)) |
                      RPROC_LOADER_POST_DATA_LOAD;
        }
        *load_state = (*load_state & (~ELF_NEXT_SEGMENT_MASK)) |
                  (unsigned int)(nsegment & ELF_NEXT_SEGMENT_MASK);
    } else if ((*load_state & RPROC_LOADER_POST_DATA_LOAD) != 0) {
        if ((*load_state & ELF_STATE_HDRS_COMPLETE) == 0) {
            /* Section headers/shstrtab still outstanding. */
            last_load_state = elf_load_header(img_data, offset,
                              len, img_info,
                              last_load_state,
                              noffset, nlen);
            if (last_load_state < 0)
                return last_load_state;
            if ((last_load_state & ELF_STATE_HDRS_COMPLETE) != 0) {
                *load_state = (*load_state &
                           (~RPROC_LOADER_MASK)) |
                           RPROC_LOADER_LOAD_COMPLETE;
                *nlen = 0;
            }
            *da = RPROC_LOAD_ANYADDR;
        } else {
            /* TODO: will handle relocate later */
            *nlen = 0;
            *load_state = (*load_state &
                       (~RPROC_LOADER_MASK)) |
                       RPROC_LOADER_LOAD_COMPLETE;
        }
    }
    return *load_state;
}
/* Free the loader-private info structure and every table cached in it.
 * Safe to call with NULL.
 */
void elf_release(void *img_info)
{
    if (img_info == NULL)
        return;
    if (elf_is_64(img_info)) {
        struct elf64_info *info = img_info;

        if (info->phdrs != NULL)
            metal_free_memory(info->phdrs);
        if (info->shdrs != NULL)
            metal_free_memory(info->shdrs);
        if (info->shstrtab != NULL)
            metal_free_memory(info->shstrtab);
    } else {
        struct elf32_info *info = img_info;

        if (info->phdrs != NULL)
            metal_free_memory(info->phdrs);
        if (info->shdrs != NULL)
            metal_free_memory(info->shdrs);
        if (info->shstrtab != NULL)
            metal_free_memory(info->shstrtab);
    }
    metal_free_memory(img_info);
}
metal_phys_addr_t elf_get_entry(void *elf_info)
{
if (!elf_info)
return METAL_BAD_PHYS;
if (elf_is_64(elf_info) == 0) {
Elf32_Ehdr *elf_ehdr = (Elf32_Ehdr *)elf_info;
Elf32_Addr e_entry;
e_entry = elf_ehdr->e_entry;
return (metal_phys_addr_t)e_entry;
} else {
Elf64_Ehdr *elf_ehdr = (Elf64_Ehdr *)elf_info;
Elf64_Addr e_entry;
e_entry = elf_ehdr->e_entry;
return (metal_phys_addr_t)(e_entry & (metal_phys_addr_t)(-1));
}
}
/* Locate the ".resource_table" section, reporting its device address,
 * file offset and size. A missing section is not an error: *size is set
 * to 0 and 0 is returned. Requires the headers to be fully parsed.
 */
int elf_locate_rsc_table(void *elf_info, metal_phys_addr_t *da,
             size_t *offset, size_t *size)
{
    void *shdr;
    unsigned int *state;

    if (elf_info == NULL)
        return -RPROC_EINVAL;
    state = elf_load_state(elf_info);
    if ((*state & ELF_STATE_HDRS_COMPLETE) == 0)
        return -RPROC_ERR_LOADER_STATE;
    shdr = elf_get_section_from_name(elf_info, ".resource_table");
    if (shdr == NULL) {
        metal_assert(size != NULL);
        *size = 0;
        return 0;
    }
    elf_parse_section(elf_info, shdr, NULL, NULL, da, offset, size,
              NULL, NULL, NULL, NULL);
    return 0;
}
int elf_get_load_state(void *img_info)
{
unsigned int *load_state;
if (img_info == NULL)
return -RPROC_EINVAL;
load_state = elf_load_state(img_info);
return (int)(*load_state);
}
/* ELF loader operations table plugged into the generic remoteproc loader. */
struct loader_ops elf_ops = {
    .load_header = elf_load_header,
    .load_data = elf_load,
    .locate_rsc_table = elf_locate_rsc_table,
    .release = elf_release,
    .get_entry = elf_get_entry,
    .get_load_state = elf_get_load_state,
};
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2015 Xilinx, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <metal/alloc.h>
#include <metal/log.h>
#include <metal/utilities.h>
#include <openamp/elf_loader.h>
#include <openamp/remoteproc.h>
#include <openamp/remoteproc_loader.h>
#include <openamp/remoteproc_virtio.h>
#include <openamp/rsc_table_parser.h>
/******************************************************************************
* static functions
*****************************************************************************/
/* Return the loader matching the firmware image format (currently only
 * ELF), or NULL when the image is empty or unrecognized.
 * Fix: img_len is an unsigned size_t, so `img_len <= 0` was just a
 * misleading way of writing `img_len == 0`.
 */
static struct loader_ops *
remoteproc_check_fw_format(const void *img_data, size_t img_len)
{
    if (img_len == 0)
        return NULL;
    if (elf_identify(img_data, img_len) == 0)
        return &elf_ops;
    return NULL;
}
/* Find a registered memory region. Exactly one key is used, tried in
 * this order: @name, then @pa (+@size must fit), then @da (+@size must
 * fit), then @va. With no key at all, NULL is returned.
 */
static struct remoteproc_mem *
remoteproc_get_mem(struct remoteproc *rproc, const char *name,
           metal_phys_addr_t pa, metal_phys_addr_t da,
           void *va, size_t size)
{
    struct metal_list *node;
    struct remoteproc_mem *mem;

    metal_list_for_each(&rproc->mems, node) {
        mem = metal_container_of(node, struct remoteproc_mem, node);
        if (name) {
            if (!strncmp(name, mem->name, sizeof(mem->name)))
                return mem;
        } else if (pa != METAL_BAD_PHYS) {
            metal_phys_addr_t pa_start, pa_end;

            pa_start = mem->pa;
            pa_end = pa_start + mem->size;
            /* whole [pa, pa+size) range must lie inside */
            if (pa >= pa_start && (pa + size) <= pa_end)
                return mem;
        } else if (da != METAL_BAD_PHYS) {
            metal_phys_addr_t da_start, da_end;

            da_start = mem->da;
            da_end = da_start + mem->size;
            if (da >= da_start && (da + size) <= da_end)
                return mem;
        } else if (va) {
            if (metal_io_virt_to_offset(mem->io, va) !=
                METAL_BAD_OFFSET)
                return mem;
        } else {
            return NULL;
        }
    }
    return NULL;
}
/* Translate a device address into the physical address space of @mem. */
static metal_phys_addr_t
remoteproc_datopa(struct remoteproc_mem *mem, metal_phys_addr_t da)
{
    return mem->pa + (da - mem->da);
}
/* Translate a physical address into the device address space of @mem. */
static metal_phys_addr_t
remoteproc_patoda(struct remoteproc_mem *mem, metal_phys_addr_t pa)
{
    return mem->da + (pa - mem->pa);
}
/* Read @len bytes of resource table at image offset @offset through
 * @store_ops and process it. Returns a heap copy the caller must free,
 * or an RPROC_ERR_PTR-encoded error.
 * Fix: size_t arguments were passed straight to "%llx" conversions,
 * which is undefined where size_t is not unsigned long long (e.g. 32-bit
 * targets); cast them explicitly.
 */
static void *remoteproc_get_rsc_table(struct remoteproc *rproc,
                      void *store,
                      struct image_store_ops *store_ops,
                      size_t offset,
                      size_t len)
{
    int ret;
    void *rsc_table = NULL;
    const void *img_data;

    /* Copy the resource table to local memory,
     * the caller should be responsible to release the memory
     */
    rsc_table = metal_allocate_memory(len);
    if (!rsc_table) {
        return RPROC_ERR_PTR(-RPROC_ENOMEM);
    }
    ret = store_ops->load(store, offset, len, &img_data, RPROC_LOAD_ANYADDR,
                  NULL, 1);
    if (ret < 0 || ret < (int)len || img_data == NULL) {
        metal_log(METAL_LOG_ERROR,
              "get rsc failed: 0x%llx, 0x%llx\r\n",
              (unsigned long long)offset, (unsigned long long)len);
        rsc_table = RPROC_ERR_PTR(-RPROC_EINVAL);
        goto error;
    }
    memcpy(rsc_table, img_data, len);
    ret = handle_rsc_table(rproc, rsc_table, len, NULL);
    if (ret < 0) {
        rsc_table = RPROC_ERR_PTR(ret);
        goto error;
    }
    return rsc_table;
error:
    metal_free_memory(rsc_table);
    return rsc_table;
}
/* Process an in-memory resource table, resolving its I/O region from the
 * table's virtual address.
 */
int remoteproc_parse_rsc_table(struct remoteproc *rproc,
                   struct resource_table *rsc_table,
                   size_t rsc_size)
{
    struct metal_io_region *io =
        remoteproc_get_io_with_va(rproc, (void *)rsc_table);

    return handle_rsc_table(rproc, rsc_table, rsc_size, io);
}
/* Parse @rsc_table and, on success, record it (with its length and I/O
 * region) on @rproc. -EINVAL when the table is not inside any registered
 * region.
 */
int remoteproc_set_rsc_table(struct remoteproc *rproc,
                 struct resource_table *rsc_table,
                 size_t rsc_size)
{
    struct metal_io_region *io;
    int ret;

    io = remoteproc_get_io_with_va(rproc, (void *)rsc_table);
    if (io == NULL)
        return -EINVAL;
    ret = remoteproc_parse_rsc_table(rproc, rsc_table, rsc_size);
    if (ret != 0)
        return ret;
    rproc->rsc_table = rsc_table;
    rproc->rsc_len = rsc_size;
    rproc->rsc_io = io;
    return ret;
}
/* Zero @rproc (when non-NULL), mark it offline, set up its lock and
 * lists, then hand it to the driver's init op, whose return value is the
 * final instance.
 */
struct remoteproc *remoteproc_init(struct remoteproc *rproc,
                   struct remoteproc_ops *ops, void *priv)
{
    if (!rproc)
        return ops->init(rproc, ops, priv);
    memset(rproc, 0, sizeof(*rproc));
    rproc->state = RPROC_OFFLINE;
    metal_mutex_init(&rproc->lock);
    metal_list_init(&rproc->mems);
    metal_list_init(&rproc->vdevs);
    return ops->init(rproc, ops, priv);
}
/* Remove a remoteproc instance. Only allowed while the remote is
 * offline. Returns 0 on success, -EBUSY when still running, -EINVAL for
 * a NULL instance.
 * Fix: `ret` was left uninitialized on the successful (offline) path,
 * so a garbage value was returned to the caller.
 */
int remoteproc_remove(struct remoteproc *rproc)
{
    int ret = 0;

    if (rproc) {
        metal_mutex_acquire(&rproc->lock);
        if (rproc->state == RPROC_OFFLINE)
            rproc->ops->remove(rproc);
        else
            ret = -EBUSY;
        metal_mutex_release(&rproc->lock);
    } else {
        ret = -EINVAL;
    }
    return ret;
}
/**
 * remoteproc_config - configure the remote before starting it.
 *
 * @rproc: remoteproc instance, or NULL
 * @data:  platform-specific configuration data
 *
 * Returns 0 on success (state moves to RPROC_READY), -RPROC_EINVAL if the
 * remote is not offline, -RPROC_ENODEV if @rproc is NULL, or the error
 * from the platform config op.
 */
int remoteproc_config(struct remoteproc *rproc, void *data)
{
	int ret = -RPROC_ENODEV;

	if (rproc) {
		metal_mutex_acquire(&rproc->lock);
		if (rproc->state == RPROC_OFFLINE) {
			/* configure operation is allowed if the state is
			 * offline or ready. This function can be called
			 * multiple times before start the remote.
			 */
			if (rproc->ops->config)
				ret = rproc->ops->config(rproc, data);
			else
				/* Fix: a missing config op is not an error;
				 * the state still moves to READY below, so
				 * report success rather than -RPROC_ENODEV. */
				ret = 0;
			rproc->state = RPROC_READY;
		} else {
			ret = -RPROC_EINVAL;
		}
		metal_mutex_release(&rproc->lock);
	}
	return ret;
}
/**
 * remoteproc_start - start the remote processor.
 *
 * Only valid from the RPROC_READY state; on that path the platform
 * start op runs and the state moves to RPROC_RUNNING.
 *
 * @rproc: remoteproc instance, or NULL
 *
 * Returns the platform start op result, -RPROC_EINVAL for a wrong state,
 * or -RPROC_ENODEV if @rproc is NULL.
 */
int remoteproc_start(struct remoteproc *rproc)
{
	int status = -RPROC_ENODEV;

	if (!rproc)
		return status;

	metal_mutex_acquire(&rproc->lock);
	if (rproc->state != RPROC_READY) {
		status = -RPROC_EINVAL;
	} else {
		status = rproc->ops->start(rproc);
		rproc->state = RPROC_RUNNING;
	}
	metal_mutex_release(&rproc->lock);
	return status;
}
/**
 * remoteproc_stop - stop the remote processor.
 *
 * A no-op (returns 0) when the remote is already stopped or offline;
 * otherwise invokes the optional platform stop op and records the
 * RPROC_STOPPED state.
 *
 * @rproc: remoteproc instance, or NULL
 *
 * Returns 0, the stop op result, or -RPROC_ENODEV if @rproc is NULL.
 */
int remoteproc_stop(struct remoteproc *rproc)
{
	int status = -RPROC_ENODEV;

	if (!rproc)
		return status;

	metal_mutex_acquire(&rproc->lock);
	if (rproc->state == RPROC_STOPPED ||
	    rproc->state == RPROC_OFFLINE) {
		status = 0;
	} else {
		if (rproc->ops->stop)
			status = rproc->ops->stop(rproc);
		rproc->state = RPROC_STOPPED;
	}
	metal_mutex_release(&rproc->lock);
	return status;
}
/**
 * remoteproc_shutdown - stop (if needed) and shut down the remote.
 *
 * On full success the state becomes RPROC_OFFLINE. Already-offline
 * remotes are a no-op returning 0.
 *
 * @rproc: remoteproc instance, or NULL
 *
 * Returns 0 on success, -RPROC_ENODEV if @rproc is NULL, or the first
 * failing op's error code.
 */
int remoteproc_shutdown(struct remoteproc *rproc)
{
	int status;

	if (!rproc)
		return -RPROC_ENODEV;

	status = 0;
	metal_mutex_acquire(&rproc->lock);
	if (rproc->state != RPROC_OFFLINE) {
		/* Stop first, unless the remote is already stopped. */
		if (rproc->state != RPROC_STOPPED && rproc->ops->stop)
			status = rproc->ops->stop(rproc);
		if (!status) {
			if (rproc->ops->shutdown)
				status = rproc->ops->shutdown(rproc);
			if (!status)
				rproc->state = RPROC_OFFLINE;
		}
	}
	metal_mutex_release(&rproc->lock);
	return status;
}
/**
 * remoteproc_get_io_with_name - find the I/O region of a named memory.
 *
 * @rproc: remoteproc instance
 * @name:  name of the registered memory region
 *
 * Returns the region's metal I/O handle, or NULL if no memory with that
 * name is registered.
 */
struct metal_io_region *
remoteproc_get_io_with_name(struct remoteproc *rproc,
			    const char *name)
{
	struct remoteproc_mem *region;

	region = remoteproc_get_mem(rproc, name,
				    METAL_BAD_PHYS, METAL_BAD_PHYS, NULL, 0);
	return region ? region->io : NULL;
}
/**
 * remoteproc_get_io_with_pa - find the I/O region covering a physical
 * address.
 *
 * @rproc: remoteproc instance
 * @pa:    physical address to look up
 *
 * Returns the region's metal I/O handle, or NULL when no registered
 * memory contains @pa.
 */
struct metal_io_region *
remoteproc_get_io_with_pa(struct remoteproc *rproc,
			  metal_phys_addr_t pa)
{
	struct remoteproc_mem *region;

	region = remoteproc_get_mem(rproc, NULL, pa, METAL_BAD_PHYS, NULL, 0);
	return region ? region->io : NULL;
}
/**
 * remoteproc_get_io_with_da - find the I/O region covering a device
 * address and the offset of that address inside the region.
 *
 * @rproc:  remoteproc instance
 * @da:     device (remote-view) address to look up
 * @offset: out parameter; offset of @da within the returned region
 *          (written only on success)
 *
 * Returns the region's metal I/O handle, or NULL when no registered
 * memory contains @da.
 */
struct metal_io_region *
remoteproc_get_io_with_da(struct remoteproc *rproc,
			  metal_phys_addr_t da,
			  unsigned long *offset)
{
	struct remoteproc_mem *region;
	struct metal_io_region *region_io;
	metal_phys_addr_t phys;

	region = remoteproc_get_mem(rproc, NULL, METAL_BAD_PHYS, da, NULL, 0);
	if (!region)
		return NULL;

	/* Translate da -> pa, then pa -> offset within the region. */
	region_io = region->io;
	phys = remoteproc_datopa(region, da);
	*offset = metal_io_phys_to_offset(region_io, phys);
	return region_io;
}
/**
 * remoteproc_get_io_with_va - find the I/O region covering a virtual
 * address.
 *
 * @rproc: remoteproc instance
 * @va:    local virtual address to look up
 *
 * Returns the region's metal I/O handle, or NULL when no registered
 * memory contains @va.
 */
struct metal_io_region *
remoteproc_get_io_with_va(struct remoteproc *rproc, void *va)
{
	struct remoteproc_mem *region;

	region = remoteproc_get_mem(rproc, NULL, METAL_BAD_PHYS,
				    METAL_BAD_PHYS, va, 0);
	return region ? region->io : NULL;
}
/**
 * remoteproc_mmap - map a physical and/or device address to a local
 * virtual address.
 *
 * At least one of @pa / @da must be non-NULL. When a registered memory
 * region already covers the request, the missing side of the pa <-> da
 * translation is filled in from that region; otherwise the platform
 * mmap op (if any) is used. The resolved addresses are written back
 * through @pa / @da.
 *
 * @rproc:     remoteproc instance
 * @pa:        in/out physical address (METAL_BAD_PHYS if unknown), may be NULL
 * @da:        in/out device address (METAL_BAD_PHYS if unknown), may be NULL
 * @size:      size of the mapping in bytes
 * @attribute: mapping attributes, passed to the platform mmap op
 * @io:        out parameter for the backing I/O region, may be NULL
 *
 * Returns the local virtual address, or NULL on failure.
 */
void *remoteproc_mmap(struct remoteproc *rproc,
		      metal_phys_addr_t *pa, metal_phys_addr_t *da,
		      size_t size, unsigned int attribute,
		      struct metal_io_region **io)
{
	void *vaddr = NULL;
	metal_phys_addr_t phys, dev;
	struct remoteproc_mem *region;

	/* Need an instance and at least one address to resolve. */
	if (!rproc || (!pa && !da))
		return NULL;

	phys = pa ? *pa : METAL_BAD_PHYS;
	dev = da ? *da : METAL_BAD_PHYS;

	region = remoteproc_get_mem(rproc, NULL, phys, dev, NULL, size);
	if (region) {
		/* Fill in whichever side of the mapping was unknown. */
		if (phys != METAL_BAD_PHYS)
			dev = remoteproc_patoda(region, phys);
		else if (dev != METAL_BAD_PHYS)
			phys = remoteproc_datopa(region, dev);
		if (io)
			*io = region->io;
		vaddr = metal_io_phys_to_virt(region->io, phys);
	} else if (rproc->ops->mmap) {
		/* No registered region: defer to the platform. */
		vaddr = rproc->ops->mmap(rproc, &phys, &dev, size,
					 attribute, io);
	}

	if (pa)
		*pa = phys;
	if (da)
		*da = dev;
	return vaddr;
}
/**
 * remoteproc_load - load a firmware image onto the remote.
 *
 * Drives the image-store ops (@store_ops) and the format loader through
 * three phases: (1) load and parse the executable headers, (2) locate and
 * copy the resource table, (3) stream each loadable segment into target
 * memory, zero-padding .bss-style tails. On success the entry point is
 * recorded in rproc->bootaddr and the state is set to RPROC_READY.
 *
 * @rproc:     remoteproc instance
 * @path:      image identifier handed to store_ops->open
 * @store:     opaque image-store handle
 * @store_ops: image store operations (open/load/close); mandatory
 * @img_info:  out parameter receiving the loader's image context; when
 *             NULL the context is released before returning
 *
 * Returns 0 on success, or a negative RPROC_E* error.
 */
int remoteproc_load(struct remoteproc *rproc, const char *path,
		    void *store, struct image_store_ops *store_ops,
		    void **img_info)
{
	int ret;
	struct loader_ops *loader;
	const void *img_data;
	void *limg_info = NULL;		/* loader-private image context */
	size_t offset, noffset;		/* current / next image offset */
	size_t len, nlen;		/* current / next chunk length */
	int last_load_state;
	metal_phys_addr_t da, rsc_da;
	int rsc_len;			/* <0: rsc table not located yet */
	size_t rsc_size;
	void *rsc_table = NULL;		/* local copy of the rsc table */
	struct metal_io_region *io = NULL;

	if (!rproc)
		return -RPROC_ENODEV;

	metal_mutex_acquire(&rproc->lock);
	metal_log(METAL_LOG_DEBUG, "%s: check remoteproc status\r\n", __func__);
	/* If remoteproc is not in ready state, cannot load executable */
	if (rproc->state != RPROC_READY && rproc->state != RPROC_CONFIGURED) {
		metal_log(METAL_LOG_ERROR,
			  "load failure: invalid rproc state %d.\r\n",
			  rproc->state);
		metal_mutex_release(&rproc->lock);
		return -RPROC_EINVAL;
	}
	if (!store_ops) {
		metal_log(METAL_LOG_ERROR,
			  "load failure: loader ops is not set.\r\n");
		metal_mutex_release(&rproc->lock);
		return -RPROC_EINVAL;
	}
	/* Open exectuable to get ready to parse */
	metal_log(METAL_LOG_DEBUG, "%s: open exectuable image\r\n", __func__);
	ret = store_ops->open(store, path, &img_data);
	/* open returns the number of bytes made available at img_data */
	if (ret <= 0) {
		metal_log(METAL_LOG_ERROR,
			  "load failure: failed to open firmware %d.\n",
			  ret);
		metal_mutex_release(&rproc->lock);
		return -RPROC_EINVAL;
	}
	len = ret;
	metal_assert(img_data != NULL);
	/* Check executable format to select a parser */
	loader = rproc->loader;
	if (!loader) {
		metal_log(METAL_LOG_DEBUG, "%s: check loader\r\n", __func__);
		loader = remoteproc_check_fw_format(img_data, len);
		if (!loader) {
			metal_log(METAL_LOG_ERROR,
				  "load failure: failed to get store ops.\n");
			ret = -RPROC_EINVAL;
			goto error1;
		}
		rproc->loader = loader;
	}
	/* Phase 1: load exectuable headers until the loader reports it is
	 * ready to place data (or header data cannot be reached). */
	metal_log(METAL_LOG_DEBUG, "%s: loading headers\r\n", __func__);
	offset = 0;
	last_load_state = RPROC_LOADER_NOT_READY;
	while(1) {
		ret = loader->load_header(img_data, offset, len,
					  &limg_info, last_load_state,
					  &noffset, &nlen);
		last_load_state = (unsigned int)ret;
		metal_log(METAL_LOG_DEBUG,
			  "%s, load header 0x%lx, 0x%x, next 0x%lx, 0x%x\r\n",
			  __func__, offset, len, noffset, nlen);
		if (ret < 0) {
			metal_log(METAL_LOG_ERROR,
				  "load header failed 0x%lx,%d.\r\n",
				  offset, len);
			goto error2;
		} else if ((ret & RPROC_LOADER_READY_TO_LOAD) != 0) {
			if (nlen == 0)
				break;
			else if ((noffset > (offset + len)) &&
				 (store_ops->features & SUPPORT_SEEK) == 0) {
				/* Required data is not continued, however
				 * seek is not supported, stop to load
				 * headers such as ELF section headers which
				 * is usually located to the end of image.
				 * Continue to load binary data to target
				 * memory.
				 */
				break;
			}
		}
		/* Continue to load headers image data */
		img_data = NULL;
		ret = store_ops->load(store, noffset, nlen,
				      &img_data,
				      RPROC_LOAD_ANYADDR,
				      NULL, 1);
		if (ret < (int)nlen) {
			metal_log(METAL_LOG_ERROR,
				  "load image data failed 0x%x,%d\r\n",
				  noffset, nlen);
			goto error2;
		}
		offset = noffset;
		len = nlen;
	}
	/* Phase 2 (first attempt): locate the resource table from the
	 * headers loaded so far and copy it to local memory. */
	ret = elf_locate_rsc_table(limg_info, &rsc_da, &offset, &rsc_size);
	if (ret == 0 && rsc_size > 0) {
		/* parse resource table */
		rsc_len = (int)rsc_size;
		rsc_table = remoteproc_get_rsc_table(rproc, store, store_ops,
						     offset, rsc_len);
	} else {
		/* keep the (negative) status to retry after data load */
		rsc_len = ret;
	}
	/* Phase 3: load executable data */
	metal_log(METAL_LOG_DEBUG, "%s: load executable data\r\n", __func__);
	offset = 0;
	len = 0;
	ret = -EINVAL;
	while(1) {
		unsigned char padding;
		size_t nmemsize;
		metal_phys_addr_t pa;

		da = RPROC_LOAD_ANYADDR;
		nlen = 0;
		nmemsize = 0;
		noffset = 0;
		ret = loader->load_data(rproc, img_data, offset, len,
					&limg_info, last_load_state, &da,
					&noffset, &nlen, &padding, &nmemsize);
		if (ret < 0) {
			metal_log(METAL_LOG_ERROR,
				  "load data failed,0x%lx,%d\r\n",
				  noffset, nlen);
			goto error3;
		}
		metal_log(METAL_LOG_DEBUG,
			  "load data: da 0x%lx, offset 0x%lx, len = 0x%lx, memsize = 0x%lx, state 0x%x\r\n",
			  da, noffset, nlen, nmemsize, ret);
		last_load_state = ret;
		if (da != RPROC_LOAD_ANYADDR) {
			/* Data is supposed to be loaded to target memory */
			img_data = NULL;
			/* get the I/O region from remoteproc */
			pa = METAL_BAD_PHYS;
			(void)remoteproc_mmap(rproc, &pa, &da, nmemsize, 0, &io);
			if (pa == METAL_BAD_PHYS || io == NULL) {
				metal_log(METAL_LOG_ERROR,
					  "load failed, no mapping for 0x%llx.\r\n",
					  da);
				ret = -RPROC_EINVAL;
				goto error3;
			}
			if (nlen > 0) {
				ret = store_ops->load(store, noffset, nlen,
						      &img_data, pa, io, 1);
				if (ret != (int)nlen) {
					metal_log(METAL_LOG_ERROR,
						  "load data failed 0x%lx, 0x%lx, 0x%x\r\n",
						  pa, noffset, nlen);
					ret = -RPROC_EINVAL;
					goto error3;
				}
			}
			if (nmemsize > nlen) {
				/* pad the memsize tail (e.g. .bss) */
				size_t tmpoffset;

				tmpoffset = metal_io_phys_to_offset(io,
								    pa + nlen);
				metal_io_block_set(io, tmpoffset,
						   padding, (nmemsize - nlen));
			}
		} else if (nlen != 0) {
			/* remaining headers/metadata, loaded locally */
			ret = store_ops->load(store, noffset, nlen,
					      &img_data,
					      RPROC_LOAD_ANYADDR,
					      NULL, 1);
			if (ret < (int)nlen) {
				if ((last_load_state &
				    RPROC_LOADER_POST_DATA_LOAD) != 0) {
					metal_log(METAL_LOG_WARNING,
						  "not all the headers are loaded\r\n");
					break;
				}
				metal_log(METAL_LOG_ERROR,
					  "post-load image data failed 0x%x,%d\r\n",
					  noffset, nlen);
				goto error3;
			}
			offset = noffset;
			len = nlen;
		} else {
			/* (last_load_state & RPROC_LOADER_LOAD_COMPLETE) != 0 */
			break;
		}
	}
	/* Phase 2 (retry): headers that arrived late may now reveal the
	 * resource table. */
	if (rsc_len < 0) {
		ret = elf_locate_rsc_table(limg_info, &rsc_da,
					   &offset, &rsc_size);
		if (ret == 0 && rsc_size > 0) {
			/* parse resource table */
			rsc_len = (int)rsc_size;
			rsc_table = remoteproc_get_rsc_table(rproc, store,
							     store_ops,
							     offset,
							     rsc_len);
		}
	}
	/* Update resource table: copy the locally parsed table into the
	 * shared region at rsc_da so the remote sees the assigned ids. */
	if (rsc_len && rsc_da != METAL_BAD_PHYS) {
		void *rsc_table_cp = rsc_table;

		metal_log(METAL_LOG_DEBUG,
			  "%s, update resource table\r\n", __func__);
		rsc_table = remoteproc_mmap(rproc, NULL, &rsc_da,
					    rsc_len, 0, &io);
		if (rsc_table) {
			size_t rsc_io_offset;

			/* Update resource table */
			rsc_io_offset = metal_io_virt_to_offset(io, rsc_table);
			ret = metal_io_block_write(io, rsc_io_offset,
						   rsc_table_cp, rsc_len);
			if (ret != rsc_len) {
				metal_log(METAL_LOG_WARNING,
					  "load: failed to update rsc\r\n");
			}
			rproc->rsc_table = rsc_table;
			rproc->rsc_len = rsc_len;
		} else {
			metal_log(METAL_LOG_WARNING,
				  "load: not able to update rsc table.\n");
		}
		metal_free_memory(rsc_table_cp);
		/* So that the rsc_table will not get released */
		rsc_table = NULL;
	}
	metal_log(METAL_LOG_DEBUG, "%s: successfully load firmware\r\n",
		  __func__);
	/* get entry point from the firmware */
	rproc->bootaddr = loader->get_entry(limg_info);
	rproc->state = RPROC_READY;
	metal_mutex_release(&rproc->lock);
	if (img_info)
		*img_info = limg_info;
	else
		loader->release(limg_info);
	store_ops->close(store);
	return 0;

error3:
	if (rsc_table)
		metal_free_memory(rsc_table);
error2:
	loader->release(limg_info);
error1:
	store_ops->close(store);
	metal_mutex_release(&rproc->lock);
	return ret;
}
/**
 * remoteproc_load_noblock - incremental (non-blocking) firmware load step.
 *
 * Unlike remoteproc_load(), the caller owns the data movement: each call
 * consumes the chunk at (@img_data, @offset, @len), advances the loader
 * state machine one step, and reports through the out parameters which
 * chunk is needed next (*noffset/*nlen) and, for target-memory segments,
 * where to place it (*pa/*io, memory span *nmlen, pad byte *padding).
 * When the loader reaches LOAD_COMPLETE, the resource table is read from
 * target memory, parsed, and written back.
 *
 * @rproc:    remoteproc instance
 * @img_data: current image chunk (NULL only on later calls)
 * @offset:   image offset of @img_data
 * @len:      length of @img_data
 * @img_info: in/out loader image context
 * @pa:       out, physical destination of the next chunk (mandatory)
 * @io:       out, I/O region of the destination (mandatory)
 * @noffset:  out, image offset of the next chunk to supply (mandatory)
 * @nlen:     out, length of the next chunk (0 when done) (mandatory)
 * @nmlen:    out, memory span to occupy (>= *nlen) (mandatory)
 * @padding:  out, byte used to pad *nmlen - *nlen (mandatory)
 *
 * Returns 0 on success, or a negative RPROC_E* error.
 */
int remoteproc_load_noblock(struct remoteproc *rproc,
			    const void *img_data, size_t offset, size_t len,
			    void **img_info,
			    metal_phys_addr_t *pa, struct metal_io_region **io,
			    size_t *noffset, size_t *nlen,
			    size_t *nmlen, unsigned char *padding)
{
	int ret;
	struct loader_ops *loader;
	void *limg_info = NULL;
	int last_load_state;
	metal_phys_addr_t da, rsc_da;
	size_t rsc_size;
	void *rsc_table = NULL, *lrsc_table = NULL;

	if (!rproc)
		return -RPROC_ENODEV;
	metal_assert(pa != NULL);
	metal_assert(io != NULL);
	metal_assert(noffset != NULL);
	metal_assert(nlen != NULL);
	metal_assert(nmlen != NULL);
	metal_assert(padding != NULL);

	metal_mutex_acquire(&rproc->lock);
	metal_log(METAL_LOG_DEBUG, "%s: check remoteproc status\r\n", __func__);
	/* If remoteproc is not in ready state, cannot load executable */
	if (rproc->state != RPROC_READY) {
		metal_log(METAL_LOG_ERROR,
			  "load failure: invalid rproc state %d.\r\n",
			  rproc->state);
		metal_mutex_release(&rproc->lock);
		return -RPROC_EINVAL;
	}
	/* Check executable format to select a parser; identification
	 * requires the very first chunk of the image. */
	loader = rproc->loader;
	if (!loader) {
		metal_log(METAL_LOG_DEBUG, "%s: check loader\r\n", __func__);
		if (img_data == NULL || offset != 0 || len == 0) {
			metal_log(METAL_LOG_ERROR,
				  "load failure, invalid inputs, not able to identify image.\r\n");
			metal_mutex_release(&rproc->lock);
			return -RPROC_EINVAL;
		}
		loader = remoteproc_check_fw_format(img_data, len);
		if (!loader) {
			metal_log(METAL_LOG_ERROR,
				  "load failure: failed to identify image.\n");
			ret = -RPROC_EINVAL;
			metal_mutex_release(&rproc->lock);
			return -RPROC_EINVAL;
		}
		rproc->loader = loader;
	}
	/* Resume from the caller's image context, if any. */
	if (img_info == NULL || *img_info == NULL ) {
		last_load_state = 0;
	} else {
		limg_info = *img_info;
		last_load_state = loader->get_load_state(limg_info);
		if (last_load_state < 0) {
			metal_log(METAL_LOG_ERROR,
				  "load failure, not able get load state.\r\n");
			metal_mutex_release(&rproc->lock);
			return -RPROC_EINVAL;
		}
	}
	da = RPROC_LOAD_ANYADDR;
	*nlen = 0;
	/* Step 1: keep consuming header chunks until ready to load data. */
	if ((last_load_state & RPROC_LOADER_READY_TO_LOAD) == 0 &&
	    (last_load_state & RPROC_LOADER_LOAD_COMPLETE) == 0) {
		/* Get the mandatory executable headers */
		ret = loader->load_header(img_data, offset, len,
					  &limg_info, last_load_state,
					  noffset, nlen);
		last_load_state = (unsigned int)ret;
		metal_log(METAL_LOG_DEBUG,
			  "%s, load header 0x%lx, 0x%x, next 0x%lx, 0x%x\r\n",
			  __func__, offset, len, *noffset, *nlen);
		if (ret < 0) {
			metal_log(METAL_LOG_ERROR,
				  "load header failed 0x%lx,%d.\r\n",
				  offset, len);
			goto error1;
		}
		last_load_state = loader->get_load_state(limg_info);
		/* more header data requested: hand control back to caller */
		if (*nlen != 0 &&
		    (last_load_state & RPROC_LOADER_READY_TO_LOAD) == 0)
			goto out;
	}
	/* Step 2: map the destination of the next data chunk. */
	if ((last_load_state & RPROC_LOADER_READY_TO_LOAD) != 0 ||
	    (last_load_state & RPROC_LOADER_POST_DATA_LOAD) != 0) {
		/* Enough information to know which target memory for
		 * which data.
		 */
		ret = loader->load_data(rproc, img_data, offset, len,
					&limg_info, last_load_state, &da,
					noffset, nlen, padding, nmlen);
		metal_log(METAL_LOG_DEBUG,
			  "%s, load data 0x%lx, 0x%x, next 0x%lx, 0x%x\r\n",
			  __func__, offset, len, *noffset, *nlen);
		if (ret < 0) {
			metal_log(METAL_LOG_ERROR,
				  "load data failed,0x%lx,%d\r\n",
				  offset, len);
			goto error1;
		}
		if (da != RPROC_LOAD_ANYADDR) {
			/* get the I/O region from remoteproc */
			*pa = METAL_BAD_PHYS;
			(void)remoteproc_mmap(rproc, pa, &da, *nmlen, 0, io);
			if (*pa == METAL_BAD_PHYS || io == NULL) {
				metal_log(METAL_LOG_ERROR,
					  "load failed, no mapping for 0x%llx.\r\n",
					  da);
				ret = -RPROC_EINVAL;
				goto error1;
			}
		}
		if (*nlen != 0)
			goto out;
		else
			last_load_state = loader->get_load_state(limg_info);
	}
	/* Step 3: image fully placed — fetch, parse, and write back the
	 * resource table from target memory. */
	if ((last_load_state & RPROC_LOADER_LOAD_COMPLETE) != 0) {
		/* Get resource table */
		size_t rsc_offset;
		size_t rsc_io_offset;

		ret = elf_locate_rsc_table(limg_info, &rsc_da,
					   &rsc_offset, &rsc_size);
		if (ret == 0 && rsc_size > 0) {
			lrsc_table = metal_allocate_memory(rsc_size);
			if (lrsc_table == NULL) {
				ret = -RPROC_ENOMEM;
				goto error1;
			}
			rsc_table = remoteproc_mmap(rproc, NULL, &rsc_da,
						    rsc_size, 0, io);
			if (*io == NULL) {
				metal_log(METAL_LOG_ERROR,
					  "load failed: failed to mmap rsc\r\n");
				metal_free_memory(lrsc_table);
				goto error1;
			}
			rsc_io_offset = metal_io_virt_to_offset(*io, rsc_table);
			ret = metal_io_block_read(*io, rsc_io_offset,
						  lrsc_table, (int)rsc_size);
			if (ret != (int)rsc_size) {
				metal_log(METAL_LOG_ERROR,
					  "load failed: failed to get rsc\r\n");
				metal_free_memory(lrsc_table);
				goto error1;
			}
			/* parse resource table */
			ret = remoteproc_parse_rsc_table(rproc, lrsc_table,
							 rsc_size);
			if (ret == (int)rsc_size) {
				metal_log(METAL_LOG_ERROR,
					  "load failed: failed to parse rsc\r\n");
				metal_free_memory(lrsc_table);
				goto error1;
			}
			/* Update resource table */
			ret = metal_io_block_write(*io, rsc_io_offset,
						   lrsc_table, (int)rsc_size);
			if (ret != (int)rsc_size) {
				metal_log(METAL_LOG_WARNING,
					  "load exectuable, failed to update rsc\r\n");
			}
			rproc->rsc_table = rsc_table;
			rproc->rsc_len = (int)rsc_size;
			metal_free_memory(lrsc_table);
		}
	}
out:
	if (img_info != NULL)
		*img_info = limg_info;
	else
		loader->release(limg_info);
	metal_mutex_release(&rproc->lock);
	return 0;

error1:
	loader->release(limg_info);
	metal_mutex_release(&rproc->lock);
	return ret;
}
/**
 * remoteproc_allocate_id - allocate a notification id from the
 * remoteproc bitmap.
 *
 * @rproc: remoteproc instance
 * @start: first candidate id (RSC_NOTIFY_ID_ANY -> 0)
 * @end:   one past the last candidate id
 *         (RSC_NOTIFY_ID_ANY -> METAL_BITS_PER_ULONG)
 *
 * Returns the allocated id, or @end when no id is free in the range.
 */
unsigned int remoteproc_allocate_id(struct remoteproc *rproc,
				    unsigned int start,
				    unsigned int end)
{
	unsigned int notifyid;

	if (start == RSC_NOTIFY_ID_ANY)
		start = 0;
	if (end == RSC_NOTIFY_ID_ANY)
		end = METAL_BITS_PER_ULONG;
	/* Fix: allocation must search for a *clear* (free) bit and then
	 * mark it used; the original searched for the next *set* bit,
	 * which returns an already-allocated id (or fails on an empty
	 * bitmap). */
	notifyid = metal_bitmap_next_clear_bit(&rproc->bitmap,
					       start, end);
	if (notifyid != end)
		metal_bitmap_set_bit(&rproc->bitmap, notifyid);
	return notifyid;
}
/* Notify callback installed on remoteproc-backed virtio devices:
 * forwards the kick to the platform notify op. @priv is the
 * struct remoteproc * captured at vdev creation time. */
static int remoteproc_virtio_notify(void *priv, uint32_t id)
{
	struct remoteproc *rproc = priv;

	return rproc->ops->notify(rproc, id);
}
/**
 * remoteproc_create_virtio - create (or look up) a virtio device from
 * the vdev resource @vdev_id of the remoteproc's resource table.
 *
 * Initializes every vring described by the resource entry by mapping
 * its device address and registering it with the virtio layer.
 *
 * @rproc:   remoteproc instance (must have a parsed resource table)
 * @vdev_id: index of the RSC_VDEV entry in the resource table
 * @role:    VIRTIO_DEV_MASTER or VIRTIO_DEV_SLAVE
 * @rst_cb:  optional reset callback for the virtio device
 *
 * Returns the virtio device, or NULL on failure.
 */
struct virtio_device *
remoteproc_create_virtio(struct remoteproc *rproc,
			 int vdev_id, unsigned int role,
			 void (*rst_cb)(struct virtio_device *vdev))
{
	char *rsc_table;
	struct fw_rsc_vdev *vdev_rsc;
	struct metal_io_region *vdev_rsc_io;
	struct virtio_device *vdev;
	struct remoteproc_virtio *rpvdev;
	size_t vdev_rsc_offset;
	unsigned int notifyid;
	unsigned int num_vrings, i;
	struct metal_list *node;

	metal_assert(rproc);
	metal_mutex_acquire(&rproc->lock);
	rsc_table = rproc->rsc_table;
	vdev_rsc_io = rproc->rsc_io;
	vdev_rsc_offset = find_rsc(rsc_table, RSC_VDEV, vdev_id);
	if (!vdev_rsc_offset) {
		metal_mutex_release(&rproc->lock);
		return NULL;
	}
	vdev_rsc = (struct fw_rsc_vdev *)(rsc_table + vdev_rsc_offset);
	notifyid = vdev_rsc->notifyid;
	/* Check if the virtio device is already created */
	metal_list_for_each(&rproc->vdevs, node) {
		rpvdev = metal_container_of(node, struct remoteproc_virtio,
					    node);
		if (rpvdev->vdev.index == notifyid) {
			/* Fix: the original returned here while still
			 * holding rproc->lock, deadlocking every later
			 * call on this instance. */
			metal_mutex_release(&rproc->lock);
			return &rpvdev->vdev;
		}
	}
	vdev = rproc_virtio_create_vdev(role, notifyid,
					vdev_rsc, vdev_rsc_io, rproc,
					remoteproc_virtio_notify,
					rst_cb);
	/* Fix: rproc_virtio_create_vdev returns NULL on allocation
	 * failure; bail out instead of dereferencing it. */
	if (!vdev) {
		metal_mutex_release(&rproc->lock);
		return NULL;
	}
	rpvdev = metal_container_of(vdev, struct remoteproc_virtio, vdev);
	metal_list_add_tail(&rproc->vdevs, &rpvdev->node);
	num_vrings = vdev_rsc->num_of_vrings;
	/* set the notification id for vrings */
	for (i = 0; i < num_vrings; i++) {
		struct fw_rsc_vdev_vring *vring_rsc;
		metal_phys_addr_t da;
		unsigned int num_descs, align;
		struct metal_io_region *io;
		void *va;
		size_t size;
		int ret;

		vring_rsc = &vdev_rsc->vring[i];
		notifyid = vring_rsc->notifyid;
		da = vring_rsc->da;
		num_descs = vring_rsc->num;
		align = vring_rsc->align;
		size = vring_size(num_descs, align);
		/* Map the vring's device address into local memory. */
		va = remoteproc_mmap(rproc, NULL, &da, size, 0, &io);
		if (!va)
			goto err1;
		ret = rproc_virtio_init_vring(vdev, i, notifyid,
					      va, io, num_descs, align);
		if (ret)
			goto err1;
	}
	metal_mutex_release(&rproc->lock);
	return vdev;

err1:
	remoteproc_remove_virtio(rproc, vdev);
	metal_mutex_release(&rproc->lock);
	return NULL;
}
/* Detach a virtio device created by remoteproc_create_virtio() from the
 * remoteproc's vdev list and release its resources. @rproc is unused;
 * the list node lives inside the remoteproc_virtio wrapper. */
void remoteproc_remove_virtio(struct remoteproc *rproc,
			      struct virtio_device *vdev)
{
	struct remoteproc_virtio *rpvdev;

	(void)rproc;
	metal_assert(vdev);
	rpvdev = metal_container_of(vdev, struct remoteproc_virtio, vdev);
	metal_list_del(&rpvdev->node);
	rproc_virtio_remove_vdev(&rpvdev->vdev);
}
/**
 * remoteproc_get_notification - dispatch an incoming notification id to
 * every virtio device registered on the remoteproc.
 *
 * @rproc:    remoteproc instance
 * @notifyid: notification id received from the remote
 *
 * Returns 0, or the first non-zero status from rproc_virtio_notified().
 */
int remoteproc_get_notification(struct remoteproc *rproc, uint32_t notifyid)
{
	struct metal_list *item;
	int status;

	metal_list_for_each(&rproc->vdevs, item) {
		struct remoteproc_virtio *rpvdev;

		rpvdev = metal_container_of(item, struct remoteproc_virtio,
					    node);
		status = rproc_virtio_notified(&rpvdev->vdev, notifyid);
		if (status)
			return status;
	}
	return 0;
}
/*
* Remoteproc Virtio Framework Implementation
*
* Copyright(c) 2018 Xilinx Ltd.
* Copyright(c) 2011 Texas Instruments, Inc.
* Copyright(c) 2011 Google, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Texas Instruments nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <openamp/remoteproc.h>
#include <openamp/remoteproc_virtio.h>
#include <openamp/virtqueue.h>
#include <metal/utilities.h>
#include <metal/alloc.h>
/* Kick callback for a virtqueue: resolve the vring's notify id and
 * forward it through the remoteproc notify hook. */
static void rproc_virtio_virtqueue_notify(struct virtqueue *vq)
{
	struct virtio_device *vdev = vq->vq_dev;
	unsigned int queue_id = vq->vq_queue_index;
	struct remoteproc_virtio *rpvdev =
		metal_container_of(vdev, struct remoteproc_virtio, vdev);
	struct virtio_vring_info *vring_info;

	metal_assert(queue_id <= vdev->vrings_num);
	vring_info = &vdev->vrings_info[queue_id];
	rpvdev->notify(rpvdev->priv, vring_info->notifyid);
}
/* Read the virtio device status byte from the shared resource table. */
static unsigned char rproc_virtio_get_status(struct virtio_device *vdev)
{
	struct remoteproc_virtio *rpvdev =
		metal_container_of(vdev, struct remoteproc_virtio, vdev);
	struct fw_rsc_vdev *rsc = rpvdev->vdev_rsc;
	struct metal_io_region *rsc_io = rpvdev->vdev_rsc_io;
	char status;

	status = metal_io_read8(rsc_io,
				metal_io_virt_to_offset(rsc_io, &rsc->status));
	return status;
}
#ifndef VIRTIO_SLAVE_ONLY
/* Write the virtio device status byte into the shared resource table
 * and notify the remote side of the change (master-only operation). */
static void rproc_virtio_set_status(struct virtio_device *vdev,
				    unsigned char status)
{
	struct remoteproc_virtio *rpvdev =
		metal_container_of(vdev, struct remoteproc_virtio, vdev);
	struct fw_rsc_vdev *rsc = rpvdev->vdev_rsc;
	struct metal_io_region *rsc_io = rpvdev->vdev_rsc_io;

	/* Publish the new status ... */
	metal_io_write8(rsc_io,
			metal_io_virt_to_offset(rsc_io, &rsc->status),
			status);
	/* ... then kick the remote so it notices the update. */
	rpvdev->notify(rpvdev->priv, vdev->index);
}
#endif
/* Read the device feature bits from the shared resource table. */
static uint32_t rproc_virtio_get_features(struct virtio_device *vdev)
{
	struct remoteproc_virtio *rpvdev =
		metal_container_of(vdev, struct remoteproc_virtio, vdev);
	struct fw_rsc_vdev *rsc = rpvdev->vdev_rsc;
	struct metal_io_region *rsc_io = rpvdev->vdev_rsc_io;

	/* TODO: shall we get features based on the role ? */
	return metal_io_read32(rsc_io,
			       metal_io_virt_to_offset(rsc_io,
						       &rsc->dfeatures));
}
#ifndef VIRTIO_SLAVE_ONLY
/* Write the device feature bits into the shared resource table and
 * notify the remote side (master-only operation). */
static void rproc_virtio_set_features(struct virtio_device *vdev,
				      uint32_t features)
{
	struct remoteproc_virtio *rpvdev =
		metal_container_of(vdev, struct remoteproc_virtio, vdev);
	struct fw_rsc_vdev *rsc = rpvdev->vdev_rsc;
	struct metal_io_region *rsc_io = rpvdev->vdev_rsc_io;

	/* TODO: shall we set features based on the role ? */
	metal_io_write32(rsc_io,
			 metal_io_virt_to_offset(rsc_io, &rsc->dfeatures),
			 features);
	rpvdev->notify(rpvdev->priv, vdev->index);
}
#endif
/* Feature negotiation is not implemented for remoteproc-backed virtio;
 * always reports an empty feature set. */
static uint32_t rproc_virtio_negotiate_features(struct virtio_device *vdev,
						uint32_t features)
{
	(void)vdev;
	(void)features;

	return 0;
}
/* Config-space reads are not supported by this transport; the call is a
 * deliberate no-op (dst is left untouched). */
static void rproc_virtio_read_config(struct virtio_device *vdev,
				     uint32_t offset, void *dst, int length)
{
	(void)vdev;
	(void)offset;
	(void)dst;
	(void)length;
}
#ifndef VIRTIO_SLAVE_ONLY
/* Config-space writes are not supported by this transport; deliberate
 * no-op (master-only build). */
static void rproc_virtio_write_config(struct virtio_device *vdev,
				      uint32_t offset, void *src, int length)
{
	(void)vdev;
	(void)offset;
	(void)src;
	(void)length;
}

/* Request a device reset by publishing the NEEDS_RESET status; only the
 * master may write the shared status byte. */
static void rproc_virtio_reset_device(struct virtio_device *vdev)
{
	if (vdev->role == VIRTIO_DEV_MASTER)
		rproc_virtio_set_status(vdev,
					VIRTIO_CONFIG_STATUS_NEEDS_RESET);
}
#endif
/* virtio_dispatch ops for remoteproc-backed virtio devices. */
const struct virtio_dispatch remoteproc_virtio_dispatch_funcs = {
	.get_status =  rproc_virtio_get_status,
	.get_features = rproc_virtio_get_features,
	.read_config = rproc_virtio_read_config,
	.notify = rproc_virtio_virtqueue_notify,
	.negotiate_features = rproc_virtio_negotiate_features,
#ifndef VIRTIO_SLAVE_ONLY
	/*
	 * We suppose here that the vdev is in a shared memory that can
	 * be written only by one core: the master. In this case the
	 * slave core has read-only access.
	 */
	.set_status = rproc_virtio_set_status,
	.set_features = rproc_virtio_set_features,
	.write_config = rproc_virtio_write_config,
	.reset_device = rproc_virtio_reset_device,
#endif
};
/**
 * rproc_virtio_create_vdev - allocate a remoteproc-backed virtio device
 * from a vdev resource entry.
 *
 * @role:     VIRTIO_DEV_MASTER or VIRTIO_DEV_SLAVE
 * @notifyid: notification id (also used as the virtio device index)
 * @rsc:      pointer to the fw_rsc_vdev entry in the resource table
 * @rsc_io:   I/O region holding the resource table
 * @priv:     opaque pointer handed back to @notify
 * @notify:   kick callback
 * @rst_cb:   optional reset callback
 *
 * Returns the virtio device, or NULL on allocation failure.
 */
struct virtio_device *
rproc_virtio_create_vdev(unsigned int role, unsigned int notifyid,
			 void *rsc, struct metal_io_region *rsc_io,
			 void *priv,
			 rpvdev_notify_func notify,
			 virtio_dev_reset_cb rst_cb)
{
	struct remoteproc_virtio *rpvdev;
	struct virtio_vring_info *vrings_info;
	struct fw_rsc_vdev *vdev_rsc = rsc;
	struct virtio_device *vdev;
	unsigned int num_vrings = vdev_rsc->num_of_vrings;
	unsigned int i;

	rpvdev = metal_allocate_memory(sizeof(*rpvdev));
	if (!rpvdev)
		return NULL;
	vrings_info = metal_allocate_memory(sizeof(*vrings_info) * num_vrings);
	if (!vrings_info)
		goto err0;
	memset(rpvdev, 0, sizeof(*rpvdev));
	/* Fix: the original zeroed only sizeof(*vrings_info) — the FIRST
	 * element — leaving vrings_info[1..] uninitialized; the err1
	 * cleanup below then frees garbage vq pointers when a later
	 * virtqueue allocation fails. Zero the whole array. */
	memset(vrings_info, 0, sizeof(*vrings_info) * num_vrings);
	vdev = &rpvdev->vdev;

	for (i = 0; i < num_vrings; i++) {
		struct virtqueue *vq;
		struct fw_rsc_vdev_vring *vring_rsc;
		unsigned int num_extra_desc = 0;

		vring_rsc = &vdev_rsc->vring[i];
		/* Only the master allocates descriptor bookkeeping. */
		if (role == VIRTIO_DEV_MASTER) {
			num_extra_desc = vring_rsc->num;
		}
		vq = virtqueue_allocate(num_extra_desc);
		if (!vq)
			goto err1;
		vrings_info[i].vq = vq;
	}

	/* NOTE: notifyid is not cached here; it is already stored in
	 * vdev->index below. */
	rpvdev->notify = notify;
	rpvdev->priv = priv;
	vdev->vrings_info = vrings_info;
	/* Assuming the shared memory has been mapped and registered if
	 * necessary
	 */
	rpvdev->vdev_rsc = vdev_rsc;
	rpvdev->vdev_rsc_io = rsc_io;

	vdev->index = notifyid;
	vdev->role = role;
	vdev->reset_cb = rst_cb;
	vdev->vrings_num = num_vrings;
	vdev->func = &remoteproc_virtio_dispatch_funcs;
	/* TODO: Shall we set features here ? */

	return &rpvdev->vdev;

err1:
	for (i = 0; i < num_vrings; i++) {
		if (vrings_info[i].vq)
			metal_free_memory(vrings_info[i].vq);
	}
	metal_free_memory(vrings_info);
err0:
	metal_free_memory(rpvdev);
	return NULL;
}
/**
 * rproc_virtio_remove_vdev - release a virtio device created by
 * rproc_virtio_create_vdev(), including all of its virtqueues.
 *
 * @vdev: virtio device to destroy; NULL is tolerated.
 */
void rproc_virtio_remove_vdev(struct virtio_device *vdev)
{
	struct remoteproc_virtio *rpvdev;
	unsigned int idx;

	if (!vdev)
		return;
	rpvdev = metal_container_of(vdev, struct remoteproc_virtio, vdev);
	/* Free every virtqueue allocated for this device. */
	for (idx = 0; idx < vdev->vrings_num; idx++) {
		struct virtqueue *vq = vdev->vrings_info[idx].vq;

		if (vq)
			metal_free_memory(vq);
	}
	metal_free_memory(vdev->vrings_info);
	metal_free_memory(rpvdev);
}
/**
 * rproc_virtio_init_vring - record the mapping and geometry of one vring.
 *
 * @vdev:      virtio device owning the vring
 * @index:     vring slot; must be < vdev->vrings_num
 * @notifyid:  notification id for this vring
 * @va:        local virtual address of the vring memory
 * @io:        I/O region backing the vring memory
 * @num_descs: number of descriptors
 * @align:     vring alignment
 *
 * Returns 0 on success, -RPROC_EINVAL for an out-of-range @index.
 */
int rproc_virtio_init_vring(struct virtio_device *vdev, unsigned int index,
			    unsigned int notifyid, void *va,
			    struct metal_io_region *io,
			    unsigned int num_descs, unsigned int align)
{
	struct virtio_vring_info *target;

	/* Reject out-of-range vring indices. */
	if (index >= vdev->vrings_num)
		return -RPROC_EINVAL;

	target = &vdev->vrings_info[index];
	target->io = io;
	target->notifyid = notifyid;
	target->info.vaddr = va;
	target->info.num_descs = num_descs;
	target->info.align = align;
	return 0;
}
/**
 * rproc_virtio_notified - deliver a notification id to the matching
 * vring(s) of a virtio device.
 *
 * RSC_NOTIFY_ID_ANY notifies every vring. A notifyid equal to the
 * device's own index is accepted but ignored.
 *
 * @vdev:     virtio device, or NULL
 * @notifyid: notification id received
 *
 * Returns 0, or -EINVAL when @vdev is NULL.
 */
int rproc_virtio_notified(struct virtio_device *vdev, uint32_t notifyid)
{
	unsigned int idx;

	if (!vdev)
		return -EINVAL;
	/* We do nothing for vdev notification in this implementation */
	if (vdev->index == notifyid)
		return 0;
	for (idx = 0; idx < vdev->vrings_num; idx++) {
		struct virtio_vring_info *ring = &vdev->vrings_info[idx];

		if (ring->notifyid == notifyid ||
		    notifyid == RSC_NOTIFY_ID_ANY) {
			struct virtqueue *vq = ring->vq;

			virtqueue_notification(vq);
		}
	}
	return 0;
}
/**
 * rproc_virtio_wait_remote_ready - block until the master side reports
 * VIRTIO_CONFIG_STATUS_DRIVER_OK in the shared status byte.
 *
 * @vdev: virtio device
 */
void rproc_virtio_wait_remote_ready(struct virtio_device *vdev)
{
	/*
	 * No status available for slave. As the master has nothing to
	 * wait on from the slave, return immediately. Behavior should be
	 * updated in future if a slave status is added.
	 */
	if (vdev->role == VIRTIO_DEV_MASTER)
		return;
	/* Busy-poll the shared status byte until DRIVER_OK appears. */
	for (;;) {
		uint8_t status = rproc_virtio_get_status(vdev);

		if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
			return;
	}
}
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* Copyright (c) 2018, Xilinx Inc.
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <metal/io.h>
#include <openamp/rsc_table_parser.h>
static int handle_dummy_rsc(struct remoteproc *rproc, void *rsc);
/* Resource handlers, indexed by the RSC_* type value of each entry.
 * Types without a dedicated handler fall through to handle_dummy_rsc,
 * which reports "no service" (non-fatal). */
rsc_handler rsc_handler_table[] = {
	handle_carve_out_rsc, /**< carved out resource */
	handle_dummy_rsc, /**< IOMMU dev mem resource */
	handle_trace_rsc, /**< trace buffer resource */
	handle_vdev_rsc, /**< virtio resource */
	handle_dummy_rsc, /**< rproc shared memory resource */
	handle_dummy_rsc, /**< firmware checksum resource */
};
/**
 * handle_rsc_table - validate a resource table and dispatch each entry
 * to its type handler.
 *
 * @rproc:     remoteproc instance
 * @rsc_table: resource table in local memory
 * @size:      table size in bytes
 * @io:        I/O region holding the table, or NULL for plain memory
 *
 * Returns 0 on success or a negative RPROC_ERR_RSC_TAB_* / handler error.
 */
int handle_rsc_table(struct remoteproc *rproc,
		     struct resource_table *rsc_table, int size,
		     struct metal_io_region *io)
{
	char *rsc_start;
	unsigned int rsc_type;
	unsigned int idx, offset;
	int status = 0;

	/* Validate rsc table header fields */

	/* Minimum rsc table size */
	if (sizeof(struct resource_table) > (unsigned int)size) {
		return -RPROC_ERR_RSC_TAB_TRUNC;
	}

	/* Supported version */
	if (rsc_table->ver != RSC_TAB_SUPPORTED_VERSION) {
		return -RPROC_ERR_RSC_TAB_VER;
	}

	/* Offset array must fit within the declared table size */
	offset = sizeof(struct resource_table)
		 + rsc_table->num * sizeof(rsc_table->offset[0]);
	if (offset > (unsigned int)size) {
		return -RPROC_ERR_RSC_TAB_TRUNC;
	}

	/* Reserved fields - must be zero.
	 * Fix: the original wrote ((reserved[0] != 0 || reserved[1]) != 0)
	 * — logically equivalent, but the parentheses were misplaced;
	 * normalized to the intended form. */
	if (rsc_table->reserved[0] != 0 || rsc_table->reserved[1] != 0) {
		return -RPROC_ERR_RSC_TAB_RSVD;
	}

	/* Loop through the offset array and parse each resource entry */
	for (idx = 0; idx < rsc_table->num; idx++) {
		rsc_start = (char *)rsc_table;
		rsc_start += rsc_table->offset[idx];
		/* Entry must be addressable in the backing region */
		if (io &&
		    metal_io_virt_to_offset(io, rsc_start) == METAL_BAD_OFFSET)
			return -RPROC_ERR_RSC_TAB_TRUNC;
		rsc_type = *((uint32_t *)rsc_start);
		if (rsc_type < RSC_LAST)
			status = rsc_handler_table[rsc_type](rproc,
							     rsc_start);
		else if (rsc_type >= RSC_VENDOR_START &&
			 rsc_type <= RSC_VENDOR_END)
			status = handle_vendor_rsc(rproc, rsc_start);
		if (status == -RPROC_ERR_RSC_TAB_NS) {
			/* "no service" is informational, not fatal */
			status = 0;
			continue;
		}
		else if (status)
			break;
	}
	return status;
}
/**
 * handle_carve_out_rsc
 *
 * Carveout resource handler: maps the described physical/device memory
 * range through remoteproc_mmap().
 *
 * @param rproc - pointer to remote remoteproc
 * @param rsc - pointer to carveout resource
 *
 * @returns - 0 for success, or negative value for failure
 *
 */
int handle_carve_out_rsc(struct remoteproc *rproc, void *rsc)
{
	struct fw_rsc_carveout *carve_rsc = (struct fw_rsc_carveout *)rsc;
	metal_phys_addr_t da;
	metal_phys_addr_t pa;

	/* Validate resource fields */
	if (!carve_rsc)
		return -RPROC_ERR_RSC_TAB_NP;
	if (carve_rsc->reserved)
		return -RPROC_ERR_RSC_TAB_RSVD;

	pa = carve_rsc->pa;
	da = carve_rsc->da;
	/* The carveout is usable only if it can be mapped. */
	if (!remoteproc_mmap(rproc, &pa, &da, carve_rsc->len,
			     carve_rsc->flags, NULL))
		return -RPROC_EINVAL;
	return 0;
}
/* Vendor resource handler: delegate to the platform handle_rsc op when
 * one is provided, otherwise report "no service" (non-fatal). */
int handle_vendor_rsc(struct remoteproc *rproc, void *rsc)
{
	struct fw_rsc_vendor *vend_rsc = rsc;

	if (!rproc || !rproc->ops->handle_rsc)
		return -RPROC_ERR_RSC_TAB_NS;
	return rproc->ops->handle_rsc(rproc, rsc, vend_rsc->len);
}
/**
 * handle_vdev_rsc - vdev resource handler.
 *
 * Only assigns notification IDs (for the vdev and each of its vrings);
 * it does not initialize the virtio device itself.
 *
 * @rproc: remoteproc instance
 * @rsc:   pointer to the fw_rsc_vdev entry
 *
 * Returns 0.
 */
int handle_vdev_rsc(struct remoteproc *rproc, void *rsc)
{
	struct fw_rsc_vdev *vdev_rsc = (struct fw_rsc_vdev *)rsc;
	unsigned int notifyid, i, num_vrings;

	/* only assign notification IDs but do not initialize vdev */
	notifyid = vdev_rsc->notifyid;
	if (notifyid == RSC_NOTIFY_ID_ANY) {
		notifyid = remoteproc_allocate_id(rproc,
						  notifyid, notifyid + 1);
		vdev_rsc->notifyid = notifyid;
	}
	num_vrings = vdev_rsc->num_of_vrings;
	for (i = 0; i < num_vrings; i++) {
		struct fw_rsc_vdev_vring *vring_rsc;

		vring_rsc = &vdev_rsc->vring[i];
		notifyid = vring_rsc->notifyid;
		if (notifyid == RSC_NOTIFY_ID_ANY) {
			notifyid = remoteproc_allocate_id(rproc,
							  notifyid,
							  notifyid + 1);
			/* Fix: store the allocated id on the vring entry;
			 * the original wrote vdev_rsc->notifyid here,
			 * clobbering the vdev id and leaving the vring
			 * id unassigned. */
			vring_rsc->notifyid = notifyid;
		}
	}
	return 0;
}
/**
 * handle_trace_rsc
 *
 * Trace resource handler: a trace buffer is accepted only when it comes
 * with a fixed device address and a non-zero length.
 *
 * @param rproc - pointer to remote remoteproc
 * @param rsc - pointer to trace resource
 *
 * @returns - no service error
 *
 */
int handle_trace_rsc(struct remoteproc *rproc, void *rsc)
{
	struct fw_rsc_trace *trace_rsc = (struct fw_rsc_trace *)rsc;

	(void)rproc;
	if (trace_rsc->da == FW_RSC_U32_ADDR_ANY || trace_rsc->len == 0)
		/* FIXME: master should allocated a memory used by slave */
		return -RPROC_ERR_RSC_TAB_NS;
	return 0;
}
/**
 * handle_dummy_rsc
 *
 * Dummy resource handler: placeholder for resource types that have no
 * implementation here; always reports "no service" (treated as
 * non-fatal by handle_rsc_table).
 *
 * @param rproc - pointer to remote remoteproc (unused)
 * @param rsc - pointer to the resource entry (unused)
 *
 * @returns - no service error
 *
 */
static int handle_dummy_rsc(struct remoteproc *rproc, void *rsc)
{
	(void)rproc;
	(void)rsc;

	return -RPROC_ERR_RSC_TAB_NS;
}
/**
 * find_rsc - locate the index-th resource entry of a given type.
 *
 * @rsc_table: resource table to search
 * @rsc_type:  RSC_* type value to match
 * @index:     zero-based occurrence of that type to return
 *
 * Returns the entry's byte offset within the table, or 0 if not found.
 */
size_t find_rsc(void *rsc_table, unsigned int rsc_type, unsigned int index)
{
	struct resource_table *r_table = rsc_table;
	unsigned int i;
	unsigned int match_count = 0;

	metal_assert(r_table);
	/* Walk the offset array, counting entries of the wanted type. */
	for (i = 0; i < r_table->num; i++) {
		char *entry = (char *)r_table + r_table->offset[i];

		if (*(uint32_t *)entry != rsc_type)
			continue;
		if (match_count++ == index)
			return r_table->offset[i];
	}
	return 0;
}
collect (PROJECT_LIB_SOURCES rpmsg.c)
collect (PROJECT_LIB_SOURCES rpmsg_virtio.c)
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
* Copyright (c) 2018 Linaro, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <openamp/rpmsg.h>
#include <metal/alloc.h>
#include "rpmsg_internal.h"
/**
 * rpmsg_get_address
 *
 * Allocates a unique 32-bit endpoint address from the bitmap.
 *
 * @param bitmap - bit map for addresses
 * @param size - size of bitmap
 *
 * return - the first free address, or RPMSG_ADDR_ANY when the bitmap
 *          is exhausted
 */
static uint32_t rpmsg_get_address(unsigned long *bitmap, int size)
{
	unsigned int free_bit;

	free_bit = metal_bitmap_next_clear_bit(bitmap, 0, size);
	if (free_bit >= (uint32_t)size)
		return RPMSG_ADDR_ANY;
	/* Claim the bit so the address cannot be handed out twice. */
	metal_bitmap_set_bit(bitmap, free_bit);
	return free_bit;
}
/**
 * rpmsg_release_address
 *
 * Frees the given address; out-of-range addresses are ignored.
 *
 * @param bitmap - bit map for addresses
 * @param size - size of bitmap
 * @param addr - address to free
 */
static void rpmsg_release_address(unsigned long *bitmap, int size,
				  int addr)
{
	if (addr >= size)
		return;
	metal_bitmap_clear_bit(bitmap, addr);
}
/**
 * rpmsg_is_address_set
 *
 * Checks whether an address is used or free.
 *
 * @param bitmap - bit map for addresses
 * @param size - size of bitmap
 * @param addr - address to check
 *
 * return - non-zero when used, 0 when free, RPMSG_ERR_PARAM when
 *          out of range
 */
static int rpmsg_is_address_set(unsigned long *bitmap, int size, int addr)
{
	if (addr >= size)
		return RPMSG_ERR_PARAM;
	return metal_bitmap_is_bit_set(bitmap, addr);
}
/**
 * rpmsg_set_address
 *
 * Marks the address as consumed.
 *
 * @param bitmap - bit map for addresses
 * @param size - size of bitmap
 * @param addr - address to claim
 *
 * return - RPMSG_SUCCESS, or RPMSG_ERR_PARAM when out of range
 */
static int rpmsg_set_address(unsigned long *bitmap, int size, int addr)
{
	if (addr >= size)
		return RPMSG_ERR_PARAM;
	metal_bitmap_set_bit(bitmap, addr);
	return RPMSG_SUCCESS;
}
/**
 * This function sends an rpmsg "message" to the remote device by
 * delegating to the transport's send_offchannel_raw operation.
 *
 * @param ept - pointer to end point
 * @param src - source address of channel
 * @param dst - destination address of channel
 * @param data - data to transmit
 * @param size - size of data
 * @param wait - boolean, wait or not for buffer to become
 * available
 *
 * @return - size of data sent or negative value for failure.
 *
 */
int rpmsg_send_offchannel_raw(struct rpmsg_endpoint *ept, uint32_t src,
			      uint32_t dst, const void *data, int size,
			      int wait)
{
	struct rpmsg_device *rdev;

	/* Reject unusable endpoints, missing payloads and wildcard dst. */
	if (!ept || !ept->rdev || !data || dst == RPMSG_ADDR_ANY)
		return RPMSG_ERR_PARAM;
	rdev = ept->rdev;
	if (!rdev->ops.send_offchannel_raw)
		return RPMSG_ERR_PARAM;
	return rdev->ops.send_offchannel_raw(rdev, src, dst, data,
					     size, wait);
}
/**
 * rpmsg_send_ns_message
 *
 * Sends a name-service announcement (create/destroy) for the endpoint
 * to the remote side's NS endpoint.
 *
 * @param ept - endpoint being announced
 * @param flags - RPMSG_NS_CREATE or RPMSG_NS_DESTROY
 *
 * @return - RPMSG_SUCCESS, or the negative error from the raw send
 */
int rpmsg_send_ns_message(struct rpmsg_endpoint *ept, unsigned long flags)
{
	struct rpmsg_ns_msg ns_msg;
	int ret;
	ns_msg.flags = flags;
	ns_msg.addr = ept->addr;
	strncpy(ns_msg.name, ept->name, sizeof(ns_msg.name));
	/* BUGFIX: strncpy() does not NUL-terminate when the source fills
	 * the buffer; force termination so the peer never reads past the
	 * name field.
	 */
	ns_msg.name[sizeof(ns_msg.name) - 1] = '\0';
	ret = rpmsg_send_offchannel_raw(ept, ept->addr,
					RPMSG_NS_EPT_ADDR,
					&ns_msg, sizeof(ns_msg), true);
	if (ret < 0)
		return ret;
	else
		return RPMSG_SUCCESS;
}
/**
 * rpmsg_get_endpoint
 *
 * Searches the device's endpoint list for a match, in priority order:
 * exact local address, exact (local, remote) address pair, then service
 * name combined with the remote address. Callers in this file invoke it
 * with rdev->lock held.
 *
 * @param rdev - rpmsg device whose endpoint list is searched
 * @param name - service name to match, or NULL to match by address only
 * @param addr - local address, or RPMSG_ADDR_ANY
 * @param dest_addr - remote address, or RPMSG_ADDR_ANY
 *
 * @return - matching endpoint, or NULL when none matches
 */
struct rpmsg_endpoint *rpmsg_get_endpoint(struct rpmsg_device *rdev,
					  const char *name, uint32_t addr,
					  uint32_t dest_addr)
{
	struct metal_list *node;
	struct rpmsg_endpoint *ept;
	metal_list_for_each(&rdev->endpoints, node) {
		int name_match = 0;
		ept = metal_container_of(node, struct rpmsg_endpoint, node);
		/* try to get by local address only */
		if (addr != RPMSG_ADDR_ANY && ept->addr == addr)
			return ept;
		/* try to find match on local end remote address */
		if (addr == ept->addr && dest_addr == ept->dest_addr)
			return ept;
		/* else use name service and destination address */
		if (name)
			name_match = !strncmp(ept->name, name,
					      sizeof(ept->name));
		/* no name to compare: this entry cannot match anymore */
		if (!name || !name_match)
			continue;
		/* destination address is known, equal to ept remote address*/
		if (dest_addr != RPMSG_ADDR_ANY && ept->dest_addr == dest_addr)
			return ept;
		/* ept is registered but not associated to remote ept*/
		if (addr == RPMSG_ADDR_ANY && ept->dest_addr == RPMSG_ADDR_ANY)
			return ept;
	}
	return NULL;
}
/**
 * rpmsg_unregister_endpoint
 *
 * Unlinks the endpoint from the device list, first returning its local
 * address to the bitmap. Callers in this file hold rdev->lock around
 * this call.
 *
 * @param ept - endpoint to remove; NULL is tolerated
 */
static void rpmsg_unregister_endpoint(struct rpmsg_endpoint *ept)
{
	struct rpmsg_device *rdev;
	if (!ept)
		return;
	rdev = ept->rdev;
	/* Only a concrete address occupies a slot in the bitmap. */
	if (ept->addr != RPMSG_ADDR_ANY)
		rpmsg_release_address(rdev->bitmap, RPMSG_ADDR_BMP_SIZE,
				      ept->addr);
	metal_list_del(&ept->node);
}
/**
 * rpmsg_register_endpoint
 *
 * Records the owning device on the endpoint and appends it to the
 * device's endpoint list.
 *
 * @param rdev - owning rpmsg device
 * @param ept - endpoint to register
 *
 * @return - RPMSG_SUCCESS always
 */
int rpmsg_register_endpoint(struct rpmsg_device *rdev,
			    struct rpmsg_endpoint *ept)
{
	ept->rdev = rdev;
	metal_list_add_tail(&rdev->endpoints, &ept->node);
	return RPMSG_SUCCESS;
}
/**
 * rpmsg_create_ept
 *
 * Creates and registers an endpoint on the device. A fixed local
 * address (src) is validated against / claimed in the address bitmap;
 * RPMSG_ADDR_ANY picks the next free address. When the remote address
 * is still unknown, a name-service CREATE announcement is sent (with
 * the lock dropped around the send).
 *
 * @param ept - caller-provided endpoint storage
 * @param rdev - owning rpmsg device
 * @param name - service name
 * @param src - requested local address, or RPMSG_ADDR_ANY
 * @param dest - remote address, or RPMSG_ADDR_ANY
 * @param cb - receive callback
 * @param unbind_cb - name-service unbind callback
 *
 * @return - RPMSG_SUCCESS or a negative RPMSG_ERR_* value
 */
int rpmsg_create_ept(struct rpmsg_endpoint *ept, struct rpmsg_device *rdev,
		     const char *name, uint32_t src, uint32_t dest,
		     rpmsg_ept_cb cb, rpmsg_ns_unbind_cb unbind_cb)
{
	int status;
	uint32_t addr = src;
	if (!ept)
		return RPMSG_ERR_PARAM;
	metal_mutex_acquire(&rdev->lock);
	if (src != RPMSG_ADDR_ANY) {
		status = rpmsg_is_address_set(rdev->bitmap,
					      RPMSG_ADDR_BMP_SIZE, src);
		if (!status) {
			/* Mark the address as used in the address bitmap. */
			rpmsg_set_address(rdev->bitmap, RPMSG_ADDR_BMP_SIZE,
					  src);
		} else if (status > 0) {
			/* NOTE(review): the address is already in use, yet
			 * RPMSG_SUCCESS is returned without initializing the
			 * endpoint - confirm callers expect this behavior.
			 */
			status = RPMSG_SUCCESS;
			goto ret_status;
		} else {
			/* Negative status: propagate the parameter error. */
			goto ret_status;
		}
	} else {
		addr = rpmsg_get_address(rdev->bitmap, RPMSG_ADDR_BMP_SIZE);
	}
	rpmsg_init_ept(ept, name, addr, dest, cb, unbind_cb);
	status = rpmsg_register_endpoint(rdev, ept);
	if (status < 0)
		rpmsg_release_address(rdev->bitmap, RPMSG_ADDR_BMP_SIZE, addr);
	if (!status && ept->dest_addr == RPMSG_ADDR_ANY) {
		/* Send NS announcement to remote processor */
		metal_mutex_release(&rdev->lock);
		status = rpmsg_send_ns_message(ept, RPMSG_NS_CREATE);
		metal_mutex_acquire(&rdev->lock);
		if (status)
			rpmsg_unregister_endpoint(ept);
	}
ret_status:
	metal_mutex_release(&rdev->lock);
	return status;
}
/**
 * rpmsg_destroy_ept
 *
 * This function deletes rpmsg endpoint and performs cleanup: it first
 * announces the destruction to the remote side (except for the NS
 * endpoint itself), then unregisters the endpoint under the device lock.
 *
 * @param ept - pointer to endpoint to destroy; NULL is tolerated
 *
 */
void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
{
	struct rpmsg_device *rdev;
	if (!ept)
		return;
	rdev = ept->rdev;
	/* The NS endpoint must not announce its own destruction. */
	if (ept->addr != RPMSG_NS_EPT_ADDR)
		(void)rpmsg_send_ns_message(ept, RPMSG_NS_DESTROY);
	metal_mutex_acquire(&rdev->lock);
	rpmsg_unregister_endpoint(ept);
	metal_mutex_release(&rdev->lock);
}
/*
* SPDX-License-Identifier: BSD-3-Clause
*
* $FreeBSD$
*/
#ifndef _RPMSG_INTERNAL_H_
#define _RPMSG_INTERNAL_H_
#include <stdint.h>
#include <openamp/rpmsg.h>
#if defined __cplusplus
extern "C" {
#endif
#ifdef RPMSG_DEBUG
/* Debug build: print the failing function plus message, then spin. */
#define RPMSG_ASSERT(_exp, _msg) do { \
		if (!(_exp)) { \
			/* BUGFIX: concatenate the _msg argument onto the \
			 * format string; the original embedded the literal \
			 * characters "_msg" so the message never printed. \
			 */ \
			openamp_print("FATAL: %s - " _msg, __func__); \
			while (1) { \
				; \
			} \
		} \
	} while (0)
#else
/* Non-debug build: no print, just halt on assertion failure. */
#define RPMSG_ASSERT(_exp, _msg) do { \
		if (!(_exp)) \
			while (1) { \
				; \
			} \
	} while (0)
#endif
/* Address of the payload that immediately follows the rpmsg header. */
#define RPMSG_LOCATE_DATA(p) ((unsigned char *)(p) + sizeof(struct rpmsg_hdr))
/**
 * enum rpmsg_ns_flags - dynamic name service announcement flags
 *
 * @RPMSG_NS_CREATE: a new remote service was just created
 * @RPMSG_NS_DESTROY: a known remote service was just destroyed
 */
enum rpmsg_ns_flags {
	RPMSG_NS_CREATE = 0,
	RPMSG_NS_DESTROY = 1,
};
/**
 * struct rpmsg_hdr - common header for all rpmsg messages
 * @src: source address
 * @dst: destination address
 * @reserved: reserved for future use
 * @len: length of payload (in bytes)
 * @flags: message flags
 *
 * Every message sent(/received) on the rpmsg bus begins with this header.
 * The OPENAMP_PACKED_* markers keep the layout padding-free so the
 * struct can travel through shared memory as-is.
 */
OPENAMP_PACKED_BEGIN
struct rpmsg_hdr {
	uint32_t src;
	uint32_t dst;
	uint32_t reserved;
	uint16_t len;
	uint16_t flags;
} OPENAMP_PACKED_END;
/**
 * struct rpmsg_ns_msg - dynamic name service announcement message
 * @name: name of remote service that is published
 * @addr: address of remote service that is published
 * @flags: indicates whether service is created or destroyed
 *
 * This message is sent across to publish a new service, or announce
 * about its removal. When we receive these messages, an appropriate
 * rpmsg channel (i.e device) is created/destroyed. In turn, the ->probe()
 * or ->remove() handler of the appropriate rpmsg driver will be invoked
 * (if/as-soon-as one is registered).
 *
 * Packed to guarantee a stable wire layout across both processors.
 */
OPENAMP_PACKED_BEGIN
struct rpmsg_ns_msg {
	char name[RPMSG_NAME_SIZE];
	uint32_t addr;
	uint32_t flags;
} OPENAMP_PACKED_END;
int rpmsg_send_ns_message(struct rpmsg_endpoint *ept, unsigned long flags);
struct rpmsg_endpoint *rpmsg_get_endpoint(struct rpmsg_device *rvdev,
const char *name, uint32_t addr,
uint32_t dest_addr);
int rpmsg_register_endpoint(struct rpmsg_device *rdev,
struct rpmsg_endpoint *ept);
/* Convenience wrapper: look up an endpoint by local address only. */
static inline struct rpmsg_endpoint *
rpmsg_get_ept_from_addr(struct rpmsg_device *rdev, uint32_t addr)
{
	return rpmsg_get_endpoint(rdev, NULL, addr, RPMSG_ADDR_ANY);
}
#if defined __cplusplus
}
#endif
#endif /* _RPMSG_INTERNAL_H_ */
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
* Copyright (c) 2018 Linaro, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <metal/alloc.h>
#include <metal/cache.h>
#include <metal/sleep.h>
#include <metal/utilities.h>
#include <openamp/rpmsg_virtio.h>
#include <openamp/virtqueue.h>
#include "rpmsg_internal.h"
/* One rx and one tx vring per rpmsg virtio device. */
#define RPMSG_NUM_VRINGS                        (2)
/* Total tick count for 15secs - 1msec tick. */
#define RPMSG_TICK_COUNT                        15000
/* Time to wait - In multiple of 10 msecs. */
#define RPMSG_TICKS_PER_INTERVAL                10
/* Round a size up to the next multiple of the native word size. */
#define WORD_SIZE	sizeof(unsigned long)
#define WORD_ALIGN(a)	((((a) & (WORD_SIZE - 1)) != 0) ? \
			(((a) & (~(WORD_SIZE - 1))) + WORD_SIZE) : (a))
#ifndef VIRTIO_SLAVE_ONLY
/**
 * rpmsg_virtio_shm_pool_get_buffer
 *
 * Carves the next chunk of the requested size off the shared-memory
 * pool; returns NULL once the pool cannot satisfy the request.
 */
metal_weak void *
rpmsg_virtio_shm_pool_get_buffer(struct rpmsg_virtio_shm_pool *shpool,
				 size_t size)
{
	size_t used;

	if (shpool->avail < size)
		return NULL;
	/* The pool is consumed front-to-back: the used prefix spans
	 * size - avail bytes from the base.
	 */
	used = shpool->size - shpool->avail;
	shpool->avail -= size;
	return (char *)shpool->base + used;
}
#endif /*!VIRTIO_SLAVE_ONLY*/
/**
 * rpmsg_virtio_init_shm_pool
 *
 * Initializes a shared-memory pool descriptor with a word-aligned size
 * and marks the whole pool as available. A NULL pool is ignored.
 */
void rpmsg_virtio_init_shm_pool(struct rpmsg_virtio_shm_pool *shpool,
				void *shb, size_t size)
{
	size_t aligned;

	if (!shpool)
		return;
	aligned = WORD_ALIGN(size);
	shpool->base = shb;
	shpool->size = aligned;
	shpool->avail = aligned;
}
/**
 * rpmsg_virtio_return_buffer
 *
 * Places the used buffer back on the receive virtqueue. As master the
 * buffer is re-posted as a fresh available descriptor; as remote the
 * descriptor index is returned to the used ring.
 *
 * @param rvdev - pointer to remote core
 * @param buffer - buffer pointer
 * @param len - buffer length
 * @param idx - buffer index (remote role only)
 *
 */
static void rpmsg_virtio_return_buffer(struct rpmsg_virtio_device *rvdev,
				       void *buffer, unsigned long len,
				       unsigned short idx)
{
	unsigned int role = rpmsg_virtio_get_role(rvdev);
#ifndef VIRTIO_SLAVE_ONLY
	if (role == RPMSG_MASTER) {
		struct virtqueue_buf vqbuf;
		(void)idx;
		/* Initialize buffer node */
		vqbuf.buf = buffer;
		vqbuf.len = len;
		virtqueue_add_buffer(rvdev->rvq, &vqbuf, 0, 1, buffer);
	}
#endif /*VIRTIO_SLAVE_ONLY*/
#ifndef VIRTIO_MASTER_ONLY
	if (role == RPMSG_REMOTE) {
		(void)buffer;
		virtqueue_add_consumed_buffer(rvdev->rvq, idx, len);
	}
#endif /*VIRTIO_MASTER_ONLY*/
}
/**
 * rpmsg_virtio_enqueue_buffer
 *
 * Places buffer on the send virtqueue for consumption by the other
 * side: the master posts an available descriptor, the remote returns
 * the descriptor to the used ring.
 *
 * @param rvdev - pointer to rpmsg virtio
 * @param buffer - buffer pointer
 * @param len - buffer length
 * @param idx - buffer index (remote role only)
 *
 * @return - status of function execution
 */
static int rpmsg_virtio_enqueue_buffer(struct rpmsg_virtio_device *rvdev,
				       void *buffer, unsigned long len,
				       unsigned short idx)
{
	unsigned int role = rpmsg_virtio_get_role(rvdev);
#ifndef VIRTIO_SLAVE_ONLY
	if (role == RPMSG_MASTER) {
		struct virtqueue_buf vqbuf;
		(void)idx;
		/* Initialize buffer node */
		vqbuf.buf = buffer;
		vqbuf.len = len;
		return virtqueue_add_buffer(rvdev->svq, &vqbuf, 0, 1, buffer);
	}
#endif /*!VIRTIO_SLAVE_ONLY*/
#ifndef VIRTIO_MASTER_ONLY
	if (role == RPMSG_REMOTE) {
		(void)buffer;
		return virtqueue_add_consumed_buffer(rvdev->svq, idx, len);
	}
#endif /*!VIRTIO_MASTER_ONLY*/
	return 0;
}
/**
 * rpmsg_virtio_get_tx_buffer
 *
 * Provides buffer to transmit messages. The master first recycles a
 * buffer the remote has consumed, falling back to the shared-memory
 * pool; the remote takes the next buffer the master made available.
 *
 * @param rvdev - pointer to rpmsg device
 * @param len - length of returned buffer
 * @param idx - buffer index
 *
 * return - pointer to buffer, or NULL when none is available.
 */
static void *rpmsg_virtio_get_tx_buffer(struct rpmsg_virtio_device *rvdev,
					unsigned long *len,
					unsigned short *idx)
{
	unsigned int role = rpmsg_virtio_get_role(rvdev);
	void *data = NULL;
#ifndef VIRTIO_SLAVE_ONLY
	if (role == RPMSG_MASTER) {
		data = virtqueue_get_buffer(rvdev->svq, (uint32_t *)len, idx);
		if (data == NULL) {
			data = rpmsg_virtio_shm_pool_get_buffer(rvdev->shpool,
							RPMSG_BUFFER_SIZE);
			/* NOTE(review): data may still be NULL here while
			 * *len is set anyway - callers must check the
			 * returned pointer.
			 */
			*len = RPMSG_BUFFER_SIZE;
		}
	}
#endif /*!VIRTIO_SLAVE_ONLY*/
#ifndef VIRTIO_MASTER_ONLY
	if (role == RPMSG_REMOTE) {
		data = virtqueue_get_available_buffer(rvdev->svq, idx,
						      (uint32_t *)len);
	}
#endif /*!VIRTIO_MASTER_ONLY*/
	return data;
}
/**
 * rpmsg_virtio_get_rx_buffer
 *
 * Retrieves the received buffer from the receive virtqueue and
 * invalidates the CPU cache over it before handing it to the caller.
 *
 * @param rvdev - pointer to rpmsg device
 * @param len - size of received buffer
 * @param idx - index of buffer
 *
 * @return - pointer to received buffer, or NULL when the queue is empty
 *
 */
static void *rpmsg_virtio_get_rx_buffer(struct rpmsg_virtio_device *rvdev,
					unsigned long *len,
					unsigned short *idx)
{
	unsigned int role = rpmsg_virtio_get_role(rvdev);
	void *data = NULL;
#ifndef VIRTIO_SLAVE_ONLY
	if (role == RPMSG_MASTER) {
		data = virtqueue_get_buffer(rvdev->rvq, (uint32_t *)len, idx);
	}
#endif /*!VIRTIO_SLAVE_ONLY*/
#ifndef VIRTIO_MASTER_ONLY
	if (role == RPMSG_REMOTE) {
		data =
		    virtqueue_get_available_buffer(rvdev->rvq, idx,
						   (uint32_t *)len);
	}
#endif /*!VIRTIO_MASTER_ONLY*/
	if (data) {
		/* FIX ME: library should not worry about if it needs
		 * to flush/invalidate cache, it is shared memory.
		 * The shared memory should be mapped properly before
		 * using it.
		 */
		metal_cache_invalidate(data, (unsigned int)(*len));
	}
	return data;
}
#ifndef VIRTIO_MASTER_ONLY
/**
 * check if the remote is ready to start RPMsg communication
 *
 * Busy-waits (yielding the CPU each pass) until the master has set
 * VIRTIO_CONFIG_STATUS_DRIVER_OK; a NEEDS_RESET status clears the
 * device status first. In practice this only ever returns true.
 */
static int rpmsg_virtio_wait_remote_ready(struct rpmsg_virtio_device *rvdev)
{
	uint8_t status;
	while (1) {
		status = rpmsg_virtio_get_status(rvdev);
		/* Busy wait until the remote is ready */
		if (status & VIRTIO_CONFIG_STATUS_NEEDS_RESET) {
			rpmsg_virtio_set_status(rvdev, 0);
			/* TODO notify remote processor */
		} else if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) {
			return true;
		}
		/* TODO: clarify metal_cpu_yield usage*/
		metal_cpu_yield();
	}
	/* Unreachable: the loop above only exits via return. */
	return false;
}
#endif /*!VIRTIO_MASTER_ONLY*/
/**
 * _rpmsg_virtio_get_buffer_size
 *
 * Returns buffer size available for sending messages (payload bytes,
 * rpmsg header excluded). Caller holds rdev->lock.
 *
 * @param rvdev - pointer to rpmsg virtio device
 *
 * @return - buffer size
 *
 */
static int _rpmsg_virtio_get_buffer_size(struct rpmsg_virtio_device *rvdev)
{
	unsigned int role = rpmsg_virtio_get_role(rvdev);
	int length = 0;
#ifndef VIRTIO_SLAVE_ONLY
	if (role == RPMSG_MASTER) {
		/*
		 * As master we supply the buffers ourselves and they all
		 * have the fixed RPMSG_BUFFER_SIZE, so just use the macro.
		 */
		length = RPMSG_BUFFER_SIZE - sizeof(struct rpmsg_hdr);
	}
#endif /*!VIRTIO_SLAVE_ONLY*/
#ifndef VIRTIO_MASTER_ONLY
	if (role == RPMSG_REMOTE) {
		/*
		 * The master provides the buffers, so query the descriptor
		 * size from the send virtqueue.
		 */
		length =
		    (int)virtqueue_get_desc_size(rvdev->svq) -
		    sizeof(struct rpmsg_hdr);
	}
#endif /*!VIRTIO_MASTER_ONLY*/
	return length;
}
/**
 * This function sends rpmsg "message" to remote device, optionally
 * polling for a free transmit buffer for up to RPMSG_TICK_COUNT msecs.
 *
 * @param rdev - pointer to rpmsg device
 * @param src - source address of channel
 * @param dst - destination address of channel
 * @param data - data to transmit
 * @param size - size of data
 * @param wait - boolean, wait or not for buffer to become
 * available
 *
 * @return - size of data sent or negative value for failure.
 *
 */
static int rpmsg_virtio_send_offchannel_raw(struct rpmsg_device *rdev,
					    uint32_t src, uint32_t dst,
					    const void *data,
					    int size, int wait)
{
	struct rpmsg_virtio_device *rvdev;
	struct rpmsg_hdr rp_hdr;
	void *buffer = NULL;
	unsigned short idx;
	int tick_count = 0;
	unsigned long buff_len;
	int status;
	struct metal_io_region *io;
	/* Get the associated remote device for channel. */
	rvdev = metal_container_of(rdev, struct rpmsg_virtio_device, rdev);
	status = rpmsg_virtio_get_status(rvdev);
	/* Validate device state */
	if (!(status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
		return RPMSG_ERR_DEV_STATE;
	}
	if (wait)
		tick_count = RPMSG_TICK_COUNT / RPMSG_TICKS_PER_INTERVAL;
	else
		tick_count = 0;
	while (1) {
		int avail_size;
		/* Lock the device to enable exclusive access to virtqueues */
		metal_mutex_acquire(&rdev->lock);
		avail_size = _rpmsg_virtio_get_buffer_size(rvdev);
		if (size <= avail_size)
			buffer = rpmsg_virtio_get_tx_buffer(rvdev, &buff_len,
							    &idx);
		metal_mutex_release(&rdev->lock);
		if (buffer || !tick_count)
			break;
		/* Buffers exist but the message can never fit: give up
		 * instead of polling forever.
		 */
		if (avail_size != 0)
			return RPMSG_ERR_BUFF_SIZE;
		metal_sleep_usec(RPMSG_TICKS_PER_INTERVAL);
		tick_count--;
	}
	if (!buffer)
		return RPMSG_ERR_NO_BUFF;
	/* Initialize RPMSG header. */
	rp_hdr.dst = dst;
	rp_hdr.src = src;
	rp_hdr.len = size;
	rp_hdr.reserved = 0;
	/* BUGFIX: flags was previously left uninitialized, sending an
	 * arbitrary stack value to the peer; clear it explicitly.
	 */
	rp_hdr.flags = 0;
	/* Copy data to rpmsg buffer. */
	io = rvdev->shbuf_io;
	status = metal_io_block_write(io, metal_io_virt_to_offset(io, buffer),
				      &rp_hdr, sizeof(rp_hdr));
	RPMSG_ASSERT(status == sizeof(rp_hdr), "failed to write header\n");
	status = metal_io_block_write(io,
				      metal_io_virt_to_offset(io,
						RPMSG_LOCATE_DATA(buffer)),
				      data, size);
	RPMSG_ASSERT(status == size, "failed to write buffer\n");
	metal_mutex_acquire(&rdev->lock);
	/* Enqueue buffer on virtqueue. */
	status = rpmsg_virtio_enqueue_buffer(rvdev, buffer, buff_len, idx);
	RPMSG_ASSERT(status == VQUEUE_SUCCESS, "failed to enqueue buffer\n");
	/* Let the other side know that there is a job to process. */
	virtqueue_kick(rvdev->svq);
	metal_mutex_release(&rdev->lock);
	return size;
}
/**
 * rpmsg_virtio_tx_callback
 *
 * Tx virtqueue callback; transmit completion requires no processing,
 * so this is a deliberate no-op.
 *
 * @param vq - pointer to virtqueue on which Tx has been
 * completed.
 *
 */
static void rpmsg_virtio_tx_callback(struct virtqueue *vq)
{
	(void)vq;
}
/**
 * rpmsg_virtio_rx_callback
 *
 * Rx callback function: drains the receive virtqueue, dispatching each
 * message to the endpoint registered for its destination address and
 * returning the buffer to the vring afterwards.
 *
 * @param vq - pointer to virtqueue on which messages is received
 *
 */
static void rpmsg_virtio_rx_callback(struct virtqueue *vq)
{
	struct virtio_device *vdev = vq->vq_dev;
	struct rpmsg_virtio_device *rvdev = vdev->priv;
	struct rpmsg_device *rdev = &rvdev->rdev;
	struct rpmsg_endpoint *ept;
	struct rpmsg_hdr *rp_hdr;
	unsigned long len;
	unsigned short idx;
	int status;
	metal_mutex_acquire(&rdev->lock);
	/* Process the received data from remote node */
	rp_hdr = (struct rpmsg_hdr *)rpmsg_virtio_get_rx_buffer(rvdev,
								&len, &idx);
	metal_mutex_release(&rdev->lock);
	while (rp_hdr) {
		/* Get the channel node from the remote device channels list. */
		metal_mutex_acquire(&rdev->lock);
		ept = rpmsg_get_ept_from_addr(rdev, rp_hdr->dst);
		metal_mutex_release(&rdev->lock);
		if (!ept)
			/* Fatal error no endpoint for the given dst addr. */
			/* NOTE(review): the current buffer is not returned
			 * to the vring on this path - confirm the leak is
			 * intended.
			 */
			return;
		if (ept->dest_addr == RPMSG_ADDR_ANY) {
			/*
			 * First message received from the remote side,
			 * update channel destination address
			 */
			ept->dest_addr = rp_hdr->src;
		}
		/* Payload starts right after the header. */
		status = ept->cb(ept, (void *)RPMSG_LOCATE_DATA(rp_hdr),
				 rp_hdr->len, ept->addr, ept->priv);
		RPMSG_ASSERT(status == RPMSG_SUCCESS,
			     "unexpected callback status\n");
		metal_mutex_acquire(&rdev->lock);
		/* Return used buffers. */
		rpmsg_virtio_return_buffer(rvdev, rp_hdr, len, idx);
		rp_hdr = (struct rpmsg_hdr *)
			rpmsg_virtio_get_rx_buffer(rvdev, &len, &idx);
		metal_mutex_release(&rdev->lock);
	}
}
/**
 * rpmsg_virtio_ns_callback
 *
 * This callback handles name service announcement from the remote device
 * and creates/deletes rpmsg channels.
 *
 * @param ept - pointer to the local name-service endpoint
 * @param data - pointer to received messages
 * @param len - length of received data
 * @param src - source address
 * @param priv - any private data
 *
 * @return - rpmsg endpoint callback handled
 */
/* Optimization is forced off for this function - presumably to keep the
 * compiler from reordering/widening the shared-memory accesses; confirm
 * before removing the pragmas.
 */
#if defined (__GNUC__) && ! defined (__CC_ARM)
#pragma GCC push_options
#pragma GCC optimize ("O0")
#elif defined (__CC_ARM)
#pragma push
#pragma O0
#endif
static int rpmsg_virtio_ns_callback(struct rpmsg_endpoint *ept, void *data,
				    size_t len, uint32_t src, void *priv)
{
	struct rpmsg_device *rdev = ept->rdev;
	/* NOTE(review): this cast assumes rdev is the first member of
	 * struct rpmsg_virtio_device - verify against the type definition.
	 */
	struct rpmsg_virtio_device *rvdev = (struct rpmsg_virtio_device *)rdev;
	struct metal_io_region *io = rvdev->shbuf_io;
	struct rpmsg_endpoint *_ept;
	struct rpmsg_ns_msg *ns_msg;
	uint32_t dest;
	char name[RPMSG_NAME_SIZE];
	(void)priv;
	(void)src;
	ns_msg = (struct rpmsg_ns_msg *)data;
	if (len != sizeof(*ns_msg))
		/* Returns as the message is corrupted */
		return RPMSG_SUCCESS;
	/* Copy the announced service name out of shared memory. */
	metal_io_block_read(io,
			    metal_io_virt_to_offset(io, ns_msg->name),
			    &name, sizeof(name));
	dest = ns_msg->addr;
	/* check if a Ept has been locally registered */
	metal_mutex_acquire(&rdev->lock);
	_ept = rpmsg_get_endpoint(rdev, name, RPMSG_ADDR_ANY, dest);
	if (ns_msg->flags & RPMSG_NS_DESTROY) {
		if (_ept)
			_ept->dest_addr = RPMSG_ADDR_ANY;
		metal_mutex_release(&rdev->lock);
		if (_ept && _ept->ns_unbind_cb)
			_ept->ns_unbind_cb(ept);
	} else {
		if (!_ept) {
			/*
			 * send callback to application, that can
			 * - create the associated endpoints.
			 * - store information for future use.
			 * - just ignore the request as service not supported.
			 */
			metal_mutex_release(&rdev->lock);
			if (rdev->ns_bind_cb)
				rdev->ns_bind_cb(rdev, name, dest);
		} else {
			/* Bind the local endpoint to the announced address. */
			_ept->dest_addr = dest;
			metal_mutex_release(&rdev->lock);
		}
	}
	return RPMSG_SUCCESS;
}
#if defined (__GNUC__) && ! defined (__CC_ARM)
#pragma GCC pop_options
#elif defined (__CC_ARM)
#pragma pop
#endif
/**
 * rpmsg_virtio_get_buffer_size
 *
 * Public wrapper: returns the usable transmit payload size for the
 * device, taking the device lock around the query.
 *
 * @param rdev - rpmsg device, may be NULL
 *
 * @return - payload size, or RPMSG_ERR_PARAM for a NULL device
 */
int rpmsg_virtio_get_buffer_size(struct rpmsg_device *rdev)
{
	struct rpmsg_virtio_device *rvdev;
	int length;

	if (!rdev)
		return RPMSG_ERR_PARAM;
	metal_mutex_acquire(&rdev->lock);
	rvdev = (struct rpmsg_virtio_device *)rdev;
	length = _rpmsg_virtio_get_buffer_size(rvdev);
	metal_mutex_release(&rdev->lock);
	return length;
}
/**
 * rpmsg_init_vdev
 *
 * Binds an rpmsg device to a virtio device: selects rx/tx virtqueues
 * for the local role, pre-posts zeroed buffers when acting as master,
 * and registers the name-service endpoint when the feature bit is set.
 *
 * @param rvdev - rpmsg virtio device to initialize
 * @param vdev - underlying virtio device (vrings_info already populated)
 * @param ns_bind_cb - callback for unbound name-service announcements
 * @param shm_io - I/O region covering the shared buffer memory
 * @param shpool - shared-memory buffer pool (required for master role)
 *
 * @return - RPMSG_SUCCESS or a negative RPMSG_ERR_* value
 */
int rpmsg_init_vdev(struct rpmsg_virtio_device *rvdev,
		    struct virtio_device *vdev,
		    rpmsg_ns_bind_cb ns_bind_cb,
		    struct metal_io_region *shm_io,
		    struct rpmsg_virtio_shm_pool *shpool)
{
	struct rpmsg_device *rdev;
	const char *vq_names[RPMSG_NUM_VRINGS];
	typedef void (*vqcallback)(struct virtqueue *vq);
	vqcallback callback[RPMSG_NUM_VRINGS];
	unsigned long dev_features;
	int status;
	unsigned int i, role;
	rdev = &rvdev->rdev;
	memset(rdev, 0, sizeof(*rdev));
	metal_mutex_init(&rdev->lock);
	rvdev->vdev = vdev;
	rdev->ns_bind_cb = ns_bind_cb;
	vdev->priv = rvdev;
	rdev->ops.send_offchannel_raw = rpmsg_virtio_send_offchannel_raw;
	role = rpmsg_virtio_get_role(rvdev);
#ifndef VIRTIO_SLAVE_ONLY
	if (role == RPMSG_MASTER) {
		/*
		 * As master we manage the shared buffers, so a valid,
		 * non-empty shared memory pool is mandatory.
		 */
		if (!shpool)
			return RPMSG_ERR_PARAM;
		if (!shpool->size)
			return RPMSG_ERR_NO_BUFF;
		rvdev->shpool = shpool;
		vq_names[0] = "rx_vq";
		vq_names[1] = "tx_vq";
		callback[0] = rpmsg_virtio_rx_callback;
		callback[1] = rpmsg_virtio_tx_callback;
		rvdev->rvq  = vdev->vrings_info[0].vq;
		rvdev->svq  = vdev->vrings_info[1].vq;
	}
#endif /*!VIRTIO_SLAVE_ONLY*/
#ifndef VIRTIO_MASTER_ONLY
	(void)shpool;
	if (role == RPMSG_REMOTE) {
		/* Remote role: vring assignment mirrors the master's. */
		vq_names[0] = "tx_vq";
		vq_names[1] = "rx_vq";
		callback[0] = rpmsg_virtio_tx_callback;
		callback[1] = rpmsg_virtio_rx_callback;
		rvdev->rvq = vdev->vrings_info[1].vq;
		rvdev->svq = vdev->vrings_info[0].vq;
	}
#endif /*!VIRTIO_MASTER_ONLY*/
	rvdev->shbuf_io = shm_io;
#ifndef VIRTIO_MASTER_ONLY
	if (role == RPMSG_REMOTE) {
		/* wait synchro with the master */
		rpmsg_virtio_wait_remote_ready(rvdev);
	}
#endif /*!VIRTIO_MASTER_ONLY*/
	/* Create virtqueues for remote device */
	status = rpmsg_virtio_create_virtqueues(rvdev, 0, RPMSG_NUM_VRINGS,
						vq_names, callback);
	if (status != RPMSG_SUCCESS)
		return status;
	/* TODO: can have a virtio function to set the shared memory I/O */
	for (i = 0; i < RPMSG_NUM_VRINGS; i++) {
		struct virtqueue *vq;
		vq = vdev->vrings_info[i].vq;
		vq->shm_io = shm_io;
	}
#ifndef VIRTIO_SLAVE_ONLY
	if (role == RPMSG_MASTER) {
		struct virtqueue_buf vqbuf;
		unsigned int idx;
		void *buffer;
		vqbuf.len = RPMSG_BUFFER_SIZE;
		for (idx = 0; idx < rvdev->rvq->vq_nentries; idx++) {
			/* Initialize TX virtqueue buffers for remote device */
			buffer = rpmsg_virtio_shm_pool_get_buffer(shpool,
							RPMSG_BUFFER_SIZE);
			if (!buffer) {
				return RPMSG_ERR_NO_BUFF;
			}
			vqbuf.buf = buffer;
			/* Zero the buffer before exposing it to the peer. */
			metal_io_block_set(shm_io,
					   metal_io_virt_to_offset(shm_io,
								   buffer),
					   0x00, RPMSG_BUFFER_SIZE);
			status =
			    virtqueue_add_buffer(rvdev->rvq, &vqbuf, 0, 1,
						 buffer);
			if (status != RPMSG_SUCCESS) {
				return status;
			}
		}
	}
#endif /*!VIRTIO_SLAVE_ONLY*/
	/* Initialize channels and endpoints list */
	metal_list_init(&rdev->endpoints);
	dev_features = rpmsg_virtio_get_features(rvdev);
	/*
	 * Create name service announcement endpoint if device supports name
	 * service announcement feature.
	 */
	if ((dev_features & (1 << VIRTIO_RPMSG_F_NS))) {
		rpmsg_init_ept(&rdev->ns_ept, "NS",
			       RPMSG_NS_EPT_ADDR, RPMSG_NS_EPT_ADDR,
			       rpmsg_virtio_ns_callback, NULL);
		(void)rpmsg_register_endpoint(rdev, &rdev->ns_ept);
	}
#ifndef VIRTIO_SLAVE_ONLY
	if (role == RPMSG_MASTER)
		rpmsg_virtio_set_status(rvdev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
#endif /*!VIRTIO_SLAVE_ONLY*/
	return status;
}
/**
 * rpmsg_deinit_vdev
 *
 * Tears down an rpmsg virtio device: destroys every registered
 * endpoint, clears the virtqueue pointers and releases the lock.
 */
void rpmsg_deinit_vdev(struct rpmsg_virtio_device *rvdev)
{
	struct rpmsg_device *rdev = &rvdev->rdev;

	/* rpmsg_destroy_ept() unlinks the endpoint, so repeatedly take
	 * the list head until the list drains.
	 */
	while (!metal_list_is_empty(&rdev->endpoints)) {
		struct rpmsg_endpoint *ept =
			metal_container_of(rdev->endpoints.next,
					   struct rpmsg_endpoint, node);

		rpmsg_destroy_ept(ept);
	}
	rvdev->rvq = 0;
	rvdev->svq = 0;
	metal_mutex_deinit(&rdev->lock);
}
/*-
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <openamp/virtio.h>
static const char *virtio_feature_name(unsigned long feature,
const struct virtio_feature_desc *);
/* TODO: this table may change depending on the types of devices we support. */
static const struct virtio_ident {
	unsigned short devid;
	const char *name;
} virtio_ident_table[] = {
	{ VIRTIO_ID_NETWORK,  "Network" },
	{ VIRTIO_ID_BLOCK,    "Block" },
	{ VIRTIO_ID_CONSOLE,  "Console" },
	{ VIRTIO_ID_ENTROPY,  "Entropy" },
	{ VIRTIO_ID_BALLOON,  "Balloon" },
	{ VIRTIO_ID_IOMEMORY, "IOMemory" },
	{ VIRTIO_ID_SCSI,     "SCSI" },
	{ VIRTIO_ID_9P,       "9P Transport" },
	{ 0, NULL }
};
/* Device independent features, consulted by virtio_feature_name() after
 * any device-specific table.
 */
static const struct virtio_feature_desc virtio_common_feature_desc[] = {
	{VIRTIO_F_NOTIFY_ON_EMPTY, "NotifyOnEmpty"},
	{VIRTIO_RING_F_INDIRECT_DESC, "RingIndirect"},
	{VIRTIO_RING_F_EVENT_IDX, "EventIdx"},
	{VIRTIO_F_BAD_FEATURE, "BadFeature"},
	{0, NULL}
};
/* Return the human-readable name for a virtio device ID, or NULL when
 * the ID is not in virtio_ident_table.
 */
const char *virtio_dev_name(unsigned short devid)
{
	const struct virtio_ident *entry;

	for (entry = virtio_ident_table; entry->name != NULL; entry++) {
		if (entry->devid == devid)
			return entry->name;
	}
	return NULL;
}
/* Map a feature bit value to its name, checking the device-specific
 * table first and the common table second; NULL when not found.
 */
static const char *virtio_feature_name(unsigned long val,
				       const struct virtio_feature_desc *desc)
{
	const struct virtio_feature_desc *tables[2] = {
		desc, virtio_common_feature_desc
	};
	int t, entry;

	for (t = 0; t < 2; t++) {
		if (!tables[t])
			continue;
		for (entry = 0; tables[t][entry].vfd_val != 0; entry++) {
			if (val == tables[t][entry].vfd_val)
				return tables[t][entry].vfd_str;
		}
	}
	return NULL;
}
/* Stub: device/feature description is not implemented; the call to
 * virtio_feature_name() only keeps that helper referenced for future use.
 */
void virtio_describe(struct virtio_device *dev, const char *msg,
		     uint32_t features, struct virtio_feature_desc *desc)
{
	(void)dev;
	(void)msg;
	(void)features;
	// TODO: Not used currently - keeping it for future use
	virtio_feature_name(0, desc);
}
/**
 * virtio_create_virtqueues
 *
 * Creates nvqs virtqueues on top of the device's pre-described vrings.
 * In the master role each vring's shared memory is zeroed before the
 * queue is created.
 *
 * @param vdev - virtio device with vrings_info populated
 * @param flags - unused
 * @param nvqs - number of virtqueues to create
 * @param names - per-queue names
 * @param callbacks - per-queue completion callbacks
 *
 * @return - 0 on success, negative error otherwise
 */
int virtio_create_virtqueues(struct virtio_device *vdev, unsigned int flags,
			     unsigned int nvqs, const char *names[],
			     vq_callback *callbacks[])
{
	struct virtio_vring_info *vring_info;
	struct vring_alloc_info *vring_alloc;
	unsigned int num_vrings, i;
	int ret;
	(void)flags;
	num_vrings = vdev->vrings_num;
	if (nvqs > num_vrings)
		return -ERROR_VQUEUE_INVLD_PARAM;
	/* Initialize virtqueue for each vring */
	for (i = 0; i < nvqs; i++) {
		vring_info = &vdev->vrings_info[i];
		vring_alloc = &vring_info->info;
#ifndef VIRTIO_SLAVE_ONLY
		if (vdev->role == VIRTIO_DEV_MASTER) {
			size_t offset;
			struct metal_io_region *io = vring_info->io;
			/* Master owns the ring memory: clear it first. */
			offset = metal_io_virt_to_offset(io,
							 vring_alloc->vaddr);
			metal_io_block_set(io, offset, 0,
					   vring_size(vring_alloc->num_descs,
						      vring_alloc->align));
		}
#endif
		ret = virtqueue_create(vdev, i, names[i], vring_alloc,
				       callbacks[i], vdev->func->notify,
				       vring_info->vq);
		if (ret)
			return ret;
	}
	return 0;
}
/*-
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <string.h>
#include <openamp/virtqueue.h>
#include <metal/atomic.h>
#include <metal/log.h>
#include <metal/alloc.h>
/* Prototype for internal functions. */
static void vq_ring_init(struct virtqueue *, void *, int);
static void vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t vq_ring_add_buffer(struct virtqueue *, struct vring_desc *,
uint16_t, struct virtqueue_buf *, int, int);
static int vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static void vq_ring_free_chain(struct virtqueue *, uint16_t);
static int vq_ring_must_notify_host(struct virtqueue *vq);
static void vq_ring_notify_host(struct virtqueue *vq);
static int virtqueue_nused(struct virtqueue *vq);
/* Default implementation of P2V based on libmetal: translate a physical
 * address through the queue's shared-memory I/O region.
 */
static inline void *virtqueue_phys_to_virt(struct virtqueue *vq,
					   metal_phys_addr_t phys)
{
	struct metal_io_region *io = vq->shm_io;
	return metal_io_phys_to_virt(io, phys);
}
/* Default implementation of V2P based on libmetal: translate a virtual
 * buffer address through the queue's shared-memory I/O region.
 */
static inline metal_phys_addr_t virtqueue_virt_to_phys(struct virtqueue *vq,
						       void *buf)
{
	struct metal_io_region *io = vq->shm_io;
	return metal_io_virt_to_phys(io, buf);
}
/**
 * virtqueue_create - Creates new VirtIO queue
 *
 * @param virt_dev - Pointer to VirtIO device
 * @param id - VirtIO queue ID , must be unique
 * @param name - Name of VirtIO queue
 * @param ring - Pointer to vring_alloc_info control block
 * @param callback - Pointer to callback function, invoked
 * when message is available on VirtIO queue
 * @param notify - Pointer to notify function, used to notify
 * other side that there is job available for it
 * @param vq - Created VirtIO queue.
 *
 * @return - Function status
 */
int virtqueue_create(struct virtio_device *virt_dev, unsigned short id,
		     const char *name, struct vring_alloc_info *ring,
		     void (*callback)(struct virtqueue *vq),
		     void (*notify)(struct virtqueue *vq),
		     struct virtqueue *vq)
{
	int status = VQUEUE_SUCCESS;
	/* Descriptor count must be a non-zero power of two. */
	VQ_PARAM_CHK(ring == NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs == 0, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1), status,
		     ERROR_VRING_ALIGN);
	VQ_PARAM_CHK(vq == NULL, status, ERROR_NO_MEM);
	if (status == VQUEUE_SUCCESS) {
		vq->vq_dev = virt_dev;
		vq->vq_name = name;
		vq->vq_queue_index = id;
		vq->vq_nentries = ring->num_descs;
		vq->vq_free_cnt = vq->vq_nentries;
		vq->callback = callback;
		vq->notify = notify;
		/* Initialize vring control block in virtqueue. */
		vq_ring_init(vq, (void *)ring->vaddr, ring->align);
		/* Disable callbacks - will be enabled by the application
		 * once initialization is completed.
		 */
		virtqueue_disable_cb(vq);
	}
	return (status);
}
/**
 * virtqueue_add_buffer() - Enqueues new buffer in vring for consumption
 * by other side. Readable buffers are always
 * inserted before writable buffers
 *
 * @param vq - Pointer to VirtIO queue control block.
 * @param buf_list - Pointer to a list of virtqueue buffers.
 * @param readable - Number of readable buffers
 * @param writable - Number of writable buffers
 * @param cookie - Pointer to hold call back data
 *
 * @return - Function status
 */
int virtqueue_add_buffer(struct virtqueue *vq, struct virtqueue_buf *buf_list,
			 int readable, int writable, void *cookie)
{
	struct vq_desc_extra *dxp = NULL;
	int status = VQUEUE_SUCCESS;
	uint16_t head_idx;
	uint16_t idx;
	int needed;
	needed = readable + writable;
	VQ_PARAM_CHK(vq == NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(needed < 1, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(vq->vq_free_cnt == 0, status, ERROR_VRING_FULL);
	VQUEUE_BUSY(vq);
	if (status == VQUEUE_SUCCESS) {
		VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
		head_idx = vq->vq_desc_head_idx;
		VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
		dxp = &vq->vq_descx[head_idx];
		VQASSERT(vq, dxp->cookie == NULL,
			 "cookie already exists for index");
		/* Remember the caller cookie and chain length so the
		 * buffer can be identified when it comes back used.
		 */
		dxp->cookie = cookie;
		dxp->ndescs = needed;
		/* Enqueue buffer onto the ring. */
		idx = vq_ring_add_buffer(vq, vq->vq_ring.desc, head_idx,
					 buf_list, readable, writable);
		vq->vq_desc_head_idx = idx;
		vq->vq_free_cnt -= needed;
		if (vq->vq_free_cnt == 0) {
			VQ_RING_ASSERT_CHAIN_TERM(vq);
		} else {
			VQ_RING_ASSERT_VALID_IDX(vq, idx);
		}
		/*
		 * Update vring_avail control block fields so that other
		 * side can get buffer using it.
		 */
		vq_ring_update_avail(vq, head_idx);
	}
	VQUEUE_IDLE(vq);
	return status;
}
/**
 * virtqueue_get_buffer - Returns used buffers from VirtIO queue
 *
 * @param vq  - Pointer to VirtIO queue control block
 * @param len - Out: length of consumed buffer (may be NULL)
 * @param idx - Out: used-ring slot the buffer came from (may be NULL)
 *
 * @return - Cookie passed to virtqueue_add_buffer() for this chain, or
 *           NULL when no used buffer is pending
 */
void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx)
{
    struct vring_used_elem *uep;
    void *cookie;
    uint16_t used_idx, desc_idx;

    /* Nothing consumed yet if our cursor has caught up with used->idx. */
    if (!vq || vq->vq_used_cons_idx == vq->vq_ring.used->idx)
        return (NULL);
    VQUEUE_BUSY(vq);
    used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
    uep = &vq->vq_ring.used->ring[used_idx];
    /* Read the used element only after observing the updated used->idx. */
    atomic_thread_fence(memory_order_seq_cst);
    desc_idx = (uint16_t)uep->id;
    if (len)
        *len = uep->len;
    /* Return the whole descriptor chain to the free list. */
    vq_ring_free_chain(vq, desc_idx);
    cookie = vq->vq_descx[desc_idx].cookie;
    vq->vq_descx[desc_idx].cookie = NULL;
    if (idx)
        *idx = used_idx;
    VQUEUE_IDLE(vq);
    return cookie;
}
uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx)
{
return vq->vq_ring.desc[idx].len;
}
/**
 * virtqueue_free - Frees VirtIO queue resources
 *
 * Logs a warning when descriptors are still outstanding, then releases
 * the queue memory. A NULL queue is silently ignored.
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_free(struct virtqueue *vq)
{
    if (!vq)
        return;

    /* free count below capacity means buffers were never reclaimed */
    if (vq->vq_free_cnt != vq->vq_nentries)
        metal_log(METAL_LOG_WARNING,
                  "%s: freeing non-empty virtqueue\r\n",
                  vq->vq_name);

    metal_free_memory(vq);
}
/**
 * virtqueue_get_available_buffer - Returns buffer available for use in the
 *                                  VirtIO queue
 *
 * @param vq        - Pointer to VirtIO queue control block
 * @param avail_idx - Out: descriptor index taken from the avail ring
 *                    (must be non-NULL)
 * @param len       - Out: length of buffer (must be non-NULL)
 *
 * @return - Virtual address of the available buffer, or NULL when the
 *           avail ring holds nothing new
 */
void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx,
                                     uint32_t *len)
{
    uint16_t head_idx = 0;
    void *buffer;

    /* Observe the producer's avail->idx update before reading the ring. */
    atomic_thread_fence(memory_order_seq_cst);
    if (vq->vq_available_idx == vq->vq_ring.avail->idx) {
        return NULL;
    }
    VQUEUE_BUSY(vq);
    /* Consume one avail slot; mask keeps the index inside the ring. */
    head_idx = vq->vq_available_idx++ & (vq->vq_nentries - 1);
    *avail_idx = vq->vq_ring.avail->ring[head_idx];
    /* Translate the guest-physical descriptor address for local use. */
    buffer = virtqueue_phys_to_virt(vq, vq->vq_ring.desc[*avail_idx].addr);
    *len = vq->vq_ring.desc[*avail_idx].len;
    VQUEUE_IDLE(vq);
    return buffer;
}
/**
 * virtqueue_add_consumed_buffer - Returns consumed buffer back to VirtIO queue
 *
 * @param vq       - Pointer to VirtIO queue control block
 * @param head_idx - Index of vring desc containing used buffer
 * @param len      - Number of bytes actually written into the buffer
 *
 * @return - VQUEUE_SUCCESS, or ERROR_VRING_NO_BUFF on a bad index
 */
int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx,
                                  uint32_t len)
{
    struct vring_used_elem *used_desc = NULL;
    uint16_t used_idx;

    /* NOTE(review): '>' accepts head_idx == vq_nentries, one past the last
     * valid descriptor; '>=' looks intended — confirm against callers. */
    if (head_idx > vq->vq_nentries) {
        return ERROR_VRING_NO_BUFF;
    }
    VQUEUE_BUSY(vq);
    used_idx = vq->vq_ring.used->idx & (vq->vq_nentries - 1);
    used_desc = &vq->vq_ring.used->ring[used_idx];
    used_desc->id = head_idx;
    used_desc->len = len;
    /* Publish the used element before bumping used->idx, so the other
     * side never sees the new index with stale element contents. */
    atomic_thread_fence(memory_order_seq_cst);
    vq->vq_ring.used->idx++;
    VQUEUE_IDLE(vq);
    return VQUEUE_SUCCESS;
}
/**
 * virtqueue_enable_cb - Enables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 * @return - Non-zero when work is already pending and the caller should
 *           process the queue immediately, 0 otherwise
 */
int virtqueue_enable_cb(struct virtqueue *vq)
{
    /* Zero threshold: interrupt on the very next consumed descriptor. */
    int pending = vq_ring_enable_interrupt(vq, 0);

    return pending;
}
/**
 * virtqueue_disable_cb - Disables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_disable_cb(struct virtqueue *vq)
{
    VQUEUE_BUSY(vq);
    if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
        /* Event-index mode has no disable flag: park the event index far
         * behind the consumer cursor so it is never matched. */
        vring_used_event(&vq->vq_ring) =
            vq->vq_used_cons_idx - vq->vq_nentries - 1;
    } else {
        /* Legacy mode: ask the other side not to interrupt us. */
        vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
    }
    VQUEUE_IDLE(vq);
}
/**
 * virtqueue_kick - Notifies other side that there is buffer available for it.
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_kick(struct virtqueue *vq)
{
    VQUEUE_BUSY(vq);
    /* Ensure updated avail->idx is visible to host. */
    atomic_thread_fence(memory_order_seq_cst);
    /* Skip the (possibly expensive) notify when the other side opted out
     * or, in event-idx mode, has not reached its event threshold. */
    if (vq_ring_must_notify_host(vq))
        vq_ring_notify_host(vq);
    /* All queued chains have now been announced. */
    vq->vq_queued_cnt = 0;
    VQUEUE_IDLE(vq);
}
/**
* virtqueue_dump Dumps important virtqueue fields , use for debugging purposes
*
* @param vq - Pointer to VirtIO queue control block
*/
void virtqueue_dump(struct virtqueue *vq)
{
if (!vq)
return;
metal_log(METAL_LOG_DEBUG,
"VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
"desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
"used.idx=%d; avail.flags=0x%x; used.flags=0x%x\r\n",
vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
vq->vq_ring.used->flags);
}
/**
 * virtqueue_get_desc_size - Returns vring descriptor size
 *
 * Peeks (without consuming) at the next available descriptor and reports
 * its buffer length.
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 * @return - Descriptor length, or 0 when no descriptor is available
 */
uint32_t virtqueue_get_desc_size(struct virtqueue *vq)
{
    uint32_t length = 0;

    /* Nothing pending: our cursor has caught up with the producer. */
    if (vq->vq_available_idx == vq->vq_ring.avail->idx)
        return 0;

    VQUEUE_BUSY(vq);
    {
        uint16_t slot = vq->vq_available_idx & (vq->vq_nentries - 1);
        uint16_t desc_i = vq->vq_ring.avail->ring[slot];

        length = vq->vq_ring.desc[desc_i].len;
    }
    VQUEUE_IDLE(vq);

    return length;
}
/**************************************************************************
* Helper Functions *
**************************************************************************/
/**
 * vq_ring_add_buffer - Fills a chain of free descriptors with the caller's
 *                      buffer list.
 *
 * Walks 'needed' descriptors starting at head_idx, linking them with
 * VRING_DESC_F_NEXT and marking the trailing 'writable' ones with
 * VRING_DESC_F_WRITE.
 *
 * Returns the index following the consumed chain, i.e. the new head of
 * the free-descriptor list.
 */
static uint16_t vq_ring_add_buffer(struct virtqueue *vq,
                                   struct vring_desc *desc, uint16_t head_idx,
                                   struct virtqueue_buf *buf_list, int readable,
                                   int writable)
{
    struct vring_desc *dp;
    int i, needed;
    uint16_t idx;

    (void)vq;
    needed = readable + writable;
    /* idx follows the free chain via dp->next; dp is set on the first
     * iteration before the update expression ever reads it. */
    for (i = 0, idx = head_idx; i < needed; i++, idx = dp->next) {
        VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
                 "premature end of free desc chain");
        dp = &desc[idx];
        dp->addr = virtqueue_virt_to_phys(vq, buf_list[i].buf);
        dp->len = buf_list[i].len;
        dp->flags = 0;
        /* Every descriptor but the last chains to its successor. */
        if (i < needed - 1)
            dp->flags |= VRING_DESC_F_NEXT;
        /*
         * Readable buffers are inserted into vring before the
         * writable buffers.
         */
        if (i >= readable)
            dp->flags |= VRING_DESC_F_WRITE;
    }
    return (idx);
}
/**
 * vq_ring_free_chain - Returns the descriptor chain starting at desc_idx
 *                      to the free list.
 *
 * Credits the chain's full descriptor count (recorded in
 * vq_descx[desc_idx].ndescs by virtqueue_add_buffer) back to vq_free_cnt,
 * then walks the chain to verify its length before splicing it onto the
 * head of the free list.
 */
static void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
    struct vring_desc *dp;
    struct vq_desc_extra *dxp;

    VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
    dp = &vq->vq_ring.desc[desc_idx];
    dxp = &vq->vq_descx[desc_idx];
    if (vq->vq_free_cnt == 0) {
        VQ_RING_ASSERT_CHAIN_TERM(vq);
    }
    vq->vq_free_cnt += dxp->ndescs;
    /* Count down ndescs while walking; it must hit 0 exactly when the
     * chain's NEXT links run out. */
    dxp->ndescs--;
    if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
        while (dp->flags & VRING_DESC_F_NEXT) {
            VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
            dp = &vq->vq_ring.desc[dp->next];
            dxp->ndescs--;
        }
    }
    VQASSERT(vq, (dxp->ndescs == 0),
             "failed to free entire desc chain, remaining");
    /*
     * We must append the existing free chain, if any, to the end of
     * newly freed chain. If the virtqueue was completely used, then
     * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
     */
    dp->next = vq->vq_desc_head_idx;
    vq->vq_desc_head_idx = desc_idx;
}
/**
 * vq_ring_init - Lays out the vring in ring_mem and links every
 *                descriptor into one free chain terminated by
 *                VQ_RING_DESC_CHAIN_END.
 */
static void vq_ring_init(struct virtqueue *vq, void *ring_mem, int alignment)
{
    struct vring *ring = &vq->vq_ring;
    int count = vq->vq_nentries;
    int n;

    vring_init(ring, count, (unsigned char *)ring_mem, alignment);

    /* desc[i] -> desc[i + 1]; the last one terminates the chain. */
    for (n = 0; n < count - 1; n++)
        ring->desc[n].next = n + 1;
    ring->desc[n].next = VQ_RING_DESC_CHAIN_END;
}
/**
 * vq_ring_update_avail - Publishes a descriptor chain head in the avail
 *                        ring and advances avail->idx.
 */
static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
    uint16_t avail_idx;

    /*
     * Place the head of the descriptor chain into the next slot and make
     * it usable to the host. The chain is made available now rather than
     * deferring to virtqueue_notify() in the hopes that if the host is
     * currently running on another CPU, we can keep it processing the new
     * descriptor.
     */
    avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
    vq->vq_ring.avail->ring[avail_idx] = desc_idx;
    /* The slot write must be visible before the index bump below. */
    atomic_thread_fence(memory_order_seq_cst);
    vq->vq_ring.avail->idx++;
    /* Keep pending count until virtqueue_notify(). */
    vq->vq_queued_cnt++;
}
/**
 * vq_ring_enable_interrupt - Re-arms callbacks, requesting notification
 *                            once 'ndesc' more descriptors are consumed.
 *
 * @return - 1 when enough descriptors were already consumed and the
 *           caller should process the queue itself, 0 otherwise
 */
static int vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{
    /*
     * Enable interrupts, making sure we get the latest index of
     * what's already been consumed.
     */
    if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
        /* Event-index mode: interrupt after ndesc more consumptions. */
        vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
    } else {
        /* Legacy mode: simply clear the suppression flag. */
        vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
    }
    atomic_thread_fence(memory_order_seq_cst);
    /*
     * Enough items may have already been consumed to meet our threshold
     * since we last checked. Let our caller know so it processes the new
     * entries.
     */
    if (virtqueue_nused(vq) > ndesc) {
        return 1;
    }
    return 0;
}
/**
*
* virtqueue_interrupt
*
*/
void virtqueue_notification(struct virtqueue *vq)
{
atomic_thread_fence(memory_order_seq_cst);
if (vq->callback)
vq->callback(vq);
}
/**
 * vq_ring_must_notify_host - Decides whether a kick is required after
 *                            publishing buffers.
 *
 * @return - Non-zero when the other side asked to be notified
 */
static int vq_ring_must_notify_host(struct virtqueue *vq)
{
    uint16_t new_idx, prev_idx, event_idx;

    if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
        /* Event-index mode: notify only if the host's event index lies
         * within the window of entries queued since the last kick. */
        new_idx = vq->vq_ring.avail->idx;
        prev_idx = new_idx - vq->vq_queued_cnt;
        event_idx = vring_avail_event(&vq->vq_ring);
        return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
    }
    /* Legacy mode: notify unless the host suppressed notifications. */
    return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}
/**
*
* vq_ring_notify_host
*
*/
static void vq_ring_notify_host(struct virtqueue *vq)
{
if (vq->notify)
vq->notify(vq);
}
/**
 * virtqueue_nused - Number of used entries published by the other side
 *                   that we have not yet consumed.
 *
 * Relies on uint16_t wrap-around arithmetic between used->idx and our
 * consumer cursor.
 */
static int virtqueue_nused(struct virtqueue *vq)
{
    uint16_t pending;

    pending = (uint16_t)(vq->vq_ring.used->idx - vq->vq_used_cons_idx);
    VQASSERT(vq, pending <= vq->vq_nentries, "used more than available");

    return pending;
}
/**
******************************************************************************
* @file virt_uart.c
* @author MCD Application Team
* @brief UART HAL module driver.
* This file provides firmware functions to manage an rpmsg endpoint
* from user application
*
*
@verbatim
===============================================================================
##### How to use this driver #####
===============================================================================
[..]
The VIRTUAL UART driver can be used as follows:
(#) Initialize the Virtual UART by calling the VIRT_UART_Init() API.
(++) create an endpoint. listener on the OpenAMP-rpmsg channel is now enabled.
Receive data is now possible if user registers a callback to this VIRTUAL UART instance
by calling in providing a callback function when a message is received from
remote processor (VIRT_UART_read_cb)
OpenAMP MW deals with memory allocation/free and signal events
(#) Transmit data on the created rpmsg channel by calling the VIRT_UART_Transmit()
(#) Receive data in calling VIRT_UART_RegisterCallback to register user callback
@endverbatim
******************************************************************************
* @attention
*
* <h2><center>&copy; Copyright (c) 2019 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
/* Includes ------------------------------------------------------------------*/
#include "virt_uart.h"
#include "metal/utilities.h"
/* Private typedef -----------------------------------------------------------*/
/* Private define ------------------------------------------------------------*/
/* this string will be sent to remote processor */
#define RPMSG_SERVICE_NAME "rpmsg-tty-channel"
/* Private macro -------------------------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
/* rpmsg receive hook: recover the owning handle from the embedded
 * endpoint, expose the received payload on it, and fire the user's
 * RX-complete callback if one was registered. Always reports success. */
static int VIRT_UART_read_cb(struct rpmsg_endpoint *ept, void *data,
                             size_t len, uint32_t src, void *priv)
{
    VIRT_UART_HandleTypeDef *handle;

    (void)src;
    (void)priv;

    handle = metal_container_of(ept, VIRT_UART_HandleTypeDef, ept);
    handle->pRxBuffPtr = data;
    handle->RxXferSize = len;

    if (handle->RxCpltCallback)
        handle->RxCpltCallback(handle);

    return 0;
}
/* Create the rpmsg endpoint backing this virtual UART; reception becomes
 * possible once the remote side binds to RPMSG_SERVICE_NAME. */
VIRT_UART_StatusTypeDef VIRT_UART_Init(VIRT_UART_HandleTypeDef *huart)
{
    int rc = OPENAMP_create_endpoint(&huart->ept, RPMSG_SERVICE_NAME,
                                     RPMSG_ADDR_ANY, VIRT_UART_read_cb, NULL);

    return (rc < 0) ? VIRT_UART_ERROR : VIRT_UART_OK;
}
/* Tear down the rpmsg endpoint created by VIRT_UART_Init(). */
VIRT_UART_StatusTypeDef VIRT_UART_DeInit (VIRT_UART_HandleTypeDef *huart)
{
    OPENAMP_destroy_ept(&huart->ept);

    return VIRT_UART_OK;
}
/* Attach a user callback to the handle. Only the RX-complete slot exists
 * today; any other identifier is rejected. */
VIRT_UART_StatusTypeDef VIRT_UART_RegisterCallback(VIRT_UART_HandleTypeDef *huart,
                                                   VIRT_UART_CallbackIDTypeDef CallbackID,
                                                   void (* pCallback)(VIRT_UART_HandleTypeDef *_huart))
{
    if (CallbackID == VIRT_UART_RXCPLT_CB_ID) {
        huart->RxCpltCallback = pCallback;
        return VIRT_UART_OK;
    }

    /* Unknown callback identifier. */
    return VIRT_UART_ERROR;
}
/* Send Size bytes over the rpmsg endpoint. The payload must fit inside
 * one shared buffer minus the 16-byte rpmsg header. */
VIRT_UART_StatusTypeDef VIRT_UART_Transmit(VIRT_UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size)
{
    if (Size > (RPMSG_BUFFER_SIZE - 16))
        return VIRT_UART_ERROR;

    return (OPENAMP_send(&huart->ept, pData, Size) < 0) ? VIRT_UART_ERROR
                                                        : VIRT_UART_OK;
}
/**
******************************************************************************
* @file virt_uart.h
* @author MCD Application Team
* @brief Header file of UART VIRT module.
******************************************************************************
* @attention
*
* <h2><center>&copy; Copyright (c) 2019 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __VIRT_UART_H
#define __VIRT_UART_H
#ifdef __cplusplus
extern "C" {
#endif
/* Includes ------------------------------------------------------------------*/
#include "openamp.h"
/* Exported structures --------------------------------------------------------*/
/* Virtual UART handle: wraps one rpmsg endpoint plus the state exposed to
 * the user RX callback. */
typedef struct __VIRT_UART_HandleTypeDef
{
    struct rpmsg_endpoint ept;          /*!< rpmsg endpoint                              */
    struct rpmsg_virtio_device *rvdev;  /*!< pointer to the rpmsg virtio device          */
    uint8_t *pRxBuffPtr;                /*!< Pointer to VIRTUAL UART Rx transfer Buffer  */
    uint16_t RxXferSize;                /*!< VIRTUAL UART Rx Transfer size               */
    void (* RxCpltCallback)( struct __VIRT_UART_HandleTypeDef * hppp); /*!< RX CPLT callback */
}VIRT_UART_HandleTypeDef;

/* Return codes for the VIRT_UART_* API. */
typedef enum
{
    VIRT_UART_OK       = 0x00U,  /*!< operation completed            */
    VIRT_UART_ERROR    = 0x01U,  /*!< invalid argument or rpmsg failure */
    VIRT_UART_BUSY     = 0x02U,  /*!< reserved: resource busy        */
    VIRT_UART_TIMEOUT  = 0x03U   /*!< reserved: operation timed out  */
} VIRT_UART_StatusTypeDef;

/* Identifiers accepted by VIRT_UART_RegisterCallback(). */
typedef enum
{
    VIRT_UART_RXCPLT_CB_ID = 0x00U,  /*!< RX-complete callback ID */
}VIRT_UART_CallbackIDTypeDef;
/* Exported functions --------------------------------------------------------*/
/* Initialization and de-initialization functions ****************************/
VIRT_UART_StatusTypeDef VIRT_UART_Init(VIRT_UART_HandleTypeDef *huart);
VIRT_UART_StatusTypeDef VIRT_UART_DeInit (VIRT_UART_HandleTypeDef *huart);
VIRT_UART_StatusTypeDef VIRT_UART_RegisterCallback(VIRT_UART_HandleTypeDef *huart,
VIRT_UART_CallbackIDTypeDef CallbackID,
void (* pCallback)(VIRT_UART_HandleTypeDef *_huart));
/* IO operation functions *****************************************************/
VIRT_UART_StatusTypeDef VIRT_UART_Transmit(VIRT_UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size);
#ifdef __cplusplus
}
#endif
#endif /* __VIRT_UART_H */
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
...@@ -226,8 +226,12 @@ ...@@ -226,8 +226,12 @@
<state /> <state />
<state>RT_USING_DLIBC</state> <state>RT_USING_DLIBC</state>
<state>CORE_CM4</state> <state>CORE_CM4</state>
<state>NO_ATOMIC_64_SUPPORT</state>
<state>METAL_INTERNAL</state>
<state>METAL_MAX_DEVICE_REGIONS=2</state>
<state>VIRTIO_SLAVE_ONLY</state>
<state>STM32MP157Axx</state> <state>STM32MP157Axx</state>
<state>USE_HAL_DRIVER</state> <state>__LOG_TRACE_IO_</state>
<state>USE_HAL_DRIVER</state> <state>USE_HAL_DRIVER</state>
</option> </option>
<option> <option>
...@@ -679,7 +683,7 @@ ...@@ -679,7 +683,7 @@
</option> </option>
<option> <option>
<name>OOCObjCopyEnable</name> <name>OOCObjCopyEnable</name>
<state>1</state> <state>0</state>
</option> </option>
</data> </data>
</settings> </settings>
...@@ -1300,8 +1304,12 @@ ...@@ -1300,8 +1304,12 @@
<state /> <state />
<state>RT_USING_DLIBC</state> <state>RT_USING_DLIBC</state>
<state>CORE_CM4</state> <state>CORE_CM4</state>
<state>NO_ATOMIC_64_SUPPORT</state>
<state>METAL_INTERNAL</state>
<state>METAL_MAX_DEVICE_REGIONS=2</state>
<state>VIRTIO_SLAVE_ONLY</state>
<state>STM32MP157Axx</state> <state>STM32MP157Axx</state>
<state>USE_HAL_DRIVER</state> <state>__LOG_TRACE_IO_</state>
<state>USE_HAL_DRIVER</state> <state>USE_HAL_DRIVER</state>
</option> </option>
<option> <option>
...@@ -2177,12 +2185,6 @@ ...@@ -2177,12 +2185,6 @@
<file> <file>
<name>$PROJ_DIR$\..\..\..\components\drivers\misc\pin.c</name> <name>$PROJ_DIR$\..\..\..\components\drivers\misc\pin.c</name>
</file> </file>
<file>
<name>$PROJ_DIR$\..\..\..\components\drivers\misc\adc.c</name>
</file>
<file>
<name>$PROJ_DIR$\..\..\..\components\drivers\misc\dac.c</name>
</file>
<file> <file>
<name>$PROJ_DIR$\..\..\..\components\drivers\serial\serial.c</name> <name>$PROJ_DIR$\..\..\..\components\drivers\serial\serial.c</name>
</file> </file>
...@@ -2246,23 +2248,17 @@ ...@@ -2246,23 +2248,17 @@
</group> </group>
<group> <group>
<name>Drivers</name> <name>Drivers</name>
<file>
<name>$PROJ_DIR$\board\board.c</name>
</file>
<file> <file>
<name>$PROJ_DIR$\board\CubeMX_Config\Common\System\system_stm32mp1xx.c</name> <name>$PROJ_DIR$\board\CubeMX_Config\Common\System\system_stm32mp1xx.c</name>
</file> </file>
<file> <file>
<name>$PROJ_DIR$\board\CubeMX_Config\CM4\Src\stm32mp1xx_hal_msp.c</name> <name>$PROJ_DIR$\..\libraries\STM32MPxx_HAL\CMSIS\Device\ST\STM32MP1xx\Source\Templates\iar\startup_stm32mp15xx.s</name>
</file>
<file>
<name>$PROJ_DIR$\board\ports\drv_hard_i2c.c</name>
</file> </file>
<file> <file>
<name>$PROJ_DIR$\board\ports\stpmic.c</name> <name>$PROJ_DIR$\board\CubeMX_Config\CM4\Src\stm32mp1xx_hal_msp.c</name>
</file> </file>
<file> <file>
<name>$PROJ_DIR$\..\libraries\STM32MPxx_HAL\CMSIS\Device\ST\STM32MP1xx\Source\Templates\iar\startup_stm32mp15xx.s</name> <name>$PROJ_DIR$\board\board.c</name>
</file> </file>
<file> <file>
<name>$PROJ_DIR$\..\libraries\HAL_Drivers\drv_gpio.c</name> <name>$PROJ_DIR$\..\libraries\HAL_Drivers\drv_gpio.c</name>
...@@ -2270,12 +2266,6 @@ ...@@ -2270,12 +2266,6 @@
<file> <file>
<name>$PROJ_DIR$\..\libraries\HAL_Drivers\drv_usart.c</name> <name>$PROJ_DIR$\..\libraries\HAL_Drivers\drv_usart.c</name>
</file> </file>
<file>
<name>$PROJ_DIR$\..\libraries\HAL_Drivers\drv_adc.c</name>
</file>
<file>
<name>$PROJ_DIR$\..\libraries\HAL_Drivers\drv_dac.c</name>
</file>
<file> <file>
<name>$PROJ_DIR$\..\libraries\HAL_Drivers\drv_common.c</name> <name>$PROJ_DIR$\..\libraries\HAL_Drivers\drv_common.c</name>
</file> </file>
......
...@@ -323,7 +323,7 @@ ...@@ -323,7 +323,7 @@
<uThumb>0</uThumb> <uThumb>0</uThumb>
<uSurpInc>0</uSurpInc> <uSurpInc>0</uSurpInc>
<uC99>1</uC99> <uC99>1</uC99>
<uGnu>1</uGnu> <uGnu>0</uGnu>
<useXO>0</useXO> <useXO>0</useXO>
<v6Lang>1</v6Lang> <v6Lang>1</v6Lang>
<v6LangP>1</v6LangP> <v6LangP>1</v6LangP>
...@@ -334,7 +334,7 @@ ...@@ -334,7 +334,7 @@
<v6Rtti>0</v6Rtti> <v6Rtti>0</v6Rtti>
<VariousControls> <VariousControls>
<MiscControls /> <MiscControls />
<Define>CORE_CM4, USE_HAL_DRIVER, STM32MP157Axx, RT_USING_ARM_LIBC</Define> <Define>CORE_CM4, __LOG_TRACE_IO_, METAL_MAX_DEVICE_REGIONS=2, VIRTIO_SLAVE_ONLY, NO_ATOMIC_64_SUPPORT, STM32MP157Axx, METAL_INTERNAL, USE_HAL_DRIVER, RT_USING_ARM_LIBC</Define>
<Undefine /> <Undefine />
<IncludePath>applications;.;..\..\..\libcpu\arm\common;..\..\..\libcpu\arm\cortex-m4;..\..\..\components\drivers\include;..\..\..\components\drivers\include;..\..\..\components\drivers\include;board;board\CubeMX_Config\CM4\Inc;board\ports;..\libraries\HAL_Drivers;..\libraries\HAL_Drivers\config;..\..\..\components\finsh;.;..\..\..\include;..\..\..\components\libc\compilers\armlibc;..\..\..\components\libc\compilers\common;..\libraries\STM32MPxx_HAL\STM32MP1xx_HAL_Driver\Inc;..\libraries\STM32MPxx_HAL\CMSIS\Device\ST\STM32MP1xx\Include;..\libraries\STM32MPxx_HAL\CMSIS\Core\Include;..\libraries\STM32MPxx_HAL\CMSIS\Include</IncludePath> <IncludePath>applications;.;..\..\..\libcpu\arm\common;..\..\..\libcpu\arm\cortex-m4;..\..\..\components\drivers\include;..\..\..\components\drivers\include;..\..\..\components\drivers\include;board;board\CubeMX_Config\CM4\Inc;board\ports;..\libraries\HAL_Drivers;..\libraries\HAL_Drivers\config;..\..\..\components\finsh;.;..\..\..\include;..\..\..\components\libc\compilers\armlibc;..\..\..\components\libc\compilers\common;..\libraries\STM32MPxx_HAL\STM32MP1xx_HAL_Driver\Inc;..\libraries\STM32MPxx_HAL\CMSIS\Device\ST\STM32MP1xx\Include;..\libraries\STM32MPxx_HAL\CMSIS\Core\Include;..\libraries\STM32MPxx_HAL\CMSIS\Include</IncludePath>
</VariousControls> </VariousControls>
......
...@@ -152,12 +152,6 @@ ...@@ -152,12 +152,6 @@
/* samples: kernel and components samples */ /* samples: kernel and components samples */
/* Privated Packages of RealThread */
/* Network Utilities */
#define SOC_FAMILY_STM32 #define SOC_FAMILY_STM32
#define SOC_SERIES_STM32MP1 #define SOC_SERIES_STM32MP1
......
...@@ -657,7 +657,7 @@ ...@@ -657,7 +657,7 @@
</option> </option>
<option> <option>
<name>OOCObjCopyEnable</name> <name>OOCObjCopyEnable</name>
<state>1</state> <state>0</state>
</option> </option>
</data> </data>
</settings> </settings>
......
...@@ -326,7 +326,7 @@ ...@@ -326,7 +326,7 @@
<uThumb>0</uThumb> <uThumb>0</uThumb>
<uSurpInc>0</uSurpInc> <uSurpInc>0</uSurpInc>
<uC99>1</uC99> <uC99>1</uC99>
<uGnu>1</uGnu> <uGnu>0</uGnu>
<useXO>0</useXO> <useXO>0</useXO>
<v6Lang>1</v6Lang> <v6Lang>1</v6Lang>
<v6LangP>1</v6LangP> <v6LangP>1</v6LangP>
......
import os
import time
def main():
    """Load the RT-Thread CM4 firmware via remoteproc and bridge stdin to
    the rpmsg tty (/dev/ttyRPMSG0).

    If a freshly uploaded ELF is found in /tmp it is installed to
    /lib/firmware and the Cortex-M4 is restarted on it. Type "exit" (or
    close stdin) to stop the coprocessor and quit.
    """
    firmware = "RT-Thread-STM32MP1_CM4.elf"
    # Install a freshly uploaded image and (re)start the coprocessor on it.
    if os.path.exists("/tmp/" + firmware):
        os.system("mv /tmp/{0} /lib/firmware/".format(firmware))
        os.system("echo stop > /sys/class/remoteproc/remoteproc0/state")
        os.system("echo {0} > /sys/class/remoteproc/remoteproc0/firmware".format(firmware))
        os.system("echo start > /sys/class/remoteproc/remoteproc0/state")
    # Raw tty (no CR/LF translation, no local echo); mirror RT-Thread's
    # output to this terminal from a background cat.
    os.system("stty -onlcr -echo -F /dev/ttyRPMSG0")
    os.system("cat /dev/ttyRPMSG0 &")
    time.sleep(1)
    print("\nYou can input \"exit\" to quit rt-thread shell!\n")
    os.system("echo version >/dev/ttyRPMSG0")
    while True:
        try:
            line = input()
        except EOFError:
            # Closed stdin (Ctrl-D / piped input exhausted): shut down
            # cleanly instead of crashing with the coprocessor running.
            line = "exit"
        if line == "exit":
            os.system("echo stop > /sys/class/remoteproc/remoteproc0/state")
            return
        os.system("echo {0} >/dev/ttyRPMSG0".format(line))


if __name__ == "__main__":
    main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册