diff --git a/.cproject b/.cproject index d0be3d87..6c5656b4 100644 --- a/.cproject +++ b/.cproject @@ -66,6 +66,11 @@ + + + + + @@ -109,7 +114,6 @@ - @@ -191,6 +195,11 @@ + + + + + @@ -234,7 +243,6 @@ - @@ -320,6 +328,11 @@ + + + + + @@ -363,7 +376,6 @@ - @@ -442,6 +454,11 @@ + + + + + @@ -485,7 +502,6 @@ - @@ -567,6 +583,11 @@ + + + + + @@ -610,7 +631,6 @@ - @@ -689,6 +709,11 @@ + + + + + @@ -732,7 +757,6 @@ - @@ -810,6 +834,11 @@ + + + + + @@ -853,7 +882,6 @@ - @@ -936,6 +964,11 @@ + + + + + @@ -979,7 +1012,6 @@ - @@ -1066,6 +1098,11 @@ + + + + + @@ -1109,7 +1146,6 @@ - @@ -1195,6 +1231,11 @@ + + + + + @@ -1238,7 +1279,6 @@ - @@ -1323,6 +1363,11 @@ + + + + + @@ -1366,7 +1411,6 @@ - @@ -1452,6 +1496,11 @@ + + + + + @@ -1495,7 +1544,6 @@ - @@ -1574,6 +1622,11 @@ + + + + + @@ -1617,7 +1670,6 @@ - @@ -1697,6 +1749,11 @@ + + + + + @@ -1740,7 +1797,6 @@ - @@ -1821,6 +1877,11 @@ + + + + + @@ -1895,7 +1956,6 @@ - @@ -1980,6 +2040,11 @@ + + + + + @@ -2054,7 +2119,6 @@ - @@ -2134,6 +2198,11 @@ + + + + + @@ -2208,7 +2277,6 @@ - @@ -2293,6 +2361,11 @@ + + + + + @@ -2367,7 +2440,6 @@ - @@ -2452,6 +2524,11 @@ + + + + + @@ -2526,7 +2603,6 @@ - @@ -2611,6 +2687,11 @@ + + + + + @@ -2685,7 +2766,6 @@ - @@ -2770,6 +2850,11 @@ + + + + + @@ -2844,7 +2929,6 @@ - @@ -2924,6 +3008,11 @@ + + + + + @@ -2998,7 +3087,6 @@ - @@ -3099,6 +3187,11 @@ + + + + + @@ -3173,7 +3266,6 @@ - @@ -3254,6 +3346,11 @@ + + + + + @@ -3328,7 +3425,6 @@ - @@ -3413,6 +3509,11 @@ + + + + + @@ -3487,7 +3588,6 @@ - @@ -3503,7 +3603,9 @@ - + + + @@ -3584,6 +3686,11 @@ + + + + + @@ -3724,6 +3831,11 @@ + + + + + @@ -3767,7 +3879,6 @@ - @@ -3782,6 +3893,147 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -3793,6 +4045,7 @@ + @@ -3802,6 +4055,9 @@ + + + @@ -3816,6 +4072,7 @@ + @@ -3836,6 +4093,9 @@ + + + @@ -3857,6 +4117,9 @@ + + + @@ -3866,6 +4129,9 @@ + + + @@ -3878,6 +4144,9 @@ + + + @@ -3893,6 +4162,9 @@ + + + @@ -3902,5 +4174,8 @@ + + + \ No newline at end of file diff --git a/Drivers/BSP/Components/dp83848/dp83848.c b/Drivers/BSP/Components/dp83848/dp83848.c new file mode 100644 index 00000000..6fa49691 --- /dev/null +++ b/Drivers/BSP/Components/dp83848/dp83848.c @@ -0,0 +1,613 @@ +/** + ****************************************************************************** + * @file dp83848.c + * @author MCD Application Team + * @brief This file provides a set of functions needed to manage the DP83848 + * PHY devices. + ****************************************************************************** + * @attention + * + * Copyright (c) 2021 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "dp83848.h" + +/** @addtogroup BSP + * @{ + */ + +/** @addtogroup Component + * @{ + */ + +/** @defgroup DP83848 DP83848 + * @{ + */ + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @defgroup DP83848_Private_Defines DP83848 Private Defines + * @{ + */ +#define DP83848_MAX_DEV_ADDR ((uint32_t)31U) +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** @defgroup DP83848_Private_Functions DP83848 Private Functions + * @{ + */ + +/** + * @brief Register IO functions to component object + * @param pObj: device object of DP83848_Object_t. + * @param ioctx: holds device IO functions. + * @retval DP83848_STATUS_OK if OK + * DP83848_STATUS_ERROR if missing mandatory function + */ +int32_t DP83848_RegisterBusIO(dp83848_Object_t *pObj, dp83848_IOCtx_t *ioctx) +{ + if(!pObj || !ioctx->ReadReg || !ioctx->WriteReg || !ioctx->GetTick) + { + return DP83848_STATUS_ERROR; + } + + pObj->IO.Init = ioctx->Init; + pObj->IO.DeInit = ioctx->DeInit; + pObj->IO.ReadReg = ioctx->ReadReg; + pObj->IO.WriteReg = ioctx->WriteReg; + pObj->IO.GetTick = ioctx->GetTick; + + return DP83848_STATUS_OK; +} + +/** + * @brief Initialize the DP83848 and configure the needed hardware resources + * @param pObj: device object DP83848_Object_t. + * @retval DP83848_STATUS_OK if OK + * DP83848_STATUS_ADDRESS_ERROR if cannot find device address + * DP83848_STATUS_READ_ERROR if connot read register + */ + int32_t DP83848_Init(dp83848_Object_t *pObj) + { + uint32_t regvalue = 0, addr = 0; + int32_t status = DP83848_STATUS_OK; + + if(pObj->Is_Initialized == 0) + { + if(pObj->IO.Init != 0) + { + /* GPIO and Clocks initialization */ + pObj->IO.Init(); + } + + /* for later check */ + pObj->DevAddr = DP83848_MAX_DEV_ADDR + 1; + + /* Get the device address from special mode register */ + for(addr = 0; addr <= DP83848_MAX_DEV_ADDR; addr ++) + { + if(pObj->IO.ReadReg(addr, DP83848_SMR, ®value) < 0) + { + status = DP83848_STATUS_READ_ERROR; + /* Can't read from this device address + continue with next address */ + continue; + } + + if((regvalue & DP83848_SMR_PHY_ADDR) == addr) + { + pObj->DevAddr = addr; + status = DP83848_STATUS_OK; + break; + } + } + + if(pObj->DevAddr > DP83848_MAX_DEV_ADDR) + { + status = DP83848_STATUS_ADDRESS_ERROR; + } + + /* if device address is matched */ + if(status == DP83848_STATUS_OK) + { + pObj->Is_Initialized = 1; + } + } + + return status; + } + +/** + * @brief De-Initialize the dp83848 and it's hardware resources + * @param pObj: device object DP83848_Object_t. + * @retval None + */ +int32_t DP83848_DeInit(dp83848_Object_t *pObj) +{ + if(pObj->Is_Initialized) + { + if(pObj->IO.DeInit != 0) + { + if(pObj->IO.DeInit() < 0) + { + return DP83848_STATUS_ERROR; + } + } + + pObj->Is_Initialized = 0; + } + + return DP83848_STATUS_OK; +} + +/** + * @brief Disable the DP83848 power down mode. + * @param pObj: device object DP83848_Object_t. 
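/* Usage sketch for DP83848_RegisterBusIO()/DP83848_Init() above (a minimal,
 * hedged illustration, not code from this patch): the application registers
 * its MDIO accessors, then lets DP83848_Init() probe addresses 0..31 through
 * DP83848_SMR. ETH_MDIO_Read/ETH_MDIO_Write/ETH_GetTick are assumed
 * application-side wrappers (typically around the HAL ETH PHY register
 * accessors), not symbols defined by this driver. */
extern int32_t ETH_MDIO_Read(uint32_t DevAddr, uint32_t RegAddr, uint32_t *pRegVal);
extern int32_t ETH_MDIO_Write(uint32_t DevAddr, uint32_t RegAddr, uint32_t RegVal);
extern int32_t ETH_GetTick(void);

static dp83848_Object_t DP83848;

static int32_t PHY_Probe(void)
{
  dp83848_IOCtx_t ioctx;

  ioctx.Init     = NULL;            /* optional, may be NULL */
  ioctx.DeInit   = NULL;            /* optional, may be NULL */
  ioctx.ReadReg  = ETH_MDIO_Read;   /* mandatory             */
  ioctx.WriteReg = ETH_MDIO_Write;  /* mandatory             */
  ioctx.GetTick  = ETH_GetTick;     /* mandatory             */

  if (DP83848_RegisterBusIO(&DP83848, &ioctx) != DP83848_STATUS_OK)
  {
    return DP83848_STATUS_ERROR;
  }

  /* Scans PHY addresses 0..31 via DP83848_SMR until one matches */
  return DP83848_Init(&DP83848);
}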
+ * @retval DP83848_STATUS_OK if OK + * DP83848_STATUS_READ_ERROR if connot read register + * DP83848_STATUS_WRITE_ERROR if connot write to register + */ +int32_t DP83848_DisablePowerDownMode(dp83848_Object_t *pObj) +{ + uint32_t readval = 0; + int32_t status = DP83848_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, DP83848_BCR, &readval) >= 0) + { + readval &= ~DP83848_BCR_POWER_DOWN; + + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, DP83848_BCR, readval) < 0) + { + status = DP83848_STATUS_WRITE_ERROR; + } + } + else + { + status = DP83848_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Enable the DP83848 power down mode. + * @param pObj: device object DP83848_Object_t. + * @retval DP83848_STATUS_OK if OK + * DP83848_STATUS_READ_ERROR if connot read register + * DP83848_STATUS_WRITE_ERROR if connot write to register + */ +int32_t DP83848_EnablePowerDownMode(dp83848_Object_t *pObj) +{ + uint32_t readval = 0; + int32_t status = DP83848_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, DP83848_BCR, &readval) >= 0) + { + readval |= DP83848_BCR_POWER_DOWN; + + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, DP83848_BCR, readval) < 0) + { + status = DP83848_STATUS_WRITE_ERROR; + } + } + else + { + status = DP83848_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Start the auto negotiation process. + * @param pObj: device object DP83848_Object_t. + * @retval DP83848_STATUS_OK if OK + * DP83848_STATUS_READ_ERROR if connot read register + * DP83848_STATUS_WRITE_ERROR if connot write to register + */ +int32_t DP83848_StartAutoNego(dp83848_Object_t *pObj) +{ + uint32_t readval = 0; + int32_t status = DP83848_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, DP83848_BCR, &readval) >= 0) + { + readval |= DP83848_BCR_AUTONEGO_EN; + + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, DP83848_BCR, readval) < 0) + { + status = DP83848_STATUS_WRITE_ERROR; + } + } + else + { + status = DP83848_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Get the link state of DP83848 device. + * @param pObj: Pointer to device object. 
+ * @param pLinkState: Pointer to link state + * @retval DP83848_STATUS_LINK_DOWN if link is down + * DP83848_STATUS_AUTONEGO_NOTDONE if Auto nego not completed + * DP83848_STATUS_100MBITS_FULLDUPLEX if 100Mb/s FD + * DP83848_STATUS_100MBITS_HALFDUPLEX if 100Mb/s HD + * DP83848_STATUS_10MBITS_FULLDUPLEX if 10Mb/s FD + * DP83848_STATUS_10MBITS_HALFDUPLEX if 10Mb/s HD + * DP83848_STATUS_READ_ERROR if connot read register + * DP83848_STATUS_WRITE_ERROR if connot write to register + */ +int32_t DP83848_GetLinkState(dp83848_Object_t *pObj) +{ + uint32_t readval = 0; + + /* Read Status register */ + if(pObj->IO.ReadReg(pObj->DevAddr, DP83848_BSR, &readval) < 0) + { + return DP83848_STATUS_READ_ERROR; + } + + /* Read Status register again */ + if(pObj->IO.ReadReg(pObj->DevAddr, DP83848_BSR, &readval) < 0) + { + return DP83848_STATUS_READ_ERROR; + } + + if((readval & DP83848_BSR_LINK_STATUS) == 0) + { + /* Return Link Down status */ + return DP83848_STATUS_LINK_DOWN; + } + + /* Check Auto negotiaition */ + if(pObj->IO.ReadReg(pObj->DevAddr, DP83848_BCR, &readval) < 0) + { + return DP83848_STATUS_READ_ERROR; + } + + if((readval & DP83848_BCR_AUTONEGO_EN) != DP83848_BCR_AUTONEGO_EN) + { + if(((readval & DP83848_BCR_SPEED_SELECT) == DP83848_BCR_SPEED_SELECT) && ((readval & DP83848_BCR_DUPLEX_MODE) == DP83848_BCR_DUPLEX_MODE)) + { + return DP83848_STATUS_100MBITS_FULLDUPLEX; + } + else if ((readval & DP83848_BCR_SPEED_SELECT) == DP83848_BCR_SPEED_SELECT) + { + return DP83848_STATUS_100MBITS_HALFDUPLEX; + } + else if ((readval & DP83848_BCR_DUPLEX_MODE) == DP83848_BCR_DUPLEX_MODE) + { + return DP83848_STATUS_10MBITS_FULLDUPLEX; + } + else + { + return DP83848_STATUS_10MBITS_HALFDUPLEX; + } + } + else /* Auto Nego enabled */ + { + if(pObj->IO.ReadReg(pObj->DevAddr, DP83848_PHYSCSR, &readval) < 0) + { + return DP83848_STATUS_READ_ERROR; + } + + /* Check if auto nego not done */ + if((readval & DP83848_PHYSCSR_AUTONEGO_DONE) == 0) + { + return DP83848_STATUS_AUTONEGO_NOTDONE; + } + + if((readval & DP83848_PHYSCSR_HCDSPEEDMASK) == DP83848_PHYSCSR_100BTX_FD) + { + return DP83848_STATUS_100MBITS_FULLDUPLEX; + } + else if ((readval & DP83848_PHYSCSR_HCDSPEEDMASK) == DP83848_PHYSCSR_100BTX_HD) + { + return DP83848_STATUS_100MBITS_HALFDUPLEX; + } + else if ((readval & DP83848_PHYSCSR_HCDSPEEDMASK) == DP83848_PHYSCSR_10BT_FD) + { + return DP83848_STATUS_10MBITS_FULLDUPLEX; + } + else + { + return DP83848_STATUS_10MBITS_HALFDUPLEX; + } + } +} + +/** + * @brief Set the link state of DP83848 device. + * @param pObj: Pointer to device object. 
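/* Usage sketch for DP83848_GetLinkState() above (illustration only, not code
 * from this patch): the PHY object is assumed to have been initialized as in
 * the probe sketch earlier; the switch names what each return value means for
 * the MAC speed/duplex configuration. */
static void PHY_PollLink(dp83848_Object_t *pObj)
{
  switch (DP83848_GetLinkState(pObj))
  {
    case DP83848_STATUS_100MBITS_FULLDUPLEX:  /* configure MAC: 100 Mbit/s, full duplex */
    case DP83848_STATUS_100MBITS_HALFDUPLEX:  /* configure MAC: 100 Mbit/s, half duplex */
    case DP83848_STATUS_10MBITS_FULLDUPLEX:   /* configure MAC: 10 Mbit/s, full duplex  */
    case DP83848_STATUS_10MBITS_HALFDUPLEX:   /* configure MAC: 10 Mbit/s, half duplex  */
      break;                                  /* link is up at the negotiated rate      */
    case DP83848_STATUS_AUTONEGO_NOTDONE:     /* auto-negotiation still in progress     */
    case DP83848_STATUS_LINK_DOWN:            /* no link partner                        */
    default:                                  /* DP83848_STATUS_READ_ERROR, ...         */
      break;                                  /* keep the interface down                */
  }
}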
+ * @param pLinkState: link state can be one of the following + * DP83848_STATUS_100MBITS_FULLDUPLEX if 100Mb/s FD + * DP83848_STATUS_100MBITS_HALFDUPLEX if 100Mb/s HD + * DP83848_STATUS_10MBITS_FULLDUPLEX if 10Mb/s FD + * DP83848_STATUS_10MBITS_HALFDUPLEX if 10Mb/s HD + * @retval DP83848_STATUS_OK if OK + * DP83848_STATUS_ERROR if parameter error + * DP83848_STATUS_READ_ERROR if connot read register + * DP83848_STATUS_WRITE_ERROR if connot write to register + */ +int32_t DP83848_SetLinkState(dp83848_Object_t *pObj, uint32_t LinkState) +{ + uint32_t bcrvalue = 0; + int32_t status = DP83848_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, DP83848_BCR, &bcrvalue) >= 0) + { + /* Disable link config (Auto nego, speed and duplex) */ + bcrvalue &= ~(DP83848_BCR_AUTONEGO_EN | DP83848_BCR_SPEED_SELECT | DP83848_BCR_DUPLEX_MODE); + + if(LinkState == DP83848_STATUS_100MBITS_FULLDUPLEX) + { + bcrvalue |= (DP83848_BCR_SPEED_SELECT | DP83848_BCR_DUPLEX_MODE); + } + else if (LinkState == DP83848_STATUS_100MBITS_HALFDUPLEX) + { + bcrvalue |= DP83848_BCR_SPEED_SELECT; + } + else if (LinkState == DP83848_STATUS_10MBITS_FULLDUPLEX) + { + bcrvalue |= DP83848_BCR_DUPLEX_MODE; + } + else + { + /* Wrong link status parameter */ + status = DP83848_STATUS_ERROR; + } + } + else + { + status = DP83848_STATUS_READ_ERROR; + } + + if(status == DP83848_STATUS_OK) + { + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, DP83848_BCR, bcrvalue) < 0) + { + status = DP83848_STATUS_WRITE_ERROR; + } + } + + return status; +} + +/** + * @brief Enable loopback mode. + * @param pObj: Pointer to device object. + * @retval DP83848_STATUS_OK if OK + * DP83848_STATUS_READ_ERROR if connot read register + * DP83848_STATUS_WRITE_ERROR if connot write to register + */ +int32_t DP83848_EnableLoopbackMode(dp83848_Object_t *pObj) +{ + uint32_t readval = 0; + int32_t status = DP83848_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, DP83848_BCR, &readval) >= 0) + { + readval |= DP83848_BCR_LOOPBACK; + + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, DP83848_BCR, readval) < 0) + { + status = DP83848_STATUS_WRITE_ERROR; + } + } + else + { + status = DP83848_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Disable loopback mode. + * @param pObj: Pointer to device object. + * @retval DP83848_STATUS_OK if OK + * DP83848_STATUS_READ_ERROR if connot read register + * DP83848_STATUS_WRITE_ERROR if connot write to register + */ +int32_t DP83848_DisableLoopbackMode(dp83848_Object_t *pObj) +{ + uint32_t readval = 0; + int32_t status = DP83848_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, DP83848_BCR, &readval) >= 0) + { + readval &= ~DP83848_BCR_LOOPBACK; + + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, DP83848_BCR, readval) < 0) + { + status = DP83848_STATUS_WRITE_ERROR; + } + } + else + { + status = DP83848_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Enable IT source. + * @param pObj: Pointer to device object. 
+ * @param Interrupt: IT source to be enabled + * should be a value or a combination of the following: + * DP83848_WOL_IT + * DP83848_ENERGYON_IT + * DP83848_AUTONEGO_COMPLETE_IT + * DP83848_REMOTE_FAULT_IT + * DP83848_LINK_DOWN_IT + * DP83848_AUTONEGO_LP_ACK_IT + * DP83848_PARALLEL_DETECTION_FAULT_IT + * DP83848_AUTONEGO_PAGE_RECEIVED_IT + * @retval DP83848_STATUS_OK if OK + * DP83848_STATUS_READ_ERROR if connot read register + * DP83848_STATUS_WRITE_ERROR if connot write to register + */ +int32_t DP83848_EnableIT(dp83848_Object_t *pObj, uint32_t Interrupt) +{ + uint32_t readval = 0; + int32_t status = DP83848_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, DP83848_IMR, &readval) >= 0) + { + readval |= Interrupt; + + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, DP83848_IMR, readval) < 0) + { + status = DP83848_STATUS_WRITE_ERROR; + } + } + else + { + status = DP83848_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Disable IT source. + * @param pObj: Pointer to device object. + * @param Interrupt: IT source to be disabled + * should be a value or a combination of the following: + * DP83848_WOL_IT + * DP83848_ENERGYON_IT + * DP83848_AUTONEGO_COMPLETE_IT + * DP83848_REMOTE_FAULT_IT + * DP83848_LINK_DOWN_IT + * DP83848_AUTONEGO_LP_ACK_IT + * DP83848_PARALLEL_DETECTION_FAULT_IT + * DP83848_AUTONEGO_PAGE_RECEIVED_IT + * @retval DP83848_STATUS_OK if OK + * DP83848_STATUS_READ_ERROR if connot read register + * DP83848_STATUS_WRITE_ERROR if connot write to register + */ +int32_t DP83848_DisableIT(dp83848_Object_t *pObj, uint32_t Interrupt) +{ + uint32_t readval = 0; + int32_t status = DP83848_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, DP83848_IMR, &readval) >= 0) + { + readval &= ~Interrupt; + + /* Apply configuration */ + if(pObj->IO.WriteReg(pObj->DevAddr, DP83848_IMR, readval) < 0) + { + status = DP83848_STATUS_WRITE_ERROR; + } + } + else + { + status = DP83848_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Clear IT flag. + * @param pObj: Pointer to device object. + * @param Interrupt: IT flag to be cleared + * should be a value or a combination of the following: + * DP83848_WOL_IT + * DP83848_ENERGYON_IT + * DP83848_AUTONEGO_COMPLETE_IT + * DP83848_REMOTE_FAULT_IT + * DP83848_LINK_DOWN_IT + * DP83848_AUTONEGO_LP_ACK_IT + * DP83848_PARALLEL_DETECTION_FAULT_IT + * DP83848_AUTONEGO_PAGE_RECEIVED_IT + * @retval DP83848_STATUS_OK if OK + * DP83848_STATUS_READ_ERROR if connot read register + */ +int32_t DP83848_ClearIT(dp83848_Object_t *pObj, uint32_t Interrupt) +{ + uint32_t readval = 0; + int32_t status = DP83848_STATUS_OK; + + if(pObj->IO.ReadReg(pObj->DevAddr, DP83848_ISFR, &readval) < 0) + { + status = DP83848_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @brief Get IT Flag status. + * @param pObj: Pointer to device object. 
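/* Usage sketch for the interrupt helpers above (illustration only, not code
 * from this patch): arming the link-down interrupt and servicing it from
 * whatever EXTI/GPIO handler the PHY INT pin is wired to (that wiring is
 * application-specific). DP83848_ClearIT() only performs a read of
 * DP83848_ISFR, on the assumption that the interrupt flags clear on read. */
static void PHY_ArmLinkIrq(dp83848_Object_t *pObj)
{
  (void)DP83848_EnableIT(pObj, DP83848_LINK_DOWN_IT);
}

static void PHY_LinkIrqHandler(dp83848_Object_t *pObj)
{
  if (DP83848_GetITStatus(pObj, DP83848_LINK_DOWN_IT) == 1)
  {
    /* Link lost: notify the network stack, then clear the flag */
    (void)DP83848_ClearIT(pObj, DP83848_LINK_DOWN_IT);
  }
}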
+ * @param Interrupt: IT Flag to be checked, + * should be a value or a combination of the following: + * DP83848_WOL_IT + * DP83848_ENERGYON_IT + * DP83848_AUTONEGO_COMPLETE_IT + * DP83848_REMOTE_FAULT_IT + * DP83848_LINK_DOWN_IT + * DP83848_AUTONEGO_LP_ACK_IT + * DP83848_PARALLEL_DETECTION_FAULT_IT + * DP83848_AUTONEGO_PAGE_RECEIVED_IT + * @retval 1 IT flag is SET + * 0 IT flag is RESET + * DP83848_STATUS_READ_ERROR if connot read register + */ +int32_t DP83848_GetITStatus(dp83848_Object_t *pObj, uint32_t Interrupt) +{ + uint32_t readval = 0; + int32_t status = 0; + + if(pObj->IO.ReadReg(pObj->DevAddr, DP83848_ISFR, &readval) >= 0) + { + status = ((readval & Interrupt) == Interrupt); + } + else + { + status = DP83848_STATUS_READ_ERROR; + } + + return status; +} + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ diff --git a/Drivers/BSP/Components/dp83848/dp83848.h b/Drivers/BSP/Components/dp83848/dp83848.h new file mode 100644 index 00000000..5b937d87 --- /dev/null +++ b/Drivers/BSP/Components/dp83848/dp83848.h @@ -0,0 +1,434 @@ +/** + ****************************************************************************** + * @file dp83848.h + * @author MCD Application Team + * @brief This file contains all the functions prototypes for the + * dp83848.c PHY driver. + ****************************************************************************** + * @attention + * + * Copyright (c) 2021 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef DP83848_H +#define DP83848_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include + +/** @addtogroup BSP + * @{ + */ + +/** @addtogroup Component + * @{ + */ + +/** @defgroup DP83848 + * @{ + */ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup DP83848_Exported_Constants DP83848 Exported Constants + * @{ + */ + +/** @defgroup DP83848_Registers_Mapping DP83848 Registers Mapping + * @{ + */ +#define DP83848_BCR ((uint16_t)0x0000U) +#define DP83848_BSR ((uint16_t)0x0001U) +#define DP83848_PHYI1R ((uint16_t)0x0002U) +#define DP83848_PHYI2R ((uint16_t)0x0003U) +#define DP83848_ANAR ((uint16_t)0x0004U) +#define DP83848_ANLPAR ((uint16_t)0x0005U) +#define DP83848_ANER ((uint16_t)0x0006U) +#define DP83848_ANNPTR ((uint16_t)0x0007U) +#define DP83848_SMR ((uint16_t)0x0019U) +#define DP83848_ISFR ((uint16_t)0x0012U) +#define DP83848_IMR ((uint16_t)0x0011U) +#define DP83848_PHYSCSR ((uint16_t)0x0010U) +/** + * @} + */ + +/** @defgroup DP83848_BCR_Bit_Definition DP83848 BCR Bit Definition + * @{ + */ +#define DP83848_BCR_SOFT_RESET ((uint16_t)0x8000U) +#define DP83848_BCR_LOOPBACK ((uint16_t)0x4000U) +#define DP83848_BCR_SPEED_SELECT ((uint16_t)0x2000U) +#define DP83848_BCR_AUTONEGO_EN ((uint16_t)0x1000U) +#define DP83848_BCR_POWER_DOWN ((uint16_t)0x0800U) +#define DP83848_BCR_ISOLATE ((uint16_t)0x0400U) +#define DP83848_BCR_RESTART_AUTONEGO ((uint16_t)0x0200U) +#define DP83848_BCR_DUPLEX_MODE ((uint16_t)0x0100U) +/** + * @} + */ + +/** @defgroup DP83848_BSR_Bit_Definition DP83848 BSR Bit Definition + * @{ + */ +#define DP83848_BSR_100BASE_T4 
((uint16_t)0x8000U) +#define DP83848_BSR_100BASE_TX_FD ((uint16_t)0x4000U) +#define DP83848_BSR_100BASE_TX_HD ((uint16_t)0x2000U) +#define DP83848_BSR_10BASE_T_FD ((uint16_t)0x1000U) +#define DP83848_BSR_10BASE_T_HD ((uint16_t)0x0800U) +#define DP83848_BSR_MF_PREAMBLE ((uint16_t)0x0040U) +#define DP83848_BSR_AUTONEGO_CPLT ((uint16_t)0x0020U) +#define DP83848_BSR_REMOTE_FAULT ((uint16_t)0x0010U) +#define DP83848_BSR_AUTONEGO_ABILITY ((uint16_t)0x0008U) +#define DP83848_BSR_LINK_STATUS ((uint16_t)0x0004U) +#define DP83848_BSR_JABBER_DETECT ((uint16_t)0x0002U) +#define DP83848_BSR_EXTENDED_CAP ((uint16_t)0x0001U) +/** + * @} + */ + +/** @defgroup DP83848_PHYI1R_Bit_Definition DP83848 PHYI1R Bit Definition + * @{ + */ +#define DP83848_PHYI1R_OUI_3_18 ((uint16_t)0xFFFFU) +/** + * @} + */ + +/** @defgroup DP83848_PHYI2R_Bit_Definition DP83848 PHYI2R Bit Definition + * @{ + */ +#define DP83848_PHYI2R_OUI_19_24 ((uint16_t)0xFC00U) +#define DP83848_PHYI2R_MODEL_NBR ((uint16_t)0x03F0U) +#define DP83848_PHYI2R_REVISION_NBR ((uint16_t)0x000FU) +/** + * @} + */ + +/** @defgroup DP83848_ANAR_Bit_Definition DP83848 ANAR Bit Definition + * @{ + */ +#define DP83848_ANAR_NEXT_PAGE ((uint16_t)0x8000U) +#define DP83848_ANAR_REMOTE_FAULT ((uint16_t)0x2000U) +#define DP83848_ANAR_PAUSE_OPERATION ((uint16_t)0x0C00U) +#define DP83848_ANAR_PO_NOPAUSE ((uint16_t)0x0000U) +#define DP83848_ANAR_PO_SYMMETRIC_PAUSE ((uint16_t)0x0400U) +#define DP83848_ANAR_PO_ASYMMETRIC_PAUSE ((uint16_t)0x0800U) +#define DP83848_ANAR_PO_ADVERTISE_SUPPORT ((uint16_t)0x0C00U) +#define DP83848_ANAR_100BASE_TX_FD ((uint16_t)0x0100U) +#define DP83848_ANAR_100BASE_TX ((uint16_t)0x0080U) +#define DP83848_ANAR_10BASE_T_FD ((uint16_t)0x0040U) +#define DP83848_ANAR_10BASE_T ((uint16_t)0x0020U) +#define DP83848_ANAR_SELECTOR_FIELD ((uint16_t)0x000FU) +/** + * @} + */ + +/** @defgroup DP83848_ANLPAR_Bit_Definition DP83848 ANLPAR Bit Definition + * @{ + */ +#define DP83848_ANLPAR_NEXT_PAGE ((uint16_t)0x8000U) +#define DP83848_ANLPAR_REMOTE_FAULT ((uint16_t)0x2000U) +#define DP83848_ANLPAR_PAUSE_OPERATION ((uint16_t)0x0C00U) +#define DP83848_ANLPAR_PO_NOPAUSE ((uint16_t)0x0000U) +#define DP83848_ANLPAR_PO_SYMMETRIC_PAUSE ((uint16_t)0x0400U) +#define DP83848_ANLPAR_PO_ASYMMETRIC_PAUSE ((uint16_t)0x0800U) +#define DP83848_ANLPAR_PO_ADVERTISE_SUPPORT ((uint16_t)0x0C00U) +#define DP83848_ANLPAR_100BASE_TX_FD ((uint16_t)0x0100U) +#define DP83848_ANLPAR_100BASE_TX ((uint16_t)0x0080U) +#define DP83848_ANLPAR_10BASE_T_FD ((uint16_t)0x0040U) +#define DP83848_ANLPAR_10BASE_T ((uint16_t)0x0020U) +#define DP83848_ANLPAR_SELECTOR_FIELD ((uint16_t)0x000FU) +/** + * @} + */ + +/** @defgroup DP83848_ANER_Bit_Definition DP83848 ANER Bit Definition + * @{ + */ +#define DP83848_ANER_RX_NP_LOCATION_ABLE ((uint16_t)0x0040U) +#define DP83848_ANER_RX_NP_STORAGE_LOCATION ((uint16_t)0x0020U) +#define DP83848_ANER_PARALLEL_DETECT_FAULT ((uint16_t)0x0010U) +#define DP83848_ANER_LP_NP_ABLE ((uint16_t)0x0008U) +#define DP83848_ANER_NP_ABLE ((uint16_t)0x0004U) +#define DP83848_ANER_PAGE_RECEIVED ((uint16_t)0x0002U) +#define DP83848_ANER_LP_AUTONEG_ABLE ((uint16_t)0x0001U) +/** + * @} + */ + +/** @defgroup DP83848_ANNPTR_Bit_Definition DP83848 ANNPTR Bit Definition + * @{ + */ +#define DP83848_ANNPTR_NEXT_PAGE ((uint16_t)0x8000U) +#define DP83848_ANNPTR_MESSAGE_PAGE ((uint16_t)0x2000U) +#define DP83848_ANNPTR_ACK2 ((uint16_t)0x1000U) +#define DP83848_ANNPTR_TOGGLE ((uint16_t)0x0800U) +#define DP83848_ANNPTR_MESSAGE_CODE ((uint16_t)0x07FFU) +/** + * @} + */ + +/** @defgroup 
DP83848_ANNPRR_Bit_Definition DP83848 ANNPRR Bit Definition + * @{ + */ +#define DP83848_ANNPRR_NEXT_PAGE ((uint16_t)0x8000U) +#define DP83848_ANNPRR_ACK ((uint16_t)0x4000U) +#define DP83848_ANNPRR_MESSAGE_PAGE ((uint16_t)0x2000U) +#define DP83848_ANNPRR_ACK2 ((uint16_t)0x1000U) +#define DP83848_ANNPRR_TOGGLE ((uint16_t)0x0800U) +#define DP83848_ANNPRR_MESSAGE_CODE ((uint16_t)0x07FFU) +/** + * @} + */ + +/** @defgroup DP83848_MMDACR_Bit_Definition DP83848 MMDACR Bit Definition + * @{ + */ +#define DP83848_MMDACR_MMD_FUNCTION ((uint16_t)0xC000U) +#define DP83848_MMDACR_MMD_FUNCTION_ADDR ((uint16_t)0x0000U) +#define DP83848_MMDACR_MMD_FUNCTION_DATA ((uint16_t)0x4000U) +#define DP83848_MMDACR_MMD_DEV_ADDR ((uint16_t)0x001FU) +/** + * @} + */ + +/** @defgroup DP83848_ENCTR_Bit_Definition DP83848 ENCTR Bit Definition + * @{ + */ +#define DP83848_ENCTR_TX_ENABLE ((uint16_t)0x8000U) +#define DP83848_ENCTR_TX_TIMER ((uint16_t)0x6000U) +#define DP83848_ENCTR_TX_TIMER_1S ((uint16_t)0x0000U) +#define DP83848_ENCTR_TX_TIMER_768MS ((uint16_t)0x2000U) +#define DP83848_ENCTR_TX_TIMER_512MS ((uint16_t)0x4000U) +#define DP83848_ENCTR_TX_TIMER_265MS ((uint16_t)0x6000U) +#define DP83848_ENCTR_RX_ENABLE ((uint16_t)0x1000U) +#define DP83848_ENCTR_RX_MAX_INTERVAL ((uint16_t)0x0C00U) +#define DP83848_ENCTR_RX_MAX_INTERVAL_64MS ((uint16_t)0x0000U) +#define DP83848_ENCTR_RX_MAX_INTERVAL_256MS ((uint16_t)0x0400U) +#define DP83848_ENCTR_RX_MAX_INTERVAL_512MS ((uint16_t)0x0800U) +#define DP83848_ENCTR_RX_MAX_INTERVAL_1S ((uint16_t)0x0C00U) +#define DP83848_ENCTR_EX_CROSS_OVER ((uint16_t)0x0002U) +#define DP83848_ENCTR_EX_MANUAL_CROSS_OVER ((uint16_t)0x0001U) +/** + * @} + */ + +/** @defgroup DP83848_MCSR_Bit_Definition DP83848 MCSR Bit Definition + * @{ + */ +#define DP83848_MCSR_EDPWRDOWN ((uint16_t)0x2000U) +#define DP83848_MCSR_FARLOOPBACK ((uint16_t)0x0200U) +#define DP83848_MCSR_ALTINT ((uint16_t)0x0040U) +#define DP83848_MCSR_ENERGYON ((uint16_t)0x0002U) +/** + * @} + */ + +/** @defgroup DP83848_SMR_Bit_Definition DP83848 SMR Bit Definition + * @{ + */ +#define DP83848_SMR_MODE ((uint16_t)0x00E0U) +#define DP83848_SMR_PHY_ADDR ((uint16_t)0x001FU) +/** + * @} + */ + +/** @defgroup DP83848_TPDCR_Bit_Definition DP83848 TPDCR Bit Definition + * @{ + */ +#define DP83848_TPDCR_DELAY_IN ((uint16_t)0x8000U) +#define DP83848_TPDCR_LINE_BREAK_COUNTER ((uint16_t)0x7000U) +#define DP83848_TPDCR_PATTERN_HIGH ((uint16_t)0x0FC0U) +#define DP83848_TPDCR_PATTERN_LOW ((uint16_t)0x003FU) +/** + * @} + */ + +/** @defgroup DP83848_TCSR_Bit_Definition DP83848 TCSR Bit Definition + * @{ + */ +#define DP83848_TCSR_TDR_ENABLE ((uint16_t)0x8000U) +#define DP83848_TCSR_TDR_AD_FILTER_ENABLE ((uint16_t)0x4000U) +#define DP83848_TCSR_TDR_CH_CABLE_TYPE ((uint16_t)0x0600U) +#define DP83848_TCSR_TDR_CH_CABLE_DEFAULT ((uint16_t)0x0000U) +#define DP83848_TCSR_TDR_CH_CABLE_SHORTED ((uint16_t)0x0200U) +#define DP83848_TCSR_TDR_CH_CABLE_OPEN ((uint16_t)0x0400U) +#define DP83848_TCSR_TDR_CH_CABLE_MATCH ((uint16_t)0x0600U) +#define DP83848_TCSR_TDR_CH_STATUS ((uint16_t)0x0100U) +#define DP83848_TCSR_TDR_CH_LENGTH ((uint16_t)0x00FFU) +/** + * @} + */ + +/** @defgroup DP83848_SCSIR_Bit_Definition DP83848 SCSIR Bit Definition + * @{ + */ +#define DP83848_SCSIR_AUTO_MDIX_ENABLE ((uint16_t)0x8000U) +#define DP83848_SCSIR_CHANNEL_SELECT ((uint16_t)0x2000U) +#define DP83848_SCSIR_SQE_DISABLE ((uint16_t)0x0800U) +#define DP83848_SCSIR_XPOLALITY ((uint16_t)0x0010U) +/** + * @} + */ + +/** @defgroup DP83848_CLR_Bit_Definition DP83848 CLR Bit Definition + * @{ 
+ */ +#define DP83848_CLR_CABLE_LENGTH ((uint16_t)0xF000U) +/** + * @} + */ + +/** @defgroup DP83848_IMR_ISFR_Bit_Definition DP83848 IMR ISFR Bit Definition + * @{ + */ +#define DP83848_INT_8 ((uint16_t)0x0100U) +#define DP83848_INT_7 ((uint16_t)0x0080U) +#define DP83848_INT_6 ((uint16_t)0x0040U) +#define DP83848_INT_5 ((uint16_t)0x0020U) +#define DP83848_INT_4 ((uint16_t)0x0010U) +#define DP83848_INT_3 ((uint16_t)0x0008U) +#define DP83848_INT_2 ((uint16_t)0x0004U) +#define DP83848_INT_1 ((uint16_t)0x0002U) +/** + * @} + */ + +/** @defgroup DP83848_PHYSCSR_Bit_Definition DP83848 PHYSCSR Bit Definition + * @{ + */ +#define DP83848_PHYSCSR_AUTONEGO_DONE ((uint16_t)0x010U) +#define DP83848_PHYSCSR_HCDSPEEDMASK ((uint16_t)0x006U) +#define DP83848_PHYSCSR_10BT_HD ((uint16_t)0x002U) +#define DP83848_PHYSCSR_10BT_FD ((uint16_t)0x006U) +#define DP83848_PHYSCSR_100BTX_HD ((uint16_t)0x000U) +#define DP83848_PHYSCSR_100BTX_FD ((uint16_t)0x004U) +/** + * @} + */ + +/** @defgroup DP83848_Status DP83848 Status + * @{ + */ + +#define DP83848_STATUS_READ_ERROR ((int32_t)-5) +#define DP83848_STATUS_WRITE_ERROR ((int32_t)-4) +#define DP83848_STATUS_ADDRESS_ERROR ((int32_t)-3) +#define DP83848_STATUS_RESET_TIMEOUT ((int32_t)-2) +#define DP83848_STATUS_ERROR ((int32_t)-1) +#define DP83848_STATUS_OK ((int32_t) 0) +#define DP83848_STATUS_LINK_DOWN ((int32_t) 1) +#define DP83848_STATUS_100MBITS_FULLDUPLEX ((int32_t) 2) +#define DP83848_STATUS_100MBITS_HALFDUPLEX ((int32_t) 3) +#define DP83848_STATUS_10MBITS_FULLDUPLEX ((int32_t) 4) +#define DP83848_STATUS_10MBITS_HALFDUPLEX ((int32_t) 5) +#define DP83848_STATUS_AUTONEGO_NOTDONE ((int32_t) 6) +/** + * @} + */ + +/** @defgroup DP83848_IT_Flags DP83848 IT Flags + * @{ + */ +#define DP83848_WOL_IT DP83848_INT_8 +#define DP83848_ENERGYON_IT DP83848_INT_7 +#define DP83848_AUTONEGO_COMPLETE_IT DP83848_INT_6 +#define DP83848_REMOTE_FAULT_IT DP83848_INT_5 +#define DP83848_LINK_DOWN_IT DP83848_INT_4 +#define DP83848_AUTONEGO_LP_ACK_IT DP83848_INT_3 +#define DP83848_PARALLEL_DETECTION_FAULT_IT DP83848_INT_2 +#define DP83848_AUTONEGO_PAGE_RECEIVED_IT DP83848_INT_1 +/** + * @} + */ + +/** + * @} + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup DP83848_Exported_Types DP83848 Exported Types + * @{ + */ +typedef int32_t (*dp83848_Init_Func) (void); +typedef int32_t (*dp83848_DeInit_Func) (void); +typedef int32_t (*dp83848_ReadReg_Func) (uint32_t, uint32_t, uint32_t *); +typedef int32_t (*dp83848_WriteReg_Func) (uint32_t, uint32_t, uint32_t); +typedef int32_t (*dp83848_GetTick_Func) (void); + +typedef struct +{ + dp83848_Init_Func Init; + dp83848_DeInit_Func DeInit; + dp83848_WriteReg_Func WriteReg; + dp83848_ReadReg_Func ReadReg; + dp83848_GetTick_Func GetTick; +} dp83848_IOCtx_t; + + +typedef struct +{ + uint32_t DevAddr; + uint32_t Is_Initialized; + dp83848_IOCtx_t IO; + void *pData; +}dp83848_Object_t; +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @defgroup DP83848_Exported_Functions DP83848 Exported Functions + * @{ + */ +int32_t DP83848_RegisterBusIO(dp83848_Object_t *pObj, dp83848_IOCtx_t *ioctx); +int32_t DP83848_Init(dp83848_Object_t *pObj); +int32_t DP83848_DeInit(dp83848_Object_t *pObj); +int32_t DP83848_DisablePowerDownMode(dp83848_Object_t *pObj); +int32_t DP83848_EnablePowerDownMode(dp83848_Object_t *pObj); +int32_t DP83848_StartAutoNego(dp83848_Object_t *pObj); +int32_t 
DP83848_GetLinkState(dp83848_Object_t *pObj); +int32_t DP83848_SetLinkState(dp83848_Object_t *pObj, uint32_t LinkState); +int32_t DP83848_EnableLoopbackMode(dp83848_Object_t *pObj); +int32_t DP83848_DisableLoopbackMode(dp83848_Object_t *pObj); +int32_t DP83848_EnableIT(dp83848_Object_t *pObj, uint32_t Interrupt); +int32_t DP83848_DisableIT(dp83848_Object_t *pObj, uint32_t Interrupt); +int32_t DP83848_ClearIT(dp83848_Object_t *pObj, uint32_t Interrupt); +int32_t DP83848_GetITStatus(dp83848_Object_t *pObj, uint32_t Interrupt); +/** + * @} + */ + +#ifdef __cplusplus +} +#endif +#endif /* DP83848_H */ + + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ diff --git a/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f401xc.h b/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f401xc.h index 812bae65..38258bb5 100644 --- a/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f401xc.h +++ b/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f401xc.h @@ -12,13 +12,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -814,7 +813,15 @@ typedef struct /** @addtogroup Exported_constants * @{ */ - + +/** @addtogroup Hardware_Constant_Definition + * @{ + */ +#define LSI_STARTUP_TIME 40U /*!< LSI Maximum startup time in us */ +/** + * @} + */ + /** @addtogroup Peripheral_Registers_Bits_Definition * @{ */ @@ -2281,17 +2288,18 @@ typedef struct /* */ /******************************************************************************/ /******************* Bits definition for FLASH_ACR register *****************/ -#define FLASH_ACR_LATENCY_Pos (0U) -#define FLASH_ACR_LATENCY_Msk (0xFUL << FLASH_ACR_LATENCY_Pos) /*!< 0x0000000F */ -#define FLASH_ACR_LATENCY FLASH_ACR_LATENCY_Msk -#define FLASH_ACR_LATENCY_0WS 0x00000000U -#define FLASH_ACR_LATENCY_1WS 0x00000001U -#define FLASH_ACR_LATENCY_2WS 0x00000002U -#define FLASH_ACR_LATENCY_3WS 0x00000003U -#define FLASH_ACR_LATENCY_4WS 0x00000004U -#define FLASH_ACR_LATENCY_5WS 0x00000005U -#define FLASH_ACR_LATENCY_6WS 0x00000006U -#define FLASH_ACR_LATENCY_7WS 0x00000007U +#define FLASH_ACR_LATENCY_Pos (0U) +#define FLASH_ACR_LATENCY_Msk (0x7UL << FLASH_ACR_LATENCY_Pos) /*!< 0x00000007 */ +#define FLASH_ACR_LATENCY FLASH_ACR_LATENCY_Msk +#define FLASH_ACR_LATENCY_0WS 0x00000000U +#define FLASH_ACR_LATENCY_1WS 0x00000001U +#define FLASH_ACR_LATENCY_2WS 0x00000002U +#define FLASH_ACR_LATENCY_3WS 0x00000003U +#define FLASH_ACR_LATENCY_4WS 0x00000004U +#define FLASH_ACR_LATENCY_5WS 0x00000005U +#define FLASH_ACR_LATENCY_6WS 0x00000006U +#define FLASH_ACR_LATENCY_7WS 0x00000007U + #define FLASH_ACR_PRFTEN_Pos (8U) #define FLASH_ACR_PRFTEN_Msk (0x1UL << FLASH_ACR_PRFTEN_Pos) /*!< 0x00000100 */ @@ -2370,6 +2378,9 @@ typedef struct #define FLASH_CR_EOPIE_Pos (24U) #define FLASH_CR_EOPIE_Msk (0x1UL << FLASH_CR_EOPIE_Pos) /*!< 0x01000000 */ #define FLASH_CR_EOPIE FLASH_CR_EOPIE_Msk +#define FLASH_CR_ERRIE_Pos (25U) +#define FLASH_CR_ERRIE_Msk (0x1UL << FLASH_CR_ERRIE_Pos) +#define FLASH_CR_ERRIE FLASH_CR_ERRIE_Msk #define FLASH_CR_LOCK_Pos (31U) #define FLASH_CR_LOCK_Msk (0x1UL << FLASH_CR_LOCK_Pos) /*!< 0x80000000 */ #define FLASH_CR_LOCK FLASH_CR_LOCK_Msk @@ -2538,7 +2549,7 @@ typedef struct #define GPIO_MODER_MODE1 GPIO_MODER_MODER1 #define GPIO_MODER_MODE1_0 GPIO_MODER_MODER1_0 #define GPIO_MODER_MODE1_1 GPIO_MODER_MODER1_1 -#define GPIO_MODER_MODE2_Pos GPIO_MODER_MODER2_PoS +#define GPIO_MODER_MODE2_Pos GPIO_MODER_MODER2_Pos #define GPIO_MODER_MODE2_Msk GPIO_MODER_MODER2_Msk #define GPIO_MODER_MODE2 GPIO_MODER_MODER2 #define GPIO_MODER_MODE2_0 GPIO_MODER_MODER2_0 @@ -2569,7 +2580,7 @@ typedef struct #define GPIO_MODER_MODE7_0 GPIO_MODER_MODER7_0 #define GPIO_MODER_MODE7_1 GPIO_MODER_MODER7_1 #define GPIO_MODER_MODE8_Pos GPIO_MODER_MODER8_Pos -#define GPIO_MODER_MODE8_Msk GPIO_MODER_MODER2_Msk +#define GPIO_MODER_MODE8_Msk GPIO_MODER_MODER8_Msk #define GPIO_MODER_MODE8 GPIO_MODER_MODER8 #define GPIO_MODER_MODE8_0 GPIO_MODER_MODER8_0 #define GPIO_MODER_MODE8_1 GPIO_MODER_MODER8_1 @@ 
-8396,7 +8407,7 @@ typedef struct #define IS_TIM_32B_COUNTER_INSTANCE(INSTANCE)(((INSTANCE) == TIM2) || \ ((INSTANCE) == TIM5)) -/***************** TIM Instances : external trigger input availabe ************/ +/***************** TIM Instances : external trigger input available ************/ #define IS_TIM_ETR_INSTANCE(INSTANCE) (((INSTANCE) == TIM1) || \ ((INSTANCE) == TIM2) || \ ((INSTANCE) == TIM3) || \ @@ -8628,7 +8639,3 @@ typedef struct #endif /* __cplusplus */ #endif /* __STM32F401xC_H */ - - - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f407xx.h b/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f407xx.h index ef0421db..4f2df226 100644 --- a/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f407xx.h +++ b/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f407xx.h @@ -7,18 +7,17 @@ * This file contains: * - Data structures and the address mapping for all peripherals * - peripherals registers declarations and bits definition - * - Macros to access peripheral’s registers hardware + * - Macros to access peripheral's registers hardware * ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -35,7 +34,7 @@ #define __STM32F407xx_H #ifdef __cplusplus - extern "C" { +extern "C" { #endif /* __cplusplus */ /** @addtogroup Configuration_section_for_CMSIS @@ -65,7 +64,7 @@ */ typedef enum { -/****** Cortex-M4 Processor Exceptions Numbers ****************************************************************/ + /****** Cortex-M4 Processor Exceptions Numbers ****************************************************************/ NonMaskableInt_IRQn = -14, /*!< 2 Non Maskable Interrupt */ MemoryManagement_IRQn = -12, /*!< 4 Cortex-M4 Memory Management Interrupt */ BusFault_IRQn = -11, /*!< 5 Cortex-M4 Bus Fault Interrupt */ @@ -74,7 +73,7 @@ typedef enum DebugMonitor_IRQn = -4, /*!< 12 Cortex-M4 Debug Monitor Interrupt */ PendSV_IRQn = -2, /*!< 14 Cortex-M4 Pend SV Interrupt */ SysTick_IRQn = -1, /*!< 15 Cortex-M4 System Tick Interrupt */ -/****** STM32 specific Interrupt Numbers **********************************************************************/ + /****** STM32 specific Interrupt Numbers **********************************************************************/ WWDG_IRQn = 0, /*!< Window WatchDog Interrupt */ PVD_IRQn = 1, /*!< PVD through EXTI Line detection Interrupt */ TAMP_STAMP_IRQn = 2, /*!< Tamper and TimeStamp interrupts through the EXTI line */ @@ -318,7 +317,7 @@ typedef struct __IO uint32_t CR; /*!< Debug MCU configuration register, Address offset: 0x04 */ __IO uint32_t APB1FZ; /*!< Debug MCU APB1 freeze register, Address offset: 0x08 */ __IO uint32_t APB2FZ; /*!< Debug MCU APB2 freeze register, Address offset: 0x0C */ -}DBGMCU_TypeDef; +} DBGMCU_TypeDef; /** * @brief DCMI @@ -1149,9 +1148,17 @@ typedef struct * @{ */ - /** @addtogroup Peripheral_Registers_Bits_Definition +/** @addtogroup Hardware_Constant_Definition * @{ */ +#define LSI_STARTUP_TIME 40U /*!< LSI Maximum startup time in us */ +/** + * @} + */ + +/** @addtogroup Peripheral_Registers_Bits_Definition +* @{ +*/ /******************************************************************************/ /* Peripheral Registers_Bits_Definition */ @@ -1163,7 +1170,7 @@ typedef struct /* */ /******************************************************************************/ /* - * @brief Specific device feature definitions (not present on all devices in the STM32F4 serie) + * @brief Specific device feature definitions (not present on all devices in the STM32F4 series) */ #define ADC_MULTIMODE_SUPPORT /*!
© Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -817,7 +816,15 @@ typedef struct /** @addtogroup Exported_constants * @{ */ - + +/** @addtogroup Hardware_Constant_Definition + * @{ + */ +#define LSI_STARTUP_TIME 40U /*!< LSI Maximum startup time in us */ +/** + * @} + */ + /** @addtogroup Peripheral_Registers_Bits_Definition * @{ */ @@ -2284,17 +2291,18 @@ typedef struct /* */ /******************************************************************************/ /******************* Bits definition for FLASH_ACR register *****************/ -#define FLASH_ACR_LATENCY_Pos (0U) -#define FLASH_ACR_LATENCY_Msk (0xFUL << FLASH_ACR_LATENCY_Pos) /*!< 0x0000000F */ -#define FLASH_ACR_LATENCY FLASH_ACR_LATENCY_Msk -#define FLASH_ACR_LATENCY_0WS 0x00000000U -#define FLASH_ACR_LATENCY_1WS 0x00000001U -#define FLASH_ACR_LATENCY_2WS 0x00000002U -#define FLASH_ACR_LATENCY_3WS 0x00000003U -#define FLASH_ACR_LATENCY_4WS 0x00000004U -#define FLASH_ACR_LATENCY_5WS 0x00000005U -#define FLASH_ACR_LATENCY_6WS 0x00000006U -#define FLASH_ACR_LATENCY_7WS 0x00000007U +#define FLASH_ACR_LATENCY_Pos (0U) +#define FLASH_ACR_LATENCY_Msk (0x7UL << FLASH_ACR_LATENCY_Pos) /*!< 0x00000007 */ +#define FLASH_ACR_LATENCY FLASH_ACR_LATENCY_Msk +#define FLASH_ACR_LATENCY_0WS 0x00000000U +#define FLASH_ACR_LATENCY_1WS 0x00000001U +#define FLASH_ACR_LATENCY_2WS 0x00000002U +#define FLASH_ACR_LATENCY_3WS 0x00000003U +#define FLASH_ACR_LATENCY_4WS 0x00000004U +#define FLASH_ACR_LATENCY_5WS 0x00000005U +#define FLASH_ACR_LATENCY_6WS 0x00000006U +#define FLASH_ACR_LATENCY_7WS 0x00000007U + #define FLASH_ACR_PRFTEN_Pos (8U) #define FLASH_ACR_PRFTEN_Msk (0x1UL << FLASH_ACR_PRFTEN_Pos) /*!< 0x00000100 */ @@ -2373,6 +2381,9 @@ typedef struct #define FLASH_CR_EOPIE_Pos (24U) #define FLASH_CR_EOPIE_Msk (0x1UL << FLASH_CR_EOPIE_Pos) /*!< 0x01000000 */ #define FLASH_CR_EOPIE FLASH_CR_EOPIE_Msk +#define FLASH_CR_ERRIE_Pos (25U) +#define FLASH_CR_ERRIE_Msk (0x1UL << FLASH_CR_ERRIE_Pos) +#define FLASH_CR_ERRIE FLASH_CR_ERRIE_Msk #define FLASH_CR_LOCK_Pos (31U) #define FLASH_CR_LOCK_Msk (0x1UL << FLASH_CR_LOCK_Pos) /*!< 0x80000000 */ #define FLASH_CR_LOCK FLASH_CR_LOCK_Msk @@ -2541,7 +2552,7 @@ typedef struct #define GPIO_MODER_MODE1 GPIO_MODER_MODER1 #define GPIO_MODER_MODE1_0 GPIO_MODER_MODER1_0 #define GPIO_MODER_MODE1_1 GPIO_MODER_MODER1_1 -#define GPIO_MODER_MODE2_Pos GPIO_MODER_MODER2_PoS +#define GPIO_MODER_MODE2_Pos GPIO_MODER_MODER2_Pos #define GPIO_MODER_MODE2_Msk GPIO_MODER_MODER2_Msk #define GPIO_MODER_MODE2 GPIO_MODER_MODER2 #define GPIO_MODER_MODE2_0 GPIO_MODER_MODER2_0 @@ -2572,7 +2583,7 @@ typedef struct #define GPIO_MODER_MODE7_0 GPIO_MODER_MODER7_0 #define GPIO_MODER_MODE7_1 GPIO_MODER_MODER7_1 #define GPIO_MODER_MODE8_Pos GPIO_MODER_MODER8_Pos -#define GPIO_MODER_MODE8_Msk GPIO_MODER_MODER2_Msk +#define GPIO_MODER_MODE8_Msk GPIO_MODER_MODER8_Msk #define GPIO_MODER_MODE8 GPIO_MODER_MODER8 #define GPIO_MODER_MODE8_0 GPIO_MODER_MODER8_0 #define GPIO_MODER_MODE8_1 GPIO_MODER_MODER8_1 @@ 
-8431,7 +8442,7 @@ typedef struct #define IS_TIM_32B_COUNTER_INSTANCE(INSTANCE)(((INSTANCE) == TIM2) || \ ((INSTANCE) == TIM5)) -/***************** TIM Instances : external trigger input availabe ************/ +/***************** TIM Instances : external trigger input available ************/ #define IS_TIM_ETR_INSTANCE(INSTANCE) (((INSTANCE) == TIM1) || \ ((INSTANCE) == TIM2) || \ ((INSTANCE) == TIM3) || \ @@ -8667,7 +8678,3 @@ typedef struct #endif /* __cplusplus */ #endif /* __STM32F411xE_H */ - - - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f446xx.h b/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f446xx.h index 3e08c450..6b064e8e 100644 --- a/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f446xx.h +++ b/Drivers/CMSIS/Device/ST/STM32F4xx/Include/stm32f446xx.h @@ -12,13 +12,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -1165,7 +1164,15 @@ typedef struct /** @addtogroup Exported_constants * @{ */ - + +/** @addtogroup Hardware_Constant_Definition + * @{ + */ +#define LSI_STARTUP_TIME 40U /*!< LSI Maximum startup time in us */ +/** + * @} + */ + /** @addtogroup Peripheral_Registers_Bits_Definition * @{ */ @@ -6841,26 +6848,27 @@ typedef struct /* */ /******************************************************************************/ /******************* Bits definition for FLASH_ACR register *****************/ -#define FLASH_ACR_LATENCY_Pos (0U) +#define FLASH_ACR_LATENCY_Pos (0U) #define FLASH_ACR_LATENCY_Msk (0xFUL << FLASH_ACR_LATENCY_Pos) /*!< 0x0000000F */ -#define FLASH_ACR_LATENCY FLASH_ACR_LATENCY_Msk -#define FLASH_ACR_LATENCY_0WS 0x00000000U -#define FLASH_ACR_LATENCY_1WS 0x00000001U -#define FLASH_ACR_LATENCY_2WS 0x00000002U -#define FLASH_ACR_LATENCY_3WS 0x00000003U -#define FLASH_ACR_LATENCY_4WS 0x00000004U -#define FLASH_ACR_LATENCY_5WS 0x00000005U -#define FLASH_ACR_LATENCY_6WS 0x00000006U -#define FLASH_ACR_LATENCY_7WS 0x00000007U - -#define FLASH_ACR_LATENCY_8WS 0x00000008U -#define FLASH_ACR_LATENCY_9WS 0x00000009U -#define FLASH_ACR_LATENCY_10WS 0x0000000AU -#define FLASH_ACR_LATENCY_11WS 0x0000000BU -#define FLASH_ACR_LATENCY_12WS 0x0000000CU -#define FLASH_ACR_LATENCY_13WS 0x0000000DU -#define FLASH_ACR_LATENCY_14WS 0x0000000EU -#define FLASH_ACR_LATENCY_15WS 0x0000000FU +#define FLASH_ACR_LATENCY FLASH_ACR_LATENCY_Msk +#define FLASH_ACR_LATENCY_0WS 0x00000000U +#define FLASH_ACR_LATENCY_1WS 0x00000001U +#define FLASH_ACR_LATENCY_2WS 0x00000002U +#define FLASH_ACR_LATENCY_3WS 0x00000003U +#define FLASH_ACR_LATENCY_4WS 0x00000004U +#define FLASH_ACR_LATENCY_5WS 0x00000005U +#define FLASH_ACR_LATENCY_6WS 0x00000006U +#define FLASH_ACR_LATENCY_7WS 0x00000007U + +#define FLASH_ACR_LATENCY_8WS 0x00000008U +#define FLASH_ACR_LATENCY_9WS 0x00000009U +#define FLASH_ACR_LATENCY_10WS 0x0000000AU +#define FLASH_ACR_LATENCY_11WS 0x0000000BU +#define FLASH_ACR_LATENCY_12WS 0x0000000CU +#define FLASH_ACR_LATENCY_13WS 0x0000000DU +#define FLASH_ACR_LATENCY_14WS 0x0000000EU +#define FLASH_ACR_LATENCY_15WS 0x0000000FU + #define FLASH_ACR_PRFTEN_Pos (8U) #define FLASH_ACR_PRFTEN_Msk (0x1UL << FLASH_ACR_PRFTEN_Pos) /*!< 0x00000100 */ #define FLASH_ACR_PRFTEN FLASH_ACR_PRFTEN_Msk @@ -6942,6 +6950,9 @@ typedef struct #define FLASH_CR_EOPIE_Pos (24U) #define FLASH_CR_EOPIE_Msk (0x1UL << FLASH_CR_EOPIE_Pos) /*!< 0x01000000 */ #define FLASH_CR_EOPIE FLASH_CR_EOPIE_Msk +#define FLASH_CR_ERRIE_Pos (25U) +#define FLASH_CR_ERRIE_Msk (0x1UL << FLASH_CR_ERRIE_Pos) +#define FLASH_CR_ERRIE FLASH_CR_ERRIE_Msk #define FLASH_CR_LOCK_Pos (31U) #define FLASH_CR_LOCK_Msk (0x1UL << FLASH_CR_LOCK_Pos) /*!< 0x80000000 */ #define FLASH_CR_LOCK FLASH_CR_LOCK_Msk @@ -7081,7 +7092,7 @@ typedef struct #define FMC_BCR1_CBURSTRW FMC_BCR1_CBURSTRW_Msk /*!
© Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -34,18 +33,18 @@ /** @addtogroup stm32f4xx * @{ */ - + #ifndef __STM32F4xx_H #define __STM32F4xx_H #ifdef __cplusplus - extern "C" { +extern "C" { #endif /* __cplusplus */ - + /** @addtogroup Library_configuration_section * @{ */ - + /** * @brief STM32 Family */ @@ -54,7 +53,7 @@ #endif /* STM32F4 */ /* Uncomment the line below according to the target STM32 device used in your - application + application */ #if !defined (STM32F405xx) && !defined (STM32F415xx) && !defined (STM32F407xx) && !defined (STM32F417xx) && \ !defined (STM32F427xx) && !defined (STM32F437xx) && !defined (STM32F429xx) && !defined (STM32F439xx) && \ @@ -62,60 +61,60 @@ !defined (STM32F410Rx) && !defined (STM32F411xE) && !defined (STM32F446xx) && !defined (STM32F469xx) && \ !defined (STM32F479xx) && !defined (STM32F412Cx) && !defined (STM32F412Rx) && !defined (STM32F412Vx) && \ !defined (STM32F412Zx) && !defined (STM32F413xx) && !defined (STM32F423xx) - /* #define STM32F405xx */ /*!< STM32F405RG, STM32F405VG and STM32F405ZG Devices */ - /* #define STM32F415xx */ /*!< STM32F415RG, STM32F415VG and STM32F415ZG Devices */ - /* #define STM32F407xx */ /*!< STM32F407VG, STM32F407VE, STM32F407ZG, STM32F407ZE, STM32F407IG and STM32F407IE Devices */ - /* #define STM32F417xx */ /*!< STM32F417VG, STM32F417VE, STM32F417ZG, STM32F417ZE, STM32F417IG and STM32F417IE Devices */ - /* #define STM32F427xx */ /*!< STM32F427VG, STM32F427VI, STM32F427ZG, STM32F427ZI, STM32F427IG and STM32F427II Devices */ - /* #define STM32F437xx */ /*!< STM32F437VG, STM32F437VI, STM32F437ZG, STM32F437ZI, STM32F437IG and STM32F437II Devices */ - /* #define STM32F429xx */ /*!< STM32F429VG, STM32F429VI, STM32F429ZG, STM32F429ZI, STM32F429BG, STM32F429BI, STM32F429NG, - STM32F439NI, STM32F429IG and STM32F429II Devices */ - /* #define STM32F439xx */ /*!< STM32F439VG, STM32F439VI, STM32F439ZG, STM32F439ZI, STM32F439BG, STM32F439BI, STM32F439NG, - STM32F439NI, STM32F439IG and STM32F439II Devices */ - /* #define STM32F401xC */ /*!< STM32F401CB, STM32F401CC, STM32F401RB, STM32F401RC, STM32F401VB and STM32F401VC Devices */ - /* #define STM32F401xE */ /*!< STM32F401CD, STM32F401RD, STM32F401VD, STM32F401CE, STM32F401RE and STM32F401VE Devices */ - /* #define STM32F410Tx */ /*!< STM32F410T8 and STM32F410TB Devices */ - /* #define STM32F410Cx */ /*!< STM32F410C8 and STM32F410CB Devices */ - /* #define STM32F410Rx */ /*!< STM32F410R8 and STM32F410RB Devices */ - /* #define STM32F411xE */ /*!< STM32F411CC, STM32F411RC, STM32F411VC, STM32F411CE, STM32F411RE and STM32F411VE Devices */ - /* #define STM32F446xx */ /*!< STM32F446MC, STM32F446ME, STM32F446RC, STM32F446RE, STM32F446VC, STM32F446VE, STM32F446ZC, - and STM32F446ZE Devices */ - /* #define STM32F469xx */ /*!< STM32F469AI, STM32F469II, STM32F469BI, STM32F469NI, STM32F469AG, STM32F469IG, STM32F469BG, - STM32F469NG, STM32F469AE, STM32F469IE, STM32F469BE and STM32F469NE Devices */ - /* #define STM32F479xx */ /*!< STM32F479AI, 
STM32F479II, STM32F479BI, STM32F479NI, STM32F479AG, STM32F479IG, STM32F479BG - and STM32F479NG Devices */ - /* #define STM32F412Cx */ /*!< STM32F412CEU and STM32F412CGU Devices */ - /* #define STM32F412Zx */ /*!< STM32F412ZET, STM32F412ZGT, STM32F412ZEJ and STM32F412ZGJ Devices */ - /* #define STM32F412Vx */ /*!< STM32F412VET, STM32F412VGT, STM32F412VEH and STM32F412VGH Devices */ - /* #define STM32F412Rx */ /*!< STM32F412RET, STM32F412RGT, STM32F412REY and STM32F412RGY Devices */ - /* #define STM32F413xx */ /*!< STM32F413CH, STM32F413MH, STM32F413RH, STM32F413VH, STM32F413ZH, STM32F413CG, STM32F413MG, - STM32F413RG, STM32F413VG and STM32F413ZG Devices */ - /* #define STM32F423xx */ /*!< STM32F423CH, STM32F423RH, STM32F423VH and STM32F423ZH Devices */ +/* #define STM32F405xx */ /*!< STM32F405RG, STM32F405VG and STM32F405ZG Devices */ +/* #define STM32F415xx */ /*!< STM32F415RG, STM32F415VG and STM32F415ZG Devices */ +/* #define STM32F407xx */ /*!< STM32F407VG, STM32F407VE, STM32F407ZG, STM32F407ZE, STM32F407IG and STM32F407IE Devices */ +/* #define STM32F417xx */ /*!< STM32F417VG, STM32F417VE, STM32F417ZG, STM32F417ZE, STM32F417IG and STM32F417IE Devices */ +/* #define STM32F427xx */ /*!< STM32F427VG, STM32F427VI, STM32F427ZG, STM32F427ZI, STM32F427IG and STM32F427II Devices */ +/* #define STM32F437xx */ /*!< STM32F437VG, STM32F437VI, STM32F437ZG, STM32F437ZI, STM32F437IG and STM32F437II Devices */ +/* #define STM32F429xx */ /*!< STM32F429VG, STM32F429VI, STM32F429ZG, STM32F429ZI, STM32F429BG, STM32F429BI, STM32F429NG, + STM32F439NI, STM32F429IG and STM32F429II Devices */ +/* #define STM32F439xx */ /*!< STM32F439VG, STM32F439VI, STM32F439ZG, STM32F439ZI, STM32F439BG, STM32F439BI, STM32F439NG, + STM32F439NI, STM32F439IG and STM32F439II Devices */ +/* #define STM32F401xC */ /*!< STM32F401CB, STM32F401CC, STM32F401RB, STM32F401RC, STM32F401VB and STM32F401VC Devices */ +/* #define STM32F401xE */ /*!< STM32F401CD, STM32F401RD, STM32F401VD, STM32F401CE, STM32F401RE and STM32F401VE Devices */ +/* #define STM32F410Tx */ /*!< STM32F410T8 and STM32F410TB Devices */ +/* #define STM32F410Cx */ /*!< STM32F410C8 and STM32F410CB Devices */ +/* #define STM32F410Rx */ /*!< STM32F410R8 and STM32F410RB Devices */ +/* #define STM32F411xE */ /*!< STM32F411CC, STM32F411RC, STM32F411VC, STM32F411CE, STM32F411RE and STM32F411VE Devices */ +/* #define STM32F446xx */ /*!< STM32F446MC, STM32F446ME, STM32F446RC, STM32F446RE, STM32F446VC, STM32F446VE, STM32F446ZC, + and STM32F446ZE Devices */ +/* #define STM32F469xx */ /*!< STM32F469AI, STM32F469II, STM32F469BI, STM32F469NI, STM32F469AG, STM32F469IG, STM32F469BG, + STM32F469NG, STM32F469AE, STM32F469IE, STM32F469BE and STM32F469NE Devices */ +/* #define STM32F479xx */ /*!< STM32F479AI, STM32F479II, STM32F479BI, STM32F479NI, STM32F479AG, STM32F479IG, STM32F479BG + and STM32F479NG Devices */ +/* #define STM32F412Cx */ /*!< STM32F412CEU and STM32F412CGU Devices */ +/* #define STM32F412Zx */ /*!< STM32F412ZET, STM32F412ZGT, STM32F412ZEJ and STM32F412ZGJ Devices */ +/* #define STM32F412Vx */ /*!< STM32F412VET, STM32F412VGT, STM32F412VEH and STM32F412VGH Devices */ +/* #define STM32F412Rx */ /*!< STM32F412RET, STM32F412RGT, STM32F412REY and STM32F412RGY Devices */ +/* #define STM32F413xx */ /*!< STM32F413CH, STM32F413MH, STM32F413RH, STM32F413VH, STM32F413ZH, STM32F413CG, STM32F413MG, + STM32F413RG, STM32F413VG and STM32F413ZG Devices */ +/* #define STM32F423xx */ /*!< STM32F423CH, STM32F423RH, STM32F423VH and STM32F423ZH Devices */ #endif - + /* Tip: To avoid modifying 
this file each time you need to switch between these devices, you can define the device in your toolchain compiler preprocessor. */ #if !defined (USE_HAL_DRIVER) /** * @brief Comment the line below if you will not use the peripherals drivers. - In this case, these drivers will not be included and the application code will - be based on direct access to peripherals registers + In this case, these drivers will not be included and the application code will + be based on direct access to peripherals registers */ - /*#define USE_HAL_DRIVER */ +/*#define USE_HAL_DRIVER */ #endif /* USE_HAL_DRIVER */ /** - * @brief CMSIS version number V2.6.5 + * @brief CMSIS version number V2.6.10 */ #define __STM32F4xx_CMSIS_VERSION_MAIN (0x02U) /*!< [31:24] main version */ #define __STM32F4xx_CMSIS_VERSION_SUB1 (0x06U) /*!< [23:16] sub1 version */ -#define __STM32F4xx_CMSIS_VERSION_SUB2 (0x05U) /*!< [15:8] sub2 version */ +#define __STM32F4xx_CMSIS_VERSION_SUB2 (0x0AU) /*!< [15:8] sub2 version */ #define __STM32F4xx_CMSIS_VERSION_RC (0x00U) /*!< [7:0] release candidate */ #define __STM32F4xx_CMSIS_VERSION ((__STM32F4xx_CMSIS_VERSION_MAIN << 24)\ |(__STM32F4xx_CMSIS_VERSION_SUB1 << 16)\ |(__STM32F4xx_CMSIS_VERSION_SUB2 << 8 )\ - |(__STM32F4xx_CMSIS_VERSION)) + |(__STM32F4xx_CMSIS_VERSION_RC)) /** * @} @@ -126,53 +125,53 @@ */ #if defined(STM32F405xx) - #include "stm32f405xx.h" +#include "stm32f405xx.h" #elif defined(STM32F415xx) - #include "stm32f415xx.h" +#include "stm32f415xx.h" #elif defined(STM32F407xx) - #include "stm32f407xx.h" +#include "stm32f407xx.h" #elif defined(STM32F417xx) - #include "stm32f417xx.h" +#include "stm32f417xx.h" #elif defined(STM32F427xx) - #include "stm32f427xx.h" +#include "stm32f427xx.h" #elif defined(STM32F437xx) - #include "stm32f437xx.h" +#include "stm32f437xx.h" #elif defined(STM32F429xx) - #include "stm32f429xx.h" +#include "stm32f429xx.h" #elif defined(STM32F439xx) - #include "stm32f439xx.h" +#include "stm32f439xx.h" #elif defined(STM32F401xC) - #include "stm32f401xc.h" +#include "stm32f401xc.h" #elif defined(STM32F401xE) - #include "stm32f401xe.h" +#include "stm32f401xe.h" #elif defined(STM32F410Tx) - #include "stm32f410tx.h" +#include "stm32f410tx.h" #elif defined(STM32F410Cx) - #include "stm32f410cx.h" +#include "stm32f410cx.h" #elif defined(STM32F410Rx) - #include "stm32f410rx.h" +#include "stm32f410rx.h" #elif defined(STM32F411xE) - #include "stm32f411xe.h" +#include "stm32f411xe.h" #elif defined(STM32F446xx) - #include "stm32f446xx.h" +#include "stm32f446xx.h" #elif defined(STM32F469xx) - #include "stm32f469xx.h" +#include "stm32f469xx.h" #elif defined(STM32F479xx) - #include "stm32f479xx.h" +#include "stm32f479xx.h" #elif defined(STM32F412Cx) - #include "stm32f412cx.h" +#include "stm32f412cx.h" #elif defined(STM32F412Zx) - #include "stm32f412zx.h" +#include "stm32f412zx.h" #elif defined(STM32F412Rx) - #include "stm32f412rx.h" +#include "stm32f412rx.h" #elif defined(STM32F412Vx) - #include "stm32f412vx.h" +#include "stm32f412vx.h" #elif defined(STM32F413xx) - #include "stm32f413xx.h" +#include "stm32f413xx.h" #elif defined(STM32F423xx) - #include "stm32f423xx.h" +#include "stm32f423xx.h" #else - #error "Please select first the target STM32F4xx device used in your application (in stm32f4xx.h file)" +#error "Please select first the target STM32F4xx device used in your application (in stm32f4xx.h file)" #endif /** @@ -181,16 +180,16 @@ /** @addtogroup Exported_types * @{ - */ -typedef enum + */ +typedef enum { - RESET = 0U, + RESET = 0U, SET = !RESET } FlagStatus, ITStatus; 
-typedef enum +typedef enum { - DISABLE = 0U, + DISABLE = 0U, ENABLE = !DISABLE } FunctionalState; #define IS_FUNCTIONAL_STATE(STATE) (((STATE) == DISABLE) || ((STATE) == ENABLE)) @@ -223,15 +222,69 @@ typedef enum #define MODIFY_REG(REG, CLEARMASK, SETMASK) WRITE_REG((REG), (((READ_REG(REG)) & (~(CLEARMASK))) | (SETMASK))) -#define POSITION_VAL(VAL) (__CLZ(__RBIT(VAL))) +#define POSITION_VAL(VAL) (__CLZ(__RBIT(VAL))) +/* Use of CMSIS compiler intrinsics for register exclusive access */ +/* Atomic 32-bit register access macro to set one or several bits */ +#define ATOMIC_SET_BIT(REG, BIT) \ + do { \ + uint32_t val; \ + do { \ + val = __LDREXW((__IO uint32_t *)&(REG)) | (BIT); \ + } while ((__STREXW(val,(__IO uint32_t *)&(REG))) != 0U); \ + } while(0) + +/* Atomic 32-bit register access macro to clear one or several bits */ +#define ATOMIC_CLEAR_BIT(REG, BIT) \ + do { \ + uint32_t val; \ + do { \ + val = __LDREXW((__IO uint32_t *)&(REG)) & ~(BIT); \ + } while ((__STREXW(val,(__IO uint32_t *)&(REG))) != 0U); \ + } while(0) + +/* Atomic 32-bit register access macro to clear and set one or several bits */ +#define ATOMIC_MODIFY_REG(REG, CLEARMSK, SETMASK) \ + do { \ + uint32_t val; \ + do { \ + val = (__LDREXW((__IO uint32_t *)&(REG)) & ~(CLEARMSK)) | (SETMASK); \ + } while ((__STREXW(val,(__IO uint32_t *)&(REG))) != 0U); \ + } while(0) + +/* Atomic 16-bit register access macro to set one or several bits */ +#define ATOMIC_SETH_BIT(REG, BIT) \ + do { \ + uint16_t val; \ + do { \ + val = __LDREXH((__IO uint16_t *)&(REG)) | (BIT); \ + } while ((__STREXH(val,(__IO uint16_t *)&(REG))) != 0U); \ + } while(0) + +/* Atomic 16-bit register access macro to clear one or several bits */ +#define ATOMIC_CLEARH_BIT(REG, BIT) \ + do { \ + uint16_t val; \ + do { \ + val = __LDREXH((__IO uint16_t *)&(REG)) & ~(BIT); \ + } while ((__STREXH(val,(__IO uint16_t *)&(REG))) != 0U); \ + } while(0) + +/* Atomic 16-bit register access macro to clear and set one or several bits */ +#define ATOMIC_MODIFYH_REG(REG, CLEARMSK, SETMASK) \ + do { \ + uint16_t val; \ + do { \ + val = (__LDREXH((__IO uint16_t *)&(REG)) & ~(CLEARMSK)) | (SETMASK); \ + } while ((__STREXH(val,(__IO uint16_t *)&(REG))) != 0U); \ + } while(0) /** * @} */ #if defined (USE_HAL_DRIVER) - #include "stm32f4xx_hal.h" +#include "stm32f4xx_hal.h" #endif /* USE_HAL_DRIVER */ #ifdef __cplusplus @@ -246,8 +299,3 @@ typedef enum /** * @} */ - - - - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/CMSIS/Device/ST/STM32F4xx/Include/system_stm32f4xx.h b/Drivers/CMSIS/Device/ST/STM32F4xx/Include/system_stm32f4xx.h index 99cb936c..15208a55 100644 --- a/Drivers/CMSIS/Device/ST/STM32F4xx/Include/system_stm32f4xx.h +++ b/Drivers/CMSIS/Device/ST/STM32F4xx/Include/system_stm32f4xx.h @@ -2,36 +2,19 @@ ****************************************************************************** * @file system_stm32f4xx.h * @author MCD Application Team - * @brief CMSIS Cortex-M4 Device System Source File for STM32F4xx devices. - ****************************************************************************** + * @brief CMSIS Cortex-M4 Device System Source File for STM32F4xx devices. + ****************************************************************************** * @attention * - *

© COPYRIGHT(c) 2017 STMicroelectronics
- * - * Redistribution and use in source and binary forms, with or without modification, - * are permitted provided that the following conditions are met: - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * 3. Neither the name of STMicroelectronics nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * - ****************************************************************************** - */ + ****************************************************************************** + */ /** @addtogroup CMSIS * @{ @@ -39,8 +22,8 @@ /** @addtogroup stm32f4xx_system * @{ - */ - + */ + /** * @brief Define to prevent recursive inclusion */ @@ -48,8 +31,8 @@ #define __SYSTEM_STM32F4XX_H #ifdef __cplusplus - extern "C" { -#endif +extern "C" { +#endif /** @addtogroup STM32F4xx_System_Includes * @{ @@ -63,14 +46,14 @@ /** @addtogroup STM32F4xx_System_Exported_types * @{ */ - /* This variable is updated in three ways: - 1) by calling CMSIS function SystemCoreClockUpdate() - 2) by calling HAL API function HAL_RCC_GetSysClockFreq() - 3) each time HAL_RCC_ClockConfig() is called to configure the system clock frequency - Note: If you use this function to configure the system clock; then there - is no need to call the 2 first functions listed above, since SystemCoreClock - variable is updated automatically. - */ +/* This variable is updated in three ways: + 1) by calling CMSIS function SystemCoreClockUpdate() + 2) by calling HAL API function HAL_RCC_GetSysClockFreq() + 3) each time HAL_RCC_ClockConfig() is called to configure the system clock frequency + Note: If you use this function to configure the system clock; then there + is no need to call the 2 first functions listed above, since SystemCoreClock + variable is updated automatically. 
+*/ extern uint32_t SystemCoreClock; /*!< System Clock Frequency (Core Clock) */ extern const uint8_t AHBPrescTable[16]; /*!< AHB prescalers table values */ @@ -99,7 +82,7 @@ extern const uint8_t APBPrescTable[8]; /*!< APB prescalers table values */ /** @addtogroup STM32F4xx_System_Exported_Functions * @{ */ - + extern void SystemInit(void); extern void SystemCoreClockUpdate(void); /** @@ -115,8 +98,7 @@ extern void SystemCoreClockUpdate(void); /** * @} */ - + /** * @} - */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + */ diff --git a/Drivers/CMSIS/Device/ST/STM32F4xx/LICENSE.txt b/Drivers/CMSIS/Device/ST/STM32F4xx/LICENSE.txt new file mode 100644 index 00000000..872e82b4 --- /dev/null +++ b/Drivers/CMSIS/Device/ST/STM32F4xx/LICENSE.txt @@ -0,0 +1,6 @@ +This software component is provided to you as part of a software package and +applicable license terms are in the Package_license file. If you received this +software component outside of a package or without applicable license terms, +the terms of the Apache-2.0 license shall apply. +You may obtain a copy of the Apache-2.0 at: +https://opensource.org/licenses/Apache-2.0 diff --git a/Drivers/CMSIS/Include/cachel1_armv7.h b/Drivers/CMSIS/Include/cachel1_armv7.h new file mode 100644 index 00000000..abebc95f --- /dev/null +++ b/Drivers/CMSIS/Include/cachel1_armv7.h @@ -0,0 +1,411 @@ +/****************************************************************************** + * @file cachel1_armv7.h + * @brief CMSIS Level 1 Cache API for Armv7-M and later + * @version V1.0.1 + * @date 19. April 2021 + ******************************************************************************/ +/* + * Copyright (c) 2020-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef ARM_CACHEL1_ARMV7_H +#define ARM_CACHEL1_ARMV7_H + +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_CacheFunctions Cache Functions + \brief Functions that configure Instruction and Data cache. + @{ + */ + +/* Cache Size ID Register Macros */ +#define CCSIDR_WAYS(x) (((x) & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos) +#define CCSIDR_SETS(x) (((x) & SCB_CCSIDR_NUMSETS_Msk ) >> SCB_CCSIDR_NUMSETS_Pos ) + +#ifndef __SCB_DCACHE_LINE_SIZE +#define __SCB_DCACHE_LINE_SIZE 32U /*!< Cortex-M7 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */ +#endif + +#ifndef __SCB_ICACHE_LINE_SIZE +#define __SCB_ICACHE_LINE_SIZE 32U /*!< Cortex-M7 cache line size is fixed to 32 bytes (8 words). 
See also register SCB_CCSIDR */ +#endif + +/** + \brief Enable I-Cache + \details Turns on I-Cache + */ +__STATIC_FORCEINLINE void SCB_EnableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + if (SCB->CCR & SCB_CCR_IC_Msk) return; /* return if ICache is already enabled */ + + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + SCB->CCR |= (uint32_t)SCB_CCR_IC_Msk; /* enable I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable I-Cache + \details Turns off I-Cache + */ +__STATIC_FORCEINLINE void SCB_DisableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->CCR &= ~(uint32_t)SCB_CCR_IC_Msk; /* disable I-Cache */ + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate I-Cache + \details Invalidates I-Cache + */ +__STATIC_FORCEINLINE void SCB_InvalidateICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; + __DSB(); + __ISB(); + #endif +} + + +/** + \brief I-Cache Invalidate by address + \details Invalidates I-Cache for the given address. + I-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity. + I-Cache memory blocks which are part of given address + given size are invalidated. + \param[in] addr address + \param[in] isize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (volatile void *addr, int32_t isize) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + if ( isize > 0 ) { + int32_t op_size = isize + (((uint32_t)addr) & (__SCB_ICACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_ICACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->ICIMVAU = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_ICACHE_LINE_SIZE; + op_size -= __SCB_ICACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief Enable D-Cache + \details Turns on D-Cache + */ +__STATIC_FORCEINLINE void SCB_EnableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + if (SCB->CCR & SCB_CCR_DC_Msk) return; /* return if DCache is already enabled */ + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + __DSB(); + + SCB->CCR |= (uint32_t)SCB_CCR_DC_Msk; /* enable D-Cache */ + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable D-Cache + \details Turns off D-Cache + */ +__STATIC_FORCEINLINE void SCB_DisableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & 
SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate D-Cache + \details Invalidates D-Cache + */ +__STATIC_FORCEINLINE void SCB_InvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean D-Cache + \details Cleans D-Cache + */ +__STATIC_FORCEINLINE void SCB_CleanDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCSW = (((sets << SCB_DCCSW_SET_Pos) & SCB_DCCSW_SET_Msk) | + ((ways << SCB_DCCSW_WAY_Pos) & SCB_DCCSW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean & Invalidate D-Cache + \details Cleans and Invalidates D-Cache + */ +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief D-Cache Invalidate by address + \details Invalidates D-Cache for the given address. + D-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are invalidated. 
+ \param[in] addr address + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (volatile void *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief D-Cache Clean by address + \details Cleans D-Cache for the given address + D-Cache is cleaned starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are cleaned. + \param[in] addr address + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (volatile void *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCCMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief D-Cache Clean and Invalidate by address + \details Cleans and invalidates D_Cache for the given address + D-Cache is cleaned and invalidated starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are cleaned and invalidated. + \param[in] addr address (aligned to 32-byte boundary) + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (volatile void *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + +/*@} end of CMSIS_Core_CacheFunctions */ + +#endif /* ARM_CACHEL1_ARMV7_H */ diff --git a/Drivers/CMSIS/Include/cmsis_armcc.h b/Drivers/CMSIS/Include/cmsis_armcc.h index 4d9d0645..a955d471 100644 --- a/Drivers/CMSIS/Include/cmsis_armcc.h +++ b/Drivers/CMSIS/Include/cmsis_armcc.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_armcc.h * @brief CMSIS compiler ARMCC (Arm Compiler 5) header file - * @version V5.0.4 - * @date 10. January 2018 + * @version V5.3.2 + * @date 27. May 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -46,7 +46,12 @@ /* __ARM_ARCH_8M_BASE__ not applicable */ /* __ARM_ARCH_8M_MAIN__ not applicable */ + /* __ARM_ARCH_8_1M_MAIN__ not applicable */ +/* CMSIS compiler control DSP macros */ +#if ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + #define __ARM_FEATURE_DSP 1 +#endif /* CMSIS compiler specific defines */ #ifndef __ASM @@ -58,9 +63,9 @@ #ifndef __STATIC_INLINE #define __STATIC_INLINE static __inline #endif -#ifndef __STATIC_FORCEINLINE +#ifndef __STATIC_FORCEINLINE #define __STATIC_FORCEINLINE static __forceinline -#endif +#endif #ifndef __NO_RETURN #define __NO_RETURN __declspec(noreturn) #endif @@ -100,279 +105,31 @@ #ifndef __RESTRICT #define __RESTRICT __restrict #endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __memory_changed() +#endif -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions - @{ - */ - -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -/* intrinsic void __enable_irq(); */ - - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -/* intrinsic void __disable_irq(); */ - -/** - \brief Get Control Register - \details Returns the content of the Control Register. - \return Control Register value - */ -__STATIC_INLINE uint32_t __get_CONTROL(void) -{ - register uint32_t __regControl __ASM("control"); - return(__regControl); -} - - -/** - \brief Set Control Register - \details Writes the given value to the Control Register. - \param [in] control Control Register value to set - */ -__STATIC_INLINE void __set_CONTROL(uint32_t control) -{ - register uint32_t __regControl __ASM("control"); - __regControl = control; -} - - -/** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value - */ -__STATIC_INLINE uint32_t __get_IPSR(void) -{ - register uint32_t __regIPSR __ASM("ipsr"); - return(__regIPSR); -} - - -/** - \brief Get APSR Register - \details Returns the content of the APSR Register. - \return APSR Register value - */ -__STATIC_INLINE uint32_t __get_APSR(void) -{ - register uint32_t __regAPSR __ASM("apsr"); - return(__regAPSR); -} - - -/** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value - */ -__STATIC_INLINE uint32_t __get_xPSR(void) -{ - register uint32_t __regXPSR __ASM("xpsr"); - return(__regXPSR); -} - - -/** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value - */ -__STATIC_INLINE uint32_t __get_PSP(void) -{ - register uint32_t __regProcessStackPointer __ASM("psp"); - return(__regProcessStackPointer); -} - - -/** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set - */ -__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack) -{ - register uint32_t __regProcessStackPointer __ASM("psp"); - __regProcessStackPointer = topOfProcStack; -} - - -/** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). 
- \return MSP Register value - */ -__STATIC_INLINE uint32_t __get_MSP(void) -{ - register uint32_t __regMainStackPointer __ASM("msp"); - return(__regMainStackPointer); -} - - -/** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack) -{ - register uint32_t __regMainStackPointer __ASM("msp"); - __regMainStackPointer = topOfMainStack; -} - - -/** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. - \return Priority Mask value - */ -__STATIC_INLINE uint32_t __get_PRIMASK(void) -{ - register uint32_t __regPriMask __ASM("primask"); - return(__regPriMask); -} - - -/** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask - */ -__STATIC_INLINE void __set_PRIMASK(uint32_t priMask) -{ - register uint32_t __regPriMask __ASM("primask"); - __regPriMask = (priMask); -} - - -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) - -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing the F-bit in the CPSR. - Can only be executed in Privileged modes. - */ -#define __enable_fault_irq __enable_fiq - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting the F-bit in the CPSR. - Can only be executed in Privileged modes. - */ -#define __disable_fault_irq __disable_fiq - - -/** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value - */ -__STATIC_INLINE uint32_t __get_BASEPRI(void) -{ - register uint32_t __regBasePri __ASM("basepri"); - return(__regBasePri); -} - - -/** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set - */ -__STATIC_INLINE void __set_BASEPRI(uint32_t basePri) -{ - register uint32_t __regBasePri __ASM("basepri"); - __regBasePri = (basePri & 0xFFU); -} - - -/** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. - \param [in] basePri Base Priority value to set - */ -__STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri) -{ - register uint32_t __regBasePriMax __ASM("basepri_max"); - __regBasePriMax = (basePri & 0xFFU); -} - - -/** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value - */ -__STATIC_INLINE uint32_t __get_FAULTMASK(void) -{ - register uint32_t __regFaultMask __ASM("faultmask"); - return(__regFaultMask); -} - - -/** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set - */ -__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask) -{ - register uint32_t __regFaultMask __ASM("faultmask"); - __regFaultMask = (faultMask & (uint32_t)1U); -} - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ - +/* ######################### Startup and Lowlevel Init ######################## */ -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. 
- \return Floating Point Status/Control register value - */ -__STATIC_INLINE uint32_t __get_FPSCR(void) -{ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) - register uint32_t __regfpscr __ASM("fpscr"); - return(__regfpscr); -#else - return(0U); +#ifndef __PROGRAM_START +#define __PROGRAM_START __main #endif -} - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set - */ -__STATIC_INLINE void __set_FPSCR(uint32_t fpscr) -{ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) - register uint32_t __regfpscr __ASM("fpscr"); - __regfpscr = (fpscr); -#else - (void)fpscr; +#ifndef __INITIAL_SP +#define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit #endif -} +#ifndef __STACK_LIMIT +#define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base +#endif -/*@} end of CMSIS_Core_RegAccFunctions */ +#ifndef __VECTOR_TABLE +#define __VECTOR_TABLE __Vectors +#endif +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) +#endif /* ########################## Core Instruction Access ######################### */ /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface @@ -415,35 +172,23 @@ __STATIC_INLINE void __set_FPSCR(uint32_t fpscr) so that all instructions following the ISB are fetched from cache or memory, after the instruction has been completed. */ -#define __ISB() do {\ - __schedule_barrier();\ - __isb(0xF);\ - __schedule_barrier();\ - } while (0U) +#define __ISB() __isb(0xF) /** \brief Data Synchronization Barrier \details Acts as a special kind of Data Memory Barrier. It completes when all explicit memory accesses before this instruction complete. */ -#define __DSB() do {\ - __schedule_barrier();\ - __dsb(0xF);\ - __schedule_barrier();\ - } while (0U) +#define __DSB() __dsb(0xF) /** \brief Data Memory Barrier \details Ensures the apparent order of the explicit memory operations before and after the instruction, without ensuring their completion. */ -#define __DMB() do {\ - __schedule_barrier();\ - __dmb(0xF);\ - __schedule_barrier();\ - } while (0U) +#define __DMB() __dmb(0xF) + - /** \brief Reverse byte order (32 bit) \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. @@ -598,187 +343,461 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value) /** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXH(value, ptr) __strex(value, ptr) +#else + #define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXW(value, ptr) __strex(value, ptr) +#else + #define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __clrex + + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __ssat + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __usat + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value) +{ + rrx r0, r0 + bx lr +} +#endif + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr)) + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr)) + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr)) + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRBT(value, ptr) __strt(value, ptr) + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRHT(value, ptr) __strt(value, ptr) + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRT(value, ptr) __strt(value, ptr) + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__attribute__((always_inline)) __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. 
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +/* intrinsic void __enable_irq(); */ + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +/* intrinsic void __disable_irq(); */ + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_INLINE uint32_t __get_CONTROL(void) +{ + register uint32_t __regControl __ASM("control"); + return(__regControl); +} + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_INLINE void __set_CONTROL(uint32_t control) +{ + register uint32_t __regControl __ASM("control"); + __regControl = control; + __ISB(); +} + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_INLINE uint32_t __get_IPSR(void) +{ + register uint32_t __regIPSR __ASM("ipsr"); + return(__regIPSR); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_INLINE uint32_t __get_APSR(void) +{ + register uint32_t __regAPSR __ASM("apsr"); + return(__regAPSR); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_INLINE uint32_t __get_xPSR(void) +{ + register uint32_t __regXPSR __ASM("xpsr"); + return(__regXPSR); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value */ -#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) - #define __STREXH(value, ptr) __strex(value, ptr) -#else - #define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") -#endif +__STATIC_INLINE uint32_t __get_PSP(void) +{ + register uint32_t __regProcessStackPointer __ASM("psp"); + return(__regProcessStackPointer); +} /** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). 
+ \param [in] topOfProcStack Process Stack Pointer value to set */ -#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) - #define __STREXW(value, ptr) __strex(value, ptr) -#else - #define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") -#endif +__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack) +{ + register uint32_t __regProcessStackPointer __ASM("psp"); + __regProcessStackPointer = topOfProcStack; +} /** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value */ -#define __CLREX __clrex +__STATIC_INLINE uint32_t __get_MSP(void) +{ + register uint32_t __regMainStackPointer __ASM("msp"); + return(__regMainStackPointer); +} /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set */ -#define __SSAT __ssat +__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack) +{ + register uint32_t __regMainStackPointer __ASM("msp"); + __regMainStackPointer = topOfMainStack; +} /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value */ -#define __USAT __usat +__STATIC_INLINE uint32_t __get_PRIMASK(void) +{ + register uint32_t __regPriMask __ASM("primask"); + return(__regPriMask); +} /** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask */ -#ifndef __NO_EMBEDDED_ASM -__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value) +__STATIC_INLINE void __set_PRIMASK(uint32_t priMask) { - rrx r0, r0 - bx lr + register uint32_t __regPriMask __ASM("primask"); + __regPriMask = (priMask); } -#endif +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + /** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. */ -#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr)) +#define __enable_fault_irq __enable_fiq /** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. 
*/ -#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr)) +#define __disable_fault_irq __disable_fiq /** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value */ -#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr)) +__STATIC_INLINE uint32_t __get_BASEPRI(void) +{ + register uint32_t __regBasePri __ASM("basepri"); + return(__regBasePri); +} /** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set */ -#define __STRBT(value, ptr) __strt(value, ptr) +__STATIC_INLINE void __set_BASEPRI(uint32_t basePri) +{ + register uint32_t __regBasePri __ASM("basepri"); + __regBasePri = (basePri & 0xFFU); +} /** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set */ -#define __STRHT(value, ptr) __strt(value, ptr) +__STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + register uint32_t __regBasePriMax __ASM("basepri_max"); + __regBasePriMax = (basePri & 0xFFU); +} /** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value */ -#define __STRT(value, ptr) __strt(value, ptr) +__STATIC_INLINE uint32_t __get_FAULTMASK(void) +{ + register uint32_t __regFaultMask __ASM("faultmask"); + return(__regFaultMask); +} -#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask) +{ + register uint32_t __regFaultMask __ASM("faultmask"); + __regFaultMask = (faultMask & (uint32_t)1U); +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. 
+ \return Floating Point Status/Control register value */ -__attribute__((always_inline)) __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) +__STATIC_INLINE uint32_t __get_FPSCR(void) { - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + register uint32_t __regfpscr __ASM("fpscr"); + return(__regfpscr); +#else + return(0U); +#endif } + /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set */ -__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) +__STATIC_INLINE void __set_FPSCR(uint32_t fpscr) { - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + register uint32_t __regfpscr __ASM("fpscr"); + __regfpscr = (fpscr); +#else + (void)fpscr; +#endif } -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ -/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ +/*@} end of CMSIS_Core_RegAccFunctions */ /* ################### Compiler specific Intrinsics ########################### */ @@ -858,6 +877,10 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint #define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \ ((int64_t)(ARG3) << 32U) ) >> 32U)) +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + #endif /* ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ /*@} end of group CMSIS_SIMD_intrinsics */ diff --git a/Drivers/CMSIS/Include/cmsis_armclang.h b/Drivers/CMSIS/Include/cmsis_armclang.h index 162a400e..69114177 100644 --- a/Drivers/CMSIS/Include/cmsis_armclang.h +++ b/Drivers/CMSIS/Include/cmsis_armclang.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_armclang.h * @brief CMSIS compiler armclang (Arm Compiler 6) header file - * @version V5.0.4 - * @date 10. January 2018 + * @version V5.4.3 + * @date 27. May 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -29,10 +29,6 @@ #pragma clang system_header /* treat file as system include file */ -#ifndef __ARM_COMPAT_H -#include /* Compatibility header for Arm Compiler 5 intrinsics */ -#endif - /* CMSIS compiler specific defines */ #ifndef __ASM #define __ASM __asm @@ -43,9 +39,9 @@ #ifndef __STATIC_INLINE #define __STATIC_INLINE static __inline #endif -#ifndef __STATIC_FORCEINLINE +#ifndef __STATIC_FORCEINLINE #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline -#endif +#endif #ifndef __NO_RETURN #define __NO_RETURN __attribute__((__noreturn__)) #endif @@ -110,1015 +106,424 @@ #ifndef __RESTRICT #define __RESTRICT __restrict #endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif +/* ######################### Startup and Lowlevel Init ######################## */ -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions - @{ - */ +#ifndef __PROGRAM_START +#define __PROGRAM_START __main +#endif -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -/* intrinsic void __enable_irq(); see arm_compat.h */ +#ifndef __INITIAL_SP +#define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit +#endif +#ifndef __STACK_LIMIT +#define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base +#endif -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -/* intrinsic void __disable_irq(); see arm_compat.h */ +#ifndef __VECTOR_TABLE +#define __VECTOR_TABLE __Vectors +#endif +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) +#endif -/** - \brief Get Control Register - \details Returns the content of the Control Register. - \return Control Register value - */ -__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) -{ - uint32_t result; +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#ifndef __STACK_SEAL +#define __STACK_SEAL Image$$STACKSEAL$$ZI$$Base +#endif - __ASM volatile ("MRS %0, control" : "=r" (result) ); - return(result); -} +#ifndef __TZ_STACK_SEAL_SIZE +#define __TZ_STACK_SEAL_SIZE 8U +#endif +#ifndef __TZ_STACK_SEAL_VALUE +#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL +#endif -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. - \return non-secure Control Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return(result); +__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; } #endif +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. 
+ * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + /** - \brief Set Control Register - \details Writes the given value to the Control Register. - \param [in] control Control Register value to set + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. */ -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) -{ - __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); -} - +#define __NOP __builtin_arm_nop -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); -} -#endif +#define __WFI __builtin_arm_wfi /** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. */ -__STATIC_FORCEINLINE uint32_t __get_IPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); - return(result); -} +#define __WFE __builtin_arm_wfe /** - \brief Get APSR Register - \details Returns the content of the APSR Register. - \return APSR Register value + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. */ -__STATIC_FORCEINLINE uint32_t __get_APSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, apsr" : "=r" (result) ); - return(result); -} +#define __SEV __builtin_arm_sev /** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. */ -__STATIC_FORCEINLINE uint32_t __get_xPSR(void) -{ - uint32_t result; +#define __ISB() __builtin_arm_isb(0xF) - __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); - return(result); -} +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +#define __DSB() __builtin_arm_dsb(0xF) /** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. 
*/ -__STATIC_FORCEINLINE uint32_t __get_PSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp" : "=r" (result) ); - return(result); -} +#define __DMB() __builtin_arm_dmb(0xF) -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return(result); -} -#endif +#define __REV(value) __builtin_bswap32(value) /** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); -} +#define __REV16(value) __ROR(__REV(value), 16) -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); -} -#endif +#define __REVSH(value) (int16_t)__builtin_bswap16(value) /** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). - \return MSP Register value + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __get_MSP(void) +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) { - uint32_t result; - - __ASM volatile ("MRS %0, msp" : "=r" (result) ); - return(result); + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. - \return MSP Register value + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. 
*/ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return(result); -} -#endif +#define __BKPT(value) __ASM volatile ("bkpt "#value) /** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); -} - +#define __RBIT __builtin_arm_rbit -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +{ + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); } -#endif -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) + /** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return(result); -} +#define __LDREXB (uint8_t)__builtin_arm_ldrex /** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) -{ - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); -} -#endif +#define __LDREXH (uint16_t)__builtin_arm_ldrex /** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. - \return Priority Mask value + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask" : "=r" (result) ); - return(result); -} +#define __LDREXW (uint32_t)__builtin_arm_ldrex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. - \return Priority Mask value + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); - return(result); -} -#endif +#define __STREXB (uint32_t)__builtin_arm_strex /** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) -{ - __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); -} +#define __STREXH (uint32_t)__builtin_arm_strex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. - \param [in] priMask Priority Mask + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) -{ - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); -} -#endif +#define __STREXW (uint32_t)__builtin_arm_strex -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) /** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. 
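The exclusive load/store pair above is normally wrapped in a retry loop; a minimal atomic-increment sketch, assuming a word-aligned counter shared with an interrupt handler on a core that implements the exclusive monitor:

#include <stdint.h>

static volatile uint32_t shared_counter;

static void atomic_increment(void)
{
  uint32_t value;
  do
  {
    value = __LDREXW(&shared_counter);                      /* load and mark the address exclusive          */
  } while (__STREXW(value + 1U, &shared_counter) != 0U);    /* nonzero means the store lost exclusivity, retry */
}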
*/ -#define __enable_fault_irq __enable_fiq /* see arm_compat.h */ +#define __CLREX __builtin_arm_clrex + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) + /** - \brief Disable FIQ - \details Disables FIQ interrupts by setting the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -#define __disable_fault_irq __disable_fiq /* see arm_compat.h */ +#define __SSAT __builtin_arm_ssat /** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, basepri" : "=r" (result) ); - return(result); -} +#define __USAT __builtin_arm_usat -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); return(result); } -#endif /** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) { - __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); -} - + uint32_t result; -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. 
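On the Mainline profiles the saturation macros above map straight to the SSAT/USAT instructions, so the bit-position argument must be a compile-time constant; a minimal sketch with arbitrary sample values:

#include <stdint.h>

void saturate_demo(void)
{
  int32_t  acc     = 40000;                        /* arbitrary over-range accumulator value */
  int16_t  pcm     = (int16_t)__SSAT(acc, 16);     /* clamped to [-32768, 32767] -> 32767    */
  uint32_t dac_val = __USAT(acc, 12);              /* clamped to [0, 4095]       -> 4095     */
  (void)pcm; (void)dac_val;
}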
- \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); /* Add explicit type cast here */ } -#endif /** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. - \param [in] basePri Base Priority value to set + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) { - __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); + uint32_t result; + + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); /* Add explicit type cast here */ } /** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) { uint32_t result; - __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) { - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return(result); + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } -#endif /** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) { - __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) { - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); } -#endif -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ - - -#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ /** - \brief Get Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). - \return PSPLIM Register value + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) -{ -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim" : "=r" (result) ); - return result; -#endif -} - -#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) -{ -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); - return result; -#endif -} -#endif - - -/** - \brief Set Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 
- \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) -{ -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); -#endif -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) -{ -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif -} -#endif - - -/** - \brief Get Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). - \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) -{ -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim" : "=r" (result) ); - return result; -#endif -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) -{ -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return result; -#endif -} -#endif - - -/** - \brief Set Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). 
- \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) -{ -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); -#endif -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. - \param [in] MainStackPtrLimit Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) -{ -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif -} -#endif - -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ - -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value - */ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr -#else -#define __get_FPSCR() ((uint32_t)0U) -#endif - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set - */ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#define __set_FPSCR __builtin_arm_set_fpscr -#else -#define __set_FPSCR(x) ((void)(x)) -#endif - - -/*@} end of CMSIS_Core_RegAccFunctions */ - - -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions - @{ -*/ - -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) -#endif - -/** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. - */ -#define __NOP __builtin_arm_nop - -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI __builtin_arm_wfi - - -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. - */ -#define __WFE __builtin_arm_wfe - - -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. 
- */ -#define __SEV __builtin_arm_sev - - -/** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. - */ -#define __ISB() __builtin_arm_isb(0xF); - -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. - */ -#define __DSB() __builtin_arm_dsb(0xF); - - -/** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. - */ -#define __DMB() __builtin_arm_dmb(0xF); - - -/** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV(value) __builtin_bswap32(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV16(value) __ROR(__REV(value), 16) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REVSH(value) (int16_t)__builtin_bswap16(value) - - -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) -{ - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); -} - - -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. - */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) - - -/** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value - */ -#define __RBIT __builtin_arm_rbit - -/** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value - */ -#define __CLZ (uint8_t)__builtin_clz - - -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) -/** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDREXB (uint8_t)__builtin_arm_ldrex - - -/** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. 
- \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDREXH (uint16_t)__builtin_arm_ldrex - - -/** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDREXW (uint32_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXB (uint32_t)__builtin_arm_strex - - -/** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXH (uint32_t)__builtin_arm_strex - - -/** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXW (uint32_t)__builtin_arm_strex - - -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. - */ -#define __CLREX __builtin_arm_clrex - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ - - -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) - -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT __builtin_arm_ssat - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT __builtin_arm_usat - - -/** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) -{ - uint32_t result; - - __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return(result); -} - - -/** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t) result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. 
- \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t) result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return(result); -} - - -/** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); -} - -#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ - -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) { if ((sat >= 1U) && (sat <= 32U)) { @@ -1160,13 +565,16 @@ __STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) return (uint32_t)val; } -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ -#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) + /** \brief Load-Acquire (8 bit) \details Executes a LDAB instruction for 8 bit value. 
@@ -1177,7 +585,7 @@ __STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) { uint32_t result; - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) ); + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); return ((uint8_t) result); } @@ -1192,7 +600,7 @@ __STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) { uint32_t result; - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) ); + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); return ((uint16_t) result); } @@ -1207,7 +615,7 @@ __STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) { uint32_t result; - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) ); + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); return(result); } @@ -1220,7 +628,7 @@ __STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) */ __STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) { - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } @@ -1232,7 +640,7 @@ __STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) */ __STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) { - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } @@ -1244,7 +652,7 @@ __STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) */ __STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) { - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } @@ -1307,546 +715,768 @@ __STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) */ #define __STLEX (uint32_t)__builtin_arm_stlex -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ /*@}*/ /* end of group CMSIS_Core_InstructionInterface */ -/* ################### Compiler specific Intrinsics ########################### */ -/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics - Access to dedicated SIMD instructions +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions @{ -*/ - -#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + */ -__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. 
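With the added "memory" clobbers, the load-acquire/store-release helpers above provide one-way ordering without a separate barrier; a minimal single-producer flag sketch, assuming an ARMv8-M target where these intrinsics are available:

#include <stdint.h>

static uint32_t          payload;
static volatile uint32_t payload_ready;

static void publish(uint32_t value)
{
  payload = value;
  __STL(1U, &payload_ready);            /* store-release: payload is visible before the flag */
}

static uint32_t consume(void)
{
  while (__LDA(&payload_ready) == 0U)   /* load-acquire: later reads are ordered after the flag */
  {
    /* spin until the producer publishes */
  }
  return payload;
}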
+ */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __enable_irq(void) { - uint32_t result; - - __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); + __ASM volatile ("cpsie i" : : : "memory"); } +#endif -__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); } +#endif -__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) { uint32_t result; - __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, control" : "=r" (result) ); return(result); } -__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) { uint32_t result; - __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); return(result); } +#endif -__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); } -__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); } +#endif -__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2) +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) { uint32_t result; - __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); return(result); } -__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} -__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. 
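Because IPSR reads as zero in Thread mode and as the active exception number in Handler mode, the accessor above gives a cheap interrupt-context check; a minimal sketch:

#include <stdint.h>

/* Nonzero when called from an exception handler, zero when called from thread code. */
static uint32_t in_handler_mode(void)
{
  return (__get_IPSR() != 0U) ? 1U : 0U;
}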
+ \return APSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_APSR(void) { uint32_t result; - __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); return(result); } -__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} -__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) { uint32_t result; - __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); return(result); } -__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSP(void) { uint32_t result; - __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, psp" : "=r" (result) ); return(result); } -__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2) +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) { uint32_t result; - __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); return(result); } +#endif -__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} -__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) { - uint32_t result; - - __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); } -__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} -__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. 
+ \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) { - uint32_t result; - - __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); } +#endif -__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - - __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} -__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2) +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSP(void) { uint32_t result; - __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, msp" : "=r" (result) ); return(result); } -__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) { uint32_t result; - __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); return(result); } +#endif -__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); } -__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); } +#endif -__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) { uint32_t result; - __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); return(result); } -__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); +/** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. 
+ \param [in] topOfStack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); } +#endif + -__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2) +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) { uint32_t result; - __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, primask" : "=r" (result) ); return(result); } -__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2) + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) { uint32_t result; - __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); return(result); } +#endif -__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); } -__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); } +#endif -__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); } -__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. 
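The PRIMASK accessors above are typically wrapped into a nestable critical section that restores the previous masking state instead of unconditionally re-enabling interrupts; a minimal sketch, assuming privileged execution:

#include <stdint.h>

static uint32_t critical_enter(void)
{
  uint32_t primask = __get_PRIMASK();   /* remember whether interrupts were already masked */
  __disable_irq();                      /* set PRIMASK: mask all configurable interrupts   */
  return primask;
}

static void critical_exit(uint32_t primask)
{
  __set_PRIMASK(primask);               /* restore the state captured on entry */
}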
+ */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); } -__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2) + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) { uint32_t result; - __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); return(result); } -__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) { uint32_t result; - __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); return(result); } +#endif -__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +{ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); } -__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); } +#endif -__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); } -__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) { uint32_t result; - __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); return(result); } -__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. 
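BASEPRI masks only interrupts at or below a priority threshold, and __set_BASEPRI_MAX above never lowers an existing mask; a minimal sketch, assuming a hypothetical device with 4 implemented priority bits so that threshold 5 is encoded as (5 << 4):

#include <stdint.h>

void basepri_demo(void)
{
  uint32_t saved = __get_BASEPRI();

  __set_BASEPRI_MAX(5U << 4);   /* block interrupts with priority value >= 5 (lower urgency) */
  /* ... work that must not be preempted by low-priority interrupts ... */
  __set_BASEPRI(saved);         /* restore the previous threshold */
}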
+ \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) { uint32_t result; - __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); return(result); } +#endif -__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) -{ - uint32_t result; - __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); - return(result); +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); } -#define __SSAT16(ARG1,ARG2) \ -({ \ - int32_t __RES, __ARG1 = (ARG1); \ - __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ - __RES; \ - }) - -#define __USAT16(ARG1,ARG2) \ -({ \ - uint32_t __RES, __ARG1 = (ARG1); \ - __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ - __RES; \ - }) - -__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1) -{ - uint32_t result; - __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); - return(result); +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); } +#endif -__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) -{ - uint32_t result; +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ - __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} -__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) -{ - uint32_t result; +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) - __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); - return(result); -} +/** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. -__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). 
+ \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) { +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else uint32_t result; - - __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif } -__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) { +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else uint32_t result; - - __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif } +#endif -__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} +/** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. -__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) { - uint32_t result; - - __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); - return(result); +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif } -__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3) -{ - uint32_t result; - __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); - return(result); -} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. -__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. 
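A typical use of the PSPLIM setter above is to fence a task stack on ARMv8-M so that an overflow raises a stack-limit fault instead of silently corrupting memory; a minimal sketch, assuming a hypothetical task stack array and that this setup code still runs on the main stack:

#include <stdint.h>

static uint32_t task_stack[256] __attribute__((aligned(8)));   /* hypothetical task stack */

static void prepare_task_stack(void)
{
  __set_PSPLIM((uint32_t)&task_stack[0]);     /* lowest address the process stack may reach */
  __set_PSP((uint32_t)&task_stack[256]);      /* start the process stack at the top         */
}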
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) { - union llreg_u{ - uint32_t w32[2]; - uint64_t w64; - } llr; - llr.w64 = acc; - -#ifndef __ARMEB__ /* Little endian */ - __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); -#else /* Big endian */ - __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); #endif - - return(llr.w64); } - -__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) -{ - union llreg_u{ - uint32_t w32[2]; - uint64_t w64; - } llr; - llr.w64 = acc; - -#ifndef __ARMEB__ /* Little endian */ - __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); -#else /* Big endian */ - __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); #endif - return(llr.w64); -} -__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) +/** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) { +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else uint32_t result; - - __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif } -__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) -{ - uint32_t result; - __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. -__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3) + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. 
+ \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) { +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else uint32_t result; - - __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); - return(result); + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif } +#endif -__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) -{ - uint32_t result; - __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); - return(result); -} +/** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. -__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc) + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) { - union llreg_u{ - uint32_t w32[2]; - uint64_t w64; - } llr; - llr.w64 = acc; - -#ifndef __ARMEB__ /* Little endian */ - __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); -#else /* Big endian */ - __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); #endif - - return(llr.w64); } -__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc) + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. 
+ \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) { - union llreg_u{ - uint32_t w32[2]; - uint64_t w64; - } llr; - llr.w64 = acc; - -#ifndef __ARMEB__ /* Little endian */ - __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); -#else /* Big endian */ - __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); #endif - - return(llr.w64); } +#endif -__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2) -{ - uint32_t result; +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ - __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr +#else +#define __get_FPSCR() ((uint32_t)0U) +#endif -__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) -{ - int32_t result; +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
+ \param [in] fpscr Floating Point Status/Control value to set + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __set_FPSCR __builtin_arm_set_fpscr +#else +#define __set_FPSCR(x) ((void)(x)) +#endif - __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} -__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) -{ - int32_t result; +/*@} end of CMSIS_Core_RegAccFunctions */ - __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} -#if 0 -#define __PKHBT(ARG1,ARG2,ARG3) \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - -#define __PKHTB(ARG1,ARG2,ARG3) \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - if (ARG3 == 0) \ - __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ - else \ - __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) -#endif +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + +#define __SADD8 __builtin_arm_sadd8 +#define __QADD8 __builtin_arm_qadd8 +#define __SHADD8 __builtin_arm_shadd8 +#define __UADD8 __builtin_arm_uadd8 +#define __UQADD8 __builtin_arm_uqadd8 +#define __UHADD8 __builtin_arm_uhadd8 +#define __SSUB8 __builtin_arm_ssub8 +#define __QSUB8 __builtin_arm_qsub8 +#define __SHSUB8 __builtin_arm_shsub8 +#define __USUB8 __builtin_arm_usub8 +#define __UQSUB8 __builtin_arm_uqsub8 +#define __UHSUB8 __builtin_arm_uhsub8 +#define __SADD16 __builtin_arm_sadd16 +#define __QADD16 __builtin_arm_qadd16 +#define __SHADD16 __builtin_arm_shadd16 +#define __UADD16 __builtin_arm_uadd16 +#define __UQADD16 __builtin_arm_uqadd16 +#define __UHADD16 __builtin_arm_uhadd16 +#define __SSUB16 __builtin_arm_ssub16 +#define __QSUB16 __builtin_arm_qsub16 +#define __SHSUB16 __builtin_arm_shsub16 +#define __USUB16 __builtin_arm_usub16 +#define __UQSUB16 __builtin_arm_uqsub16 +#define __UHSUB16 __builtin_arm_uhsub16 +#define __SASX __builtin_arm_sasx +#define __QASX __builtin_arm_qasx +#define __SHASX __builtin_arm_shasx +#define __UASX __builtin_arm_uasx +#define __UQASX __builtin_arm_uqasx +#define __UHASX __builtin_arm_uhasx +#define __SSAX __builtin_arm_ssax +#define __QSAX __builtin_arm_qsax +#define __SHSAX __builtin_arm_shsax +#define __USAX __builtin_arm_usax +#define __UQSAX __builtin_arm_uqsax +#define __UHSAX __builtin_arm_uhsax +#define __USAD8 __builtin_arm_usad8 +#define __USADA8 __builtin_arm_usada8 +#define __SSAT16 __builtin_arm_ssat16 +#define __USAT16 __builtin_arm_usat16 +#define __UXTB16 __builtin_arm_uxtb16 +#define __UXTAB16 __builtin_arm_uxtab16 +#define __SXTB16 __builtin_arm_sxtb16 +#define __SXTAB16 __builtin_arm_sxtab16 +#define __SMUAD __builtin_arm_smuad +#define __SMUADX __builtin_arm_smuadx +#define __SMLAD __builtin_arm_smlad +#define __SMLADX __builtin_arm_smladx +#define __SMLALD __builtin_arm_smlald +#define __SMLALDX __builtin_arm_smlaldx +#define __SMUSD __builtin_arm_smusd +#define __SMUSDX __builtin_arm_smusdx +#define __SMLSD __builtin_arm_smlsd +#define __SMLSDX __builtin_arm_smlsdx +#define __SMLSLD __builtin_arm_smlsld +#define __SMLSLDX __builtin_arm_smlsldx +#define 
__SEL __builtin_arm_sel +#define __QADD __builtin_arm_qadd +#define __QSUB __builtin_arm_qsub #define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) @@ -1854,6 +1484,10 @@ __STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) #define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) { int32_t result; diff --git a/Drivers/CMSIS/Include/cmsis_armclang_ltm.h b/Drivers/CMSIS/Include/cmsis_armclang_ltm.h new file mode 100644 index 00000000..1e255d59 --- /dev/null +++ b/Drivers/CMSIS/Include/cmsis_armclang_ltm.h @@ -0,0 +1,1928 @@ +/**************************************************************************//** + * @file cmsis_armclang_ltm.h + * @brief CMSIS compiler armclang (Arm Compiler 6) header file + * @version V1.5.3 + * @date 27. May 2021 + ******************************************************************************/ +/* + * Copyright (c) 2018-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/*lint -esym(9058, IRQn)*/ /* disable MISRA 2012 Rule 2.4 for IRQn */ + +#ifndef __CMSIS_ARMCLANG_H +#define __CMSIS_ARMCLANG_H + +#pragma clang system_header /* treat file as system include file */ + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE __inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32 */ + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */ + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */ + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_READ */ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif + +/* ######################### Startup and Lowlevel Init ######################## */ + +#ifndef __PROGRAM_START +#define __PROGRAM_START __main +#endif + +#ifndef __INITIAL_SP +#define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit +#endif + +#ifndef __STACK_LIMIT +#define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base +#endif + +#ifndef __VECTOR_TABLE 
+#define __VECTOR_TABLE __Vectors +#endif + +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) +#endif + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#ifndef __STACK_SEAL +#define __STACK_SEAL Image$$STACKSEAL$$ZI$$Base +#endif + +#ifndef __TZ_STACK_SEAL_SIZE +#define __TZ_STACK_SEAL_SIZE 8U +#endif + +#ifndef __TZ_STACK_SEAL_VALUE +#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL +#endif + + +__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; +} +#endif + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP __builtin_arm_nop + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI __builtin_arm_wfi + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE __builtin_arm_wfe + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV __builtin_arm_sev + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __builtin_arm_isb(0xF) + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +#define __DSB() __builtin_arm_dsb(0xF) + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +#define __DMB() __builtin_arm_dmb(0xF) + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV(value) __builtin_bswap32(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV16(value) __ROR(__REV(value), 16) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. 
+ \param [in] value Value to reverse + \return Reversed value + */ +#define __REVSH(value) (int16_t)__builtin_bswap16(value) + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT __builtin_arm_rbit + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +{ + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); +} + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDREXB (uint8_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDREXH (uint16_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDREXW (uint32_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXB (uint32_t)__builtin_arm_strex + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXH (uint32_t)__builtin_arm_strex + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXW (uint32_t)__builtin_arm_strex + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __builtin_arm_clrex + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __builtin_arm_ssat + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __builtin_arm_usat + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return(result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDAEXB (uint8_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDAEXH (uint16_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXB (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXH (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEX (uint32_t)__builtin_arm_stlex + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. 
+ */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} +#endif + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} +#endif + + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); +} +#endif + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). 
+ \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); +} +#endif + + +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} + + +/** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif + + +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. 
+ \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} + + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +{ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); +} +#endif + + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); +} + + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. 
+ \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); +} +#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + +/** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif +} + +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif +} +#endif + + +/** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). 
+ \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr +#else +#define __get_FPSCR() ((uint32_t)0U) +#endif + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
+ \param [in] fpscr Floating Point Status/Control value to set + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __set_FPSCR __builtin_arm_set_fpscr +#else +#define __set_FPSCR(x) ((void)(x)) +#endif + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + +__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t 
op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile 
("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#define __SSAT16(ARG1,ARG2) \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + +#define __USAT16(ARG1,ARG2) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + +__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM 
volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + +__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) +{ + int32_t result; + + __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#endif /* (__ARM_FEATURE_DSP == 1) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + 
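/*
 * Editor's note: the snippet below is an illustrative sketch only and is not part of
 * the diff above. It shows how the packed-SIMD intrinsics added here for armclang
 * (for example __SMLAD) are typically used. The function name dot_q15_packed and its
 * parameters are hypothetical; the code assumes a Cortex-M core with the DSP extension
 * (__ARM_FEATURE_DSP == 1) and that the matching CMSIS compiler header is on the
 * include path.
 */
#include <stdint.h>
#include "cmsis_compiler.h"   /* selects cmsis_armclang.h / cmsis_gcc.h as appropriate */

/* Dot product of two Q15 vectors stored two samples per 32-bit word.
   The 32-bit accumulator wraps on overflow, as SMLAD does. */
static int32_t dot_q15_packed(const uint32_t *a, const uint32_t *b, uint32_t pairs)
{
    uint32_t acc = 0U;

    for (uint32_t i = 0U; i < pairs; i++)
    {
        /* __SMLAD multiplies the two signed 16-bit halves of a[i] and b[i]
           and adds both products to the accumulator in a single instruction. */
        acc = __SMLAD(a[i], b[i], acc);
    }
    return (int32_t)acc;
}
/* End of editor's note. */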
+#endif /* __CMSIS_ARMCLANG_H */ diff --git a/Drivers/CMSIS/Include/cmsis_compiler.h b/Drivers/CMSIS/Include/cmsis_compiler.h index 94212eb8..adbf296f 100644 --- a/Drivers/CMSIS/Include/cmsis_compiler.h +++ b/Drivers/CMSIS/Include/cmsis_compiler.h @@ -1,8 +1,8 @@ /**************************************************************************//** * @file cmsis_compiler.h * @brief CMSIS compiler generic header file - * @version V5.0.4 - * @date 10. January 2018 + * @version V5.1.0 + * @date 09. October 2018 ******************************************************************************/ /* * Copyright (c) 2009-2018 Arm Limited. All rights reserved. @@ -35,9 +35,15 @@ /* - * Arm Compiler 6 (armclang) + * Arm Compiler 6.6 LTM (armclang) */ -#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) && (__ARMCC_VERSION < 6100100) + #include "cmsis_armclang_ltm.h" + + /* + * Arm Compiler above 6.10.1 (armclang) + */ +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6100100) #include "cmsis_armclang.h" @@ -115,8 +121,11 @@ #define __ALIGNED(x) __attribute__((aligned(x))) #endif #ifndef __RESTRICT - #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. - #define __RESTRICT + #define __RESTRICT __restrict + #endif + #ifndef __COMPILER_BARRIER + #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. + #define __COMPILER_BARRIER() (void)0 #endif @@ -187,6 +196,10 @@ #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. #define __RESTRICT #endif + #ifndef __COMPILER_BARRIER + #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. + #define __COMPILER_BARRIER() (void)0 + #endif /* @@ -255,6 +268,10 @@ #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. #define __RESTRICT #endif + #ifndef __COMPILER_BARRIER + #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. + #define __COMPILER_BARRIER() (void)0 + #endif #else diff --git a/Drivers/CMSIS/Include/cmsis_gcc.h b/Drivers/CMSIS/Include/cmsis_gcc.h index 2d9db15a..67bda4ef 100644 --- a/Drivers/CMSIS/Include/cmsis_gcc.h +++ b/Drivers/CMSIS/Include/cmsis_gcc.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_gcc.h * @brief CMSIS compiler GCC header file - * @version V5.0.4 - * @date 09. April 2018 + * @version V5.4.1 + * @date 27. May 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -46,9 +46,9 @@ #ifndef __STATIC_INLINE #define __STATIC_INLINE static inline #endif -#ifndef __STATIC_FORCEINLINE +#ifndef __STATIC_FORCEINLINE #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline -#endif +#endif #ifndef __NO_RETURN #define __NO_RETURN __attribute__((__noreturn__)) #endif @@ -113,464 +113,638 @@ #ifndef __RESTRICT #define __RESTRICT __restrict #endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif +/* ######################### Startup and Lowlevel Init ######################## */ -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions - @{ - */ +#ifndef __PROGRAM_START /** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Initializes data and bss sections + \details This default implementations initialized all data and additional bss + sections relying on .copy.table and .zero.table specified properly + in the used linker script. + */ -__STATIC_FORCEINLINE void __enable_irq(void) +__STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void) { - __ASM volatile ("cpsie i" : : : "memory"); -} + extern void _start(void) __NO_RETURN; + typedef struct { + uint32_t const* src; + uint32_t* dest; + uint32_t wlen; + } __copy_table_t; -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} + typedef struct { + uint32_t* dest; + uint32_t wlen; + } __zero_table_t; + extern const __copy_table_t __copy_table_start__; + extern const __copy_table_t __copy_table_end__; + extern const __zero_table_t __zero_table_start__; + extern const __zero_table_t __zero_table_end__; -/** - \brief Get Control Register - \details Returns the content of the Control Register. - \return Control Register value - */ -__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) -{ - uint32_t result; + for (__copy_table_t const* pTable = &__copy_table_start__; pTable < &__copy_table_end__; ++pTable) { + for(uint32_t i=0u; iwlen; ++i) { + pTable->dest[i] = pTable->src[i]; + } + } - __ASM volatile ("MRS %0, control" : "=r" (result) ); - return(result); + for (__zero_table_t const* pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable) { + for(uint32_t i=0u; iwlen; ++i) { + pTable->dest[i] = 0u; + } + } + + _start(); } +#define __PROGRAM_START __cmsis_start +#endif -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. - \return non-secure Control Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; +#ifndef __INITIAL_SP +#define __INITIAL_SP __StackTop +#endif - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return(result); -} +#ifndef __STACK_LIMIT +#define __STACK_LIMIT __StackLimit #endif +#ifndef __VECTOR_TABLE +#define __VECTOR_TABLE __Vectors +#endif -/** - \brief Set Control Register - \details Writes the given value to the Control Register. 
- \param [in] control Control Register value to set - */ -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) -{ - __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); -} +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors"))) +#endif +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#ifndef __STACK_SEAL +#define __STACK_SEAL __StackSeal +#endif -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set - */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); -} +#ifndef __TZ_STACK_SEAL_SIZE +#define __TZ_STACK_SEAL_SIZE 8U #endif +#ifndef __TZ_STACK_SEAL_VALUE +#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL +#endif -/** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value - */ -__STATIC_FORCEINLINE uint32_t __get_IPSR(void) -{ - uint32_t result; - __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); - return(result); +__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; } +#endif + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif /** - \brief Get APSR Register - \details Returns the content of the APSR Register. - \return APSR Register value + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. */ -__STATIC_FORCEINLINE uint32_t __get_APSR(void) -{ - uint32_t result; +#define __NOP() __ASM volatile ("nop") - __ASM volatile ("MRS %0, apsr" : "=r" (result) ); - return(result); -} +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI() __ASM volatile ("wfi":::"memory") /** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. */ -__STATIC_FORCEINLINE uint32_t __get_xPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); - return(result); -} +#define __WFE() __ASM volatile ("wfe":::"memory") /** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. 
*/ -__STATIC_FORCEINLINE uint32_t __get_PSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp" : "=r" (result) ); - return(result); -} +#define __SEV() __ASM volatile ("sev") -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +__STATIC_FORCEINLINE void __ISB(void) { - uint32_t result; - - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return(result); + __ASM volatile ("isb 0xF":::"memory"); } -#endif /** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. */ -__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +__STATIC_FORCEINLINE void __DSB(void) { - __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); + __ASM volatile ("dsb 0xF":::"memory"); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) +__STATIC_FORCEINLINE void __DMB(void) { - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); + __ASM volatile ("dmb 0xF":::"memory"); } -#endif /** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). - \return MSP Register value + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_MSP(void) +__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) { +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) + return __builtin_bswap32(value); +#else uint32_t result; - __ASM volatile ("MRS %0, msp" : "=r" (result) ); - return(result); + __ASM ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +#endif } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. - \return MSP Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. 
+ \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return(result); + __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; } -#endif /** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) { - __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + return (int16_t)__builtin_bswap16(value); +#else + int16_t result; + + __ASM ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +#endif } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) { - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); } -#endif -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return(result); -} - - -/** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) -{ - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); -} -#endif +#define __BKPT(value) __ASM volatile ("bkpt "#value) /** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. - \return Priority Mask value + \brief Reverse bit order of value + \details Reverses the bit order of the given value. 
+ \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, primask" : "=r" (result) :: "memory"); - return(result); +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) + __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); +#else + uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ + + result = value; /* r will be reversed bits of v; first get LSB of v */ + for (value >>= 1U; value != 0U; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ +#endif + return result; } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. - \return Priority Mask value + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) :: "memory"); - return(result); +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +{ + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); } -#endif +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) /** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) { - __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. 
+ */ + __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint8_t) result); /* Add explicit type cast here */ } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. - \param [in] priMask Priority Mask + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) { - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); -} + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); #endif + return ((uint16_t) result); /* Add explicit type cast here */ +} -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) /** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) +__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) { - __ASM volatile ("cpsie f" : : : "memory"); + uint32_t result; + + __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); + return(result); } /** - \brief Disable FIQ - \details Disables FIQ interrupts by setting the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) +__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) { - __ASM volatile ("cpsid f" : : : "memory"); + uint32_t result; + + __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); } /** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) { - uint32_t result; + uint32_t result; - __ASM volatile ("MRS %0, basepri" : "=r" (result) ); - return(result); + __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) { - uint32_t result; + uint32_t result; - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); - return(result); + __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); + return(result); } -#endif /** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. */ -__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +__STATIC_FORCEINLINE void __CLREX(void) { - __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("clrex" ::: "memory"); } +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) /** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set + \brief Signed Saturate + \details Saturates a signed value. + \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (1..32) + \return Saturated value */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); -} -#endif +#define __SSAT(ARG1, ARG2) \ +__extension__ \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ + __RES; \ + }) /** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. - \param [in] basePri Base Priority value to set + \brief Unsigned Saturate + \details Saturates an unsigned value. 
+ \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (0..31) + \return Saturated value */ -__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); -} +#define __USAT(ARG1, ARG2) \ +__extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ + __RES; \ + }) /** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) { - uint32_t result; + uint32_t result; - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return(result); +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); +#endif + return ((uint8_t) result); /* Add explicit type cast here */ } + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); #endif + return ((uint16_t) result); /* Add explicit type cast here */ +} /** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) { - __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) { - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; } -#endif #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ @@ -579,764 +753,646 @@ __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. 
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); +} + /** - \brief Get Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). - \return PSPLIM Register value + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim" : "=r" (result) ); - return result; -#endif + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); } -#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) + /** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); - return result; -#endif + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#endif /** - \brief Set Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); -#endif + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); } -#endif /** - \brief Get Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). - \return MSPLIM Register value + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim" : "=r" (result) ); - return result; -#endif + uint32_t result; + + __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return result; -#endif + uint32_t result; + + __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); } -#endif /** - \brief Set Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). - \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. 
- \param [in] MainStackPtrLimit Main Stack Pointer value to set + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); } -#endif #endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ /** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) +__STATIC_FORCEINLINE void __enable_irq(void) { -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#if __has_builtin(__builtin_arm_get_fpscr) -// Re-enable using built-in when GCC has been fixed -// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) - /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ - return __builtin_arm_get_fpscr(); -#else - uint32_t result; - - __ASM volatile ("VMRS %0, fpscr" : "=r" (result) ); - return(result); -#endif -#else - return(0U); -#endif + __ASM volatile ("cpsie i" : : : "memory"); } /** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +__STATIC_FORCEINLINE void __disable_irq(void) { -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#if __has_builtin(__builtin_arm_set_fpscr) -// Re-enable using built-in when GCC has been fixed -// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) - /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ - __builtin_arm_set_fpscr(fpscr); -#else - __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory"); -#endif -#else - (void)fpscr; -#endif + __ASM volatile ("cpsid i" : : : "memory"); } -/*@} end of CMSIS_Core_RegAccFunctions */ - +/** + \brief Get Control Register + \details Returns the content of the Control Register. 
+ \return Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions - @{ -*/ + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_RW_REG(r) "+l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_RW_REG(r) "+r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) -#endif +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value */ -#define __NOP() __ASM volatile ("nop") +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI() __ASM volatile ("wfi") + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif /** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set */ -#define __WFE() __ASM volatile ("wfe") +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set */ -#define __SEV() __ASM volatile ("sev") +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); +} +#endif /** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value */ -__STATIC_FORCEINLINE void __ISB(void) +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) { - __ASM volatile ("isb 0xF":::"memory"); + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); } /** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. 
+ \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value */ -__STATIC_FORCEINLINE void __DSB(void) +__STATIC_FORCEINLINE uint32_t __get_APSR(void) { - __ASM volatile ("dsb 0xF":::"memory"); + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); } /** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value */ -__STATIC_FORCEINLINE void __DMB(void) +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) { - __ASM volatile ("dmb 0xF":::"memory"); + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); } /** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value */ -__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) +__STATIC_FORCEINLINE uint32_t __get_PSP(void) { -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) - return __builtin_bswap32(value); -#else uint32_t result; - __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return result; -#endif + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value */ -__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) { uint32_t result; - __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return result; + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set */ -__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) { -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - return (int16_t)__builtin_bswap16(value); -#else - int16_t result; - - __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return result; -#endif + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. 
- \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) { - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); } +#endif /** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value */ -__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) { uint32_t result; -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) - __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) ); -#else - uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ - - result = value; /* r will be reversed bits of v; first get LSB of v */ - for (value >>= 1U; value != 0U; value >>= 1U) - { - result <<= 1U; - result |= value & 1U; - s--; - } - result <<= s; /* shift when v's highest bits are zero */ -#endif - return result; + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set */ -#define __CLZ (uint8_t)__builtin_clz +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); +} -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. 
- \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set */ -__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) { - uint32_t result; - -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); -#endif - return ((uint8_t) result); /* Add explicit type cast here */ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); } +#endif +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value */ -__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) { - uint32_t result; + uint32_t result; -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); -#endif - return ((uint16_t) result); /* Add explicit type cast here */ + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); } /** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set */ -__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) { - uint32_t result; - - __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); - return(result); + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); } +#endif /** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. 
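Illustrative sketch for __TZ_set_MSP_NS(): secure boot code usually loads the initial non-secure MSP from word 0 of the non-secure vector table before branching to the non-secure Reset_Handler. NS_VTOR_ADDR is a hypothetical address, and the code must be built for the secure state with CMSE enabled.

    #define NS_VTOR_ADDR   0x08040000UL              /* hypothetical non-secure vector table address */

    static void prime_nonsecure_stack(void)
    {
      __TZ_set_MSP_NS(*(uint32_t const *)NS_VTOR_ADDR);   /* word 0 holds the initial MSP_NS */
    }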
+ \return Priority Mask value */ -__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) { - uint32_t result; + uint32_t result; - __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value */ -__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) { - uint32_t result; + uint32_t result; - __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask */ -__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) -{ - uint32_t result; - - __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); - return(result); +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask */ -__STATIC_FORCEINLINE void __CLREX(void) +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) { - __ASM volatile ("clrex" ::: "memory"); + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); } - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ +#endif #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (1..32) - \return Saturated value + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. 
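Illustrative sketch using the PRIMASK accessors above: a nesting-safe critical section that saves, raises, and restores the mask. shared_counter is a hypothetical variable also touched from an interrupt handler.

    static volatile uint32_t shared_counter;

    static void increment_shared_counter(void)
    {
      uint32_t primask = __get_PRIMASK();            /* remember the current mask state */
      __set_PRIMASK(1U);                             /* mask all configurable-priority interrupts */
      shared_counter++;                              /* update data shared with ISRs */
      __set_PRIMASK(primask);                        /* restore, so nested critical sections stay safe */
    }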
*/ -#define __SSAT(ARG1,ARG2) \ -__extension__ \ -({ \ - int32_t __RES, __ARG1 = (ARG1); \ - __ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ - __RES; \ - }) +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (0..31) - \return Saturated value + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. */ -#define __USAT(ARG1,ARG2) \ - __extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1); \ - __ASM ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ - __RES; \ - }) +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} /** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) { uint32_t result; - __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) { - uint32_t result; + uint32_t result; -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); -#endif - return ((uint8_t) result); /* Add explicit type cast here */ + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) { - uint32_t result; - -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. 
- */ - __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); -#endif - return ((uint16_t) result); /* Add explicit type cast here */ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) { - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return(result); + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); } +#endif /** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) { - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); } /** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) { - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) { - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); } +#endif -#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. 
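Illustrative sketch for __set_BASEPRI_MAX()/__set_BASEPRI(): mask interrupts with priority value 5 or higher (i.e. equal or lower urgency) while leaving more urgent ones enabled. The shift assumes the device header defines __NVIC_PRIO_BITS.

    static void run_with_low_priority_masked(void)
    {
      uint32_t old_basepri = __get_BASEPRI();
      __set_BASEPRI_MAX(5UL << (8U - __NVIC_PRIO_BITS));  /* BASEPRI_MAX only ever raises the mask */
      /* ... region protected from interrupts with priority value 5 and above ... */
      __set_BASEPRI(old_basepri);                         /* restore the previous threshold */
    }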
+ \param [in] faultMask Fault Mask value to set */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) { - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); } + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) { - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); } +#endif #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ @@ -1345,186 +1401,235 @@ __STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) -/** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t) result); -} - /** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t) result); -} - + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. -/** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). 
+ \return PSPLIM Register value */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) { - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) ); - return(result); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif } - +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) { - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif } +#endif /** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) { - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. 
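Illustrative sketch for __set_PSPLIM() (Armv8-M only): guarding a task stack so that any push below its lowest address faults instead of silently corrupting memory. task_stack is a hypothetical buffer.

    extern uint64_t task_stack[128];                 /* hypothetical task stack buffer */

    static void guard_task_stack(void)
    {
      __set_PSPLIM((uint32_t)&task_stack[0]);        /* lowest address the PSP may reach */
    }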
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) { - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif } +#endif /** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value */ -__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) { - uint32_t result; - - __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t) result); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value */ -__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) { - uint32_t result; - - __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t) result); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif } +#endif /** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). 
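Illustrative sketch for __set_MSPLIM(): set the main-stack limit early in the reset handler so a main-stack overflow faults immediately. __StackLimit is a hypothetical linker-provided symbol for the bottom of the main stack.

    extern uint32_t __StackLimit;                    /* hypothetical linker symbol */

    static void guard_main_stack(void)
    {
      __set_MSPLIM((uint32_t)&__StackLimit);
    }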
+ \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set */ -__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) { - uint32_t result; - - __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) ); - return(result); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set */ -__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) { - uint32_t result; - - __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) ); - return(result); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif } +#endif + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ /** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value */ -__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) { - uint32_t result; +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#if __has_builtin(__builtin_arm_get_fpscr) +// Re-enable using built-in when GCC has been fixed +// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) + /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ + return __builtin_arm_get_fpscr(); +#else + uint32_t result; - __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) ); - return(result); + __ASM volatile ("VMRS %0, fpscr" : "=r" (result) ); + return(result); +#endif +#else + return(0U); +#endif } /** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
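Illustrative sketch for the FPSCR accessors: poll and clear the cumulative (sticky) floating-point exception flags, which occupy FPSCR bits 0..4 and 7 (IOC, DZC, OFC, UFC, IXC, IDC).

    static uint32_t poll_and_clear_fp_exceptions(void)
    {
      uint32_t fpscr  = __get_FPSCR();
      uint32_t sticky = fpscr & 0x9FU;               /* IOC|DZC|OFC|UFC|IXC|IDC */
      if (sticky != 0U)
      {
        __set_FPSCR(fpscr & ~0x9FU);                 /* clear the sticky flags */
      }
      return sticky;
    }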
+ \param [in] fpscr Floating Point Status/Control value to set */ -__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) { - uint32_t result; - - __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) ); - return(result); +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#if __has_builtin(__builtin_arm_set_fpscr) +// Re-enable using built-in when GCC has been fixed +// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) + /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ + __builtin_arm_set_fpscr(fpscr); +#else + __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory"); +#endif +#else + (void)fpscr; +#endif } -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ -/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ +/*@} end of CMSIS_Core_RegAccFunctions */ /* ################### Compiler specific Intrinsics ########################### */ @@ -1547,7 +1652,7 @@ __STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1555,7 +1660,7 @@ __STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1571,7 +1676,7 @@ __STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1579,7 +1684,7 @@ __STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1596,7 +1701,7 @@ __STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1604,7 +1709,7 @@ __STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1620,7 +1725,7 @@ __STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1628,7 +1733,7 @@ __STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1645,7 +1750,7 @@ __STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), 
"r" (op2) ); + __ASM ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1653,7 +1758,7 @@ __STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1669,7 +1774,7 @@ __STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1677,7 +1782,7 @@ __STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1693,7 +1798,7 @@ __STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1701,7 +1806,7 @@ __STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1717,7 +1822,7 @@ __STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1725,7 +1830,7 @@ __STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1741,7 +1846,7 @@ __STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1749,7 +1854,7 @@ __STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1765,7 +1870,7 @@ __STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1773,7 +1878,7 @@ __STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1789,7 +1894,7 @@ __STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1797,7 +1902,7 @@ __STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("shsax 
%0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1813,7 +1918,7 @@ __STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1821,7 +1926,7 @@ __STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1829,7 +1934,7 @@ __STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1837,21 +1942,23 @@ __STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) { uint32_t result; - __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + __ASM ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); return(result); } -#define __SSAT16(ARG1,ARG2) \ +#define __SSAT16(ARG1, ARG2) \ +__extension__ \ ({ \ int32_t __RES, __ARG1 = (ARG1); \ - __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __ASM volatile ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ __RES; \ }) -#define __USAT16(ARG1,ARG2) \ +#define __USAT16(ARG1, ARG2) \ +__extension__ \ ({ \ uint32_t __RES, __ARG1 = (ARG1); \ - __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __ASM volatile ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ __RES; \ }) @@ -1859,7 +1966,7 @@ __STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1) { uint32_t result; - __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); + __ASM ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); return(result); } @@ -1867,7 +1974,7 @@ __STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } @@ -1875,18 +1982,41 @@ __STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) { uint32_t result; - __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); + __ASM ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); return(result); } +__STATIC_FORCEINLINE uint32_t __SXTB16_RORn(uint32_t op1, uint32_t rotate) +{ + uint32_t result; + if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) { + __ASM volatile ("sxtb16 %0, %1, ROR %2" : "=r" (result) : "r" (op1), "i" (rotate) ); + } else { + result = __SXTB16(__ROR(op1, rotate)) ; + } + return result; +} + __STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) { uint32_t result; - __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + __ASM ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); return(result); } +__STATIC_FORCEINLINE uint32_t __SXTAB16_RORn(uint32_t op1, uint32_t op2, uint32_t rotate) +{ + uint32_t result; + if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) { + __ASM volatile ("sxtab16 %0, %1, %2, ROR %3" : "=r" (result) : "r" (op1) , "r" (op2) , "i" 
(rotate)); + } else { + result = __SXTAB16(op1, __ROR(op2, rotate)); + } + return result; +} + + __STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) { uint32_t result; @@ -2043,8 +2173,9 @@ __STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) return(result); } -#if 0 + #define __PKHBT(ARG1,ARG2,ARG3) \ +__extension__ \ ({ \ uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ @@ -2052,6 +2183,7 @@ __STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) }) #define __PKHTB(ARG1,ARG2,ARG3) \ +__extension__ \ ({ \ uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ if (ARG3 == 0) \ @@ -2060,19 +2192,13 @@ __STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ __RES; \ }) -#endif - -#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ - ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) -#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ - ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) { int32_t result; - __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + __ASM ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); return(result); } diff --git a/Drivers/CMSIS/Include/cmsis_iccarm.h b/Drivers/CMSIS/Include/cmsis_iccarm.h index 11c4af0e..65b824b0 100644 --- a/Drivers/CMSIS/Include/cmsis_iccarm.h +++ b/Drivers/CMSIS/Include/cmsis_iccarm.h @@ -1,13 +1,16 @@ /**************************************************************************//** * @file cmsis_iccarm.h * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file - * @version V5.0.7 - * @date 19. June 2018 + * @version V5.3.0 + * @date 14. April 2021 ******************************************************************************/ //------------------------------------------------------------------------------ // -// Copyright (c) 2017-2018 IAR Systems +// Copyright (c) 2017-2021 IAR Systems +// Copyright (c) 2017-2021 Arm Limited. All rights reserved. +// +// SPDX-License-Identifier: Apache-2.0 // // Licensed under the Apache License, Version 2.0 (the "License") // you may not use this file except in compliance with the License. 
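Illustrative sketch for the new __SXTB16_RORn() helper added above: expand four packed int8 lanes into two registers of int16 pairs, the access pattern used by SIMD kernels such as CMSIS-NN. The argument value is arbitrary.

    static void unpack_int8_quad(uint32_t packed, uint32_t *even, uint32_t *odd)
    {
      *even = __SXTB16(packed);                      /* sign-extends bytes 0 and 2 */
      *odd  = __SXTB16_RORn(packed, 8U);             /* sign-extends bytes 1 and 3 */
    }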
@@ -110,6 +113,10 @@ #define __ASM __asm #endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif + #ifndef __INLINE #define __INLINE inline #endif @@ -150,7 +157,12 @@ #endif #ifndef __RESTRICT - #define __RESTRICT __restrict + #if __ICCARM_V8 + #define __RESTRICT __restrict + #else + /* Needs IAR language extensions */ + #define __RESTRICT restrict + #endif #endif #ifndef __STATIC_INLINE @@ -226,6 +238,7 @@ __packed struct __iar_u32 { uint32_t v; }; #endif #endif +#undef __WEAK /* undo the definition from DLib_Defaults.h */ #ifndef __WEAK #if __ICCARM_V8 #define __WEAK __attribute__((weak)) @@ -234,6 +247,43 @@ __packed struct __iar_u32 { uint32_t v; }; #endif #endif +#ifndef __PROGRAM_START +#define __PROGRAM_START __iar_program_start +#endif + +#ifndef __INITIAL_SP +#define __INITIAL_SP CSTACK$$Limit +#endif + +#ifndef __STACK_LIMIT +#define __STACK_LIMIT CSTACK$$Base +#endif + +#ifndef __VECTOR_TABLE +#define __VECTOR_TABLE __vector_table +#endif + +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE @".intvec" +#endif + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#ifndef __STACK_SEAL +#define __STACK_SEAL STACKSEAL$$Base +#endif + +#ifndef __TZ_STACK_SEAL_SIZE +#define __TZ_STACK_SEAL_SIZE 8U +#endif + +#ifndef __TZ_STACK_SEAL_VALUE +#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL +#endif + +__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; +} +#endif #ifndef __ICCARM_INTRINSICS_VERSION__ #define __ICCARM_INTRINSICS_VERSION__ 0 @@ -305,7 +355,13 @@ __packed struct __iar_u32 { uint32_t v; }; #define __set_BASEPRI(VALUE) (__arm_wsr("BASEPRI", (VALUE))) #define __set_BASEPRI_MAX(VALUE) (__arm_wsr("BASEPRI_MAX", (VALUE))) - #define __set_CONTROL(VALUE) (__arm_wsr("CONTROL", (VALUE))) + +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __arm_wsr("CONTROL", control); + __iar_builtin_ISB(); +} + #define __set_FAULTMASK(VALUE) (__arm_wsr("FAULTMASK", (VALUE))) #define __set_MSP(VALUE) (__arm_wsr("MSP", (VALUE))) @@ -327,7 +383,13 @@ __packed struct __iar_u32 { uint32_t v; }; #endif #define __TZ_get_CONTROL_NS() (__arm_rsr("CONTROL_NS")) - #define __TZ_set_CONTROL_NS(VALUE) (__arm_wsr("CONTROL_NS", (VALUE))) + +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __arm_wsr("CONTROL_NS", control); + __iar_builtin_ISB(); +} + #define __TZ_get_PSP_NS() (__arm_rsr("PSP_NS")) #define __TZ_set_PSP_NS(VALUE) (__arm_wsr("PSP_NS", (VALUE))) #define __TZ_get_MSP_NS() (__arm_rsr("MSP_NS")) @@ -567,7 +629,7 @@ __packed struct __iar_u32 { uint32_t v; }; __IAR_FT uint32_t __RRX(uint32_t value) { uint32_t result; - __ASM("RRX %0, %1" : "=r"(result) : "r" (value) : "cc"); + __ASM volatile("RRX %0, %1" : "=r"(result) : "r" (value)); return(result); } @@ -649,6 +711,7 @@ __packed struct __iar_u32 { uint32_t v; }; __IAR_FT void __TZ_set_CONTROL_NS(uint32_t value) { __asm volatile("MSR CONTROL_NS,%0" :: "r" (value)); + __iar_builtin_ISB(); } __IAR_FT uint32_t __TZ_get_PSP_NS(void) @@ -806,37 +869,37 @@ __packed struct __iar_u32 { uint32_t v; }; __IAR_FT uint8_t __LDRBT(volatile uint8_t *addr) { uint32_t res; - __ASM("LDRBT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + __ASM volatile ("LDRBT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); return ((uint8_t)res); } __IAR_FT uint16_t __LDRHT(volatile uint16_t *addr) { uint32_t res; - __ASM("LDRHT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + __ASM volatile ("LDRHT %0, 
[%1]" : "=r" (res) : "r" (addr) : "memory"); return ((uint16_t)res); } __IAR_FT uint32_t __LDRT(volatile uint32_t *addr) { uint32_t res; - __ASM("LDRT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + __ASM volatile ("LDRT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); return res; } __IAR_FT void __STRBT(uint8_t value, volatile uint8_t *addr) { - __ASM("STRBT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); + __ASM volatile ("STRBT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); } __IAR_FT void __STRHT(uint16_t value, volatile uint16_t *addr) { - __ASM("STRHT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); + __ASM volatile ("STRHT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); } __IAR_FT void __STRT(uint32_t value, volatile uint32_t *addr) { - __ASM("STRT %1, [%0]" : : "r" (addr), "r" (value) : "memory"); + __ASM volatile ("STRT %1, [%0]" : : "r" (addr), "r" (value) : "memory"); } #endif /* (__CORTEX_M >= 0x03) */ @@ -932,4 +995,8 @@ __packed struct __iar_u32 { uint32_t v; }; #pragma diag_default=Pe940 #pragma diag_default=Pe177 +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + #endif /* __CMSIS_ICCARM_H__ */ diff --git a/Drivers/CMSIS/Include/cmsis_version.h b/Drivers/CMSIS/Include/cmsis_version.h index 660f612a..8b4765f1 100644 --- a/Drivers/CMSIS/Include/cmsis_version.h +++ b/Drivers/CMSIS/Include/cmsis_version.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_version.h * @brief CMSIS Core(M) Version definitions - * @version V5.0.2 - * @date 19. April 2017 + * @version V5.0.5 + * @date 02. February 2022 ******************************************************************************/ /* - * Copyright (c) 2009-2017 ARM Limited. All rights reserved. + * Copyright (c) 2009-2022 ARM Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -33,7 +33,7 @@ /* CMSIS Version definitions */ #define __CM_CMSIS_VERSION_MAIN ( 5U) /*!< [31:16] CMSIS Core(M) main version */ -#define __CM_CMSIS_VERSION_SUB ( 1U) /*!< [15:0] CMSIS Core(M) sub version */ +#define __CM_CMSIS_VERSION_SUB ( 6U) /*!< [15:0] CMSIS Core(M) sub version */ #define __CM_CMSIS_VERSION ((__CM_CMSIS_VERSION_MAIN << 16U) | \ __CM_CMSIS_VERSION_SUB ) /*!< CMSIS Core(M) version number */ #endif diff --git a/Drivers/CMSIS/Include/core_armv81mml.h b/Drivers/CMSIS/Include/core_armv81mml.h new file mode 100644 index 00000000..94128a1a --- /dev/null +++ b/Drivers/CMSIS/Include/core_armv81mml.h @@ -0,0 +1,4228 @@ +/**************************************************************************//** + * @file core_armv81mml.h + * @brief CMSIS Armv8.1-M Mainline Core Peripheral Access Layer Header File + * @version V1.4.2 + * @date 13. October 2021 + ******************************************************************************/ +/* + * Copyright (c) 2018-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_ARMV81MML_H_GENERIC +#define __CORE_ARMV81MML_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_ARMV81MML + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS ARMV81MML definitions */ +#define __ARMv81MML_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __ARMv81MML_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __ARMv81MML_CMSIS_VERSION ((__ARMv81MML_CMSIS_VERSION_MAIN << 16U) | \ + __ARMv81MML_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (81U) /*!< Cortex-M Core */ + +#if defined ( __CC_ARM ) + #error Legacy Arm Compiler does not support Armv8.1-M target architecture. +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined 
(__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV81MML_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_ARMV81MML_H_DEPENDANT +#define __CORE_ARMV81MML_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __ARMv81MML_REV + #define __ARMv81MML_REV 0x0000U + #warning "__ARMv81MML_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #if __FPU_PRESENT != 0U + #ifndef __FPU_DP + #define __FPU_DP 0U + #warning "__FPU_DP not defined in device header file; using default!" + #endif + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __PMU_PRESENT + #define __PMU_PRESENT 0U + #warning "__PMU_PRESENT not defined in device header file; using default!" + #endif + + #if __PMU_PRESENT != 0U + #ifndef __PMU_NUM_EVENTCNT + #define __PMU_NUM_EVENTCNT 2U + #warning "__PMU_NUM_EVENTCNT not defined in device header file; using default!" + #elif (__PMU_NUM_EVENTCNT > 31 || __PMU_NUM_EVENTCNT < 2) + #error "__PMU_NUM_EVENTCNT is out of range in device header file!" */ + #endif + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! 
Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group ARMv81MML */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
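Each interrupt in the controller described below gets one enable bit in ISER[] and one byte-wide priority slot in IPR[], so an enable-register index is the IRQ number divided by 32 while the priority index is the IRQ number itself. An illustrative sketch, assuming the conventional 0xE000E100 NVIC base address and using the NVIC_Type layout and the __NVIC_PRIO_BITS default from this header (the example_ name is made up):

static inline void example_enable_irq(uint32_t irqn, uint8_t prio)
{
  NVIC_Type *nvic = (NVIC_Type *)0xE000E100UL;                   /* assumed NVIC base address      */
  nvic->IPR[irqn] = (uint8_t)(prio << (8U - __NVIC_PRIO_BITS));  /* priority lives in the top bits */
  nvic->ISER[irqn >> 5U] = 1UL << (irqn & 0x1FUL);               /* one enable bit per interrupt   */
}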
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + __IOM uint32_t RFSR; /*!< Offset: 0x204 (R/W) RAS Fault Status Register */ + uint32_t RESERVED4[14U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position 
*/ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_IESB_Pos 5U /*!< SCB AIRCR: Implicit ESB Enable Position */ +#define SCB_AIRCR_IESB_Msk (1UL << SCB_AIRCR_IESB_Pos) /*!< SCB AIRCR: Implicit ESB Enable Mask */ + +#define SCB_AIRCR_DIT_Pos 4U /*!< SCB AIRCR: Data Independent Timing Position */ +#define SCB_AIRCR_DIT_Msk (1UL << SCB_AIRCR_DIT_Pos) /*!< SCB AIRCR: Data Independent Timing Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define 
SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_TRD_Pos 20U /*!< SCB CCR: TRD Position */ +#define SCB_CCR_TRD_Msk (1UL << SCB_CCR_TRD_Pos) /*!< SCB CCR: TRD Mask */ + +#define SCB_CCR_LOB_Pos 19U /*!< SCB CCR: LOB Position */ +#define SCB_CCR_LOB_Msk (1UL << SCB_CCR_LOB_Pos) /*!< SCB CCR: LOB Mask */ + +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: 
MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status 
Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos 
(SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_PMU_Pos 5U /*!< SCB DFSR: PMU Position */ +#define SCB_DFSR_PMU_Msk (1UL << SCB_DFSR_PMU_Pos) /*!< SCB DFSR: PMU Mask */ + +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CP7_Pos 7U /*!< SCB NSACR: CP7 Position */ +#define SCB_NSACR_CP7_Msk (1UL << SCB_NSACR_CP7_Pos) /*!< SCB NSACR: CP7 Mask */ + +#define SCB_NSACR_CP6_Pos 6U /*!< SCB NSACR: CP6 Position */ +#define SCB_NSACR_CP6_Msk (1UL << SCB_NSACR_CP6_Pos) /*!< SCB NSACR: CP6 Mask */ + +#define SCB_NSACR_CP5_Pos 5U /*!< SCB NSACR: CP5 Position */ +#define SCB_NSACR_CP5_Msk (1UL << SCB_NSACR_CP5_Pos) /*!< SCB NSACR: CP5 Mask */ + +#define SCB_NSACR_CP4_Pos 4U /*!< SCB NSACR: CP4 Position */ +#define SCB_NSACR_CP4_Msk (1UL << SCB_NSACR_CP4_Pos) /*!< SCB NSACR: CP4 Mask */ + +#define SCB_NSACR_CP3_Pos 3U /*!< SCB NSACR: CP3 Position */ +#define SCB_NSACR_CP3_Msk (1UL << SCB_NSACR_CP3_Pos) /*!< SCB NSACR: CP3 Mask */ + +#define SCB_NSACR_CP2_Pos 2U /*!< SCB NSACR: CP2 Position */ +#define SCB_NSACR_CP2_Msk (1UL << SCB_NSACR_CP2_Pos) /*!< SCB NSACR: CP2 Mask */ + +#define 
SCB_NSACR_CP1_Pos 1U /*!< SCB NSACR: CP1 Position */ +#define SCB_NSACR_CP1_Msk (1UL << SCB_NSACR_CP1_Pos) /*!< SCB NSACR: CP1 Mask */ + +#define SCB_NSACR_CP0_Pos 0U /*!< SCB NSACR: CP0 Position */ +#define SCB_NSACR_CP0_Msk (1UL /*<< SCB_NSACR_CP0_Pos*/) /*!< SCB NSACR: CP0 Mask */ + +/* SCB Debug Feature Register 0 Definitions */ +#define SCB_ID_DFR_UDE_Pos 28U /*!< SCB ID_DFR: UDE Position */ +#define SCB_ID_DFR_UDE_Msk (0xFUL << SCB_ID_DFR_UDE_Pos) /*!< SCB ID_DFR: UDE Mask */ + +#define SCB_ID_DFR_MProfDbg_Pos 20U /*!< SCB ID_DFR: MProfDbg Position */ +#define SCB_ID_DFR_MProfDbg_Msk (0xFUL << SCB_ID_DFR_MProfDbg_Pos) /*!< SCB ID_DFR: MProfDbg Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< 
SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB RAS Fault Status Register Definitions */ +#define SCB_RFSR_V_Pos 31U /*!< SCB RFSR: V Position */ +#define SCB_RFSR_V_Msk (1UL << SCB_RFSR_V_Pos) /*!< SCB RFSR: V Mask */ + +#define SCB_RFSR_IS_Pos 16U /*!< SCB RFSR: IS Position */ +#define SCB_RFSR_IS_Msk (0x7FFFUL << SCB_RFSR_IS_Pos) /*!< SCB RFSR: IS Mask */ + +#define SCB_RFSR_UET_Pos 0U /*!< SCB RFSR: UET Position */ +#define SCB_RFSR_UET_Msk (3UL /*<< SCB_RFSR_UET_Pos*/) /*!< SCB RFSR: UET Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
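A minimal start-up sketch for the timer described below, modelled on the usual SysTick_Config() pattern; the 0xE000E010 base address and the example_ name are assumptions, and the *_Msk values are the ones defined further down:

static inline int32_t example_systick_start(uint32_t ticks)
{
  SysTick_Type *systick = (SysTick_Type *)0xE000E010UL;  /* assumed SysTick base address */
  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
  {
    return -1;                                           /* reload field is only 24 bits */
  }
  systick->LOAD = ticks - 1UL;                           /* count from ticks-1 down to 0 */
  systick->VAL  = 0UL;                                   /* clear the current value      */
  systick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |           /* run from the processor clock */
                  SysTick_CTRL_TICKINT_Msk   |           /* raise the SysTick exception  */
                  SysTick_CTRL_ENABLE_Msk;               /* start counting               */
  return 0;
}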
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
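The stimulus ports at the start of the block below are what ITM_SendChar()-style tracing writes to. A hedged sketch of that pattern, assuming the conventional 0xE0000000 ITM base address and that the debugger has already enabled the ITM and stimulus port 0:

static inline uint32_t example_itm_putc(uint32_t ch)
{
  ITM_Type *itm = (ITM_Type *)0xE0000000UL;          /* assumed ITM base address          */
  if (((itm->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&    /* ITM enabled as a whole?           */
      ((itm->TER & 1UL) != 0UL))                     /* stimulus port 0 enabled?          */
  {
    while (itm->PORT[0U].u32 == 0UL)                 /* wait until the port FIFO is ready */
    {
    }
    itm->PORT[0U].u8 = (uint8_t)ch;                  /* an 8-bit write emits one byte     */
  }
  return ch;
}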
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) ITM Device Type Register */ + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: 
STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + 
__IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ 
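The CYCCNT counter gated by the CYCCNTENA bit defined below is the usual basis for quick cycle profiling. A sketch, assuming trace has already been enabled through DEMCR.TRCENA and that the DWT sits at the conventional 0xE0001000 base (the example_ name is made up):

static inline uint32_t example_cycle_count(void (*fn)(void))
{
  DWT_Type *dwt = (DWT_Type *)0xE0001000UL;     /* assumed DWT base address    */
  uint32_t start;
  dwt->CYCCNT = 0UL;                            /* restart the cycle counter   */
  dwt->CTRL  |= DWT_CTRL_CYCCNTENA_Msk;         /* enable cycle counting       */
  start = dwt->CYCCNT;
  fn();                                         /* code being measured         */
  return dwt->CYCCNT - start;                   /* elapsed cycles, modulo 2^32 */
}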
+#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define 
DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFmt_Pos 0U /*!< TPI FFCR: EnFmt 
Position */
+#define TPI_FFCR_EnFmt_Msk (0x3UL /*<< TPI_FFCR_EnFmt_Pos*/) /*!< TPI FFCR: EnFmt Mask */
+
+/* TPI Periodic Synchronization Control Register Definitions */
+#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */
+#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: PSCount Mask */
+
+/* TPI Software Lock Status Register Definitions */
+#define TPI_LSR_nTT_Pos 2U /*!< TPI LSR: Not thirty-two bit. Position */
+#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. Mask */
+
+#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */
+#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */
+
+#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */
+#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */
+
+/* TPI DEVID Register Definitions */
+#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */
+#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */
+
+#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */
+#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */
+
+#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */
+#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */
+
+#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */
+#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */
+
+/* TPI DEVTYPE Register Definitions */
+#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */
+#define TPI_DEVTYPE_SubType_Msk (0xFUL << TPI_DEVTYPE_SubType_Pos) /*!< TPI DEVTYPE: SubType Mask */
+
+#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */
+#define TPI_DEVTYPE_MajorType_Msk (0xFUL /*<< TPI_DEVTYPE_MajorType_Pos*/) /*!< TPI DEVTYPE: MajorType Mask */
+
+/*@}*/ /* end of group CMSIS_TPI */
+
+#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U)
+/**
+ \ingroup CMSIS_core_register
+ \defgroup CMSIS_PMU Performance Monitoring Unit (PMU)
+ \brief Type definitions for the Performance Monitoring Unit (PMU)
+ @{
+ */
+
+/**
+ \brief Structure type to access the Performance Monitoring Unit (PMU).
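Ahead of the structure that follows, a hedged sketch of driving one event counter: the 0xE0003000 base address and the bit-0 enable in CTRL follow the usual Armv8.1-M PMU layout, and event stands for whatever implementation-defined event number the target supports:

static inline void example_pmu_count(uint32_t event)
{
  PMU_Type *pmu = (PMU_Type *)0xE0003000UL;               /* assumed PMU base address       */
  pmu->EVTYPER[0U] = event & PMU_EVTYPER_EVENTTOCNT_Msk;  /* select the event for counter 0 */
  pmu->EVCNTR[0U]  = 0UL;                                 /* clear the 16-bit event counter */
  pmu->CNTENSET    = PMU_CNTENSET_CNT0_ENABLE_Msk;        /* enable event counter 0         */
  pmu->CTRL       |= 1UL;                                 /* global enable, PMU CTRL bit 0  */
}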
+ */ +typedef struct +{ + __IOM uint32_t EVCNTR[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x0 (R/W) PMU Event Counter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED0[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCNTR; /*!< Offset: 0x7C (R/W) PMU Cycle Counter Register */ + uint32_t RESERVED1[224]; + __IOM uint32_t EVTYPER[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x400 (R/W) PMU Event Type and Filter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED2[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCFILTR; /*!< Offset: 0x47C (R/W) PMU Cycle Counter Filter Register */ + uint32_t RESERVED3[480]; + __IOM uint32_t CNTENSET; /*!< Offset: 0xC00 (R/W) PMU Count Enable Set Register */ + uint32_t RESERVED4[7]; + __IOM uint32_t CNTENCLR; /*!< Offset: 0xC20 (R/W) PMU Count Enable Clear Register */ + uint32_t RESERVED5[7]; + __IOM uint32_t INTENSET; /*!< Offset: 0xC40 (R/W) PMU Interrupt Enable Set Register */ + uint32_t RESERVED6[7]; + __IOM uint32_t INTENCLR; /*!< Offset: 0xC60 (R/W) PMU Interrupt Enable Clear Register */ + uint32_t RESERVED7[7]; + __IOM uint32_t OVSCLR; /*!< Offset: 0xC80 (R/W) PMU Overflow Flag Status Clear Register */ + uint32_t RESERVED8[7]; + __IOM uint32_t SWINC; /*!< Offset: 0xCA0 (R/W) PMU Software Increment Register */ + uint32_t RESERVED9[7]; + __IOM uint32_t OVSSET; /*!< Offset: 0xCC0 (R/W) PMU Overflow Flag Status Set Register */ + uint32_t RESERVED10[79]; + __IOM uint32_t TYPE; /*!< Offset: 0xE00 (R/W) PMU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0xE04 (R/W) PMU Control Register */ + uint32_t RESERVED11[108]; + __IOM uint32_t AUTHSTATUS; /*!< Offset: 0xFB8 (R/W) PMU Authentication Status Register */ + __IOM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/W) PMU Device Architecture Register */ + uint32_t RESERVED12[3]; + __IOM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/W) PMU Device Type Register */ + __IOM uint32_t PIDR4; /*!< Offset: 0xFD0 (R/W) PMU Peripheral Identification Register 4 */ + uint32_t RESERVED13[3]; + __IOM uint32_t PIDR0; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 0 */ + __IOM uint32_t PIDR1; /*!< Offset: 0xFE4 (R/W) PMU Peripheral Identification Register 1 */ + __IOM uint32_t PIDR2; /*!< Offset: 0xFE8 (R/W) PMU Peripheral Identification Register 2 */ + __IOM uint32_t PIDR3; /*!< Offset: 0xFEC (R/W) PMU Peripheral Identification Register 3 */ + __IOM uint32_t CIDR0; /*!< Offset: 0xFF0 (R/W) PMU Component Identification Register 0 */ + __IOM uint32_t CIDR1; /*!< Offset: 0xFF4 (R/W) PMU Component Identification Register 1 */ + __IOM uint32_t CIDR2; /*!< Offset: 0xFF8 (R/W) PMU Component Identification Register 2 */ + __IOM uint32_t CIDR3; /*!< Offset: 0xFFC (R/W) PMU Component Identification Register 3 */ +} PMU_Type; + +/** \brief PMU Event Counter Registers (0-30) Definitions */ + +#define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */ +#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ + +/** \brief PMU Event Type and Filter Registers (0-30) Definitions */ + +#define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */ +#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ + +/** \brief PMU Count Enable Set Register Definitions */ + +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENSET: Event Counter 0 Enable Set Position */ +#define PMU_CNTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENSET_CNT0_ENABLE_Pos*/) /*!< PMU CNTENSET: Event Counter 0 Enable Set Mask */ + +#define 
PMU_CNTENSET_CNT1_ENABLE_Pos 1U /*!< PMU CNTENSET: Event Counter 1 Enable Set Position */ +#define PMU_CNTENSET_CNT1_ENABLE_Msk (1UL << PMU_CNTENSET_CNT1_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 1 Enable Set Mask */ + +#define PMU_CNTENSET_CNT2_ENABLE_Pos 2U /*!< PMU CNTENSET: Event Counter 2 Enable Set Position */ +#define PMU_CNTENSET_CNT2_ENABLE_Msk (1UL << PMU_CNTENSET_CNT2_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 2 Enable Set Mask */ + +#define PMU_CNTENSET_CNT3_ENABLE_Pos 3U /*!< PMU CNTENSET: Event Counter 3 Enable Set Position */ +#define PMU_CNTENSET_CNT3_ENABLE_Msk (1UL << PMU_CNTENSET_CNT3_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 3 Enable Set Mask */ + +#define PMU_CNTENSET_CNT4_ENABLE_Pos 4U /*!< PMU CNTENSET: Event Counter 4 Enable Set Position */ +#define PMU_CNTENSET_CNT4_ENABLE_Msk (1UL << PMU_CNTENSET_CNT4_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 4 Enable Set Mask */ + +#define PMU_CNTENSET_CNT5_ENABLE_Pos 5U /*!< PMU CNTENSET: Event Counter 5 Enable Set Position */ +#define PMU_CNTENSET_CNT5_ENABLE_Msk (1UL << PMU_CNTENSET_CNT5_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 5 Enable Set Mask */ + +#define PMU_CNTENSET_CNT6_ENABLE_Pos 6U /*!< PMU CNTENSET: Event Counter 6 Enable Set Position */ +#define PMU_CNTENSET_CNT6_ENABLE_Msk (1UL << PMU_CNTENSET_CNT6_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 6 Enable Set Mask */ + +#define PMU_CNTENSET_CNT7_ENABLE_Pos 7U /*!< PMU CNTENSET: Event Counter 7 Enable Set Position */ +#define PMU_CNTENSET_CNT7_ENABLE_Msk (1UL << PMU_CNTENSET_CNT7_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 7 Enable Set Mask */ + +#define PMU_CNTENSET_CNT8_ENABLE_Pos 8U /*!< PMU CNTENSET: Event Counter 8 Enable Set Position */ +#define PMU_CNTENSET_CNT8_ENABLE_Msk (1UL << PMU_CNTENSET_CNT8_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 8 Enable Set Mask */ + +#define PMU_CNTENSET_CNT9_ENABLE_Pos 9U /*!< PMU CNTENSET: Event Counter 9 Enable Set Position */ +#define PMU_CNTENSET_CNT9_ENABLE_Msk (1UL << PMU_CNTENSET_CNT9_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 9 Enable Set Mask */ + +#define PMU_CNTENSET_CNT10_ENABLE_Pos 10U /*!< PMU CNTENSET: Event Counter 10 Enable Set Position */ +#define PMU_CNTENSET_CNT10_ENABLE_Msk (1UL << PMU_CNTENSET_CNT10_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 10 Enable Set Mask */ + +#define PMU_CNTENSET_CNT11_ENABLE_Pos 11U /*!< PMU CNTENSET: Event Counter 11 Enable Set Position */ +#define PMU_CNTENSET_CNT11_ENABLE_Msk (1UL << PMU_CNTENSET_CNT11_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 11 Enable Set Mask */ + +#define PMU_CNTENSET_CNT12_ENABLE_Pos 12U /*!< PMU CNTENSET: Event Counter 12 Enable Set Position */ +#define PMU_CNTENSET_CNT12_ENABLE_Msk (1UL << PMU_CNTENSET_CNT12_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 12 Enable Set Mask */ + +#define PMU_CNTENSET_CNT13_ENABLE_Pos 13U /*!< PMU CNTENSET: Event Counter 13 Enable Set Position */ +#define PMU_CNTENSET_CNT13_ENABLE_Msk (1UL << PMU_CNTENSET_CNT13_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 13 Enable Set Mask */ + +#define PMU_CNTENSET_CNT14_ENABLE_Pos 14U /*!< PMU CNTENSET: Event Counter 14 Enable Set Position */ +#define PMU_CNTENSET_CNT14_ENABLE_Msk (1UL << PMU_CNTENSET_CNT14_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 14 Enable Set Mask */ + +#define PMU_CNTENSET_CNT15_ENABLE_Pos 15U /*!< PMU CNTENSET: Event Counter 15 Enable Set Position */ +#define PMU_CNTENSET_CNT15_ENABLE_Msk (1UL << PMU_CNTENSET_CNT15_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 15 Enable Set Mask */ + +#define PMU_CNTENSET_CNT16_ENABLE_Pos 16U /*!< PMU CNTENSET: 
Event Counter 16 Enable Set Position */ +#define PMU_CNTENSET_CNT16_ENABLE_Msk (1UL << PMU_CNTENSET_CNT16_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 16 Enable Set Mask */ + +#define PMU_CNTENSET_CNT17_ENABLE_Pos 17U /*!< PMU CNTENSET: Event Counter 17 Enable Set Position */ +#define PMU_CNTENSET_CNT17_ENABLE_Msk (1UL << PMU_CNTENSET_CNT17_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 17 Enable Set Mask */ + +#define PMU_CNTENSET_CNT18_ENABLE_Pos 18U /*!< PMU CNTENSET: Event Counter 18 Enable Set Position */ +#define PMU_CNTENSET_CNT18_ENABLE_Msk (1UL << PMU_CNTENSET_CNT18_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 18 Enable Set Mask */ + +#define PMU_CNTENSET_CNT19_ENABLE_Pos 19U /*!< PMU CNTENSET: Event Counter 19 Enable Set Position */ +#define PMU_CNTENSET_CNT19_ENABLE_Msk (1UL << PMU_CNTENSET_CNT19_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 19 Enable Set Mask */ + +#define PMU_CNTENSET_CNT20_ENABLE_Pos 20U /*!< PMU CNTENSET: Event Counter 20 Enable Set Position */ +#define PMU_CNTENSET_CNT20_ENABLE_Msk (1UL << PMU_CNTENSET_CNT20_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 20 Enable Set Mask */ + +#define PMU_CNTENSET_CNT21_ENABLE_Pos 21U /*!< PMU CNTENSET: Event Counter 21 Enable Set Position */ +#define PMU_CNTENSET_CNT21_ENABLE_Msk (1UL << PMU_CNTENSET_CNT21_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 21 Enable Set Mask */ + +#define PMU_CNTENSET_CNT22_ENABLE_Pos 22U /*!< PMU CNTENSET: Event Counter 22 Enable Set Position */ +#define PMU_CNTENSET_CNT22_ENABLE_Msk (1UL << PMU_CNTENSET_CNT22_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 22 Enable Set Mask */ + +#define PMU_CNTENSET_CNT23_ENABLE_Pos 23U /*!< PMU CNTENSET: Event Counter 23 Enable Set Position */ +#define PMU_CNTENSET_CNT23_ENABLE_Msk (1UL << PMU_CNTENSET_CNT23_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 23 Enable Set Mask */ + +#define PMU_CNTENSET_CNT24_ENABLE_Pos 24U /*!< PMU CNTENSET: Event Counter 24 Enable Set Position */ +#define PMU_CNTENSET_CNT24_ENABLE_Msk (1UL << PMU_CNTENSET_CNT24_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 24 Enable Set Mask */ + +#define PMU_CNTENSET_CNT25_ENABLE_Pos 25U /*!< PMU CNTENSET: Event Counter 25 Enable Set Position */ +#define PMU_CNTENSET_CNT25_ENABLE_Msk (1UL << PMU_CNTENSET_CNT25_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 25 Enable Set Mask */ + +#define PMU_CNTENSET_CNT26_ENABLE_Pos 26U /*!< PMU CNTENSET: Event Counter 26 Enable Set Position */ +#define PMU_CNTENSET_CNT26_ENABLE_Msk (1UL << PMU_CNTENSET_CNT26_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 26 Enable Set Mask */ + +#define PMU_CNTENSET_CNT27_ENABLE_Pos 27U /*!< PMU CNTENSET: Event Counter 27 Enable Set Position */ +#define PMU_CNTENSET_CNT27_ENABLE_Msk (1UL << PMU_CNTENSET_CNT27_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 27 Enable Set Mask */ + +#define PMU_CNTENSET_CNT28_ENABLE_Pos 28U /*!< PMU CNTENSET: Event Counter 28 Enable Set Position */ +#define PMU_CNTENSET_CNT28_ENABLE_Msk (1UL << PMU_CNTENSET_CNT28_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 28 Enable Set Mask */ + +#define PMU_CNTENSET_CNT29_ENABLE_Pos 29U /*!< PMU CNTENSET: Event Counter 29 Enable Set Position */ +#define PMU_CNTENSET_CNT29_ENABLE_Msk (1UL << PMU_CNTENSET_CNT29_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 29 Enable Set Mask */ + +#define PMU_CNTENSET_CNT30_ENABLE_Pos 30U /*!< PMU CNTENSET: Event Counter 30 Enable Set Position */ +#define PMU_CNTENSET_CNT30_ENABLE_Msk (1UL << PMU_CNTENSET_CNT30_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 30 Enable Set Mask */ + +#define PMU_CNTENSET_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENSET: 
Cycle Counter Enable Set Position */
+#define PMU_CNTENSET_CCNTR_ENABLE_Msk (1UL << PMU_CNTENSET_CCNTR_ENABLE_Pos) /*!< PMU CNTENSET: Cycle Counter Enable Set Mask */
+
+/** \brief PMU Count Enable Clear Register Definitions */
+
+#define PMU_CNTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Position */
+#define PMU_CNTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Position */
+#define PMU_CNTENCLR_CNT1_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT1_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Position */
+#define PMU_CNTENCLR_CNT2_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT2_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Position */
+#define PMU_CNTENCLR_CNT3_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT3_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Position */
+#define PMU_CNTENCLR_CNT4_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT4_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Position */
+#define PMU_CNTENCLR_CNT5_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT5_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Position */
+#define PMU_CNTENCLR_CNT6_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT6_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Position */
+#define PMU_CNTENCLR_CNT7_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT7_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Position */
+#define PMU_CNTENCLR_CNT8_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT8_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Position */
+#define PMU_CNTENCLR_CNT9_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT9_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Position */
+#define PMU_CNTENCLR_CNT10_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT10_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Position */
+#define PMU_CNTENCLR_CNT11_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT11_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Position */
+#define PMU_CNTENCLR_CNT12_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT12_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Position */
+#define PMU_CNTENCLR_CNT13_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT13_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Mask */
+
+#define
PMU_CNTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Position */ +#define PMU_CNTENCLR_CNT14_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT14_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Position */ +#define PMU_CNTENCLR_CNT15_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT15_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Position */ +#define PMU_CNTENCLR_CNT16_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT16_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Position */ +#define PMU_CNTENCLR_CNT17_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT17_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Position */ +#define PMU_CNTENCLR_CNT18_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT18_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Position */ +#define PMU_CNTENCLR_CNT19_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT19_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Position */ +#define PMU_CNTENCLR_CNT20_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT20_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Position */ +#define PMU_CNTENCLR_CNT21_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT21_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Position */ +#define PMU_CNTENCLR_CNT22_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT22_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Position */ +#define PMU_CNTENCLR_CNT23_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT23_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Position */ +#define PMU_CNTENCLR_CNT24_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT24_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Position */ +#define PMU_CNTENCLR_CNT25_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT25_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Position */ +#define PMU_CNTENCLR_CNT26_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT26_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Position */ +#define PMU_CNTENCLR_CNT27_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT27_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Position */ +#define PMU_CNTENCLR_CNT28_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT28_ENABLE_Pos) /*!< 
PMU CNTENCLR: Event Counter 28 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Position */ +#define PMU_CNTENCLR_CNT29_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT29_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Position */ +#define PMU_CNTENCLR_CNT30_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT30_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Mask */ + +#define PMU_CNTENCLR_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENCLR: Cycle Counter Enable Clear Position */ +#define PMU_CNTENCLR_CCNTR_ENABLE_Msk (1UL << PMU_CNTENCLR_CCNTR_ENABLE_Pos) /*!< PMU CNTENCLR: Cycle Counter Enable Clear Mask */ + +/** \brief PMU Interrupt Enable Set Register Definitions */ + +#define PMU_INTENSET_CNT0_ENABLE_Pos 0U /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENSET_CNT0_ENABLE_Pos*/) /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT1_ENABLE_Pos 1U /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT1_ENABLE_Msk (1UL << PMU_INTENSET_CNT1_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT2_ENABLE_Pos 2U /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT2_ENABLE_Msk (1UL << PMU_INTENSET_CNT2_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT3_ENABLE_Pos 3U /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT3_ENABLE_Msk (1UL << PMU_INTENSET_CNT3_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT4_ENABLE_Pos 4U /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT4_ENABLE_Msk (1UL << PMU_INTENSET_CNT4_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT5_ENABLE_Pos 5U /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT5_ENABLE_Msk (1UL << PMU_INTENSET_CNT5_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT6_ENABLE_Pos 6U /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT6_ENABLE_Msk (1UL << PMU_INTENSET_CNT6_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT7_ENABLE_Pos 7U /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT7_ENABLE_Msk (1UL << PMU_INTENSET_CNT7_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT8_ENABLE_Pos 8U /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT8_ENABLE_Msk (1UL << PMU_INTENSET_CNT8_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT9_ENABLE_Pos 9U /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT9_ENABLE_Msk (1UL << PMU_INTENSET_CNT9_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT10_ENABLE_Pos 10U /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT10_ENABLE_Msk (1UL << PMU_INTENSET_CNT10_ENABLE_Pos) /*!< PMU 
INTENSET: Event Counter 10 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT11_ENABLE_Pos 11U /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT11_ENABLE_Msk (1UL << PMU_INTENSET_CNT11_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT12_ENABLE_Pos 12U /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT12_ENABLE_Msk (1UL << PMU_INTENSET_CNT12_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT13_ENABLE_Pos 13U /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT13_ENABLE_Msk (1UL << PMU_INTENSET_CNT13_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT14_ENABLE_Pos 14U /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT14_ENABLE_Msk (1UL << PMU_INTENSET_CNT14_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT15_ENABLE_Pos 15U /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT15_ENABLE_Msk (1UL << PMU_INTENSET_CNT15_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT16_ENABLE_Pos 16U /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT16_ENABLE_Msk (1UL << PMU_INTENSET_CNT16_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT17_ENABLE_Pos 17U /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT17_ENABLE_Msk (1UL << PMU_INTENSET_CNT17_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT18_ENABLE_Pos 18U /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT18_ENABLE_Msk (1UL << PMU_INTENSET_CNT18_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT19_ENABLE_Pos 19U /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT19_ENABLE_Msk (1UL << PMU_INTENSET_CNT19_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT20_ENABLE_Pos 20U /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT20_ENABLE_Msk (1UL << PMU_INTENSET_CNT20_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT21_ENABLE_Pos 21U /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT21_ENABLE_Msk (1UL << PMU_INTENSET_CNT21_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT22_ENABLE_Pos 22U /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT22_ENABLE_Msk (1UL << PMU_INTENSET_CNT22_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT23_ENABLE_Pos 23U /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT23_ENABLE_Msk (1UL << PMU_INTENSET_CNT23_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT24_ENABLE_Pos 24U /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT24_ENABLE_Msk (1UL 
<< PMU_INTENSET_CNT24_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT25_ENABLE_Pos 25U /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT25_ENABLE_Msk (1UL << PMU_INTENSET_CNT25_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT26_ENABLE_Pos 26U /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT26_ENABLE_Msk (1UL << PMU_INTENSET_CNT26_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT27_ENABLE_Pos 27U /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT27_ENABLE_Msk (1UL << PMU_INTENSET_CNT27_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT28_ENABLE_Pos 28U /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT28_ENABLE_Msk (1UL << PMU_INTENSET_CNT28_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT29_ENABLE_Pos 29U /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT29_ENABLE_Msk (1UL << PMU_INTENSET_CNT29_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT30_ENABLE_Pos 30U /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT30_ENABLE_Msk (1UL << PMU_INTENSET_CNT30_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Position */
+#define PMU_INTENSET_CYCCNT_ENABLE_Msk (1UL << PMU_INTENSET_CYCCNT_ENABLE_Pos) /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Mask */
+
+/** \brief PMU Interrupt Enable Clear Register Definitions */
+
+#define PMU_INTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT1_ENABLE_Msk (1UL << PMU_INTENCLR_CNT1_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT2_ENABLE_Msk (1UL << PMU_INTENCLR_CNT2_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT3_ENABLE_Msk (1UL << PMU_INTENCLR_CNT3_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT4_ENABLE_Msk (1UL << PMU_INTENCLR_CNT4_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT5_ENABLE_Msk (1UL << PMU_INTENCLR_CNT5_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU INTENCLR:
Event Counter 6 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT6_ENABLE_Msk (1UL << PMU_INTENCLR_CNT6_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT7_ENABLE_Msk (1UL << PMU_INTENCLR_CNT7_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT8_ENABLE_Msk (1UL << PMU_INTENCLR_CNT8_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT9_ENABLE_Msk (1UL << PMU_INTENCLR_CNT9_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT10_ENABLE_Msk (1UL << PMU_INTENCLR_CNT10_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT11_ENABLE_Msk (1UL << PMU_INTENCLR_CNT11_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT12_ENABLE_Msk (1UL << PMU_INTENCLR_CNT12_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT13_ENABLE_Msk (1UL << PMU_INTENCLR_CNT13_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT14_ENABLE_Msk (1UL << PMU_INTENCLR_CNT14_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT15_ENABLE_Msk (1UL << PMU_INTENCLR_CNT15_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT16_ENABLE_Msk (1UL << PMU_INTENCLR_CNT16_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT17_ENABLE_Msk (1UL << PMU_INTENCLR_CNT17_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT18_ENABLE_Msk (1UL << PMU_INTENCLR_CNT18_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT19_ENABLE_Msk (1UL << PMU_INTENCLR_CNT19_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable 
Clear Mask */ + +#define PMU_INTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT20_ENABLE_Msk (1UL << PMU_INTENCLR_CNT20_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT21_ENABLE_Msk (1UL << PMU_INTENCLR_CNT21_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT22_ENABLE_Msk (1UL << PMU_INTENCLR_CNT22_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT23_ENABLE_Msk (1UL << PMU_INTENCLR_CNT23_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT24_ENABLE_Msk (1UL << PMU_INTENCLR_CNT24_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT25_ENABLE_Msk (1UL << PMU_INTENCLR_CNT25_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT26_ENABLE_Msk (1UL << PMU_INTENCLR_CNT26_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT27_ENABLE_Msk (1UL << PMU_INTENCLR_CNT27_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT28_ENABLE_Msk (1UL << PMU_INTENCLR_CNT28_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT29_ENABLE_Msk (1UL << PMU_INTENCLR_CNT29_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT30_ENABLE_Msk (1UL << PMU_INTENCLR_CNT30_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CYCCNT_ENABLE_Msk (1UL << PMU_INTENCLR_CYCCNT_ENABLE_Pos) /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Mask */ + +/** \brief PMU Overflow Flag Status Set Register Definitions */ + +#define PMU_OVSSET_CNT0_STATUS_Pos 0U /*!< PMU OVSSET: Event Counter 0 Overflow Set Position */ +#define PMU_OVSSET_CNT0_STATUS_Msk (1UL /*<< PMU_OVSSET_CNT0_STATUS_Pos*/) /*!< PMU OVSSET: Event Counter 0 Overflow Set Mask */ + +#define PMU_OVSSET_CNT1_STATUS_Pos 1U /*!< PMU OVSSET: Event Counter 1 Overflow Set Position */ +#define 
PMU_OVSSET_CNT1_STATUS_Msk (1UL << PMU_OVSSET_CNT1_STATUS_Pos) /*!< PMU OVSSET: Event Counter 1 Overflow Set Mask */ + +#define PMU_OVSSET_CNT2_STATUS_Pos 2U /*!< PMU OVSSET: Event Counter 2 Overflow Set Position */ +#define PMU_OVSSET_CNT2_STATUS_Msk (1UL << PMU_OVSSET_CNT2_STATUS_Pos) /*!< PMU OVSSET: Event Counter 2 Overflow Set Mask */ + +#define PMU_OVSSET_CNT3_STATUS_Pos 3U /*!< PMU OVSSET: Event Counter 3 Overflow Set Position */ +#define PMU_OVSSET_CNT3_STATUS_Msk (1UL << PMU_OVSSET_CNT3_STATUS_Pos) /*!< PMU OVSSET: Event Counter 3 Overflow Set Mask */ + +#define PMU_OVSSET_CNT4_STATUS_Pos 4U /*!< PMU OVSSET: Event Counter 4 Overflow Set Position */ +#define PMU_OVSSET_CNT4_STATUS_Msk (1UL << PMU_OVSSET_CNT4_STATUS_Pos) /*!< PMU OVSSET: Event Counter 4 Overflow Set Mask */ + +#define PMU_OVSSET_CNT5_STATUS_Pos 5U /*!< PMU OVSSET: Event Counter 5 Overflow Set Position */ +#define PMU_OVSSET_CNT5_STATUS_Msk (1UL << PMU_OVSSET_CNT5_STATUS_Pos) /*!< PMU OVSSET: Event Counter 5 Overflow Set Mask */ + +#define PMU_OVSSET_CNT6_STATUS_Pos 6U /*!< PMU OVSSET: Event Counter 6 Overflow Set Position */ +#define PMU_OVSSET_CNT6_STATUS_Msk (1UL << PMU_OVSSET_CNT6_STATUS_Pos) /*!< PMU OVSSET: Event Counter 6 Overflow Set Mask */ + +#define PMU_OVSSET_CNT7_STATUS_Pos 7U /*!< PMU OVSSET: Event Counter 7 Overflow Set Position */ +#define PMU_OVSSET_CNT7_STATUS_Msk (1UL << PMU_OVSSET_CNT7_STATUS_Pos) /*!< PMU OVSSET: Event Counter 7 Overflow Set Mask */ + +#define PMU_OVSSET_CNT8_STATUS_Pos 8U /*!< PMU OVSSET: Event Counter 8 Overflow Set Position */ +#define PMU_OVSSET_CNT8_STATUS_Msk (1UL << PMU_OVSSET_CNT8_STATUS_Pos) /*!< PMU OVSSET: Event Counter 8 Overflow Set Mask */ + +#define PMU_OVSSET_CNT9_STATUS_Pos 9U /*!< PMU OVSSET: Event Counter 9 Overflow Set Position */ +#define PMU_OVSSET_CNT9_STATUS_Msk (1UL << PMU_OVSSET_CNT9_STATUS_Pos) /*!< PMU OVSSET: Event Counter 9 Overflow Set Mask */ + +#define PMU_OVSSET_CNT10_STATUS_Pos 10U /*!< PMU OVSSET: Event Counter 10 Overflow Set Position */ +#define PMU_OVSSET_CNT10_STATUS_Msk (1UL << PMU_OVSSET_CNT10_STATUS_Pos) /*!< PMU OVSSET: Event Counter 10 Overflow Set Mask */ + +#define PMU_OVSSET_CNT11_STATUS_Pos 11U /*!< PMU OVSSET: Event Counter 11 Overflow Set Position */ +#define PMU_OVSSET_CNT11_STATUS_Msk (1UL << PMU_OVSSET_CNT11_STATUS_Pos) /*!< PMU OVSSET: Event Counter 11 Overflow Set Mask */ + +#define PMU_OVSSET_CNT12_STATUS_Pos 12U /*!< PMU OVSSET: Event Counter 12 Overflow Set Position */ +#define PMU_OVSSET_CNT12_STATUS_Msk (1UL << PMU_OVSSET_CNT12_STATUS_Pos) /*!< PMU OVSSET: Event Counter 12 Overflow Set Mask */ + +#define PMU_OVSSET_CNT13_STATUS_Pos 13U /*!< PMU OVSSET: Event Counter 13 Overflow Set Position */ +#define PMU_OVSSET_CNT13_STATUS_Msk (1UL << PMU_OVSSET_CNT13_STATUS_Pos) /*!< PMU OVSSET: Event Counter 13 Overflow Set Mask */ + +#define PMU_OVSSET_CNT14_STATUS_Pos 14U /*!< PMU OVSSET: Event Counter 14 Overflow Set Position */ +#define PMU_OVSSET_CNT14_STATUS_Msk (1UL << PMU_OVSSET_CNT14_STATUS_Pos) /*!< PMU OVSSET: Event Counter 14 Overflow Set Mask */ + +#define PMU_OVSSET_CNT15_STATUS_Pos 15U /*!< PMU OVSSET: Event Counter 15 Overflow Set Position */ +#define PMU_OVSSET_CNT15_STATUS_Msk (1UL << PMU_OVSSET_CNT15_STATUS_Pos) /*!< PMU OVSSET: Event Counter 15 Overflow Set Mask */ + +#define PMU_OVSSET_CNT16_STATUS_Pos 16U /*!< PMU OVSSET: Event Counter 16 Overflow Set Position */ +#define PMU_OVSSET_CNT16_STATUS_Msk (1UL << PMU_OVSSET_CNT16_STATUS_Pos) /*!< PMU OVSSET: Event Counter 16 Overflow Set Mask */ + +#define 
PMU_OVSSET_CNT17_STATUS_Pos 17U /*!< PMU OVSSET: Event Counter 17 Overflow Set Position */ +#define PMU_OVSSET_CNT17_STATUS_Msk (1UL << PMU_OVSSET_CNT17_STATUS_Pos) /*!< PMU OVSSET: Event Counter 17 Overflow Set Mask */ + +#define PMU_OVSSET_CNT18_STATUS_Pos 18U /*!< PMU OVSSET: Event Counter 18 Overflow Set Position */ +#define PMU_OVSSET_CNT18_STATUS_Msk (1UL << PMU_OVSSET_CNT18_STATUS_Pos) /*!< PMU OVSSET: Event Counter 18 Overflow Set Mask */ + +#define PMU_OVSSET_CNT19_STATUS_Pos 19U /*!< PMU OVSSET: Event Counter 19 Overflow Set Position */ +#define PMU_OVSSET_CNT19_STATUS_Msk (1UL << PMU_OVSSET_CNT19_STATUS_Pos) /*!< PMU OVSSET: Event Counter 19 Overflow Set Mask */ + +#define PMU_OVSSET_CNT20_STATUS_Pos 20U /*!< PMU OVSSET: Event Counter 20 Overflow Set Position */ +#define PMU_OVSSET_CNT20_STATUS_Msk (1UL << PMU_OVSSET_CNT20_STATUS_Pos) /*!< PMU OVSSET: Event Counter 20 Overflow Set Mask */ + +#define PMU_OVSSET_CNT21_STATUS_Pos 21U /*!< PMU OVSSET: Event Counter 21 Overflow Set Position */ +#define PMU_OVSSET_CNT21_STATUS_Msk (1UL << PMU_OVSSET_CNT21_STATUS_Pos) /*!< PMU OVSSET: Event Counter 21 Overflow Set Mask */ + +#define PMU_OVSSET_CNT22_STATUS_Pos 22U /*!< PMU OVSSET: Event Counter 22 Overflow Set Position */ +#define PMU_OVSSET_CNT22_STATUS_Msk (1UL << PMU_OVSSET_CNT22_STATUS_Pos) /*!< PMU OVSSET: Event Counter 22 Overflow Set Mask */ + +#define PMU_OVSSET_CNT23_STATUS_Pos 23U /*!< PMU OVSSET: Event Counter 23 Overflow Set Position */ +#define PMU_OVSSET_CNT23_STATUS_Msk (1UL << PMU_OVSSET_CNT23_STATUS_Pos) /*!< PMU OVSSET: Event Counter 23 Overflow Set Mask */ + +#define PMU_OVSSET_CNT24_STATUS_Pos 24U /*!< PMU OVSSET: Event Counter 24 Overflow Set Position */ +#define PMU_OVSSET_CNT24_STATUS_Msk (1UL << PMU_OVSSET_CNT24_STATUS_Pos) /*!< PMU OVSSET: Event Counter 24 Overflow Set Mask */ + +#define PMU_OVSSET_CNT25_STATUS_Pos 25U /*!< PMU OVSSET: Event Counter 25 Overflow Set Position */ +#define PMU_OVSSET_CNT25_STATUS_Msk (1UL << PMU_OVSSET_CNT25_STATUS_Pos) /*!< PMU OVSSET: Event Counter 25 Overflow Set Mask */ + +#define PMU_OVSSET_CNT26_STATUS_Pos 26U /*!< PMU OVSSET: Event Counter 26 Overflow Set Position */ +#define PMU_OVSSET_CNT26_STATUS_Msk (1UL << PMU_OVSSET_CNT26_STATUS_Pos) /*!< PMU OVSSET: Event Counter 26 Overflow Set Mask */ + +#define PMU_OVSSET_CNT27_STATUS_Pos 27U /*!< PMU OVSSET: Event Counter 27 Overflow Set Position */ +#define PMU_OVSSET_CNT27_STATUS_Msk (1UL << PMU_OVSSET_CNT27_STATUS_Pos) /*!< PMU OVSSET: Event Counter 27 Overflow Set Mask */ + +#define PMU_OVSSET_CNT28_STATUS_Pos 28U /*!< PMU OVSSET: Event Counter 28 Overflow Set Position */ +#define PMU_OVSSET_CNT28_STATUS_Msk (1UL << PMU_OVSSET_CNT28_STATUS_Pos) /*!< PMU OVSSET: Event Counter 28 Overflow Set Mask */ + +#define PMU_OVSSET_CNT29_STATUS_Pos 29U /*!< PMU OVSSET: Event Counter 29 Overflow Set Position */ +#define PMU_OVSSET_CNT29_STATUS_Msk (1UL << PMU_OVSSET_CNT29_STATUS_Pos) /*!< PMU OVSSET: Event Counter 29 Overflow Set Mask */ + +#define PMU_OVSSET_CNT30_STATUS_Pos 30U /*!< PMU OVSSET: Event Counter 30 Overflow Set Position */ +#define PMU_OVSSET_CNT30_STATUS_Msk (1UL << PMU_OVSSET_CNT30_STATUS_Pos) /*!< PMU OVSSET: Event Counter 30 Overflow Set Mask */ + +#define PMU_OVSSET_CYCCNT_STATUS_Pos 31U /*!< PMU OVSSET: Cycle Counter Overflow Set Position */ +#define PMU_OVSSET_CYCCNT_STATUS_Msk (1UL << PMU_OVSSET_CYCCNT_STATUS_Pos) /*!< PMU OVSSET: Cycle Counter Overflow Set Mask */ + +/** \brief PMU Overflow Flag Status Clear Register Definitions */ + +#define 
PMU_OVSCLR_CNT0_STATUS_Pos 0U /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Position */ +#define PMU_OVSCLR_CNT0_STATUS_Msk (1UL /*<< PMU_OVSCLR_CNT0_STATUS_Pos*/) /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT1_STATUS_Pos 1U /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Position */ +#define PMU_OVSCLR_CNT1_STATUS_Msk (1UL << PMU_OVSCLR_CNT1_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 1 Overflow Clear */ + +#define PMU_OVSCLR_CNT2_STATUS_Pos 2U /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Position */ +#define PMU_OVSCLR_CNT2_STATUS_Msk (1UL << PMU_OVSCLR_CNT2_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT3_STATUS_Pos 3U /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Position */ +#define PMU_OVSCLR_CNT3_STATUS_Msk (1UL << PMU_OVSCLR_CNT3_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT4_STATUS_Pos 4U /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Position */ +#define PMU_OVSCLR_CNT4_STATUS_Msk (1UL << PMU_OVSCLR_CNT4_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT5_STATUS_Pos 5U /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Position */ +#define PMU_OVSCLR_CNT5_STATUS_Msk (1UL << PMU_OVSCLR_CNT5_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT6_STATUS_Pos 6U /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Position */ +#define PMU_OVSCLR_CNT6_STATUS_Msk (1UL << PMU_OVSCLR_CNT6_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT7_STATUS_Pos 7U /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Position */ +#define PMU_OVSCLR_CNT7_STATUS_Msk (1UL << PMU_OVSCLR_CNT7_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT8_STATUS_Pos 8U /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Position */ +#define PMU_OVSCLR_CNT8_STATUS_Msk (1UL << PMU_OVSCLR_CNT8_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT9_STATUS_Pos 9U /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Position */ +#define PMU_OVSCLR_CNT9_STATUS_Msk (1UL << PMU_OVSCLR_CNT9_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT10_STATUS_Pos 10U /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Position */ +#define PMU_OVSCLR_CNT10_STATUS_Msk (1UL << PMU_OVSCLR_CNT10_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT11_STATUS_Pos 11U /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Position */ +#define PMU_OVSCLR_CNT11_STATUS_Msk (1UL << PMU_OVSCLR_CNT11_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT12_STATUS_Pos 12U /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Position */ +#define PMU_OVSCLR_CNT12_STATUS_Msk (1UL << PMU_OVSCLR_CNT12_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT13_STATUS_Pos 13U /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Position */ +#define PMU_OVSCLR_CNT13_STATUS_Msk (1UL << PMU_OVSCLR_CNT13_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT14_STATUS_Pos 14U /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Position */ +#define PMU_OVSCLR_CNT14_STATUS_Msk (1UL << PMU_OVSCLR_CNT14_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT15_STATUS_Pos 15U /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Position 
*/ +#define PMU_OVSCLR_CNT15_STATUS_Msk (1UL << PMU_OVSCLR_CNT15_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT16_STATUS_Pos 16U /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Position */ +#define PMU_OVSCLR_CNT16_STATUS_Msk (1UL << PMU_OVSCLR_CNT16_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT17_STATUS_Pos 17U /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Position */ +#define PMU_OVSCLR_CNT17_STATUS_Msk (1UL << PMU_OVSCLR_CNT17_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT18_STATUS_Pos 18U /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Position */ +#define PMU_OVSCLR_CNT18_STATUS_Msk (1UL << PMU_OVSCLR_CNT18_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT19_STATUS_Pos 19U /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Position */ +#define PMU_OVSCLR_CNT19_STATUS_Msk (1UL << PMU_OVSCLR_CNT19_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT20_STATUS_Pos 20U /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Position */ +#define PMU_OVSCLR_CNT20_STATUS_Msk (1UL << PMU_OVSCLR_CNT20_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT21_STATUS_Pos 21U /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Position */ +#define PMU_OVSCLR_CNT21_STATUS_Msk (1UL << PMU_OVSCLR_CNT21_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT22_STATUS_Pos 22U /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Position */ +#define PMU_OVSCLR_CNT22_STATUS_Msk (1UL << PMU_OVSCLR_CNT22_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT23_STATUS_Pos 23U /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Position */ +#define PMU_OVSCLR_CNT23_STATUS_Msk (1UL << PMU_OVSCLR_CNT23_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT24_STATUS_Pos 24U /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Position */ +#define PMU_OVSCLR_CNT24_STATUS_Msk (1UL << PMU_OVSCLR_CNT24_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT25_STATUS_Pos 25U /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Position */ +#define PMU_OVSCLR_CNT25_STATUS_Msk (1UL << PMU_OVSCLR_CNT25_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT26_STATUS_Pos 26U /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Position */ +#define PMU_OVSCLR_CNT26_STATUS_Msk (1UL << PMU_OVSCLR_CNT26_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT27_STATUS_Pos 27U /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Position */ +#define PMU_OVSCLR_CNT27_STATUS_Msk (1UL << PMU_OVSCLR_CNT27_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT28_STATUS_Pos 28U /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Position */ +#define PMU_OVSCLR_CNT28_STATUS_Msk (1UL << PMU_OVSCLR_CNT28_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT29_STATUS_Pos 29U /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Position */ +#define PMU_OVSCLR_CNT29_STATUS_Msk (1UL << PMU_OVSCLR_CNT29_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT30_STATUS_Pos 30U /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Position */ +#define 
PMU_OVSCLR_CNT30_STATUS_Msk (1UL << PMU_OVSCLR_CNT30_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Mask */ + +#define PMU_OVSCLR_CYCCNT_STATUS_Pos 31U /*!< PMU OVSCLR: Cycle Counter Overflow Clear Position */ +#define PMU_OVSCLR_CYCCNT_STATUS_Msk (1UL << PMU_OVSCLR_CYCCNT_STATUS_Pos) /*!< PMU OVSCLR: Cycle Counter Overflow Clear Mask */ + +/** \brief PMU Software Increment Counter */ + +#define PMU_SWINC_CNT0_Pos 0U /*!< PMU SWINC: Event Counter 0 Software Increment Position */ +#define PMU_SWINC_CNT0_Msk (1UL /*<< PMU_SWINC_CNT0_Pos */) /*!< PMU SWINC: Event Counter 0 Software Increment Mask */ + +#define PMU_SWINC_CNT1_Pos 1U /*!< PMU SWINC: Event Counter 1 Software Increment Position */ +#define PMU_SWINC_CNT1_Msk (1UL << PMU_SWINC_CNT1_Pos) /*!< PMU SWINC: Event Counter 1 Software Increment Mask */ + +#define PMU_SWINC_CNT2_Pos 2U /*!< PMU SWINC: Event Counter 2 Software Increment Position */ +#define PMU_SWINC_CNT2_Msk (1UL << PMU_SWINC_CNT2_Pos) /*!< PMU SWINC: Event Counter 2 Software Increment Mask */ + +#define PMU_SWINC_CNT3_Pos 3U /*!< PMU SWINC: Event Counter 3 Software Increment Position */ +#define PMU_SWINC_CNT3_Msk (1UL << PMU_SWINC_CNT3_Pos) /*!< PMU SWINC: Event Counter 3 Software Increment Mask */ + +#define PMU_SWINC_CNT4_Pos 4U /*!< PMU SWINC: Event Counter 4 Software Increment Position */ +#define PMU_SWINC_CNT4_Msk (1UL << PMU_SWINC_CNT4_Pos) /*!< PMU SWINC: Event Counter 4 Software Increment Mask */ + +#define PMU_SWINC_CNT5_Pos 5U /*!< PMU SWINC: Event Counter 5 Software Increment Position */ +#define PMU_SWINC_CNT5_Msk (1UL << PMU_SWINC_CNT5_Pos) /*!< PMU SWINC: Event Counter 5 Software Increment Mask */ + +#define PMU_SWINC_CNT6_Pos 6U /*!< PMU SWINC: Event Counter 6 Software Increment Position */ +#define PMU_SWINC_CNT6_Msk (1UL << PMU_SWINC_CNT6_Pos) /*!< PMU SWINC: Event Counter 6 Software Increment Mask */ + +#define PMU_SWINC_CNT7_Pos 7U /*!< PMU SWINC: Event Counter 7 Software Increment Position */ +#define PMU_SWINC_CNT7_Msk (1UL << PMU_SWINC_CNT7_Pos) /*!< PMU SWINC: Event Counter 7 Software Increment Mask */ + +#define PMU_SWINC_CNT8_Pos 8U /*!< PMU SWINC: Event Counter 8 Software Increment Position */ +#define PMU_SWINC_CNT8_Msk (1UL << PMU_SWINC_CNT8_Pos) /*!< PMU SWINC: Event Counter 8 Software Increment Mask */ + +#define PMU_SWINC_CNT9_Pos 9U /*!< PMU SWINC: Event Counter 9 Software Increment Position */ +#define PMU_SWINC_CNT9_Msk (1UL << PMU_SWINC_CNT9_Pos) /*!< PMU SWINC: Event Counter 9 Software Increment Mask */ + +#define PMU_SWINC_CNT10_Pos 10U /*!< PMU SWINC: Event Counter 10 Software Increment Position */ +#define PMU_SWINC_CNT10_Msk (1UL << PMU_SWINC_CNT10_Pos) /*!< PMU SWINC: Event Counter 10 Software Increment Mask */ + +#define PMU_SWINC_CNT11_Pos 11U /*!< PMU SWINC: Event Counter 11 Software Increment Position */ +#define PMU_SWINC_CNT11_Msk (1UL << PMU_SWINC_CNT11_Pos) /*!< PMU SWINC: Event Counter 11 Software Increment Mask */ + +#define PMU_SWINC_CNT12_Pos 12U /*!< PMU SWINC: Event Counter 12 Software Increment Position */ +#define PMU_SWINC_CNT12_Msk (1UL << PMU_SWINC_CNT12_Pos) /*!< PMU SWINC: Event Counter 12 Software Increment Mask */ + +#define PMU_SWINC_CNT13_Pos 13U /*!< PMU SWINC: Event Counter 13 Software Increment Position */ +#define PMU_SWINC_CNT13_Msk (1UL << PMU_SWINC_CNT13_Pos) /*!< PMU SWINC: Event Counter 13 Software Increment Mask */ + +#define PMU_SWINC_CNT14_Pos 14U /*!< PMU SWINC: Event Counter 14 Software Increment Position */ +#define PMU_SWINC_CNT14_Msk (1UL << PMU_SWINC_CNT14_Pos) /*!< PMU 
SWINC: Event Counter 14 Software Increment Mask */ + +#define PMU_SWINC_CNT15_Pos 15U /*!< PMU SWINC: Event Counter 15 Software Increment Position */ +#define PMU_SWINC_CNT15_Msk (1UL << PMU_SWINC_CNT15_Pos) /*!< PMU SWINC: Event Counter 15 Software Increment Mask */ + +#define PMU_SWINC_CNT16_Pos 16U /*!< PMU SWINC: Event Counter 16 Software Increment Position */ +#define PMU_SWINC_CNT16_Msk (1UL << PMU_SWINC_CNT16_Pos) /*!< PMU SWINC: Event Counter 16 Software Increment Mask */ + +#define PMU_SWINC_CNT17_Pos 17U /*!< PMU SWINC: Event Counter 17 Software Increment Position */ +#define PMU_SWINC_CNT17_Msk (1UL << PMU_SWINC_CNT17_Pos) /*!< PMU SWINC: Event Counter 17 Software Increment Mask */ + +#define PMU_SWINC_CNT18_Pos 18U /*!< PMU SWINC: Event Counter 18 Software Increment Position */ +#define PMU_SWINC_CNT18_Msk (1UL << PMU_SWINC_CNT18_Pos) /*!< PMU SWINC: Event Counter 18 Software Increment Mask */ + +#define PMU_SWINC_CNT19_Pos 19U /*!< PMU SWINC: Event Counter 19 Software Increment Position */ +#define PMU_SWINC_CNT19_Msk (1UL << PMU_SWINC_CNT19_Pos) /*!< PMU SWINC: Event Counter 19 Software Increment Mask */ + +#define PMU_SWINC_CNT20_Pos 20U /*!< PMU SWINC: Event Counter 20 Software Increment Position */ +#define PMU_SWINC_CNT20_Msk (1UL << PMU_SWINC_CNT20_Pos) /*!< PMU SWINC: Event Counter 20 Software Increment Mask */ + +#define PMU_SWINC_CNT21_Pos 21U /*!< PMU SWINC: Event Counter 21 Software Increment Position */ +#define PMU_SWINC_CNT21_Msk (1UL << PMU_SWINC_CNT21_Pos) /*!< PMU SWINC: Event Counter 21 Software Increment Mask */ + +#define PMU_SWINC_CNT22_Pos 22U /*!< PMU SWINC: Event Counter 22 Software Increment Position */ +#define PMU_SWINC_CNT22_Msk (1UL << PMU_SWINC_CNT22_Pos) /*!< PMU SWINC: Event Counter 22 Software Increment Mask */ + +#define PMU_SWINC_CNT23_Pos 23U /*!< PMU SWINC: Event Counter 23 Software Increment Position */ +#define PMU_SWINC_CNT23_Msk (1UL << PMU_SWINC_CNT23_Pos) /*!< PMU SWINC: Event Counter 23 Software Increment Mask */ + +#define PMU_SWINC_CNT24_Pos 24U /*!< PMU SWINC: Event Counter 24 Software Increment Position */ +#define PMU_SWINC_CNT24_Msk (1UL << PMU_SWINC_CNT24_Pos) /*!< PMU SWINC: Event Counter 24 Software Increment Mask */ + +#define PMU_SWINC_CNT25_Pos 25U /*!< PMU SWINC: Event Counter 25 Software Increment Position */ +#define PMU_SWINC_CNT25_Msk (1UL << PMU_SWINC_CNT25_Pos) /*!< PMU SWINC: Event Counter 25 Software Increment Mask */ + +#define PMU_SWINC_CNT26_Pos 26U /*!< PMU SWINC: Event Counter 26 Software Increment Position */ +#define PMU_SWINC_CNT26_Msk (1UL << PMU_SWINC_CNT26_Pos) /*!< PMU SWINC: Event Counter 26 Software Increment Mask */ + +#define PMU_SWINC_CNT27_Pos 27U /*!< PMU SWINC: Event Counter 27 Software Increment Position */ +#define PMU_SWINC_CNT27_Msk (1UL << PMU_SWINC_CNT27_Pos) /*!< PMU SWINC: Event Counter 27 Software Increment Mask */ + +#define PMU_SWINC_CNT28_Pos 28U /*!< PMU SWINC: Event Counter 28 Software Increment Position */ +#define PMU_SWINC_CNT28_Msk (1UL << PMU_SWINC_CNT28_Pos) /*!< PMU SWINC: Event Counter 28 Software Increment Mask */ + +#define PMU_SWINC_CNT29_Pos 29U /*!< PMU SWINC: Event Counter 29 Software Increment Position */ +#define PMU_SWINC_CNT29_Msk (1UL << PMU_SWINC_CNT29_Pos) /*!< PMU SWINC: Event Counter 29 Software Increment Mask */ + +#define PMU_SWINC_CNT30_Pos 30U /*!< PMU SWINC: Event Counter 30 Software Increment Position */ +#define PMU_SWINC_CNT30_Msk (1UL << PMU_SWINC_CNT30_Pos) /*!< PMU SWINC: Event Counter 30 Software Increment Mask */ + +/** \brief PMU Control 
Register Definitions */
+
+#define PMU_CTRL_ENABLE_Pos 0U /*!< PMU CTRL: ENABLE Position */
+#define PMU_CTRL_ENABLE_Msk (1UL /*<< PMU_CTRL_ENABLE_Pos*/) /*!< PMU CTRL: ENABLE Mask */
+
+#define PMU_CTRL_EVENTCNT_RESET_Pos 1U /*!< PMU CTRL: Event Counter Reset Position */
+#define PMU_CTRL_EVENTCNT_RESET_Msk (1UL << PMU_CTRL_EVENTCNT_RESET_Pos) /*!< PMU CTRL: Event Counter Reset Mask */
+
+#define PMU_CTRL_CYCCNT_RESET_Pos 2U /*!< PMU CTRL: Cycle Counter Reset Position */
+#define PMU_CTRL_CYCCNT_RESET_Msk (1UL << PMU_CTRL_CYCCNT_RESET_Pos) /*!< PMU CTRL: Cycle Counter Reset Mask */
+
+#define PMU_CTRL_CYCCNT_DISABLE_Pos 5U /*!< PMU CTRL: Disable Cycle Counter Position */
+#define PMU_CTRL_CYCCNT_DISABLE_Msk (1UL << PMU_CTRL_CYCCNT_DISABLE_Pos) /*!< PMU CTRL: Disable Cycle Counter Mask */
+
+#define PMU_CTRL_FRZ_ON_OV_Pos 9U /*!< PMU CTRL: Freeze-on-overflow Position */
+#define PMU_CTRL_FRZ_ON_OV_Msk (1UL << PMU_CTRL_FRZ_ON_OV_Pos) /*!< PMU CTRL: Freeze-on-overflow Mask */
+
+#define PMU_CTRL_TRACE_ON_OV_Pos 11U /*!< PMU CTRL: Trace-on-overflow Position */
+#define PMU_CTRL_TRACE_ON_OV_Msk (1UL << PMU_CTRL_TRACE_ON_OV_Pos) /*!< PMU CTRL: Trace-on-overflow Mask */
+
+/** \brief PMU Type Register Definitions */
+
+#define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */
+#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */
+
+#define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */
+#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */
+
+#define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */
+#define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */
+
+#define PMU_TYPE_FRZ_OV_SUPPORT_Pos 21U /*!< PMU TYPE: Freeze-on-overflow Support Position */
+#define PMU_TYPE_FRZ_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Freeze-on-overflow Support Mask */
+
+#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */
+#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */
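+
+/* Illustrative sketch (not part of the original header): a minimal cycle-count
+   measurement built from the PMU_CTRL_*, PMU_CNTENSET_* and PMU_CNTENCLR_*
+   macros above. It assumes the usual CMSIS instance pointer "PMU"
+   ((PMU_Type *) PMU_BASE) declared elsewhere in this header; overflow handling
+   (OVSSET/OVSCLR) and barriers are omitted for brevity. */
+#if 0 /* documentation-only example */
+static inline uint32_t PMU_CycleCount_Example (void (*work)(void))
+{
+  PMU->CTRL     = PMU_CTRL_CYCCNT_RESET_Msk | PMU_CTRL_ENABLE_Msk;  /* reset the cycle counter and enable the PMU */
+  PMU->CNTENSET = PMU_CNTENSET_CCNTR_ENABLE_Msk;                    /* start the cycle counter                    */
+  work();                                                           /* code section being measured                */
+  PMU->CNTENCLR = PMU_CNTENCLR_CCNTR_ENABLE_Msk;                    /* stop the cycle counter                     */
+  return PMU->CCNTR;                                                /* elapsed cycles, modulo counter overflow    */
+}
+#endif
+
+/** \brief PMU Authentication Status Register Definitions */
+
+#define PMU_AUTHSTATUS_NSID_Pos 0U /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Position */
+#define PMU_AUTHSTATUS_NSID_Msk (0x3UL /*<< PMU_AUTHSTATUS_NSID_Pos*/) /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Mask */
+
+#define PMU_AUTHSTATUS_NSNID_Pos 2U /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Position */
+#define PMU_AUTHSTATUS_NSNID_Msk (0x3UL << PMU_AUTHSTATUS_NSNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Mask */
+
+#define PMU_AUTHSTATUS_SID_Pos 4U /*!< PMU AUTHSTATUS: Secure Invasive Debug Position */
+#define PMU_AUTHSTATUS_SID_Msk (0x3UL << PMU_AUTHSTATUS_SID_Pos) /*!< PMU AUTHSTATUS: Secure Invasive Debug Mask */
+
+#define PMU_AUTHSTATUS_SNID_Pos 6U /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Position */
+#define PMU_AUTHSTATUS_SNID_Msk (0x3UL << PMU_AUTHSTATUS_SNID_Pos) /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Mask */
+
+#define PMU_AUTHSTATUS_NSUID_Pos 16U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Position */
+#define PMU_AUTHSTATUS_NSUID_Msk (0x3UL << PMU_AUTHSTATUS_NSUID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Mask */
+
+#define PMU_AUTHSTATUS_NSUNID_Pos 18U /*!< PMU AUTHSTATUS: Non-secure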
Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUNID_Msk (0x3UL << PMU_AUTHSTATUS_NSUNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUID_Pos 20U /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_SUID_Msk (0x3UL << PMU_AUTHSTATUS_SUID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUNID_Pos 22U /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SUNID_Msk (0x3UL << PMU_AUTHSTATUS_SUNID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Mask */ + +/*@} end of group CMSIS_PMU */ +#endif + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< 
MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (1UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_PXN_Pos 4U /*!< MPU RLAR: PXN Position */ +#define MPU_RLAR_PXN_Msk (1UL << MPU_RLAR_PXN_Pos) /*!< MPU RLAR: PXN Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
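+ \note Illustrative usage sketch (editorial addition, not part of the original CMSIS text): the snippet below programs one SAU region from Secure state and then enables the SAU; the region number and address range are hypothetical placeholders, and the RNR, RBAR and RLAR registers are only present when __SAUREGION_PRESENT is 1. + \code + SAU->RNR = 0U; // select region 0 (hypothetical region number) + SAU->RBAR = (0x20040000UL & SAU_RBAR_BADDR_Msk); // hypothetical 32-byte aligned base address + SAU->RLAR = (0x2007FFE0UL & SAU_RLAR_LADDR_Msk) | SAU_RLAR_ENABLE_Msk; // hypothetical limit, region enabled + SAU->CTRL |= SAU_CTRL_ENABLE_Msk; // enable the SAU + \endcode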
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +#define FPU_FPDSCR_FZ16_Pos 19U /*!< FPDSCR: FZ16 bit Position */ +#define FPU_FPDSCR_FZ16_Msk (1UL << FPU_FPDSCR_FZ16_Pos) /*!< FPDSCR: FZ16 bit Mask */ + +#define FPU_FPDSCR_LTPSIZE_Pos 16U /*!< FPDSCR: LTPSIZE bit Position */ +#define FPU_FPDSCR_LTPSIZE_Msk (7UL << FPU_FPDSCR_LTPSIZE_Pos) /*!< FPDSCR: LTPSIZE bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: FPRound bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: FPRound bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: FPSqrt bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: FPSqrt bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: FPDivide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: FPDP bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: FPDP bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: FPSP bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: FPSP bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMDReg bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMDReg bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: FMAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: FMAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FPHP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FPHP bits Mask */ + +#define FPU_MVFR1_FP16_Pos 20U /*!< MVFR1: FP16 bits Position */ +#define FPU_MVFR1_FP16_Msk (0xFUL << FPU_MVFR1_FP16_Pos) /*!< MVFR1: FP16 bits Mask */ + +#define FPU_MVFR1_MVE_Pos 8U /*!< MVFR1: MVE bits Position */ +#define FPU_MVFR1_MVE_Msk (0xFUL << FPU_MVFR1_MVE_Pos) /*!< MVFR1: MVE bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: FPDNaN bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: FPDNaN bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FPFtZ bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL 
/*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FPFtZ bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_FPD_Pos 23U /*!< \deprecated CoreDebug DHCSR: S_FPD Position */ +#define CoreDebug_DHCSR_S_FPD_Msk (1UL << CoreDebug_DHCSR_S_FPD_Pos) /*!< \deprecated CoreDebug DHCSR: S_FPD Mask */ + +#define CoreDebug_DHCSR_S_SUIDE_Pos 22U /*!< \deprecated CoreDebug DHCSR: S_SUIDE Position */ +#define CoreDebug_DHCSR_S_SUIDE_Msk (1UL << CoreDebug_DHCSR_S_SUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SUIDE Mask */ + +#define CoreDebug_DHCSR_S_NSUIDE_Pos 21U /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Position */ +#define CoreDebug_DHCSR_S_NSUIDE_Msk (1UL << CoreDebug_DHCSR_S_NSUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Mask */ + +#define CoreDebug_DHCSR_S_SDE_Pos 20U /*!< \deprecated CoreDebug DHCSR: S_SDE Position */ +#define CoreDebug_DHCSR_S_SDE_Msk (1UL << CoreDebug_DHCSR_S_SDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SDE Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define 
CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_PMOV_Pos 6U /*!< \deprecated CoreDebug DHCSR: C_PMOV Position */ +#define CoreDebug_DHCSR_C_PMOV_Msk (1UL << CoreDebug_DHCSR_C_PMOV_Pos) /*!< \deprecated CoreDebug DHCSR: C_PMOV Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug 
DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Set Clear Exception and Monitor Control Register Definitions */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_REQ_Pos 3U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_SET_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_PEND_Pos 1U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_SET_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_UIDEN_Pos 10U /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_UIDAPEN_Pos 9U /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDAPEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDAPEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Mask */ + +#define 
CoreDebug_DAUTHCTRL_FSDMA_Pos 8U /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Position */ +#define CoreDebug_DAUTHCTRL_FSDMA_Msk (1UL << CoreDebug_DAUTHCTRL_FSDMA_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
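+ \note Illustrative usage sketch (editorial addition, not part of the original CMSIS text): DHCSR.C_DEBUGEN is set by an external debugger when halting debug is enabled, so application code sometimes reads it to decide whether, for example, to emit extra diagnostics. + \code + if ((DCB->DHCSR & DCB_DHCSR_C_DEBUGEN_Msk) != 0UL) + { + // a halting-mode debugger is attached + } + \endcode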
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_FPD_Pos 23U /*!< DCB DHCSR: Floating-point registers Debuggable Position */ +#define DCB_DHCSR_S_FPD_Msk (0x1UL << DCB_DHCSR_S_FPD_Pos) /*!< DCB DHCSR: Floating-point registers Debuggable Mask */ + +#define DCB_DHCSR_S_SUIDE_Pos 22U /*!< DCB DHCSR: Secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_SUIDE_Msk (0x1UL << DCB_DHCSR_S_SUIDE_Pos) /*!< DCB DHCSR: Secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_NSUIDE_Pos 21U /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_NSUIDE_Msk (0x1UL << DCB_DHCSR_S_NSUIDE_Pos) /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_PMOV_Pos 6U /*!< DCB DHCSR: Halt on PMU overflow control Position */ +#define DCB_DHCSR_C_PMOV_Msk (0x1UL << DCB_DHCSR_C_PMOV_Pos) /*!< DCB DHCSR: Halt on PMU overflow control Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << 
DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask 
*/ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DSCEMCR, Debug Set Clear Exception and Monitor Control Register Definitions */ +#define DCB_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< DCB DSCEMCR: Clear monitor request Position */ +#define DCB_DSCEMCR_CLR_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_REQ_Pos) /*!< DCB DSCEMCR: Clear monitor request Mask */ + +#define DCB_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< DCB DSCEMCR: Clear monitor pend Position */ +#define DCB_DSCEMCR_CLR_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_PEND_Pos) /*!< DCB DSCEMCR: Clear monitor pend Mask */ + +#define DCB_DSCEMCR_SET_MON_REQ_Pos 3U /*!< DCB DSCEMCR: Set monitor request Position */ +#define DCB_DSCEMCR_SET_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_SET_MON_REQ_Pos) /*!< DCB DSCEMCR: Set monitor request Mask */ + +#define DCB_DSCEMCR_SET_MON_PEND_Pos 1U /*!< DCB DSCEMCR: Set monitor pend Position */ +#define DCB_DSCEMCR_SET_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_SET_MON_PEND_Pos) /*!< DCB DSCEMCR: Set monitor pend Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_UIDEN_Pos 10U /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Position */ +#define DCB_DAUTHCTRL_UIDEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Mask */ + +#define DCB_DAUTHCTRL_UIDAPEN_Pos 9U /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Position */ +#define DCB_DAUTHCTRL_UIDAPEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDAPEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Mask */ + +#define DCB_DAUTHCTRL_FSDMA_Pos 8U /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Position */ +#define DCB_DAUTHCTRL_FSDMA_Msk (0x1UL << DCB_DAUTHCTRL_FSDMA_Pos) /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Mask */ + +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< 
DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
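+ \note Illustrative usage sketch (editorial addition, not part of the original CMSIS text): DAUTHSTATUS reports, field by field, which debug features the authentication interface currently allows; each field is two bits wide and can be extracted with the _FLD2VAL() helper defined later in this header. + \code + uint32_t sid = _FLD2VAL(DIB_DAUTHSTATUS_SID, DIB->DAUTHSTATUS); // 2-bit Secure Invasive Debug field + \endcode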
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SUNID_Pos 22U /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUNID_Msk (0x3UL << DIB_DAUTHSTATUS_SUNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SUID_Pos 20U /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUID_Msk (0x3UL << DIB_DAUTHSTATUS_SUID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_NSUNID_Pos 18U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_NSUNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_NSUID_Pos 16U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_NSUID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << 
DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1UL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as a uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit field value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as a uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. 
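+ \note Illustrative usage sketch (editorial addition, not part of the original CMSIS text): the pointer macros in this group give register-level access to the core peripherals and combine naturally with the _VAL2FLD()/_FLD2VAL() helpers defined above; SCB_AIRCR_PRIGROUP and SCB_AIRCR_VECTKEY are the standard CMSIS field macros for the SCB AIRCR register. + \code + uint32_t prigroup = _FLD2VAL(SCB_AIRCR_PRIGROUP, SCB->AIRCR); // extract the PRIGROUP field + uint32_t keyfield = _VAL2FLD(SCB_AIRCR_VECTKEY, 0x5FAUL); // build the VECTKEY write value + \endcode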
+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + #define PMU_BASE (0xE0003000UL) /*!< PMU Base Address */ + #define PMU ((PMU_Type *) PMU_BASE ) /*!< PMU configuration struct */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define 
CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
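+ \note Illustrative usage sketch (editorial addition, not part of the original CMSIS text): a typical enable sequence sets the priority before enabling the interrupt; WWDG_IRQn is a hypothetical device-specific interrupt name and 5U a hypothetical priority value. + \code + NVIC_SetPriority(WWDG_IRQn, 5U); // hypothetical IRQ name and priority + NVIC_EnableIRQ(WWDG_IRQn); + \endcode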
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
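+ \note Illustrative example (editorial addition, not part of the original CMSIS text): on a device with __NVIC_PRIO_BITS equal to 4, NVIC_SetPriorityGrouping(5U) leaves 2 implemented bits of group (preempting) priority and 2 implemented bits of subpriority. + \code + NVIC_SetPriorityGrouping(5U); // hypothetical grouping value + \endcode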
+ */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. 
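+
+   Example (illustrative sketch; TIMx_IRQn is a placeholder for a device
+   specific interrupt number from the device header):
+   \code
+   NVIC_SetPendingIRQ(TIMx_IRQn);     // force the interrupt pending
+   NVIC_ClearPendingIRQ(TIMx_IRQn);   // discard the pending request again
+   \endcode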
+ \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
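+
+   Example (illustrative sketch, assuming __NVIC_PRIO_BITS == 4; TIMx_IRQn is a
+   placeholder device interrupt number):
+   \code
+   NVIC_SetPriority(TIMx_IRQn, 5U);                                   // device specific interrupt
+   NVIC_SetPriority(SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL);   // SysTick exception, lowest priority
+   \endcode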
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
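+
+   Example (illustrative sketch; only meaningful when executed from the secure
+   state):
+   \code
+   TZ_NVIC_SetPriorityGrouping_NS(3U);                        // configure the non-secure PRIGROUP field
+   uint32_t grouping_ns = TZ_NVIC_GetPriorityGrouping_NS();   // reads back 3
+   \endcode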
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
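+
+   Example (illustrative sketch; TIMx_IRQn is a placeholder non-secure device
+   interrupt number, queried from the secure state):
+   \code
+   if (TZ_NVIC_GetActive_NS(TIMx_IRQn) != 0U)
+   {
+     // the non-secure handler of TIMx_IRQn is currently active
+   }
+   \endcode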
+ \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## PMU functions and events #################################### */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + +#include "pmu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + +/* ########################## MVE functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_MveFunctions MVE Functions + \brief Function that provides MVE type. 
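+
+   Example (illustrative sketch):
+   \code
+   switch (SCB_GetMVEType())
+   {
+     case 2U: break;   // MVE-F: integer and floating-point vector instructions
+     case 1U: break;   // MVE-I: integer vector instructions only
+     default: break;   // no vector extension implemented
+   }
+   \endcode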
+ @{ + */ + +/** + \brief get MVE type + \details returns the MVE type + \returns + - \b 0: No Vector Extension (MVE) + - \b 1: Integer Vector Extension (MVE-I) + - \b 2: Floating-point Vector Extension (MVE-F) + */ +__STATIC_INLINE uint32_t SCB_GetMVEType(void) +{ + const uint32_t mvfr1 = FPU->MVFR1; + if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x2U << FPU_MVFR1_MVE_Pos)) + { + return 2U; + } + else if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x1U << FPU_MVFR1_MVE_Pos)) + { + return 1U; + } + else + { + return 0U; + } +} + + +/*@} end of CMSIS_Core_MveFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. 
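+
+   Example (illustrative sketch; executed from the secure state, using the
+   DCB_DAUTHCTRL_* field macros of this header):
+   \code
+   uint32_t ctrl = TZ_DCB_GetAuthCtrl_NS();
+   ctrl |= DCB_DAUTHCTRL_INTSPIDEN_Msk;   // allow internal secure invasive debug
+   TZ_DCB_SetAuthCtrl_NS(ctrl);
+   \endcode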
+ */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
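+
+   Example (illustrative sketch; SystemCoreClock_NS is a placeholder for the
+   non-secure core clock frequency and is not defined in this file):
+   \code
+   if (TZ_SysTick_Config_NS(SystemCoreClock_NS / 1000U) != 0U)
+   {
+     // reload value does not fit into the 24-bit SysTick counter
+   }
+   \endcode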
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV81MML_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_armv8mbl.h b/Drivers/CMSIS/Include/core_armv8mbl.h index 251e4ede..932d3d18 100644 --- a/Drivers/CMSIS/Include/core_armv8mbl.h +++ b/Drivers/CMSIS/Include/core_armv8mbl.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_armv8mbl.h * @brief CMSIS Armv8-M Baseline Core Peripheral Access Layer Header File - * @version V5.0.7 - * @date 22. June 2018 + * @version V5.1.0 + * @date 27. March 2020 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. 
+ * Copyright (c) 2009-2020 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -23,9 +23,11 @@ */ #if defined ( __ICCARM__ ) - #pragma system_include /* treat file as system include file for MISRA check */ + #pragma system_include /* treat file as system include file for MISRA check */ #elif defined (__clang__) - #pragma clang system_header /* treat file as system include file */ + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ #endif #ifndef __CORE_ARMV8MBL_H_GENERIC @@ -68,7 +70,7 @@ #define __ARMv8MBL_CMSIS_VERSION ((__ARMv8MBL_CMSIS_VERSION_MAIN << 16U) | \ __ARMv8MBL_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ -#define __CORTEX_M ( 2U) /*!< Cortex-M Core */ +#define __CORTEX_M (2U) /*!< Cortex-M Core */ /** __FPU_USED indicates whether an FPU is used or not. This core does not support an FPU at all @@ -81,7 +83,7 @@ #endif #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) - #if defined __ARM_PCS_VFP + #if defined __ARM_FP #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" #endif @@ -975,6 +977,7 @@ typedef struct #endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ /** \ingroup CMSIS_core_register \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) @@ -983,7 +986,7 @@ typedef struct */ /** - \brief Structure type to access the Core Debug Register (CoreDebug). + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). */ typedef struct { @@ -991,91 +994,276 @@ typedef struct __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ - uint32_t RESERVED4[1U]; + uint32_t RESERVED0[1U]; __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ } CoreDebug_Type; /* Debug Halting Control and Status Register Definitions */ -#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ -#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ -#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< CoreDebug DHCSR: S_RESTART_ST Position */ -#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< CoreDebug DHCSR: S_RESTART_ST Mask */ +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ -#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ -#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ 
+#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ -#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ -#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ -#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ -#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ -#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ -#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ -#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ -#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ -#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ -#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ -#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ -#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ -#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ -#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ -#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ -#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ -#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< 
CoreDebug DHCSR: C_DEBUGEN Position */ -#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ /* Debug Core Register Selector Register Definitions */ -#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ -#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ -#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ -#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ -/* Debug Exception and Monitor Control Register */ -#define CoreDebug_DEMCR_DWTENA_Pos 24U /*!< CoreDebug DEMCR: DWTENA Position */ -#define CoreDebug_DEMCR_DWTENA_Msk (1UL << CoreDebug_DEMCR_DWTENA_Pos) /*!< CoreDebug DEMCR: DWTENA Mask */ +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_DWTENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: DWTENA Position */ +#define CoreDebug_DEMCR_DWTENA_Msk (1UL << CoreDebug_DEMCR_DWTENA_Pos) /*!< \deprecated CoreDebug DEMCR: DWTENA Mask */ -#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ -#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ -#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ -#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ /* Debug Authentication Control Register Definitions */ -#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ -#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ -#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Position */ -#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug 
DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ -#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< CoreDebug DAUTHCTRL: INTSPIDEN Position */ -#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPIDEN Mask */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ -#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< CoreDebug DAUTHCTRL: SPIDENSEL Position */ -#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< CoreDebug DAUTHCTRL: SPIDENSEL Mask */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ /* Debug Security Control and Status Register Definitions */ -#define CoreDebug_DSCSR_CDS_Pos 16U /*!< CoreDebug DSCSR: CDS Position */ -#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< CoreDebug DSCSR: CDS Mask */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ -#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< CoreDebug DSCSR: SBRSEL Position */ -#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< CoreDebug DSCSR: SBRSEL Mask */ +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ -#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< CoreDebug DSCSR: SBRSELEN Position */ -#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< CoreDebug DSCSR: SBRSELEN Mask */ +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ /*@} end of group CMSIS_CoreDebug */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
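+
+  Example (illustrative sketch; reads the halting debug enable bit through the
+  DCB instance and bit field macros defined further below):
+  \code
+  if ((DCB->DHCSR & DCB_DHCSR_C_DEBUGEN_Msk) != 0UL)
+  {
+    // a debugger has enabled halting debug
+  }
+  \endcode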
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk 
(0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
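+
+  Example (illustrative sketch; extracts the non-secure invasive debug
+  allowance using the DIB instance and field macros defined further below):
+  \code
+  uint32_t nsid = (DIB->DAUTHSTATUS & DIB_DAUTHSTATUS_NSID_Msk) >> DIB_DAUTHSTATUS_NSID_Pos;
+  \endcode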
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + /** \ingroup CMSIS_core_register \defgroup CMSIS_core_bitfield Core register bit field macros @@ -1113,7 +1301,9 @@ typedef struct #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ - #define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ @@ -1124,7 +1314,9 @@ typedef struct #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ - #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< Core Debug configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ @@ -1138,7 +1330,9 @@ typedef struct #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ - #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< Core Debug Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ @@ -1146,7 +1340,9 @@ typedef struct #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ - #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< Core Debug configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ #if defined 
(__MPU_PRESENT) && (__MPU_PRESENT == 1U) #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ @@ -1163,6 +1359,7 @@ typedef struct Core Function Interface contains: - Core NVIC Functions - Core SysTick Functions + - Core Debug Functions - Core Register Access Functions ******************************************************************************/ /** @@ -1223,7 +1420,7 @@ typedef struct #define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ #define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ #define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ -#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ #define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ /* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ @@ -1253,7 +1450,9 @@ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) { if ((int32_t)(IRQn) >= 0) { + __COMPILER_BARRIER(); NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); } } @@ -1552,6 +1751,7 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) uint32_t *vectors = (uint32_t *)0x0U; #endif vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); } @@ -1835,6 +2035,110 @@ __STATIC_INLINE void TZ_SAU_Disable(void) +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. 
+ */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + /* ################################## SysTick function ############################################ */ /** \ingroup CMSIS_Core_FunctionInterface diff --git a/Drivers/CMSIS/Include/core_armv8mml.h b/Drivers/CMSIS/Include/core_armv8mml.h index 3a3148ea..c119fbf2 100644 --- a/Drivers/CMSIS/Include/core_armv8mml.h +++ b/Drivers/CMSIS/Include/core_armv8mml.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_armv8mml.h * @brief CMSIS Armv8-M Mainline Core Peripheral Access Layer Header File - * @version V5.0.7 - * @date 06. July 2018 + * @version V5.2.3 + * @date 13. October 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -23,9 +23,11 @@ */ #if defined ( __ICCARM__ ) - #pragma system_include /* treat file as system include file for MISRA check */ + #pragma system_include /* treat file as system include file for MISRA check */ #elif defined (__clang__) - #pragma clang system_header /* treat file as system include file */ + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ #endif #ifndef __CORE_ARMV8MML_H_GENERIC @@ -68,7 +70,7 @@ #define __ARMv8MML_CMSIS_VERSION ((__ARMv8MML_CMSIS_VERSION_MAIN << 16U) | \ __ARMv8MML_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ -#define __CORTEX_M (81U) /*!< Cortex-M Core */ +#define __CORTEX_M (80U) /*!< Cortex-M Core */ /** __FPU_USED indicates whether an FPU is used or not. For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. @@ -97,7 +99,7 @@ #endif #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) - #if defined __ARM_PCS_VFP + #if defined __ARM_FP #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) #define __FPU_USED 1U #else @@ -248,6 +250,11 @@ #warning "__DSP_PRESENT not defined in device header file; using default!" 
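For context, a minimal sketch of how the DIB accessor introduced above might be used; the helper name is an assumption for illustration, while DIB_GetAuthStatus() and the DIB_DAUTHSTATUS_SID_* definitions are the ones added in this patch. The meaning of the 2-bit field value is defined by the Armv8-M architecture, not by this header.

/* Hypothetical helper: extract the Secure invasive debug field from DAUTHSTATUS. */
static uint32_t secure_invasive_debug_status(void)
{
  /* DIB_GetAuthStatus() returns DIB->DAUTHSTATUS as added in this update. */
  return (DIB_GetAuthStatus() & DIB_DAUTHSTATUS_SID_Msk) >> DIB_DAUTHSTATUS_SID_Pos;
}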
#endif + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" @@ -512,7 +519,7 @@ typedef struct __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ - __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ @@ -521,7 +528,10 @@ typedef struct __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ - uint32_t RESERVED3[92U]; + uint32_t RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ uint32_t RESERVED4[15U]; __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ @@ -538,14 +548,7 @@ typedef struct __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ - uint32_t RESERVED7[6U]; - __IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */ - __IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */ - __IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */ - __IOM uint32_t CACR; /*!< Offset: 0x29C (R/W) L1 Cache Control Register */ - __IOM uint32_t AHBSCR; /*!< Offset: 0x2A0 (R/W) AHB Slave Control Register */ - uint32_t RESERVED8[1U]; - __IOM uint32_t ABFSR; /*!< Offset: 0x2A8 (R/W) Auxiliary Bus Fault Status Register */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ } SCB_Type; /* SCB CPUID Register Definitions */ @@ -746,22 +749,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): 
MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -921,78 +924,6 @@ typedef struct #define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ #define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ -/* Instruction Tightly-Coupled Memory Control Register Definitions */ -#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ -#define SCB_ITCMCR_SZ_Msk (0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ - -#define SCB_ITCMCR_RETEN_Pos 2U /*!< SCB ITCMCR: RETEN Position */ -#define SCB_ITCMCR_RETEN_Msk (1UL << SCB_ITCMCR_RETEN_Pos) /*!< SCB ITCMCR: RETEN Mask */ - -#define SCB_ITCMCR_RMW_Pos 1U /*!< SCB ITCMCR: RMW Position */ -#define SCB_ITCMCR_RMW_Msk (1UL << SCB_ITCMCR_RMW_Pos) /*!< SCB ITCMCR: RMW Mask */ - -#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ -#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ - -/* Data Tightly-Coupled Memory Control Register Definitions */ -#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ -#define SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */ - -#define SCB_DTCMCR_RETEN_Pos 2U /*!< SCB DTCMCR: RETEN Position */ -#define SCB_DTCMCR_RETEN_Msk (1UL << SCB_DTCMCR_RETEN_Pos) /*!< SCB DTCMCR: RETEN Mask */ - -#define SCB_DTCMCR_RMW_Pos 1U /*!< SCB DTCMCR: RMW Position */ -#define SCB_DTCMCR_RMW_Msk (1UL << SCB_DTCMCR_RMW_Pos) /*!< SCB DTCMCR: RMW Mask */ - -#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */ -#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */ - -/* AHBP Control Register Definitions */ -#define SCB_AHBPCR_SZ_Pos 1U /*!< SCB AHBPCR: SZ Position */ -#define SCB_AHBPCR_SZ_Msk (7UL << SCB_AHBPCR_SZ_Pos) /*!< SCB AHBPCR: SZ Mask */ - -#define SCB_AHBPCR_EN_Pos 0U /*!< SCB AHBPCR: EN Position */ -#define SCB_AHBPCR_EN_Msk (1UL /*<< SCB_AHBPCR_EN_Pos*/) /*!< SCB AHBPCR: EN Mask */ - -/* L1 Cache Control Register Definitions */ -#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ -#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ - -#define SCB_CACR_ECCEN_Pos 1U /*!< SCB CACR: ECCEN Position */ -#define SCB_CACR_ECCEN_Msk (1UL << 
SCB_CACR_ECCEN_Pos) /*!< SCB CACR: ECCEN Mask */ - -#define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */ -#define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: SIWT Mask */ - -/* AHBS Control Register Definitions */ -#define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */ -#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBPCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */ - -#define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */ -#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBPCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */ - -#define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/ -#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBPCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */ - -/* Auxiliary Bus Fault Status Register Definitions */ -#define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/ -#define SCB_ABFSR_AXIMTYPE_Msk (3UL << SCB_ABFSR_AXIMTYPE_Pos) /*!< SCB ABFSR: AXIMTYPE Mask */ - -#define SCB_ABFSR_EPPB_Pos 4U /*!< SCB ABFSR: EPPB Position*/ -#define SCB_ABFSR_EPPB_Msk (1UL << SCB_ABFSR_EPPB_Pos) /*!< SCB ABFSR: EPPB Mask */ - -#define SCB_ABFSR_AXIM_Pos 3U /*!< SCB ABFSR: AXIM Position*/ -#define SCB_ABFSR_AXIM_Msk (1UL << SCB_ABFSR_AXIM_Pos) /*!< SCB ABFSR: AXIM Mask */ - -#define SCB_ABFSR_AHBP_Pos 2U /*!< SCB ABFSR: AHBP Position*/ -#define SCB_ABFSR_AHBP_Msk (1UL << SCB_ABFSR_AHBP_Pos) /*!< SCB ABFSR: AHBP Mask */ - -#define SCB_ABFSR_DTCM_Pos 1U /*!< SCB ABFSR: DTCM Position*/ -#define SCB_ABFSR_DTCM_Msk (1UL << SCB_ABFSR_DTCM_Pos) /*!< SCB ABFSR: DTCM Mask */ - -#define SCB_ABFSR_ITCM_Pos 0U /*!< SCB ABFSR: ITCM Position*/ -#define SCB_ABFSR_ITCM_Msk (1UL /*<< SCB_ABFSR_ITCM_Pos*/) /*!< SCB ABFSR: ITCM Mask */ - /*@} end of group CMSIS_SCB */ @@ -1097,10 +1028,7 @@ typedef struct __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ uint32_t RESERVED2[15U]; __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ - uint32_t RESERVED3[29U]; - __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ - __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ - __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED3[32U]; uint32_t RESERVED4[43U]; __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ @@ -1163,18 +1091,6 @@ typedef struct #define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ #define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ -/* ITM Integration Write Register Definitions */ -#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ -#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ - -/* ITM Integration Read Register Definitions */ -#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ -#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ - -/* ITM Integration Mode Control Register Definitions */ -#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ -#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ - /* ITM Lock Status Register Definitions */ #define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ #define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ @@ -1688,8 +1604,9 @@ typedef struct __IOM 
uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ - __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ - __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ } FPU_Type; /* Floating-Point Context Control Register Definitions */ @@ -1761,7 +1678,7 @@ typedef struct #define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ #define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ -/* Media and FP Feature Register 0 Definitions */ +/* Media and VFP Feature Register 0 Definitions */ #define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ #define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ @@ -1786,7 +1703,7 @@ typedef struct #define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ #define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ -/* Media and FP Feature Register 1 Definitions */ +/* Media and VFP Feature Register 1 Definitions */ #define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ #define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ @@ -1799,9 +1716,13 @@ typedef struct #define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ #define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ -/*@} end of group CMSIS_FPU */ +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ +/*@} end of group CMSIS_FPU */ +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ /** \ingroup CMSIS_core_register \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) @@ -1810,7 +1731,7 @@ typedef struct */ /** - \brief Structure type to access the Core Debug Register (CoreDebug). + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). 
*/ typedef struct { @@ -1818,124 +1739,354 @@ typedef struct __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ - uint32_t RESERVED4[1U]; + uint32_t RESERVED0[1U]; __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ } CoreDebug_Type; /* Debug Halting Control and Status Register Definitions */ -#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ -#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ -#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< CoreDebug DHCSR: S_RESTART_ST Position */ -#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< CoreDebug DHCSR: S_RESTART_ST Mask */ +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ -#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ -#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ -#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ -#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ -#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ -#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ -#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ -#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ -#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ -#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define 
CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ -#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ -#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ -#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ -#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ -#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ -#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ -#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ -#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ -#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ -#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ -#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ -#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ /* Debug Core Register Selector Register Definitions */ -#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ -#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ -#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ -#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ /* 
Debug Exception and Monitor Control Register Definitions */ -#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ -#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ -#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ -#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ -#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ -#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ -#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ -#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ -#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ -#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug DEMCR: MON_EN Mask */ -#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ -#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ -#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ -#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ -#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ -#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ -#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ -#define 
CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ -#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ -#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ -#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ -#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ -#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ -#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ -#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ -#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ /* Debug Authentication Control Register Definitions */ -#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ -#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ -#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Position */ -#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ -#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< CoreDebug DAUTHCTRL: INTSPIDEN Position */ -#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPIDEN Mask */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) 
/*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ -#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< CoreDebug DAUTHCTRL: SPIDENSEL Position */ -#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< CoreDebug DAUTHCTRL: SPIDENSEL Mask */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ /* Debug Security Control and Status Register Definitions */ -#define CoreDebug_DSCSR_CDS_Pos 16U /*!< CoreDebug DSCSR: CDS Position */ -#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< CoreDebug DSCSR: CDS Mask */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ -#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< CoreDebug DSCSR: SBRSEL Position */ -#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< CoreDebug DSCSR: SBRSEL Mask */ +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ -#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< CoreDebug DSCSR: SBRSELEN Position */ -#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< CoreDebug DSCSR: SBRSELEN Mask */ +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ /*@} end of group CMSIS_CoreDebug */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define 
DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL 
<< DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + /** \ingroup CMSIS_core_register \defgroup CMSIS_core_bitfield Core register bit field macros @@ -1974,7 +2125,9 @@ typedef struct #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ - #define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ @@ -1986,7 +2139,9 @@ typedef struct #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ - #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< Core Debug configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ @@ -2003,7 +2158,9 @@ typedef struct #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ - #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< Core Debug Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ @@ -2012,7 +2169,9 @@ typedef struct #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ - #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< Core Debug configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ #if defined (__MPU_PRESENT) && 
(__MPU_PRESENT == 1U) #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ @@ -2026,6 +2185,15 @@ typedef struct /*@} */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ +/*@} */ + /******************************************************************************* * Hardware Abstraction Layer @@ -2093,7 +2261,7 @@ typedef struct #define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ #define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ #define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ -#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ #define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ /* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ @@ -2122,7 +2290,7 @@ __STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ reg_value = (reg_value | ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | - (PriorityGroupTmp << 8U) ); /* Insert write key and priorty group */ + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ SCB->AIRCR = reg_value; } @@ -2148,7 +2316,9 @@ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) { if ((int32_t)(IRQn) >= 0) { + __COMPILER_BARRIER(); NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); } } @@ -2440,6 +2610,7 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) { uint32_t *vectors = (uint32_t *)SCB->VTOR; vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); } @@ -2496,7 +2667,7 @@ __STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ reg_value = (reg_value | ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | - (PriorityGroupTmp << 8U) ); /* Insert write key and priorty group */ + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ SCB_NS->AIRCR = reg_value; } @@ -2728,6 +2899,13 @@ __STATIC_INLINE uint32_t SCB_GetFPUType(void) /*@} end of CMSIS_Core_FpuFunctions */ +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif + /* ########################## SAU functions #################################### */ /** @@ -2766,6 +2944,110 @@ __STATIC_INLINE void TZ_SAU_Disable(void) +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. 
+ @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + /* ################################## SysTick function ############################################ */ /** \ingroup CMSIS_Core_FunctionInterface diff --git a/Drivers/CMSIS/Include/core_cm0.h b/Drivers/CMSIS/Include/core_cm0.h index f929bba0..6441ff34 100644 --- a/Drivers/CMSIS/Include/core_cm0.h +++ b/Drivers/CMSIS/Include/core_cm0.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm0.h * @brief CMSIS Cortex-M0 Core Peripheral Access Layer Header File - * @version V5.0.5 - * @date 28. May 2018 + * @version V5.0.8 + * @date 21. August 2019 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2019 Arm Limited. All rights reserved. 
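The CoreDebug definitions above are only deprecated, not removed, so existing code keeps compiling; new code can switch to the DCB spelling. A short migration sketch, assuming the usual device header has already pulled in this core header (the function names are illustrative only):

/* Deprecated spelling, still accepted:
 *   CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;
 */
static void trace_enable(void)
{
  DCB->DEMCR |= DCB_DEMCR_TRCENA_Msk;   /* same register, accessed through the new DCB struct */
}

static void internal_secure_invasive_debug_enable(void)
{
  /* DCB_SetAuthCtrl() already wraps the DAUTHCTRL write with the DSB/ISB pairs shown above. */
  DCB_SetAuthCtrl(DCB_GetAuthCtrl() | DCB_DAUTHCTRL_INTSPIDEN_Msk);
}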
* * SPDX-License-Identifier: Apache-2.0 * @@ -81,7 +81,7 @@ #endif #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) - #if defined __ARM_PCS_VFP + #if defined __ARM_FP #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" #endif @@ -316,7 +316,7 @@ typedef struct __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ uint32_t RESERVED0[31U]; __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ - uint32_t RSERVED1[31U]; + uint32_t RESERVED1[31U]; __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ uint32_t RESERVED2[31U]; __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ @@ -624,7 +624,9 @@ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) { if ((int32_t)(IRQn) >= 0) { + __COMPILER_BARRIER(); NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); } } @@ -829,8 +831,9 @@ __STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGr */ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) { - uint32_t *vectors = (uint32_t *)0x0U; - vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + *(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to access vector */ + /* ARM Application Note 321 states that the M0 does not require the architectural barrier */ } @@ -844,8 +847,8 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) */ __STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) { - uint32_t *vectors = (uint32_t *)0x0U; - return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to access vector */ } diff --git a/Drivers/CMSIS/Include/core_cm0plus.h b/Drivers/CMSIS/Include/core_cm0plus.h index 424011ac..4e7179a6 100644 --- a/Drivers/CMSIS/Include/core_cm0plus.h +++ b/Drivers/CMSIS/Include/core_cm0plus.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm0plus.h * @brief CMSIS Cortex-M0+ Core Peripheral Access Layer Header File - * @version V5.0.6 - * @date 28. May 2018 + * @version V5.0.9 + * @date 21. August 2019 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2019 Arm Limited. All rights reserved. 
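The __NVIC_EnableIRQ hunks in these headers wrap the ISER write with __COMPILER_BARRIER() on both sides, so the optimiser can no longer move peripheral configuration stores across the point where the interrupt becomes able to fire. A minimal caller, with placeholder names for the device-specific pieces (UARTx_IRQn and uartx_unmask_rx_interrupt() are hypothetical):

    /* Sketch: configure and prioritise first, enable last. */
    #include "device.h"

    void uart_irq_setup(void)
    {
        NVIC_SetPriority(UARTx_IRQn, 1U);        /* set the priority while the IRQ is still disabled */

        uartx_unmask_rx_interrupt();             /* last peripheral-side configuration store...       */
        NVIC_EnableIRQ(UARTx_IRQn);              /* ...which the compiler barriers keep ahead of ISER */
    }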
* * SPDX-License-Identifier: Apache-2.0 * @@ -81,7 +81,7 @@ #endif #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) - #if defined __ARM_PCS_VFP + #if defined __ARM_FP #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" #endif @@ -330,7 +330,7 @@ typedef struct __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ uint32_t RESERVED0[31U]; __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ - uint32_t RSERVED1[31U]; + uint32_t RESERVED1[31U]; __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ uint32_t RESERVED2[31U]; __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ @@ -742,7 +742,9 @@ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) { if ((int32_t)(IRQn) >= 0) { + __COMPILER_BARRIER(); NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); } } @@ -949,10 +951,12 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) { #if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; #else - uint32_t *vectors = (uint32_t *)0x0U; + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + *(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to access vector */ #endif - vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M0+ does not require the architectural barrier */ } @@ -968,11 +972,11 @@ __STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) { #if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; #else - uint32_t *vectors = (uint32_t *)0x0U; + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to access vector */ #endif - return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; - } diff --git a/Drivers/CMSIS/Include/core_cm1.h b/Drivers/CMSIS/Include/core_cm1.h index 0ed678e3..76b45697 100644 --- a/Drivers/CMSIS/Include/core_cm1.h +++ b/Drivers/CMSIS/Include/core_cm1.h @@ -1,8 +1,8 @@ /**************************************************************************//** * @file core_cm1.h * @brief CMSIS Cortex-M1 Core Peripheral Access Layer Header File - * @version V1.0.0 - * @date 23. July 2018 + * @version V1.0.1 + * @date 12. November 2018 ******************************************************************************/ /* * Copyright (c) 2009-2018 Arm Limited. All rights reserved. 
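With __VTOR_PRESENT the Cortex-M0+ variants of __NVIC_SetVector/__NVIC_GetVector above index the table through SCB->VTOR, which is only useful once the table lives in writable memory. A sketch of the usual sequence (copy the vectors to SRAM, retarget VTOR, then hook a handler); VECTOR_COUNT, the 256-byte alignment and TIMERx_IRQn are device-specific assumptions, not values taken from this patch:

    /* Sketch: relocate the vector table to RAM so NVIC_SetVector() can patch it. */
    #include <string.h>
    #include "device.h"

    #define VECTOR_COUNT   (16U + 32U)                    /* 16 system entries + device IRQs (hypothetical) */

    static uint32_t ram_vectors[VECTOR_COUNT] __ALIGNED(256);

    void timer_irq_handler(void);                         /* replacement handler (hypothetical) */

    void relocate_vectors_and_hook(void)
    {
        memcpy(ram_vectors, (const void *)SCB->VTOR, sizeof(ram_vectors));

        __disable_irq();
        SCB->VTOR = (uint32_t)ram_vectors;                /* table is now writable */
        __DSB();
        NVIC_SetVector(TIMERx_IRQn, (uint32_t)timer_irq_handler);
        __enable_irq();
    }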
@@ -81,7 +81,7 @@ #endif #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) - #if defined __ARM_PCS_VFP + #if defined __ARM_FP #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" #endif @@ -651,7 +651,9 @@ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) { if ((int32_t)(IRQn) >= 0) { + __COMPILER_BARRIER(); NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); } } @@ -858,6 +860,7 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) { uint32_t *vectors = (uint32_t *)0x0U; vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M1 does not require the architectural barrier */ } diff --git a/Drivers/CMSIS/Include/core_cm23.h b/Drivers/CMSIS/Include/core_cm23.h index acbc5dfe..55fff995 100644 --- a/Drivers/CMSIS/Include/core_cm23.h +++ b/Drivers/CMSIS/Include/core_cm23.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm23.h * @brief CMSIS Cortex-M23 Core Peripheral Access Layer Header File - * @version V5.0.7 - * @date 22. June 2018 + * @version V5.1.0 + * @date 11. February 2020 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -23,9 +23,11 @@ */ #if defined ( __ICCARM__ ) - #pragma system_include /* treat file as system include file for MISRA check */ + #pragma system_include /* treat file as system include file for MISRA check */ #elif defined (__clang__) - #pragma clang system_header /* treat file as system include file */ + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ #endif #ifndef __CORE_CM23_H_GENERIC @@ -81,7 +83,7 @@ #endif #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) - #if defined __ARM_PCS_VFP + #if defined __ARM_FP #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" #endif @@ -1050,6 +1052,7 @@ typedef struct #endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ /** \ingroup CMSIS_core_register \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) @@ -1058,7 +1061,7 @@ typedef struct */ /** - \brief Structure type to access the Core Debug Register (CoreDebug). + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). 
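The comment added above marks the whole CoreDebug block as deprecated in favour of the DCB (Debug Control Block) definitions introduced later in this hunk; the old names keep compiling, but new code can switch to the DCB spelling. The most common user-level idiom, checking whether a debug host has set C_DEBUGEN, looks like this with the new names (DCB and DCB_DHCSR_C_DEBUGEN_Msk come from the definitions added further down, everything else is a placeholder):

    /* Sketch: break into the debugger on assert only when one is attached. */
    #include "device.h"

    static int debugger_attached(void)
    {
        /* previously: CoreDebug->DHCSR & CoreDebug_DHCSR_C_DEBUGEN_Msk */
        return (DCB->DHCSR & DCB_DHCSR_C_DEBUGEN_Msk) != 0U;
    }

    void assert_failed(const char *file, int line)
    {
        (void)file; (void)line;
        if (debugger_attached())
        {
            __BKPT(0);                 /* halt under debugger control */
        }
        for (;;) { /* no debugger attached: park */ }
    }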
*/ typedef struct { @@ -1066,91 +1069,276 @@ typedef struct __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ - uint32_t RESERVED4[1U]; + uint32_t RESERVED0[1U]; __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ } CoreDebug_Type; /* Debug Halting Control and Status Register Definitions */ -#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ -#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ -#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< CoreDebug DHCSR: S_RESTART_ST Position */ -#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< CoreDebug DHCSR: S_RESTART_ST Mask */ +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ -#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ -#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ -#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ -#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ -#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ -#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ -#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ -#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ -#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ -#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define 
CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ -#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ -#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ -#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ -#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ -#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ -#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ -#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ -#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ -#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ -#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ /* Debug Core Register Selector Register Definitions */ -#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ -#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ -#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ -#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ /* Debug Exception and Monitor Control Register */ -#define CoreDebug_DEMCR_DWTENA_Pos 24U /*!< CoreDebug DEMCR: DWTENA Position */ -#define CoreDebug_DEMCR_DWTENA_Msk (1UL << CoreDebug_DEMCR_DWTENA_Pos) /*!< CoreDebug DEMCR: DWTENA Mask */ +#define CoreDebug_DEMCR_DWTENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: DWTENA Position */ +#define CoreDebug_DEMCR_DWTENA_Msk (1UL << CoreDebug_DEMCR_DWTENA_Pos) /*!< \deprecated CoreDebug DEMCR: DWTENA Mask */ 
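The DWTENA bit deprecated just above is the trace-enable gate that the replacement register block later in this patch names DCB_DEMCR_TRCENA. Its typical use is switching on the DWT before using the cycle counter; the sketch below assumes a mainline core (Cortex-M3/M33 class) that actually implements DWT->CYCCNT, which not every core covered by this patch does:

    /* Sketch: enable the DWT cycle counter for rough cycle measurements. */
    #include "device.h"

    void cyccnt_enable(void)
    {
        DCB->DEMCR |= DCB_DEMCR_TRCENA_Msk;       /* old spelling: CoreDebug->DEMCR with the DWTENA/TRCENA bit */
        DWT->CYCCNT = 0U;
        DWT->CTRL  |= DWT_CTRL_CYCCNTENA_Msk;
    }

    uint32_t cyccnt_now(void)
    {
        return DWT->CYCCNT;
    }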
-#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ -#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ -#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ -#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ /* Debug Authentication Control Register Definitions */ -#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ -#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ -#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Position */ -#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ -#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< CoreDebug DAUTHCTRL: INTSPIDEN Position */ -#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPIDEN Mask */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ -#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< CoreDebug DAUTHCTRL: SPIDENSEL Position */ -#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< CoreDebug DAUTHCTRL: SPIDENSEL Mask */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ /* Debug Security Control and Status Register Definitions */ -#define CoreDebug_DSCSR_CDS_Pos 16U /*!< CoreDebug DSCSR: CDS Position */ -#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< CoreDebug DSCSR: CDS Mask */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ -#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< CoreDebug DSCSR: SBRSEL Position */ -#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< CoreDebug DSCSR: SBRSEL Mask */ +#define CoreDebug_DSCSR_SBRSEL_Pos 1U 
/*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ -#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< CoreDebug DSCSR: SBRSELEN Position */ -#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< CoreDebug DSCSR: SBRSELEN Mask */ +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ /*@} end of group CMSIS_CoreDebug */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: 
Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define 
DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). + */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define 
DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + /** \ingroup CMSIS_core_register \defgroup CMSIS_core_bitfield Core register bit field macros @@ -1188,7 +1376,9 @@ typedef struct #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ - #define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ @@ -1199,7 +1389,9 @@ typedef struct #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ - #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< Core Debug configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ @@ -1213,7 +1405,9 @@ typedef struct #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ - #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< Core Debug Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ @@ -1221,7 +1415,9 @@ typedef struct #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct 
(non-secure address space) */ #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ - #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< Core Debug configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ @@ -1238,6 +1434,7 @@ typedef struct Core Function Interface contains: - Core NVIC Functions - Core SysTick Functions + - Core Debug Functions - Core Register Access Functions ******************************************************************************/ /** @@ -1298,17 +1495,17 @@ typedef struct #define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ #define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ #define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ -#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ #define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ /* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ #define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ -#else +#else #define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ #endif - + /* Interrupt Priorities are WORD accessible only under Armv6-M */ /* The following MACROS handle generation of the register offset and byte masks */ #define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) @@ -1328,7 +1525,9 @@ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) { if ((int32_t)(IRQn) >= 0) { + __COMPILER_BARRIER(); NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); } } @@ -1627,6 +1826,7 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) uint32_t *vectors = (uint32_t *)0x0U; #endif vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); } @@ -1910,6 +2110,110 @@ __STATIC_INLINE void TZ_SAU_Disable(void) +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. 
+ */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + /* ################################## SysTick function ############################################ */ /** \ingroup CMSIS_Core_FunctionInterface diff --git a/Drivers/CMSIS/Include/core_cm3.h b/Drivers/CMSIS/Include/core_cm3.h index 74bff64b..74fb87e5 100644 --- a/Drivers/CMSIS/Include/core_cm3.h +++ b/Drivers/CMSIS/Include/core_cm3.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm3.h * @brief CMSIS Cortex-M3 Core Peripheral Access Layer Header File - * @version V5.0.8 - * @date 04. June 2018 + * @version V5.1.2 + * @date 04. June 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. 
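Under the security extension (__ARM_FEATURE_CMSE == 3U) the *_NS accessors above give Secure code a typed path to the Non-secure banked copies of the same debug registers. A purely mechanical sketch of a Secure-side helper; which policy to program into DAUTHCTRL is an integration decision and is not taken from this patch:

    /* Sketch: Secure world snapshots both banked DAUTHCTRL values plus the
       authentication status. Build with the CMSE security extension enabled. */
    #include "device.h"

    typedef struct {
        uint32_t dauthctrl_s;
        uint32_t dauthctrl_ns;
        uint32_t dauthstatus;
    } debug_auth_snapshot_t;

    debug_auth_snapshot_t debug_auth_snapshot(void)
    {
        debug_auth_snapshot_t s;
        s.dauthctrl_s  = DCB_GetAuthCtrl();
        s.dauthctrl_ns = TZ_DCB_GetAuthCtrl_NS();    /* Non-secure banked copy, read from the Secure state */
        s.dauthstatus  = DIB_GetAuthStatus();        /* resulting debug authentication state */
        return s;
    }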
* * SPDX-License-Identifier: Apache-2.0 * @@ -62,7 +62,7 @@ #include "cmsis_version.h" -/* CMSIS CM3 definitions */ +/* CMSIS CM3 definitions */ #define __CM3_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ #define __CM3_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ #define __CM3_CMSIS_VERSION ((__CM3_CMSIS_VERSION_MAIN << 16U) | \ @@ -81,7 +81,7 @@ #endif #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) - #if defined __ARM_PCS_VFP + #if defined __ARM_FP #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" #endif @@ -142,6 +142,11 @@ #warning "__MPU_PRESENT not defined in device header file; using default!" #endif + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" @@ -342,7 +347,7 @@ typedef struct __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ uint32_t RESERVED0[24U]; __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ - uint32_t RSERVED1[24U]; + uint32_t RESERVED1[24U]; __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ uint32_t RESERVED2[24U]; __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ @@ -560,19 +565,19 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -668,6 +673,12 @@ typedef struct #define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< 
SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ /* Auxiliary Control Register Definitions */ +#if defined (__CM3_REV) && (__CM3_REV >= 0x200U) +#define SCnSCB_ACTLR_DISOOFP_Pos 9U /*!< ACTLR: DISOOFP Position */ +#define SCnSCB_ACTLR_DISOOFP_Msk (1UL << SCnSCB_ACTLR_DISOOFP_Pos) /*!< ACTLR: DISOOFP Mask */ + +#define SCnSCB_ACTLR_DISFPCA_Pos 8U /*!< ACTLR: DISFPCA Position */ +#define SCnSCB_ACTLR_DISFPCA_Msk (1UL << SCnSCB_ACTLR_DISFPCA_Pos) /*!< ACTLR: DISFPCA Mask */ #define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ #define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ @@ -677,6 +688,7 @@ typedef struct #define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ #define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ +#endif /*@} end of group CMSIS_SCnotSCB */ @@ -757,10 +769,7 @@ typedef struct __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ uint32_t RESERVED2[15U]; __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ - uint32_t RESERVED3[29U]; - __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ - __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ - __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED3[32U]; uint32_t RESERVED4[43U]; __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ @@ -811,18 +820,6 @@ typedef struct #define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ #define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ -/* ITM Integration Write Register Definitions */ -#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ -#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ - -/* ITM Integration Read Register Definitions */ -#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ -#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ - -/* ITM Integration Mode Control Register Definitions */ -#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ -#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ - /* ITM Lock Status Register Definitions */ #define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ #define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ @@ -1055,13 +1052,13 @@ typedef struct /* TPI Integration ETM Data Register Definitions (FIFO0) */ #define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ -#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ #define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ #define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ #define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ -#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ 
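The core_cm3.h hunk above also rebases the MemManage sub-fields of SCB->CFSR on SCB_CFSR_MEMFAULTSR_Pos instead of the unrelated SHCSR position they used before (numerically the same here, but the old spelling was misleading). The usual consumer is a MemManage handler that checks MMARVALID before trusting MMFAR; a sketch, with the handler behaviour itself only a placeholder:

    /* Sketch: decode the MemManage bits of the Configurable Fault Status Register. */
    #include "device.h"

    void MemManage_Handler(void)
    {
        uint32_t cfsr = SCB->CFSR;

        if ((cfsr & SCB_CFSR_MMARVALID_Msk) != 0U)
        {
            volatile uint32_t fault_address = SCB->MMFAR;   /* only meaningful while MMARVALID is set */
            (void)fault_address;
        }
        if ((cfsr & SCB_CFSR_DACCVIOL_Msk) != 0U)
        {
            /* data access violation: inspect MMFAR and the stacked PC in the debugger */
        }

        SCB->CFSR = cfsr;                                    /* status bits are write-one-to-clear */
        for (;;) { /* park */ }
    }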
#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ #define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ @@ -1084,13 +1081,13 @@ typedef struct /* TPI Integration ITM Data Register Definitions (FIFO1) */ #define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ -#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ #define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ #define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ #define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ -#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ #define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ #define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ @@ -1451,7 +1448,7 @@ typedef struct #ifdef CMSIS_VECTAB_VIRTUAL #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE - #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" #endif #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE #else @@ -1486,7 +1483,7 @@ __STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ reg_value = (reg_value | ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | - (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ SCB->AIRCR = reg_value; } @@ -1512,7 +1509,9 @@ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) { if ((int32_t)(IRQn) >= 0) { + __COMPILER_BARRIER(); NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); } } @@ -1737,6 +1736,7 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) { uint32_t *vectors = (uint32_t *)SCB->VTOR; vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M3 does not require the architectural barrier */ } @@ -1776,6 +1776,7 @@ __NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) /*@} end of CMSIS_Core_NVICFunctions */ + /* ########################## MPU functions #################################### */ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) @@ -1784,6 +1785,7 @@ __NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) #endif + /* ########################## FPU functions #################################### */ /** \ingroup CMSIS_Core_FunctionInterface diff --git a/Drivers/CMSIS/Include/core_cm33.h b/Drivers/CMSIS/Include/core_cm33.h index 6cd2db77..18a2e6fb 100644 --- a/Drivers/CMSIS/Include/core_cm33.h +++ b/Drivers/CMSIS/Include/core_cm33.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm33.h * @brief CMSIS Cortex-M33 Core Peripheral Access Layer Header File - * @version V5.0.9 - * @date 06. July 2018 + * @version V5.2.3 + * @date 13. 
October 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -23,9 +23,11 @@ */ #if defined ( __ICCARM__ ) - #pragma system_include /* treat file as system include file for MISRA check */ + #pragma system_include /* treat file as system include file for MISRA check */ #elif defined (__clang__) - #pragma clang system_header /* treat file as system include file */ + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ #endif #ifndef __CORE_CM33_H_GENERIC @@ -97,7 +99,7 @@ #endif #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) - #if defined (__ARM_PCS_VFP) + #if defined (__ARM_FP) #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) #define __FPU_USED 1U #else @@ -248,6 +250,11 @@ #warning "__DSP_PRESENT not defined in device header file; using default!" #endif + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" @@ -512,7 +519,7 @@ typedef struct __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ - __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ @@ -521,7 +528,10 @@ typedef struct __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ - uint32_t RESERVED3[92U]; + uint32_t RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ uint32_t RESERVED4[15U]; __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ @@ -538,14 +548,7 @@ typedef struct __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ - uint32_t RESERVED7[6U]; - __IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */ - __IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */ - __IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */ - __IOM uint32_t CACR; /*!< Offset: 0x29C (R/W) L1 Cache Control Register */ - __IOM uint32_t AHBSCR; /*!< Offset: 0x2A0 (R/W) AHB Slave 
Control Register */ - uint32_t RESERVED8[1U]; - __IOM uint32_t ABFSR; /*!< Offset: 0x2A8 (R/W) Auxiliary Bus Fault Status Register */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ } SCB_Type; /* SCB CPUID Register Definitions */ @@ -746,22 +749,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -921,78 +924,6 @@ typedef struct #define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ #define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ -/* Instruction Tightly-Coupled Memory Control Register Definitions */ -#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ -#define SCB_ITCMCR_SZ_Msk (0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ - -#define SCB_ITCMCR_RETEN_Pos 2U /*!< SCB ITCMCR: RETEN Position */ -#define SCB_ITCMCR_RETEN_Msk (1UL << SCB_ITCMCR_RETEN_Pos) /*!< SCB ITCMCR: RETEN Mask */ - -#define SCB_ITCMCR_RMW_Pos 1U /*!< SCB ITCMCR: RMW Position */ -#define SCB_ITCMCR_RMW_Msk (1UL << SCB_ITCMCR_RMW_Pos) /*!< SCB ITCMCR: RMW Mask */ - -#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ -#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ - -/* Data Tightly-Coupled Memory Control Register Definitions */ -#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ -#define SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB 
DTCMCR: SZ Mask */ - -#define SCB_DTCMCR_RETEN_Pos 2U /*!< SCB DTCMCR: RETEN Position */ -#define SCB_DTCMCR_RETEN_Msk (1UL << SCB_DTCMCR_RETEN_Pos) /*!< SCB DTCMCR: RETEN Mask */ - -#define SCB_DTCMCR_RMW_Pos 1U /*!< SCB DTCMCR: RMW Position */ -#define SCB_DTCMCR_RMW_Msk (1UL << SCB_DTCMCR_RMW_Pos) /*!< SCB DTCMCR: RMW Mask */ - -#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */ -#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */ - -/* AHBP Control Register Definitions */ -#define SCB_AHBPCR_SZ_Pos 1U /*!< SCB AHBPCR: SZ Position */ -#define SCB_AHBPCR_SZ_Msk (7UL << SCB_AHBPCR_SZ_Pos) /*!< SCB AHBPCR: SZ Mask */ - -#define SCB_AHBPCR_EN_Pos 0U /*!< SCB AHBPCR: EN Position */ -#define SCB_AHBPCR_EN_Msk (1UL /*<< SCB_AHBPCR_EN_Pos*/) /*!< SCB AHBPCR: EN Mask */ - -/* L1 Cache Control Register Definitions */ -#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ -#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ - -#define SCB_CACR_ECCEN_Pos 1U /*!< SCB CACR: ECCEN Position */ -#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< SCB CACR: ECCEN Mask */ - -#define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */ -#define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: SIWT Mask */ - -/* AHBS Control Register Definitions */ -#define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */ -#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBPCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */ - -#define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */ -#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBPCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */ - -#define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/ -#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBPCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */ - -/* Auxiliary Bus Fault Status Register Definitions */ -#define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/ -#define SCB_ABFSR_AXIMTYPE_Msk (3UL << SCB_ABFSR_AXIMTYPE_Pos) /*!< SCB ABFSR: AXIMTYPE Mask */ - -#define SCB_ABFSR_EPPB_Pos 4U /*!< SCB ABFSR: EPPB Position*/ -#define SCB_ABFSR_EPPB_Msk (1UL << SCB_ABFSR_EPPB_Pos) /*!< SCB ABFSR: EPPB Mask */ - -#define SCB_ABFSR_AXIM_Pos 3U /*!< SCB ABFSR: AXIM Position*/ -#define SCB_ABFSR_AXIM_Msk (1UL << SCB_ABFSR_AXIM_Pos) /*!< SCB ABFSR: AXIM Mask */ - -#define SCB_ABFSR_AHBP_Pos 2U /*!< SCB ABFSR: AHBP Position*/ -#define SCB_ABFSR_AHBP_Msk (1UL << SCB_ABFSR_AHBP_Pos) /*!< SCB ABFSR: AHBP Mask */ - -#define SCB_ABFSR_DTCM_Pos 1U /*!< SCB ABFSR: DTCM Position*/ -#define SCB_ABFSR_DTCM_Msk (1UL << SCB_ABFSR_DTCM_Pos) /*!< SCB ABFSR: DTCM Mask */ - -#define SCB_ABFSR_ITCM_Pos 0U /*!< SCB ABFSR: ITCM Position*/ -#define SCB_ABFSR_ITCM_Msk (1UL /*<< SCB_ABFSR_ITCM_Pos*/) /*!< SCB ABFSR: ITCM Mask */ - /*@} end of group CMSIS_SCB */ @@ -1097,10 +1028,7 @@ typedef struct __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ uint32_t RESERVED2[15U]; __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ - uint32_t RESERVED3[29U]; - __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ - __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ - __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED3[32U]; uint32_t RESERVED4[43U]; __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status 
Register */ @@ -1163,18 +1091,6 @@ typedef struct #define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ #define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ -/* ITM Integration Write Register Definitions */ -#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ -#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ - -/* ITM Integration Read Register Definitions */ -#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ -#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ - -/* ITM Integration Mode Control Register Definitions */ -#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ -#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ - /* ITM Lock Status Register Definitions */ #define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ #define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ @@ -1763,8 +1679,9 @@ typedef struct __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ - __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ - __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ } FPU_Type; /* Floating-Point Context Control Register Definitions */ @@ -1836,7 +1753,7 @@ typedef struct #define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ #define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ -/* Media and FP Feature Register 0 Definitions */ +/* Media and VFP Feature Register 0 Definitions */ #define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ #define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ @@ -1861,7 +1778,7 @@ typedef struct #define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ #define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ -/* Media and FP Feature Register 1 Definitions */ +/* Media and VFP Feature Register 1 Definitions */ #define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ #define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ @@ -1874,9 +1791,13 @@ typedef struct #define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ #define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ -/*@} end of group CMSIS_FPU */ +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ +/*@} end of group CMSIS_FPU */ +/* CoreDebug is deprecated. 
replaced by DCB (Debug Control Block) */ /** \ingroup CMSIS_core_register \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) @@ -1885,7 +1806,7 @@ typedef struct */ /** - \brief Structure type to access the Core Debug Register (CoreDebug). + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). */ typedef struct { @@ -1893,124 +1814,354 @@ typedef struct __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ - uint32_t RESERVED4[1U]; + uint32_t RESERVED0[1U]; __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ } CoreDebug_Type; /* Debug Halting Control and Status Register Definitions */ -#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ -#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ -#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< CoreDebug DHCSR: S_RESTART_ST Position */ -#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< CoreDebug DHCSR: S_RESTART_ST Mask */ +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ -#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ -#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ -#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ -#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ -#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ -#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ -#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ -#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated 
CoreDebug DHCSR: S_SLEEP Mask */ -#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ -#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ -#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ -#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ -#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ -#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ -#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ -#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ -#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ -#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ -#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ -#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ -#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ -#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ /* Debug Core Register Selector Register Definitions */ -#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ -#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ -#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ -#define 
CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ /* Debug Exception and Monitor Control Register Definitions */ -#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ -#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ -#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ -#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ -#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ -#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ -#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ -#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ -#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ -#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug DEMCR: MON_EN Mask */ -#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ -#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ -#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ -#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ -#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ -#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ +#define 
CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ -#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ -#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ -#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ -#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ -#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ -#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ -#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ -#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ -#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ -#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ /* Debug Authentication Control Register Definitions */ -#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ -#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ -#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Position */ -#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ -#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< CoreDebug DAUTHCTRL: INTSPIDEN Position */ -#define 
CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPIDEN Mask */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ -#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< CoreDebug DAUTHCTRL: SPIDENSEL Position */ -#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< CoreDebug DAUTHCTRL: SPIDENSEL Mask */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ /* Debug Security Control and Status Register Definitions */ -#define CoreDebug_DSCSR_CDS_Pos 16U /*!< CoreDebug DSCSR: CDS Position */ -#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< CoreDebug DSCSR: CDS Mask */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ -#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< CoreDebug DSCSR: SBRSEL Position */ -#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< CoreDebug DSCSR: SBRSEL Mask */ +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ -#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< CoreDebug DSCSR: SBRSELEN Position */ -#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< CoreDebug DSCSR: SBRSELEN Mask */ +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ /*@} end of group CMSIS_CoreDebug */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define 
DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL 
<< DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + /** \ingroup CMSIS_core_register \defgroup CMSIS_core_bitfield Core register bit field macros @@ -2049,7 +2200,9 @@ typedef struct #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ - #define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ @@ -2061,7 +2214,9 @@ typedef struct #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ - #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< Core Debug configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ @@ -2078,7 +2233,9 @@ typedef struct #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ - #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< Core Debug Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ @@ -2087,7 +2244,9 @@ typedef struct #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ - #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< Core Debug configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ #if defined (__MPU_PRESENT) && 
(__MPU_PRESENT == 1U) #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ @@ -2101,6 +2260,15 @@ typedef struct /*@} */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ +/*@} */ + /******************************************************************************* * Hardware Abstraction Layer @@ -2159,7 +2327,7 @@ typedef struct /* Special LR values for Secure/Non-Secure call handling and exception handling */ -/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ #define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ /* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ @@ -2168,13 +2336,13 @@ typedef struct #define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ #define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ #define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ -#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ #define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ /* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ #define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ -#else +#else #define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ #endif @@ -2197,7 +2365,7 @@ __STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ reg_value = (reg_value | ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | - (PriorityGroupTmp << 8U) ); /* Insert write key and priority group */ + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ SCB->AIRCR = reg_value; } @@ -2223,7 +2391,9 @@ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) { if ((int32_t)(IRQn) >= 0) { + __COMPILER_BARRIER(); NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); } } @@ -2515,6 +2685,7 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) { uint32_t *vectors = (uint32_t *)SCB->VTOR; vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); } @@ -2841,6 +3012,110 @@ __STATIC_INLINE void TZ_SAU_Disable(void) +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. 
+ @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + /* ################################## SysTick function ############################################ */ /** \ingroup CMSIS_Core_FunctionInterface diff --git a/Drivers/CMSIS/Include/core_cm35p.h b/Drivers/CMSIS/Include/core_cm35p.h new file mode 100644 index 00000000..3843d954 --- /dev/null +++ b/Drivers/CMSIS/Include/core_cm35p.h @@ -0,0 +1,3277 @@ +/**************************************************************************//** + * @file core_cm35p.h + * @brief CMSIS Cortex-M35P Core Peripheral Access Layer Header File + * @version V1.1.3 + * @date 13. October 2021 + ******************************************************************************/ +/* + * Copyright (c) 2018-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM35P_H_GENERIC +#define __CORE_CM35P_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M35P + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM35P definitions */ +#define __CM35P_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM35P_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM35P_CMSIS_VERSION ((__CM35P_CMSIS_VERSION_MAIN << 16U) | \ + __CM35P_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (35U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + 
#define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM35P_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM35P_H_DEPENDANT +#define __CORE_CM35P_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM35P_REV + #define __CM35P_REV 0x0000U + #warning "__CM35P_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M35P */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB 
ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: 
SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define 
SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR 
Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB 
CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask 
*/ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[4U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << 
ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator 
Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << 
DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT 
FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg 
Position */ +#define TPI_FFSR_FlInProg_Msk             (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/)        /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos                 8U                                         /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk                (0x1UL << TPI_FFCR_TrigIn_Pos)              /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos                 6U                                         /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk                (0x1UL << TPI_FFCR_FOnMan_Pos)              /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos                1U                                         /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk               (0x1UL << TPI_FFCR_EnFCont_Pos)             /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos             0U                                         /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk            (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/)      /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration Test FIFO Test Data 0 Register Definitions */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Pos    29U                                         /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Position */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Msk    (0x3UL << TPI_ITFTTD0_ATB_IF2_ATVALID_Pos)  /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF2_bytecount_Pos  27U                                         /*!< TPI ITFTTD0: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD0_ATB_IF2_bytecount_Msk  (0x3UL << TPI_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Pos    26U                                         /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Msk    (0x3UL << TPI_ITFTTD0_ATB_IF1_ATVALID_Pos)  /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF1_bytecount_Pos  24U                                         /*!< TPI ITFTTD0: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD0_ATB_IF1_bytecount_Msk  (0x3UL << TPI_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 1 byte count Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data2_Pos      16U                                         /*!< TPI ITFTTD0: ATB Interface 1 data2 Position */ +#define TPI_ITFTTD0_ATB_IF1_data2_Msk      (0xFFUL << TPI_ITFTTD0_ATB_IF1_data2_Pos)   /*!< TPI ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data1_Pos       8U                                         /*!< TPI ITFTTD0: ATB Interface 1 data1 Position */ +#define TPI_ITFTTD0_ATB_IF1_data1_Msk      (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos)   /*!< TPI ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data0_Pos       0U                                         /*!< TPI ITFTTD0: ATB Interface 1 data0 Position */ +#define TPI_ITFTTD0_ATB_IF1_data0_Msk      (0xFFUL /*<< TPI_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPI ITFTTD0: ATB Interface 1 data0 Mask */ + +/* TPI Integration Test ATB Control Register 2 Register Definitions */ +#define TPI_ITATBCTR2_AFVALID2S_Pos         1U                                         /*!< TPI ITATBCTR2: AFVALID2S Position */ +#define TPI_ITATBCTR2_AFVALID2S_Msk        (0x1UL << TPI_ITATBCTR2_AFVALID2S_Pos)      /*!< TPI ITATBCTR2: AFVALID2S Mask */ + +#define TPI_ITATBCTR2_AFVALID1S_Pos         1U                                         /*!< TPI ITATBCTR2: AFVALID1S Position */ +#define TPI_ITATBCTR2_AFVALID1S_Msk        (0x1UL << TPI_ITATBCTR2_AFVALID1S_Pos)      /*!< TPI ITATBCTR2: AFVALID1S Mask */ + +#define TPI_ITATBCTR2_ATREADY2S_Pos         0U                                         /*!< TPI ITATBCTR2: ATREADY2S Position */ +#define TPI_ITATBCTR2_ATREADY2S_Msk        (0x1UL /*<< TPI_ITATBCTR2_ATREADY2S_Pos*/)  /*!< TPI ITATBCTR2: ATREADY2S Mask */ + +#define TPI_ITATBCTR2_ATREADY1S_Pos         0U                                         /*!< TPI ITATBCTR2: ATREADY1S Position */ +#define TPI_ITATBCTR2_ATREADY1S_Msk        (0x1UL /*<< TPI_ITATBCTR2_ATREADY1S_Pos*/)  /*!< TPI ITATBCTR2: ATREADY1S Mask */ +
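+/* Editor's note: illustrative sketch only, not part of the original CMSIS header.
+   It shows how the _Pos/_Msk pairs defined above are typically used: build a field
+   with ((value << _Pos) & _Msk) and clear one with (reg & ~_Msk). The function name
+   and the 'prescaler' parameter are placeholders chosen for this example; TPI_Type
+   and the TPI_SPPR/TPI_ACPR/TPI_FFCR macros come from the definitions above. */
+static inline void TPI_ConfigSWO_Example(TPI_Type *tpi, uint32_t prescaler)
+{
+  tpi->SPPR  = (2UL << TPI_SPPR_TXMODE_Pos) & TPI_SPPR_TXMODE_Msk; /* 2 = NRZ (UART-like) SWO encoding */
+  tpi->ACPR  = prescaler & TPI_ACPR_PRESCALER_Msk;                 /* SWO clock = reference clock / (prescaler + 1) */
+  tpi->FFCR &= ~TPI_FFCR_EnFCont_Msk;                              /* clear EnFCont: bypass the formatter for raw SWO output */
+}
+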
+/* TPI Integration Test FIFO Test Data 1 Register Definitions */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Pos    29U                                         /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Msk    (0x3UL << TPI_ITFTTD1_ATB_IF2_ATVALID_Pos)  /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF2_bytecount_Pos  27U                                         /*!< TPI ITFTTD1: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD1_ATB_IF2_bytecount_Msk  (0x3UL << TPI_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Pos    26U                                         /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Msk    (0x3UL << TPI_ITFTTD1_ATB_IF1_ATVALID_Pos)  /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF1_bytecount_Pos  24U                                         /*!< TPI ITFTTD1: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD1_ATB_IF1_bytecount_Msk  (0x3UL << TPI_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 1 byte count Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data2_Pos      16U                                         /*!< TPI ITFTTD1: ATB Interface 2 data2 Position */ +#define TPI_ITFTTD1_ATB_IF2_data2_Msk      (0xFFUL << TPI_ITFTTD1_ATB_IF2_data2_Pos)   /*!< TPI ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data1_Pos       8U                                         /*!< TPI ITFTTD1: ATB Interface 2 data1 Position */ +#define TPI_ITFTTD1_ATB_IF2_data1_Msk      (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos)   /*!< TPI ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data0_Pos       0U                                         /*!< TPI ITFTTD1: ATB Interface 2 data0 Position */ +#define TPI_ITFTTD1_ATB_IF2_data0_Msk      (0xFFUL /*<< TPI_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPI ITFTTD1: ATB Interface 2 data0 Mask */ + +/* TPI Integration Test ATB Control Register 0 Definitions */ +#define TPI_ITATBCTR0_AFVALID2S_Pos         1U                                         /*!< TPI ITATBCTR0: AFVALID2S Position */ +#define TPI_ITATBCTR0_AFVALID2S_Msk        (0x1UL << TPI_ITATBCTR0_AFVALID2S_Pos)      /*!< TPI ITATBCTR0: AFVALID2S Mask */ + +#define TPI_ITATBCTR0_AFVALID1S_Pos         1U                                         /*!< TPI ITATBCTR0: AFVALID1S Position */ +#define TPI_ITATBCTR0_AFVALID1S_Msk        (0x1UL << TPI_ITATBCTR0_AFVALID1S_Pos)      /*!< TPI ITATBCTR0: AFVALID1S Mask */ + +#define TPI_ITATBCTR0_ATREADY2S_Pos         0U                                         /*!< TPI ITATBCTR0: ATREADY2S Position */ +#define TPI_ITATBCTR0_ATREADY2S_Msk        (0x1UL /*<< TPI_ITATBCTR0_ATREADY2S_Pos*/)  /*!< TPI ITATBCTR0: ATREADY2S Mask */ + +#define TPI_ITATBCTR0_ATREADY1S_Pos         0U                                         /*!< TPI ITATBCTR0: ATREADY1S Position */ +#define TPI_ITATBCTR0_ATREADY1S_Msk        (0x1UL /*<< TPI_ITATBCTR0_ATREADY1S_Pos*/)  /*!< TPI ITATBCTR0: ATREADY1S Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos                 0U                                         /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk                (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/)          /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos             11U                                         /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk             (0x1UL << TPI_DEVID_NRZVALID_Pos)           /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos            10U                                         /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk            (0x1UL << TPI_DEVID_MANCVALID_Pos)          /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos             9U                                         /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk            (0x1UL << TPI_DEVID_PTINVALID_Pos)          /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos                6U                                         /*!< TPI DEVID: FIFOSZ Position */ +#define TPI_DEVID_FIFOSZ_Msk               (0x7UL << TPI_DEVID_FIFOSZ_Pos)             /*!< TPI DEVID: FIFOSZ Mask */ + +#define TPI_DEVID_NrTraceInput_Pos          0U                                         /*!< TPI DEVID: NrTraceInput Position */ +#define 
TPI_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ 
+#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
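+ \note Illustrative programming sequence (a sketch, not a fixed API): a region is selected
+       through RNR, its base address is written to RBAR and its limit plus control bits to
+       RLAR, using the masks defined below, e.g.
+         SAU->RNR  = 0U;
+         SAU->RBAR = (start_addr & SAU_RBAR_BADDR_Msk);
+         SAU->RLAR = (limit_addr & SAU_RLAR_LADDR_Msk) | SAU_RLAR_NSC_Msk | SAU_RLAR_ENABLE_Msk;
+       where start_addr and limit_addr are placeholder 32-byte aligned addresses.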
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + 
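+/* Usage sketch (illustrative only): feature fields are read with the Pos/Msk pairs defined
+   in this section, e.g.
+     uint32_t fpdp = (FPU->MVFR0 & FPU_MVFR0_Double_precision_Msk) >> FPU_MVFR0_Double_precision_Pos;
+   a value of zero indicates that double precision is not implemented. */
+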
+#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U 
/*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ 
+#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
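+ \note Writes to DHCSR take effect only when the debug key 0xA05FUL is written to the DBGKEY
+       field (bits [31:16]) in the same access, e.g. the illustrative value
+       (0xA05FUL << DCB_DHCSR_DBGKEY_Pos) | DCB_DHCSR_C_DEBUGEN_Msk to enable halting debug.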
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define 
DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL 
<< DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
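+ \note Usage hint: where the Software Lock is implemented (DLSR.SLI reads 1), writing the
+       CoreSight unlock key 0xC5ACCE55UL to DLAR clears the lock, while writing any other
+       value sets it again, per the CoreSight lock convention.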
+ */
+typedef struct
+{
+  __OM  uint32_t DLAR;                   /*!< Offset: 0x000 ( /W)  SCS Software Lock Access Register */
+  __IM  uint32_t DLSR;                   /*!< Offset: 0x004 (R/ )  SCS Software Lock Status Register */
+  __IM  uint32_t DAUTHSTATUS;            /*!< Offset: 0x008 (R/ )  Debug Authentication Status Register */
+  __IM  uint32_t DDEVARCH;               /*!< Offset: 0x00C (R/ )  SCS Device Architecture Register */
+  __IM  uint32_t DDEVTYPE;               /*!< Offset: 0x010 (R/ )  SCS Device Type Register */
+} DIB_Type;
+
+/* DLAR, SCS Software Lock Access Register Definitions */
+#define DIB_DLAR_KEY_Pos                0U                                         /*!< DIB DLAR: KEY Position */
+#define DIB_DLAR_KEY_Msk               (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */)     /*!< DIB DLAR: KEY Mask */
+
+/* DLSR, SCS Software Lock Status Register Definitions */
+#define DIB_DLSR_nTT_Pos                2U                                         /*!< DIB DLSR: Not thirty-two bit Position */
+#define DIB_DLSR_nTT_Msk               (0x1UL << DIB_DLSR_nTT_Pos )                /*!< DIB DLSR: Not thirty-two bit Mask */
+
+#define DIB_DLSR_SLK_Pos                1U                                         /*!< DIB DLSR: Software Lock status Position */
+#define DIB_DLSR_SLK_Msk               (0x1UL << DIB_DLSR_SLK_Pos )                /*!< DIB DLSR: Software Lock status Mask */
+
+#define DIB_DLSR_SLI_Pos                0U                                         /*!< DIB DLSR: Software Lock implemented Position */
+#define DIB_DLSR_SLI_Msk               (0x1UL /*<< DIB_DLSR_SLI_Pos*/)             /*!< DIB DLSR: Software Lock implemented Mask */
+
+/* DAUTHSTATUS, Debug Authentication Status Register Definitions */
+#define DIB_DAUTHSTATUS_SNID_Pos        6U                                         /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */
+#define DIB_DAUTHSTATUS_SNID_Msk       (0x3UL << DIB_DAUTHSTATUS_SNID_Pos )        /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */
+
+#define DIB_DAUTHSTATUS_SID_Pos         4U                                         /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */
+#define DIB_DAUTHSTATUS_SID_Msk        (0x3UL << DIB_DAUTHSTATUS_SID_Pos )         /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */
+
+#define DIB_DAUTHSTATUS_NSNID_Pos       2U                                         /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */
+#define DIB_DAUTHSTATUS_NSNID_Msk      (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos )       /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */
+
+#define DIB_DAUTHSTATUS_NSID_Pos        0U                                         /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */
+#define DIB_DAUTHSTATUS_NSID_Msk       (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/)     /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */
+
+/* DDEVARCH, SCS Device Architecture Register Definitions */
+#define DIB_DDEVARCH_ARCHITECT_Pos     21U                                         /*!< DIB DDEVARCH: Architect Position */
+#define DIB_DDEVARCH_ARCHITECT_Msk     (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos )    /*!< DIB DDEVARCH: Architect Mask */
+
+#define DIB_DDEVARCH_PRESENT_Pos       20U                                         /*!< DIB DDEVARCH: DEVARCH Present Position */
+#define DIB_DDEVARCH_PRESENT_Msk       (0x1UL << DIB_DDEVARCH_PRESENT_Pos )        /*!< DIB DDEVARCH: DEVARCH Present Mask */
+
+#define DIB_DDEVARCH_REVISION_Pos      16U                                         /*!< DIB DDEVARCH: Revision Position */
+#define DIB_DDEVARCH_REVISION_Msk      (0xFUL << DIB_DDEVARCH_REVISION_Pos )       /*!< DIB DDEVARCH: Revision Mask */
+
+#define DIB_DDEVARCH_ARCHVER_Pos       12U                                         /*!< DIB DDEVARCH: Architecture Version Position */
+#define DIB_DDEVARCH_ARCHVER_Msk       (0xFUL << DIB_DDEVARCH_ARCHVER_Pos )        /*!< DIB DDEVARCH: Architecture Version Mask */
+
+#define DIB_DDEVARCH_ARCHPART_Pos       0U                                         /*!< DIB DDEVARCH: Architecture Part Position */
+#define DIB_DDEVARCH_ARCHPART_Msk      (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/)  /*!< DIB DDEVARCH: Architecture Part Mask */
+
+/* DDEVTYPE, SCS Device Type Register Definitions */
+#define DIB_DDEVTYPE_SUB_Pos            4U                                         /*!< DIB DDEVTYPE: Sub-type Position */
+#define DIB_DDEVTYPE_SUB_Msk           (0xFUL << DIB_DDEVTYPE_SUB_Pos )            /*!< DIB DDEVTYPE: Sub-type Mask */
+
+#define DIB_DDEVTYPE_MAJOR_Pos          0U                                         /*!< DIB DDEVTYPE: Major type Position */
+#define DIB_DDEVTYPE_MAJOR_Msk         (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/)       /*!< DIB DDEVTYPE: Major type Mask */
+
+
+/*@} end of group CMSIS_DIB */
+
+
+/**
+  \ingroup  CMSIS_core_register
+  \defgroup CMSIS_core_bitfield     Core register bit field macros
+  \brief    Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+  @{
+ */
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field. This parameter is interpreted as a uint32_t type.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief   Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register. This parameter is interpreted as a uint32_t type.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
+
+/*@} end of group CMSIS_core_bitfield */
+
+
+/**
+  \ingroup  CMSIS_core_register
+  \defgroup CMSIS_core_base     Core Definitions
+  \brief    Definitions for base addresses, unions, and structures.
+  @{
+ */
+
+/* Memory mapping of Core Hardware */
+  #define SCS_BASE            (0xE000E000UL)                             /*!< System Control Space Base Address */
+  #define ITM_BASE            (0xE0000000UL)                             /*!< ITM Base Address */
+  #define DWT_BASE            (0xE0001000UL)                             /*!< DWT Base Address */
+  #define TPI_BASE            (0xE0040000UL)                             /*!< TPI Base Address */
+  #define CoreDebug_BASE      (0xE000EDF0UL)                             /*!< \deprecated Core Debug Base Address */
+  #define DCB_BASE            (0xE000EDF0UL)                             /*!< DCB Base Address */
+  #define DIB_BASE            (0xE000EFB0UL)                             /*!< DIB Base Address */
+  #define SysTick_BASE        (SCS_BASE +  0x0010UL)                     /*!< SysTick Base Address */
+  #define NVIC_BASE           (SCS_BASE +  0x0100UL)                     /*!< NVIC Base Address */
+  #define SCB_BASE            (SCS_BASE +  0x0D00UL)                     /*!< System Control Block Base Address */
+
+  #define SCnSCB              ((SCnSCB_Type    *)     SCS_BASE         ) /*!< System control Register not in SCB */
+  #define SCB                 ((SCB_Type       *)     SCB_BASE         ) /*!< SCB configuration struct */
+  #define SysTick             ((SysTick_Type   *)     SysTick_BASE     ) /*!< SysTick configuration struct */
+  #define NVIC                ((NVIC_Type      *)     NVIC_BASE        ) /*!< NVIC configuration struct */
+  #define ITM                 ((ITM_Type       *)     ITM_BASE         ) /*!< ITM configuration struct */
+  #define DWT                 ((DWT_Type       *)     DWT_BASE         ) /*!< DWT configuration struct */
+  #define TPI                 ((TPI_Type       *)     TPI_BASE         ) /*!< TPI configuration struct */
+  #define CoreDebug           ((CoreDebug_Type *)     CoreDebug_BASE   ) /*!< \deprecated Core Debug configuration struct */
+  #define DCB                 ((DCB_Type       *)     DCB_BASE         ) /*!< DCB configuration struct */
+  #define DIB                 ((DIB_Type       *)     DIB_BASE         ) /*!< DIB configuration struct */
+
+  #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
+    #define MPU_BASE          (SCS_BASE +  0x0D90UL)                     /*!< Memory Protection Unit */
+    #define MPU               ((MPU_Type       *)     MPU_BASE         ) /*!< Memory Protection Unit */
+  #endif
+
+  #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
+    #define SAU_BASE          (SCS_BASE +  0x0DD0UL)                     /*!< Security Attribution Unit */
+    #define SAU               ((SAU_Type       *)     SAU_BASE         ) /*!< Security Attribution Unit */
+  #endif
+
+  #define FPU_BASE            (SCS_BASE +  0x0F30UL)                     /*!< Floating Point Unit */
+  #define FPU                 ((FPU_Type       *)     FPU_BASE         ) /*!< Floating Point Unit */
+
+#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
+  #define SCS_BASE_NS         (0xE002E000UL)                             /*!< System Control Space Base 
Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
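+ \note Illustrative call: NVIC_SetPriorityGrouping(3U) selects PRIGROUP = 3, which on a device
+       implementing 4 priority bits (__NVIC_PRIO_BITS == 4) uses all of them as preemption
+       priority and none as subpriority; the appropriate value is device specific.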
+ */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. 
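+ \note Illustrative call (TIMx_IRQn stands for a device specific interrupt name):
+       NVIC_ClearPendingIRQ(TIMx_IRQn); a stale pending flag is typically cleared before the
+       interrupt is (re-)enabled with NVIC_EnableIRQ.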
+ \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
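+ \note Minimal usage sketch (TIMx_IRQn is a placeholder for a device specific interrupt):
+       NVIC_SetPriority(TIMx_IRQn, NVIC_EncodePriority(NVIC_GetPriorityGrouping(), 1U, 0U));
+       encodes a preemption priority of 1 and a subpriority of 0 under the current grouping.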
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). 
+ */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. 
+ @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM35P_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_cm4.h b/Drivers/CMSIS/Include/core_cm4.h index 7d568735..e21cd149 100644 --- a/Drivers/CMSIS/Include/core_cm4.h +++ b/Drivers/CMSIS/Include/core_cm4.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm4.h * @brief CMSIS Cortex-M4 Core Peripheral Access Layer Header File - * @version V5.0.8 - * @date 04. June 2018 + * @version V5.1.2 + * @date 04. June 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -86,7 +86,7 @@ #endif #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) - #if defined __ARM_PCS_VFP + #if defined __ARM_FP #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) #define __FPU_USED 1U #else @@ -194,6 +194,11 @@ #warning "__MPU_PRESENT not defined in device header file; using default!" #endif + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" 
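[Editor's note] For context on the hunk above: the __CHECK_DEVICE_DEFINES fall-backs (including the newly added __VTOR_PRESENT default) only fire when the device header omits the corresponding configuration macros. A minimal, purely illustrative sketch of what a device header is expected to provide before core_cm4.h is included (macro values here are assumptions for illustration, not taken from this change):

    /* Illustrative device-header excerpt (hypothetical values):
       defining these before including core_cm4.h suppresses the
       "... not defined in device header file; using default!" warnings above. */
    #define __CM4_REV              0x0001U  /* Core revision                        */
    #define __MPU_PRESENT          1U       /* MPU is present                       */
    #define __VTOR_PRESENT         1U       /* VTOR register is implemented         */
    #define __NVIC_PRIO_BITS       4U       /* Implemented NVIC priority bits       */
    #define __Vendor_SysTickConfig 0U       /* Use the CMSIS SysTick_Config()       */
    #define __FPU_PRESENT          1U       /* FPU is present                       */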
@@ -408,7 +413,7 @@ typedef struct __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ uint32_t RESERVED0[24U]; __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ - uint32_t RSERVED1[24U]; + uint32_t RESERVED1[24U]; __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ uint32_t RESERVED2[24U]; __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ @@ -618,22 +623,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -822,10 +827,7 @@ typedef struct __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ uint32_t RESERVED2[15U]; __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ - uint32_t RESERVED3[29U]; - __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ - __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ - __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED3[32U]; uint32_t RESERVED4[43U]; __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ @@ -876,18 +878,6 @@ typedef struct #define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ #define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) 
/*!< ITM TCR: ITM Enable bit Mask */ -/* ITM Integration Write Register Definitions */ -#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ -#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ - -/* ITM Integration Read Register Definitions */ -#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ -#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ - -/* ITM Integration Mode Control Register Definitions */ -#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ -#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ - /* ITM Lock Status Register Definitions */ #define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ #define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ @@ -1120,13 +1110,13 @@ typedef struct /* TPI Integration ETM Data Register Definitions (FIFO0) */ #define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ -#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ #define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ #define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ #define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ -#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ #define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ #define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ @@ -1149,13 +1139,13 @@ typedef struct /* TPI Integration ITM Data Register Definitions (FIFO1) */ #define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ -#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ #define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ #define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ #define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ -#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ #define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ #define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ @@ -1324,6 +1314,7 @@ typedef struct __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and FP Feature Register 2 */ } FPU_Type; /* Floating-Point Context Control Register Definitions */ @@ -1409,6 +1400,11 @@ typedef struct #define 
FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ #define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ +/* Media and FP Feature Register 2 Definitions */ + +#define FPU_MVFR2_VFP_Misc_Pos 4U /*!< MVFR2: VFP Misc bits Position */ +#define FPU_MVFR2_VFP_Misc_Msk (0xFUL << FPU_MVFR2_VFP_Misc_Pos) /*!< MVFR2: VFP Misc bits Mask */ + /*@} end of group CMSIS_FPU */ @@ -1625,7 +1621,7 @@ typedef struct #ifdef CMSIS_VECTAB_VIRTUAL #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE - #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" #endif #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE #else @@ -1689,7 +1685,9 @@ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) { if ((int32_t)(IRQn) >= 0) { + __COMPILER_BARRIER(); NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); } } @@ -1914,6 +1912,7 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) { uint32_t *vectors = (uint32_t *)SCB->VTOR; vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M4 does not require the architectural barrier */ } @@ -1953,6 +1952,7 @@ __NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) /*@} end of CMSIS_Core_NVICFunctions */ + /* ########################## MPU functions #################################### */ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) diff --git a/Drivers/CMSIS/Include/core_cm55.h b/Drivers/CMSIS/Include/core_cm55.h new file mode 100644 index 00000000..faa30ce3 --- /dev/null +++ b/Drivers/CMSIS/Include/core_cm55.h @@ -0,0 +1,4817 @@ +/**************************************************************************//** + * @file core_cm55.h + * @brief CMSIS Cortex-M55 Core Peripheral Access Layer Header File + * @version V1.2.4 + * @date 21. April 2022 + ******************************************************************************/ +/* + * Copyright (c) 2018-2022 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM55_H_GENERIC +#define __CORE_CM55_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M55 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM55 definitions */ +#define __CM55_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM55_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM55_CMSIS_VERSION ((__CM55_CMSIS_VERSION_MAIN << 16U) | \ + __CM55_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (55U) /*!< Cortex-M Core */ + +#if defined ( __CC_ARM ) + #error Legacy Arm Compiler does not support Armv8.1-M target architecture. +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + 
#define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM55_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM55_H_DEPENDANT +#define __CORE_CM55_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM55_REV + #define __CM55_REV 0x0000U + #warning "__CM55_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #if __FPU_PRESENT != 0U + #ifndef __FPU_DP + #define __FPU_DP 0U + #warning "__FPU_DP not defined in device header file; using default!" + #endif + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __PMU_PRESENT + #define __PMU_PRESENT 0U + #warning "__PMU_PRESENT not defined in device header file; using default!" + #endif + + #if __PMU_PRESENT != 0U + #ifndef __PMU_NUM_EVENTCNT + #define __PMU_NUM_EVENTCNT 8U + #warning "__PMU_NUM_EVENTCNT not defined in device header file; using default!" + #elif (__PMU_NUM_EVENTCNT > 8 || __PMU_NUM_EVENTCNT < 2) + #error "__PMU_NUM_EVENTCNT is out of range in device header file!" */ + #endif + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M55 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core EWIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core PMU Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + __IOM uint32_t RFSR; /*!< Offset: 0x204 (R/W) RAS Fault Status Register */ + uint32_t RESERVED4[14U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position 
*/ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_IESB_Pos 5U /*!< SCB AIRCR: Implicit ESB Enable Position */ +#define SCB_AIRCR_IESB_Msk (1UL << SCB_AIRCR_IESB_Pos) /*!< SCB AIRCR: Implicit ESB Enable Mask */ + +#define SCB_AIRCR_DIT_Pos 4U /*!< SCB AIRCR: Data Independent Timing Position */ +#define SCB_AIRCR_DIT_Msk (1UL << SCB_AIRCR_DIT_Pos) /*!< SCB AIRCR: Data Independent Timing Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define 
SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_TRD_Pos 20U /*!< SCB CCR: TRD Position */ +#define SCB_CCR_TRD_Msk (1UL << SCB_CCR_TRD_Pos) /*!< SCB CCR: TRD Mask */ + +#define SCB_CCR_LOB_Pos 19U /*!< SCB CCR: LOB Position */ +#define SCB_CCR_LOB_Msk (1UL << SCB_CCR_LOB_Pos) /*!< SCB CCR: LOB Mask */ + +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: 
MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status 
Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos 
(SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_PMU_Pos 5U /*!< SCB DFSR: PMU Position */ +#define SCB_DFSR_PMU_Msk (1UL << SCB_DFSR_PMU_Pos) /*!< SCB DFSR: PMU Mask */ + +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CP7_Pos 7U /*!< SCB NSACR: CP7 Position */ +#define SCB_NSACR_CP7_Msk (1UL << SCB_NSACR_CP7_Pos) /*!< SCB NSACR: CP7 Mask */ + +#define SCB_NSACR_CP6_Pos 6U /*!< SCB NSACR: CP6 Position */ +#define SCB_NSACR_CP6_Msk (1UL << SCB_NSACR_CP6_Pos) /*!< SCB NSACR: CP6 Mask */ + +#define SCB_NSACR_CP5_Pos 5U /*!< SCB NSACR: CP5 Position */ +#define SCB_NSACR_CP5_Msk (1UL << SCB_NSACR_CP5_Pos) /*!< SCB NSACR: CP5 Mask */ + +#define SCB_NSACR_CP4_Pos 4U /*!< SCB NSACR: CP4 Position */ +#define SCB_NSACR_CP4_Msk (1UL << SCB_NSACR_CP4_Pos) /*!< SCB NSACR: CP4 Mask */ + +#define SCB_NSACR_CP3_Pos 3U /*!< SCB NSACR: CP3 Position */ +#define SCB_NSACR_CP3_Msk (1UL << SCB_NSACR_CP3_Pos) /*!< SCB NSACR: CP3 Mask */ + +#define SCB_NSACR_CP2_Pos 2U /*!< SCB NSACR: CP2 Position */ +#define SCB_NSACR_CP2_Msk (1UL << SCB_NSACR_CP2_Pos) /*!< SCB NSACR: CP2 Mask */ + +#define 
SCB_NSACR_CP1_Pos 1U /*!< SCB NSACR: CP1 Position */ +#define SCB_NSACR_CP1_Msk (1UL << SCB_NSACR_CP1_Pos) /*!< SCB NSACR: CP1 Mask */ + +#define SCB_NSACR_CP0_Pos 0U /*!< SCB NSACR: CP0 Position */ +#define SCB_NSACR_CP0_Msk (1UL /*<< SCB_NSACR_CP0_Pos*/) /*!< SCB NSACR: CP0 Mask */ + +/* SCB Debug Feature Register 0 Definitions */ +#define SCB_ID_DFR_UDE_Pos 28U /*!< SCB ID_DFR: UDE Position */ +#define SCB_ID_DFR_UDE_Msk (0xFUL << SCB_ID_DFR_UDE_Pos) /*!< SCB ID_DFR: UDE Mask */ + +#define SCB_ID_DFR_MProfDbg_Pos 20U /*!< SCB ID_DFR: MProfDbg Position */ +#define SCB_ID_DFR_MProfDbg_Msk (0xFUL << SCB_ID_DFR_MProfDbg_Pos) /*!< SCB ID_DFR: MProfDbg Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< 
SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB RAS Fault Status Register Definitions */ +#define SCB_RFSR_V_Pos 31U /*!< SCB RFSR: V Position */ +#define SCB_RFSR_V_Msk (1UL << SCB_RFSR_V_Pos) /*!< SCB RFSR: V Mask */ + +#define SCB_RFSR_IS_Pos 16U /*!< SCB RFSR: IS Position */ +#define SCB_RFSR_IS_Msk (0x7FFFUL << SCB_RFSR_IS_Pos) /*!< SCB RFSR: IS Mask */ + +#define SCB_RFSR_UET_Pos 0U /*!< SCB RFSR: UET Position */ +#define SCB_RFSR_UET_Msk (3UL /*<< SCB_RFSR_UET_Pos*/) /*!< SCB RFSR: UET Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ICB Implementation Control Block register (ICB) + \brief Type definitions for the Implementation Control Block Register + @{ + */ + +/** + \brief Structure type to access the Implementation Control Block (ICB). 
+ */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} ICB_Type; + +/* Auxiliary Control Register Definitions */ +#define ICB_ACTLR_DISCRITAXIRUW_Pos 27U /*!< ACTLR: DISCRITAXIRUW Position */ +#define ICB_ACTLR_DISCRITAXIRUW_Msk (1UL << ICB_ACTLR_DISCRITAXIRUW_Pos) /*!< ACTLR: DISCRITAXIRUW Mask */ + +#define ICB_ACTLR_DISDI_Pos 16U /*!< ACTLR: DISDI Position */ +#define ICB_ACTLR_DISDI_Msk (3UL << ICB_ACTLR_DISDI_Pos) /*!< ACTLR: DISDI Mask */ + +#define ICB_ACTLR_DISCRITAXIRUR_Pos 15U /*!< ACTLR: DISCRITAXIRUR Position */ +#define ICB_ACTLR_DISCRITAXIRUR_Msk (1UL << ICB_ACTLR_DISCRITAXIRUR_Pos) /*!< ACTLR: DISCRITAXIRUR Mask */ + +#define ICB_ACTLR_EVENTBUSEN_Pos 14U /*!< ACTLR: EVENTBUSEN Position */ +#define ICB_ACTLR_EVENTBUSEN_Msk (1UL << ICB_ACTLR_EVENTBUSEN_Pos) /*!< ACTLR: EVENTBUSEN Mask */ + +#define ICB_ACTLR_EVENTBUSEN_S_Pos 13U /*!< ACTLR: EVENTBUSEN_S Position */ +#define ICB_ACTLR_EVENTBUSEN_S_Msk (1UL << ICB_ACTLR_EVENTBUSEN_S_Pos) /*!< ACTLR: EVENTBUSEN_S Mask */ + +#define ICB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ +#define ICB_ACTLR_DISITMATBFLUSH_Msk (1UL << ICB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ + +#define ICB_ACTLR_DISNWAMODE_Pos 11U /*!< ACTLR: DISNWAMODE Position */ +#define ICB_ACTLR_DISNWAMODE_Msk (1UL << ICB_ACTLR_DISNWAMODE_Pos) /*!< ACTLR: DISNWAMODE Mask */ + +#define ICB_ACTLR_FPEXCODIS_Pos 10U /*!< ACTLR: FPEXCODIS Position */ +#define ICB_ACTLR_FPEXCODIS_Msk (1UL << ICB_ACTLR_FPEXCODIS_Pos) /*!< ACTLR: FPEXCODIS Mask */ + +#define ICB_ACTLR_DISOLAP_Pos 7U /*!< ACTLR: DISOLAP Position */ +#define ICB_ACTLR_DISOLAP_Msk (1UL << ICB_ACTLR_DISOLAP_Pos) /*!< ACTLR: DISOLAP Mask */ + +#define ICB_ACTLR_DISOLAPS_Pos 6U /*!< ACTLR: DISOLAPS Position */ +#define ICB_ACTLR_DISOLAPS_Msk (1UL << ICB_ACTLR_DISOLAPS_Pos) /*!< ACTLR: DISOLAPS Mask */ + +#define ICB_ACTLR_DISLOBR_Pos 5U /*!< ACTLR: DISLOBR Position */ +#define ICB_ACTLR_DISLOBR_Msk (1UL << ICB_ACTLR_DISLOBR_Pos) /*!< ACTLR: DISLOBR Mask */ + +#define ICB_ACTLR_DISLO_Pos 4U /*!< ACTLR: DISLO Position */ +#define ICB_ACTLR_DISLO_Msk (1UL << ICB_ACTLR_DISLO_Pos) /*!< ACTLR: DISLO Mask */ + +#define ICB_ACTLR_DISLOLEP_Pos 3U /*!< ACTLR: DISLOLEP Position */ +#define ICB_ACTLR_DISLOLEP_Msk (1UL << ICB_ACTLR_DISLOLEP_Pos) /*!< ACTLR: DISLOLEP Mask */ + +#define ICB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define ICB_ACTLR_DISFOLD_Msk (1UL << ICB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +/* Interrupt Controller Type Register Definitions */ +#define ICB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define ICB_ICTR_INTLINESNUM_Msk (0xFUL /*<< ICB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_ICB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) ITM Device Type Register */ + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: 
STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + 
__IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ 
+#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define 
DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup MemSysCtl_Type Memory System Control Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Memory System Control Registers (MEMSYSCTL) + @{ + */ + +/** + \brief Structure type to access the Memory System Control Registers (MEMSYSCTL). + */ +typedef struct +{ + __IOM uint32_t MSCR; /*!< Offset: 0x000 (R/W) Memory System Control Register */ + __IOM uint32_t PFCR; /*!< Offset: 0x004 (R/W) Prefetcher Control Register */ + uint32_t RESERVED1[2U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x010 (R/W) ITCM Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x014 (R/W) DTCM Control Register */ + __IOM uint32_t PAHBCR; /*!< Offset: 0x018 (R/W) P-AHB Control Register */ + uint32_t RESERVED2[313U]; + __IOM uint32_t ITGU_CTRL; /*!< Offset: 0x500 (R/W) ITGU Control Register */ + __IOM uint32_t ITGU_CFG; /*!< Offset: 0x504 (R/W) ITGU Configuration Register */ + uint32_t RESERVED3[2U]; + __IOM uint32_t ITGU_LUT[16U]; /*!< Offset: 0x510 (R/W) ITGU Look Up Table Register */ + uint32_t RESERVED4[44U]; + __IOM uint32_t DTGU_CTRL; /*!< Offset: 0x600 (R/W) DTGU Control Registers */ + __IOM uint32_t DTGU_CFG; /*!< Offset: 0x604 (R/W) DTGU Configuration Register */ + uint32_t RESERVED5[2U]; + __IOM uint32_t DTGU_LUT[16U]; /*!< Offset: 0x610 (R/W) DTGU Look Up Table Register */ +} MemSysCtl_Type; + +/* MEMSYSCTL Memory System Control Register (MSCR) Register Definitions */ +#define MEMSYSCTL_MSCR_CPWRDN_Pos 17U /*!< MEMSYSCTL MSCR: CPWRDN Position */ +#define MEMSYSCTL_MSCR_CPWRDN_Msk (0x1UL << MEMSYSCTL_MSCR_CPWRDN_Pos) /*!< MEMSYSCTL MSCR: CPWRDN Mask */ + +#define MEMSYSCTL_MSCR_DCCLEAN_Pos 16U /*!< MEMSYSCTL MSCR: DCCLEAN Position */ +#define MEMSYSCTL_MSCR_DCCLEAN_Msk (0x1UL << MEMSYSCTL_MSCR_DCCLEAN_Pos) /*!< MEMSYSCTL MSCR: DCCLEAN Mask */ + +#define MEMSYSCTL_MSCR_ICACTIVE_Pos 13U /*!< MEMSYSCTL MSCR: ICACTIVE Position */ +#define MEMSYSCTL_MSCR_ICACTIVE_Msk (0x1UL << MEMSYSCTL_MSCR_ICACTIVE_Pos) /*!< MEMSYSCTL MSCR: ICACTIVE Mask */ + +#define MEMSYSCTL_MSCR_DCACTIVE_Pos 12U /*!< MEMSYSCTL MSCR: DCACTIVE Position */ +#define MEMSYSCTL_MSCR_DCACTIVE_Msk (0x1UL << MEMSYSCTL_MSCR_DCACTIVE_Pos) /*!< MEMSYSCTL MSCR: DCACTIVE Mask */ + +#define MEMSYSCTL_MSCR_TECCCHKDIS_Pos 4U /*!< MEMSYSCTL MSCR: TECCCHKDIS Position */ +#define MEMSYSCTL_MSCR_TECCCHKDIS_Msk (0x1UL << MEMSYSCTL_MSCR_TECCCHKDIS_Pos) /*!< MEMSYSCTL MSCR: TECCCHKDIS Mask */ + +#define MEMSYSCTL_MSCR_EVECCFAULT_Pos 3U /*!< MEMSYSCTL MSCR: EVECCFAULT Position */ +#define MEMSYSCTL_MSCR_EVECCFAULT_Msk (0x1UL << MEMSYSCTL_MSCR_EVECCFAULT_Pos) /*!< MEMSYSCTL MSCR: EVECCFAULT Mask */ + +#define MEMSYSCTL_MSCR_FORCEWT_Pos 2U /*!< MEMSYSCTL MSCR: FORCEWT Position */ +#define MEMSYSCTL_MSCR_FORCEWT_Msk (0x1UL << MEMSYSCTL_MSCR_FORCEWT_Pos) /*!< MEMSYSCTL MSCR: FORCEWT Mask */ + +#define MEMSYSCTL_MSCR_ECCEN_Pos 1U /*!< MEMSYSCTL MSCR: ECCEN Position */ +#define MEMSYSCTL_MSCR_ECCEN_Msk (0x1UL << MEMSYSCTL_MSCR_ECCEN_Pos) 
/*!< MEMSYSCTL MSCR: ECCEN Mask */ + +/* MEMSYSCTL Prefetcher Control Register (PFCR) Register Definitions */ +#define MEMSYSCTL_PFCR_MAX_OS_Pos 7U /*!< MEMSYSCTL PFCR: MAX_OS Position */ +#define MEMSYSCTL_PFCR_MAX_OS_Msk (0x7UL << MEMSYSCTL_PFCR_MAX_OS_Pos) /*!< MEMSYSCTL PFCR: MAX_OS Mask */ + +#define MEMSYSCTL_PFCR_MAX_LA_Pos 4U /*!< MEMSYSCTL PFCR: MAX_LA Position */ +#define MEMSYSCTL_PFCR_MAX_LA_Msk (0x7UL << MEMSYSCTL_PFCR_MAX_LA_Pos) /*!< MEMSYSCTL PFCR: MAX_LA Mask */ + +#define MEMSYSCTL_PFCR_MIN_LA_Pos 1U /*!< MEMSYSCTL PFCR: MIN_LA Position */ +#define MEMSYSCTL_PFCR_MIN_LA_Msk (0x7UL << MEMSYSCTL_PFCR_MIN_LA_Pos) /*!< MEMSYSCTL PFCR: MIN_LA Mask */ + +#define MEMSYSCTL_PFCR_ENABLE_Pos 0U /*!< MEMSYSCTL PFCR: ENABLE Position */ +#define MEMSYSCTL_PFCR_ENABLE_Msk (0x1UL /*<< MEMSYSCTL_PFCR_ENABLE_Pos*/) /*!< MEMSYSCTL PFCR: ENABLE Mask */ + +/* MEMSYSCTL ITCM Control Register (ITCMCR) Register Definitions */ +#define MEMSYSCTL_ITCMCR_SZ_Pos 3U /*!< MEMSYSCTL ITCMCR: SZ Position */ +#define MEMSYSCTL_ITCMCR_SZ_Msk (0xFUL << MEMSYSCTL_ITCMCR_SZ_Pos) /*!< MEMSYSCTL ITCMCR: SZ Mask */ + +#define MEMSYSCTL_ITCMCR_EN_Pos 0U /*!< MEMSYSCTL ITCMCR: EN Position */ +#define MEMSYSCTL_ITCMCR_EN_Msk (0x1UL /*<< MEMSYSCTL_ITCMCR_EN_Pos*/) /*!< MEMSYSCTL ITCMCR: EN Mask */ + +/* MEMSYSCTL DTCM Control Register (DTCMCR) Register Definitions */ +#define MEMSYSCTL_DTCMCR_SZ_Pos 3U /*!< MEMSYSCTL DTCMCR: SZ Position */ +#define MEMSYSCTL_DTCMCR_SZ_Msk (0xFUL << MEMSYSCTL_DTCMCR_SZ_Pos) /*!< MEMSYSCTL DTCMCR: SZ Mask */ + +#define MEMSYSCTL_DTCMCR_EN_Pos 0U /*!< MEMSYSCTL DTCMCR: EN Position */ +#define MEMSYSCTL_DTCMCR_EN_Msk (0x1UL /*<< MEMSYSCTL_DTCMCR_EN_Pos*/) /*!< MEMSYSCTL DTCMCR: EN Mask */ + +/* MEMSYSCTL P-AHB Control Register (PAHBCR) Register Definitions */ +#define MEMSYSCTL_PAHBCR_SZ_Pos 1U /*!< MEMSYSCTL PAHBCR: SZ Position */ +#define MEMSYSCTL_PAHBCR_SZ_Msk (0x7UL << MEMSYSCTL_PAHBCR_SZ_Pos) /*!< MEMSYSCTL PAHBCR: SZ Mask */ + +#define MEMSYSCTL_PAHBCR_EN_Pos 0U /*!< MEMSYSCTL PAHBCR: EN Position */ +#define MEMSYSCTL_PAHBCR_EN_Msk (0x1UL /*<< MEMSYSCTL_PAHBCR_EN_Pos*/) /*!< MEMSYSCTL PAHBCR: EN Mask */ + +/* MEMSYSCTL ITGU Control Register (ITGU_CTRL) Register Definitions */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL ITGU_CTRL: DEREN Position */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Msk (0x1UL << MEMSYSCTL_ITGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL ITGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL ITGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Msk (0x1UL /*<< MEMSYSCTL_ITGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL ITGU_CTRL: DBFEN Mask */ + +/* MEMSYSCTL ITGU Configuration Register (ITGU_CFG) Register Definitions */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL ITGU_CFG: PRESENT Position */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Msk (0x1UL << MEMSYSCTL_ITGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL ITGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL ITGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_ITGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL ITGU_CFG: BLKSZ Mask */ + +/* MEMSYSCTL DTGU Control Registers (DTGU_CTRL) Register Definitions */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL DTGU_CTRL: DEREN Position */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Msk 
(0x1UL << MEMSYSCTL_DTGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL DTGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL DTGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Msk (0x1UL /*<< MEMSYSCTL_DTGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL DTGU_CTRL: DBFEN Mask */ + +/* MEMSYSCTL DTGU Configuration Register (DTGU_CFG) Register Definitions */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL DTGU_CFG: PRESENT Position */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Msk (0x1UL << MEMSYSCTL_DTGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL DTGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL DTGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_DTGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL DTGU_CFG: BLKSZ Mask */ + + +/*@}*/ /* end of group MemSysCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup PwrModCtl_Type Power Mode Control Registers + \brief Type definitions for the Power Mode Control Registers (PWRMODCTL) + @{ + */ + +/** + \brief Structure type to access the Power Mode Control Registers (PWRMODCTL). + */ +typedef struct +{ + __IOM uint32_t CPDLPSTATE; /*!< Offset: 0x000 (R/W) Core Power Domain Low Power State Register */ + __IOM uint32_t DPDLPSTATE; /*!< Offset: 0x004 (R/W) Debug Power Domain Low Power State Register */ +} PwrModCtl_Type; + +/* PWRMODCTL Core Power Domain Low Power State (CPDLPSTATE) Register Definitions */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos 8U /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos 4U /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos 0U /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk (0x3UL /*<< PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos*/) /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Mask */ + +/* PWRMODCTL Debug Power Domain Low Power State (DPDLPSTATE) Register Definitions */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos 0U /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Position */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Msk (0x3UL /*<< PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos*/) /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Mask */ + +/*@}*/ /* end of group PwrModCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup EWIC_Type External Wakeup Interrupt Controller Registers + \brief Type definitions for the External Wakeup Interrupt Controller Registers (EWIC) + @{ + */ + +/** + \brief Structure type to access the External Wakeup Interrupt Controller Registers (EWIC). 
+ */ +typedef struct +{ + __OM uint32_t EVENTSPR; /*!< Offset: 0x000 ( /W) Event Set Pending Register */ + uint32_t RESERVED0[31U]; + __IM uint32_t EVENTMASKA; /*!< Offset: 0x080 (R/W) Event Mask A Register */ + __IM uint32_t EVENTMASK[15]; /*!< Offset: 0x084 (R/W) Event Mask Register */ +} EWIC_Type; + +/* EWIC External Wakeup Interrupt Controller (EVENTSPR) Register Definitions */ +#define EWIC_EVENTSPR_EDBGREQ_Pos 2U /*!< EWIC EVENTSPR: EDBGREQ Position */ +#define EWIC_EVENTSPR_EDBGREQ_Msk (0x1UL << EWIC_EVENTSPR_EDBGREQ_Pos) /*!< EWIC EVENTSPR: EDBGREQ Mask */ + +#define EWIC_EVENTSPR_NMI_Pos 1U /*!< EWIC EVENTSPR: NMI Position */ +#define EWIC_EVENTSPR_NMI_Msk (0x1UL << EWIC_EVENTSPR_NMI_Pos) /*!< EWIC EVENTSPR: NMI Mask */ + +#define EWIC_EVENTSPR_EVENT_Pos 0U /*!< EWIC EVENTSPR: EVENT Position */ +#define EWIC_EVENTSPR_EVENT_Msk (0x1UL /*<< EWIC_EVENTSPR_EVENT_Pos*/) /*!< EWIC EVENTSPR: EVENT Mask */ + +/* EWIC External Wakeup Interrupt Controller (EVENTMASKA) Register Definitions */ +#define EWIC_EVENTMASKA_EDBGREQ_Pos 2U /*!< EWIC EVENTMASKA: EDBGREQ Position */ +#define EWIC_EVENTMASKA_EDBGREQ_Msk (0x1UL << EWIC_EVENTMASKA_EDBGREQ_Pos) /*!< EWIC EVENTMASKA: EDBGREQ Mask */ + +#define EWIC_EVENTMASKA_NMI_Pos 1U /*!< EWIC EVENTMASKA: NMI Position */ +#define EWIC_EVENTMASKA_NMI_Msk (0x1UL << EWIC_EVENTMASKA_NMI_Pos) /*!< EWIC EVENTMASKA: NMI Mask */ + +#define EWIC_EVENTMASKA_EVENT_Pos 0U /*!< EWIC EVENTMASKA: EVENT Position */ +#define EWIC_EVENTMASKA_EVENT_Msk (0x1UL /*<< EWIC_EVENTMASKA_EVENT_Pos*/) /*!< EWIC EVENTMASKA: EVENT Mask */ + +/* EWIC External Wakeup Interrupt Controller (EVENTMASK) Register Definitions */ +#define EWIC_EVENTMASK_IRQ_Pos 0U /*!< EWIC EVENTMASKA: IRQ Position */ +#define EWIC_EVENTMASK_IRQ_Msk (0xFFFFFFFFUL /*<< EWIC_EVENTMASKA_IRQ_Pos*/) /*!< EWIC EVENTMASKA: IRQ Mask */ + +/*@}*/ /* end of group EWIC_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup ErrBnk_Type Error Banking Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Error Banking Registers (ERRBNK) + @{ + */ + +/** + \brief Structure type to access the Error Banking Registers (ERRBNK). 
+ */ +typedef struct +{ + __IOM uint32_t IEBR0; /*!< Offset: 0x000 (R/W) Instruction Cache Error Bank Register 0 */ + __IOM uint32_t IEBR1; /*!< Offset: 0x004 (R/W) Instruction Cache Error Bank Register 1 */ + uint32_t RESERVED0[2U]; + __IOM uint32_t DEBR0; /*!< Offset: 0x010 (R/W) Data Cache Error Bank Register 0 */ + __IOM uint32_t DEBR1; /*!< Offset: 0x014 (R/W) Data Cache Error Bank Register 1 */ + uint32_t RESERVED1[2U]; + __IOM uint32_t TEBR0; /*!< Offset: 0x020 (R/W) TCM Error Bank Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t TEBR1; /*!< Offset: 0x028 (R/W) TCM Error Bank Register 1 */ +} ErrBnk_Type; + +/* ERRBNK Instruction Cache Error Bank Register 0 (IEBR0) Register Definitions */ +#define ERRBNK_IEBR0_SWDEF_Pos 30U /*!< ERRBNK IEBR0: SWDEF Position */ +#define ERRBNK_IEBR0_SWDEF_Msk (0x3UL << ERRBNK_IEBR0_SWDEF_Pos) /*!< ERRBNK IEBR0: SWDEF Mask */ + +#define ERRBNK_IEBR0_BANK_Pos 16U /*!< ERRBNK IEBR0: BANK Position */ +#define ERRBNK_IEBR0_BANK_Msk (0x1UL << ERRBNK_IEBR0_BANK_Pos) /*!< ERRBNK IEBR0: BANK Mask */ + +#define ERRBNK_IEBR0_LOCATION_Pos 2U /*!< ERRBNK IEBR0: LOCATION Position */ +#define ERRBNK_IEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR0_LOCATION_Pos) /*!< ERRBNK IEBR0: LOCATION Mask */ + +#define ERRBNK_IEBR0_LOCKED_Pos 1U /*!< ERRBNK IEBR0: LOCKED Position */ +#define ERRBNK_IEBR0_LOCKED_Msk (0x1UL << ERRBNK_IEBR0_LOCKED_Pos) /*!< ERRBNK IEBR0: LOCKED Mask */ + +#define ERRBNK_IEBR0_VALID_Pos 0U /*!< ERRBNK IEBR0: VALID Position */ +#define ERRBNK_IEBR0_VALID_Msk (0x1UL << /*ERRBNK_IEBR0_VALID_Pos*/) /*!< ERRBNK IEBR0: VALID Mask */ + +/* ERRBNK Instruction Cache Error Bank Register 1 (IEBR1) Register Definitions */ +#define ERRBNK_IEBR1_SWDEF_Pos 30U /*!< ERRBNK IEBR1: SWDEF Position */ +#define ERRBNK_IEBR1_SWDEF_Msk (0x3UL << ERRBNK_IEBR1_SWDEF_Pos) /*!< ERRBNK IEBR1: SWDEF Mask */ + +#define ERRBNK_IEBR1_BANK_Pos 16U /*!< ERRBNK IEBR1: BANK Position */ +#define ERRBNK_IEBR1_BANK_Msk (0x1UL << ERRBNK_IEBR1_BANK_Pos) /*!< ERRBNK IEBR1: BANK Mask */ + +#define ERRBNK_IEBR1_LOCATION_Pos 2U /*!< ERRBNK IEBR1: LOCATION Position */ +#define ERRBNK_IEBR1_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR1_LOCATION_Pos) /*!< ERRBNK IEBR1: LOCATION Mask */ + +#define ERRBNK_IEBR1_LOCKED_Pos 1U /*!< ERRBNK IEBR1: LOCKED Position */ +#define ERRBNK_IEBR1_LOCKED_Msk (0x1UL << ERRBNK_IEBR1_LOCKED_Pos) /*!< ERRBNK IEBR1: LOCKED Mask */ + +#define ERRBNK_IEBR1_VALID_Pos 0U /*!< ERRBNK IEBR1: VALID Position */ +#define ERRBNK_IEBR1_VALID_Msk (0x1UL << /*ERRBNK_IEBR1_VALID_Pos*/) /*!< ERRBNK IEBR1: VALID Mask */ + +/* ERRBNK Data Cache Error Bank Register 0 (DEBR0) Register Definitions */ +#define ERRBNK_DEBR0_SWDEF_Pos 30U /*!< ERRBNK DEBR0: SWDEF Position */ +#define ERRBNK_DEBR0_SWDEF_Msk (0x3UL << ERRBNK_DEBR0_SWDEF_Pos) /*!< ERRBNK DEBR0: SWDEF Mask */ + +#define ERRBNK_DEBR0_TYPE_Pos 17U /*!< ERRBNK DEBR0: TYPE Position */ +#define ERRBNK_DEBR0_TYPE_Msk (0x1UL << ERRBNK_DEBR0_TYPE_Pos) /*!< ERRBNK DEBR0: TYPE Mask */ + +#define ERRBNK_DEBR0_BANK_Pos 16U /*!< ERRBNK DEBR0: BANK Position */ +#define ERRBNK_DEBR0_BANK_Msk (0x1UL << ERRBNK_DEBR0_BANK_Pos) /*!< ERRBNK DEBR0: BANK Mask */ + +#define ERRBNK_DEBR0_LOCATION_Pos 2U /*!< ERRBNK DEBR0: LOCATION Position */ +#define ERRBNK_DEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_DEBR0_LOCATION_Pos) /*!< ERRBNK DEBR0: LOCATION Mask */ + +#define ERRBNK_DEBR0_LOCKED_Pos 1U /*!< ERRBNK DEBR0: LOCKED Position */ +#define ERRBNK_DEBR0_LOCKED_Msk (0x1UL << ERRBNK_DEBR0_LOCKED_Pos) /*!< ERRBNK DEBR0: LOCKED Mask */ + +#define 
ERRBNK_DEBR0_VALID_Pos 0U /*!< ERRBNK DEBR0: VALID Position */ +#define ERRBNK_DEBR0_VALID_Msk (0x1UL << /*ERRBNK_DEBR0_VALID_Pos*/) /*!< ERRBNK DEBR0: VALID Mask */ + +/* ERRBNK Data Cache Error Bank Register 1 (DEBR1) Register Definitions */ +#define ERRBNK_DEBR1_SWDEF_Pos 30U /*!< ERRBNK DEBR1: SWDEF Position */ +#define ERRBNK_DEBR1_SWDEF_Msk (0x3UL << ERRBNK_DEBR1_SWDEF_Pos) /*!< ERRBNK DEBR1: SWDEF Mask */ + +#define ERRBNK_DEBR1_TYPE_Pos 17U /*!< ERRBNK DEBR1: TYPE Position */ +#define ERRBNK_DEBR1_TYPE_Msk (0x1UL << ERRBNK_DEBR1_TYPE_Pos) /*!< ERRBNK DEBR1: TYPE Mask */ + +#define ERRBNK_DEBR1_BANK_Pos 16U /*!< ERRBNK DEBR1: BANK Position */ +#define ERRBNK_DEBR1_BANK_Msk (0x1UL << ERRBNK_DEBR1_BANK_Pos) /*!< ERRBNK DEBR1: BANK Mask */ + +#define ERRBNK_DEBR1_LOCATION_Pos 2U /*!< ERRBNK DEBR1: LOCATION Position */ +#define ERRBNK_DEBR1_LOCATION_Msk (0x3FFFUL << ERRBNK_DEBR1_LOCATION_Pos) /*!< ERRBNK DEBR1: LOCATION Mask */ + +#define ERRBNK_DEBR1_LOCKED_Pos 1U /*!< ERRBNK DEBR1: LOCKED Position */ +#define ERRBNK_DEBR1_LOCKED_Msk (0x1UL << ERRBNK_DEBR1_LOCKED_Pos) /*!< ERRBNK DEBR1: LOCKED Mask */ + +#define ERRBNK_DEBR1_VALID_Pos 0U /*!< ERRBNK DEBR1: VALID Position */ +#define ERRBNK_DEBR1_VALID_Msk (0x1UL << /*ERRBNK_DEBR1_VALID_Pos*/) /*!< ERRBNK DEBR1: VALID Mask */ + +/* ERRBNK TCM Error Bank Register 0 (TEBR0) Register Definitions */ +#define ERRBNK_TEBR0_SWDEF_Pos 30U /*!< ERRBNK TEBR0: SWDEF Position */ +#define ERRBNK_TEBR0_SWDEF_Msk (0x3UL << ERRBNK_TEBR0_SWDEF_Pos) /*!< ERRBNK TEBR0: SWDEF Mask */ + +#define ERRBNK_TEBR0_POISON_Pos 28U /*!< ERRBNK TEBR0: POISON Position */ +#define ERRBNK_TEBR0_POISON_Msk (0x1UL << ERRBNK_TEBR0_POISON_Pos) /*!< ERRBNK TEBR0: POISON Mask */ + +#define ERRBNK_TEBR0_TYPE_Pos 27U /*!< ERRBNK TEBR0: TYPE Position */ +#define ERRBNK_TEBR0_TYPE_Msk (0x1UL << ERRBNK_TEBR0_TYPE_Pos) /*!< ERRBNK TEBR0: TYPE Mask */ + +#define ERRBNK_TEBR0_BANK_Pos 24U /*!< ERRBNK TEBR0: BANK Position */ +#define ERRBNK_TEBR0_BANK_Msk (0x3UL << ERRBNK_TEBR0_BANK_Pos) /*!< ERRBNK TEBR0: BANK Mask */ + +#define ERRBNK_TEBR0_LOCATION_Pos 2U /*!< ERRBNK TEBR0: LOCATION Position */ +#define ERRBNK_TEBR0_LOCATION_Msk (0x3FFFFFUL << ERRBNK_TEBR0_LOCATION_Pos) /*!< ERRBNK TEBR0: LOCATION Mask */ + +#define ERRBNK_TEBR0_LOCKED_Pos 1U /*!< ERRBNK TEBR0: LOCKED Position */ +#define ERRBNK_TEBR0_LOCKED_Msk (0x1UL << ERRBNK_TEBR0_LOCKED_Pos) /*!< ERRBNK TEBR0: LOCKED Mask */ + +#define ERRBNK_TEBR0_VALID_Pos 0U /*!< ERRBNK TEBR0: VALID Position */ +#define ERRBNK_TEBR0_VALID_Msk (0x1UL << /*ERRBNK_TEBR0_VALID_Pos*/) /*!< ERRBNK TEBR0: VALID Mask */ + +/* ERRBNK TCM Error Bank Register 1 (TEBR1) Register Definitions */ +#define ERRBNK_TEBR1_SWDEF_Pos 30U /*!< ERRBNK TEBR1: SWDEF Position */ +#define ERRBNK_TEBR1_SWDEF_Msk (0x3UL << ERRBNK_TEBR1_SWDEF_Pos) /*!< ERRBNK TEBR1: SWDEF Mask */ + +#define ERRBNK_TEBR1_POISON_Pos 28U /*!< ERRBNK TEBR1: POISON Position */ +#define ERRBNK_TEBR1_POISON_Msk (0x1UL << ERRBNK_TEBR1_POISON_Pos) /*!< ERRBNK TEBR1: POISON Mask */ + +#define ERRBNK_TEBR1_TYPE_Pos 27U /*!< ERRBNK TEBR1: TYPE Position */ +#define ERRBNK_TEBR1_TYPE_Msk (0x1UL << ERRBNK_TEBR1_TYPE_Pos) /*!< ERRBNK TEBR1: TYPE Mask */ + +#define ERRBNK_TEBR1_BANK_Pos 24U /*!< ERRBNK TEBR1: BANK Position */ +#define ERRBNK_TEBR1_BANK_Msk (0x3UL << ERRBNK_TEBR1_BANK_Pos) /*!< ERRBNK TEBR1: BANK Mask */ + +#define ERRBNK_TEBR1_LOCATION_Pos 2U /*!< ERRBNK TEBR1: LOCATION Position */ +#define ERRBNK_TEBR1_LOCATION_Msk (0x3FFFFFUL << ERRBNK_TEBR1_LOCATION_Pos) /*!< ERRBNK TEBR1: LOCATION 
+
+
+/**
+ \ingroup CMSIS_core_register
+ \defgroup PrcCfgInf_Type Processor Configuration Information Registers (IMPLEMENTATION DEFINED)
+ \brief Type definitions for the Processor Configuration Information Registers (PRCCFGINF)
+ @{
+ */
+
+/**
+ \brief Structure type to access the Processor Configuration Information Registers (PRCCFGINF).
+ */
+typedef struct
+{
+ __OM uint32_t CFGINFOSEL; /*!< Offset: 0x000 ( /W) Processor Configuration Information Selection Register */
+ __IM uint32_t CFGINFORD; /*!< Offset: 0x004 (R/ ) Processor Configuration Information Read Data Register */
+} PrcCfgInf_Type;
+
+/* PRCCFGINF Processor Configuration Information Selection Register (CFGINFOSEL) Definitions */
+
+/* PRCCFGINF Processor Configuration Information Read Data Register (CFGINFORD) Definitions */
+
+/*@}*/ /* end of group PrcCfgInf_Type */
+
+
+/**
+ \ingroup CMSIS_core_register
+ \defgroup STL_Type Software Test Library Observation Registers
+ \brief Type definitions for the Software Test Library Observation Registers (STL)
+ @{
+ */
+
+/**
+ \brief Structure type to access the Software Test Library Observation Registers (STL).
+ */
+typedef struct
+{
+ __IM uint32_t STLNVICPENDOR; /*!< Offset: 0x000 (R/ ) NVIC Pending Priority Tree Register */
+ __IM uint32_t STLNVICACTVOR; /*!< Offset: 0x004 (R/ ) NVIC Active Priority Tree Register */
+ uint32_t RESERVED0[2U];
+ __OM uint32_t STLIDMPUSR; /*!< Offset: 0x010 ( /W) MPU Sample Register */
+ __IM uint32_t STLIMPUOR; /*!< Offset: 0x014 (R/ ) MPU Region Hit Register */
+ __IM uint32_t STLD0MPUOR; /*!< Offset: 0x018 (R/ ) MPU Memory Attributes Register 0 */
+ __IM uint32_t STLD1MPUOR; /*!< Offset: 0x01C (R/ ) MPU Memory Attributes Register 1 */
+
+} STL_Type;
+
+/* STL Software Test Library Observation Register (STLNVICPENDOR) Definitions */
+#define STL_STLNVICPENDOR_VALID_Pos 18U /*!< STL STLNVICPENDOR: VALID Position */
+#define STL_STLNVICPENDOR_VALID_Msk (0x1UL << STL_STLNVICPENDOR_VALID_Pos) /*!< STL STLNVICPENDOR: VALID Mask */
+
+#define STL_STLNVICPENDOR_TARGET_Pos 17U /*!< STL STLNVICPENDOR: TARGET Position */
+#define STL_STLNVICPENDOR_TARGET_Msk (0x1UL << STL_STLNVICPENDOR_TARGET_Pos) /*!< STL STLNVICPENDOR: TARGET Mask */
+
+#define STL_STLNVICPENDOR_PRIORITY_Pos 9U /*!< STL STLNVICPENDOR: PRIORITY Position */
+#define STL_STLNVICPENDOR_PRIORITY_Msk (0xFFUL << STL_STLNVICPENDOR_PRIORITY_Pos) /*!< STL STLNVICPENDOR: PRIORITY Mask */
+
+#define STL_STLNVICPENDOR_INTNUM_Pos 0U /*!< STL STLNVICPENDOR: INTNUM Position */
+#define STL_STLNVICPENDOR_INTNUM_Msk (0x1FFUL /*<< STL_STLNVICPENDOR_INTNUM_Pos*/) /*!< STL STLNVICPENDOR: INTNUM Mask */
+
+/* STL Software Test Library Observation Register (STLNVICACTVOR) Definitions */
+#define STL_STLNVICACTVOR_VALID_Pos 18U /*!< STL STLNVICACTVOR: VALID Position */
+#define STL_STLNVICACTVOR_VALID_Msk (0x1UL << STL_STLNVICACTVOR_VALID_Pos) /*!< STL STLNVICACTVOR: VALID Mask */
+
+#define STL_STLNVICACTVOR_TARGET_Pos 17U /*!< STL STLNVICACTVOR: TARGET Position */
+#define STL_STLNVICACTVOR_TARGET_Msk (0x1UL << STL_STLNVICACTVOR_TARGET_Pos) /*!< STL STLNVICACTVOR: TARGET Mask */
+
+#define
STL_STLNVICACTVOR_PRIORITY_Pos 9U /*!< STL STLNVICACTVOR: PRIORITY Position */ +#define STL_STLNVICACTVOR_PRIORITY_Msk (0xFFUL << STL_STLNVICACTVOR_PRIORITY_Pos) /*!< STL STLNVICACTVOR: PRIORITY Mask */ + +#define STL_STLNVICACTVOR_INTNUM_Pos 0U /*!< STL STLNVICACTVOR: INTNUM Position */ +#define STL_STLNVICACTVOR_INTNUM_Msk (0x1FFUL /*<< STL_STLNVICACTVOR_INTNUM_Pos*/) /*!< STL STLNVICACTVOR: INTNUM Mask */ + +/* STL Software Test Library Observation Register (STLIDMPUSR) Definitions */ +#define STL_STLIDMPUSR_ADDR_Pos 5U /*!< STL STLIDMPUSR: ADDR Position */ +#define STL_STLIDMPUSR_ADDR_Msk (0x7FFFFFFUL << STL_STLIDMPUSR_ADDR_Pos) /*!< STL STLIDMPUSR: ADDR Mask */ + +#define STL_STLIDMPUSR_INSTR_Pos 2U /*!< STL STLIDMPUSR: INSTR Position */ +#define STL_STLIDMPUSR_INSTR_Msk (0x1UL << STL_STLIDMPUSR_INSTR_Pos) /*!< STL STLIDMPUSR: INSTR Mask */ + +#define STL_STLIDMPUSR_DATA_Pos 1U /*!< STL STLIDMPUSR: DATA Position */ +#define STL_STLIDMPUSR_DATA_Msk (0x1UL << STL_STLIDMPUSR_DATA_Pos) /*!< STL STLIDMPUSR: DATA Mask */ + +/* STL Software Test Library Observation Register (STLIMPUOR) Definitions */ +#define STL_STLIMPUOR_HITREGION_Pos 9U /*!< STL STLIMPUOR: HITREGION Position */ +#define STL_STLIMPUOR_HITREGION_Msk (0xFFUL << STL_STLIMPUOR_HITREGION_Pos) /*!< STL STLIMPUOR: HITREGION Mask */ + +#define STL_STLIMPUOR_ATTR_Pos 0U /*!< STL STLIMPUOR: ATTR Position */ +#define STL_STLIMPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLIMPUOR_ATTR_Pos*/) /*!< STL STLIMPUOR: ATTR Mask */ + +/* STL Software Test Library Observation Register (STLD0MPUOR) Definitions */ +#define STL_STLD0MPUOR_HITREGION_Pos 9U /*!< STL STLD0MPUOR: HITREGION Position */ +#define STL_STLD0MPUOR_HITREGION_Msk (0xFFUL << STL_STLD0MPUOR_HITREGION_Pos) /*!< STL STLD0MPUOR: HITREGION Mask */ + +#define STL_STLD0MPUOR_ATTR_Pos 0U /*!< STL STLD0MPUOR: ATTR Position */ +#define STL_STLD0MPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLD0MPUOR_ATTR_Pos*/) /*!< STL STLD0MPUOR: ATTR Mask */ + +/* STL Software Test Library Observation Register (STLD1MPUOR) Definitions */ +#define STL_STLD1MPUOR_HITREGION_Pos 9U /*!< STL STLD1MPUOR: HITREGION Position */ +#define STL_STLD1MPUOR_HITREGION_Msk (0xFFUL << STL_STLD1MPUOR_HITREGION_Pos) /*!< STL STLD1MPUOR: HITREGION Mask */ + +#define STL_STLD1MPUOR_ATTR_Pos 0U /*!< STL STLD1MPUOR: ATTR Position */ +#define STL_STLD1MPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLD1MPUOR_ATTR_Pos*/) /*!< STL STLD1MPUOR: ATTR Mask */ + +/*@}*/ /* end of group STL_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
+ */
+typedef struct
+{
+ __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */
+ __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */
+ uint32_t RESERVED0[2U];
+ __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */
+ uint32_t RESERVED1[55U];
+ __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */
+ uint32_t RESERVED2[131U];
+ __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */
+ __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */
+ __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */
+ uint32_t RESERVED3[809U];
+ __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */
+ __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */
+ uint32_t RESERVED4[4U];
+ __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */
+ __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */
+} TPI_Type;
+
+/* TPI Asynchronous Clock Prescaler Register Definitions */
+#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */
+#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */
+
+/* TPI Selected Pin Protocol Register Definitions */
+#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */
+#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */
+
+/* TPI Formatter and Flush Status Register Definitions */
+#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */
+#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */
+
+#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */
+#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */
+
+#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */
+#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */
+
+#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */
+#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */
+
+/* TPI Formatter and Flush Control Register Definitions */
+#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */
+#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */
+
+#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */
+#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */
+
+#define TPI_FFCR_EnFmt_Pos 0U /*!< TPI FFCR: EnFmt Position */
+#define TPI_FFCR_EnFmt_Msk (0x3UL /*<< TPI_FFCR_EnFmt_Pos*/) /*!< TPI FFCR: EnFmt Mask */
+
+/* TPI Periodic Synchronization Control Register Definitions */
+#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */
+#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: PSCount Mask */
+
+/* TPI Software Lock Status Register Definitions */
+#define TPI_LSR_nTT_Pos 2U /*!< TPI LSR: Not thirty-two bit. Position */
+#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit.
Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_PMU Performance Monitoring Unit (PMU) + \brief Type definitions for the Performance Monitoring Unit (PMU) + @{ + */ + +/** + \brief Structure type to access the Performance Monitoring Unit (PMU). 
+ */ +typedef struct +{ + __IOM uint32_t EVCNTR[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x0 (R/W) PMU Event Counter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED0[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCNTR; /*!< Offset: 0x7C (R/W) PMU Cycle Counter Register */ + uint32_t RESERVED1[224]; + __IOM uint32_t EVTYPER[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x400 (R/W) PMU Event Type and Filter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED2[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCFILTR; /*!< Offset: 0x47C (R/W) PMU Cycle Counter Filter Register */ + uint32_t RESERVED3[480]; + __IOM uint32_t CNTENSET; /*!< Offset: 0xC00 (R/W) PMU Count Enable Set Register */ + uint32_t RESERVED4[7]; + __IOM uint32_t CNTENCLR; /*!< Offset: 0xC20 (R/W) PMU Count Enable Clear Register */ + uint32_t RESERVED5[7]; + __IOM uint32_t INTENSET; /*!< Offset: 0xC40 (R/W) PMU Interrupt Enable Set Register */ + uint32_t RESERVED6[7]; + __IOM uint32_t INTENCLR; /*!< Offset: 0xC60 (R/W) PMU Interrupt Enable Clear Register */ + uint32_t RESERVED7[7]; + __IOM uint32_t OVSCLR; /*!< Offset: 0xC80 (R/W) PMU Overflow Flag Status Clear Register */ + uint32_t RESERVED8[7]; + __IOM uint32_t SWINC; /*!< Offset: 0xCA0 (R/W) PMU Software Increment Register */ + uint32_t RESERVED9[7]; + __IOM uint32_t OVSSET; /*!< Offset: 0xCC0 (R/W) PMU Overflow Flag Status Set Register */ + uint32_t RESERVED10[79]; + __IOM uint32_t TYPE; /*!< Offset: 0xE00 (R/W) PMU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0xE04 (R/W) PMU Control Register */ + uint32_t RESERVED11[108]; + __IOM uint32_t AUTHSTATUS; /*!< Offset: 0xFB8 (R/W) PMU Authentication Status Register */ + __IOM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/W) PMU Device Architecture Register */ + uint32_t RESERVED12[3]; + __IOM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/W) PMU Device Type Register */ + __IOM uint32_t PIDR4; /*!< Offset: 0xFD0 (R/W) PMU Peripheral Identification Register 4 */ + uint32_t RESERVED13[3]; + __IOM uint32_t PIDR0; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 0 */ + __IOM uint32_t PIDR1; /*!< Offset: 0xFE4 (R/W) PMU Peripheral Identification Register 1 */ + __IOM uint32_t PIDR2; /*!< Offset: 0xFE8 (R/W) PMU Peripheral Identification Register 2 */ + __IOM uint32_t PIDR3; /*!< Offset: 0xFEC (R/W) PMU Peripheral Identification Register 3 */ + __IOM uint32_t CIDR0; /*!< Offset: 0xFF0 (R/W) PMU Component Identification Register 0 */ + __IOM uint32_t CIDR1; /*!< Offset: 0xFF4 (R/W) PMU Component Identification Register 1 */ + __IOM uint32_t CIDR2; /*!< Offset: 0xFF8 (R/W) PMU Component Identification Register 2 */ + __IOM uint32_t CIDR3; /*!< Offset: 0xFFC (R/W) PMU Component Identification Register 3 */ +} PMU_Type; + +/** \brief PMU Event Counter Registers (0-30) Definitions */ + +#define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */ +#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ + +/** \brief PMU Event Type and Filter Registers (0-30) Definitions */ + +#define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */ +#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ + +/** \brief PMU Count Enable Set Register Definitions */ + +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENSET: Event Counter 0 Enable Set Position */ +#define PMU_CNTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENSET_CNT0_ENABLE_Pos*/) /*!< PMU CNTENSET: Event Counter 0 Enable Set Mask */ + +#define 
PMU_CNTENSET_CNT1_ENABLE_Pos 1U /*!< PMU CNTENSET: Event Counter 1 Enable Set Position */ +#define PMU_CNTENSET_CNT1_ENABLE_Msk (1UL << PMU_CNTENSET_CNT1_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 1 Enable Set Mask */ + +#define PMU_CNTENSET_CNT2_ENABLE_Pos 2U /*!< PMU CNTENSET: Event Counter 2 Enable Set Position */ +#define PMU_CNTENSET_CNT2_ENABLE_Msk (1UL << PMU_CNTENSET_CNT2_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 2 Enable Set Mask */ + +#define PMU_CNTENSET_CNT3_ENABLE_Pos 3U /*!< PMU CNTENSET: Event Counter 3 Enable Set Position */ +#define PMU_CNTENSET_CNT3_ENABLE_Msk (1UL << PMU_CNTENSET_CNT3_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 3 Enable Set Mask */ + +#define PMU_CNTENSET_CNT4_ENABLE_Pos 4U /*!< PMU CNTENSET: Event Counter 4 Enable Set Position */ +#define PMU_CNTENSET_CNT4_ENABLE_Msk (1UL << PMU_CNTENSET_CNT4_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 4 Enable Set Mask */ + +#define PMU_CNTENSET_CNT5_ENABLE_Pos 5U /*!< PMU CNTENSET: Event Counter 5 Enable Set Position */ +#define PMU_CNTENSET_CNT5_ENABLE_Msk (1UL << PMU_CNTENSET_CNT5_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 5 Enable Set Mask */ + +#define PMU_CNTENSET_CNT6_ENABLE_Pos 6U /*!< PMU CNTENSET: Event Counter 6 Enable Set Position */ +#define PMU_CNTENSET_CNT6_ENABLE_Msk (1UL << PMU_CNTENSET_CNT6_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 6 Enable Set Mask */ + +#define PMU_CNTENSET_CNT7_ENABLE_Pos 7U /*!< PMU CNTENSET: Event Counter 7 Enable Set Position */ +#define PMU_CNTENSET_CNT7_ENABLE_Msk (1UL << PMU_CNTENSET_CNT7_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 7 Enable Set Mask */ + +#define PMU_CNTENSET_CNT8_ENABLE_Pos 8U /*!< PMU CNTENSET: Event Counter 8 Enable Set Position */ +#define PMU_CNTENSET_CNT8_ENABLE_Msk (1UL << PMU_CNTENSET_CNT8_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 8 Enable Set Mask */ + +#define PMU_CNTENSET_CNT9_ENABLE_Pos 9U /*!< PMU CNTENSET: Event Counter 9 Enable Set Position */ +#define PMU_CNTENSET_CNT9_ENABLE_Msk (1UL << PMU_CNTENSET_CNT9_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 9 Enable Set Mask */ + +#define PMU_CNTENSET_CNT10_ENABLE_Pos 10U /*!< PMU CNTENSET: Event Counter 10 Enable Set Position */ +#define PMU_CNTENSET_CNT10_ENABLE_Msk (1UL << PMU_CNTENSET_CNT10_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 10 Enable Set Mask */ + +#define PMU_CNTENSET_CNT11_ENABLE_Pos 11U /*!< PMU CNTENSET: Event Counter 11 Enable Set Position */ +#define PMU_CNTENSET_CNT11_ENABLE_Msk (1UL << PMU_CNTENSET_CNT11_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 11 Enable Set Mask */ + +#define PMU_CNTENSET_CNT12_ENABLE_Pos 12U /*!< PMU CNTENSET: Event Counter 12 Enable Set Position */ +#define PMU_CNTENSET_CNT12_ENABLE_Msk (1UL << PMU_CNTENSET_CNT12_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 12 Enable Set Mask */ + +#define PMU_CNTENSET_CNT13_ENABLE_Pos 13U /*!< PMU CNTENSET: Event Counter 13 Enable Set Position */ +#define PMU_CNTENSET_CNT13_ENABLE_Msk (1UL << PMU_CNTENSET_CNT13_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 13 Enable Set Mask */ + +#define PMU_CNTENSET_CNT14_ENABLE_Pos 14U /*!< PMU CNTENSET: Event Counter 14 Enable Set Position */ +#define PMU_CNTENSET_CNT14_ENABLE_Msk (1UL << PMU_CNTENSET_CNT14_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 14 Enable Set Mask */ + +#define PMU_CNTENSET_CNT15_ENABLE_Pos 15U /*!< PMU CNTENSET: Event Counter 15 Enable Set Position */ +#define PMU_CNTENSET_CNT15_ENABLE_Msk (1UL << PMU_CNTENSET_CNT15_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 15 Enable Set Mask */ + +#define PMU_CNTENSET_CNT16_ENABLE_Pos 16U /*!< PMU CNTENSET: 
Event Counter 16 Enable Set Position */ +#define PMU_CNTENSET_CNT16_ENABLE_Msk (1UL << PMU_CNTENSET_CNT16_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 16 Enable Set Mask */ + +#define PMU_CNTENSET_CNT17_ENABLE_Pos 17U /*!< PMU CNTENSET: Event Counter 17 Enable Set Position */ +#define PMU_CNTENSET_CNT17_ENABLE_Msk (1UL << PMU_CNTENSET_CNT17_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 17 Enable Set Mask */ + +#define PMU_CNTENSET_CNT18_ENABLE_Pos 18U /*!< PMU CNTENSET: Event Counter 18 Enable Set Position */ +#define PMU_CNTENSET_CNT18_ENABLE_Msk (1UL << PMU_CNTENSET_CNT18_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 18 Enable Set Mask */ + +#define PMU_CNTENSET_CNT19_ENABLE_Pos 19U /*!< PMU CNTENSET: Event Counter 19 Enable Set Position */ +#define PMU_CNTENSET_CNT19_ENABLE_Msk (1UL << PMU_CNTENSET_CNT19_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 19 Enable Set Mask */ + +#define PMU_CNTENSET_CNT20_ENABLE_Pos 20U /*!< PMU CNTENSET: Event Counter 20 Enable Set Position */ +#define PMU_CNTENSET_CNT20_ENABLE_Msk (1UL << PMU_CNTENSET_CNT20_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 20 Enable Set Mask */ + +#define PMU_CNTENSET_CNT21_ENABLE_Pos 21U /*!< PMU CNTENSET: Event Counter 21 Enable Set Position */ +#define PMU_CNTENSET_CNT21_ENABLE_Msk (1UL << PMU_CNTENSET_CNT21_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 21 Enable Set Mask */ + +#define PMU_CNTENSET_CNT22_ENABLE_Pos 22U /*!< PMU CNTENSET: Event Counter 22 Enable Set Position */ +#define PMU_CNTENSET_CNT22_ENABLE_Msk (1UL << PMU_CNTENSET_CNT22_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 22 Enable Set Mask */ + +#define PMU_CNTENSET_CNT23_ENABLE_Pos 23U /*!< PMU CNTENSET: Event Counter 23 Enable Set Position */ +#define PMU_CNTENSET_CNT23_ENABLE_Msk (1UL << PMU_CNTENSET_CNT23_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 23 Enable Set Mask */ + +#define PMU_CNTENSET_CNT24_ENABLE_Pos 24U /*!< PMU CNTENSET: Event Counter 24 Enable Set Position */ +#define PMU_CNTENSET_CNT24_ENABLE_Msk (1UL << PMU_CNTENSET_CNT24_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 24 Enable Set Mask */ + +#define PMU_CNTENSET_CNT25_ENABLE_Pos 25U /*!< PMU CNTENSET: Event Counter 25 Enable Set Position */ +#define PMU_CNTENSET_CNT25_ENABLE_Msk (1UL << PMU_CNTENSET_CNT25_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 25 Enable Set Mask */ + +#define PMU_CNTENSET_CNT26_ENABLE_Pos 26U /*!< PMU CNTENSET: Event Counter 26 Enable Set Position */ +#define PMU_CNTENSET_CNT26_ENABLE_Msk (1UL << PMU_CNTENSET_CNT26_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 26 Enable Set Mask */ + +#define PMU_CNTENSET_CNT27_ENABLE_Pos 27U /*!< PMU CNTENSET: Event Counter 27 Enable Set Position */ +#define PMU_CNTENSET_CNT27_ENABLE_Msk (1UL << PMU_CNTENSET_CNT27_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 27 Enable Set Mask */ + +#define PMU_CNTENSET_CNT28_ENABLE_Pos 28U /*!< PMU CNTENSET: Event Counter 28 Enable Set Position */ +#define PMU_CNTENSET_CNT28_ENABLE_Msk (1UL << PMU_CNTENSET_CNT28_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 28 Enable Set Mask */ + +#define PMU_CNTENSET_CNT29_ENABLE_Pos 29U /*!< PMU CNTENSET: Event Counter 29 Enable Set Position */ +#define PMU_CNTENSET_CNT29_ENABLE_Msk (1UL << PMU_CNTENSET_CNT29_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 29 Enable Set Mask */ + +#define PMU_CNTENSET_CNT30_ENABLE_Pos 30U /*!< PMU CNTENSET: Event Counter 30 Enable Set Position */ +#define PMU_CNTENSET_CNT30_ENABLE_Msk (1UL << PMU_CNTENSET_CNT30_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 30 Enable Set Mask */ + +#define PMU_CNTENSET_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENSET: 
Cycle Counter Enable Set Position */
+#define PMU_CNTENSET_CCNTR_ENABLE_Msk (1UL << PMU_CNTENSET_CCNTR_ENABLE_Pos) /*!< PMU CNTENSET: Cycle Counter Enable Set Mask */
+
+/** \brief PMU Count Enable Clear Register Definitions */
+
+#define PMU_CNTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Position */
+#define PMU_CNTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Position */
+#define PMU_CNTENCLR_CNT1_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT1_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Position */
+#define PMU_CNTENCLR_CNT2_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT2_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Position */
+#define PMU_CNTENCLR_CNT3_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT3_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Position */
+#define PMU_CNTENCLR_CNT4_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT4_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Position */
+#define PMU_CNTENCLR_CNT5_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT5_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Position */
+#define PMU_CNTENCLR_CNT6_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT6_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Position */
+#define PMU_CNTENCLR_CNT7_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT7_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Position */
+#define PMU_CNTENCLR_CNT8_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT8_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Position */
+#define PMU_CNTENCLR_CNT9_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT9_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Position */
+#define PMU_CNTENCLR_CNT10_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT10_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Position */
+#define PMU_CNTENCLR_CNT11_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT11_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Position */
+#define PMU_CNTENCLR_CNT12_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT12_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Mask */
+
+#define PMU_CNTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Position */
+#define PMU_CNTENCLR_CNT13_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT13_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Mask */
+
+#define
PMU_CNTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Position */ +#define PMU_CNTENCLR_CNT14_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT14_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Position */ +#define PMU_CNTENCLR_CNT15_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT15_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Position */ +#define PMU_CNTENCLR_CNT16_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT16_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Position */ +#define PMU_CNTENCLR_CNT17_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT17_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Position */ +#define PMU_CNTENCLR_CNT18_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT18_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Position */ +#define PMU_CNTENCLR_CNT19_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT19_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Position */ +#define PMU_CNTENCLR_CNT20_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT20_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Position */ +#define PMU_CNTENCLR_CNT21_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT21_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Position */ +#define PMU_CNTENCLR_CNT22_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT22_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Position */ +#define PMU_CNTENCLR_CNT23_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT23_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Position */ +#define PMU_CNTENCLR_CNT24_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT24_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Position */ +#define PMU_CNTENCLR_CNT25_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT25_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Position */ +#define PMU_CNTENCLR_CNT26_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT26_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Position */ +#define PMU_CNTENCLR_CNT27_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT27_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Position */ +#define PMU_CNTENCLR_CNT28_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT28_ENABLE_Pos) /*!< 
PMU CNTENCLR: Event Counter 28 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Position */ +#define PMU_CNTENCLR_CNT29_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT29_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Position */ +#define PMU_CNTENCLR_CNT30_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT30_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Mask */ + +#define PMU_CNTENCLR_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENCLR: Cycle Counter Enable Clear Position */ +#define PMU_CNTENCLR_CCNTR_ENABLE_Msk (1UL << PMU_CNTENCLR_CCNTR_ENABLE_Pos) /*!< PMU CNTENCLR: Cycle Counter Enable Clear Mask */ + +/** \brief PMU Interrupt Enable Set Register Definitions */ + +#define PMU_INTENSET_CNT0_ENABLE_Pos 0U /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENSET_CNT0_ENABLE_Pos*/) /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT1_ENABLE_Pos 1U /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT1_ENABLE_Msk (1UL << PMU_INTENSET_CNT1_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT2_ENABLE_Pos 2U /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT2_ENABLE_Msk (1UL << PMU_INTENSET_CNT2_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT3_ENABLE_Pos 3U /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT3_ENABLE_Msk (1UL << PMU_INTENSET_CNT3_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT4_ENABLE_Pos 4U /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT4_ENABLE_Msk (1UL << PMU_INTENSET_CNT4_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT5_ENABLE_Pos 5U /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT5_ENABLE_Msk (1UL << PMU_INTENSET_CNT5_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT6_ENABLE_Pos 6U /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT6_ENABLE_Msk (1UL << PMU_INTENSET_CNT6_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT7_ENABLE_Pos 7U /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT7_ENABLE_Msk (1UL << PMU_INTENSET_CNT7_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT8_ENABLE_Pos 8U /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT8_ENABLE_Msk (1UL << PMU_INTENSET_CNT8_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT9_ENABLE_Pos 9U /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT9_ENABLE_Msk (1UL << PMU_INTENSET_CNT9_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT10_ENABLE_Pos 10U /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT10_ENABLE_Msk (1UL << PMU_INTENSET_CNT10_ENABLE_Pos) /*!< PMU 
INTENSET: Event Counter 10 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT11_ENABLE_Pos 11U /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT11_ENABLE_Msk (1UL << PMU_INTENSET_CNT11_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT12_ENABLE_Pos 12U /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT12_ENABLE_Msk (1UL << PMU_INTENSET_CNT12_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT13_ENABLE_Pos 13U /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT13_ENABLE_Msk (1UL << PMU_INTENSET_CNT13_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT14_ENABLE_Pos 14U /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT14_ENABLE_Msk (1UL << PMU_INTENSET_CNT14_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT15_ENABLE_Pos 15U /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT15_ENABLE_Msk (1UL << PMU_INTENSET_CNT15_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT16_ENABLE_Pos 16U /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT16_ENABLE_Msk (1UL << PMU_INTENSET_CNT16_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT17_ENABLE_Pos 17U /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT17_ENABLE_Msk (1UL << PMU_INTENSET_CNT17_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT18_ENABLE_Pos 18U /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT18_ENABLE_Msk (1UL << PMU_INTENSET_CNT18_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT19_ENABLE_Pos 19U /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT19_ENABLE_Msk (1UL << PMU_INTENSET_CNT19_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT20_ENABLE_Pos 20U /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT20_ENABLE_Msk (1UL << PMU_INTENSET_CNT20_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT21_ENABLE_Pos 21U /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT21_ENABLE_Msk (1UL << PMU_INTENSET_CNT21_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT22_ENABLE_Pos 22U /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT22_ENABLE_Msk (1UL << PMU_INTENSET_CNT22_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT23_ENABLE_Pos 23U /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT23_ENABLE_Msk (1UL << PMU_INTENSET_CNT23_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT24_ENABLE_Pos 24U /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT24_ENABLE_Msk (1UL 
<< PMU_INTENSET_CNT24_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT25_ENABLE_Pos 25U /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT25_ENABLE_Msk (1UL << PMU_INTENSET_CNT25_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT26_ENABLE_Pos 26U /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT26_ENABLE_Msk (1UL << PMU_INTENSET_CNT26_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT27_ENABLE_Pos 27U /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT27_ENABLE_Msk (1UL << PMU_INTENSET_CNT27_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT28_ENABLE_Pos 28U /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT28_ENABLE_Msk (1UL << PMU_INTENSET_CNT28_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT29_ENABLE_Pos 29U /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT29_ENABLE_Msk (1UL << PMU_INTENSET_CNT29_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CNT30_ENABLE_Pos 30U /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Position */
+#define PMU_INTENSET_CNT30_ENABLE_Msk (1UL << PMU_INTENSET_CNT30_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Mask */
+
+#define PMU_INTENSET_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Position */
+#define PMU_INTENSET_CYCCNT_ENABLE_Msk (1UL << PMU_INTENSET_CYCCNT_ENABLE_Pos) /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Mask */
+
+/** \brief PMU Interrupt Enable Clear Register Definitions */
+
+#define PMU_INTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT1_ENABLE_Msk (1UL << PMU_INTENCLR_CNT1_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT2_ENABLE_Msk (1UL << PMU_INTENCLR_CNT2_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT3_ENABLE_Msk (1UL << PMU_INTENCLR_CNT3_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT4_ENABLE_Msk (1UL << PMU_INTENCLR_CNT4_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Position */
+#define PMU_INTENCLR_CNT5_ENABLE_Msk (1UL << PMU_INTENCLR_CNT5_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Mask */
+
+#define PMU_INTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU INTENCLR:
Event Counter 6 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT6_ENABLE_Msk (1UL << PMU_INTENCLR_CNT6_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT7_ENABLE_Msk (1UL << PMU_INTENCLR_CNT7_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT8_ENABLE_Msk (1UL << PMU_INTENCLR_CNT8_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT9_ENABLE_Msk (1UL << PMU_INTENCLR_CNT9_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT10_ENABLE_Msk (1UL << PMU_INTENCLR_CNT10_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT11_ENABLE_Msk (1UL << PMU_INTENCLR_CNT11_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT12_ENABLE_Msk (1UL << PMU_INTENCLR_CNT12_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT13_ENABLE_Msk (1UL << PMU_INTENCLR_CNT13_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT14_ENABLE_Msk (1UL << PMU_INTENCLR_CNT14_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT15_ENABLE_Msk (1UL << PMU_INTENCLR_CNT15_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT16_ENABLE_Msk (1UL << PMU_INTENCLR_CNT16_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT17_ENABLE_Msk (1UL << PMU_INTENCLR_CNT17_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT18_ENABLE_Msk (1UL << PMU_INTENCLR_CNT18_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT19_ENABLE_Msk (1UL << PMU_INTENCLR_CNT19_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable 
Clear Mask */ + +#define PMU_INTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT20_ENABLE_Msk (1UL << PMU_INTENCLR_CNT20_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT21_ENABLE_Msk (1UL << PMU_INTENCLR_CNT21_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT22_ENABLE_Msk (1UL << PMU_INTENCLR_CNT22_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT23_ENABLE_Msk (1UL << PMU_INTENCLR_CNT23_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT24_ENABLE_Msk (1UL << PMU_INTENCLR_CNT24_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT25_ENABLE_Msk (1UL << PMU_INTENCLR_CNT25_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT26_ENABLE_Msk (1UL << PMU_INTENCLR_CNT26_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT27_ENABLE_Msk (1UL << PMU_INTENCLR_CNT27_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT28_ENABLE_Msk (1UL << PMU_INTENCLR_CNT28_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT29_ENABLE_Msk (1UL << PMU_INTENCLR_CNT29_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT30_ENABLE_Msk (1UL << PMU_INTENCLR_CNT30_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CYCCNT_ENABLE_Msk (1UL << PMU_INTENCLR_CYCCNT_ENABLE_Pos) /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Mask */ + +/** \brief PMU Overflow Flag Status Set Register Definitions */ + +#define PMU_OVSSET_CNT0_STATUS_Pos 0U /*!< PMU OVSSET: Event Counter 0 Overflow Set Position */ +#define PMU_OVSSET_CNT0_STATUS_Msk (1UL /*<< PMU_OVSSET_CNT0_STATUS_Pos*/) /*!< PMU OVSSET: Event Counter 0 Overflow Set Mask */ + +#define PMU_OVSSET_CNT1_STATUS_Pos 1U /*!< PMU OVSSET: Event Counter 1 Overflow Set Position */ +#define 
PMU_OVSSET_CNT1_STATUS_Msk (1UL << PMU_OVSSET_CNT1_STATUS_Pos) /*!< PMU OVSSET: Event Counter 1 Overflow Set Mask */ + +#define PMU_OVSSET_CNT2_STATUS_Pos 2U /*!< PMU OVSSET: Event Counter 2 Overflow Set Position */ +#define PMU_OVSSET_CNT2_STATUS_Msk (1UL << PMU_OVSSET_CNT2_STATUS_Pos) /*!< PMU OVSSET: Event Counter 2 Overflow Set Mask */ + +#define PMU_OVSSET_CNT3_STATUS_Pos 3U /*!< PMU OVSSET: Event Counter 3 Overflow Set Position */ +#define PMU_OVSSET_CNT3_STATUS_Msk (1UL << PMU_OVSSET_CNT3_STATUS_Pos) /*!< PMU OVSSET: Event Counter 3 Overflow Set Mask */ + +#define PMU_OVSSET_CNT4_STATUS_Pos 4U /*!< PMU OVSSET: Event Counter 4 Overflow Set Position */ +#define PMU_OVSSET_CNT4_STATUS_Msk (1UL << PMU_OVSSET_CNT4_STATUS_Pos) /*!< PMU OVSSET: Event Counter 4 Overflow Set Mask */ + +#define PMU_OVSSET_CNT5_STATUS_Pos 5U /*!< PMU OVSSET: Event Counter 5 Overflow Set Position */ +#define PMU_OVSSET_CNT5_STATUS_Msk (1UL << PMU_OVSSET_CNT5_STATUS_Pos) /*!< PMU OVSSET: Event Counter 5 Overflow Set Mask */ + +#define PMU_OVSSET_CNT6_STATUS_Pos 6U /*!< PMU OVSSET: Event Counter 6 Overflow Set Position */ +#define PMU_OVSSET_CNT6_STATUS_Msk (1UL << PMU_OVSSET_CNT6_STATUS_Pos) /*!< PMU OVSSET: Event Counter 6 Overflow Set Mask */ + +#define PMU_OVSSET_CNT7_STATUS_Pos 7U /*!< PMU OVSSET: Event Counter 7 Overflow Set Position */ +#define PMU_OVSSET_CNT7_STATUS_Msk (1UL << PMU_OVSSET_CNT7_STATUS_Pos) /*!< PMU OVSSET: Event Counter 7 Overflow Set Mask */ + +#define PMU_OVSSET_CNT8_STATUS_Pos 8U /*!< PMU OVSSET: Event Counter 8 Overflow Set Position */ +#define PMU_OVSSET_CNT8_STATUS_Msk (1UL << PMU_OVSSET_CNT8_STATUS_Pos) /*!< PMU OVSSET: Event Counter 8 Overflow Set Mask */ + +#define PMU_OVSSET_CNT9_STATUS_Pos 9U /*!< PMU OVSSET: Event Counter 9 Overflow Set Position */ +#define PMU_OVSSET_CNT9_STATUS_Msk (1UL << PMU_OVSSET_CNT9_STATUS_Pos) /*!< PMU OVSSET: Event Counter 9 Overflow Set Mask */ + +#define PMU_OVSSET_CNT10_STATUS_Pos 10U /*!< PMU OVSSET: Event Counter 10 Overflow Set Position */ +#define PMU_OVSSET_CNT10_STATUS_Msk (1UL << PMU_OVSSET_CNT10_STATUS_Pos) /*!< PMU OVSSET: Event Counter 10 Overflow Set Mask */ + +#define PMU_OVSSET_CNT11_STATUS_Pos 11U /*!< PMU OVSSET: Event Counter 11 Overflow Set Position */ +#define PMU_OVSSET_CNT11_STATUS_Msk (1UL << PMU_OVSSET_CNT11_STATUS_Pos) /*!< PMU OVSSET: Event Counter 11 Overflow Set Mask */ + +#define PMU_OVSSET_CNT12_STATUS_Pos 12U /*!< PMU OVSSET: Event Counter 12 Overflow Set Position */ +#define PMU_OVSSET_CNT12_STATUS_Msk (1UL << PMU_OVSSET_CNT12_STATUS_Pos) /*!< PMU OVSSET: Event Counter 12 Overflow Set Mask */ + +#define PMU_OVSSET_CNT13_STATUS_Pos 13U /*!< PMU OVSSET: Event Counter 13 Overflow Set Position */ +#define PMU_OVSSET_CNT13_STATUS_Msk (1UL << PMU_OVSSET_CNT13_STATUS_Pos) /*!< PMU OVSSET: Event Counter 13 Overflow Set Mask */ + +#define PMU_OVSSET_CNT14_STATUS_Pos 14U /*!< PMU OVSSET: Event Counter 14 Overflow Set Position */ +#define PMU_OVSSET_CNT14_STATUS_Msk (1UL << PMU_OVSSET_CNT14_STATUS_Pos) /*!< PMU OVSSET: Event Counter 14 Overflow Set Mask */ + +#define PMU_OVSSET_CNT15_STATUS_Pos 15U /*!< PMU OVSSET: Event Counter 15 Overflow Set Position */ +#define PMU_OVSSET_CNT15_STATUS_Msk (1UL << PMU_OVSSET_CNT15_STATUS_Pos) /*!< PMU OVSSET: Event Counter 15 Overflow Set Mask */ + +#define PMU_OVSSET_CNT16_STATUS_Pos 16U /*!< PMU OVSSET: Event Counter 16 Overflow Set Position */ +#define PMU_OVSSET_CNT16_STATUS_Msk (1UL << PMU_OVSSET_CNT16_STATUS_Pos) /*!< PMU OVSSET: Event Counter 16 Overflow Set Mask */ + +#define 
PMU_OVSSET_CNT17_STATUS_Pos 17U /*!< PMU OVSSET: Event Counter 17 Overflow Set Position */ +#define PMU_OVSSET_CNT17_STATUS_Msk (1UL << PMU_OVSSET_CNT17_STATUS_Pos) /*!< PMU OVSSET: Event Counter 17 Overflow Set Mask */ + +#define PMU_OVSSET_CNT18_STATUS_Pos 18U /*!< PMU OVSSET: Event Counter 18 Overflow Set Position */ +#define PMU_OVSSET_CNT18_STATUS_Msk (1UL << PMU_OVSSET_CNT18_STATUS_Pos) /*!< PMU OVSSET: Event Counter 18 Overflow Set Mask */ + +#define PMU_OVSSET_CNT19_STATUS_Pos 19U /*!< PMU OVSSET: Event Counter 19 Overflow Set Position */ +#define PMU_OVSSET_CNT19_STATUS_Msk (1UL << PMU_OVSSET_CNT19_STATUS_Pos) /*!< PMU OVSSET: Event Counter 19 Overflow Set Mask */ + +#define PMU_OVSSET_CNT20_STATUS_Pos 20U /*!< PMU OVSSET: Event Counter 20 Overflow Set Position */ +#define PMU_OVSSET_CNT20_STATUS_Msk (1UL << PMU_OVSSET_CNT20_STATUS_Pos) /*!< PMU OVSSET: Event Counter 20 Overflow Set Mask */ + +#define PMU_OVSSET_CNT21_STATUS_Pos 21U /*!< PMU OVSSET: Event Counter 21 Overflow Set Position */ +#define PMU_OVSSET_CNT21_STATUS_Msk (1UL << PMU_OVSSET_CNT21_STATUS_Pos) /*!< PMU OVSSET: Event Counter 21 Overflow Set Mask */ + +#define PMU_OVSSET_CNT22_STATUS_Pos 22U /*!< PMU OVSSET: Event Counter 22 Overflow Set Position */ +#define PMU_OVSSET_CNT22_STATUS_Msk (1UL << PMU_OVSSET_CNT22_STATUS_Pos) /*!< PMU OVSSET: Event Counter 22 Overflow Set Mask */ + +#define PMU_OVSSET_CNT23_STATUS_Pos 23U /*!< PMU OVSSET: Event Counter 23 Overflow Set Position */ +#define PMU_OVSSET_CNT23_STATUS_Msk (1UL << PMU_OVSSET_CNT23_STATUS_Pos) /*!< PMU OVSSET: Event Counter 23 Overflow Set Mask */ + +#define PMU_OVSSET_CNT24_STATUS_Pos 24U /*!< PMU OVSSET: Event Counter 24 Overflow Set Position */ +#define PMU_OVSSET_CNT24_STATUS_Msk (1UL << PMU_OVSSET_CNT24_STATUS_Pos) /*!< PMU OVSSET: Event Counter 24 Overflow Set Mask */ + +#define PMU_OVSSET_CNT25_STATUS_Pos 25U /*!< PMU OVSSET: Event Counter 25 Overflow Set Position */ +#define PMU_OVSSET_CNT25_STATUS_Msk (1UL << PMU_OVSSET_CNT25_STATUS_Pos) /*!< PMU OVSSET: Event Counter 25 Overflow Set Mask */ + +#define PMU_OVSSET_CNT26_STATUS_Pos 26U /*!< PMU OVSSET: Event Counter 26 Overflow Set Position */ +#define PMU_OVSSET_CNT26_STATUS_Msk (1UL << PMU_OVSSET_CNT26_STATUS_Pos) /*!< PMU OVSSET: Event Counter 26 Overflow Set Mask */ + +#define PMU_OVSSET_CNT27_STATUS_Pos 27U /*!< PMU OVSSET: Event Counter 27 Overflow Set Position */ +#define PMU_OVSSET_CNT27_STATUS_Msk (1UL << PMU_OVSSET_CNT27_STATUS_Pos) /*!< PMU OVSSET: Event Counter 27 Overflow Set Mask */ + +#define PMU_OVSSET_CNT28_STATUS_Pos 28U /*!< PMU OVSSET: Event Counter 28 Overflow Set Position */ +#define PMU_OVSSET_CNT28_STATUS_Msk (1UL << PMU_OVSSET_CNT28_STATUS_Pos) /*!< PMU OVSSET: Event Counter 28 Overflow Set Mask */ + +#define PMU_OVSSET_CNT29_STATUS_Pos 29U /*!< PMU OVSSET: Event Counter 29 Overflow Set Position */ +#define PMU_OVSSET_CNT29_STATUS_Msk (1UL << PMU_OVSSET_CNT29_STATUS_Pos) /*!< PMU OVSSET: Event Counter 29 Overflow Set Mask */ + +#define PMU_OVSSET_CNT30_STATUS_Pos 30U /*!< PMU OVSSET: Event Counter 30 Overflow Set Position */ +#define PMU_OVSSET_CNT30_STATUS_Msk (1UL << PMU_OVSSET_CNT30_STATUS_Pos) /*!< PMU OVSSET: Event Counter 30 Overflow Set Mask */ + +#define PMU_OVSSET_CYCCNT_STATUS_Pos 31U /*!< PMU OVSSET: Cycle Counter Overflow Set Position */ +#define PMU_OVSSET_CYCCNT_STATUS_Msk (1UL << PMU_OVSSET_CYCCNT_STATUS_Pos) /*!< PMU OVSSET: Cycle Counter Overflow Set Mask */ + +/** \brief PMU Overflow Flag Status Clear Register Definitions */ + +#define 
PMU_OVSCLR_CNT0_STATUS_Pos 0U /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Position */ +#define PMU_OVSCLR_CNT0_STATUS_Msk (1UL /*<< PMU_OVSCLR_CNT0_STATUS_Pos*/) /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT1_STATUS_Pos 1U /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Position */ +#define PMU_OVSCLR_CNT1_STATUS_Msk (1UL << PMU_OVSCLR_CNT1_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 1 Overflow Clear */ + +#define PMU_OVSCLR_CNT2_STATUS_Pos 2U /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Position */ +#define PMU_OVSCLR_CNT2_STATUS_Msk (1UL << PMU_OVSCLR_CNT2_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT3_STATUS_Pos 3U /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Position */ +#define PMU_OVSCLR_CNT3_STATUS_Msk (1UL << PMU_OVSCLR_CNT3_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT4_STATUS_Pos 4U /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Position */ +#define PMU_OVSCLR_CNT4_STATUS_Msk (1UL << PMU_OVSCLR_CNT4_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT5_STATUS_Pos 5U /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Position */ +#define PMU_OVSCLR_CNT5_STATUS_Msk (1UL << PMU_OVSCLR_CNT5_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT6_STATUS_Pos 6U /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Position */ +#define PMU_OVSCLR_CNT6_STATUS_Msk (1UL << PMU_OVSCLR_CNT6_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT7_STATUS_Pos 7U /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Position */ +#define PMU_OVSCLR_CNT7_STATUS_Msk (1UL << PMU_OVSCLR_CNT7_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT8_STATUS_Pos 8U /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Position */ +#define PMU_OVSCLR_CNT8_STATUS_Msk (1UL << PMU_OVSCLR_CNT8_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT9_STATUS_Pos 9U /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Position */ +#define PMU_OVSCLR_CNT9_STATUS_Msk (1UL << PMU_OVSCLR_CNT9_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT10_STATUS_Pos 10U /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Position */ +#define PMU_OVSCLR_CNT10_STATUS_Msk (1UL << PMU_OVSCLR_CNT10_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT11_STATUS_Pos 11U /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Position */ +#define PMU_OVSCLR_CNT11_STATUS_Msk (1UL << PMU_OVSCLR_CNT11_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT12_STATUS_Pos 12U /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Position */ +#define PMU_OVSCLR_CNT12_STATUS_Msk (1UL << PMU_OVSCLR_CNT12_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT13_STATUS_Pos 13U /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Position */ +#define PMU_OVSCLR_CNT13_STATUS_Msk (1UL << PMU_OVSCLR_CNT13_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT14_STATUS_Pos 14U /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Position */ +#define PMU_OVSCLR_CNT14_STATUS_Msk (1UL << PMU_OVSCLR_CNT14_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT15_STATUS_Pos 15U /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Position 
*/ +#define PMU_OVSCLR_CNT15_STATUS_Msk (1UL << PMU_OVSCLR_CNT15_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT16_STATUS_Pos 16U /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Position */ +#define PMU_OVSCLR_CNT16_STATUS_Msk (1UL << PMU_OVSCLR_CNT16_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT17_STATUS_Pos 17U /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Position */ +#define PMU_OVSCLR_CNT17_STATUS_Msk (1UL << PMU_OVSCLR_CNT17_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT18_STATUS_Pos 18U /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Position */ +#define PMU_OVSCLR_CNT18_STATUS_Msk (1UL << PMU_OVSCLR_CNT18_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT19_STATUS_Pos 19U /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Position */ +#define PMU_OVSCLR_CNT19_STATUS_Msk (1UL << PMU_OVSCLR_CNT19_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT20_STATUS_Pos 20U /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Position */ +#define PMU_OVSCLR_CNT20_STATUS_Msk (1UL << PMU_OVSCLR_CNT20_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT21_STATUS_Pos 21U /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Position */ +#define PMU_OVSCLR_CNT21_STATUS_Msk (1UL << PMU_OVSCLR_CNT21_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT22_STATUS_Pos 22U /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Position */ +#define PMU_OVSCLR_CNT22_STATUS_Msk (1UL << PMU_OVSCLR_CNT22_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT23_STATUS_Pos 23U /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Position */ +#define PMU_OVSCLR_CNT23_STATUS_Msk (1UL << PMU_OVSCLR_CNT23_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT24_STATUS_Pos 24U /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Position */ +#define PMU_OVSCLR_CNT24_STATUS_Msk (1UL << PMU_OVSCLR_CNT24_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT25_STATUS_Pos 25U /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Position */ +#define PMU_OVSCLR_CNT25_STATUS_Msk (1UL << PMU_OVSCLR_CNT25_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT26_STATUS_Pos 26U /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Position */ +#define PMU_OVSCLR_CNT26_STATUS_Msk (1UL << PMU_OVSCLR_CNT26_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT27_STATUS_Pos 27U /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Position */ +#define PMU_OVSCLR_CNT27_STATUS_Msk (1UL << PMU_OVSCLR_CNT27_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT28_STATUS_Pos 28U /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Position */ +#define PMU_OVSCLR_CNT28_STATUS_Msk (1UL << PMU_OVSCLR_CNT28_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT29_STATUS_Pos 29U /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Position */ +#define PMU_OVSCLR_CNT29_STATUS_Msk (1UL << PMU_OVSCLR_CNT29_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT30_STATUS_Pos 30U /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Position */ +#define 
PMU_OVSCLR_CNT30_STATUS_Msk (1UL << PMU_OVSCLR_CNT30_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Mask */ + +#define PMU_OVSCLR_CYCCNT_STATUS_Pos 31U /*!< PMU OVSCLR: Cycle Counter Overflow Clear Position */ +#define PMU_OVSCLR_CYCCNT_STATUS_Msk (1UL << PMU_OVSCLR_CYCCNT_STATUS_Pos) /*!< PMU OVSCLR: Cycle Counter Overflow Clear Mask */ + +/** \brief PMU Software Increment Counter */ + +#define PMU_SWINC_CNT0_Pos 0U /*!< PMU SWINC: Event Counter 0 Software Increment Position */ +#define PMU_SWINC_CNT0_Msk (1UL /*<< PMU_SWINC_CNT0_Pos */) /*!< PMU SWINC: Event Counter 0 Software Increment Mask */ + +#define PMU_SWINC_CNT1_Pos 1U /*!< PMU SWINC: Event Counter 1 Software Increment Position */ +#define PMU_SWINC_CNT1_Msk (1UL << PMU_SWINC_CNT1_Pos) /*!< PMU SWINC: Event Counter 1 Software Increment Mask */ + +#define PMU_SWINC_CNT2_Pos 2U /*!< PMU SWINC: Event Counter 2 Software Increment Position */ +#define PMU_SWINC_CNT2_Msk (1UL << PMU_SWINC_CNT2_Pos) /*!< PMU SWINC: Event Counter 2 Software Increment Mask */ + +#define PMU_SWINC_CNT3_Pos 3U /*!< PMU SWINC: Event Counter 3 Software Increment Position */ +#define PMU_SWINC_CNT3_Msk (1UL << PMU_SWINC_CNT3_Pos) /*!< PMU SWINC: Event Counter 3 Software Increment Mask */ + +#define PMU_SWINC_CNT4_Pos 4U /*!< PMU SWINC: Event Counter 4 Software Increment Position */ +#define PMU_SWINC_CNT4_Msk (1UL << PMU_SWINC_CNT4_Pos) /*!< PMU SWINC: Event Counter 4 Software Increment Mask */ + +#define PMU_SWINC_CNT5_Pos 5U /*!< PMU SWINC: Event Counter 5 Software Increment Position */ +#define PMU_SWINC_CNT5_Msk (1UL << PMU_SWINC_CNT5_Pos) /*!< PMU SWINC: Event Counter 5 Software Increment Mask */ + +#define PMU_SWINC_CNT6_Pos 6U /*!< PMU SWINC: Event Counter 6 Software Increment Position */ +#define PMU_SWINC_CNT6_Msk (1UL << PMU_SWINC_CNT6_Pos) /*!< PMU SWINC: Event Counter 6 Software Increment Mask */ + +#define PMU_SWINC_CNT7_Pos 7U /*!< PMU SWINC: Event Counter 7 Software Increment Position */ +#define PMU_SWINC_CNT7_Msk (1UL << PMU_SWINC_CNT7_Pos) /*!< PMU SWINC: Event Counter 7 Software Increment Mask */ + +#define PMU_SWINC_CNT8_Pos 8U /*!< PMU SWINC: Event Counter 8 Software Increment Position */ +#define PMU_SWINC_CNT8_Msk (1UL << PMU_SWINC_CNT8_Pos) /*!< PMU SWINC: Event Counter 8 Software Increment Mask */ + +#define PMU_SWINC_CNT9_Pos 9U /*!< PMU SWINC: Event Counter 9 Software Increment Position */ +#define PMU_SWINC_CNT9_Msk (1UL << PMU_SWINC_CNT9_Pos) /*!< PMU SWINC: Event Counter 9 Software Increment Mask */ + +#define PMU_SWINC_CNT10_Pos 10U /*!< PMU SWINC: Event Counter 10 Software Increment Position */ +#define PMU_SWINC_CNT10_Msk (1UL << PMU_SWINC_CNT10_Pos) /*!< PMU SWINC: Event Counter 10 Software Increment Mask */ + +#define PMU_SWINC_CNT11_Pos 11U /*!< PMU SWINC: Event Counter 11 Software Increment Position */ +#define PMU_SWINC_CNT11_Msk (1UL << PMU_SWINC_CNT11_Pos) /*!< PMU SWINC: Event Counter 11 Software Increment Mask */ + +#define PMU_SWINC_CNT12_Pos 12U /*!< PMU SWINC: Event Counter 12 Software Increment Position */ +#define PMU_SWINC_CNT12_Msk (1UL << PMU_SWINC_CNT12_Pos) /*!< PMU SWINC: Event Counter 12 Software Increment Mask */ + +#define PMU_SWINC_CNT13_Pos 13U /*!< PMU SWINC: Event Counter 13 Software Increment Position */ +#define PMU_SWINC_CNT13_Msk (1UL << PMU_SWINC_CNT13_Pos) /*!< PMU SWINC: Event Counter 13 Software Increment Mask */ + +#define PMU_SWINC_CNT14_Pos 14U /*!< PMU SWINC: Event Counter 14 Software Increment Position */ +#define PMU_SWINC_CNT14_Msk (1UL << PMU_SWINC_CNT14_Pos) /*!< PMU 
SWINC: Event Counter 14 Software Increment Mask */ + +#define PMU_SWINC_CNT15_Pos 15U /*!< PMU SWINC: Event Counter 15 Software Increment Position */ +#define PMU_SWINC_CNT15_Msk (1UL << PMU_SWINC_CNT15_Pos) /*!< PMU SWINC: Event Counter 15 Software Increment Mask */ + +#define PMU_SWINC_CNT16_Pos 16U /*!< PMU SWINC: Event Counter 16 Software Increment Position */ +#define PMU_SWINC_CNT16_Msk (1UL << PMU_SWINC_CNT16_Pos) /*!< PMU SWINC: Event Counter 16 Software Increment Mask */ + +#define PMU_SWINC_CNT17_Pos 17U /*!< PMU SWINC: Event Counter 17 Software Increment Position */ +#define PMU_SWINC_CNT17_Msk (1UL << PMU_SWINC_CNT17_Pos) /*!< PMU SWINC: Event Counter 17 Software Increment Mask */ + +#define PMU_SWINC_CNT18_Pos 18U /*!< PMU SWINC: Event Counter 18 Software Increment Position */ +#define PMU_SWINC_CNT18_Msk (1UL << PMU_SWINC_CNT18_Pos) /*!< PMU SWINC: Event Counter 18 Software Increment Mask */ + +#define PMU_SWINC_CNT19_Pos 19U /*!< PMU SWINC: Event Counter 19 Software Increment Position */ +#define PMU_SWINC_CNT19_Msk (1UL << PMU_SWINC_CNT19_Pos) /*!< PMU SWINC: Event Counter 19 Software Increment Mask */ + +#define PMU_SWINC_CNT20_Pos 20U /*!< PMU SWINC: Event Counter 20 Software Increment Position */ +#define PMU_SWINC_CNT20_Msk (1UL << PMU_SWINC_CNT20_Pos) /*!< PMU SWINC: Event Counter 20 Software Increment Mask */ + +#define PMU_SWINC_CNT21_Pos 21U /*!< PMU SWINC: Event Counter 21 Software Increment Position */ +#define PMU_SWINC_CNT21_Msk (1UL << PMU_SWINC_CNT21_Pos) /*!< PMU SWINC: Event Counter 21 Software Increment Mask */ + +#define PMU_SWINC_CNT22_Pos 22U /*!< PMU SWINC: Event Counter 22 Software Increment Position */ +#define PMU_SWINC_CNT22_Msk (1UL << PMU_SWINC_CNT22_Pos) /*!< PMU SWINC: Event Counter 22 Software Increment Mask */ + +#define PMU_SWINC_CNT23_Pos 23U /*!< PMU SWINC: Event Counter 23 Software Increment Position */ +#define PMU_SWINC_CNT23_Msk (1UL << PMU_SWINC_CNT23_Pos) /*!< PMU SWINC: Event Counter 23 Software Increment Mask */ + +#define PMU_SWINC_CNT24_Pos 24U /*!< PMU SWINC: Event Counter 24 Software Increment Position */ +#define PMU_SWINC_CNT24_Msk (1UL << PMU_SWINC_CNT24_Pos) /*!< PMU SWINC: Event Counter 24 Software Increment Mask */ + +#define PMU_SWINC_CNT25_Pos 25U /*!< PMU SWINC: Event Counter 25 Software Increment Position */ +#define PMU_SWINC_CNT25_Msk (1UL << PMU_SWINC_CNT25_Pos) /*!< PMU SWINC: Event Counter 25 Software Increment Mask */ + +#define PMU_SWINC_CNT26_Pos 26U /*!< PMU SWINC: Event Counter 26 Software Increment Position */ +#define PMU_SWINC_CNT26_Msk (1UL << PMU_SWINC_CNT26_Pos) /*!< PMU SWINC: Event Counter 26 Software Increment Mask */ + +#define PMU_SWINC_CNT27_Pos 27U /*!< PMU SWINC: Event Counter 27 Software Increment Position */ +#define PMU_SWINC_CNT27_Msk (1UL << PMU_SWINC_CNT27_Pos) /*!< PMU SWINC: Event Counter 27 Software Increment Mask */ + +#define PMU_SWINC_CNT28_Pos 28U /*!< PMU SWINC: Event Counter 28 Software Increment Position */ +#define PMU_SWINC_CNT28_Msk (1UL << PMU_SWINC_CNT28_Pos) /*!< PMU SWINC: Event Counter 28 Software Increment Mask */ + +#define PMU_SWINC_CNT29_Pos 29U /*!< PMU SWINC: Event Counter 29 Software Increment Position */ +#define PMU_SWINC_CNT29_Msk (1UL << PMU_SWINC_CNT29_Pos) /*!< PMU SWINC: Event Counter 29 Software Increment Mask */ + +#define PMU_SWINC_CNT30_Pos 30U /*!< PMU SWINC: Event Counter 30 Software Increment Position */ +#define PMU_SWINC_CNT30_Msk (1UL << PMU_SWINC_CNT30_Pos) /*!< PMU SWINC: Event Counter 30 Software Increment Mask */ + +/** \brief PMU Control 
Register Definitions */ + +#define PMU_CTRL_ENABLE_Pos 0U /*!< PMU CTRL: ENABLE Position */ +#define PMU_CTRL_ENABLE_Msk (1UL /*<< PMU_CTRL_ENABLE_Pos*/) /*!< PMU CTRL: ENABLE Mask */ + +#define PMU_CTRL_EVENTCNT_RESET_Pos 1U /*!< PMU CTRL: Event Counter Reset Position */ +#define PMU_CTRL_EVENTCNT_RESET_Msk (1UL << PMU_CTRL_EVENTCNT_RESET_Pos) /*!< PMU CTRL: Event Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_RESET_Pos 2U /*!< PMU CTRL: Cycle Counter Reset Position */ +#define PMU_CTRL_CYCCNT_RESET_Msk (1UL << PMU_CTRL_CYCCNT_RESET_Pos) /*!< PMU CTRL: Cycle Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_DISABLE_Pos 5U /*!< PMU CTRL: Disable Cycle Counter Position */ +#define PMU_CTRL_CYCCNT_DISABLE_Msk (1UL << PMU_CTRL_CYCCNT_DISABLE_Pos) /*!< PMU CTRL: Disable Cycle Counter Mask */ + +#define PMU_CTRL_FRZ_ON_OV_Pos 9U /*!< PMU CTRL: Freeze-on-overflow Position */ +#define PMU_CTRL_FRZ_ON_OV_Msk (1UL << PMU_CTRL_FRZ_ON_OV_Pos) /*!< PMU CTRL: Freeze-on-overflow Mask */ + +#define PMU_CTRL_TRACE_ON_OV_Pos 11U /*!< PMU CTRL: Trace-on-overflow Position */ +#define PMU_CTRL_TRACE_ON_OV_Msk (1UL << PMU_CTRL_TRACE_ON_OV_Pos) /*!< PMU CTRL: Trace-on-overflow Mask */ + +/** \brief PMU Type Register Definitions */ + +#define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */ +#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ + +#define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */ +#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */ + +#define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */ +#define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */ + +#define PMU_TYPE_FRZ_OV_SUPPORT_Pos 21U /*!< PMU TYPE: Freeze-on-overflow Support Position */ +#define PMU_TYPE_FRZ_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Freeze-on-overflow Support Mask */ + +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */ +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */ + +/** \brief PMU Authentication Status Register Definitions */ + +#define PMU_AUTHSTATUS_NSID_Pos 0U /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSID_Msk (0x3UL /*<< PMU_AUTHSTATUS_NSID_Pos*/) /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSNID_Pos 2U /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSNID_Msk (0x3UL << PMU_AUTHSTATUS_NSNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SID_Pos 4U /*!< PMU AUTHSTATUS: Secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_SID_Msk (0x3UL << PMU_AUTHSTATUS_SID_Pos) /*!< PMU AUTHSTATUS: Secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SNID_Pos 6U /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SNID_Msk (0x3UL << PMU_AUTHSTATUS_SNID_Pos) /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUID_Pos 16U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUID_Msk (0x3UL << PMU_AUTHSTATUS_NSUID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUNID_Pos 18U /*!< PMU AUTHSTATUS: Non-secure 
Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUNID_Msk (0x3UL << PMU_AUTHSTATUS_NSUNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUID_Pos 20U /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_SUID_Msk (0x3UL << PMU_AUTHSTATUS_SUID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUNID_Pos 22U /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SUNID_Msk (0x3UL << PMU_AUTHSTATUS_SUNID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Mask */ + + +/*@} end of group CMSIS_PMU */ +#endif + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< 
MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_PXN_Pos 4U /*!< MPU RLAR: PXN Position */ +#define MPU_RLAR_PXN_Msk (1UL << MPU_RLAR_PXN_Pos) /*!< MPU RLAR: PXN Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
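+
+  A minimal usage sketch (illustrative only; it assumes the SAU instance
+  pointer that these headers conventionally define further below):
+  \code
+  uint32_t sau_regions = SAU->TYPE & SAU_TYPE_SREGION_Msk;  // number of implemented SAU regions
+  SAU->CTRL &= ~SAU_CTRL_ALLNS_Msk;                         // memory outside enabled regions is Secure
+  SAU->CTRL |= SAU_CTRL_ENABLE_Msk;                         // enable the SAU
+  \endcode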
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +#define FPU_FPDSCR_FZ16_Pos 19U /*!< FPDSCR: FZ16 bit Position */ +#define FPU_FPDSCR_FZ16_Msk (1UL << FPU_FPDSCR_FZ16_Pos) /*!< FPDSCR: FZ16 bit Mask */ + +#define FPU_FPDSCR_LTPSIZE_Pos 16U /*!< FPDSCR: LTPSIZE bit Position */ +#define FPU_FPDSCR_LTPSIZE_Msk (7UL << FPU_FPDSCR_LTPSIZE_Pos) /*!< FPDSCR: LTPSIZE bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: FPRound bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: FPRound bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: FPSqrt bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: FPSqrt bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: FPDivide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: FPDP bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: FPDP bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: FPSP bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: FPSP bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMDReg bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMDReg bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: FMAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: FMAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FPHP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FPHP bits Mask */ + +#define FPU_MVFR1_FP16_Pos 20U /*!< MVFR1: FP16 bits Position */ +#define FPU_MVFR1_FP16_Msk (0xFUL << FPU_MVFR1_FP16_Pos) /*!< MVFR1: FP16 bits Mask */ + +#define FPU_MVFR1_MVE_Pos 8U /*!< MVFR1: MVE bits Position */ +#define FPU_MVFR1_MVE_Msk (0xFUL << FPU_MVFR1_MVE_Pos) /*!< MVFR1: MVE bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: FPDNaN bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: FPDNaN bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FPFtZ bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL 
/*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FPFtZ bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_FPD_Pos 23U /*!< \deprecated CoreDebug DHCSR: S_FPD Position */ +#define CoreDebug_DHCSR_S_FPD_Msk (1UL << CoreDebug_DHCSR_S_FPD_Pos) /*!< \deprecated CoreDebug DHCSR: S_FPD Mask */ + +#define CoreDebug_DHCSR_S_SUIDE_Pos 22U /*!< \deprecated CoreDebug DHCSR: S_SUIDE Position */ +#define CoreDebug_DHCSR_S_SUIDE_Msk (1UL << CoreDebug_DHCSR_S_SUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SUIDE Mask */ + +#define CoreDebug_DHCSR_S_NSUIDE_Pos 21U /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Position */ +#define CoreDebug_DHCSR_S_NSUIDE_Msk (1UL << CoreDebug_DHCSR_S_NSUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Mask */ + +#define CoreDebug_DHCSR_S_SDE_Pos 20U /*!< \deprecated CoreDebug DHCSR: S_SDE Position */ +#define CoreDebug_DHCSR_S_SDE_Msk (1UL << CoreDebug_DHCSR_S_SDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SDE Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define 
CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_PMOV_Pos 6U /*!< \deprecated CoreDebug DHCSR: C_PMOV Position */ +#define CoreDebug_DHCSR_C_PMOV_Msk (1UL << CoreDebug_DHCSR_C_PMOV_Pos) /*!< \deprecated CoreDebug DHCSR: C_PMOV Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug 
DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Set Clear Exception and Monitor Control Register Definitions */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_REQ_Pos 3U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_SET_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_PEND_Pos 1U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_SET_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_UIDEN_Pos 10U /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_UIDAPEN_Pos 9U /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDAPEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDAPEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Mask */ + +#define 
CoreDebug_DAUTHCTRL_FSDMA_Pos 8U /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Position */ +#define CoreDebug_DAUTHCTRL_FSDMA_Msk (1UL << CoreDebug_DAUTHCTRL_FSDMA_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
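+
+  For illustration, a typical access pattern (it assumes the DCB instance
+  pointer that these headers conventionally define further below):
+  \code
+  DCB->DEMCR |= DCB_DEMCR_TRCENA_Msk;                // enable the trace and debug blocks (DWT, ITM)
+  if ((DCB->DHCSR & DCB_DHCSR_C_DEBUGEN_Msk) != 0U)
+  {
+    // a debugger has enabled halting debug
+  }
+  \endcode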
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_FPD_Pos 23U /*!< DCB DHCSR: Floating-point registers Debuggable Position */ +#define DCB_DHCSR_S_FPD_Msk (0x1UL << DCB_DHCSR_S_FPD_Pos) /*!< DCB DHCSR: Floating-point registers Debuggable Mask */ + +#define DCB_DHCSR_S_SUIDE_Pos 22U /*!< DCB DHCSR: Secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_SUIDE_Msk (0x1UL << DCB_DHCSR_S_SUIDE_Pos) /*!< DCB DHCSR: Secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_NSUIDE_Pos 21U /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_NSUIDE_Msk (0x1UL << DCB_DHCSR_S_NSUIDE_Pos) /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_PMOV_Pos 6U /*!< DCB DHCSR: Halt on PMU overflow control Position */ +#define DCB_DHCSR_C_PMOV_Msk (0x1UL << DCB_DHCSR_C_PMOV_Pos) /*!< DCB DHCSR: Halt on PMU overflow control Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << 
DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask 
*/ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DSCEMCR, Debug Set Clear Exception and Monitor Control Register Definitions */ +#define DCB_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< DCB DSCEMCR: Clear monitor request Position */ +#define DCB_DSCEMCR_CLR_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_REQ_Pos) /*!< DCB DSCEMCR: Clear monitor request Mask */ + +#define DCB_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< DCB DSCEMCR: Clear monitor pend Position */ +#define DCB_DSCEMCR_CLR_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_PEND_Pos) /*!< DCB DSCEMCR: Clear monitor pend Mask */ + +#define DCB_DSCEMCR_SET_MON_REQ_Pos 3U /*!< DCB DSCEMCR: Set monitor request Position */ +#define DCB_DSCEMCR_SET_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_SET_MON_REQ_Pos) /*!< DCB DSCEMCR: Set monitor request Mask */ + +#define DCB_DSCEMCR_SET_MON_PEND_Pos 1U /*!< DCB DSCEMCR: Set monitor pend Position */ +#define DCB_DSCEMCR_SET_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_SET_MON_PEND_Pos) /*!< DCB DSCEMCR: Set monitor pend Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_UIDEN_Pos 10U /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Position */ +#define DCB_DAUTHCTRL_UIDEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Mask */ + +#define DCB_DAUTHCTRL_UIDAPEN_Pos 9U /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Position */ +#define DCB_DAUTHCTRL_UIDAPEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDAPEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Mask */ + +#define DCB_DAUTHCTRL_FSDMA_Pos 8U /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Position */ +#define DCB_DAUTHCTRL_FSDMA_Msk (0x1UL << DCB_DAUTHCTRL_FSDMA_Pos) /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Mask */ + +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< 
DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
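+
+  An illustrative sketch (the DIB instance pointer is assumed to be defined
+  further below in this header):
+  \code
+  uint32_t dauth = DIB->DAUTHSTATUS;
+  uint32_t sid = (dauth & DIB_DAUTHSTATUS_SID_Msk) >> DIB_DAUTHSTATUS_SID_Pos;  // Secure invasive debug allowed?
+  \endcode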
 + */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SUNID_Pos 22U /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUNID_Msk (0x3UL << DIB_DAUTHSTATUS_SUNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SUID_Pos 20U /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUID_Msk (0x3UL << DIB_DAUTHSTATUS_SUID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_NSUNID_Pos 18U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_NSUNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_NSUID_Pos 16U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_NSUID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << 
DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as a uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit field value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as a uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. 
+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define MEMSYSCTL_BASE (0xE001E000UL) /*!< Memory System Control Base Address */ + #define ERRBNK_BASE (0xE001E100UL) /*!< Error Banking Base Address */ + #define PWRMODCTL_BASE (0xE001E300UL) /*!< Power Mode Control Base Address */ + #define EWIC_BASE (0xE001E400UL) /*!< External Wakeup Interrupt Controller Base Address */ + #define PRCCFGINF_BASE (0xE001E700UL) /*!< Processor Configuration Information Base Address */ + #define STL_BASE (0xE001E800UL) /*!< Software Test Library Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define ICB ((ICB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define MEMSYSCTL ((MemSysCtl_Type *) MEMSYSCTL_BASE ) /*!< Memory System Control configuration struct */ + #define ERRBNK ((ErrBnk_Type *) ERRBNK_BASE ) /*!< Error Banking configuration struct */ + #define PWRMODCTL ((PwrModCtl_Type *) PWRMODCTL_BASE ) /*!< Power Mode Control configuration struct */ + #define EWIC ((EWIC_Type *) EWIC_BASE ) /*!< EWIC configuration struct */ + #define PRCCFGINF ((PrcCfgInf_Type *) PRCCFGINF_BASE ) /*!< Processor Configuration Information configuration struct */ + #define STL ((STL_Type *) STL_BASE ) /*!< Software Test Library configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + #define PMU_BASE (0xE0003000UL) /*!< PMU Base Address */ + #define PMU ((PMU_Type *) PMU_BASE ) /*!< PMU configuration struct */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + 
#define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define ICB_NS ((ICB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. 
+ @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ + +/* 'SCnSCB' is deprecated and replaced by 'ICB' */ +typedef ICB_Type SCnSCB_Type; + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISCRITAXIRUW_Pos (ICB_ACTLR_DISCRITAXIRUW_Pos) +#define SCnSCB_ACTLR_DISCRITAXIRUW_Msk (ICB_ACTLR_DISCRITAXIRUW_Msk) + +#define SCnSCB_ACTLR_DISDI_Pos (ICB_ACTLR_DISDI_Pos) +#define SCnSCB_ACTLR_DISDI_Msk (ICB_ACTLR_DISDI_Msk) + +#define SCnSCB_ACTLR_DISCRITAXIRUR_Pos (ICB_ACTLR_DISCRITAXIRUR_Pos) +#define SCnSCB_ACTLR_DISCRITAXIRUR_Msk (ICB_ACTLR_DISCRITAXIRUR_Msk) + +#define SCnSCB_ACTLR_EVENTBUSEN_Pos (ICB_ACTLR_EVENTBUSEN_Pos) +#define SCnSCB_ACTLR_EVENTBUSEN_Msk (ICB_ACTLR_EVENTBUSEN_Msk) + +#define SCnSCB_ACTLR_EVENTBUSEN_S_Pos (ICB_ACTLR_EVENTBUSEN_S_Pos) +#define SCnSCB_ACTLR_EVENTBUSEN_S_Msk (ICB_ACTLR_EVENTBUSEN_S_Msk) + +#define SCnSCB_ACTLR_DISITMATBFLUSH_Pos (ICB_ACTLR_DISITMATBFLUSH_Pos) +#define SCnSCB_ACTLR_DISITMATBFLUSH_Msk (ICB_ACTLR_DISITMATBFLUSH_Msk) + +#define SCnSCB_ACTLR_DISNWAMODE_Pos (ICB_ACTLR_DISNWAMODE_Pos) +#define SCnSCB_ACTLR_DISNWAMODE_Msk (ICB_ACTLR_DISNWAMODE_Msk) + +#define SCnSCB_ACTLR_FPEXCODIS_Pos (ICB_ACTLR_FPEXCODIS_Pos) +#define SCnSCB_ACTLR_FPEXCODIS_Msk (ICB_ACTLR_FPEXCODIS_Msk) + +#define SCnSCB_ACTLR_DISOLAP_Pos (ICB_ACTLR_DISOLAP_Pos) +#define SCnSCB_ACTLR_DISOLAP_Msk (ICB_ACTLR_DISOLAP_Msk) + +#define SCnSCB_ACTLR_DISOLAPS_Pos (ICB_ACTLR_DISOLAPS_Pos) +#define SCnSCB_ACTLR_DISOLAPS_Msk (ICB_ACTLR_DISOLAPS_Msk) + +#define SCnSCB_ACTLR_DISLOBR_Pos (ICB_ACTLR_DISLOBR_Pos) +#define SCnSCB_ACTLR_DISLOBR_Msk (ICB_ACTLR_DISLOBR_Msk) + +#define SCnSCB_ACTLR_DISLO_Pos (ICB_ACTLR_DISLO_Pos) +#define SCnSCB_ACTLR_DISLO_Msk (ICB_ACTLR_DISLO_Msk) + +#define SCnSCB_ACTLR_DISLOLEP_Pos (ICB_ACTLR_DISLOLEP_Pos) +#define SCnSCB_ACTLR_DISLOLEP_Msk (ICB_ACTLR_DISLOLEP_Msk) + +#define SCnSCB_ACTLR_DISFOLD_Pos (ICB_ACTLR_DISFOLD_Pos) +#define SCnSCB_ACTLR_DISFOLD_Msk (ICB_ACTLR_DISFOLD_Msk) + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos (ICB_ICTR_INTLINESNUM_Pos) +#define SCnSCB_ICTR_INTLINESNUM_Msk (ICB_ICTR_INTLINESNUM_Msk) + +#define SCnSCB (ICB) +#define SCnSCB_NS (ICB_NS) + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
+ */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. 
+ \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must have been relocated to SRAM beforehand. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses including + buffered writes are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## PMU functions and events #################################### */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + +#include "pmu_armv8.h" + +/** + \brief Cortex-M55 PMU events + \note Architectural PMU events can be found in pmu_armv8.h +*/ + +#define ARMCM55_PMU_ECC_ERR 0xC000 /*!< Any ECC error */ +#define ARMCM55_PMU_ECC_ERR_FATAL 0xC001 /*!< Any fatal ECC error */ +#define ARMCM55_PMU_ECC_ERR_DCACHE 0xC010 /*!< Any ECC error in the data cache */ +#define ARMCM55_PMU_ECC_ERR_ICACHE 0xC011 /*!< Any ECC error in the instruction cache */ +#define ARMCM55_PMU_ECC_ERR_FATAL_DCACHE 0xC012 /*!< Any fatal ECC error in the data cache */ +#define ARMCM55_PMU_ECC_ERR_FATAL_ICACHE 0xC013 /*!< Any fatal ECC error in the instruction cache*/ +#define ARMCM55_PMU_ECC_ERR_DTCM 0xC020 /*!< Any ECC error in the DTCM */ +#define ARMCM55_PMU_ECC_ERR_ITCM 0xC021 /*!< Any ECC error in the ITCM */ +#define ARMCM55_PMU_ECC_ERR_FATAL_DTCM 0xC022 /*!< Any fatal ECC error in the DTCM */ +#define ARMCM55_PMU_ECC_ERR_FATAL_ITCM 0xC023 /*!< Any fatal ECC error in the ITCM */ +#define ARMCM55_PMU_PF_LINEFILL 0xC100 /*!< A prefetcher starts a line-fill */ +#define ARMCM55_PMU_PF_CANCEL 0xC101 /*!< A prefetcher stops prefetching */ +#define 
ARMCM55_PMU_PF_DROP_LINEFILL 0xC102 /*!< A linefill triggered by a prefetcher has been dropped because of lack of buffering */ +#define ARMCM55_PMU_NWAMODE_ENTER 0xC200 /*!< No write-allocate mode entry */ +#define ARMCM55_PMU_NWAMODE 0xC201 /*!< Write-allocate store is not allocated into the data cache due to no-write-allocate mode */ +#define ARMCM55_PMU_SAHB_ACCESS 0xC300 /*!< Read or write access on the S-AHB interface to the TCM */ +#define ARMCM55_PMU_PAHB_ACCESS 0xC301 /*!< Read or write access to the P-AHB write interface */ +#define ARMCM55_PMU_AXI_WRITE_ACCESS 0xC302 /*!< Any beat access to M-AXI write interface */ +#define ARMCM55_PMU_AXI_READ_ACCESS 0xC303 /*!< Any beat access to M-AXI read interface */ +#define ARMCM55_PMU_DOSTIMEOUT_DOUBLE 0xC400 /*!< Denial of Service timeout has fired twice and caused buffers to drain to allow forward progress */ +#define ARMCM55_PMU_DOSTIMEOUT_TRIPLE 0xC401 /*!< Denial of Service timeout has fired three times and blocked the LSU to force forward progress */ + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + +/* ########################## MVE functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_MveFunctions MVE Functions + \brief Function that provides MVE type. + @{ + */ + +/** + \brief get MVE type + \details returns the MVE type + \returns + - \b 0: No Vector Extension (MVE) + - \b 1: Integer Vector Extension (MVE-I) + - \b 2: Floating-point Vector Extension (MVE-F) + */ +__STATIC_INLINE uint32_t SCB_GetMVEType(void) +{ + const uint32_t mvfr1 = FPU->MVFR1; + if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x2U << FPU_MVFR1_MVE_Pos)) + { + return 2U; + } + else if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x1U << FPU_MVFR1_MVE_Pos)) + { + return 1U; + } + else + { + return 0U; + } +} + + +/*@} end of CMSIS_Core_MveFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). 
 + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details Writes to the Debug Authentication Control register. + \param [in] value value to be written. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details Writes to the non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be written + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DIBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System Tick Timer. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. 
+ \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. 
+ \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM55_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_cm7.h b/Drivers/CMSIS/Include/core_cm7.h index a14dc623..010506e9 100644 --- a/Drivers/CMSIS/Include/core_cm7.h +++ b/Drivers/CMSIS/Include/core_cm7.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm7.h * @brief CMSIS Cortex-M7 Core Peripheral Access Layer Header File - * @version V5.0.8 - * @date 04. June 2018 + * @version V5.1.6 + * @date 04. June 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -86,7 +86,7 @@ #endif #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) - #if defined __ARM_PCS_VFP + #if defined __ARM_FP #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) #define __FPU_USED 1U #else @@ -209,6 +209,11 @@ #warning "__DTCM_PRESENT not defined in device header file; using default!" #endif + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" 
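As an illustrative aside (outside the patch itself), the CMSIS-Core function interface added in core_cm55.h above is typically driven from application code roughly as in the minimal sketch below. Only APIs declared in the header are used (NVIC_SetPriorityGrouping, NVIC_GetPriorityGrouping, NVIC_EncodePriority, NVIC_SetPriority, NVIC_EnableIRQ, SysTick_Config, ITM_SendChar); the device header name, the TIM2_IRQn interrupt number and the SystemCoreClock variable are assumed placeholders normally supplied by the vendor device pack, not by this patch.

#include "stm32_device.h"              /* assumed vendor device header: IRQn_Type values, SystemCoreClock */

static void core_setup_sketch(void)
{
  /* Select a priority grouping; lower PRIGROUP values give more preempt-priority bits (valid range 0..7). */
  NVIC_SetPriorityGrouping(4U);

  /* Encode preempt priority 1 / subpriority 0 for the current grouping and apply it to an example IRQ. */
  uint32_t prio = NVIC_EncodePriority(NVIC_GetPriorityGrouping(), 1U, 0U);
  NVIC_SetPriority(TIM2_IRQn, prio);   /* TIM2_IRQn is a hypothetical device interrupt number */
  NVIC_EnableIRQ(TIM2_IRQn);

  /* 1 ms SysTick, assuming SystemCoreClock holds the core clock frequency in Hz. */
  (void)SysTick_Config(SystemCoreClock / 1000U);

  /* Send one character on ITM stimulus port 0; it is only forwarded when a debugger has enabled the ITM. */
  (void)ITM_SendChar('A');
}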
@@ -423,7 +428,7 @@ typedef struct __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ uint32_t RESERVED0[24U]; __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ - uint32_t RSERVED1[24U]; + uint32_t RESERVED1[24U]; __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ uint32_t RESERVED2[24U]; __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ @@ -496,7 +501,8 @@ typedef struct __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ - uint32_t RESERVED7[6U]; + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ + uint32_t RESERVED7[5U]; __IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */ __IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */ __IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */ @@ -671,22 +677,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -870,21 +876,24 @@ typedef struct #define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ #define SCB_CACR_FORCEWT_Msk (1UL << 
SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ -#define SCB_CACR_ECCEN_Pos 1U /*!< SCB CACR: ECCEN Position */ -#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< SCB CACR: ECCEN Mask */ +#define SCB_CACR_ECCEN_Pos 1U /*!< \deprecated SCB CACR: ECCEN Position */ +#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< \deprecated SCB CACR: ECCEN Mask */ + +#define SCB_CACR_ECCDIS_Pos 1U /*!< SCB CACR: ECCDIS Position */ +#define SCB_CACR_ECCDIS_Msk (1UL << SCB_CACR_ECCDIS_Pos) /*!< SCB CACR: ECCDIS Mask */ #define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */ #define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: SIWT Mask */ /* AHBS Control Register Definitions */ #define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */ -#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBPCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */ +#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBSCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */ #define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */ -#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBPCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */ +#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBSCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */ #define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/ -#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBPCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */ +#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBSCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */ /* Auxiliary Bus Fault Status Register Definitions */ #define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/ @@ -930,6 +939,24 @@ typedef struct #define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ /* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISDYNADD_Pos 26U /*!< ACTLR: DISDYNADD Position */ +#define SCnSCB_ACTLR_DISDYNADD_Msk (1UL << SCnSCB_ACTLR_DISDYNADD_Pos) /*!< ACTLR: DISDYNADD Mask */ + +#define SCnSCB_ACTLR_DISISSCH1_Pos 21U /*!< ACTLR: DISISSCH1 Position */ +#define SCnSCB_ACTLR_DISISSCH1_Msk (0x1FUL << SCnSCB_ACTLR_DISISSCH1_Pos) /*!< ACTLR: DISISSCH1 Mask */ + +#define SCnSCB_ACTLR_DISDI_Pos 16U /*!< ACTLR: DISDI Position */ +#define SCnSCB_ACTLR_DISDI_Msk (0x1FUL << SCnSCB_ACTLR_DISDI_Pos) /*!< ACTLR: DISDI Mask */ + +#define SCnSCB_ACTLR_DISCRITAXIRUR_Pos 15U /*!< ACTLR: DISCRITAXIRUR Position */ +#define SCnSCB_ACTLR_DISCRITAXIRUR_Msk (1UL << SCnSCB_ACTLR_DISCRITAXIRUR_Pos) /*!< ACTLR: DISCRITAXIRUR Mask */ + +#define SCnSCB_ACTLR_DISBTACALLOC_Pos 14U /*!< ACTLR: DISBTACALLOC Position */ +#define SCnSCB_ACTLR_DISBTACALLOC_Msk (1UL << SCnSCB_ACTLR_DISBTACALLOC_Pos) /*!< ACTLR: DISBTACALLOC Mask */ + +#define SCnSCB_ACTLR_DISBTACREAD_Pos 13U /*!< ACTLR: DISBTACREAD Position */ +#define SCnSCB_ACTLR_DISBTACREAD_Msk (1UL << SCnSCB_ACTLR_DISBTACREAD_Pos) /*!< ACTLR: DISBTACREAD Mask */ + #define SCnSCB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ #define SCnSCB_ACTLR_DISITMATBFLUSH_Msk (1UL << SCnSCB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ @@ -1024,10 +1051,7 @@ typedef struct __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ uint32_t RESERVED2[15U]; __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ - uint32_t RESERVED3[29U]; - __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ - __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ - __IOM uint32_t IMCR; /*!< Offset: 0xF00 
(R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED3[32U]; uint32_t RESERVED4[43U]; __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ @@ -1078,18 +1102,6 @@ typedef struct #define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ #define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ -/* ITM Integration Write Register Definitions */ -#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ -#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ - -/* ITM Integration Read Register Definitions */ -#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ -#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ - -/* ITM Integration Mode Control Register Definitions */ -#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ -#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ - /* ITM Lock Status Register Definitions */ #define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ #define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ @@ -1325,13 +1337,13 @@ typedef struct /* TPI Integration ETM Data Register Definitions (FIFO0) */ #define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ -#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ #define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ #define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ #define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ -#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ #define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ #define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ @@ -1354,13 +1366,13 @@ typedef struct /* TPI Integration ITM Data Register Definitions (FIFO1) */ #define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ -#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ #define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ #define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ #define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ -#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ #define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ #define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ @@ -1617,6 +1629,9 @@ typedef struct /* Media and FP Feature Register 2 Definitions */ 
+#define FPU_MVFR2_VFP_Misc_Pos 4U /*!< MVFR2: VFP Misc bits Position */ +#define FPU_MVFR2_VFP_Misc_Msk (0xFUL << FPU_MVFR2_VFP_Misc_Pos) /*!< MVFR2: VFP Misc bits Mask */ + /*@} end of group CMSIS_FPU */ @@ -1897,7 +1912,9 @@ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) { if ((int32_t)(IRQn) >= 0) { + __COMPILER_BARRIER(); NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); } } @@ -2122,6 +2139,7 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) { uint32_t *vectors = (uint32_t *)SCB->VTOR; vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); } @@ -2161,6 +2179,7 @@ __NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) /*@} end of CMSIS_Core_NVICFunctions */ + /* ########################## MPU functions #################################### */ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) @@ -2169,6 +2188,7 @@ __NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) #endif + /* ########################## FPU functions #################################### */ /** \ingroup CMSIS_Core_FunctionInterface @@ -2204,340 +2224,15 @@ __STATIC_INLINE uint32_t SCB_GetFPUType(void) } } - /*@} end of CMSIS_Core_FpuFunctions */ - /* ########################## Cache functions #################################### */ -/** - \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_CacheFunctions Cache Functions - \brief Functions that configure Instruction and Data cache. - @{ - */ - -/* Cache Size ID Register Macros */ -#define CCSIDR_WAYS(x) (((x) & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos) -#define CCSIDR_SETS(x) (((x) & SCB_CCSIDR_NUMSETS_Msk ) >> SCB_CCSIDR_NUMSETS_Pos ) - - -/** - \brief Enable I-Cache - \details Turns on I-Cache - */ -__STATIC_INLINE void SCB_EnableICache (void) -{ - #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) - __DSB(); - __ISB(); - SCB->ICIALLU = 0UL; /* invalidate I-Cache */ - __DSB(); - __ISB(); - SCB->CCR |= (uint32_t)SCB_CCR_IC_Msk; /* enable I-Cache */ - __DSB(); - __ISB(); - #endif -} - - -/** - \brief Disable I-Cache - \details Turns off I-Cache - */ -__STATIC_INLINE void SCB_DisableICache (void) -{ - #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) - __DSB(); - __ISB(); - SCB->CCR &= ~(uint32_t)SCB_CCR_IC_Msk; /* disable I-Cache */ - SCB->ICIALLU = 0UL; /* invalidate I-Cache */ - __DSB(); - __ISB(); - #endif -} - - -/** - \brief Invalidate I-Cache - \details Invalidates I-Cache - */ -__STATIC_INLINE void SCB_InvalidateICache (void) -{ - #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) - __DSB(); - __ISB(); - SCB->ICIALLU = 0UL; - __DSB(); - __ISB(); - #endif -} - - -/** - \brief Enable D-Cache - \details Turns on D-Cache - */ -__STATIC_INLINE void SCB_EnableDCache (void) -{ - #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - uint32_t ccsidr; - uint32_t sets; - uint32_t ways; - - SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ - __DSB(); - - ccsidr = SCB->CCSIDR; - - /* invalidate D-Cache */ - sets = (uint32_t)(CCSIDR_SETS(ccsidr)); - do { - ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); - do { - SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | - ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); - #if defined ( __CC_ARM ) - __schedule_barrier(); - #endif - } while (ways-- != 0U); - } while(sets-- != 0U); - __DSB(); - - SCB->CCR |= (uint32_t)SCB_CCR_DC_Msk; /* enable D-Cache */ - - __DSB(); - __ISB(); - #endif -} - - -/** - \brief Disable D-Cache - \details Turns 
off D-Cache - */ -__STATIC_INLINE void SCB_DisableDCache (void) -{ - #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - uint32_t ccsidr; - uint32_t sets; - uint32_t ways; - - SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ - __DSB(); - - SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */ - __DSB(); - - ccsidr = SCB->CCSIDR; - - /* clean & invalidate D-Cache */ - sets = (uint32_t)(CCSIDR_SETS(ccsidr)); - do { - ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); - do { - SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | - ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); - #if defined ( __CC_ARM ) - __schedule_barrier(); - #endif - } while (ways-- != 0U); - } while(sets-- != 0U); - - __DSB(); - __ISB(); - #endif -} - - -/** - \brief Invalidate D-Cache - \details Invalidates D-Cache - */ -__STATIC_INLINE void SCB_InvalidateDCache (void) -{ - #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - uint32_t ccsidr; - uint32_t sets; - uint32_t ways; - - SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ - __DSB(); - - ccsidr = SCB->CCSIDR; - - /* invalidate D-Cache */ - sets = (uint32_t)(CCSIDR_SETS(ccsidr)); - do { - ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); - do { - SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | - ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); - #if defined ( __CC_ARM ) - __schedule_barrier(); - #endif - } while (ways-- != 0U); - } while(sets-- != 0U); - - __DSB(); - __ISB(); - #endif -} - - -/** - \brief Clean D-Cache - \details Cleans D-Cache - */ -__STATIC_INLINE void SCB_CleanDCache (void) -{ - #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - uint32_t ccsidr; - uint32_t sets; - uint32_t ways; - - SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ - __DSB(); - - ccsidr = SCB->CCSIDR; - - /* clean D-Cache */ - sets = (uint32_t)(CCSIDR_SETS(ccsidr)); - do { - ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); - do { - SCB->DCCSW = (((sets << SCB_DCCSW_SET_Pos) & SCB_DCCSW_SET_Msk) | - ((ways << SCB_DCCSW_WAY_Pos) & SCB_DCCSW_WAY_Msk) ); - #if defined ( __CC_ARM ) - __schedule_barrier(); - #endif - } while (ways-- != 0U); - } while(sets-- != 0U); - - __DSB(); - __ISB(); - #endif -} - - -/** - \brief Clean & Invalidate D-Cache - \details Cleans and Invalidates D-Cache - */ -__STATIC_INLINE void SCB_CleanInvalidateDCache (void) -{ - #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - uint32_t ccsidr; - uint32_t sets; - uint32_t ways; - - SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ - __DSB(); - - ccsidr = SCB->CCSIDR; - - /* clean & invalidate D-Cache */ - sets = (uint32_t)(CCSIDR_SETS(ccsidr)); - do { - ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); - do { - SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | - ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); - #if defined ( __CC_ARM ) - __schedule_barrier(); - #endif - } while (ways-- != 0U); - } while(sets-- != 0U); - - __DSB(); - __ISB(); - #endif -} - - -/** - \brief D-Cache Invalidate by address - \details Invalidates D-Cache for the given address - \param[in] addr address (aligned to 32-byte boundary) - \param[in] dsize size of memory block (in number of bytes) -*/ -__STATIC_INLINE void SCB_InvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) -{ - #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - int32_t op_size = dsize; - uint32_t op_addr = (uint32_t)addr; - int32_t linesize = 32; /* in Cortex-M7 size of cache line is fixed to 8 words (32 bytes) */ - - __DSB(); 
- - while (op_size > 0) { - SCB->DCIMVAC = op_addr; - op_addr += (uint32_t)linesize; - op_size -= linesize; - } - - __DSB(); - __ISB(); - #endif -} - - -/** - \brief D-Cache Clean by address - \details Cleans D-Cache for the given address - \param[in] addr address (aligned to 32-byte boundary) - \param[in] dsize size of memory block (in number of bytes) -*/ -__STATIC_INLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize) -{ - #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - int32_t op_size = dsize; - uint32_t op_addr = (uint32_t) addr; - int32_t linesize = 32; /* in Cortex-M7 size of cache line is fixed to 8 words (32 bytes) */ - - __DSB(); - - while (op_size > 0) { - SCB->DCCMVAC = op_addr; - op_addr += (uint32_t)linesize; - op_size -= linesize; - } - - __DSB(); - __ISB(); - #endif -} - - -/** - \brief D-Cache Clean and Invalidate by address - \details Cleans and invalidates D_Cache for the given address - \param[in] addr address (aligned to 32-byte boundary) - \param[in] dsize size of memory block (in number of bytes) -*/ -__STATIC_INLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) -{ - #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - int32_t op_size = dsize; - uint32_t op_addr = (uint32_t) addr; - int32_t linesize = 32; /* in Cortex-M7 size of cache line is fixed to 8 words (32 bytes) */ - - __DSB(); - - while (op_size > 0) { - SCB->DCCIMVAC = op_addr; - op_addr += (uint32_t)linesize; - op_size -= linesize; - } - - __DSB(); - __ISB(); - #endif -} - - -/*@} end of CMSIS_Core_CacheFunctions */ +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif /* ################################## SysTick function ############################################ */ diff --git a/Drivers/CMSIS/Include/core_cm85.h b/Drivers/CMSIS/Include/core_cm85.h new file mode 100644 index 00000000..60463111 --- /dev/null +++ b/Drivers/CMSIS/Include/core_cm85.h @@ -0,0 +1,4672 @@ +/**************************************************************************//** + * @file core_cm85.h + * @brief CMSIS Cortex-M85 Core Peripheral Access Layer Header File + * @version V1.0.4 + * @date 21. April 2022 + ******************************************************************************/ +/* + * Copyright (c) 2022 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM85_H_GENERIC +#define __CORE_CM85_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M85 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM85 definitions */ + +#define __CORTEX_M (85U) /*!< Cortex-M Core */ + +#if defined ( __CC_ARM ) + #error Legacy Arm Compiler does not support Armv8.1-M target architecture. +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM85_H_GENERIC */ + +#ifndef 
__CMSIS_GENERIC + +#ifndef __CORE_CM85_H_DEPENDANT +#define __CORE_CM85_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM85_REV + #define __CM85_REV 0x0001U + #warning "__CM85_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #if __FPU_PRESENT != 0U + #ifndef __FPU_DP + #define __FPU_DP 0U + #warning "__FPU_DP not defined in device header file; using default!" + #endif + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __PMU_PRESENT + #define __PMU_PRESENT 0U + #warning "__PMU_PRESENT not defined in device header file; using default!" + #endif + + #if __PMU_PRESENT != 0U + #ifndef __PMU_NUM_EVENTCNT + #define __PMU_NUM_EVENTCNT 8U + #warning "__PMU_NUM_EVENTCNT not defined in device header file; using default!" + #elif (__PMU_NUM_EVENTCNT > 8 || __PMU_NUM_EVENTCNT < 2) + #error "__PMU_NUM_EVENTCNT is out of range in device header file!" */ + #endif + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M85 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core EWIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core PMU Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:1; /*!< bit: 20 Reserved */ + uint32_t B:1; /*!< bit: 21 BTI active (read 0) */ + uint32_t _reserved2:2; /*!< bit: 22..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_B_Pos 21U /*!< xPSR: B Position */ +#define xPSR_B_Msk (1UL << xPSR_B_Pos) /*!< xPSR: B Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). 
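          A minimal usage sketch (illustrative only): the CMSIS core intrinsics
          __get_CONTROL() / __set_CONTROL() provided via cmsis_compiler.h pair naturally
          with this union for readable bit access; the barrier afterwards is the usual
          precaution when changing CONTROL.
  \code
    CONTROL_Type ctrl;
    ctrl.w = __get_CONTROL();          /* read the current CONTROL value                */
    if (ctrl.b.SPSEL != 0U)
    {
      /* Thread mode is currently running on the Process Stack Pointer (PSP) */
    }
    ctrl.b.nPRIV = 1U;                 /* switch Thread mode to unprivileged execution  */
    __set_CONTROL(ctrl.w);
    __ISB();                           /* make sure the new CONTROL setting takes effect */
  \endcode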
+ */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t BTI_EN:1; /*!< bit: 4 Privileged branch target identification enable */ + uint32_t UBTI_EN:1; /*!< bit: 5 Unprivileged branch target identification enable */ + uint32_t PAC_EN:1; /*!< bit: 6 Privileged pointer authentication enable */ + uint32_t UPAC_EN:1; /*!< bit: 7 Unprivileged pointer authentication enable */ + uint32_t _reserved1:24; /*!< bit: 8..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_UPAC_EN_Pos 7U /*!< CONTROL: UPAC_EN Position */ +#define CONTROL_UPAC_EN_Msk (1UL << CONTROL_UPAC_EN_Pos) /*!< CONTROL: UPAC_EN Mask */ + +#define CONTROL_PAC_EN_Pos 6U /*!< CONTROL: PAC_EN Position */ +#define CONTROL_PAC_EN_Msk (1UL << CONTROL_PAC_EN_Pos) /*!< CONTROL: PAC_EN Mask */ + +#define CONTROL_UBTI_EN_Pos 5U /*!< CONTROL: UBTI_EN Position */ +#define CONTROL_UBTI_EN_Msk (1UL << CONTROL_UBTI_EN_Pos) /*!< CONTROL: UBTI_EN Mask */ + +#define CONTROL_BTI_EN_Pos 4U /*!< CONTROL: BTI_EN Position */ +#define CONTROL_BTI_EN_Msk (1UL << CONTROL_BTI_EN_Pos) /*!< CONTROL: BTI_EN Mask */ + +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
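          A short usage sketch (illustrative only; TIMER0_IRQn is a placeholder device
          interrupt, not defined by this header): the CMSIS NVIC access functions defined
          further below operate on these registers, so direct register writes are rarely
          needed.
  \code
    NVIC_SetPriority(TIMER0_IRQn, 2U);   /* writes the matching NVIC->IPR[] entry        */
    NVIC_EnableIRQ(TIMER0_IRQn);         /* sets the corresponding bit in NVIC->ISER[]   */
  \endcode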
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + __IOM uint32_t RFSR; /*!< Offset: 0x204 (R/W) RAS Fault Status Register */ + uint32_t RESERVED4[14U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position 
*/ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_IESB_Pos 5U /*!< SCB AIRCR: Implicit ESB Enable Position */ +#define SCB_AIRCR_IESB_Msk (1UL << SCB_AIRCR_IESB_Pos) /*!< SCB AIRCR: Implicit ESB Enable Mask */ + +#define SCB_AIRCR_DIT_Pos 4U /*!< SCB AIRCR: Data Independent Timing Position */ +#define SCB_AIRCR_DIT_Msk (1UL << SCB_AIRCR_DIT_Pos) /*!< SCB AIRCR: Data Independent Timing Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define 
SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_TRD_Pos 20U /*!< SCB CCR: TRD Position */ +#define SCB_CCR_TRD_Msk (1UL << SCB_CCR_TRD_Pos) /*!< SCB CCR: TRD Mask */ + +#define SCB_CCR_LOB_Pos 19U /*!< SCB CCR: LOB Position */ +#define SCB_CCR_LOB_Msk (1UL << SCB_CCR_LOB_Pos) /*!< SCB CCR: LOB Mask */ + +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: 
MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status 
Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos 
(SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_PMU_Pos 5U /*!< SCB DFSR: PMU Position */ +#define SCB_DFSR_PMU_Msk (1UL << SCB_DFSR_PMU_Pos) /*!< SCB DFSR: PMU Mask */ + +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CP7_Pos 7U /*!< SCB NSACR: CP7 Position */ +#define SCB_NSACR_CP7_Msk (1UL << SCB_NSACR_CP7_Pos) /*!< SCB NSACR: CP7 Mask */ + +#define SCB_NSACR_CP6_Pos 6U /*!< SCB NSACR: CP6 Position */ +#define SCB_NSACR_CP6_Msk (1UL << SCB_NSACR_CP6_Pos) /*!< SCB NSACR: CP6 Mask */ + +#define SCB_NSACR_CP5_Pos 5U /*!< SCB NSACR: CP5 Position */ +#define SCB_NSACR_CP5_Msk (1UL << SCB_NSACR_CP5_Pos) /*!< SCB NSACR: CP5 Mask */ + +#define SCB_NSACR_CP4_Pos 4U /*!< SCB NSACR: CP4 Position */ +#define SCB_NSACR_CP4_Msk (1UL << SCB_NSACR_CP4_Pos) /*!< SCB NSACR: CP4 Mask */ + +#define SCB_NSACR_CP3_Pos 3U /*!< SCB NSACR: CP3 Position */ +#define SCB_NSACR_CP3_Msk (1UL << SCB_NSACR_CP3_Pos) /*!< SCB NSACR: CP3 Mask */ + +#define SCB_NSACR_CP2_Pos 2U /*!< SCB NSACR: CP2 Position */ +#define SCB_NSACR_CP2_Msk (1UL << SCB_NSACR_CP2_Pos) /*!< SCB NSACR: CP2 Mask */ + +#define 
SCB_NSACR_CP1_Pos 1U /*!< SCB NSACR: CP1 Position */ +#define SCB_NSACR_CP1_Msk (1UL << SCB_NSACR_CP1_Pos) /*!< SCB NSACR: CP1 Mask */ + +#define SCB_NSACR_CP0_Pos 0U /*!< SCB NSACR: CP0 Position */ +#define SCB_NSACR_CP0_Msk (1UL /*<< SCB_NSACR_CP0_Pos*/) /*!< SCB NSACR: CP0 Mask */ + +/* SCB Debug Feature Register 0 Definitions */ +#define SCB_ID_DFR_UDE_Pos 28U /*!< SCB ID_DFR: UDE Position */ +#define SCB_ID_DFR_UDE_Msk (0xFUL << SCB_ID_DFR_UDE_Pos) /*!< SCB ID_DFR: UDE Mask */ + +#define SCB_ID_DFR_MProfDbg_Pos 20U /*!< SCB ID_DFR: MProfDbg Position */ +#define SCB_ID_DFR_MProfDbg_Msk (0xFUL << SCB_ID_DFR_MProfDbg_Pos) /*!< SCB ID_DFR: MProfDbg Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< 
SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB RAS Fault Status Register Definitions */ +#define SCB_RFSR_V_Pos 31U /*!< SCB RFSR: V Position */ +#define SCB_RFSR_V_Msk (1UL << SCB_RFSR_V_Pos) /*!< SCB RFSR: V Mask */ + +#define SCB_RFSR_IS_Pos 16U /*!< SCB RFSR: IS Position */ +#define SCB_RFSR_IS_Msk (0x7FFFUL << SCB_RFSR_IS_Pos) /*!< SCB RFSR: IS Mask */ + +#define SCB_RFSR_UET_Pos 0U /*!< SCB RFSR: UET Position */ +#define SCB_RFSR_UET_Msk (3UL /*<< SCB_RFSR_UET_Pos*/) /*!< SCB RFSR: UET Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ICB Implementation Control Block register (ICB) + \brief Type definitions for the Implementation Control Block Register + @{ + */ + +/** + \brief Structure type to access the Implementation Control Block (ICB). 
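/*
 * Editor's sketch (not part of the diff): how the HFSR/CFSR bit-field macros
 * above are typically combined to classify a HardFault. Assumes the usual
 * CMSIS "SCB" instance macro and SCB_Type members defined elsewhere in this
 * header; the function name is illustrative only.
 */
static inline uint32_t HardFault_IsUndefinedInstruction(void)
{
  /* FORCED: a configurable fault escalated to HardFault. */
  if ((SCB->HFSR & SCB_HFSR_FORCED_Msk) == 0U)
  {
    return 0U;
  }
  /* UNDEFINSTR: the escalated UsageFault was an undefined instruction. */
  return ((SCB->CFSR & SCB_CFSR_UNDEFINSTR_Msk) != 0U) ? 1U : 0U;
}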
+ */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} ICB_Type; + +/* Auxiliary Control Register Definitions */ +#define ICB_ACTLR_DISCRITAXIRUW_Pos 27U /*!< ACTLR: DISCRITAXIRUW Position */ +#define ICB_ACTLR_DISCRITAXIRUW_Msk (1UL << ICB_ACTLR_DISCRITAXIRUW_Pos) /*!< ACTLR: DISCRITAXIRUW Mask */ + +#define ICB_ACTLR_DISCRITAXIRUR_Pos 15U /*!< ACTLR: DISCRITAXIRUR Position */ +#define ICB_ACTLR_DISCRITAXIRUR_Msk (1UL << ICB_ACTLR_DISCRITAXIRUR_Pos) /*!< ACTLR: DISCRITAXIRUR Mask */ + +#define ICB_ACTLR_EVENTBUSEN_Pos 14U /*!< ACTLR: EVENTBUSEN Position */ +#define ICB_ACTLR_EVENTBUSEN_Msk (1UL << ICB_ACTLR_EVENTBUSEN_Pos) /*!< ACTLR: EVENTBUSEN Mask */ + +#define ICB_ACTLR_EVENTBUSEN_S_Pos 13U /*!< ACTLR: EVENTBUSEN_S Position */ +#define ICB_ACTLR_EVENTBUSEN_S_Msk (1UL << ICB_ACTLR_EVENTBUSEN_S_Pos) /*!< ACTLR: EVENTBUSEN_S Mask */ + +#define ICB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ +#define ICB_ACTLR_DISITMATBFLUSH_Msk (1UL << ICB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ + +#define ICB_ACTLR_DISNWAMODE_Pos 11U /*!< ACTLR: DISNWAMODE Position */ +#define ICB_ACTLR_DISNWAMODE_Msk (1UL << ICB_ACTLR_DISNWAMODE_Pos) /*!< ACTLR: DISNWAMODE Mask */ + +#define ICB_ACTLR_FPEXCODIS_Pos 10U /*!< ACTLR: FPEXCODIS Position */ +#define ICB_ACTLR_FPEXCODIS_Msk (1UL << ICB_ACTLR_FPEXCODIS_Pos) /*!< ACTLR: FPEXCODIS Mask */ + +/* Interrupt Controller Type Register Definitions */ +#define ICB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define ICB_ICTR_INTLINESNUM_Msk (0xFUL /*<< ICB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_ICB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
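/*
 * Editor's sketch (not part of the diff): reading the Interrupt Controller
 * Type Register through the ICB structure defined above. Assumes an "ICB"
 * instance macro is provided elsewhere in this header, as CMSIS core headers
 * normally do.
 */
static inline uint32_t ICB_GetMaxInterruptLines(void)
{
  /* INTLINESNUM encodes the supported interrupt lines in steps of 32:
     0 -> up to 32, 1 -> up to 64, and so on. The result is an upper bound,
     not the number actually implemented by the device. */
  uint32_t intlinesnum = ICB->ICTR & ICB_ICTR_INTLINESNUM_Msk;
  return 32U * (intlinesnum + 1U);
}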
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
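/*
 * Editor's sketch (not part of the diff): a minimal SysTick setup built only
 * from the structure and bit-field macros above, similar in spirit to the
 * CMSIS SysTick_Config() helper. Assumes the usual "SysTick" instance macro;
 * "ticks" must fit in the 24-bit RELOAD field.
 */
static inline int32_t SysTick_Setup(uint32_t ticks)
{
  if ((ticks - 1U) > SysTick_LOAD_RELOAD_Msk)
  {
    return -1;                                   /* Reload value impossible  */
  }

  SysTick->LOAD = ticks - 1U;                    /* Set reload register      */
  SysTick->VAL  = 0U;                            /* Reset current counter    */
  SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |   /* Processor clock          */
                  SysTick_CTRL_TICKINT_Msk   |   /* Enable SysTick exception */
                  SysTick_CTRL_ENABLE_Msk;       /* Start the counter        */
  return 0;
}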
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) ITM Device Type Register */ + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: 
STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + 
__IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ 
+#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define 
DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup MemSysCtl_Type Memory System Control Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Memory System Control Registers (MEMSYSCTL) + @{ + */ + +/** + \brief Structure type to access the Memory System Control Registers (MEMSYSCTL). + */ +typedef struct +{ + __IOM uint32_t MSCR; /*!< Offset: 0x000 (R/W) Memory System Control Register */ + __IOM uint32_t PFCR; /*!< Offset: 0x004 (R/W) Prefetcher Control Register */ + uint32_t RESERVED1[2U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x010 (R/W) ITCM Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x014 (R/W) DTCM Control Register */ + __IOM uint32_t PAHBCR; /*!< Offset: 0x018 (R/W) P-AHB Control Register */ + uint32_t RESERVED2[313U]; + __IOM uint32_t ITGU_CTRL; /*!< Offset: 0x500 (R/W) ITGU Control Register */ + __IOM uint32_t ITGU_CFG; /*!< Offset: 0x504 (R/W) ITGU Configuration Register */ + uint32_t RESERVED3[2U]; + __IOM uint32_t ITGU_LUT[16U]; /*!< Offset: 0x510 (R/W) ITGU Look Up Table Register */ + uint32_t RESERVED4[44U]; + __IOM uint32_t DTGU_CTRL; /*!< Offset: 0x600 (R/W) DTGU Control Registers */ + __IOM uint32_t DTGU_CFG; /*!< Offset: 0x604 (R/W) DTGU Configuration Register */ + uint32_t RESERVED5[2U]; + __IOM uint32_t DTGU_LUT[16U]; /*!< Offset: 0x610 (R/W) DTGU Look Up Table Register */ +} MemSysCtl_Type; + +/* MEMSYSCTL Memory System Control Register (MSCR) Register Definitions */ +#define MEMSYSCTL_MSCR_CPWRDN_Pos 17U /*!< MEMSYSCTL MSCR: CPWRDN Position */ +#define MEMSYSCTL_MSCR_CPWRDN_Msk (0x1UL << MEMSYSCTL_MSCR_CPWRDN_Pos) /*!< MEMSYSCTL MSCR: CPWRDN Mask */ + +#define MEMSYSCTL_MSCR_DCCLEAN_Pos 16U /*!< MEMSYSCTL MSCR: DCCLEAN Position */ +#define MEMSYSCTL_MSCR_DCCLEAN_Msk (0x1UL << MEMSYSCTL_MSCR_DCCLEAN_Pos) /*!< MEMSYSCTL MSCR: DCCLEAN Mask */ + +#define MEMSYSCTL_MSCR_ICACTIVE_Pos 13U /*!< MEMSYSCTL MSCR: ICACTIVE Position */ +#define MEMSYSCTL_MSCR_ICACTIVE_Msk (0x1UL << MEMSYSCTL_MSCR_ICACTIVE_Pos) /*!< MEMSYSCTL MSCR: ICACTIVE Mask */ + +#define MEMSYSCTL_MSCR_DCACTIVE_Pos 12U /*!< MEMSYSCTL MSCR: DCACTIVE Position */ +#define MEMSYSCTL_MSCR_DCACTIVE_Msk (0x1UL << MEMSYSCTL_MSCR_DCACTIVE_Pos) /*!< MEMSYSCTL MSCR: DCACTIVE Mask */ + +#define MEMSYSCTL_MSCR_EVECCFAULT_Pos 3U /*!< MEMSYSCTL MSCR: EVECCFAULT Position */ +#define MEMSYSCTL_MSCR_EVECCFAULT_Msk (0x1UL << MEMSYSCTL_MSCR_EVECCFAULT_Pos) /*!< MEMSYSCTL MSCR: EVECCFAULT Mask */ + +#define MEMSYSCTL_MSCR_FORCEWT_Pos 2U /*!< MEMSYSCTL MSCR: FORCEWT Position */ +#define MEMSYSCTL_MSCR_FORCEWT_Msk (0x1UL << MEMSYSCTL_MSCR_FORCEWT_Pos) /*!< MEMSYSCTL MSCR: FORCEWT Mask */ + +#define MEMSYSCTL_MSCR_ECCEN_Pos 1U /*!< MEMSYSCTL MSCR: ECCEN Position */ +#define MEMSYSCTL_MSCR_ECCEN_Msk (0x1UL << MEMSYSCTL_MSCR_ECCEN_Pos) /*!< MEMSYSCTL MSCR: ECCEN Mask */ + +/* MEMSYSCTL Prefetcher Control Register (PFCR) Register Definitions */ +#define MEMSYSCTL_PFCR_DIS_NLP_Pos 7U /*!< MEMSYSCTL PFCR: DIS_NLP Position */ +#define 
MEMSYSCTL_PFCR_DIS_NLP_Msk (0x1UL << MEMSYSCTL_PFCR_DIS_NLP_Pos) /*!< MEMSYSCTL PFCR: DIS_NLP Mask */ + +#define MEMSYSCTL_PFCR_ENABLE_Pos 0U /*!< MEMSYSCTL PFCR: ENABLE Position */ +#define MEMSYSCTL_PFCR_ENABLE_Msk (0x1UL /*<< MEMSYSCTL_PFCR_ENABLE_Pos*/) /*!< MEMSYSCTL PFCR: ENABLE Mask */ + +/* MEMSYSCTL ITCM Control Register (ITCMCR) Register Definitions */ +#define MEMSYSCTL_ITCMCR_SZ_Pos 3U /*!< MEMSYSCTL ITCMCR: SZ Position */ +#define MEMSYSCTL_ITCMCR_SZ_Msk (0xFUL << MEMSYSCTL_ITCMCR_SZ_Pos) /*!< MEMSYSCTL ITCMCR: SZ Mask */ + +#define MEMSYSCTL_ITCMCR_EN_Pos 0U /*!< MEMSYSCTL ITCMCR: EN Position */ +#define MEMSYSCTL_ITCMCR_EN_Msk (0x1UL /*<< MEMSYSCTL_ITCMCR_EN_Pos*/) /*!< MEMSYSCTL ITCMCR: EN Mask */ + +/* MEMSYSCTL DTCM Control Register (DTCMCR) Register Definitions */ +#define MEMSYSCTL_DTCMCR_SZ_Pos 3U /*!< MEMSYSCTL DTCMCR: SZ Position */ +#define MEMSYSCTL_DTCMCR_SZ_Msk (0xFUL << MEMSYSCTL_DTCMCR_SZ_Pos) /*!< MEMSYSCTL DTCMCR: SZ Mask */ + +#define MEMSYSCTL_DTCMCR_EN_Pos 0U /*!< MEMSYSCTL DTCMCR: EN Position */ +#define MEMSYSCTL_DTCMCR_EN_Msk (0x1UL /*<< MEMSYSCTL_DTCMCR_EN_Pos*/) /*!< MEMSYSCTL DTCMCR: EN Mask */ + +/* MEMSYSCTL P-AHB Control Register (PAHBCR) Register Definitions */ +#define MEMSYSCTL_PAHBCR_SZ_Pos 1U /*!< MEMSYSCTL PAHBCR: SZ Position */ +#define MEMSYSCTL_PAHBCR_SZ_Msk (0x7UL << MEMSYSCTL_PAHBCR_SZ_Pos) /*!< MEMSYSCTL PAHBCR: SZ Mask */ + +#define MEMSYSCTL_PAHBCR_EN_Pos 0U /*!< MEMSYSCTL PAHBCR: EN Position */ +#define MEMSYSCTL_PAHBCR_EN_Msk (0x1UL /*<< MEMSYSCTL_PAHBCR_EN_Pos*/) /*!< MEMSYSCTL PAHBCR: EN Mask */ + +/* MEMSYSCTL ITGU Control Register (ITGU_CTRL) Register Definitions */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL ITGU_CTRL: DEREN Position */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Msk (0x1UL << MEMSYSCTL_ITGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL ITGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL ITGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Msk (0x1UL /*<< MEMSYSCTL_ITGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL ITGU_CTRL: DBFEN Mask */ + +/* MEMSYSCTL ITGU Configuration Register (ITGU_CFG) Register Definitions */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL ITGU_CFG: PRESENT Position */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Msk (0x1UL << MEMSYSCTL_ITGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL ITGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL ITGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_ITGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL ITGU_CFG: BLKSZ Mask */ + +/* MEMSYSCTL DTGU Control Registers (DTGU_CTRL) Register Definitions */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL DTGU_CTRL: DEREN Position */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Msk (0x1UL << MEMSYSCTL_DTGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL DTGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL DTGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Msk (0x1UL /*<< MEMSYSCTL_DTGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL DTGU_CTRL: DBFEN Mask */ + +/* MEMSYSCTL DTGU Configuration Register (DTGU_CFG) Register Definitions */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL DTGU_CFG: PRESENT Position */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Msk (0x1UL << MEMSYSCTL_DTGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL DTGU_CFG: 
PRESENT Mask */ + +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL DTGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_DTGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL DTGU_CFG: BLKSZ Mask */ + + +/*@}*/ /* end of group MemSysCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup PwrModCtl_Type Power Mode Control Registers + \brief Type definitions for the Power Mode Control Registers (PWRMODCTL) + @{ + */ + +/** + \brief Structure type to access the Power Mode Control Registers (PWRMODCTL). + */ +typedef struct +{ + __IOM uint32_t CPDLPSTATE; /*!< Offset: 0x000 (R/W) Core Power Domain Low Power State Register */ + __IOM uint32_t DPDLPSTATE; /*!< Offset: 0x004 (R/W) Debug Power Domain Low Power State Register */ +} PwrModCtl_Type; + +/* PWRMODCTL Core Power Domain Low Power State (CPDLPSTATE) Register Definitions */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos 8U /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos 4U /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos 0U /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk (0x3UL /*<< PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos*/) /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Mask */ + +/* PWRMODCTL Debug Power Domain Low Power State (DPDLPSTATE) Register Definitions */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos 0U /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Position */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Msk (0x3UL /*<< PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos*/) /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Mask */ + +/*@}*/ /* end of group PwrModCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup EWIC_Type External Wakeup Interrupt Controller Registers + \brief Type definitions for the External Wakeup Interrupt Controller Registers (EWIC) + @{ + */ + +/** + \brief Structure type to access the External Wakeup Interrupt Controller Registers (EWIC). 
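/*
 * Editor's sketch (not part of the diff): the usual read-modify-write pattern
 * for a multi-bit field, shown here on the Core Power Domain Low Power State
 * register defined above. Assumes a "PWRMODCTL" instance macro is provided
 * elsewhere in this header; the "state" parameter (0..3) is illustrative.
 */
static inline void PwrModCtl_SetCoreLowPowerState(uint32_t state)
{
  uint32_t reg = PWRMODCTL->CPDLPSTATE;

  reg &= ~PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk;                 /* Clear CLPSTATE field */
  reg |= (state << PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos) &
         PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk;                  /* Insert new value     */

  PWRMODCTL->CPDLPSTATE = reg;
}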
+ */ +typedef struct +{ + __OM uint32_t EVENTSPR; /*!< Offset: 0x000 ( /W) Event Set Pending Register */ + uint32_t RESERVED0[31U]; + __IM uint32_t EVENTMASKA; /*!< Offset: 0x080 (R/W) Event Mask A Register */ + __IM uint32_t EVENTMASK[15]; /*!< Offset: 0x084 (R/W) Event Mask Register */ +} EWIC_Type; + +/* EWIC External Wakeup Interrupt Controller (EVENTSPR) Register Definitions */ +#define EWIC_EVENTSPR_EDBGREQ_Pos 2U /*!< EWIC EVENTSPR: EDBGREQ Position */ +#define EWIC_EVENTSPR_EDBGREQ_Msk (0x1UL << EWIC_EVENTSPR_EDBGREQ_Pos) /*!< EWIC EVENTSPR: EDBGREQ Mask */ + +#define EWIC_EVENTSPR_NMI_Pos 1U /*!< EWIC EVENTSPR: NMI Position */ +#define EWIC_EVENTSPR_NMI_Msk (0x1UL << EWIC_EVENTSPR_NMI_Pos) /*!< EWIC EVENTSPR: NMI Mask */ + +#define EWIC_EVENTSPR_EVENT_Pos 0U /*!< EWIC EVENTSPR: EVENT Position */ +#define EWIC_EVENTSPR_EVENT_Msk (0x1UL /*<< EWIC_EVENTSPR_EVENT_Pos*/) /*!< EWIC EVENTSPR: EVENT Mask */ + +/* EWIC External Wakeup Interrupt Controller (EVENTMASKA) Register Definitions */ +#define EWIC_EVENTMASKA_EDBGREQ_Pos 2U /*!< EWIC EVENTMASKA: EDBGREQ Position */ +#define EWIC_EVENTMASKA_EDBGREQ_Msk (0x1UL << EWIC_EVENTMASKA_EDBGREQ_Pos) /*!< EWIC EVENTMASKA: EDBGREQ Mask */ + +#define EWIC_EVENTMASKA_NMI_Pos 1U /*!< EWIC EVENTMASKA: NMI Position */ +#define EWIC_EVENTMASKA_NMI_Msk (0x1UL << EWIC_EVENTMASKA_NMI_Pos) /*!< EWIC EVENTMASKA: NMI Mask */ + +#define EWIC_EVENTMASKA_EVENT_Pos 0U /*!< EWIC EVENTMASKA: EVENT Position */ +#define EWIC_EVENTMASKA_EVENT_Msk (0x1UL /*<< EWIC_EVENTMASKA_EVENT_Pos*/) /*!< EWIC EVENTMASKA: EVENT Mask */ + +/* EWIC External Wakeup Interrupt Controller (EVENTMASK) Register Definitions */ +#define EWIC_EVENTMASK_IRQ_Pos 0U /*!< EWIC EVENTMASKA: IRQ Position */ +#define EWIC_EVENTMASK_IRQ_Msk (0xFFFFFFFFUL /*<< EWIC_EVENTMASKA_IRQ_Pos*/) /*!< EWIC EVENTMASKA: IRQ Mask */ + +/*@}*/ /* end of group EWIC_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup ErrBnk_Type Error Banking Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Error Banking Registers (ERRBNK) + @{ + */ + +/** + \brief Structure type to access the Error Banking Registers (ERRBNK). 
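/*
 * Editor's sketch (not part of the diff): using the write-only EVENTSPR
 * register defined above to mark a wakeup event as pending. Assumes an "EWIC"
 * instance macro (and an EWIC actually being present on the device), which
 * the CMSIS core header normally provides.
 */
static inline void EWIC_SetPendingNMI(void)
{
  /* EVENTSPR is write-only; writing a 1 sets the corresponding event. */
  EWIC->EVENTSPR = EWIC_EVENTSPR_NMI_Msk;
}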
+ */ +typedef struct +{ + __IOM uint32_t IEBR0; /*!< Offset: 0x000 (R/W) Instruction Cache Error Bank Register 0 */ + __IOM uint32_t IEBR1; /*!< Offset: 0x004 (R/W) Instruction Cache Error Bank Register 1 */ + uint32_t RESERVED0[2U]; + __IOM uint32_t DEBR0; /*!< Offset: 0x010 (R/W) Data Cache Error Bank Register 0 */ + __IOM uint32_t DEBR1; /*!< Offset: 0x014 (R/W) Data Cache Error Bank Register 1 */ + uint32_t RESERVED1[2U]; + __IOM uint32_t TEBR0; /*!< Offset: 0x020 (R/W) TCM Error Bank Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t TEBR1; /*!< Offset: 0x028 (R/W) TCM Error Bank Register 1 */ +} ErrBnk_Type; + +/* ERRBNK Instruction Cache Error Bank Register 0 (IEBR0) Register Definitions */ +#define ERRBNK_IEBR0_SWDEF_Pos 30U /*!< ERRBNK IEBR0: SWDEF Position */ +#define ERRBNK_IEBR0_SWDEF_Msk (0x3UL << ERRBNK_IEBR0_SWDEF_Pos) /*!< ERRBNK IEBR0: SWDEF Mask */ + +#define ERRBNK_IEBR0_BANK_Pos 16U /*!< ERRBNK IEBR0: BANK Position */ +#define ERRBNK_IEBR0_BANK_Msk (0x1UL << ERRBNK_IEBR0_BANK_Pos) /*!< ERRBNK IEBR0: BANK Mask */ + +#define ERRBNK_IEBR0_LOCATION_Pos 2U /*!< ERRBNK IEBR0: LOCATION Position */ +#define ERRBNK_IEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR0_LOCATION_Pos) /*!< ERRBNK IEBR0: LOCATION Mask */ + +#define ERRBNK_IEBR0_LOCKED_Pos 1U /*!< ERRBNK IEBR0: LOCKED Position */ +#define ERRBNK_IEBR0_LOCKED_Msk (0x1UL << ERRBNK_IEBR0_LOCKED_Pos) /*!< ERRBNK IEBR0: LOCKED Mask */ + +#define ERRBNK_IEBR0_VALID_Pos 0U /*!< ERRBNK IEBR0: VALID Position */ +#define ERRBNK_IEBR0_VALID_Msk (0x1UL << /*ERRBNK_IEBR0_VALID_Pos*/) /*!< ERRBNK IEBR0: VALID Mask */ + +/* ERRBNK Instruction Cache Error Bank Register 1 (IEBR1) Register Definitions */ +#define ERRBNK_IEBR1_SWDEF_Pos 30U /*!< ERRBNK IEBR1: SWDEF Position */ +#define ERRBNK_IEBR1_SWDEF_Msk (0x3UL << ERRBNK_IEBR1_SWDEF_Pos) /*!< ERRBNK IEBR1: SWDEF Mask */ + +#define ERRBNK_IEBR1_BANK_Pos 16U /*!< ERRBNK IEBR1: BANK Position */ +#define ERRBNK_IEBR1_BANK_Msk (0x1UL << ERRBNK_IEBR1_BANK_Pos) /*!< ERRBNK IEBR1: BANK Mask */ + +#define ERRBNK_IEBR1_LOCATION_Pos 2U /*!< ERRBNK IEBR1: LOCATION Position */ +#define ERRBNK_IEBR1_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR1_LOCATION_Pos) /*!< ERRBNK IEBR1: LOCATION Mask */ + +#define ERRBNK_IEBR1_LOCKED_Pos 1U /*!< ERRBNK IEBR1: LOCKED Position */ +#define ERRBNK_IEBR1_LOCKED_Msk (0x1UL << ERRBNK_IEBR1_LOCKED_Pos) /*!< ERRBNK IEBR1: LOCKED Mask */ + +#define ERRBNK_IEBR1_VALID_Pos 0U /*!< ERRBNK IEBR1: VALID Position */ +#define ERRBNK_IEBR1_VALID_Msk (0x1UL << /*ERRBNK_IEBR1_VALID_Pos*/) /*!< ERRBNK IEBR1: VALID Mask */ + +/* ERRBNK Data Cache Error Bank Register 0 (DEBR0) Register Definitions */ +#define ERRBNK_DEBR0_SWDEF_Pos 30U /*!< ERRBNK DEBR0: SWDEF Position */ +#define ERRBNK_DEBR0_SWDEF_Msk (0x3UL << ERRBNK_DEBR0_SWDEF_Pos) /*!< ERRBNK DEBR0: SWDEF Mask */ + +#define ERRBNK_DEBR0_TYPE_Pos 17U /*!< ERRBNK DEBR0: TYPE Position */ +#define ERRBNK_DEBR0_TYPE_Msk (0x1UL << ERRBNK_DEBR0_TYPE_Pos) /*!< ERRBNK DEBR0: TYPE Mask */ + +#define ERRBNK_DEBR0_BANK_Pos 16U /*!< ERRBNK DEBR0: BANK Position */ +#define ERRBNK_DEBR0_BANK_Msk (0x1UL << ERRBNK_DEBR0_BANK_Pos) /*!< ERRBNK DEBR0: BANK Mask */ + +#define ERRBNK_DEBR0_LOCATION_Pos 2U /*!< ERRBNK DEBR0: LOCATION Position */ +#define ERRBNK_DEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_DEBR0_LOCATION_Pos) /*!< ERRBNK DEBR0: LOCATION Mask */ + +#define ERRBNK_DEBR0_LOCKED_Pos 1U /*!< ERRBNK DEBR0: LOCKED Position */ +#define ERRBNK_DEBR0_LOCKED_Msk (0x1UL << ERRBNK_DEBR0_LOCKED_Pos) /*!< ERRBNK DEBR0: LOCKED Mask */ + +#define 
ERRBNK_DEBR0_VALID_Pos 0U /*!< ERRBNK DEBR0: VALID Position */ +#define ERRBNK_DEBR0_VALID_Msk (0x1UL << /*ERRBNK_DEBR0_VALID_Pos*/) /*!< ERRBNK DEBR0: VALID Mask */ + +/* ERRBNK Data Cache Error Bank Register 1 (DEBR1) Register Definitions */ +#define ERRBNK_DEBR1_SWDEF_Pos 30U /*!< ERRBNK DEBR1: SWDEF Position */ +#define ERRBNK_DEBR1_SWDEF_Msk (0x3UL << ERRBNK_DEBR1_SWDEF_Pos) /*!< ERRBNK DEBR1: SWDEF Mask */ + +#define ERRBNK_DEBR1_TYPE_Pos 17U /*!< ERRBNK DEBR1: TYPE Position */ +#define ERRBNK_DEBR1_TYPE_Msk (0x1UL << ERRBNK_DEBR1_TYPE_Pos) /*!< ERRBNK DEBR1: TYPE Mask */ + +#define ERRBNK_DEBR1_BANK_Pos 16U /*!< ERRBNK DEBR1: BANK Position */ +#define ERRBNK_DEBR1_BANK_Msk (0x1UL << ERRBNK_DEBR1_BANK_Pos) /*!< ERRBNK DEBR1: BANK Mask */ + +#define ERRBNK_DEBR1_LOCATION_Pos 2U /*!< ERRBNK DEBR1: LOCATION Position */ +#define ERRBNK_DEBR1_LOCATION_Msk (0x3FFFUL << ERRBNK_DEBR1_LOCATION_Pos) /*!< ERRBNK DEBR1: LOCATION Mask */ + +#define ERRBNK_DEBR1_LOCKED_Pos 1U /*!< ERRBNK DEBR1: LOCKED Position */ +#define ERRBNK_DEBR1_LOCKED_Msk (0x1UL << ERRBNK_DEBR1_LOCKED_Pos) /*!< ERRBNK DEBR1: LOCKED Mask */ + +#define ERRBNK_DEBR1_VALID_Pos 0U /*!< ERRBNK DEBR1: VALID Position */ +#define ERRBNK_DEBR1_VALID_Msk (0x1UL << /*ERRBNK_DEBR1_VALID_Pos*/) /*!< ERRBNK DEBR1: VALID Mask */ + +/* ERRBNK TCM Error Bank Register 0 (TEBR0) Register Definitions */ +#define ERRBNK_TEBR0_SWDEF_Pos 30U /*!< ERRBNK TEBR0: SWDEF Position */ +#define ERRBNK_TEBR0_SWDEF_Msk (0x3UL << ERRBNK_TEBR0_SWDEF_Pos) /*!< ERRBNK TEBR0: SWDEF Mask */ + +#define ERRBNK_TEBR0_POISON_Pos 28U /*!< ERRBNK TEBR0: POISON Position */ +#define ERRBNK_TEBR0_POISON_Msk (0x1UL << ERRBNK_TEBR0_POISON_Pos) /*!< ERRBNK TEBR0: POISON Mask */ + +#define ERRBNK_TEBR0_TYPE_Pos 27U /*!< ERRBNK TEBR0: TYPE Position */ +#define ERRBNK_TEBR0_TYPE_Msk (0x1UL << ERRBNK_TEBR0_TYPE_Pos) /*!< ERRBNK TEBR0: TYPE Mask */ + +#define ERRBNK_TEBR0_BANK_Pos 24U /*!< ERRBNK TEBR0: BANK Position */ +#define ERRBNK_TEBR0_BANK_Msk (0x3UL << ERRBNK_TEBR0_BANK_Pos) /*!< ERRBNK TEBR0: BANK Mask */ + +#define ERRBNK_TEBR0_LOCATION_Pos 2U /*!< ERRBNK TEBR0: LOCATION Position */ +#define ERRBNK_TEBR0_LOCATION_Msk (0x3FFFFFUL << ERRBNK_TEBR0_LOCATION_Pos) /*!< ERRBNK TEBR0: LOCATION Mask */ + +#define ERRBNK_TEBR0_LOCKED_Pos 1U /*!< ERRBNK TEBR0: LOCKED Position */ +#define ERRBNK_TEBR0_LOCKED_Msk (0x1UL << ERRBNK_TEBR0_LOCKED_Pos) /*!< ERRBNK TEBR0: LOCKED Mask */ + +#define ERRBNK_TEBR0_VALID_Pos 0U /*!< ERRBNK TEBR0: VALID Position */ +#define ERRBNK_TEBR0_VALID_Msk (0x1UL << /*ERRBNK_TEBR0_VALID_Pos*/) /*!< ERRBNK TEBR0: VALID Mask */ + +/* ERRBNK TCM Error Bank Register 1 (TEBR1) Register Definitions */ +#define ERRBNK_TEBR1_SWDEF_Pos 30U /*!< ERRBNK TEBR1: SWDEF Position */ +#define ERRBNK_TEBR1_SWDEF_Msk (0x3UL << ERRBNK_TEBR1_SWDEF_Pos) /*!< ERRBNK TEBR1: SWDEF Mask */ + +#define ERRBNK_TEBR1_POISON_Pos 28U /*!< ERRBNK TEBR1: POISON Position */ +#define ERRBNK_TEBR1_POISON_Msk (0x1UL << ERRBNK_TEBR1_POISON_Pos) /*!< ERRBNK TEBR1: POISON Mask */ + +#define ERRBNK_TEBR1_TYPE_Pos 27U /*!< ERRBNK TEBR1: TYPE Position */ +#define ERRBNK_TEBR1_TYPE_Msk (0x1UL << ERRBNK_TEBR1_TYPE_Pos) /*!< ERRBNK TEBR1: TYPE Mask */ + +#define ERRBNK_TEBR1_BANK_Pos 24U /*!< ERRBNK TEBR1: BANK Position */ +#define ERRBNK_TEBR1_BANK_Msk (0x3UL << ERRBNK_TEBR1_BANK_Pos) /*!< ERRBNK TEBR1: BANK Mask */ + +#define ERRBNK_TEBR1_LOCATION_Pos 2U /*!< ERRBNK TEBR1: LOCATION Position */ +#define ERRBNK_TEBR1_LOCATION_Msk (0x3FFFFFUL << ERRBNK_TEBR1_LOCATION_Pos) /*!< ERRBNK TEBR1: LOCATION 
Mask */ + +#define ERRBNK_TEBR1_LOCKED_Pos 1U /*!< ERRBNK TEBR1: LOCKED Position */ +#define ERRBNK_TEBR1_LOCKED_Msk (0x1UL << ERRBNK_TEBR1_LOCKED_Pos) /*!< ERRBNK TEBR1: LOCKED Mask */ + +#define ERRBNK_TEBR1_VALID_Pos 0U /*!< ERRBNK TEBR1: VALID Position */ +#define ERRBNK_TEBR1_VALID_Msk (0x1UL << /*ERRBNK_TEBR1_VALID_Pos*/) /*!< ERRBNK TEBR1: VALID Mask */ + +/*@}*/ /* end of group ErrBnk_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup PrcCfgInf_Type Processor Configuration Information Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Processor Configuration Information Registerss (PRCCFGINF) + @{ + */ + +/** + \brief Structure type to access the Processor Configuration Information Registerss (PRCCFGINF). + */ +typedef struct +{ + __OM uint32_t CFGINFOSEL; /*!< Offset: 0x000 ( /W) Processor Configuration Information Selection Register */ + __IM uint32_t CFGINFORD; /*!< Offset: 0x004 (R/ ) Processor Configuration Information Read Data Register */ +} PrcCfgInf_Type; + +/* PRCCFGINF Processor Configuration Information Selection Register (CFGINFOSEL) Definitions */ + +/* PRCCFGINF Processor Configuration Information Read Data Register (CFGINFORD) Definitions */ + +/*@}*/ /* end of group PrcCfgInf_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define 
TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFmt_Pos 0U /*!< TPI FFCR: EnFmt Position */ +#define TPI_FFCR_EnFmt_Msk (0x3UL << /*TPI_FFCR_EnFmt_Pos*/) /*!< TPI FFCR: EnFmt Mask */ + +/* TPI Periodic Synchronization Control Register Definitions */ +#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */ +#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: TPSCount Mask */ + +/* TPI Software Lock Status Register Definitions */ +#define TPI_LSR_nTT_Pos 1U /*!< TPI LSR: Not thirty-two bit. Position */ +#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_PMU Performance Monitoring Unit (PMU) + \brief Type definitions for the Performance Monitoring Unit (PMU) + @{ + */ + +/** + \brief Structure type to access the Performance Monitoring Unit (PMU). 
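/*
 * Editor's sketch (not part of the diff): deriving the SWO prescaler from the
 * TPI bit fields defined above. Assumes the usual "TPI" instance macro; the
 * "cpu_clock_hz" and "swo_baud" parameters are illustrative, and any software
 * lock (LAR/LSR) handling required by the implementation is omitted.
 */
static inline void TPI_ConfigureSWO(uint32_t cpu_clock_hz, uint32_t swo_baud)
{
  /* Select NRZ (UART-like) encoding on the trace output pin (TXMODE = 2). */
  TPI->SPPR = (2UL << TPI_SPPR_TXMODE_Pos) & TPI_SPPR_TXMODE_Msk;

  /* SWO bit rate = clock / (SWOSCALER + 1). */
  TPI->ACPR = ((cpu_clock_hz / swo_baud) - 1UL) & TPI_ACPR_SWOSCALER_Msk;
}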
+ */ +typedef struct +{ + __IOM uint32_t EVCNTR[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x0 (R/W) PMU Event Counter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED0[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCNTR; /*!< Offset: 0x7C (R/W) PMU Cycle Counter Register */ + uint32_t RESERVED1[224]; + __IOM uint32_t EVTYPER[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x400 (R/W) PMU Event Type and Filter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED2[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCFILTR; /*!< Offset: 0x47C (R/W) PMU Cycle Counter Filter Register */ + uint32_t RESERVED3[480]; + __IOM uint32_t CNTENSET; /*!< Offset: 0xC00 (R/W) PMU Count Enable Set Register */ + uint32_t RESERVED4[7]; + __IOM uint32_t CNTENCLR; /*!< Offset: 0xC20 (R/W) PMU Count Enable Clear Register */ + uint32_t RESERVED5[7]; + __IOM uint32_t INTENSET; /*!< Offset: 0xC40 (R/W) PMU Interrupt Enable Set Register */ + uint32_t RESERVED6[7]; + __IOM uint32_t INTENCLR; /*!< Offset: 0xC60 (R/W) PMU Interrupt Enable Clear Register */ + uint32_t RESERVED7[7]; + __IOM uint32_t OVSCLR; /*!< Offset: 0xC80 (R/W) PMU Overflow Flag Status Clear Register */ + uint32_t RESERVED8[7]; + __IOM uint32_t SWINC; /*!< Offset: 0xCA0 (R/W) PMU Software Increment Register */ + uint32_t RESERVED9[7]; + __IOM uint32_t OVSSET; /*!< Offset: 0xCC0 (R/W) PMU Overflow Flag Status Set Register */ + uint32_t RESERVED10[79]; + __IOM uint32_t TYPE; /*!< Offset: 0xE00 (R/W) PMU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0xE04 (R/W) PMU Control Register */ + uint32_t RESERVED11[108]; + __IOM uint32_t AUTHSTATUS; /*!< Offset: 0xFB8 (R/W) PMU Authentication Status Register */ + __IOM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/W) PMU Device Architecture Register */ + uint32_t RESERVED12[3]; + __IOM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/W) PMU Device Type Register */ + __IOM uint32_t PIDR4; /*!< Offset: 0xFD0 (R/W) PMU Peripheral Identification Register 4 */ + uint32_t RESERVED13[3]; + __IOM uint32_t PIDR0; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 0 */ + __IOM uint32_t PIDR1; /*!< Offset: 0xFE4 (R/W) PMU Peripheral Identification Register 1 */ + __IOM uint32_t PIDR2; /*!< Offset: 0xFE8 (R/W) PMU Peripheral Identification Register 2 */ + __IOM uint32_t PIDR3; /*!< Offset: 0xFEC (R/W) PMU Peripheral Identification Register 3 */ + __IOM uint32_t CIDR0; /*!< Offset: 0xFF0 (R/W) PMU Component Identification Register 0 */ + __IOM uint32_t CIDR1; /*!< Offset: 0xFF4 (R/W) PMU Component Identification Register 1 */ + __IOM uint32_t CIDR2; /*!< Offset: 0xFF8 (R/W) PMU Component Identification Register 2 */ + __IOM uint32_t CIDR3; /*!< Offset: 0xFFC (R/W) PMU Component Identification Register 3 */ +} PMU_Type; + +/** \brief PMU Event Counter Registers (0-30) Definitions */ + +#define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */ +#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ + +/** \brief PMU Event Type and Filter Registers (0-30) Definitions */ + +#define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */ +#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ + +/** \brief PMU Count Enable Set Register Definitions */ + +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENSET: Event Counter 0 Enable Set Position */ +#define PMU_CNTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENSET_CNT0_ENABLE_Pos*/) /*!< PMU CNTENSET: Event Counter 0 Enable Set Mask */ + +#define 
PMU_CNTENSET_CNT1_ENABLE_Pos 1U /*!< PMU CNTENSET: Event Counter 1 Enable Set Position */ +#define PMU_CNTENSET_CNT1_ENABLE_Msk (1UL << PMU_CNTENSET_CNT1_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 1 Enable Set Mask */ + +#define PMU_CNTENSET_CNT2_ENABLE_Pos 2U /*!< PMU CNTENSET: Event Counter 2 Enable Set Position */ +#define PMU_CNTENSET_CNT2_ENABLE_Msk (1UL << PMU_CNTENSET_CNT2_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 2 Enable Set Mask */ + +#define PMU_CNTENSET_CNT3_ENABLE_Pos 3U /*!< PMU CNTENSET: Event Counter 3 Enable Set Position */ +#define PMU_CNTENSET_CNT3_ENABLE_Msk (1UL << PMU_CNTENSET_CNT3_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 3 Enable Set Mask */ + +#define PMU_CNTENSET_CNT4_ENABLE_Pos 4U /*!< PMU CNTENSET: Event Counter 4 Enable Set Position */ +#define PMU_CNTENSET_CNT4_ENABLE_Msk (1UL << PMU_CNTENSET_CNT4_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 4 Enable Set Mask */ + +#define PMU_CNTENSET_CNT5_ENABLE_Pos 5U /*!< PMU CNTENSET: Event Counter 5 Enable Set Position */ +#define PMU_CNTENSET_CNT5_ENABLE_Msk (1UL << PMU_CNTENSET_CNT5_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 5 Enable Set Mask */ + +#define PMU_CNTENSET_CNT6_ENABLE_Pos 6U /*!< PMU CNTENSET: Event Counter 6 Enable Set Position */ +#define PMU_CNTENSET_CNT6_ENABLE_Msk (1UL << PMU_CNTENSET_CNT6_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 6 Enable Set Mask */ + +#define PMU_CNTENSET_CNT7_ENABLE_Pos 7U /*!< PMU CNTENSET: Event Counter 7 Enable Set Position */ +#define PMU_CNTENSET_CNT7_ENABLE_Msk (1UL << PMU_CNTENSET_CNT7_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 7 Enable Set Mask */ + +#define PMU_CNTENSET_CNT8_ENABLE_Pos 8U /*!< PMU CNTENSET: Event Counter 8 Enable Set Position */ +#define PMU_CNTENSET_CNT8_ENABLE_Msk (1UL << PMU_CNTENSET_CNT8_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 8 Enable Set Mask */ + +#define PMU_CNTENSET_CNT9_ENABLE_Pos 9U /*!< PMU CNTENSET: Event Counter 9 Enable Set Position */ +#define PMU_CNTENSET_CNT9_ENABLE_Msk (1UL << PMU_CNTENSET_CNT9_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 9 Enable Set Mask */ + +#define PMU_CNTENSET_CNT10_ENABLE_Pos 10U /*!< PMU CNTENSET: Event Counter 10 Enable Set Position */ +#define PMU_CNTENSET_CNT10_ENABLE_Msk (1UL << PMU_CNTENSET_CNT10_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 10 Enable Set Mask */ + +#define PMU_CNTENSET_CNT11_ENABLE_Pos 11U /*!< PMU CNTENSET: Event Counter 11 Enable Set Position */ +#define PMU_CNTENSET_CNT11_ENABLE_Msk (1UL << PMU_CNTENSET_CNT11_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 11 Enable Set Mask */ + +#define PMU_CNTENSET_CNT12_ENABLE_Pos 12U /*!< PMU CNTENSET: Event Counter 12 Enable Set Position */ +#define PMU_CNTENSET_CNT12_ENABLE_Msk (1UL << PMU_CNTENSET_CNT12_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 12 Enable Set Mask */ + +#define PMU_CNTENSET_CNT13_ENABLE_Pos 13U /*!< PMU CNTENSET: Event Counter 13 Enable Set Position */ +#define PMU_CNTENSET_CNT13_ENABLE_Msk (1UL << PMU_CNTENSET_CNT13_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 13 Enable Set Mask */ + +#define PMU_CNTENSET_CNT14_ENABLE_Pos 14U /*!< PMU CNTENSET: Event Counter 14 Enable Set Position */ +#define PMU_CNTENSET_CNT14_ENABLE_Msk (1UL << PMU_CNTENSET_CNT14_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 14 Enable Set Mask */ + +#define PMU_CNTENSET_CNT15_ENABLE_Pos 15U /*!< PMU CNTENSET: Event Counter 15 Enable Set Position */ +#define PMU_CNTENSET_CNT15_ENABLE_Msk (1UL << PMU_CNTENSET_CNT15_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 15 Enable Set Mask */ + +#define PMU_CNTENSET_CNT16_ENABLE_Pos 16U /*!< PMU CNTENSET: 
Event Counter 16 Enable Set Position */ +#define PMU_CNTENSET_CNT16_ENABLE_Msk (1UL << PMU_CNTENSET_CNT16_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 16 Enable Set Mask */ + +#define PMU_CNTENSET_CNT17_ENABLE_Pos 17U /*!< PMU CNTENSET: Event Counter 17 Enable Set Position */ +#define PMU_CNTENSET_CNT17_ENABLE_Msk (1UL << PMU_CNTENSET_CNT17_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 17 Enable Set Mask */ + +#define PMU_CNTENSET_CNT18_ENABLE_Pos 18U /*!< PMU CNTENSET: Event Counter 18 Enable Set Position */ +#define PMU_CNTENSET_CNT18_ENABLE_Msk (1UL << PMU_CNTENSET_CNT18_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 18 Enable Set Mask */ + +#define PMU_CNTENSET_CNT19_ENABLE_Pos 19U /*!< PMU CNTENSET: Event Counter 19 Enable Set Position */ +#define PMU_CNTENSET_CNT19_ENABLE_Msk (1UL << PMU_CNTENSET_CNT19_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 19 Enable Set Mask */ + +#define PMU_CNTENSET_CNT20_ENABLE_Pos 20U /*!< PMU CNTENSET: Event Counter 20 Enable Set Position */ +#define PMU_CNTENSET_CNT20_ENABLE_Msk (1UL << PMU_CNTENSET_CNT20_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 20 Enable Set Mask */ + +#define PMU_CNTENSET_CNT21_ENABLE_Pos 21U /*!< PMU CNTENSET: Event Counter 21 Enable Set Position */ +#define PMU_CNTENSET_CNT21_ENABLE_Msk (1UL << PMU_CNTENSET_CNT21_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 21 Enable Set Mask */ + +#define PMU_CNTENSET_CNT22_ENABLE_Pos 22U /*!< PMU CNTENSET: Event Counter 22 Enable Set Position */ +#define PMU_CNTENSET_CNT22_ENABLE_Msk (1UL << PMU_CNTENSET_CNT22_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 22 Enable Set Mask */ + +#define PMU_CNTENSET_CNT23_ENABLE_Pos 23U /*!< PMU CNTENSET: Event Counter 23 Enable Set Position */ +#define PMU_CNTENSET_CNT23_ENABLE_Msk (1UL << PMU_CNTENSET_CNT23_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 23 Enable Set Mask */ + +#define PMU_CNTENSET_CNT24_ENABLE_Pos 24U /*!< PMU CNTENSET: Event Counter 24 Enable Set Position */ +#define PMU_CNTENSET_CNT24_ENABLE_Msk (1UL << PMU_CNTENSET_CNT24_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 24 Enable Set Mask */ + +#define PMU_CNTENSET_CNT25_ENABLE_Pos 25U /*!< PMU CNTENSET: Event Counter 25 Enable Set Position */ +#define PMU_CNTENSET_CNT25_ENABLE_Msk (1UL << PMU_CNTENSET_CNT25_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 25 Enable Set Mask */ + +#define PMU_CNTENSET_CNT26_ENABLE_Pos 26U /*!< PMU CNTENSET: Event Counter 26 Enable Set Position */ +#define PMU_CNTENSET_CNT26_ENABLE_Msk (1UL << PMU_CNTENSET_CNT26_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 26 Enable Set Mask */ + +#define PMU_CNTENSET_CNT27_ENABLE_Pos 27U /*!< PMU CNTENSET: Event Counter 27 Enable Set Position */ +#define PMU_CNTENSET_CNT27_ENABLE_Msk (1UL << PMU_CNTENSET_CNT27_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 27 Enable Set Mask */ + +#define PMU_CNTENSET_CNT28_ENABLE_Pos 28U /*!< PMU CNTENSET: Event Counter 28 Enable Set Position */ +#define PMU_CNTENSET_CNT28_ENABLE_Msk (1UL << PMU_CNTENSET_CNT28_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 28 Enable Set Mask */ + +#define PMU_CNTENSET_CNT29_ENABLE_Pos 29U /*!< PMU CNTENSET: Event Counter 29 Enable Set Position */ +#define PMU_CNTENSET_CNT29_ENABLE_Msk (1UL << PMU_CNTENSET_CNT29_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 29 Enable Set Mask */ + +#define PMU_CNTENSET_CNT30_ENABLE_Pos 30U /*!< PMU CNTENSET: Event Counter 30 Enable Set Position */ +#define PMU_CNTENSET_CNT30_ENABLE_Msk (1UL << PMU_CNTENSET_CNT30_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 30 Enable Set Mask */ + +#define PMU_CNTENSET_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENSET: 
Cycle Counter Enable Set Position */ +#define PMU_CNTENSET_CCNTR_ENABLE_Msk (1UL << PMU_CNTENSET_CCNTR_ENABLE_Pos) /*!< PMU CNTENSET: Cycle Counter Enable Set Mask */ + +/** \brief PMU Count Enable Clear Register Definitions */ + +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Position */ +#define PMU_CNTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Position */ +#define PMU_CNTENCLR_CNT1_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT1_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 1 Enable Clear */ + +#define PMU_CNTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Position */ +#define PMU_CNTENCLR_CNT2_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT2_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Position */ +#define PMU_CNTENCLR_CNT3_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT3_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Position */ +#define PMU_CNTENCLR_CNT4_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT4_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Position */ +#define PMU_CNTENCLR_CNT5_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT5_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Position */ +#define PMU_CNTENCLR_CNT6_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT6_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Position */ +#define PMU_CNTENCLR_CNT7_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT7_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Position */ +#define PMU_CNTENCLR_CNT8_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT8_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Position */ +#define PMU_CNTENCLR_CNT9_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT9_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Position */ +#define PMU_CNTENCLR_CNT10_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT10_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Position */ +#define PMU_CNTENCLR_CNT11_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT11_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Position */ +#define PMU_CNTENCLR_CNT12_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT12_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Position */ +#define PMU_CNTENCLR_CNT13_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT13_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Mask */ + +#define 
PMU_CNTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Position */ +#define PMU_CNTENCLR_CNT14_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT14_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Position */ +#define PMU_CNTENCLR_CNT15_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT15_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Position */ +#define PMU_CNTENCLR_CNT16_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT16_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Position */ +#define PMU_CNTENCLR_CNT17_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT17_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Position */ +#define PMU_CNTENCLR_CNT18_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT18_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Position */ +#define PMU_CNTENCLR_CNT19_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT19_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Position */ +#define PMU_CNTENCLR_CNT20_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT20_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Position */ +#define PMU_CNTENCLR_CNT21_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT21_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Position */ +#define PMU_CNTENCLR_CNT22_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT22_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Position */ +#define PMU_CNTENCLR_CNT23_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT23_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Position */ +#define PMU_CNTENCLR_CNT24_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT24_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Position */ +#define PMU_CNTENCLR_CNT25_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT25_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Position */ +#define PMU_CNTENCLR_CNT26_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT26_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Position */ +#define PMU_CNTENCLR_CNT27_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT27_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Position */ +#define PMU_CNTENCLR_CNT28_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT28_ENABLE_Pos) /*!< 
PMU CNTENCLR: Event Counter 28 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Position */ +#define PMU_CNTENCLR_CNT29_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT29_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Position */ +#define PMU_CNTENCLR_CNT30_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT30_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Mask */ + +#define PMU_CNTENCLR_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENCLR: Cycle Counter Enable Clear Position */ +#define PMU_CNTENCLR_CCNTR_ENABLE_Msk (1UL << PMU_CNTENCLR_CCNTR_ENABLE_Pos) /*!< PMU CNTENCLR: Cycle Counter Enable Clear Mask */ + +/** \brief PMU Interrupt Enable Set Register Definitions */ + +#define PMU_INTENSET_CNT0_ENABLE_Pos 0U /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENSET_CNT0_ENABLE_Pos*/) /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT1_ENABLE_Pos 1U /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT1_ENABLE_Msk (1UL << PMU_INTENSET_CNT1_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT2_ENABLE_Pos 2U /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT2_ENABLE_Msk (1UL << PMU_INTENSET_CNT2_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT3_ENABLE_Pos 3U /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT3_ENABLE_Msk (1UL << PMU_INTENSET_CNT3_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT4_ENABLE_Pos 4U /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT4_ENABLE_Msk (1UL << PMU_INTENSET_CNT4_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT5_ENABLE_Pos 5U /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT5_ENABLE_Msk (1UL << PMU_INTENSET_CNT5_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT6_ENABLE_Pos 6U /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT6_ENABLE_Msk (1UL << PMU_INTENSET_CNT6_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT7_ENABLE_Pos 7U /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT7_ENABLE_Msk (1UL << PMU_INTENSET_CNT7_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT8_ENABLE_Pos 8U /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT8_ENABLE_Msk (1UL << PMU_INTENSET_CNT8_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT9_ENABLE_Pos 9U /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT9_ENABLE_Msk (1UL << PMU_INTENSET_CNT9_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT10_ENABLE_Pos 10U /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT10_ENABLE_Msk (1UL << PMU_INTENSET_CNT10_ENABLE_Pos) /*!< PMU 
INTENSET: Event Counter 10 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT11_ENABLE_Pos 11U /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT11_ENABLE_Msk (1UL << PMU_INTENSET_CNT11_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT12_ENABLE_Pos 12U /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT12_ENABLE_Msk (1UL << PMU_INTENSET_CNT12_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT13_ENABLE_Pos 13U /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT13_ENABLE_Msk (1UL << PMU_INTENSET_CNT13_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT14_ENABLE_Pos 14U /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT14_ENABLE_Msk (1UL << PMU_INTENSET_CNT14_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT15_ENABLE_Pos 15U /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT15_ENABLE_Msk (1UL << PMU_INTENSET_CNT15_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT16_ENABLE_Pos 16U /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT16_ENABLE_Msk (1UL << PMU_INTENSET_CNT16_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT17_ENABLE_Pos 17U /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT17_ENABLE_Msk (1UL << PMU_INTENSET_CNT17_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT18_ENABLE_Pos 18U /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT18_ENABLE_Msk (1UL << PMU_INTENSET_CNT18_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT19_ENABLE_Pos 19U /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT19_ENABLE_Msk (1UL << PMU_INTENSET_CNT19_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT20_ENABLE_Pos 20U /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT20_ENABLE_Msk (1UL << PMU_INTENSET_CNT20_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT21_ENABLE_Pos 21U /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT21_ENABLE_Msk (1UL << PMU_INTENSET_CNT21_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT22_ENABLE_Pos 22U /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT22_ENABLE_Msk (1UL << PMU_INTENSET_CNT22_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT23_ENABLE_Pos 23U /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT23_ENABLE_Msk (1UL << PMU_INTENSET_CNT23_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT24_ENABLE_Pos 24U /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT24_ENABLE_Msk (1UL 
<< PMU_INTENSET_CNT24_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT25_ENABLE_Pos 25U /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT25_ENABLE_Msk (1UL << PMU_INTENSET_CNT25_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT26_ENABLE_Pos 26U /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT26_ENABLE_Msk (1UL << PMU_INTENSET_CNT26_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT27_ENABLE_Pos 27U /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT27_ENABLE_Msk (1UL << PMU_INTENSET_CNT27_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT28_ENABLE_Pos 28U /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT28_ENABLE_Msk (1UL << PMU_INTENSET_CNT28_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT29_ENABLE_Pos 29U /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT29_ENABLE_Msk (1UL << PMU_INTENSET_CNT29_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT30_ENABLE_Pos 30U /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT30_ENABLE_Msk (1UL << PMU_INTENSET_CNT30_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Position */ +#define PMU_INTENSET_CYCCNT_ENABLE_Msk (1UL << PMU_INTENSET_CYCCNT_ENABLE_Pos) /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Mask */ + +/** \brief PMU Interrupt Enable Clear Register Definitions */ + +#define PMU_INTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT1_ENABLE_Msk (1UL << PMU_INTENCLR_CNT1_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT2_ENABLE_Msk (1UL << PMU_INTENCLR_CNT2_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT3_ENABLE_Msk (1UL << PMU_INTENCLR_CNT3_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT4_ENABLE_Msk (1UL << PMU_INTENCLR_CNT4_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT5_ENABLE_Msk (1UL << PMU_INTENCLR_CNT5_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU INTENCLR:
Event Counter 6 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT6_ENABLE_Msk (1UL << PMU_INTENCLR_CNT6_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT7_ENABLE_Msk (1UL << PMU_INTENCLR_CNT7_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT8_ENABLE_Msk (1UL << PMU_INTENCLR_CNT8_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT9_ENABLE_Msk (1UL << PMU_INTENCLR_CNT9_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT10_ENABLE_Msk (1UL << PMU_INTENCLR_CNT10_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT11_ENABLE_Msk (1UL << PMU_INTENCLR_CNT11_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT12_ENABLE_Msk (1UL << PMU_INTENCLR_CNT12_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT13_ENABLE_Msk (1UL << PMU_INTENCLR_CNT13_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT14_ENABLE_Msk (1UL << PMU_INTENCLR_CNT14_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT15_ENABLE_Msk (1UL << PMU_INTENCLR_CNT15_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT16_ENABLE_Msk (1UL << PMU_INTENCLR_CNT16_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT17_ENABLE_Msk (1UL << PMU_INTENCLR_CNT17_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT18_ENABLE_Msk (1UL << PMU_INTENCLR_CNT18_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT19_ENABLE_Msk (1UL << PMU_INTENCLR_CNT19_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable 
Clear Mask */ + +#define PMU_INTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT20_ENABLE_Msk (1UL << PMU_INTENCLR_CNT20_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT21_ENABLE_Msk (1UL << PMU_INTENCLR_CNT21_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT22_ENABLE_Msk (1UL << PMU_INTENCLR_CNT22_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT23_ENABLE_Msk (1UL << PMU_INTENCLR_CNT23_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT24_ENABLE_Msk (1UL << PMU_INTENCLR_CNT24_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT25_ENABLE_Msk (1UL << PMU_INTENCLR_CNT25_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT26_ENABLE_Msk (1UL << PMU_INTENCLR_CNT26_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT27_ENABLE_Msk (1UL << PMU_INTENCLR_CNT27_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT28_ENABLE_Msk (1UL << PMU_INTENCLR_CNT28_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT29_ENABLE_Msk (1UL << PMU_INTENCLR_CNT29_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT30_ENABLE_Msk (1UL << PMU_INTENCLR_CNT30_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CYCCNT_ENABLE_Msk (1UL << PMU_INTENCLR_CYCCNT_ENABLE_Pos) /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Mask */ + +/** \brief PMU Overflow Flag Status Set Register Definitions */ + +#define PMU_OVSSET_CNT0_STATUS_Pos 0U /*!< PMU OVSSET: Event Counter 0 Overflow Set Position */ +#define PMU_OVSSET_CNT0_STATUS_Msk (1UL /*<< PMU_OVSSET_CNT0_STATUS_Pos*/) /*!< PMU OVSSET: Event Counter 0 Overflow Set Mask */ + +#define PMU_OVSSET_CNT1_STATUS_Pos 1U /*!< PMU OVSSET: Event Counter 1 Overflow Set Position */ +#define 
PMU_OVSSET_CNT1_STATUS_Msk (1UL << PMU_OVSSET_CNT1_STATUS_Pos) /*!< PMU OVSSET: Event Counter 1 Overflow Set Mask */ + +#define PMU_OVSSET_CNT2_STATUS_Pos 2U /*!< PMU OVSSET: Event Counter 2 Overflow Set Position */ +#define PMU_OVSSET_CNT2_STATUS_Msk (1UL << PMU_OVSSET_CNT2_STATUS_Pos) /*!< PMU OVSSET: Event Counter 2 Overflow Set Mask */ + +#define PMU_OVSSET_CNT3_STATUS_Pos 3U /*!< PMU OVSSET: Event Counter 3 Overflow Set Position */ +#define PMU_OVSSET_CNT3_STATUS_Msk (1UL << PMU_OVSSET_CNT3_STATUS_Pos) /*!< PMU OVSSET: Event Counter 3 Overflow Set Mask */ + +#define PMU_OVSSET_CNT4_STATUS_Pos 4U /*!< PMU OVSSET: Event Counter 4 Overflow Set Position */ +#define PMU_OVSSET_CNT4_STATUS_Msk (1UL << PMU_OVSSET_CNT4_STATUS_Pos) /*!< PMU OVSSET: Event Counter 4 Overflow Set Mask */ + +#define PMU_OVSSET_CNT5_STATUS_Pos 5U /*!< PMU OVSSET: Event Counter 5 Overflow Set Position */ +#define PMU_OVSSET_CNT5_STATUS_Msk (1UL << PMU_OVSSET_CNT5_STATUS_Pos) /*!< PMU OVSSET: Event Counter 5 Overflow Set Mask */ + +#define PMU_OVSSET_CNT6_STATUS_Pos 6U /*!< PMU OVSSET: Event Counter 6 Overflow Set Position */ +#define PMU_OVSSET_CNT6_STATUS_Msk (1UL << PMU_OVSSET_CNT6_STATUS_Pos) /*!< PMU OVSSET: Event Counter 6 Overflow Set Mask */ + +#define PMU_OVSSET_CNT7_STATUS_Pos 7U /*!< PMU OVSSET: Event Counter 7 Overflow Set Position */ +#define PMU_OVSSET_CNT7_STATUS_Msk (1UL << PMU_OVSSET_CNT7_STATUS_Pos) /*!< PMU OVSSET: Event Counter 7 Overflow Set Mask */ + +#define PMU_OVSSET_CNT8_STATUS_Pos 8U /*!< PMU OVSSET: Event Counter 8 Overflow Set Position */ +#define PMU_OVSSET_CNT8_STATUS_Msk (1UL << PMU_OVSSET_CNT8_STATUS_Pos) /*!< PMU OVSSET: Event Counter 8 Overflow Set Mask */ + +#define PMU_OVSSET_CNT9_STATUS_Pos 9U /*!< PMU OVSSET: Event Counter 9 Overflow Set Position */ +#define PMU_OVSSET_CNT9_STATUS_Msk (1UL << PMU_OVSSET_CNT9_STATUS_Pos) /*!< PMU OVSSET: Event Counter 9 Overflow Set Mask */ + +#define PMU_OVSSET_CNT10_STATUS_Pos 10U /*!< PMU OVSSET: Event Counter 10 Overflow Set Position */ +#define PMU_OVSSET_CNT10_STATUS_Msk (1UL << PMU_OVSSET_CNT10_STATUS_Pos) /*!< PMU OVSSET: Event Counter 10 Overflow Set Mask */ + +#define PMU_OVSSET_CNT11_STATUS_Pos 11U /*!< PMU OVSSET: Event Counter 11 Overflow Set Position */ +#define PMU_OVSSET_CNT11_STATUS_Msk (1UL << PMU_OVSSET_CNT11_STATUS_Pos) /*!< PMU OVSSET: Event Counter 11 Overflow Set Mask */ + +#define PMU_OVSSET_CNT12_STATUS_Pos 12U /*!< PMU OVSSET: Event Counter 12 Overflow Set Position */ +#define PMU_OVSSET_CNT12_STATUS_Msk (1UL << PMU_OVSSET_CNT12_STATUS_Pos) /*!< PMU OVSSET: Event Counter 12 Overflow Set Mask */ + +#define PMU_OVSSET_CNT13_STATUS_Pos 13U /*!< PMU OVSSET: Event Counter 13 Overflow Set Position */ +#define PMU_OVSSET_CNT13_STATUS_Msk (1UL << PMU_OVSSET_CNT13_STATUS_Pos) /*!< PMU OVSSET: Event Counter 13 Overflow Set Mask */ + +#define PMU_OVSSET_CNT14_STATUS_Pos 14U /*!< PMU OVSSET: Event Counter 14 Overflow Set Position */ +#define PMU_OVSSET_CNT14_STATUS_Msk (1UL << PMU_OVSSET_CNT14_STATUS_Pos) /*!< PMU OVSSET: Event Counter 14 Overflow Set Mask */ + +#define PMU_OVSSET_CNT15_STATUS_Pos 15U /*!< PMU OVSSET: Event Counter 15 Overflow Set Position */ +#define PMU_OVSSET_CNT15_STATUS_Msk (1UL << PMU_OVSSET_CNT15_STATUS_Pos) /*!< PMU OVSSET: Event Counter 15 Overflow Set Mask */ + +#define PMU_OVSSET_CNT16_STATUS_Pos 16U /*!< PMU OVSSET: Event Counter 16 Overflow Set Position */ +#define PMU_OVSSET_CNT16_STATUS_Msk (1UL << PMU_OVSSET_CNT16_STATUS_Pos) /*!< PMU OVSSET: Event Counter 16 Overflow Set Mask */ + +#define 
PMU_OVSSET_CNT17_STATUS_Pos 17U /*!< PMU OVSSET: Event Counter 17 Overflow Set Position */ +#define PMU_OVSSET_CNT17_STATUS_Msk (1UL << PMU_OVSSET_CNT17_STATUS_Pos) /*!< PMU OVSSET: Event Counter 17 Overflow Set Mask */ + +#define PMU_OVSSET_CNT18_STATUS_Pos 18U /*!< PMU OVSSET: Event Counter 18 Overflow Set Position */ +#define PMU_OVSSET_CNT18_STATUS_Msk (1UL << PMU_OVSSET_CNT18_STATUS_Pos) /*!< PMU OVSSET: Event Counter 18 Overflow Set Mask */ + +#define PMU_OVSSET_CNT19_STATUS_Pos 19U /*!< PMU OVSSET: Event Counter 19 Overflow Set Position */ +#define PMU_OVSSET_CNT19_STATUS_Msk (1UL << PMU_OVSSET_CNT19_STATUS_Pos) /*!< PMU OVSSET: Event Counter 19 Overflow Set Mask */ + +#define PMU_OVSSET_CNT20_STATUS_Pos 20U /*!< PMU OVSSET: Event Counter 20 Overflow Set Position */ +#define PMU_OVSSET_CNT20_STATUS_Msk (1UL << PMU_OVSSET_CNT20_STATUS_Pos) /*!< PMU OVSSET: Event Counter 20 Overflow Set Mask */ + +#define PMU_OVSSET_CNT21_STATUS_Pos 21U /*!< PMU OVSSET: Event Counter 21 Overflow Set Position */ +#define PMU_OVSSET_CNT21_STATUS_Msk (1UL << PMU_OVSSET_CNT21_STATUS_Pos) /*!< PMU OVSSET: Event Counter 21 Overflow Set Mask */ + +#define PMU_OVSSET_CNT22_STATUS_Pos 22U /*!< PMU OVSSET: Event Counter 22 Overflow Set Position */ +#define PMU_OVSSET_CNT22_STATUS_Msk (1UL << PMU_OVSSET_CNT22_STATUS_Pos) /*!< PMU OVSSET: Event Counter 22 Overflow Set Mask */ + +#define PMU_OVSSET_CNT23_STATUS_Pos 23U /*!< PMU OVSSET: Event Counter 23 Overflow Set Position */ +#define PMU_OVSSET_CNT23_STATUS_Msk (1UL << PMU_OVSSET_CNT23_STATUS_Pos) /*!< PMU OVSSET: Event Counter 23 Overflow Set Mask */ + +#define PMU_OVSSET_CNT24_STATUS_Pos 24U /*!< PMU OVSSET: Event Counter 24 Overflow Set Position */ +#define PMU_OVSSET_CNT24_STATUS_Msk (1UL << PMU_OVSSET_CNT24_STATUS_Pos) /*!< PMU OVSSET: Event Counter 24 Overflow Set Mask */ + +#define PMU_OVSSET_CNT25_STATUS_Pos 25U /*!< PMU OVSSET: Event Counter 25 Overflow Set Position */ +#define PMU_OVSSET_CNT25_STATUS_Msk (1UL << PMU_OVSSET_CNT25_STATUS_Pos) /*!< PMU OVSSET: Event Counter 25 Overflow Set Mask */ + +#define PMU_OVSSET_CNT26_STATUS_Pos 26U /*!< PMU OVSSET: Event Counter 26 Overflow Set Position */ +#define PMU_OVSSET_CNT26_STATUS_Msk (1UL << PMU_OVSSET_CNT26_STATUS_Pos) /*!< PMU OVSSET: Event Counter 26 Overflow Set Mask */ + +#define PMU_OVSSET_CNT27_STATUS_Pos 27U /*!< PMU OVSSET: Event Counter 27 Overflow Set Position */ +#define PMU_OVSSET_CNT27_STATUS_Msk (1UL << PMU_OVSSET_CNT27_STATUS_Pos) /*!< PMU OVSSET: Event Counter 27 Overflow Set Mask */ + +#define PMU_OVSSET_CNT28_STATUS_Pos 28U /*!< PMU OVSSET: Event Counter 28 Overflow Set Position */ +#define PMU_OVSSET_CNT28_STATUS_Msk (1UL << PMU_OVSSET_CNT28_STATUS_Pos) /*!< PMU OVSSET: Event Counter 28 Overflow Set Mask */ + +#define PMU_OVSSET_CNT29_STATUS_Pos 29U /*!< PMU OVSSET: Event Counter 29 Overflow Set Position */ +#define PMU_OVSSET_CNT29_STATUS_Msk (1UL << PMU_OVSSET_CNT29_STATUS_Pos) /*!< PMU OVSSET: Event Counter 29 Overflow Set Mask */ + +#define PMU_OVSSET_CNT30_STATUS_Pos 30U /*!< PMU OVSSET: Event Counter 30 Overflow Set Position */ +#define PMU_OVSSET_CNT30_STATUS_Msk (1UL << PMU_OVSSET_CNT30_STATUS_Pos) /*!< PMU OVSSET: Event Counter 30 Overflow Set Mask */ + +#define PMU_OVSSET_CYCCNT_STATUS_Pos 31U /*!< PMU OVSSET: Cycle Counter Overflow Set Position */ +#define PMU_OVSSET_CYCCNT_STATUS_Msk (1UL << PMU_OVSSET_CYCCNT_STATUS_Pos) /*!< PMU OVSSET: Cycle Counter Overflow Set Mask */ + +/** \brief PMU Overflow Flag Status Clear Register Definitions */ + +#define 
PMU_OVSCLR_CNT0_STATUS_Pos 0U /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Position */ +#define PMU_OVSCLR_CNT0_STATUS_Msk (1UL /*<< PMU_OVSCLR_CNT0_STATUS_Pos*/) /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT1_STATUS_Pos 1U /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Position */ +#define PMU_OVSCLR_CNT1_STATUS_Msk (1UL << PMU_OVSCLR_CNT1_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 1 Overflow Clear */ + +#define PMU_OVSCLR_CNT2_STATUS_Pos 2U /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Position */ +#define PMU_OVSCLR_CNT2_STATUS_Msk (1UL << PMU_OVSCLR_CNT2_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT3_STATUS_Pos 3U /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Position */ +#define PMU_OVSCLR_CNT3_STATUS_Msk (1UL << PMU_OVSCLR_CNT3_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT4_STATUS_Pos 4U /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Position */ +#define PMU_OVSCLR_CNT4_STATUS_Msk (1UL << PMU_OVSCLR_CNT4_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT5_STATUS_Pos 5U /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Position */ +#define PMU_OVSCLR_CNT5_STATUS_Msk (1UL << PMU_OVSCLR_CNT5_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT6_STATUS_Pos 6U /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Position */ +#define PMU_OVSCLR_CNT6_STATUS_Msk (1UL << PMU_OVSCLR_CNT6_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT7_STATUS_Pos 7U /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Position */ +#define PMU_OVSCLR_CNT7_STATUS_Msk (1UL << PMU_OVSCLR_CNT7_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT8_STATUS_Pos 8U /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Position */ +#define PMU_OVSCLR_CNT8_STATUS_Msk (1UL << PMU_OVSCLR_CNT8_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT9_STATUS_Pos 9U /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Position */ +#define PMU_OVSCLR_CNT9_STATUS_Msk (1UL << PMU_OVSCLR_CNT9_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT10_STATUS_Pos 10U /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Position */ +#define PMU_OVSCLR_CNT10_STATUS_Msk (1UL << PMU_OVSCLR_CNT10_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT11_STATUS_Pos 11U /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Position */ +#define PMU_OVSCLR_CNT11_STATUS_Msk (1UL << PMU_OVSCLR_CNT11_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT12_STATUS_Pos 12U /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Position */ +#define PMU_OVSCLR_CNT12_STATUS_Msk (1UL << PMU_OVSCLR_CNT12_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT13_STATUS_Pos 13U /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Position */ +#define PMU_OVSCLR_CNT13_STATUS_Msk (1UL << PMU_OVSCLR_CNT13_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT14_STATUS_Pos 14U /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Position */ +#define PMU_OVSCLR_CNT14_STATUS_Msk (1UL << PMU_OVSCLR_CNT14_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT15_STATUS_Pos 15U /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Position 
*/ +#define PMU_OVSCLR_CNT15_STATUS_Msk (1UL << PMU_OVSCLR_CNT15_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT16_STATUS_Pos 16U /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Position */ +#define PMU_OVSCLR_CNT16_STATUS_Msk (1UL << PMU_OVSCLR_CNT16_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT17_STATUS_Pos 17U /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Position */ +#define PMU_OVSCLR_CNT17_STATUS_Msk (1UL << PMU_OVSCLR_CNT17_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT18_STATUS_Pos 18U /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Position */ +#define PMU_OVSCLR_CNT18_STATUS_Msk (1UL << PMU_OVSCLR_CNT18_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT19_STATUS_Pos 19U /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Position */ +#define PMU_OVSCLR_CNT19_STATUS_Msk (1UL << PMU_OVSCLR_CNT19_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT20_STATUS_Pos 20U /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Position */ +#define PMU_OVSCLR_CNT20_STATUS_Msk (1UL << PMU_OVSCLR_CNT20_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT21_STATUS_Pos 21U /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Position */ +#define PMU_OVSCLR_CNT21_STATUS_Msk (1UL << PMU_OVSCLR_CNT21_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT22_STATUS_Pos 22U /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Position */ +#define PMU_OVSCLR_CNT22_STATUS_Msk (1UL << PMU_OVSCLR_CNT22_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT23_STATUS_Pos 23U /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Position */ +#define PMU_OVSCLR_CNT23_STATUS_Msk (1UL << PMU_OVSCLR_CNT23_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT24_STATUS_Pos 24U /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Position */ +#define PMU_OVSCLR_CNT24_STATUS_Msk (1UL << PMU_OVSCLR_CNT24_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT25_STATUS_Pos 25U /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Position */ +#define PMU_OVSCLR_CNT25_STATUS_Msk (1UL << PMU_OVSCLR_CNT25_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT26_STATUS_Pos 26U /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Position */ +#define PMU_OVSCLR_CNT26_STATUS_Msk (1UL << PMU_OVSCLR_CNT26_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT27_STATUS_Pos 27U /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Position */ +#define PMU_OVSCLR_CNT27_STATUS_Msk (1UL << PMU_OVSCLR_CNT27_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT28_STATUS_Pos 28U /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Position */ +#define PMU_OVSCLR_CNT28_STATUS_Msk (1UL << PMU_OVSCLR_CNT28_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT29_STATUS_Pos 29U /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Position */ +#define PMU_OVSCLR_CNT29_STATUS_Msk (1UL << PMU_OVSCLR_CNT29_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT30_STATUS_Pos 30U /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Position */ +#define 
PMU_OVSCLR_CNT30_STATUS_Msk (1UL << PMU_OVSCLR_CNT30_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Mask */ + +#define PMU_OVSCLR_CYCCNT_STATUS_Pos 31U /*!< PMU OVSCLR: Cycle Counter Overflow Clear Position */ +#define PMU_OVSCLR_CYCCNT_STATUS_Msk (1UL << PMU_OVSCLR_CYCCNT_STATUS_Pos) /*!< PMU OVSCLR: Cycle Counter Overflow Clear Mask */ + +/** \brief PMU Software Increment Counter */ + +#define PMU_SWINC_CNT0_Pos 0U /*!< PMU SWINC: Event Counter 0 Software Increment Position */ +#define PMU_SWINC_CNT0_Msk (1UL /*<< PMU_SWINC_CNT0_Pos */) /*!< PMU SWINC: Event Counter 0 Software Increment Mask */ + +#define PMU_SWINC_CNT1_Pos 1U /*!< PMU SWINC: Event Counter 1 Software Increment Position */ +#define PMU_SWINC_CNT1_Msk (1UL << PMU_SWINC_CNT1_Pos) /*!< PMU SWINC: Event Counter 1 Software Increment Mask */ + +#define PMU_SWINC_CNT2_Pos 2U /*!< PMU SWINC: Event Counter 2 Software Increment Position */ +#define PMU_SWINC_CNT2_Msk (1UL << PMU_SWINC_CNT2_Pos) /*!< PMU SWINC: Event Counter 2 Software Increment Mask */ + +#define PMU_SWINC_CNT3_Pos 3U /*!< PMU SWINC: Event Counter 3 Software Increment Position */ +#define PMU_SWINC_CNT3_Msk (1UL << PMU_SWINC_CNT3_Pos) /*!< PMU SWINC: Event Counter 3 Software Increment Mask */ + +#define PMU_SWINC_CNT4_Pos 4U /*!< PMU SWINC: Event Counter 4 Software Increment Position */ +#define PMU_SWINC_CNT4_Msk (1UL << PMU_SWINC_CNT4_Pos) /*!< PMU SWINC: Event Counter 4 Software Increment Mask */ + +#define PMU_SWINC_CNT5_Pos 5U /*!< PMU SWINC: Event Counter 5 Software Increment Position */ +#define PMU_SWINC_CNT5_Msk (1UL << PMU_SWINC_CNT5_Pos) /*!< PMU SWINC: Event Counter 5 Software Increment Mask */ + +#define PMU_SWINC_CNT6_Pos 6U /*!< PMU SWINC: Event Counter 6 Software Increment Position */ +#define PMU_SWINC_CNT6_Msk (1UL << PMU_SWINC_CNT6_Pos) /*!< PMU SWINC: Event Counter 6 Software Increment Mask */ + +#define PMU_SWINC_CNT7_Pos 7U /*!< PMU SWINC: Event Counter 7 Software Increment Position */ +#define PMU_SWINC_CNT7_Msk (1UL << PMU_SWINC_CNT7_Pos) /*!< PMU SWINC: Event Counter 7 Software Increment Mask */ + +#define PMU_SWINC_CNT8_Pos 8U /*!< PMU SWINC: Event Counter 8 Software Increment Position */ +#define PMU_SWINC_CNT8_Msk (1UL << PMU_SWINC_CNT8_Pos) /*!< PMU SWINC: Event Counter 8 Software Increment Mask */ + +#define PMU_SWINC_CNT9_Pos 9U /*!< PMU SWINC: Event Counter 9 Software Increment Position */ +#define PMU_SWINC_CNT9_Msk (1UL << PMU_SWINC_CNT9_Pos) /*!< PMU SWINC: Event Counter 9 Software Increment Mask */ + +#define PMU_SWINC_CNT10_Pos 10U /*!< PMU SWINC: Event Counter 10 Software Increment Position */ +#define PMU_SWINC_CNT10_Msk (1UL << PMU_SWINC_CNT10_Pos) /*!< PMU SWINC: Event Counter 10 Software Increment Mask */ + +#define PMU_SWINC_CNT11_Pos 11U /*!< PMU SWINC: Event Counter 11 Software Increment Position */ +#define PMU_SWINC_CNT11_Msk (1UL << PMU_SWINC_CNT11_Pos) /*!< PMU SWINC: Event Counter 11 Software Increment Mask */ + +#define PMU_SWINC_CNT12_Pos 12U /*!< PMU SWINC: Event Counter 12 Software Increment Position */ +#define PMU_SWINC_CNT12_Msk (1UL << PMU_SWINC_CNT12_Pos) /*!< PMU SWINC: Event Counter 12 Software Increment Mask */ + +#define PMU_SWINC_CNT13_Pos 13U /*!< PMU SWINC: Event Counter 13 Software Increment Position */ +#define PMU_SWINC_CNT13_Msk (1UL << PMU_SWINC_CNT13_Pos) /*!< PMU SWINC: Event Counter 13 Software Increment Mask */ + +#define PMU_SWINC_CNT14_Pos 14U /*!< PMU SWINC: Event Counter 14 Software Increment Position */ +#define PMU_SWINC_CNT14_Msk (1UL << PMU_SWINC_CNT14_Pos) /*!< PMU 
SWINC: Event Counter 14 Software Increment Mask */ + +#define PMU_SWINC_CNT15_Pos 15U /*!< PMU SWINC: Event Counter 15 Software Increment Position */ +#define PMU_SWINC_CNT15_Msk (1UL << PMU_SWINC_CNT15_Pos) /*!< PMU SWINC: Event Counter 15 Software Increment Mask */ + +#define PMU_SWINC_CNT16_Pos 16U /*!< PMU SWINC: Event Counter 16 Software Increment Position */ +#define PMU_SWINC_CNT16_Msk (1UL << PMU_SWINC_CNT16_Pos) /*!< PMU SWINC: Event Counter 16 Software Increment Mask */ + +#define PMU_SWINC_CNT17_Pos 17U /*!< PMU SWINC: Event Counter 17 Software Increment Position */ +#define PMU_SWINC_CNT17_Msk (1UL << PMU_SWINC_CNT17_Pos) /*!< PMU SWINC: Event Counter 17 Software Increment Mask */ + +#define PMU_SWINC_CNT18_Pos 18U /*!< PMU SWINC: Event Counter 18 Software Increment Position */ +#define PMU_SWINC_CNT18_Msk (1UL << PMU_SWINC_CNT18_Pos) /*!< PMU SWINC: Event Counter 18 Software Increment Mask */ + +#define PMU_SWINC_CNT19_Pos 19U /*!< PMU SWINC: Event Counter 19 Software Increment Position */ +#define PMU_SWINC_CNT19_Msk (1UL << PMU_SWINC_CNT19_Pos) /*!< PMU SWINC: Event Counter 19 Software Increment Mask */ + +#define PMU_SWINC_CNT20_Pos 20U /*!< PMU SWINC: Event Counter 20 Software Increment Position */ +#define PMU_SWINC_CNT20_Msk (1UL << PMU_SWINC_CNT20_Pos) /*!< PMU SWINC: Event Counter 20 Software Increment Mask */ + +#define PMU_SWINC_CNT21_Pos 21U /*!< PMU SWINC: Event Counter 21 Software Increment Position */ +#define PMU_SWINC_CNT21_Msk (1UL << PMU_SWINC_CNT21_Pos) /*!< PMU SWINC: Event Counter 21 Software Increment Mask */ + +#define PMU_SWINC_CNT22_Pos 22U /*!< PMU SWINC: Event Counter 22 Software Increment Position */ +#define PMU_SWINC_CNT22_Msk (1UL << PMU_SWINC_CNT22_Pos) /*!< PMU SWINC: Event Counter 22 Software Increment Mask */ + +#define PMU_SWINC_CNT23_Pos 23U /*!< PMU SWINC: Event Counter 23 Software Increment Position */ +#define PMU_SWINC_CNT23_Msk (1UL << PMU_SWINC_CNT23_Pos) /*!< PMU SWINC: Event Counter 23 Software Increment Mask */ + +#define PMU_SWINC_CNT24_Pos 24U /*!< PMU SWINC: Event Counter 24 Software Increment Position */ +#define PMU_SWINC_CNT24_Msk (1UL << PMU_SWINC_CNT24_Pos) /*!< PMU SWINC: Event Counter 24 Software Increment Mask */ + +#define PMU_SWINC_CNT25_Pos 25U /*!< PMU SWINC: Event Counter 25 Software Increment Position */ +#define PMU_SWINC_CNT25_Msk (1UL << PMU_SWINC_CNT25_Pos) /*!< PMU SWINC: Event Counter 25 Software Increment Mask */ + +#define PMU_SWINC_CNT26_Pos 26U /*!< PMU SWINC: Event Counter 26 Software Increment Position */ +#define PMU_SWINC_CNT26_Msk (1UL << PMU_SWINC_CNT26_Pos) /*!< PMU SWINC: Event Counter 26 Software Increment Mask */ + +#define PMU_SWINC_CNT27_Pos 27U /*!< PMU SWINC: Event Counter 27 Software Increment Position */ +#define PMU_SWINC_CNT27_Msk (1UL << PMU_SWINC_CNT27_Pos) /*!< PMU SWINC: Event Counter 27 Software Increment Mask */ + +#define PMU_SWINC_CNT28_Pos 28U /*!< PMU SWINC: Event Counter 28 Software Increment Position */ +#define PMU_SWINC_CNT28_Msk (1UL << PMU_SWINC_CNT28_Pos) /*!< PMU SWINC: Event Counter 28 Software Increment Mask */ + +#define PMU_SWINC_CNT29_Pos 29U /*!< PMU SWINC: Event Counter 29 Software Increment Position */ +#define PMU_SWINC_CNT29_Msk (1UL << PMU_SWINC_CNT29_Pos) /*!< PMU SWINC: Event Counter 29 Software Increment Mask */ + +#define PMU_SWINC_CNT30_Pos 30U /*!< PMU SWINC: Event Counter 30 Software Increment Position */ +#define PMU_SWINC_CNT30_Msk (1UL << PMU_SWINC_CNT30_Pos) /*!< PMU SWINC: Event Counter 30 Software Increment Mask */ + +/** \brief PMU Control 
Register Definitions */ + +#define PMU_CTRL_ENABLE_Pos 0U /*!< PMU CTRL: ENABLE Position */ +#define PMU_CTRL_ENABLE_Msk (1UL /*<< PMU_CTRL_ENABLE_Pos*/) /*!< PMU CTRL: ENABLE Mask */ + +#define PMU_CTRL_EVENTCNT_RESET_Pos 1U /*!< PMU CTRL: Event Counter Reset Position */ +#define PMU_CTRL_EVENTCNT_RESET_Msk (1UL << PMU_CTRL_EVENTCNT_RESET_Pos) /*!< PMU CTRL: Event Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_RESET_Pos 2U /*!< PMU CTRL: Cycle Counter Reset Position */ +#define PMU_CTRL_CYCCNT_RESET_Msk (1UL << PMU_CTRL_CYCCNT_RESET_Pos) /*!< PMU CTRL: Cycle Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_DISABLE_Pos 5U /*!< PMU CTRL: Disable Cycle Counter Position */ +#define PMU_CTRL_CYCCNT_DISABLE_Msk (1UL << PMU_CTRL_CYCCNT_DISABLE_Pos) /*!< PMU CTRL: Disable Cycle Counter Mask */ + +#define PMU_CTRL_FRZ_ON_OV_Pos 9U /*!< PMU CTRL: Freeze-on-overflow Position */ +#define PMU_CTRL_FRZ_ON_OV_Msk (1UL << PMU_CTRL_FRZ_ON_OV_Pos) /*!< PMU CTRL: Freeze-on-overflow Mask */ + +#define PMU_CTRL_TRACE_ON_OV_Pos 11U /*!< PMU CTRL: Trace-on-overflow Position */ +#define PMU_CTRL_TRACE_ON_OV_Msk (1UL << PMU_CTRL_TRACE_ON_OV_Pos) /*!< PMU CTRL: Trace-on-overflow Mask */ + +/** \brief PMU Type Register Definitions */ + +#define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */ +#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ + +#define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */ +#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */ + +#define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */ +#define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */ + +#define PMU_TYPE_FRZ_OV_SUPPORT_Pos 21U /*!< PMU TYPE: Freeze-on-overflow Support Position */ +#define PMU_TYPE_FRZ_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Freeze-on-overflow Support Mask */ + +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */ +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */ + +/** \brief PMU Authentication Status Register Definitions */ + +#define PMU_AUTHSTATUS_NSID_Pos 0U /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSID_Msk (0x3UL /*<< PMU_AUTHSTATUS_NSID_Pos*/) /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSNID_Pos 2U /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSNID_Msk (0x3UL << PMU_AUTHSTATUS_NSNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SID_Pos 4U /*!< PMU AUTHSTATUS: Secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_SID_Msk (0x3UL << PMU_AUTHSTATUS_SID_Pos) /*!< PMU AUTHSTATUS: Secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SNID_Pos 6U /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SNID_Msk (0x3UL << PMU_AUTHSTATUS_SNID_Pos) /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUID_Pos 16U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUID_Msk (0x3UL << PMU_AUTHSTATUS_NSUID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUNID_Pos 18U /*!< PMU AUTHSTATUS: Non-secure
Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUNID_Msk (0x3UL << PMU_AUTHSTATUS_NSUNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUID_Pos 20U /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_SUID_Msk (0x3UL << PMU_AUTHSTATUS_SUID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUNID_Pos 22U /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SUNID_Msk (0x3UL << PMU_AUTHSTATUS_SUNID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Mask */ + + +/*@} end of group CMSIS_PMU */ +#endif + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< 
MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (1UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_PXN_Pos 4U /*!< MPU RLAR: PXN Position */ +#define MPU_RLAR_PXN_Msk (1UL << MPU_RLAR_PXN_Pos) /*!< MPU RLAR: PXN Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU).
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +#define FPU_FPDSCR_FZ16_Pos 19U /*!< FPDSCR: FZ16 bit Position */ +#define FPU_FPDSCR_FZ16_Msk (1UL << FPU_FPDSCR_FZ16_Pos) /*!< FPDSCR: FZ16 bit Mask */ + +#define FPU_FPDSCR_LTPSIZE_Pos 16U /*!< FPDSCR: LTPSIZE bit Position */ +#define FPU_FPDSCR_LTPSIZE_Msk (7UL << FPU_FPDSCR_LTPSIZE_Pos) /*!< FPDSCR: LTPSIZE bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: FPRound bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: FPRound bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: FPSqrt bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: FPSqrt bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: FPDivide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: FPDP bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: FPDP bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: FPSP bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: FPSP bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMDReg bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMDReg bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: FMAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: FMAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FPHP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FPHP bits Mask */ + +#define FPU_MVFR1_FP16_Pos 20U /*!< MVFR1: FP16 bits Position */ +#define FPU_MVFR1_FP16_Msk (0xFUL << FPU_MVFR1_FP16_Pos) /*!< MVFR1: FP16 bits Mask */ + +#define FPU_MVFR1_MVE_Pos 8U /*!< MVFR1: MVE bits Position */ +#define FPU_MVFR1_MVE_Msk (0xFUL << FPU_MVFR1_MVE_Pos) /*!< MVFR1: MVE bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: FPDNaN bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: FPDNaN bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FPFtZ bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL 
/*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FPFtZ bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_FPD_Pos 23U /*!< \deprecated CoreDebug DHCSR: S_FPD Position */ +#define CoreDebug_DHCSR_S_FPD_Msk (1UL << CoreDebug_DHCSR_S_FPD_Pos) /*!< \deprecated CoreDebug DHCSR: S_FPD Mask */ + +#define CoreDebug_DHCSR_S_SUIDE_Pos 22U /*!< \deprecated CoreDebug DHCSR: S_SUIDE Position */ +#define CoreDebug_DHCSR_S_SUIDE_Msk (1UL << CoreDebug_DHCSR_S_SUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SUIDE Mask */ + +#define CoreDebug_DHCSR_S_NSUIDE_Pos 21U /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Position */ +#define CoreDebug_DHCSR_S_NSUIDE_Msk (1UL << CoreDebug_DHCSR_S_NSUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Mask */ + +#define CoreDebug_DHCSR_S_SDE_Pos 20U /*!< \deprecated CoreDebug DHCSR: S_SDE Position */ +#define CoreDebug_DHCSR_S_SDE_Msk (1UL << CoreDebug_DHCSR_S_SDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SDE Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define 
CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_PMOV_Pos 6U /*!< \deprecated CoreDebug DHCSR: C_PMOV Position */ +#define CoreDebug_DHCSR_C_PMOV_Msk (1UL << CoreDebug_DHCSR_C_PMOV_Pos) /*!< \deprecated CoreDebug DHCSR: C_PMOV Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug 
DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Set Clear Exception and Monitor Control Register Definitions */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_REQ_Pos 3U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_SET_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_PEND_Pos 1U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_SET_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_UIDEN_Pos 10U /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_UIDAPEN_Pos 9U /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDAPEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDAPEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Mask */ + +#define 
CoreDebug_DAUTHCTRL_FSDMA_Pos 8U /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Position */ +#define CoreDebug_DAUTHCTRL_FSDMA_Msk (1UL << CoreDebug_DAUTHCTRL_FSDMA_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
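+ \details A typical use of this block is enabling trace through its DEMCR register
+          (editorial sketch, not part of the original CMSIS source):
+ \code
+   DCB->DEMCR |= DCB_DEMCR_TRCENA_Msk;   // enable the DWT and ITM trace units
+ \endcode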
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_FPD_Pos 23U /*!< DCB DHCSR: Floating-point registers Debuggable Position */ +#define DCB_DHCSR_S_FPD_Msk (0x1UL << DCB_DHCSR_S_FPD_Pos) /*!< DCB DHCSR: Floating-point registers Debuggable Mask */ + +#define DCB_DHCSR_S_SUIDE_Pos 22U /*!< DCB DHCSR: Secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_SUIDE_Msk (0x1UL << DCB_DHCSR_S_SUIDE_Pos) /*!< DCB DHCSR: Secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_NSUIDE_Pos 21U /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_NSUIDE_Msk (0x1UL << DCB_DHCSR_S_NSUIDE_Pos) /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_PMOV_Pos 6U /*!< DCB DHCSR: Halt on PMU overflow control Position */ +#define DCB_DHCSR_C_PMOV_Msk (0x1UL << DCB_DHCSR_C_PMOV_Pos) /*!< DCB DHCSR: Halt on PMU overflow control Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << 
DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask 
*/ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DSCEMCR, Debug Set Clear Exception and Monitor Control Register Definitions */ +#define DCB_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< DCB DSCEMCR: Clear monitor request Position */ +#define DCB_DSCEMCR_CLR_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_REQ_Pos) /*!< DCB DSCEMCR: Clear monitor request Mask */ + +#define DCB_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< DCB DSCEMCR: Clear monitor pend Position */ +#define DCB_DSCEMCR_CLR_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_PEND_Pos) /*!< DCB DSCEMCR: Clear monitor pend Mask */ + +#define DCB_DSCEMCR_SET_MON_REQ_Pos 3U /*!< DCB DSCEMCR: Set monitor request Position */ +#define DCB_DSCEMCR_SET_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_SET_MON_REQ_Pos) /*!< DCB DSCEMCR: Set monitor request Mask */ + +#define DCB_DSCEMCR_SET_MON_PEND_Pos 1U /*!< DCB DSCEMCR: Set monitor pend Position */ +#define DCB_DSCEMCR_SET_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_SET_MON_PEND_Pos) /*!< DCB DSCEMCR: Set monitor pend Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_UIDEN_Pos 10U /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Position */ +#define DCB_DAUTHCTRL_UIDEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Mask */ + +#define DCB_DAUTHCTRL_UIDAPEN_Pos 9U /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Position */ +#define DCB_DAUTHCTRL_UIDAPEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDAPEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Mask */ + +#define DCB_DAUTHCTRL_FSDMA_Pos 8U /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Position */ +#define DCB_DAUTHCTRL_FSDMA_Msk (0x1UL << DCB_DAUTHCTRL_FSDMA_Pos) /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Mask */ + +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< 
DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
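+ \details Example of reading the debug authentication status through this block
+          (editorial sketch, not part of the original CMSIS source):
+ \code
+   uint32_t sid = _FLD2VAL(DIB_DAUTHSTATUS_SID, DIB->DAUTHSTATUS);   // Secure invasive debug allowed field
+ \endcode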
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SUNID_Pos 22U /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUNID_Msk (0x3UL << DIB_DAUTHSTATUS_SUNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SUID_Pos 20U /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUID_Msk (0x3UL << DIB_DAUTHSTATUS_SUID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_NSUNID_Pos 18U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Position */ +#define DIB_DAUTHSTATUS_NSUNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Mask */ + +#define DIB_DAUTHSTATUS_NSUID_Pos 16U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_NSUID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << 
DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. 
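+ 
+          The base addresses and typed pointers defined below let the core peripherals
+          be accessed as plain structures, for example (editorial sketch, not part of
+          the original CMSIS source):
+ \code
+   SysTick->LOAD = 0x00FFFFFFUL;                                    // program the SysTick reload value
+   uint32_t prigroup = _FLD2VAL(SCB_AIRCR_PRIGROUP, SCB->AIRCR);    // extract a bit field with _FLD2VAL
+ \endcode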
+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define MEMSYSCTL_BASE (0xE001E000UL) /*!< Memory System Control Base Address */ + #define ERRBNK_BASE (0xE001E100UL) /*!< Error Banking Base Address */ + #define PWRMODCTL_BASE (0xE001E300UL) /*!< Power Mode Control Base Address */ + #define EWIC_BASE (0xE001E400UL) /*!< External Wakeup Interrupt Controller Base Address */ + #define PRCCFGINF_BASE (0xE001E700UL) /*!< Processor Configuration Information Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define ICB ((ICB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define MEMSYSCTL ((MemSysCtl_Type *) MEMSYSCTL_BASE ) /*!< Memory System Control configuration struct */ + #define ERRBNK ((ErrBnk_Type *) ERRBNK_BASE ) /*!< Error Banking configuration struct */ + #define PWRMODCTL ((PwrModCtl_Type *) PWRMODCTL_BASE ) /*!< Power Mode Control configuration struct */ + #define EWIC ((EWIC_Type *) EWIC_BASE ) /*!< EWIC configuration struct */ + #define PRCCFGINF ((PrcCfgInf_Type *) PRCCFGINF_BASE ) /*!< Processor Configuration Information configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + #define PMU_BASE (0xE0003000UL) /*!< PMU Base Address */ + #define PMU ((PMU_Type *) PMU_BASE ) /*!< PMU configuration struct */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base 
Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define ICB_NS ((ICB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
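+ 
+          A minimal configuration sequence using these functions (editorial sketch,
+          not part of the original CMSIS source; TIMER0_IRQn is a hypothetical
+          device-specific interrupt number from a device header):
+ \code
+   NVIC_SetPriorityGrouping(3U);          // select the preempt/sub-priority split
+   NVIC_SetPriority(TIMER0_IRQn, 2U);     // TIMER0_IRQn: hypothetical device IRQ
+   NVIC_EnableIRQ(TIMER0_IRQn);           // enable it in the NVIC
+ \endcode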
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
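+ 
+           Example (editorial sketch, not part of the original CMSIS source):
+ \code
+   NVIC_SetPriorityGrouping(5U);                     // 2 bits of group priority, rest sub-priority
+   uint32_t grouping = NVIC_GetPriorityGrouping();   // reads back the PRIGROUP field, here 5
+ \endcode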
+ */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. 
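+ 
+           Example (editorial sketch, not part of the original CMSIS source;
+           UART0_IRQn is a hypothetical device-specific interrupt number):
+ \code
+   NVIC_SetPendingIRQ(UART0_IRQn);        // UART0_IRQn: hypothetical device IRQ, pended from software
+   if (NVIC_GetPendingIRQ(UART0_IRQn))    // pending bit is now set
+   {
+     NVIC_ClearPendingIRQ(UART0_IRQn);    // clear it again
+   }
+ \endcode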
+ \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
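+ 
+           Example (editorial sketch, not part of the original CMSIS source;
+           TIMER0_IRQn is a hypothetical device-specific interrupt number and
+           SysTick_IRQn is assumed to be provided by the device header):
+ \code
+   uint32_t prio = NVIC_EncodePriority(NVIC_GetPriorityGrouping(), 1U, 0U);
+   NVIC_SetPriority(TIMER0_IRQn, prio);   // device-specific interrupt (IRQn >= 0)
+   NVIC_SetPriority(SysTick_IRQn, prio);  // processor exception (IRQn < 0)
+ \endcode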
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
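+ 
+           Example, callable from Secure state only (editorial sketch, not part of
+           the original CMSIS source):
+ \code
+   TZ_NVIC_SetPriorityGrouping_NS(3U);                       // configure the Non-secure PRIGROUP
+   uint32_t ns_grouping = TZ_NVIC_GetPriorityGrouping_NS();  // read it back
+ \endcode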
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## PMU functions and events #################################### */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + +#include "pmu_armv8.h" + +/** + \brief Cortex-M85 PMU events + \note Architectural PMU events can be found in pmu_armv8.h +*/ + +#define ARMCM85_PMU_ECC_ERR 0xC000 /*!< One or more Error Correcting Code (ECC) errors detected */ +#define ARMCM85_PMU_ECC_ERR_MBIT 0xC001 /*!< One or more multi-bit ECC errors detected */ +#define ARMCM85_PMU_ECC_ERR_DCACHE 0xC010 /*!< One or more ECC errors in the data cache */ +#define ARMCM85_PMU_ECC_ERR_ICACHE 0xC011 /*!< One or more ECC errors in the instruction cache */ +#define ARMCM85_PMU_ECC_ERR_MBIT_DCACHE 0xC012 /*!< One or more multi-bit ECC errors in the data cache */ +#define ARMCM85_PMU_ECC_ERR_MBIT_ICACHE 0xC013 /*!< One or more multi-bit ECC errors in the instruction cache */ +#define ARMCM85_PMU_ECC_ERR_DTCM 0xC020 /*!< One or more ECC errors in the Data Tightly Coupled Memory (DTCM) */ +#define ARMCM85_PMU_ECC_ERR_ITCM 0xC021 /*!< One or more ECC errors in the Instruction Tightly Coupled Memory (ITCM) */ +#define ARMCM85_PMU_ECC_ERR_MBIT_DTCM 0xC022 /*!< One or more multi-bit ECC errors in the DTCM */ +#define ARMCM85_PMU_ECC_ERR_MBIT_ITCM 0xC023 /*!< One 
or more multi-bit ECC errors in the ITCM */ +#define ARMCM85_PMU_PF_LINEFILL 0xC100 /*!< The prefetcher starts a line-fill */ +#define ARMCM85_PMU_PF_CANCEL 0xC101 /*!< The prefetcher stops prefetching */ +#define ARMCM85_PMU_PF_DROP_LINEFILL 0xC102 /*!< A linefill triggered by a prefetcher has been dropped because of lack of buffering */ +#define ARMCM85_PMU_NWAMODE_ENTER 0xC200 /*!< No write-allocate mode entry */ +#define ARMCM85_PMU_NWAMODE 0xC201 /*!< Write-allocate store is not allocated into the data cache due to no-write-allocate mode */ +#define ARMCM85_PMU_SAHB_ACCESS 0xC300 /*!< Read or write access on the S-AHB interface to the TCM */ +#define ARMCM85_PMU_PAHB_ACCESS 0xC301 /*!< Read or write access on the P-AHB write interface */ +#define ARMCM85_PMU_AXI_WRITE_ACCESS 0xC302 /*!< Any beat access to M-AXI write interface */ +#define ARMCM85_PMU_AXI_READ_ACCESS 0xC303 /*!< Any beat access to M-AXI read interface */ +#define ARMCM85_PMU_DOSTIMEOUT_DOUBLE 0xC400 /*!< Denial of Service timeout has fired twice and caused buffers to drain to allow forward progress */ +#define ARMCM85_PMU_DOSTIMEOUT_TRIPLE 0xC401 /*!< Denial of Service timeout has fired three times and blocked the LSU to force forward progress */ + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + +/* ########################## MVE functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_MveFunctions MVE Functions + \brief Function that provides MVE type. + @{ + */ + +/** + \brief get MVE type + \details returns the MVE type + \returns + - \b 0: No Vector Extension (MVE) + - \b 1: Integer Vector Extension (MVE-I) + - \b 2: Floating-point Vector Extension (MVE-F) + */ +__STATIC_INLINE uint32_t SCB_GetMVEType(void) +{ + const uint32_t mvfr1 = FPU->MVFR1; + if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x2U << FPU_MVFR1_MVE_Pos)) + { + return 2U; + } + else if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x1U << FPU_MVFR1_MVE_Pos)) + { + return 1U; + } + else + { + return 0U; + } +} + + +/*@} end of CMSIS_Core_MveFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). 
+ */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + +/* ################### PAC Key functions ########################### */ + +#if (defined (__ARM_FEATURE_PAUTH) && (__ARM_FEATURE_PAUTH == 1)) +#include "pac_armv81.h" +#endif + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. 
+ @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM85_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/core_sc000.h b/Drivers/CMSIS/Include/core_sc000.h index 9b67c92f..dbc755ff 100644 --- a/Drivers/CMSIS/Include/core_sc000.h +++ b/Drivers/CMSIS/Include/core_sc000.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_sc000.h * @brief CMSIS SC000 Core Peripheral Access Layer Header File - * @version V5.0.5 - * @date 28. May 2018 + * @version V5.0.7 + * @date 27. March 2020 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -62,7 +62,7 @@ #include "cmsis_version.h" -/* CMSIS SC000 definitions */ +/* CMSIS SC000 definitions */ #define __SC000_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ #define __SC000_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ #define __SC000_CMSIS_VERSION ((__SC000_CMSIS_VERSION_MAIN << 16U) | \ @@ -81,7 +81,7 @@ #endif #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) - #if defined __ARM_PCS_VFP + #if defined __ARM_FP #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" #endif @@ -142,6 +142,11 @@ #warning "__MPU_PRESENT not defined in device header file; using default!" #endif + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 2U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" 
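ITM_SendChar() as defined above only emits a character once a debugger has enabled the ITM and stimulus port 0, so it is the usual low-level hook for routing printf-style output over SWO. A minimal retarget sketch under that assumption follows; the newlib-style _write() stub is just one common hook (other toolchains retarget fputc() instead) and is not part of CMSIS, and the device header that pulls in this core file is assumed to be included.

/* Hypothetical newlib syscall stub: forward printf() output to ITM stimulus port 0. */
int _write(int file, char *ptr, int len)
{
  (void)file;                                   /* single debug stream: the file descriptor is ignored */
  for (int i = 0; i < len; i++)
  {
    /* Blocks while port 0 is busy when tracing is enabled; otherwise returns immediately. */
    (void)ITM_SendChar((uint32_t)(uint8_t)ptr[i]);
  }
  return len;
}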
@@ -750,7 +755,9 @@ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) { if ((int32_t)(IRQn) >= 0) { + __COMPILER_BARRIER(); NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); } } @@ -904,6 +911,7 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) { uint32_t *vectors = (uint32_t *)SCB->VTOR; vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M0 and M0+ do not require the architectural barrier - assume SC000 is the same */ } diff --git a/Drivers/CMSIS/Include/core_sc300.h b/Drivers/CMSIS/Include/core_sc300.h index 3e8a4710..d6662103 100644 --- a/Drivers/CMSIS/Include/core_sc300.h +++ b/Drivers/CMSIS/Include/core_sc300.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_sc300.h * @brief CMSIS SC300 Core Peripheral Access Layer Header File - * @version V5.0.6 - * @date 04. June 2018 + * @version V5.0.10 + * @date 04. June 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -62,7 +62,7 @@ #include "cmsis_version.h" -/* CMSIS SC300 definitions */ +/* CMSIS SC300 definitions */ #define __SC300_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ #define __SC300_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ #define __SC300_CMSIS_VERSION ((__SC300_CMSIS_VERSION_MAIN << 16U) | \ @@ -81,7 +81,7 @@ #endif #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) - #if defined __ARM_PCS_VFP + #if defined __ARM_FP #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" #endif @@ -142,6 +142,11 @@ #warning "__MPU_PRESENT not defined in device header file; using default!" #endif + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" 
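The __NVIC_SetVector() helpers annotated above (and the Cortex-M85 variant earlier in this patch) simply write through SCB->VTOR, so the vector table must already live in writable SRAM before they are called. A minimal relocation sketch under that assumption follows; the RAM_VECTOR_COUNT sizing, the GCC-style aligned attribute, and the helper name are illustrative and not part of CMSIS.

#define RAM_VECTOR_COUNT  (16U + 32U)   /* 16 system exceptions + 32 device IRQs: example sizing only */

/* VTOR requires the table to be aligned to its size rounded up to a power of two (256 B here). */
static uint32_t ram_vectors[RAM_VECTOR_COUNT] __attribute__((aligned(256)));

void relocate_vector_table_to_sram(void)        /* illustrative helper, not a CMSIS API */
{
  const uint32_t *rom_vectors = (const uint32_t *)SCB->VTOR;   /* current table, typically in flash */

  for (uint32_t i = 0U; i < RAM_VECTOR_COUNT; i++)
  {
    ram_vectors[i] = rom_vectors[i];            /* copy every entry, including the initial stack pointer */
  }

  __DMB();                                      /* ensure the copy completes before repointing VTOR */
  SCB->VTOR = (uint32_t)ram_vectors;            /* NVIC_SetVector()/NVIC_GetVector() now use the SRAM copy */
  __DSB();
  __ISB();
}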
@@ -342,7 +347,7 @@ typedef struct __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ uint32_t RESERVED0[24U]; __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ - uint32_t RSERVED1[24U]; + uint32_t RESERVED1[24U]; __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ uint32_t RESERVED2[24U]; __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ @@ -557,19 +562,19 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -653,13 +658,23 @@ typedef struct { uint32_t RESERVED0[1U]; __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ - uint32_t RESERVED1[1U]; + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ } SCnSCB_Type; /* Interrupt Controller Type Register Definitions */ #define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ #define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISDEFWBUF_Pos 1U /*!< ACTLR: DISDEFWBUF Position */ +#define SCnSCB_ACTLR_DISDEFWBUF_Msk (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos) /*!< ACTLR: DISDEFWBUF Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + /*@} end of group CMSIS_SCnotSCB */ @@ -739,10 +754,7 @@ typedef struct 
__IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ uint32_t RESERVED2[15U]; __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ - uint32_t RESERVED3[29U]; - __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ - __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ - __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED3[32U]; uint32_t RESERVED4[43U]; __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ @@ -793,18 +805,6 @@ typedef struct #define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ #define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ -/* ITM Integration Write Register Definitions */ -#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ -#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ - -/* ITM Integration Read Register Definitions */ -#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ -#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ - -/* ITM Integration Mode Control Register Definitions */ -#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ -#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ - /* ITM Lock Status Register Definitions */ #define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ #define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ @@ -1037,13 +1037,13 @@ typedef struct /* TPI Integration ETM Data Register Definitions (FIFO0) */ #define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ -#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ #define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ #define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ #define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ -#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ #define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ #define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ @@ -1066,13 +1066,13 @@ typedef struct /* TPI Integration ITM Data Register Definitions (FIFO1) */ #define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ -#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ #define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ #define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ #define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ -#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: 
ETM_ATVALID Mask */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ #define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ #define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ @@ -1448,7 +1448,6 @@ typedef struct #define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ - /** \brief Set Priority Grouping \details Sets the priority grouping field using the required unlock sequence. @@ -1467,7 +1466,7 @@ __STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ reg_value = (reg_value | ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | - (PriorityGroupTmp << 8U) ); /* Insert write key and priorty group */ + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ SCB->AIRCR = reg_value; } @@ -1493,7 +1492,9 @@ __STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) { if ((int32_t)(IRQn) >= 0) { + __COMPILER_BARRIER(); NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); } } @@ -1718,6 +1719,7 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) { uint32_t *vectors = (uint32_t *)SCB->VTOR; vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M3 does not require the architectural barrier */ } diff --git a/Drivers/CMSIS/Include/core_starmc1.h b/Drivers/CMSIS/Include/core_starmc1.h new file mode 100644 index 00000000..d86c8d38 --- /dev/null +++ b/Drivers/CMSIS/Include/core_starmc1.h @@ -0,0 +1,3592 @@ +/**************************************************************************//** + * @file core_starmc1.h + * @brief CMSIS ArmChina STAR-MC1 Core Peripheral Access Layer Header File + * @version V1.0.2 + * @date 07. April 2022 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. + * Copyright (c) 2018-2022 Arm China. + * All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_STAR_H_GENERIC +#define __CORE_STAR_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup STAR-MC1 + @{ + */ + +#include "cmsis_version.h" + +/* Macro Define for STAR-MC1 */ +#define __STAR_MC (1U) /*!< STAR-MC Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define 
__FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_STAR_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_STAR_H_DEPENDANT +#define __CORE_STAR_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __STAR_REV + #define __STAR_REV 0x0000U + #warning "__STAR_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DTCM_PRESENT + #define __DTCM_PRESENT 0U + #warning "__DTCM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group STAR-MC1 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for STAR-MC1 processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[1U]; + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control 
Register */ + uint32_t RESERVED_ADD1[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: F00-D00=0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ +} SCB_Type; + +typedef struct +{ + __IOM uint32_t CACR; /*!< Offset: 0x0 (R/W) L1 Cache Control Register */ + __IOM uint32_t ITCMCR; /*!< Offset: 0x10 (R/W) Instruction Tightly-Coupled Memory Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x14 (R/W) Data Tightly-Coupled Memory Control Registers */ +}EMSS_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: 
PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: 
SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << 
SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define 
SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE 
Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +#define SCB_CLIDR_IC_Pos 0U /*!< SCB CLIDR: IC Position */ +#define SCB_CLIDR_IC_Msk (1UL << SCB_CLIDR_IC_Pos) /*!< SCB CLIDR: IC Mask */ + +#define SCB_CLIDR_DC_Pos 1U /*!< SCB CLIDR: DC Position */ +#define SCB_CLIDR_DC_Msk (1UL << SCB_CLIDR_DC_Pos) /*!< SCB CLIDR: DC Mask */ + + + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position 
*/ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache line Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_LEVEL_Pos 1U /*!< SCB DCISW: Level Position */ +#define SCB_DCISW_LEVEL_Msk (7UL << SCB_DCISW_LEVEL_Pos) /*!< SCB DCISW: Level Mask */ + +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0xFFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean line by Set-way Register Definitions */ +#define SCB_DCCSW_LEVEL_Pos 1U /*!< SCB DCCSW: Level Position */ +#define SCB_DCCSW_LEVEL_Msk (7UL << SCB_DCCSW_LEVEL_Pos) /*!< SCB DCCSW: Level Mask */ + +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0xFFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_LEVEL_Pos 1U /*!< SCB DCCISW: Level Position */ +#define SCB_DCCISW_LEVEL_Msk (7UL << SCB_DCCISW_LEVEL_Pos) /*!< SCB DCCISW: Level Mask */ + +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0xFFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/* ArmChina: Implementation Defined */ +/* Instruction Tightly-Coupled Memory Control Register 
Definitions */ +#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ +#define SCB_ITCMCR_SZ_Msk (0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ + +#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ +#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ + +/* Data Tightly-Coupled Memory Control Register Definitions */ +#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ +#define SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */ + +#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */ +#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */ + +/* L1 Cache Control Register Definitions */ +#define SCB_CACR_DCCLEAN_Pos 16U /*!< SCB CACR: DCCLEAN Position */ +#define SCB_CACR_DCCLEAN_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: DCCLEAN Mask */ + +#define SCB_CACR_ICACTIVE_Pos 13U /*!< SCB CACR: ICACTIVE Position */ +#define SCB_CACR_ICACTIVE_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: ICACTIVE Mask */ + +#define SCB_CACR_DCACTIVE_Pos 12U /*!< SCB CACR: DCACTIVE Position */ +#define SCB_CACR_DCACTIVE_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: DCACTIVE Mask */ + +#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ +#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[4U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << 
ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator 
Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << 
DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT 
FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg 
Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration Test FIFO Test Data 0 Register Definitions */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD0: ATB Interface 2 ATVALIDPosition */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD0: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD0: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPI ITFTTD0: ATB Interface 1 data2 Position */ +#define TPI_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPI ITFTTD0: ATB Interface 1 data1 Position */ +#define TPI_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPI ITFTTD0: ATB Interface 1 data0 Position */ +#define TPI_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPI_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPI ITFTTD0: ATB Interface 1 data0 Mask */ + +/* TPI Integration Test ATB Control Register 2 Register Definitions */ +#define TPI_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPI ITATBCTR2: AFVALID2S Position */ +#define TPI_ITATBCTR2_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID2S_Pos) /*!< TPI ITATBCTR2: AFVALID2SS Mask */ + +#define TPI_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPI ITATBCTR2: AFVALID1S Position */ +#define TPI_ITATBCTR2_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID1S_Pos) /*!< TPI ITATBCTR2: AFVALID1SS Mask */ + +#define TPI_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPI ITATBCTR2: ATREADY2S Position */ +#define TPI_ITATBCTR2_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPI ITATBCTR2: ATREADY2S Mask */ + +#define TPI_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPI ITATBCTR2: ATREADY1S Position */ +#define TPI_ITATBCTR2_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPI ITATBCTR2: ATREADY1S Mask */ + +/* TPI Integration Test FIFO Test Data 1 Register Definitions */ +#define 
TPI_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD1: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD1: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPI ITFTTD1: ATB Interface 2 data2 Position */ +#define TPI_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPI ITFTTD1: ATB Interface 2 data1 Position */ +#define TPI_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPI ITFTTD1: ATB Interface 2 data0 Position */ +#define TPI_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPI_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPI ITFTTD1: ATB Interface 2 data0 Mask */ + +/* TPI Integration Test ATB Control Register 0 Definitions */ +#define TPI_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPI ITATBCTR0: AFVALID2S Position */ +#define TPI_ITATBCTR0_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID2S_Pos) /*!< TPI ITATBCTR0: AFVALID2SS Mask */ + +#define TPI_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPI ITATBCTR0: AFVALID1S Position */ +#define TPI_ITATBCTR0_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID1S_Pos) /*!< TPI ITATBCTR0: AFVALID1SS Mask */ + +#define TPI_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPI ITATBCTR0: ATREADY2S Position */ +#define TPI_ITATBCTR0_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPI ITATBCTR0: ATREADY2S Mask */ + +#define TPI_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPI ITATBCTR0: ATREADY1S Position */ +#define TPI_ITATBCTR0_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPI ITATBCTR0: ATREADY1S Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFOSZ Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFOSZ Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define 
TPI_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ 
+#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + 
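+/* Usage sketch: the MVFRx registers are read-only feature registers; their
+   fields can be probed with the _Pos/_Msk pairs above through the FPU instance
+   pointer used elsewhere in this header (see SCB_GetFPUType() below), e.g.
+     uint32_t hpfp = (FPU->MVFR1 & FPU_MVFR1_FP_HPFP_Msk) >> FPU_MVFR1_FP_HPFP_Pos;
+*/
+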
+#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + + + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step 
control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define 
DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
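+ \note These registers are read through the DIB instance pointer, e.g.
+ DIB->DAUTHSTATUS as used by DIB_GetAuthStatus() further down in this header.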
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define EMSS_BASE (0xE001E000UL) /*!AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. 
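+ For example (USART1_IRQn is a device-specific interrupt number, used here
+ only for illustration):
+ if (__NVIC_GetEnableIRQ(USART1_IRQn) == 0U) { __NVIC_EnableIRQ(USART1_IRQn); }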
+ \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. 
Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. 
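+ Typically called as a last resort from a fatal-error handler; the function
+ does not return (note the endless loop in the implementation below).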
+ */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses including + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/** + \brief Software Reset + \details Initiates a system reset request to reset the CPU. + */ +__NO_RETURN __STATIC_INLINE void __SW_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses including + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_BFHFNMINS_Msk) | /* Keep BFHFNMINS unchanged. Use this Reset function in case your case need to keep it */ + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | /* Keep priority group unchanged */ + SCB_AIRCR_SYSRESETREQ_Msk ); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. 
+ */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. 
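+ \note The value is typically composed from the DCB_DAUTHCTRL_*_Msk definitions
+ above, e.g. (DCB_DAUTHCTRL_INTSPIDEN_Msk | DCB_DAUTHCTRL_SPIDENSEL_Msk).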
+ */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) + +/* ########################## Cache functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_CacheFunctions Cache Functions + \brief Functions that configure Instruction and Data cache. + @{ + */ + +/* Cache Size ID Register Macros */ +#define CCSIDR_WAYS(x) (((x) & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos) +#define CCSIDR_SETS(x) (((x) & SCB_CCSIDR_NUMSETS_Msk ) >> SCB_CCSIDR_NUMSETS_Pos ) + +#define __SCB_DCACHE_LINE_SIZE 32U /*!< STAR-MC1 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */ +#define __SCB_ICACHE_LINE_SIZE 32U /*!< STAR-MC1 cache line size is fixed to 32 bytes (8 words). 
See also register SCB_CCSIDR */ + +/** + \brief Enable I-Cache + \details Turns on I-Cache + */ +__STATIC_FORCEINLINE void SCB_EnableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + if (SCB->CCR & SCB_CCR_IC_Msk) return; /* return if ICache is already enabled */ + + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + SCB->CCR |= (uint32_t)SCB_CCR_IC_Msk; /* enable I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable I-Cache + \details Turns off I-Cache + */ +__STATIC_FORCEINLINE void SCB_DisableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->CCR &= ~(uint32_t)SCB_CCR_IC_Msk; /* disable I-Cache */ + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate I-Cache + \details Invalidates I-Cache + */ +__STATIC_FORCEINLINE void SCB_InvalidateICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; + __DSB(); + __ISB(); + #endif +} + + +/** + \brief I-Cache Invalidate by address + \details Invalidates I-Cache for the given address. + I-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity. + I-Cache memory blocks which are part of given address + given size are invalidated. + \param[in] addr address + \param[in] isize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (void *addr, int32_t isize) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + if ( isize > 0 ) { + int32_t op_size = isize + (((uint32_t)addr) & (__SCB_ICACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_ICACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->ICIMVAU = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_ICACHE_LINE_SIZE; + op_size -= __SCB_ICACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief Enable D-Cache + \details Turns on D-Cache + */ +__STATIC_FORCEINLINE void SCB_EnableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + if (SCB->CCR & SCB_CCR_DC_Msk) return; /* return if DCache is already enabled */ + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + __DSB(); + + SCB->CCR |= (uint32_t)SCB_CCR_DC_Msk; /* enable D-Cache */ + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable D-Cache + \details Turns off D-Cache + */ +__STATIC_FORCEINLINE void SCB_DisableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + 
((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate D-Cache + \details Invalidates D-Cache + */ +__STATIC_FORCEINLINE void SCB_InvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean D-Cache + \details Cleans D-Cache + */ +__STATIC_FORCEINLINE void SCB_CleanDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCSW = (((sets << SCB_DCCSW_SET_Pos) & SCB_DCCSW_SET_Msk) | + ((ways << SCB_DCCSW_WAY_Pos) & SCB_DCCSW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean & Invalidate D-Cache + \details Cleans and Invalidates D-Cache + */ +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief D-Cache Invalidate by address + \details Invalidates D-Cache for the given address. + D-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are invalidated. + \param[in] addr address + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (void *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief D-Cache Clean by address + \details Cleans D-Cache for the given address + D-Cache is cleaned starting from a 32 byte aligned address in 32 byte granularity. 
+ D-Cache memory blocks which are part of given address + given size are cleaned. + \param[in] addr address + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCCMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief D-Cache Clean and Invalidate by address + \details Cleans and invalidates D_Cache for the given address + D-Cache is cleaned and invalidated starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are cleaned and invalidated. + \param[in] addr address (aligned to 32-byte boundary) + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + +/*@} end of CMSIS_Core_CacheFunctions */ +#endif + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
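+ Example (assumes the CMSIS SystemCoreClock variable holds the core clock in Hz):
+ SysTick_Config(SystemCoreClock / 1000U); configures a 1 ms tick interrupt.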
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. 
+ */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_STAR_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/Drivers/CMSIS/Include/mpu_armv7.h b/Drivers/CMSIS/Include/mpu_armv7.h index 01422033..d9eedf81 100644 --- a/Drivers/CMSIS/Include/mpu_armv7.h +++ b/Drivers/CMSIS/Include/mpu_armv7.h @@ -1,11 +1,11 @@ /****************************************************************************** * @file mpu_armv7.h * @brief CMSIS MPU API for Armv7-M MPU - * @version V5.0.4 - * @date 10. January 2018 + * @version V5.1.2 + * @date 25. May 2020 ******************************************************************************/ /* - * Copyright (c) 2017-2018 Arm Limited. All rights reserved. + * Copyright (c) 2017-2020 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -86,10 +86,10 @@ * \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy. */ #define ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable) \ - ((((TypeExtField ) << MPU_RASR_TEX_Pos) & MPU_RASR_TEX_Msk) | \ - (((IsShareable ) << MPU_RASR_S_Pos) & MPU_RASR_S_Msk) | \ - (((IsCacheable ) << MPU_RASR_C_Pos) & MPU_RASR_C_Msk) | \ - (((IsBufferable ) << MPU_RASR_B_Pos) & MPU_RASR_B_Msk)) + ((((TypeExtField) << MPU_RASR_TEX_Pos) & MPU_RASR_TEX_Msk) | \ + (((IsShareable) << MPU_RASR_S_Pos) & MPU_RASR_S_Msk) | \ + (((IsCacheable) << MPU_RASR_C_Pos) & MPU_RASR_C_Msk) | \ + (((IsBufferable) << MPU_RASR_B_Pos) & MPU_RASR_B_Msk)) /** * MPU Region Attribute and Size Register Value @@ -100,11 +100,14 @@ * \param SubRegionDisable Sub-region disable field. * \param Size Region size of the region to be configured, for example 4K, 8K. */ -#define ARM_MPU_RASR_EX(DisableExec, AccessPermission, AccessAttributes, SubRegionDisable, Size) \ - ((((DisableExec ) << MPU_RASR_XN_Pos) & MPU_RASR_XN_Msk) | \ - (((AccessPermission) << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | \ - (((AccessAttributes) ) & (MPU_RASR_TEX_Msk | MPU_RASR_S_Msk | MPU_RASR_C_Msk | MPU_RASR_B_Msk))) - +#define ARM_MPU_RASR_EX(DisableExec, AccessPermission, AccessAttributes, SubRegionDisable, Size) \ + ((((DisableExec) << MPU_RASR_XN_Pos) & MPU_RASR_XN_Msk) | \ + (((AccessPermission) << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | \ + (((AccessAttributes) & (MPU_RASR_TEX_Msk | MPU_RASR_S_Msk | MPU_RASR_C_Msk | MPU_RASR_B_Msk))) | \ + (((SubRegionDisable) << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk) | \ + (((Size) << MPU_RASR_SIZE_Pos) & MPU_RASR_SIZE_Msk) | \ + (((MPU_RASR_ENABLE_Msk)))) + /** * MPU Region Attribute and Size Register Value * @@ -131,7 +134,7 @@ /** * MPU Memory Access Attribute for device memory. 
-* - TEX: 000b (if non-shareable) or 010b (if shareable) +* - TEX: 000b (if shareable) or 010b (if non-shareable) * - Shareable or non-shareable * - Non-cacheable * - Bufferable (if shareable) or non-bufferable (if non-shareable) @@ -151,7 +154,7 @@ * \param InnerCp Configures the inner cache policy. * \param IsShareable Configures the memory as shareable or non-shareable. */ -#define ARM_MPU_ACCESS_NORMAL(OuterCp, InnerCp, IsShareable) ARM_MPU_ACCESS_((4U | (OuterCp)), IsShareable, ((InnerCp) & 2U), ((InnerCp) & 1U)) +#define ARM_MPU_ACCESS_NORMAL(OuterCp, InnerCp, IsShareable) ARM_MPU_ACCESS_((4U | (OuterCp)), IsShareable, ((InnerCp) >> 1U), ((InnerCp) & 1U)) /** * MPU Memory Access Attribute non-cacheable policy. @@ -187,24 +190,26 @@ typedef struct { */ __STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control) { - __DSB(); - __ISB(); + __DMB(); MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; #ifdef SCB_SHCSR_MEMFAULTENA_Msk SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; #endif + __DSB(); + __ISB(); } /** Disable the MPU. */ __STATIC_INLINE void ARM_MPU_Disable(void) { - __DSB(); - __ISB(); + __DMB(); #ifdef SCB_SHCSR_MEMFAULTENA_Msk SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; #endif MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk; + __DSB(); + __ISB(); } /** Clear and disable the given MPU region. @@ -218,7 +223,7 @@ __STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr) /** Configure an MPU region. * \param rbar Value for RBAR register. -* \param rsar Value for RSAR register. +* \param rasr Value for RASR register. */ __STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rbar, uint32_t rasr) { @@ -229,7 +234,7 @@ __STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rbar, uint32_t rasr) /** Configure the given MPU region. * \param rnr Region number to be configured. * \param rbar Value for RBAR register. -* \param rsar Value for RSAR register. +* \param rasr Value for RASR register. */ __STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t rasr) { @@ -238,12 +243,12 @@ __STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t r MPU->RASR = rasr; } -/** Memcopy with strictly ordered memory access, e.g. for register targets. +/** Memcpy with strictly ordered memory access, e.g. used by code in ARM_MPU_Load(). * \param dst Destination data is copied to. * \param src Source data is copied from. * \param len Amount of data words to be copied. 
*/ -__STATIC_INLINE void orderedCpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len) +__STATIC_INLINE void ARM_MPU_OrderedMemcpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len) { uint32_t i; for (i = 0U; i < len; ++i) @@ -260,11 +265,11 @@ __STATIC_INLINE void ARM_MPU_Load(ARM_MPU_Region_t const* table, uint32_t cnt) { const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U; while (cnt > MPU_TYPE_RALIASES) { - orderedCpy(&(MPU->RBAR), &(table->RBAR), MPU_TYPE_RALIASES*rowWordSize); + ARM_MPU_OrderedMemcpy(&(MPU->RBAR), &(table->RBAR), MPU_TYPE_RALIASES*rowWordSize); table += MPU_TYPE_RALIASES; cnt -= MPU_TYPE_RALIASES; } - orderedCpy(&(MPU->RBAR), &(table->RBAR), cnt*rowWordSize); + ARM_MPU_OrderedMemcpy(&(MPU->RBAR), &(table->RBAR), cnt*rowWordSize); } #endif diff --git a/Drivers/CMSIS/Include/mpu_armv8.h b/Drivers/CMSIS/Include/mpu_armv8.h index 62571da5..3de16efc 100644 --- a/Drivers/CMSIS/Include/mpu_armv8.h +++ b/Drivers/CMSIS/Include/mpu_armv8.h @@ -1,11 +1,11 @@ /****************************************************************************** * @file mpu_armv8.h - * @brief CMSIS MPU API for Armv8-M MPU - * @version V5.0.4 - * @date 10. January 2018 + * @brief CMSIS MPU API for Armv8-M and Armv8.1-M MPU + * @version V5.1.3 + * @date 03. February 2021 ******************************************************************************/ /* - * Copyright (c) 2017-2018 Arm Limited. All rights reserved. + * Copyright (c) 2017-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -44,7 +44,7 @@ * \param WA Write Allocation: Set to 1 to use cache allocation on write miss. */ #define ARM_MPU_ATTR_MEMORY_(NT, WB, RA, WA) \ - (((NT & 1U) << 3U) | ((WB & 1U) << 2U) | ((RA & 1U) << 1U) | (WA & 1U)) + ((((NT) & 1U) << 3U) | (((WB) & 1U) << 2U) | (((RA) & 1U) << 1U) | ((WA) & 1U)) /** \brief Device memory type non Gathering, non Re-ordering, non Early Write Acknowledgement */ #define ARM_MPU_ATTR_DEVICE_nGnRnE (0U) @@ -62,7 +62,7 @@ * \param O Outer memory attributes * \param I O == ARM_MPU_ATTR_DEVICE: Device memory attributes, else: Inner memory attributes */ -#define ARM_MPU_ATTR(O, I) (((O & 0xFU) << 4U) | (((O & 0xFU) != 0U) ? (I & 0xFU) : ((I & 0x3U) << 2U))) +#define ARM_MPU_ATTR(O, I) ((((O) & 0xFU) << 4U) | ((((O) & 0xFU) != 0U) ? ((I) & 0xFU) : (((I) & 0x3U) << 2U))) /** \brief Normal memory non-shareable */ #define ARM_MPU_SH_NON (0U) @@ -77,7 +77,7 @@ * \param RO Read-Only: Set to 1 for read-only memory. * \param NP Non-Privileged: Set to 1 for non-privileged memory. */ -#define ARM_MPU_AP_(RO, NP) (((RO & 1U) << 1U) | (NP & 1U)) +#define ARM_MPU_AP_(RO, NP) ((((RO) & 1U) << 1U) | ((NP) & 1U)) /** \brief Region Base Address Register value * \param BASE The base address bits [31:5] of a memory region. The value is zero extended. Effective address gets 32 byte aligned. @@ -87,20 +87,35 @@ * \oaram XN eXecute Never: Set to 1 for a non-executable memory region. */ #define ARM_MPU_RBAR(BASE, SH, RO, NP, XN) \ - ((BASE & MPU_RBAR_BASE_Msk) | \ - ((SH << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | \ + (((BASE) & MPU_RBAR_BASE_Msk) | \ + (((SH) << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | \ ((ARM_MPU_AP_(RO, NP) << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | \ - ((XN << MPU_RBAR_XN_Pos) & MPU_RBAR_XN_Msk)) + (((XN) << MPU_RBAR_XN_Pos) & MPU_RBAR_XN_Msk)) /** \brief Region Limit Address Register value * \param LIMIT The limit address bits [31:5] for this memory region. The value is one extended. 
* \param IDX The attribute index to be associated with this memory region. */ #define ARM_MPU_RLAR(LIMIT, IDX) \ - ((LIMIT & MPU_RLAR_LIMIT_Msk) | \ - ((IDX << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \ + (((LIMIT) & MPU_RLAR_LIMIT_Msk) | \ + (((IDX) << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \ (MPU_RLAR_EN_Msk)) +#if defined(MPU_RLAR_PXN_Pos) + +/** \brief Region Limit Address Register with PXN value +* \param LIMIT The limit address bits [31:5] for this memory region. The value is one extended. +* \param PXN Privileged execute never. Defines whether code can be executed from this privileged region. +* \param IDX The attribute index to be associated with this memory region. +*/ +#define ARM_MPU_RLAR_PXN(LIMIT, PXN, IDX) \ + (((LIMIT) & MPU_RLAR_LIMIT_Msk) | \ + (((PXN) << MPU_RLAR_PXN_Pos) & MPU_RLAR_PXN_Msk) | \ + (((IDX) << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \ + (MPU_RLAR_EN_Msk)) + +#endif + /** * Struct for a single MPU Region */ @@ -114,24 +129,26 @@ typedef struct { */ __STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control) { - __DSB(); - __ISB(); + __DMB(); MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; #ifdef SCB_SHCSR_MEMFAULTENA_Msk SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; #endif + __DSB(); + __ISB(); } /** Disable the MPU. */ __STATIC_INLINE void ARM_MPU_Disable(void) { - __DSB(); - __ISB(); + __DMB(); #ifdef SCB_SHCSR_MEMFAULTENA_Msk SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; #endif MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk; + __DSB(); + __ISB(); } #ifdef MPU_NS @@ -140,24 +157,26 @@ __STATIC_INLINE void ARM_MPU_Disable(void) */ __STATIC_INLINE void ARM_MPU_Enable_NS(uint32_t MPU_Control) { - __DSB(); - __ISB(); + __DMB(); MPU_NS->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; #ifdef SCB_SHCSR_MEMFAULTENA_Msk SCB_NS->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; #endif + __DSB(); + __ISB(); } /** Disable the Non-secure MPU. */ __STATIC_INLINE void ARM_MPU_Disable_NS(void) { - __DSB(); - __ISB(); + __DMB(); #ifdef SCB_SHCSR_MEMFAULTENA_Msk SCB_NS->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; #endif MPU_NS->CTRL &= ~MPU_CTRL_ENABLE_Msk; + __DSB(); + __ISB(); } #endif @@ -262,12 +281,12 @@ __STATIC_INLINE void ARM_MPU_SetRegion_NS(uint32_t rnr, uint32_t rbar, uint32_t } #endif -/** Memcopy with strictly ordered memory access, e.g. for register targets. +/** Memcpy with strictly ordered memory access, e.g. used by code in ARM_MPU_LoadEx() * \param dst Destination data is copied to. * \param src Source data is copied from. * \param len Amount of data words to be copied. 
*/ -__STATIC_INLINE void orderedCpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len) +__STATIC_INLINE void ARM_MPU_OrderedMemcpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len) { uint32_t i; for (i = 0U; i < len; ++i) @@ -287,7 +306,7 @@ __STATIC_INLINE void ARM_MPU_LoadEx(MPU_Type* mpu, uint32_t rnr, ARM_MPU_Region_ const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U; if (cnt == 1U) { mpu->RNR = rnr; - orderedCpy(&(mpu->RBAR), &(table->RBAR), rowWordSize); + ARM_MPU_OrderedMemcpy(&(mpu->RBAR), &(table->RBAR), rowWordSize); } else { uint32_t rnrBase = rnr & ~(MPU_TYPE_RALIASES-1U); uint32_t rnrOffset = rnr % MPU_TYPE_RALIASES; @@ -295,7 +314,7 @@ __STATIC_INLINE void ARM_MPU_LoadEx(MPU_Type* mpu, uint32_t rnr, ARM_MPU_Region_ mpu->RNR = rnrBase; while ((rnrOffset + cnt) > MPU_TYPE_RALIASES) { uint32_t c = MPU_TYPE_RALIASES - rnrOffset; - orderedCpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), c*rowWordSize); + ARM_MPU_OrderedMemcpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), c*rowWordSize); table += c; cnt -= c; rnrOffset = 0U; @@ -303,7 +322,7 @@ __STATIC_INLINE void ARM_MPU_LoadEx(MPU_Type* mpu, uint32_t rnr, ARM_MPU_Region_ mpu->RNR = rnrBase; } - orderedCpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), cnt*rowWordSize); + ARM_MPU_OrderedMemcpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), cnt*rowWordSize); } } diff --git a/Drivers/CMSIS/Include/pac_armv81.h b/Drivers/CMSIS/Include/pac_armv81.h new file mode 100644 index 00000000..854b60a2 --- /dev/null +++ b/Drivers/CMSIS/Include/pac_armv81.h @@ -0,0 +1,206 @@ +/****************************************************************************** + * @file pac_armv81.h + * @brief CMSIS PAC key functions for Armv8.1-M PAC extension + * @version V1.0.0 + * @date 23. March 2022 + ******************************************************************************/ +/* + * Copyright (c) 2022 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef PAC_ARMV81_H +#define PAC_ARMV81_H + + +/* ################### PAC Key functions ########################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_PacKeyFunctions PAC Key functions + \brief Functions that access the PAC keys. + @{ + */ + +#if (defined (__ARM_FEATURE_PAUTH) && (__ARM_FEATURE_PAUTH == 1)) + +/** + \brief read the PAC key used for privileged mode + \details Reads the PAC key stored in the PAC_KEY_P registers. 
+ \param [out] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __get_PAC_KEY_P (uint32_t* pPacKey) { + __ASM volatile ( + "mrs r1, pac_key_p_0\n" + "str r1,[%0,#0]\n" + "mrs r1, pac_key_p_1\n" + "str r1,[%0,#4]\n" + "mrs r1, pac_key_p_2\n" + "str r1,[%0,#8]\n" + "mrs r1, pac_key_p_3\n" + "str r1,[%0,#12]\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief write the PAC key used for privileged mode + \details writes the given PAC key to the PAC_KEY_P registers. + \param [in] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __set_PAC_KEY_P (uint32_t* pPacKey) { + __ASM volatile ( + "ldr r1,[%0,#0]\n" + "msr pac_key_p_0, r1\n" + "ldr r1,[%0,#4]\n" + "msr pac_key_p_1, r1\n" + "ldr r1,[%0,#8]\n" + "msr pac_key_p_2, r1\n" + "ldr r1,[%0,#12]\n" + "msr pac_key_p_3, r1\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief read the PAC key used for unprivileged mode + \details Reads the PAC key stored in the PAC_KEY_U registers. + \param [out] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __get_PAC_KEY_U (uint32_t* pPacKey) { + __ASM volatile ( + "mrs r1, pac_key_u_0\n" + "str r1,[%0,#0]\n" + "mrs r1, pac_key_u_1\n" + "str r1,[%0,#4]\n" + "mrs r1, pac_key_u_2\n" + "str r1,[%0,#8]\n" + "mrs r1, pac_key_u_3\n" + "str r1,[%0,#12]\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief write the PAC key used for unprivileged mode + \details writes the given PAC key to the PAC_KEY_U registers. + \param [in] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __set_PAC_KEY_U (uint32_t* pPacKey) { + __ASM volatile ( + "ldr r1,[%0,#0]\n" + "msr pac_key_u_0, r1\n" + "ldr r1,[%0,#4]\n" + "msr pac_key_u_1, r1\n" + "ldr r1,[%0,#8]\n" + "msr pac_key_u_2, r1\n" + "ldr r1,[%0,#12]\n" + "msr pac_key_u_3, r1\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) + +/** + \brief read the PAC key used for privileged mode (non-secure) + \details Reads the PAC key stored in the non-secure PAC_KEY_P registers when in secure mode. + \param [out] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __TZ_get_PAC_KEY_P_NS (uint32_t* pPacKey) { + __ASM volatile ( + "mrs r1, pac_key_p_0_ns\n" + "str r1,[%0,#0]\n" + "mrs r1, pac_key_p_1_ns\n" + "str r1,[%0,#4]\n" + "mrs r1, pac_key_p_2_ns\n" + "str r1,[%0,#8]\n" + "mrs r1, pac_key_p_3_ns\n" + "str r1,[%0,#12]\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief write the PAC key used for privileged mode (non-secure) + \details writes the given PAC key to the non-secure PAC_KEY_P registers when in secure mode. + \param [in] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __TZ_set_PAC_KEY_P_NS (uint32_t* pPacKey) { + __ASM volatile ( + "ldr r1,[%0,#0]\n" + "msr pac_key_p_0_ns, r1\n" + "ldr r1,[%0,#4]\n" + "msr pac_key_p_1_ns, r1\n" + "ldr r1,[%0,#8]\n" + "msr pac_key_p_2_ns, r1\n" + "ldr r1,[%0,#12]\n" + "msr pac_key_p_3_ns, r1\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief read the PAC key used for unprivileged mode (non-secure) + \details Reads the PAC key stored in the non-secure PAC_KEY_U registers when in secure mode. 
+ \param [out] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __TZ_get_PAC_KEY_U_NS (uint32_t* pPacKey) { + __ASM volatile ( + "mrs r1, pac_key_u_0_ns\n" + "str r1,[%0,#0]\n" + "mrs r1, pac_key_u_1_ns\n" + "str r1,[%0,#4]\n" + "mrs r1, pac_key_u_2_ns\n" + "str r1,[%0,#8]\n" + "mrs r1, pac_key_u_3_ns\n" + "str r1,[%0,#12]\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief write the PAC key used for unprivileged mode (non-secure) + \details writes the given PAC key to the non-secure PAC_KEY_U registers when in secure mode. + \param [in] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __TZ_set_PAC_KEY_U_NS (uint32_t* pPacKey) { + __ASM volatile ( + "ldr r1,[%0,#0]\n" + "msr pac_key_u_0_ns, r1\n" + "ldr r1,[%0,#4]\n" + "msr pac_key_u_1_ns, r1\n" + "ldr r1,[%0,#8]\n" + "msr pac_key_u_2_ns, r1\n" + "ldr r1,[%0,#12]\n" + "msr pac_key_u_3_ns, r1\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +#endif /* (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) */ + +#endif /* (defined (__ARM_FEATURE_PAUTH) && (__ARM_FEATURE_PAUTH == 1)) */ + +/*@} end of CMSIS_Core_PacKeyFunctions */ + + +#endif /* PAC_ARMV81_H */ diff --git a/Drivers/CMSIS/Include/pmu_armv8.h b/Drivers/CMSIS/Include/pmu_armv8.h new file mode 100644 index 00000000..f8f3d893 --- /dev/null +++ b/Drivers/CMSIS/Include/pmu_armv8.h @@ -0,0 +1,337 @@ +/****************************************************************************** + * @file pmu_armv8.h + * @brief CMSIS PMU API for Armv8.1-M PMU + * @version V1.0.1 + * @date 15. April 2020 + ******************************************************************************/ +/* + * Copyright (c) 2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef ARM_PMU_ARMV8_H +#define ARM_PMU_ARMV8_H + +/** + * \brief PMU Events + * \note See the Armv8.1-M Architecture Reference Manual for full details on these PMU events. 
+ * */ + +#define ARM_PMU_SW_INCR 0x0000 /*!< Software update to the PMU_SWINC register, architecturally executed and condition code check pass */ +#define ARM_PMU_L1I_CACHE_REFILL 0x0001 /*!< L1 I-Cache refill */ +#define ARM_PMU_L1D_CACHE_REFILL 0x0003 /*!< L1 D-Cache refill */ +#define ARM_PMU_L1D_CACHE 0x0004 /*!< L1 D-Cache access */ +#define ARM_PMU_LD_RETIRED 0x0006 /*!< Memory-reading instruction architecturally executed and condition code check pass */ +#define ARM_PMU_ST_RETIRED 0x0007 /*!< Memory-writing instruction architecturally executed and condition code check pass */ +#define ARM_PMU_INST_RETIRED 0x0008 /*!< Instruction architecturally executed */ +#define ARM_PMU_EXC_TAKEN 0x0009 /*!< Exception entry */ +#define ARM_PMU_EXC_RETURN 0x000A /*!< Exception return instruction architecturally executed and the condition code check pass */ +#define ARM_PMU_PC_WRITE_RETIRED 0x000C /*!< Software change to the Program Counter (PC). Instruction is architecturally executed and condition code check pass */ +#define ARM_PMU_BR_IMMED_RETIRED 0x000D /*!< Immediate branch architecturally executed */ +#define ARM_PMU_BR_RETURN_RETIRED 0x000E /*!< Function return instruction architecturally executed and the condition code check pass */ +#define ARM_PMU_UNALIGNED_LDST_RETIRED 0x000F /*!< Unaligned memory memory-reading or memory-writing instruction architecturally executed and condition code check pass */ +#define ARM_PMU_BR_MIS_PRED 0x0010 /*!< Mispredicted or not predicted branch speculatively executed */ +#define ARM_PMU_CPU_CYCLES 0x0011 /*!< Cycle */ +#define ARM_PMU_BR_PRED 0x0012 /*!< Predictable branch speculatively executed */ +#define ARM_PMU_MEM_ACCESS 0x0013 /*!< Data memory access */ +#define ARM_PMU_L1I_CACHE 0x0014 /*!< Level 1 instruction cache access */ +#define ARM_PMU_L1D_CACHE_WB 0x0015 /*!< Level 1 data cache write-back */ +#define ARM_PMU_L2D_CACHE 0x0016 /*!< Level 2 data cache access */ +#define ARM_PMU_L2D_CACHE_REFILL 0x0017 /*!< Level 2 data cache refill */ +#define ARM_PMU_L2D_CACHE_WB 0x0018 /*!< Level 2 data cache write-back */ +#define ARM_PMU_BUS_ACCESS 0x0019 /*!< Bus access */ +#define ARM_PMU_MEMORY_ERROR 0x001A /*!< Local memory error */ +#define ARM_PMU_INST_SPEC 0x001B /*!< Instruction speculatively executed */ +#define ARM_PMU_BUS_CYCLES 0x001D /*!< Bus cycles */ +#define ARM_PMU_CHAIN 0x001E /*!< For an odd numbered counter, increment when an overflow occurs on the preceding even-numbered counter on the same PE */ +#define ARM_PMU_L1D_CACHE_ALLOCATE 0x001F /*!< Level 1 data cache allocation without refill */ +#define ARM_PMU_L2D_CACHE_ALLOCATE 0x0020 /*!< Level 2 data cache allocation without refill */ +#define ARM_PMU_BR_RETIRED 0x0021 /*!< Branch instruction architecturally executed */ +#define ARM_PMU_BR_MIS_PRED_RETIRED 0x0022 /*!< Mispredicted branch instruction architecturally executed */ +#define ARM_PMU_STALL_FRONTEND 0x0023 /*!< No operation issued because of the frontend */ +#define ARM_PMU_STALL_BACKEND 0x0024 /*!< No operation issued because of the backend */ +#define ARM_PMU_L2I_CACHE 0x0027 /*!< Level 2 instruction cache access */ +#define ARM_PMU_L2I_CACHE_REFILL 0x0028 /*!< Level 2 instruction cache refill */ +#define ARM_PMU_L3D_CACHE_ALLOCATE 0x0029 /*!< Level 3 data cache allocation without refill */ +#define ARM_PMU_L3D_CACHE_REFILL 0x002A /*!< Level 3 data cache refill */ +#define ARM_PMU_L3D_CACHE 0x002B /*!< Level 3 data cache access */ +#define ARM_PMU_L3D_CACHE_WB 0x002C /*!< Level 3 data cache write-back */ +#define 
ARM_PMU_LL_CACHE_RD 0x0036 /*!< Last level data cache read */ +#define ARM_PMU_LL_CACHE_MISS_RD 0x0037 /*!< Last level data cache read miss */ +#define ARM_PMU_L1D_CACHE_MISS_RD 0x0039 /*!< Level 1 data cache read miss */ +#define ARM_PMU_OP_COMPLETE 0x003A /*!< Operation retired */ +#define ARM_PMU_OP_SPEC 0x003B /*!< Operation speculatively executed */ +#define ARM_PMU_STALL 0x003C /*!< Stall cycle for instruction or operation not sent for execution */ +#define ARM_PMU_STALL_OP_BACKEND 0x003D /*!< Stall cycle for instruction or operation not sent for execution due to pipeline backend */ +#define ARM_PMU_STALL_OP_FRONTEND 0x003E /*!< Stall cycle for instruction or operation not sent for execution due to pipeline frontend */ +#define ARM_PMU_STALL_OP 0x003F /*!< Instruction or operation slots not occupied each cycle */ +#define ARM_PMU_L1D_CACHE_RD 0x0040 /*!< Level 1 data cache read */ +#define ARM_PMU_LE_RETIRED 0x0100 /*!< Loop end instruction executed */ +#define ARM_PMU_LE_SPEC 0x0101 /*!< Loop end instruction speculatively executed */ +#define ARM_PMU_BF_RETIRED 0x0104 /*!< Branch future instruction architecturally executed and condition code check pass */ +#define ARM_PMU_BF_SPEC 0x0105 /*!< Branch future instruction speculatively executed and condition code check pass */ +#define ARM_PMU_LE_CANCEL 0x0108 /*!< Loop end instruction not taken */ +#define ARM_PMU_BF_CANCEL 0x0109 /*!< Branch future instruction not taken */ +#define ARM_PMU_SE_CALL_S 0x0114 /*!< Call to secure function, resulting in Security state change */ +#define ARM_PMU_SE_CALL_NS 0x0115 /*!< Call to non-secure function, resulting in Security state change */ +#define ARM_PMU_DWT_CMPMATCH0 0x0118 /*!< DWT comparator 0 match */ +#define ARM_PMU_DWT_CMPMATCH1 0x0119 /*!< DWT comparator 1 match */ +#define ARM_PMU_DWT_CMPMATCH2 0x011A /*!< DWT comparator 2 match */ +#define ARM_PMU_DWT_CMPMATCH3 0x011B /*!< DWT comparator 3 match */ +#define ARM_PMU_MVE_INST_RETIRED 0x0200 /*!< MVE instruction architecturally executed */ +#define ARM_PMU_MVE_INST_SPEC 0x0201 /*!< MVE instruction speculatively executed */ +#define ARM_PMU_MVE_FP_RETIRED 0x0204 /*!< MVE floating-point instruction architecturally executed */ +#define ARM_PMU_MVE_FP_SPEC 0x0205 /*!< MVE floating-point instruction speculatively executed */ +#define ARM_PMU_MVE_FP_HP_RETIRED 0x0208 /*!< MVE half-precision floating-point instruction architecturally executed */ +#define ARM_PMU_MVE_FP_HP_SPEC 0x0209 /*!< MVE half-precision floating-point instruction speculatively executed */ +#define ARM_PMU_MVE_FP_SP_RETIRED 0x020C /*!< MVE single-precision floating-point instruction architecturally executed */ +#define ARM_PMU_MVE_FP_SP_SPEC 0x020D /*!< MVE single-precision floating-point instruction speculatively executed */ +#define ARM_PMU_MVE_FP_MAC_RETIRED 0x0214 /*!< MVE floating-point multiply or multiply-accumulate instruction architecturally executed */ +#define ARM_PMU_MVE_FP_MAC_SPEC 0x0215 /*!< MVE floating-point multiply or multiply-accumulate instruction speculatively executed */ +#define ARM_PMU_MVE_INT_RETIRED 0x0224 /*!< MVE integer instruction architecturally executed */ +#define ARM_PMU_MVE_INT_SPEC 0x0225 /*!< MVE integer instruction speculatively executed */ +#define ARM_PMU_MVE_INT_MAC_RETIRED 0x0228 /*!< MVE multiply or multiply-accumulate instruction architecturally executed */ +#define ARM_PMU_MVE_INT_MAC_SPEC 0x0229 /*!< MVE multiply or multiply-accumulate instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_RETIRED 0x0238 /*!< MVE load or 
store instruction architecturally executed */ +#define ARM_PMU_MVE_LDST_SPEC 0x0239 /*!< MVE load or store instruction speculatively executed */ +#define ARM_PMU_MVE_LD_RETIRED 0x023C /*!< MVE load instruction architecturally executed */ +#define ARM_PMU_MVE_LD_SPEC 0x023D /*!< MVE load instruction speculatively executed */ +#define ARM_PMU_MVE_ST_RETIRED 0x0240 /*!< MVE store instruction architecturally executed */ +#define ARM_PMU_MVE_ST_SPEC 0x0241 /*!< MVE store instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_CONTIG_RETIRED 0x0244 /*!< MVE contiguous load or store instruction architecturally executed */ +#define ARM_PMU_MVE_LDST_CONTIG_SPEC 0x0245 /*!< MVE contiguous load or store instruction speculatively executed */ +#define ARM_PMU_MVE_LD_CONTIG_RETIRED 0x0248 /*!< MVE contiguous load instruction architecturally executed */ +#define ARM_PMU_MVE_LD_CONTIG_SPEC 0x0249 /*!< MVE contiguous load instruction speculatively executed */ +#define ARM_PMU_MVE_ST_CONTIG_RETIRED 0x024C /*!< MVE contiguous store instruction architecturally executed */ +#define ARM_PMU_MVE_ST_CONTIG_SPEC 0x024D /*!< MVE contiguous store instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_NONCONTIG_RETIRED 0x0250 /*!< MVE non-contiguous load or store instruction architecturally executed */ +#define ARM_PMU_MVE_LDST_NONCONTIG_SPEC 0x0251 /*!< MVE non-contiguous load or store instruction speculatively executed */ +#define ARM_PMU_MVE_LD_NONCONTIG_RETIRED 0x0254 /*!< MVE non-contiguous load instruction architecturally executed */ +#define ARM_PMU_MVE_LD_NONCONTIG_SPEC 0x0255 /*!< MVE non-contiguous load instruction speculatively executed */ +#define ARM_PMU_MVE_ST_NONCONTIG_RETIRED 0x0258 /*!< MVE non-contiguous store instruction architecturally executed */ +#define ARM_PMU_MVE_ST_NONCONTIG_SPEC 0x0259 /*!< MVE non-contiguous store instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_MULTI_RETIRED 0x025C /*!< MVE memory instruction targeting multiple registers architecturally executed */ +#define ARM_PMU_MVE_LDST_MULTI_SPEC 0x025D /*!< MVE memory instruction targeting multiple registers speculatively executed */ +#define ARM_PMU_MVE_LD_MULTI_RETIRED 0x0260 /*!< MVE memory load instruction targeting multiple registers architecturally executed */ +#define ARM_PMU_MVE_LD_MULTI_SPEC 0x0261 /*!< MVE memory load instruction targeting multiple registers speculatively executed */ +#define ARM_PMU_MVE_ST_MULTI_RETIRED 0x0261 /*!< MVE memory store instruction targeting multiple registers architecturally executed */ +#define ARM_PMU_MVE_ST_MULTI_SPEC 0x0265 /*!< MVE memory store instruction targeting multiple registers speculatively executed */ +#define ARM_PMU_MVE_LDST_UNALIGNED_RETIRED 0x028C /*!< MVE unaligned memory load or store instruction architecturally executed */ +#define ARM_PMU_MVE_LDST_UNALIGNED_SPEC 0x028D /*!< MVE unaligned memory load or store instruction speculatively executed */ +#define ARM_PMU_MVE_LD_UNALIGNED_RETIRED 0x0290 /*!< MVE unaligned load instruction architecturally executed */ +#define ARM_PMU_MVE_LD_UNALIGNED_SPEC 0x0291 /*!< MVE unaligned load instruction speculatively executed */ +#define ARM_PMU_MVE_ST_UNALIGNED_RETIRED 0x0294 /*!< MVE unaligned store instruction architecturally executed */ +#define ARM_PMU_MVE_ST_UNALIGNED_SPEC 0x0295 /*!< MVE unaligned store instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_UNALIGNED_NONCONTIG_RETIRED 0x0298 /*!< MVE unaligned noncontiguous load or store instruction architecturally executed */ +#define 
ARM_PMU_MVE_LDST_UNALIGNED_NONCONTIG_SPEC 0x0299 /*!< MVE unaligned noncontiguous load or store instruction speculatively executed */ +#define ARM_PMU_MVE_VREDUCE_RETIRED 0x02A0 /*!< MVE vector reduction instruction architecturally executed */ +#define ARM_PMU_MVE_VREDUCE_SPEC 0x02A1 /*!< MVE vector reduction instruction speculatively executed */ +#define ARM_PMU_MVE_VREDUCE_FP_RETIRED 0x02A4 /*!< MVE floating-point vector reduction instruction architecturally executed */ +#define ARM_PMU_MVE_VREDUCE_FP_SPEC 0x02A5 /*!< MVE floating-point vector reduction instruction speculatively executed */ +#define ARM_PMU_MVE_VREDUCE_INT_RETIRED 0x02A8 /*!< MVE integer vector reduction instruction architecturally executed */ +#define ARM_PMU_MVE_VREDUCE_INT_SPEC 0x02A9 /*!< MVE integer vector reduction instruction speculatively executed */ +#define ARM_PMU_MVE_PRED 0x02B8 /*!< Cycles where one or more predicated beats architecturally executed */ +#define ARM_PMU_MVE_STALL 0x02CC /*!< Stall cycles caused by an MVE instruction */ +#define ARM_PMU_MVE_STALL_RESOURCE 0x02CD /*!< Stall cycles caused by an MVE instruction because of resource conflicts */ +#define ARM_PMU_MVE_STALL_RESOURCE_MEM 0x02CE /*!< Stall cycles caused by an MVE instruction because of memory resource conflicts */ +#define ARM_PMU_MVE_STALL_RESOURCE_FP 0x02CF /*!< Stall cycles caused by an MVE instruction because of floating-point resource conflicts */ +#define ARM_PMU_MVE_STALL_RESOURCE_INT 0x02D0 /*!< Stall cycles caused by an MVE instruction because of integer resource conflicts */ +#define ARM_PMU_MVE_STALL_BREAK 0x02D3 /*!< Stall cycles caused by an MVE chain break */ +#define ARM_PMU_MVE_STALL_DEPENDENCY 0x02D4 /*!< Stall cycles caused by MVE register dependency */ +#define ARM_PMU_ITCM_ACCESS 0x4007 /*!< Instruction TCM access */ +#define ARM_PMU_DTCM_ACCESS 0x4008 /*!< Data TCM access */ +#define ARM_PMU_TRCEXTOUT0 0x4010 /*!< ETM external output 0 */ +#define ARM_PMU_TRCEXTOUT1 0x4011 /*!< ETM external output 1 */ +#define ARM_PMU_TRCEXTOUT2 0x4012 /*!< ETM external output 2 */ +#define ARM_PMU_TRCEXTOUT3 0x4013 /*!< ETM external output 3 */ +#define ARM_PMU_CTI_TRIGOUT4 0x4018 /*!< Cross-trigger Interface output trigger 4 */ +#define ARM_PMU_CTI_TRIGOUT5 0x4019 /*!< Cross-trigger Interface output trigger 5 */ +#define ARM_PMU_CTI_TRIGOUT6 0x401A /*!< Cross-trigger Interface output trigger 6 */ +#define ARM_PMU_CTI_TRIGOUT7 0x401B /*!< Cross-trigger Interface output trigger 7 */ + +/** \brief PMU Functions */ + +__STATIC_INLINE void ARM_PMU_Enable(void); +__STATIC_INLINE void ARM_PMU_Disable(void); + +__STATIC_INLINE void ARM_PMU_Set_EVTYPER(uint32_t num, uint32_t type); + +__STATIC_INLINE void ARM_PMU_CYCCNT_Reset(void); +__STATIC_INLINE void ARM_PMU_EVCNTR_ALL_Reset(void); + +__STATIC_INLINE void ARM_PMU_CNTR_Enable(uint32_t mask); +__STATIC_INLINE void ARM_PMU_CNTR_Disable(uint32_t mask); + +__STATIC_INLINE uint32_t ARM_PMU_Get_CCNTR(void); +__STATIC_INLINE uint32_t ARM_PMU_Get_EVCNTR(uint32_t num); + +__STATIC_INLINE uint32_t ARM_PMU_Get_CNTR_OVS(void); +__STATIC_INLINE void ARM_PMU_Set_CNTR_OVS(uint32_t mask); + +__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Enable(uint32_t mask); +__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Disable(uint32_t mask); + +__STATIC_INLINE void ARM_PMU_CNTR_Increment(uint32_t mask); + +/** + \brief Enable the PMU +*/ +__STATIC_INLINE void ARM_PMU_Enable(void) +{ + PMU->CTRL |= PMU_CTRL_ENABLE_Msk; +} + +/** + \brief Disable the PMU +*/ +__STATIC_INLINE void ARM_PMU_Disable(void) +{ + PMU->CTRL &= 
~PMU_CTRL_ENABLE_Msk; +} + +/** + \brief Set event to count for PMU eventer counter + \param [in] num Event counter (0-30) to configure + \param [in] type Event to count +*/ +__STATIC_INLINE void ARM_PMU_Set_EVTYPER(uint32_t num, uint32_t type) +{ + PMU->EVTYPER[num] = type; +} + +/** + \brief Reset cycle counter +*/ +__STATIC_INLINE void ARM_PMU_CYCCNT_Reset(void) +{ + PMU->CTRL |= PMU_CTRL_CYCCNT_RESET_Msk; +} + +/** + \brief Reset all event counters +*/ +__STATIC_INLINE void ARM_PMU_EVCNTR_ALL_Reset(void) +{ + PMU->CTRL |= PMU_CTRL_EVENTCNT_RESET_Msk; +} + +/** + \brief Enable counters + \param [in] mask Counters to enable + \note Enables one or more of the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE void ARM_PMU_CNTR_Enable(uint32_t mask) +{ + PMU->CNTENSET = mask; +} + +/** + \brief Disable counters + \param [in] mask Counters to enable + \note Disables one or more of the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE void ARM_PMU_CNTR_Disable(uint32_t mask) +{ + PMU->CNTENCLR = mask; +} + +/** + \brief Read cycle counter + \return Cycle count +*/ +__STATIC_INLINE uint32_t ARM_PMU_Get_CCNTR(void) +{ + return PMU->CCNTR; +} + +/** + \brief Read event counter + \param [in] num Event counter (0-30) to read + \return Event count +*/ +__STATIC_INLINE uint32_t ARM_PMU_Get_EVCNTR(uint32_t num) +{ + return PMU_EVCNTR_CNT_Msk & PMU->EVCNTR[num]; +} + +/** + \brief Read counter overflow status + \return Counter overflow status bits for the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE uint32_t ARM_PMU_Get_CNTR_OVS(void) +{ + return PMU->OVSSET; +} + +/** + \brief Clear counter overflow status + \param [in] mask Counter overflow status bits to clear + \note Clears overflow status bits for one or more of the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE void ARM_PMU_Set_CNTR_OVS(uint32_t mask) +{ + PMU->OVSCLR = mask; +} + +/** + \brief Enable counter overflow interrupt request + \param [in] mask Counter overflow interrupt request bits to set + \note Sets overflow interrupt request bits for one or more of the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Enable(uint32_t mask) +{ + PMU->INTENSET = mask; +} + +/** + \brief Disable counter overflow interrupt request + \param [in] mask Counter overflow interrupt request bits to clear + \note Clears overflow interrupt request bits for one or more of the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Disable(uint32_t mask) +{ + PMU->INTENCLR = mask; +} + +/** + \brief Software increment event counter + \param [in] mask Counters to increment + \note Software increment bits for one or more event counters (0-30) +*/ +__STATIC_INLINE void ARM_PMU_CNTR_Increment(uint32_t mask) +{ + PMU->SWINC = mask; +} + +#endif diff --git a/Drivers/CMSIS/LICENSE.txt b/Drivers/CMSIS/LICENSE.txt new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/Drivers/CMSIS/LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
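As an illustration of the pmu_armv8.h API added above (a sketch only, not part of the patch: it assumes a core that actually implements the Armv8.1-M PMU rather than the Cortex-M4 class device this STM32F4 project targets, a hypothetical device header "device.h" that pulls in the PMU register definitions, and that bit n of CNTENSET/CNTENCLR controls event counter n), counting retired instructions on event counter 0 might look like:

/* Illustrative sketch only: requires a device with the Armv8.1-M PMU. */
#include "device.h"                              /* hypothetical device header providing PMU and pmu_armv8.h */

uint32_t measure_retired_instructions(void)
{
  ARM_PMU_Set_EVTYPER(0U, ARM_PMU_INST_RETIRED); /* event counter 0 counts architecturally executed instructions */
  ARM_PMU_EVCNTR_ALL_Reset();                    /* clear all event counters */
  ARM_PMU_CNTR_Enable(1UL << 0);                 /* assumption: bit n of CNTENSET enables event counter n */
  ARM_PMU_Enable();                              /* set PMU_CTRL.ENABLE */

  /* ... code under measurement ... */

  ARM_PMU_CNTR_Disable(1UL << 0);
  return ARM_PMU_Get_EVCNTR(0U);                 /* masked count read back from event counter 0 */
}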
diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h b/Drivers/STM32F4xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h index 90767ed3..239d1495 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h @@ -7,13 +7,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2019 STMicroelectronics. - * All rights reserved.
+ * Copyright (c) 2021 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -23,7 +22,7 @@ #define STM32_HAL_LEGACY #ifdef __cplusplus - extern "C" { +extern "C" { #endif /* Includes ------------------------------------------------------------------*/ @@ -38,7 +37,12 @@ #define AES_CLEARFLAG_CCF CRYP_CLEARFLAG_CCF #define AES_CLEARFLAG_RDERR CRYP_CLEARFLAG_RDERR #define AES_CLEARFLAG_WRERR CRYP_CLEARFLAG_WRERR - +#if defined(STM32H7) || defined(STM32MP1) +#define CRYP_DATATYPE_32B CRYP_NO_SWAP +#define CRYP_DATATYPE_16B CRYP_HALFWORD_SWAP +#define CRYP_DATATYPE_8B CRYP_BYTE_SWAP +#define CRYP_DATATYPE_1B CRYP_BIT_SWAP +#endif /* STM32H7 || STM32MP1 */ /** * @} */ @@ -98,6 +102,16 @@ #if defined(STM32H7) #define ADC_CHANNEL_VBAT_DIV4 ADC_CHANNEL_VBAT #endif /* STM32H7 */ + +#if defined(STM32U5) +#define ADC_SAMPLETIME_5CYCLE ADC_SAMPLETIME_5CYCLES +#define ADC_SAMPLETIME_391CYCLES_5 ADC_SAMPLETIME_391CYCLES +#define ADC4_SAMPLETIME_160CYCLES_5 ADC4_SAMPLETIME_814CYCLES_5 +#endif /* STM32U5 */ + +#if defined(STM32H5) +#define ADC_CHANNEL_VCORE ADC_CHANNEL_VDDCORE +#endif /* STM32H5 */ /** * @} */ @@ -125,7 +139,8 @@ #define COMP_EXTI_LINE_COMP6_EVENT COMP_EXTI_LINE_COMP6 #define COMP_EXTI_LINE_COMP7_EVENT COMP_EXTI_LINE_COMP7 #if defined(STM32L0) -#define COMP_LPTIMCONNECTION_ENABLED ((uint32_t)0x00000003U) /*!< COMPX output generic naming: connected to LPTIM input 1 for COMP1, LPTIM input 2 for COMP2 */ +#define COMP_LPTIMCONNECTION_ENABLED ((uint32_t)0x00000003U) /*!< COMPX output generic naming: connected to LPTIM + input 1 for COMP1, LPTIM input 2 for COMP2 */ #endif #define COMP_OUTPUT_COMP6TIM2OCREFCLR COMP_OUTPUT_COMP6_TIM2OCREFCLR #if defined(STM32F373xC) || defined(STM32F378xx) @@ -199,6 +214,11 @@ #endif #endif + +#if defined(STM32U5) +#define __HAL_COMP_COMP1_EXTI_CLEAR_RASING_FLAG __HAL_COMP_COMP1_EXTI_CLEAR_RISING_FLAG +#endif + /** * @} */ @@ -207,6 +227,25 @@ * @{ */ #define __HAL_CORTEX_SYSTICKCLK_CONFIG HAL_SYSTICK_CLKSourceConfig +#if defined(STM32U5) +#define MPU_DEVICE_nGnRnE MPU_DEVICE_NGNRNE +#define MPU_DEVICE_nGnRE MPU_DEVICE_NGNRE +#define MPU_DEVICE_nGRE MPU_DEVICE_NGRE +#endif /* STM32U5 */ +/** + * @} + */ + +/** @defgroup CRC_Aliases CRC API aliases + * @{ + */ +#if defined(STM32H5) || defined(STM32C0) +#else +#define HAL_CRC_Input_Data_Reverse HAL_CRCEx_Input_Data_Reverse /*!< Aliased to HAL_CRCEx_Input_Data_Reverse for + inter STM32 series compatibility */ +#define HAL_CRC_Output_Data_Reverse HAL_CRCEx_Output_Data_Reverse /*!< Aliased to HAL_CRCEx_Output_Data_Reverse for + inter STM32 series compatibility */ +#endif /** * @} */ @@ -236,12 +275,25 @@ #define DAC_WAVEGENERATION_NOISE DAC_WAVE_NOISE #define DAC_WAVEGENERATION_TRIANGLE DAC_WAVE_TRIANGLE -#if defined(STM32G4) || defined(STM32H7) +#if defined(STM32G4) || defined(STM32H7) || defined (STM32U5) #define DAC_CHIPCONNECT_DISABLE DAC_CHIPCONNECT_EXTERNAL #define DAC_CHIPCONNECT_ENABLE DAC_CHIPCONNECT_INTERNAL #endif -#if defined(STM32L1) || defined(STM32L4) || defined(STM32G0) || 
defined(STM32L5) || defined(STM32H7) || defined(STM32F4) +#if defined(STM32U5) +#define DAC_TRIGGER_STOP_LPTIM1_OUT DAC_TRIGGER_STOP_LPTIM1_CH1 +#define DAC_TRIGGER_STOP_LPTIM3_OUT DAC_TRIGGER_STOP_LPTIM3_CH1 +#define DAC_TRIGGER_LPTIM1_OUT DAC_TRIGGER_LPTIM1_CH1 +#define DAC_TRIGGER_LPTIM3_OUT DAC_TRIGGER_LPTIM3_CH1 +#endif + +#if defined(STM32H5) +#define DAC_TRIGGER_LPTIM1_OUT DAC_TRIGGER_LPTIM1_CH1 +#define DAC_TRIGGER_LPTIM2_OUT DAC_TRIGGER_LPTIM2_CH1 +#endif + +#if defined(STM32L1) || defined(STM32L4) || defined(STM32G0) || defined(STM32L5) || defined(STM32H7) || \ + defined(STM32F4) || defined(STM32G4) #define HAL_DAC_MSP_INIT_CB_ID HAL_DAC_MSPINIT_CB_ID #define HAL_DAC_MSP_DEINIT_CB_ID HAL_DAC_MSPDEINIT_CB_ID #endif @@ -306,15 +358,21 @@ #define HAL_DMAMUX_REQUEST_GEN_FALLING HAL_DMAMUX_REQ_GEN_FALLING #define HAL_DMAMUX_REQUEST_GEN_RISING_FALLING HAL_DMAMUX_REQ_GEN_RISING_FALLING -#if defined(STM32L4R5xx) || defined(STM32L4R9xx) || defined(STM32L4R9xx) || defined(STM32L4S5xx) || defined(STM32L4S7xx) || defined(STM32L4S9xx) +#if defined(STM32L4R5xx) || defined(STM32L4R9xx) || defined(STM32L4R9xx) || defined(STM32L4S5xx) || \ + defined(STM32L4S7xx) || defined(STM32L4S9xx) #define DMA_REQUEST_DCMI_PSSI DMA_REQUEST_DCMI #endif #endif /* STM32L4 */ #if defined(STM32G0) -#define DMA_REQUEST_DAC1_CHANNEL1 DMA_REQUEST_DAC1_CH1 -#define DMA_REQUEST_DAC1_CHANNEL2 DMA_REQUEST_DAC1_CH2 +#define DMA_REQUEST_DAC1_CHANNEL1 DMA_REQUEST_DAC1_CH1 +#define DMA_REQUEST_DAC1_CHANNEL2 DMA_REQUEST_DAC1_CH2 +#define DMA_REQUEST_TIM16_TRIG_COM DMA_REQUEST_TIM16_COM +#define DMA_REQUEST_TIM17_TRIG_COM DMA_REQUEST_TIM17_COM + +#define LL_DMAMUX_REQ_TIM16_TRIG_COM LL_DMAMUX_REQ_TIM16_COM +#define LL_DMAMUX_REQ_TIM17_TRIG_COM LL_DMAMUX_REQ_TIM17_COM #endif #if defined(STM32H7) @@ -379,6 +437,9 @@ #endif /* STM32H7 */ +#if defined(STM32U5) +#define GPDMA1_REQUEST_DCMI GPDMA1_REQUEST_DCMI_PSSI +#endif /* STM32U5 */ /** * @} */ @@ -458,7 +519,7 @@ #define OB_RDP_LEVEL0 OB_RDP_LEVEL_0 #define OB_RDP_LEVEL1 OB_RDP_LEVEL_1 #define OB_RDP_LEVEL2 OB_RDP_LEVEL_2 -#if defined(STM32G0) +#if defined(STM32G0) || defined(STM32C0) #define OB_BOOT_LOCK_DISABLE OB_BOOT_ENTRY_FORCED_NONE #define OB_BOOT_LOCK_ENABLE OB_BOOT_ENTRY_FORCED_FLASH #else @@ -466,15 +527,37 @@ #define OB_BOOT_ENTRY_FORCED_FLASH OB_BOOT_LOCK_ENABLE #endif #if defined(STM32H7) -#define FLASH_FLAG_SNECCE_BANK1RR FLASH_FLAG_SNECCERR_BANK1 -#define FLASH_FLAG_DBECCE_BANK1RR FLASH_FLAG_DBECCERR_BANK1 -#define FLASH_FLAG_STRBER_BANK1R FLASH_FLAG_STRBERR_BANK1 -#define FLASH_FLAG_SNECCE_BANK2RR FLASH_FLAG_SNECCERR_BANK2 -#define FLASH_FLAG_DBECCE_BANK2RR FLASH_FLAG_DBECCERR_BANK2 -#define FLASH_FLAG_STRBER_BANK2R FLASH_FLAG_STRBERR_BANK2 -#define FLASH_FLAG_WDW FLASH_FLAG_WBNE -#define OB_WRP_SECTOR_All OB_WRP_SECTOR_ALL +#define FLASH_FLAG_SNECCE_BANK1RR FLASH_FLAG_SNECCERR_BANK1 +#define FLASH_FLAG_DBECCE_BANK1RR FLASH_FLAG_DBECCERR_BANK1 +#define FLASH_FLAG_STRBER_BANK1R FLASH_FLAG_STRBERR_BANK1 +#define FLASH_FLAG_SNECCE_BANK2RR FLASH_FLAG_SNECCERR_BANK2 +#define FLASH_FLAG_DBECCE_BANK2RR FLASH_FLAG_DBECCERR_BANK2 +#define FLASH_FLAG_STRBER_BANK2R FLASH_FLAG_STRBERR_BANK2 +#define FLASH_FLAG_WDW FLASH_FLAG_WBNE +#define OB_WRP_SECTOR_All OB_WRP_SECTOR_ALL #endif /* STM32H7 */ +#if defined(STM32U5) +#define OB_USER_nRST_STOP OB_USER_NRST_STOP +#define OB_USER_nRST_STDBY OB_USER_NRST_STDBY +#define OB_USER_nRST_SHDW OB_USER_NRST_SHDW +#define OB_USER_nSWBOOT0 OB_USER_NSWBOOT0 +#define OB_USER_nBOOT0 OB_USER_NBOOT0 +#define OB_nBOOT0_RESET OB_NBOOT0_RESET 
+#define OB_nBOOT0_SET OB_NBOOT0_SET +#define OB_USER_SRAM134_RST OB_USER_SRAM_RST +#define OB_SRAM134_RST_ERASE OB_SRAM_RST_ERASE +#define OB_SRAM134_RST_NOT_ERASE OB_SRAM_RST_NOT_ERASE +#endif /* STM32U5 */ +#if defined(STM32U0) +#define OB_USER_nRST_STOP OB_USER_NRST_STOP +#define OB_USER_nRST_STDBY OB_USER_NRST_STDBY +#define OB_USER_nRST_SHDW OB_USER_NRST_SHDW +#define OB_USER_nBOOT_SEL OB_USER_NBOOT_SEL +#define OB_USER_nBOOT0 OB_USER_NBOOT0 +#define OB_USER_nBOOT1 OB_USER_NBOOT1 +#define OB_nBOOT0_RESET OB_NBOOT0_RESET +#define OB_nBOOT0_SET OB_NBOOT0_SET +#endif /* STM32U0 */ /** * @} @@ -517,6 +600,107 @@ #define HAL_SYSCFG_EnableIOAnalogSwitchVDD HAL_SYSCFG_EnableIOSwitchVDD #define HAL_SYSCFG_DisableIOAnalogSwitchVDD HAL_SYSCFG_DisableIOSwitchVDD #endif /* STM32G4 */ + +#if defined(STM32H5) +#define SYSCFG_IT_FPU_IOC SBS_IT_FPU_IOC +#define SYSCFG_IT_FPU_DZC SBS_IT_FPU_DZC +#define SYSCFG_IT_FPU_UFC SBS_IT_FPU_UFC +#define SYSCFG_IT_FPU_OFC SBS_IT_FPU_OFC +#define SYSCFG_IT_FPU_IDC SBS_IT_FPU_IDC +#define SYSCFG_IT_FPU_IXC SBS_IT_FPU_IXC + +#define SYSCFG_BREAK_FLASH_ECC SBS_BREAK_FLASH_ECC +#define SYSCFG_BREAK_PVD SBS_BREAK_PVD +#define SYSCFG_BREAK_SRAM_ECC SBS_BREAK_SRAM_ECC +#define SYSCFG_BREAK_LOCKUP SBS_BREAK_LOCKUP + +#define SYSCFG_VREFBUF_VOLTAGE_SCALE0 VREFBUF_VOLTAGE_SCALE0 +#define SYSCFG_VREFBUF_VOLTAGE_SCALE1 VREFBUF_VOLTAGE_SCALE1 +#define SYSCFG_VREFBUF_VOLTAGE_SCALE2 VREFBUF_VOLTAGE_SCALE2 +#define SYSCFG_VREFBUF_VOLTAGE_SCALE3 VREFBUF_VOLTAGE_SCALE3 + +#define SYSCFG_VREFBUF_HIGH_IMPEDANCE_DISABLE VREFBUF_HIGH_IMPEDANCE_DISABLE +#define SYSCFG_VREFBUF_HIGH_IMPEDANCE_ENABLE VREFBUF_HIGH_IMPEDANCE_ENABLE + +#define SYSCFG_FASTMODEPLUS_PB6 SBS_FASTMODEPLUS_PB6 +#define SYSCFG_FASTMODEPLUS_PB7 SBS_FASTMODEPLUS_PB7 +#define SYSCFG_FASTMODEPLUS_PB8 SBS_FASTMODEPLUS_PB8 +#define SYSCFG_FASTMODEPLUS_PB9 SBS_FASTMODEPLUS_PB9 + +#define SYSCFG_ETH_MII SBS_ETH_MII +#define SYSCFG_ETH_RMII SBS_ETH_RMII +#define IS_SYSCFG_ETHERNET_CONFIG IS_SBS_ETHERNET_CONFIG + +#define SYSCFG_MEMORIES_ERASE_FLAG_IPMEE SBS_MEMORIES_ERASE_FLAG_IPMEE +#define SYSCFG_MEMORIES_ERASE_FLAG_MCLR SBS_MEMORIES_ERASE_FLAG_MCLR +#define IS_SYSCFG_MEMORIES_ERASE_FLAG IS_SBS_MEMORIES_ERASE_FLAG + +#define IS_SYSCFG_CODE_CONFIG IS_SBS_CODE_CONFIG + +#define SYSCFG_MPU_NSEC SBS_MPU_NSEC +#define SYSCFG_VTOR_NSEC SBS_VTOR_NSEC +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#define SYSCFG_SAU SBS_SAU +#define SYSCFG_MPU_SEC SBS_MPU_SEC +#define SYSCFG_VTOR_AIRCR_SEC SBS_VTOR_AIRCR_SEC +#define SYSCFG_LOCK_ALL SBS_LOCK_ALL +#else +#define SYSCFG_LOCK_ALL SBS_LOCK_ALL +#endif /* __ARM_FEATURE_CMSE */ + +#define SYSCFG_CLK SBS_CLK +#define SYSCFG_CLASSB SBS_CLASSB +#define SYSCFG_FPU SBS_FPU +#define SYSCFG_ALL SBS_ALL + +#define SYSCFG_SEC SBS_SEC +#define SYSCFG_NSEC SBS_NSEC + +#define __HAL_SYSCFG_FPU_INTERRUPT_ENABLE __HAL_SBS_FPU_INTERRUPT_ENABLE +#define __HAL_SYSCFG_FPU_INTERRUPT_DISABLE __HAL_SBS_FPU_INTERRUPT_DISABLE + +#define __HAL_SYSCFG_BREAK_ECC_LOCK __HAL_SBS_BREAK_ECC_LOCK +#define __HAL_SYSCFG_BREAK_LOCKUP_LOCK __HAL_SBS_BREAK_LOCKUP_LOCK +#define __HAL_SYSCFG_BREAK_PVD_LOCK __HAL_SBS_BREAK_PVD_LOCK +#define __HAL_SYSCFG_BREAK_SRAM_ECC_LOCK __HAL_SBS_BREAK_SRAM_ECC_LOCK + +#define __HAL_SYSCFG_FASTMODEPLUS_ENABLE __HAL_SBS_FASTMODEPLUS_ENABLE +#define __HAL_SYSCFG_FASTMODEPLUS_DISABLE __HAL_SBS_FASTMODEPLUS_DISABLE + +#define __HAL_SYSCFG_GET_MEMORIES_ERASE_STATUS __HAL_SBS_GET_MEMORIES_ERASE_STATUS +#define __HAL_SYSCFG_CLEAR_MEMORIES_ERASE_STATUS 
__HAL_SBS_CLEAR_MEMORIES_ERASE_STATUS + +#define IS_SYSCFG_FPU_INTERRUPT IS_SBS_FPU_INTERRUPT +#define IS_SYSCFG_BREAK_CONFIG IS_SBS_BREAK_CONFIG +#define IS_SYSCFG_VREFBUF_VOLTAGE_SCALE IS_VREFBUF_VOLTAGE_SCALE +#define IS_SYSCFG_VREFBUF_HIGH_IMPEDANCE IS_VREFBUF_HIGH_IMPEDANCE +#define IS_SYSCFG_VREFBUF_TRIMMING IS_VREFBUF_TRIMMING +#define IS_SYSCFG_FASTMODEPLUS IS_SBS_FASTMODEPLUS +#define IS_SYSCFG_ITEMS_ATTRIBUTES IS_SBS_ITEMS_ATTRIBUTES +#define IS_SYSCFG_ATTRIBUTES IS_SBS_ATTRIBUTES +#define IS_SYSCFG_LOCK_ITEMS IS_SBS_LOCK_ITEMS + +#define HAL_SYSCFG_VREFBUF_VoltageScalingConfig HAL_VREFBUF_VoltageScalingConfig +#define HAL_SYSCFG_VREFBUF_HighImpedanceConfig HAL_VREFBUF_HighImpedanceConfig +#define HAL_SYSCFG_VREFBUF_TrimmingConfig HAL_VREFBUF_TrimmingConfig +#define HAL_SYSCFG_EnableVREFBUF HAL_EnableVREFBUF +#define HAL_SYSCFG_DisableVREFBUF HAL_DisableVREFBUF + +#define HAL_SYSCFG_EnableIOAnalogSwitchBooster HAL_SBS_EnableIOAnalogSwitchBooster +#define HAL_SYSCFG_DisableIOAnalogSwitchBooster HAL_SBS_DisableIOAnalogSwitchBooster +#define HAL_SYSCFG_ETHInterfaceSelect HAL_SBS_ETHInterfaceSelect + +#define HAL_SYSCFG_Lock HAL_SBS_Lock +#define HAL_SYSCFG_GetLock HAL_SBS_GetLock + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#define HAL_SYSCFG_ConfigAttributes HAL_SBS_ConfigAttributes +#define HAL_SYSCFG_GetConfigAttributes HAL_SBS_GetConfigAttributes +#endif /* __ARM_FEATURE_CMSE */ + +#endif /* STM32H5 */ + + /** * @} */ @@ -584,34 +768,72 @@ #define GPIO_AF10_OTG2_HS GPIO_AF10_OTG2_FS #define GPIO_AF10_OTG1_FS GPIO_AF10_OTG1_HS #define GPIO_AF12_OTG2_FS GPIO_AF12_OTG1_FS -#endif /*STM32H743xx || STM32H753xx || STM32H750xx || STM32H742xx || STM32H745xx || STM32H755xx || STM32H747xx || STM32H757xx */ +#endif /*STM32H743xx || STM32H753xx || STM32H750xx || STM32H742xx || STM32H745xx || STM32H755xx || STM32H747xx || \ + STM32H757xx */ #endif /* STM32H7 */ #define GPIO_AF0_LPTIM GPIO_AF0_LPTIM1 #define GPIO_AF1_LPTIM GPIO_AF1_LPTIM1 #define GPIO_AF2_LPTIM GPIO_AF2_LPTIM1 -#if defined(STM32L0) || defined(STM32L4) || defined(STM32F4) || defined(STM32F2) || defined(STM32F7) || defined(STM32G4) || defined(STM32H7) +#if defined(STM32L0) || defined(STM32L4) || defined(STM32F4) || defined(STM32F2) || defined(STM32F7) || \ + defined(STM32G4) || defined(STM32H7) || defined(STM32WB) || defined(STM32U5) #define GPIO_SPEED_LOW GPIO_SPEED_FREQ_LOW #define GPIO_SPEED_MEDIUM GPIO_SPEED_FREQ_MEDIUM #define GPIO_SPEED_FAST GPIO_SPEED_FREQ_HIGH #define GPIO_SPEED_HIGH GPIO_SPEED_FREQ_VERY_HIGH -#endif /* STM32L0 || STM32L4 || STM32F4 || STM32F2 || STM32F7 || STM32G4 || STM32H7*/ +#endif /* STM32L0 || STM32L4 || STM32F4 || STM32F2 || STM32F7 || STM32G4 || STM32H7 || STM32WB || STM32U5*/ #if defined(STM32L1) - #define GPIO_SPEED_VERY_LOW GPIO_SPEED_FREQ_LOW - #define GPIO_SPEED_LOW GPIO_SPEED_FREQ_MEDIUM - #define GPIO_SPEED_MEDIUM GPIO_SPEED_FREQ_HIGH - #define GPIO_SPEED_HIGH GPIO_SPEED_FREQ_VERY_HIGH +#define GPIO_SPEED_VERY_LOW GPIO_SPEED_FREQ_LOW +#define GPIO_SPEED_LOW GPIO_SPEED_FREQ_MEDIUM +#define GPIO_SPEED_MEDIUM GPIO_SPEED_FREQ_HIGH +#define GPIO_SPEED_HIGH GPIO_SPEED_FREQ_VERY_HIGH #endif /* STM32L1 */ #if defined(STM32F0) || defined(STM32F3) || defined(STM32F1) - #define GPIO_SPEED_LOW GPIO_SPEED_FREQ_LOW - #define GPIO_SPEED_MEDIUM GPIO_SPEED_FREQ_MEDIUM - #define GPIO_SPEED_HIGH GPIO_SPEED_FREQ_HIGH +#define GPIO_SPEED_LOW GPIO_SPEED_FREQ_LOW +#define GPIO_SPEED_MEDIUM GPIO_SPEED_FREQ_MEDIUM +#define GPIO_SPEED_HIGH GPIO_SPEED_FREQ_HIGH #endif /* STM32F0 || STM32F3 || 
STM32F1 */ #define GPIO_AF6_DFSDM GPIO_AF6_DFSDM1 + +#if defined(STM32U5) || defined(STM32H5) +#define GPIO_AF0_RTC_50Hz GPIO_AF0_RTC_50HZ +#endif /* STM32U5 || STM32H5 */ +#if defined(STM32U5) +#define GPIO_AF0_S2DSTOP GPIO_AF0_SRDSTOP +#define GPIO_AF11_LPGPIO GPIO_AF11_LPGPIO1 +#endif /* STM32U5 */ +/** + * @} + */ + +/** @defgroup HAL_GTZC_Aliased_Defines HAL GTZC Aliased Defines maintained for legacy purpose + * @{ + */ +#if defined(STM32U5) +#define GTZC_PERIPH_DCMI GTZC_PERIPH_DCMI_PSSI +#define GTZC_PERIPH_LTDC GTZC_PERIPH_LTDCUSB +#endif /* STM32U5 */ +#if defined(STM32H5) +#define GTZC_PERIPH_DAC12 GTZC_PERIPH_DAC1 +#define GTZC_PERIPH_ADC12 GTZC_PERIPH_ADC +#define GTZC_PERIPH_USBFS GTZC_PERIPH_USB +#endif /* STM32H5 */ +#if defined(STM32H5) || defined(STM32U5) +#define GTZC_MCPBB_NB_VCTR_REG_MAX GTZC_MPCBB_NB_VCTR_REG_MAX +#define GTZC_MCPBB_NB_LCK_VCTR_REG_MAX GTZC_MPCBB_NB_LCK_VCTR_REG_MAX +#define GTZC_MCPBB_SUPERBLOCK_UNLOCKED GTZC_MPCBB_SUPERBLOCK_UNLOCKED +#define GTZC_MCPBB_SUPERBLOCK_LOCKED GTZC_MPCBB_SUPERBLOCK_LOCKED +#define GTZC_MCPBB_BLOCK_NSEC GTZC_MPCBB_BLOCK_NSEC +#define GTZC_MCPBB_BLOCK_SEC GTZC_MPCBB_BLOCK_SEC +#define GTZC_MCPBB_BLOCK_NPRIV GTZC_MPCBB_BLOCK_NPRIV +#define GTZC_MCPBB_BLOCK_PRIV GTZC_MPCBB_BLOCK_PRIV +#define GTZC_MCPBB_LOCK_OFF GTZC_MPCBB_LOCK_OFF +#define GTZC_MCPBB_LOCK_ON GTZC_MPCBB_LOCK_ON +#endif /* STM32H5 || STM32U5 */ /** * @} */ @@ -643,6 +865,10 @@ #define HAL_HRTIM_ExternalEventCounterEnable HAL_HRTIM_ExtEventCounterEnable #define HAL_HRTIM_ExternalEventCounterDisable HAL_HRTIM_ExtEventCounterDisable #define HAL_HRTIM_ExternalEventCounterReset HAL_HRTIM_ExtEventCounterReset +#define HRTIM_TIMEEVENT_A HRTIM_EVENTCOUNTER_A +#define HRTIM_TIMEEVENT_B HRTIM_EVENTCOUNTER_B +#define HRTIM_TIMEEVENTRESETMODE_UNCONDITIONAL HRTIM_EVENTCOUNTER_RSTMODE_UNCONDITIONAL +#define HRTIM_TIMEEVENTRESETMODE_CONDITIONAL HRTIM_EVENTCOUNTER_RSTMODE_CONDITIONAL #endif /* STM32G4 */ #if defined(STM32H7) @@ -765,49 +991,6 @@ #define HRTIM_EVENTSRC_3 (HRTIM_EECR1_EE1SRC_1) #define HRTIM_EVENTSRC_4 (HRTIM_EECR1_EE1SRC_1 | HRTIM_EECR1_EE1SRC_0) -/** @brief Constants defining the events that can be selected to configure the - * set/reset crossbar of a timer output - */ -#define HRTIM_OUTPUTSET_TIMEV_1 (HRTIM_SET1R_TIMEVNT1) -#define HRTIM_OUTPUTSET_TIMEV_2 (HRTIM_SET1R_TIMEVNT2) -#define HRTIM_OUTPUTSET_TIMEV_3 (HRTIM_SET1R_TIMEVNT3) -#define HRTIM_OUTPUTSET_TIMEV_4 (HRTIM_SET1R_TIMEVNT4) -#define HRTIM_OUTPUTSET_TIMEV_5 (HRTIM_SET1R_TIMEVNT5) -#define HRTIM_OUTPUTSET_TIMEV_6 (HRTIM_SET1R_TIMEVNT6) -#define HRTIM_OUTPUTSET_TIMEV_7 (HRTIM_SET1R_TIMEVNT7) -#define HRTIM_OUTPUTSET_TIMEV_8 (HRTIM_SET1R_TIMEVNT8) -#define HRTIM_OUTPUTSET_TIMEV_9 (HRTIM_SET1R_TIMEVNT9) - -#define HRTIM_OUTPUTRESET_TIMEV_1 (HRTIM_RST1R_TIMEVNT1) -#define HRTIM_OUTPUTRESET_TIMEV_2 (HRTIM_RST1R_TIMEVNT2) -#define HRTIM_OUTPUTRESET_TIMEV_3 (HRTIM_RST1R_TIMEVNT3) -#define HRTIM_OUTPUTRESET_TIMEV_4 (HRTIM_RST1R_TIMEVNT4) -#define HRTIM_OUTPUTRESET_TIMEV_5 (HRTIM_RST1R_TIMEVNT5) -#define HRTIM_OUTPUTRESET_TIMEV_6 (HRTIM_RST1R_TIMEVNT6) -#define HRTIM_OUTPUTRESET_TIMEV_7 (HRTIM_RST1R_TIMEVNT7) -#define HRTIM_OUTPUTRESET_TIMEV_8 (HRTIM_RST1R_TIMEVNT8) -#define HRTIM_OUTPUTRESET_TIMEV_9 (HRTIM_RST1R_TIMEVNT9) - -/** @brief Constants defining the event filtering applied to external events - * by a timer - */ -#define HRTIM_TIMEVENTFILTER_NONE (0x00000000U) -#define HRTIM_TIMEVENTFILTER_BLANKINGCMP1 (HRTIM_EEFR1_EE1FLTR_0) -#define HRTIM_TIMEVENTFILTER_BLANKINGCMP2 (HRTIM_EEFR1_EE1FLTR_1) 
-#define HRTIM_TIMEVENTFILTER_BLANKINGCMP3 (HRTIM_EEFR1_EE1FLTR_1 | HRTIM_EEFR1_EE1FLTR_0) -#define HRTIM_TIMEVENTFILTER_BLANKINGCMP4 (HRTIM_EEFR1_EE1FLTR_2) -#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR1 (HRTIM_EEFR1_EE1FLTR_2 | HRTIM_EEFR1_EE1FLTR_0) -#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR2 (HRTIM_EEFR1_EE1FLTR_2 | HRTIM_EEFR1_EE1FLTR_1) -#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR3 (HRTIM_EEFR1_EE1FLTR_2 | HRTIM_EEFR1_EE1FLTR_1 | HRTIM_EEFR1_EE1FLTR_0) -#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR4 (HRTIM_EEFR1_EE1FLTR_3) -#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR5 (HRTIM_EEFR1_EE1FLTR_3 | HRTIM_EEFR1_EE1FLTR_0) -#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR6 (HRTIM_EEFR1_EE1FLTR_3 | HRTIM_EEFR1_EE1FLTR_1) -#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR7 (HRTIM_EEFR1_EE1FLTR_3 | HRTIM_EEFR1_EE1FLTR_1 | HRTIM_EEFR1_EE1FLTR_0) -#define HRTIM_TIMEVENTFILTER_BLANKINGFLTR8 (HRTIM_EEFR1_EE1FLTR_3 | HRTIM_EEFR1_EE1FLTR_2) -#define HRTIM_TIMEVENTFILTER_WINDOWINGCMP2 (HRTIM_EEFR1_EE1FLTR_3 | HRTIM_EEFR1_EE1FLTR_2 | HRTIM_EEFR1_EE1FLTR_0) -#define HRTIM_TIMEVENTFILTER_WINDOWINGCMP3 (HRTIM_EEFR1_EE1FLTR_3 | HRTIM_EEFR1_EE1FLTR_2 | HRTIM_EEFR1_EE1FLTR_1) -#define HRTIM_TIMEVENTFILTER_WINDOWINGTIM (HRTIM_EEFR1_EE1FLTR_3 | HRTIM_EEFR1_EE1FLTR_2 | HRTIM_EEFR1_EE1FLTR_1 | HRTIM_EEFR1_EE1FLTR_0) - /** @brief Constants defining the DLL calibration periods (in micro seconds) */ #define HRTIM_CALIBRATIONRATE_7300 0x00000000U @@ -831,7 +1014,8 @@ #define I2C_NOSTRETCH_ENABLED I2C_NOSTRETCH_ENABLE #define I2C_ANALOGFILTER_ENABLED I2C_ANALOGFILTER_ENABLE #define I2C_ANALOGFILTER_DISABLED I2C_ANALOGFILTER_DISABLE -#if defined(STM32F0) || defined(STM32F1) || defined(STM32F3) || defined(STM32G0) || defined(STM32L4) || defined(STM32L1) || defined(STM32F7) +#if defined(STM32F0) || defined(STM32F1) || defined(STM32F3) || defined(STM32G0) || defined(STM32L4) || \ + defined(STM32L1) || defined(STM32F7) #define HAL_I2C_STATE_MEM_BUSY_TX HAL_I2C_STATE_BUSY_TX #define HAL_I2C_STATE_MEM_BUSY_RX HAL_I2C_STATE_BUSY_RX #define HAL_I2C_STATE_MASTER_BUSY_TX HAL_I2C_STATE_BUSY_TX @@ -888,6 +1072,20 @@ #define LPTIM_TRIGSAMPLETIME_4TRANSITION LPTIM_TRIGSAMPLETIME_4TRANSITIONS #define LPTIM_TRIGSAMPLETIME_8TRANSITION LPTIM_TRIGSAMPLETIME_8TRANSITIONS + +/** @defgroup HAL_LPTIM_Aliased_Defines HAL LPTIM Aliased Defines maintained for legacy purpose + * @{ + */ +#define HAL_LPTIM_ReadCompare HAL_LPTIM_ReadCapturedValue +/** + * @} + */ + +#if defined(STM32U5) +#define LPTIM_ISR_CC1 LPTIM_ISR_CC1IF +#define LPTIM_ISR_CC2 LPTIM_ISR_CC2IF +#define LPTIM_CHANNEL_ALL 0x00000000U +#endif /* STM32U5 */ /** * @} */ @@ -955,11 +1153,16 @@ #define OPAMP_PGACONNECT_VM0 OPAMP_PGA_CONNECT_INVERTINGINPUT_IO0 #define OPAMP_PGACONNECT_VM1 OPAMP_PGA_CONNECT_INVERTINGINPUT_IO1 -#if defined(STM32L1) || defined(STM32L4) || defined(STM32L5) || defined(STM32H7) +#if defined(STM32L1) || defined(STM32L4) || defined(STM32L5) || defined(STM32H7) || defined(STM32G4) || defined(STM32U5) #define HAL_OPAMP_MSP_INIT_CB_ID HAL_OPAMP_MSPINIT_CB_ID #define HAL_OPAMP_MSP_DEINIT_CB_ID HAL_OPAMP_MSPDEINIT_CB_ID #endif +#if defined(STM32L4) || defined(STM32L5) +#define OPAMP_POWERMODE_NORMAL OPAMP_POWERMODE_NORMALPOWER +#elif defined(STM32G4) +#define OPAMP_POWERMODE_NORMAL OPAMP_POWERMODE_NORMALSPEED +#endif /** * @} @@ -971,15 +1174,15 @@ #define I2S_STANDARD_PHILLIPS I2S_STANDARD_PHILIPS #if defined(STM32H7) - #define I2S_IT_TXE I2S_IT_TXP - #define I2S_IT_RXNE I2S_IT_RXP +#define I2S_IT_TXE I2S_IT_TXP +#define I2S_IT_RXNE I2S_IT_RXP - #define I2S_FLAG_TXE I2S_FLAG_TXP - #define 
I2S_FLAG_RXNE I2S_FLAG_RXP +#define I2S_FLAG_TXE I2S_FLAG_TXP +#define I2S_FLAG_RXNE I2S_FLAG_RXP #endif #if defined(STM32F7) - #define I2S_CLOCK_SYSCLK I2S_CLOCK_PLL +#define I2S_CLOCK_SYSCLK I2S_CLOCK_PLL #endif /** * @} @@ -1034,8 +1237,8 @@ #define RTC_TAMPER1_2_3_INTERRUPT RTC_ALL_TAMPER_INTERRUPT #define RTC_TIMESTAMPPIN_PC13 RTC_TIMESTAMPPIN_DEFAULT -#define RTC_TIMESTAMPPIN_PA0 RTC_TIMESTAMPPIN_POS1 -#define RTC_TIMESTAMPPIN_PI8 RTC_TIMESTAMPPIN_POS1 +#define RTC_TIMESTAMPPIN_PA0 RTC_TIMESTAMPPIN_POS1 +#define RTC_TIMESTAMPPIN_PI8 RTC_TIMESTAMPPIN_POS1 #define RTC_TIMESTAMPPIN_PC1 RTC_TIMESTAMPPIN_POS2 #define RTC_OUTPUT_REMAP_PC13 RTC_OUTPUT_REMAP_NONE @@ -1046,15 +1249,42 @@ #define RTC_TAMPERPIN_PA0 RTC_TAMPERPIN_POS1 #define RTC_TAMPERPIN_PI8 RTC_TAMPERPIN_POS1 +#if defined(STM32H5) || defined(STM32H7RS) +#define TAMP_SECRETDEVICE_ERASE_NONE TAMP_DEVICESECRETS_ERASE_NONE +#define TAMP_SECRETDEVICE_ERASE_BKP_SRAM TAMP_DEVICESECRETS_ERASE_BKPSRAM +#endif /* STM32H5 || STM32H7RS */ + +#if defined(STM32WBA) +#define TAMP_SECRETDEVICE_ERASE_NONE TAMP_DEVICESECRETS_ERASE_NONE +#define TAMP_SECRETDEVICE_ERASE_SRAM2 TAMP_DEVICESECRETS_ERASE_SRAM2 +#define TAMP_SECRETDEVICE_ERASE_RHUK TAMP_DEVICESECRETS_ERASE_RHUK +#define TAMP_SECRETDEVICE_ERASE_ICACHE TAMP_DEVICESECRETS_ERASE_ICACHE +#define TAMP_SECRETDEVICE_ERASE_SAES_AES_HASH TAMP_DEVICESECRETS_ERASE_SAES_AES_HASH +#define TAMP_SECRETDEVICE_ERASE_PKA_SRAM TAMP_DEVICESECRETS_ERASE_PKA_SRAM +#define TAMP_SECRETDEVICE_ERASE_ALL TAMP_DEVICESECRETS_ERASE_ALL +#endif /* STM32WBA */ + +#if defined(STM32H5) || defined(STM32WBA) || defined(STM32H7RS) +#define TAMP_SECRETDEVICE_ERASE_DISABLE TAMP_DEVICESECRETS_ERASE_NONE +#define TAMP_SECRETDEVICE_ERASE_ENABLE TAMP_SECRETDEVICE_ERASE_ALL +#endif /* STM32H5 || STM32WBA || STM32H7RS */ + +#if defined(STM32F7) +#define RTC_TAMPCR_TAMPXE RTC_TAMPER_ENABLE_BITS_MASK +#define RTC_TAMPCR_TAMPXIE RTC_TAMPER_IT_ENABLE_BITS_MASK +#endif /* STM32F7 */ + #if defined(STM32H7) #define RTC_TAMPCR_TAMPXE RTC_TAMPER_X #define RTC_TAMPCR_TAMPXIE RTC_TAMPER_X_INTERRUPT +#endif /* STM32H7 */ +#if defined(STM32F7) || defined(STM32H7) || defined(STM32L0) #define RTC_TAMPER1_INTERRUPT RTC_IT_TAMP1 #define RTC_TAMPER2_INTERRUPT RTC_IT_TAMP2 #define RTC_TAMPER3_INTERRUPT RTC_IT_TAMP3 -#define RTC_ALL_TAMPER_INTERRUPT RTC_IT_TAMPALL -#endif /* STM32H7 */ +#define RTC_ALL_TAMPER_INTERRUPT RTC_IT_TAMP +#endif /* STM32F7 || STM32H7 || STM32L0 */ /** * @} @@ -1114,16 +1344,16 @@ #if defined(STM32H7) - #define SPI_FLAG_TXE SPI_FLAG_TXP - #define SPI_FLAG_RXNE SPI_FLAG_RXP +#define SPI_FLAG_TXE SPI_FLAG_TXP +#define SPI_FLAG_RXNE SPI_FLAG_RXP - #define SPI_IT_TXE SPI_IT_TXP - #define SPI_IT_RXNE SPI_IT_RXP +#define SPI_IT_TXE SPI_IT_TXP +#define SPI_IT_RXNE SPI_IT_RXP - #define SPI_FRLVL_EMPTY SPI_RX_FIFO_0PACKET - #define SPI_FRLVL_QUARTER_FULL SPI_RX_FIFO_1PACKET - #define SPI_FRLVL_HALF_FULL SPI_RX_FIFO_2PACKET - #define SPI_FRLVL_FULL SPI_RX_FIFO_3PACKET +#define SPI_FRLVL_EMPTY SPI_RX_FIFO_0PACKET +#define SPI_FRLVL_QUARTER_FULL SPI_RX_FIFO_1PACKET +#define SPI_FRLVL_HALF_FULL SPI_RX_FIFO_2PACKET +#define SPI_FRLVL_FULL SPI_RX_FIFO_3PACKET #endif /* STM32H7 */ @@ -1221,6 +1451,10 @@ #define TIM_TIM3_TI1_COMP1COMP2_OUT TIM_TIM3_TI1_COMP1_COMP2 #endif +#if defined(STM32U5) +#define OCREF_CLEAR_SELECT_Pos OCREF_CLEAR_SELECT_POS +#define OCREF_CLEAR_SELECT_Msk OCREF_CLEAR_SELECT_MSK +#endif /** * @} */ @@ -1330,30 +1564,40 @@ #define ETH_MMCRFAECR 0x00000198U #define ETH_MMCRGUFCR 0x000001C4U -#define ETH_MAC_TXFIFO_FULL 
0x02000000U /* Tx FIFO full */ -#define ETH_MAC_TXFIFONOT_EMPTY 0x01000000U /* Tx FIFO not empty */ -#define ETH_MAC_TXFIFO_WRITE_ACTIVE 0x00400000U /* Tx FIFO write active */ -#define ETH_MAC_TXFIFO_IDLE 0x00000000U /* Tx FIFO read status: Idle */ -#define ETH_MAC_TXFIFO_READ 0x00100000U /* Tx FIFO read status: Read (transferring data to the MAC transmitter) */ -#define ETH_MAC_TXFIFO_WAITING 0x00200000U /* Tx FIFO read status: Waiting for TxStatus from MAC transmitter */ -#define ETH_MAC_TXFIFO_WRITING 0x00300000U /* Tx FIFO read status: Writing the received TxStatus or flushing the TxFIFO */ -#define ETH_MAC_TRANSMISSION_PAUSE 0x00080000U /* MAC transmitter in pause */ -#define ETH_MAC_TRANSMITFRAMECONTROLLER_IDLE 0x00000000U /* MAC transmit frame controller: Idle */ -#define ETH_MAC_TRANSMITFRAMECONTROLLER_WAITING 0x00020000U /* MAC transmit frame controller: Waiting for Status of previous frame or IFG/backoff period to be over */ -#define ETH_MAC_TRANSMITFRAMECONTROLLER_GENRATING_PCF 0x00040000U /* MAC transmit frame controller: Generating and transmitting a Pause control frame (in full duplex mode) */ -#define ETH_MAC_TRANSMITFRAMECONTROLLER_TRANSFERRING 0x00060000U /* MAC transmit frame controller: Transferring input frame for transmission */ +#define ETH_MAC_TXFIFO_FULL 0x02000000U /* Tx FIFO full */ +#define ETH_MAC_TXFIFONOT_EMPTY 0x01000000U /* Tx FIFO not empty */ +#define ETH_MAC_TXFIFO_WRITE_ACTIVE 0x00400000U /* Tx FIFO write active */ +#define ETH_MAC_TXFIFO_IDLE 0x00000000U /* Tx FIFO read status: Idle */ +#define ETH_MAC_TXFIFO_READ 0x00100000U /* Tx FIFO read status: Read (transferring data to + the MAC transmitter) */ +#define ETH_MAC_TXFIFO_WAITING 0x00200000U /* Tx FIFO read status: Waiting for TxStatus from + MAC transmitter */ +#define ETH_MAC_TXFIFO_WRITING 0x00300000U /* Tx FIFO read status: Writing the received TxStatus + or flushing the TxFIFO */ +#define ETH_MAC_TRANSMISSION_PAUSE 0x00080000U /* MAC transmitter in pause */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_IDLE 0x00000000U /* MAC transmit frame controller: Idle */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_WAITING 0x00020000U /* MAC transmit frame controller: Waiting for Status + of previous frame or IFG/backoff period to be over */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_GENRATING_PCF 0x00040000U /* MAC transmit frame controller: Generating and + transmitting a Pause control frame (in full duplex mode) */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_TRANSFERRING 0x00060000U /* MAC transmit frame controller: Transferring input + frame for transmission */ #define ETH_MAC_MII_TRANSMIT_ACTIVE 0x00010000U /* MAC MII transmit engine active */ #define ETH_MAC_RXFIFO_EMPTY 0x00000000U /* Rx FIFO fill level: empty */ -#define ETH_MAC_RXFIFO_BELOW_THRESHOLD 0x00000100U /* Rx FIFO fill level: fill-level below flow-control de-activate threshold */ -#define ETH_MAC_RXFIFO_ABOVE_THRESHOLD 0x00000200U /* Rx FIFO fill level: fill-level above flow-control activate threshold */ +#define ETH_MAC_RXFIFO_BELOW_THRESHOLD 0x00000100U /* Rx FIFO fill level: fill-level below flow-control + de-activate threshold */ +#define ETH_MAC_RXFIFO_ABOVE_THRESHOLD 0x00000200U /* Rx FIFO fill level: fill-level above flow-control + activate threshold */ #define ETH_MAC_RXFIFO_FULL 0x00000300U /* Rx FIFO fill level: full */ #if defined(STM32F1) #else #define ETH_MAC_READCONTROLLER_IDLE 0x00000000U /* Rx FIFO read controller IDLE state */ #define ETH_MAC_READCONTROLLER_READING_DATA 0x00000020U /* Rx FIFO read controller Reading frame data */ 
-#define ETH_MAC_READCONTROLLER_READING_STATUS 0x00000040U /* Rx FIFO read controller Reading frame status (or time-stamp) */ +#define ETH_MAC_READCONTROLLER_READING_STATUS 0x00000040U /* Rx FIFO read controller Reading frame status + (or time-stamp) */ #endif -#define ETH_MAC_READCONTROLLER_FLUSHING 0x00000060U /* Rx FIFO read controller Flushing the frame data and status */ +#define ETH_MAC_READCONTROLLER_FLUSHING 0x00000060U /* Rx FIFO read controller Flushing the frame data and + status */ #define ETH_MAC_RXFIFO_WRITE_ACTIVE 0x00000010U /* Rx FIFO write controller active */ #define ETH_MAC_SMALL_FIFO_NOTACTIVE 0x00000000U /* MAC small FIFO read / write controllers not active */ #define ETH_MAC_SMALL_FIFO_READ_ACTIVE 0x00000002U /* MAC small FIFO read controller active */ @@ -1361,6 +1605,8 @@ #define ETH_MAC_SMALL_FIFO_RW_ACTIVE 0x00000006U /* MAC small FIFO read / write controllers active */ #define ETH_MAC_MII_RECEIVE_PROTOCOL_ACTIVE 0x00000001U /* MAC MII receive protocol engine active */ +#define ETH_TxPacketConfig ETH_TxPacketConfigTypeDef /* Transmit Packet Configuration structure definition */ + /** * @} */ @@ -1409,6 +1655,20 @@ */ #endif /* STM32L4 || STM32F7 || STM32F4 || STM32H7 */ +#if defined(STM32L4) || defined(STM32F7) || defined(STM32F427xx) || defined(STM32F437xx) \ + || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) \ + || defined(STM32H7) || defined(STM32U5) +/** @defgroup DMA2D_Aliases DMA2D API Aliases + * @{ + */ +#define HAL_DMA2D_DisableCLUT HAL_DMA2D_CLUTLoading_Abort /*!< Aliased to HAL_DMA2D_CLUTLoading_Abort + for compatibility with legacy code */ +/** + * @} + */ + +#endif /* STM32L4 || STM32F7 || STM32F4 || STM32H7 || STM32U5 */ + /** @defgroup HAL_PPP_Aliased_Defines HAL PPP Aliased Defines maintained for legacy purpose * @{ */ @@ -1427,6 +1687,29 @@ * @} */ +/** @defgroup HAL_DCACHE_Aliased_Functions HAL DCACHE Aliased Functions maintained for legacy purpose + * @{ + */ + +#if defined(STM32U5) +#define HAL_DCACHE_CleanInvalidateByAddr HAL_DCACHE_CleanInvalidByAddr +#define HAL_DCACHE_CleanInvalidateByAddr_IT HAL_DCACHE_CleanInvalidByAddr_IT +#endif /* STM32U5 */ + +/** + * @} + */ + +#if !defined(STM32F2) +/** @defgroup HASH_alias HASH API alias + * @{ + */ +#define HAL_HASHEx_IRQHandler HAL_HASH_IRQHandler /*!< Redirection for compatibility with legacy code */ +/** + * + * @} + */ +#endif /* STM32F2 */ /** @defgroup HAL_HASH_Aliased_Functions HAL HASH Aliased Functions maintained for legacy purpose * @{ */ @@ -1450,7 +1733,7 @@ #define HASH_HMACKeyType_ShortKey HASH_HMAC_KEYTYPE_SHORTKEY #define HASH_HMACKeyType_LongKey HASH_HMAC_KEYTYPE_LONGKEY -#if defined(STM32L4) || defined(STM32F4) || defined(STM32F7) || defined(STM32H7) +#if defined(STM32L4) || defined(STM32L5) || defined(STM32F2) || defined(STM32F4) || defined(STM32F7) || defined(STM32H7) #define HAL_HASH_MD5_Accumulate HAL_HASH_MD5_Accmlt #define HAL_HASH_MD5_Accumulate_End HAL_HASH_MD5_Accmlt_End @@ -1472,7 +1755,7 @@ #define HAL_HASHEx_SHA256_Accumulate_IT HAL_HASHEx_SHA256_Accmlt_IT #define HAL_HASHEx_SHA256_Accumulate_End_IT HAL_HASHEx_SHA256_Accmlt_End_IT -#endif /* STM32L4 || STM32F4 || STM32F7 || STM32H7 */ +#endif /* STM32L4 || STM32L5 || STM32F2 || STM32F4 || STM32F7 || STM32H7 */ /** * @} */ @@ -1486,7 +1769,9 @@ #define HAL_DisableDBGStopMode HAL_DBGMCU_DisableDBGStopMode #define HAL_EnableDBGStandbyMode HAL_DBGMCU_EnableDBGStandbyMode #define HAL_DisableDBGStandbyMode HAL_DBGMCU_DisableDBGStandbyMode -#define 
HAL_DBG_LowPowerConfig(Periph, cmd) (((cmd)==ENABLE)? HAL_DBGMCU_DBG_EnableLowPowerConfig(Periph) : HAL_DBGMCU_DBG_DisableLowPowerConfig(Periph)) +#define HAL_DBG_LowPowerConfig(Periph, cmd) (((cmd\ + )==ENABLE)? HAL_DBGMCU_DBG_EnableLowPowerConfig(Periph) : \ + HAL_DBGMCU_DBG_DisableLowPowerConfig(Periph)) #define HAL_VREFINT_OutputSelect HAL_SYSCFG_VREFINT_OutputSelect #define HAL_Lock_Cmd(cmd) (((cmd)==ENABLE) ? HAL_SYSCFG_Enable_Lock_VREFINT() : HAL_SYSCFG_Disable_Lock_VREFINT()) #if defined(STM32L0) @@ -1494,8 +1779,11 @@ #define HAL_VREFINT_Cmd(cmd) (((cmd)==ENABLE)? HAL_SYSCFG_EnableVREFINT() : HAL_SYSCFG_DisableVREFINT()) #endif #define HAL_ADC_EnableBuffer_Cmd(cmd) (((cmd)==ENABLE) ? HAL_ADCEx_EnableVREFINT() : HAL_ADCEx_DisableVREFINT()) -#define HAL_ADC_EnableBufferSensor_Cmd(cmd) (((cmd)==ENABLE) ? HAL_ADCEx_EnableVREFINTTempSensor() : HAL_ADCEx_DisableVREFINTTempSensor()) -#if defined(STM32H7A3xx) || defined(STM32H7B3xx) || defined(STM32H7B0xx) || defined(STM32H7A3xxQ) || defined(STM32H7B3xxQ) || defined(STM32H7B0xxQ) +#define HAL_ADC_EnableBufferSensor_Cmd(cmd) (((cmd\ + )==ENABLE) ? HAL_ADCEx_EnableVREFINTTempSensor() : \ + HAL_ADCEx_DisableVREFINTTempSensor()) +#if defined(STM32H7A3xx) || defined(STM32H7B3xx) || defined(STM32H7B0xx) || defined(STM32H7A3xxQ) || \ + defined(STM32H7B3xxQ) || defined(STM32H7B0xxQ) #define HAL_EnableSRDomainDBGStopMode HAL_EnableDomain3DBGStopMode #define HAL_DisableSRDomainDBGStopMode HAL_DisableDomain3DBGStopMode #define HAL_EnableSRDomainDBGStandbyMode HAL_EnableDomain3DBGStandbyMode @@ -1517,9 +1805,9 @@ #define HAL_DATA_EEPROMEx_Erase HAL_FLASHEx_DATAEEPROM_Erase #define HAL_DATA_EEPROMEx_Program HAL_FLASHEx_DATAEEPROM_Program - /** +/** * @} - */ + */ /** @defgroup HAL_I2C_Aliased_Functions HAL I2C Aliased Functions maintained for legacy purpose * @{ @@ -1529,20 +1817,26 @@ #define HAL_FMPI2CEx_AnalogFilter_Config HAL_FMPI2CEx_ConfigAnalogFilter #define HAL_FMPI2CEx_DigitalFilter_Config HAL_FMPI2CEx_ConfigDigitalFilter -#define HAL_I2CFastModePlusConfig(SYSCFG_I2CFastModePlus, cmd) (((cmd)==ENABLE)? HAL_I2CEx_EnableFastModePlus(SYSCFG_I2CFastModePlus): HAL_I2CEx_DisableFastModePlus(SYSCFG_I2CFastModePlus)) +#define HAL_I2CFastModePlusConfig(SYSCFG_I2CFastModePlus, cmd) (((cmd) == ENABLE)? 
\ + HAL_I2CEx_EnableFastModePlus(SYSCFG_I2CFastModePlus): \ + HAL_I2CEx_DisableFastModePlus(SYSCFG_I2CFastModePlus)) -#if defined(STM32H7) || defined(STM32WB) || defined(STM32G0) || defined(STM32F0) || defined(STM32F1) || defined(STM32F2) || defined(STM32F3) || defined(STM32F4) || defined(STM32F7) || defined(STM32L0) || defined(STM32L4) || defined(STM32L5) || defined(STM32G4) +#if defined(STM32H7) || defined(STM32WB) || defined(STM32G0) || defined(STM32F0) || defined(STM32F1) || \ + defined(STM32F2) || defined(STM32F3) || defined(STM32F4) || defined(STM32F7) || defined(STM32L0) || \ + defined(STM32L4) || defined(STM32L5) || defined(STM32G4) || defined(STM32L1) #define HAL_I2C_Master_Sequential_Transmit_IT HAL_I2C_Master_Seq_Transmit_IT #define HAL_I2C_Master_Sequential_Receive_IT HAL_I2C_Master_Seq_Receive_IT #define HAL_I2C_Slave_Sequential_Transmit_IT HAL_I2C_Slave_Seq_Transmit_IT #define HAL_I2C_Slave_Sequential_Receive_IT HAL_I2C_Slave_Seq_Receive_IT -#endif /* STM32H7 || STM32WB || STM32G0 || STM32F0 || STM32F1 || STM32F2 || STM32F3 || STM32F4 || STM32F7 || STM32L0 || STM32L4 || STM32L5 || STM32G4 */ -#if defined(STM32H7) || defined(STM32WB) || defined(STM32G0) || defined(STM32F4) || defined(STM32F7) || defined(STM32L0) || defined(STM32L4) || defined(STM32L5) || defined(STM32G4) +#endif /* STM32H7 || STM32WB || STM32G0 || STM32F0 || STM32F1 || STM32F2 || STM32F3 || STM32F4 || STM32F7 || STM32L0 || + STM32L4 || STM32L5 || STM32G4 || STM32L1 */ +#if defined(STM32H7) || defined(STM32WB) || defined(STM32G0) || defined(STM32F4) || defined(STM32F7) || \ + defined(STM32L0) || defined(STM32L4) || defined(STM32L5) || defined(STM32G4)|| defined(STM32L1) #define HAL_I2C_Master_Sequential_Transmit_DMA HAL_I2C_Master_Seq_Transmit_DMA #define HAL_I2C_Master_Sequential_Receive_DMA HAL_I2C_Master_Seq_Receive_DMA #define HAL_I2C_Slave_Sequential_Transmit_DMA HAL_I2C_Slave_Seq_Transmit_DMA #define HAL_I2C_Slave_Sequential_Receive_DMA HAL_I2C_Slave_Seq_Receive_DMA -#endif /* STM32H7 || STM32WB || STM32G0 || STM32F4 || STM32F7 || STM32L0 || STM32L4 || STM32L5 || STM32G4 */ +#endif /* STM32H7 || STM32WB || STM32G0 || STM32F4 || STM32F7 || STM32L0 || STM32L4 || STM32L5 || STM32G4 || STM32L1 */ #if defined(STM32F4) #define HAL_FMPI2C_Master_Sequential_Transmit_IT HAL_FMPI2C_Master_Seq_Transmit_IT @@ -1554,19 +1848,19 @@ #define HAL_FMPI2C_Slave_Sequential_Transmit_DMA HAL_FMPI2C_Slave_Seq_Transmit_DMA #define HAL_FMPI2C_Slave_Sequential_Receive_DMA HAL_FMPI2C_Slave_Seq_Receive_DMA #endif /* STM32F4 */ - /** +/** * @} - */ + */ /** @defgroup HAL_PWR_Aliased HAL PWR Aliased maintained for legacy purpose * @{ */ #if defined(STM32G0) -#define HAL_PWR_ConfigPVD HAL_PWREx_ConfigPVD -#define HAL_PWR_EnablePVD HAL_PWREx_EnablePVD -#define HAL_PWR_DisablePVD HAL_PWREx_DisablePVD -#define HAL_PWR_PVD_IRQHandler HAL_PWREx_PVD_IRQHandler +#define HAL_PWR_ConfigPVD HAL_PWREx_ConfigPVD +#define HAL_PWR_EnablePVD HAL_PWREx_EnablePVD +#define HAL_PWR_DisablePVD HAL_PWREx_DisablePVD +#define HAL_PWR_PVD_IRQHandler HAL_PWREx_PVD_IRQHandler #endif #define HAL_PWR_PVDConfig HAL_PWR_ConfigPVD #define HAL_PWR_DisableBkUpReg HAL_PWREx_DisableBkUpReg @@ -1611,7 +1905,108 @@ #define PWR_MODE_EVT PWR_PVD_MODE_NORMAL - /** +#if defined (STM32U5) +#define PWR_SRAM1_PAGE1_STOP_RETENTION PWR_SRAM1_PAGE1_STOP +#define PWR_SRAM1_PAGE2_STOP_RETENTION PWR_SRAM1_PAGE2_STOP +#define PWR_SRAM1_PAGE3_STOP_RETENTION PWR_SRAM1_PAGE3_STOP +#define PWR_SRAM1_PAGE4_STOP_RETENTION PWR_SRAM1_PAGE4_STOP +#define PWR_SRAM1_PAGE5_STOP_RETENTION 
PWR_SRAM1_PAGE5_STOP +#define PWR_SRAM1_PAGE6_STOP_RETENTION PWR_SRAM1_PAGE6_STOP +#define PWR_SRAM1_PAGE7_STOP_RETENTION PWR_SRAM1_PAGE7_STOP +#define PWR_SRAM1_PAGE8_STOP_RETENTION PWR_SRAM1_PAGE8_STOP +#define PWR_SRAM1_PAGE9_STOP_RETENTION PWR_SRAM1_PAGE9_STOP +#define PWR_SRAM1_PAGE10_STOP_RETENTION PWR_SRAM1_PAGE10_STOP +#define PWR_SRAM1_PAGE11_STOP_RETENTION PWR_SRAM1_PAGE11_STOP +#define PWR_SRAM1_PAGE12_STOP_RETENTION PWR_SRAM1_PAGE12_STOP +#define PWR_SRAM1_FULL_STOP_RETENTION PWR_SRAM1_FULL_STOP + +#define PWR_SRAM2_PAGE1_STOP_RETENTION PWR_SRAM2_PAGE1_STOP +#define PWR_SRAM2_PAGE2_STOP_RETENTION PWR_SRAM2_PAGE2_STOP +#define PWR_SRAM2_FULL_STOP_RETENTION PWR_SRAM2_FULL_STOP + +#define PWR_SRAM3_PAGE1_STOP_RETENTION PWR_SRAM3_PAGE1_STOP +#define PWR_SRAM3_PAGE2_STOP_RETENTION PWR_SRAM3_PAGE2_STOP +#define PWR_SRAM3_PAGE3_STOP_RETENTION PWR_SRAM3_PAGE3_STOP +#define PWR_SRAM3_PAGE4_STOP_RETENTION PWR_SRAM3_PAGE4_STOP +#define PWR_SRAM3_PAGE5_STOP_RETENTION PWR_SRAM3_PAGE5_STOP +#define PWR_SRAM3_PAGE6_STOP_RETENTION PWR_SRAM3_PAGE6_STOP +#define PWR_SRAM3_PAGE7_STOP_RETENTION PWR_SRAM3_PAGE7_STOP +#define PWR_SRAM3_PAGE8_STOP_RETENTION PWR_SRAM3_PAGE8_STOP +#define PWR_SRAM3_PAGE9_STOP_RETENTION PWR_SRAM3_PAGE9_STOP +#define PWR_SRAM3_PAGE10_STOP_RETENTION PWR_SRAM3_PAGE10_STOP +#define PWR_SRAM3_PAGE11_STOP_RETENTION PWR_SRAM3_PAGE11_STOP +#define PWR_SRAM3_PAGE12_STOP_RETENTION PWR_SRAM3_PAGE12_STOP +#define PWR_SRAM3_PAGE13_STOP_RETENTION PWR_SRAM3_PAGE13_STOP +#define PWR_SRAM3_FULL_STOP_RETENTION PWR_SRAM3_FULL_STOP + +#define PWR_SRAM4_FULL_STOP_RETENTION PWR_SRAM4_FULL_STOP + +#define PWR_SRAM5_PAGE1_STOP_RETENTION PWR_SRAM5_PAGE1_STOP +#define PWR_SRAM5_PAGE2_STOP_RETENTION PWR_SRAM5_PAGE2_STOP +#define PWR_SRAM5_PAGE3_STOP_RETENTION PWR_SRAM5_PAGE3_STOP +#define PWR_SRAM5_PAGE4_STOP_RETENTION PWR_SRAM5_PAGE4_STOP +#define PWR_SRAM5_PAGE5_STOP_RETENTION PWR_SRAM5_PAGE5_STOP +#define PWR_SRAM5_PAGE6_STOP_RETENTION PWR_SRAM5_PAGE6_STOP +#define PWR_SRAM5_PAGE7_STOP_RETENTION PWR_SRAM5_PAGE7_STOP +#define PWR_SRAM5_PAGE8_STOP_RETENTION PWR_SRAM5_PAGE8_STOP +#define PWR_SRAM5_PAGE9_STOP_RETENTION PWR_SRAM5_PAGE9_STOP +#define PWR_SRAM5_PAGE10_STOP_RETENTION PWR_SRAM5_PAGE10_STOP +#define PWR_SRAM5_PAGE11_STOP_RETENTION PWR_SRAM5_PAGE11_STOP +#define PWR_SRAM5_PAGE12_STOP_RETENTION PWR_SRAM5_PAGE12_STOP +#define PWR_SRAM5_PAGE13_STOP_RETENTION PWR_SRAM5_PAGE13_STOP +#define PWR_SRAM5_FULL_STOP_RETENTION PWR_SRAM5_FULL_STOP + +#define PWR_SRAM6_PAGE1_STOP_RETENTION PWR_SRAM6_PAGE1_STOP +#define PWR_SRAM6_PAGE2_STOP_RETENTION PWR_SRAM6_PAGE2_STOP +#define PWR_SRAM6_PAGE3_STOP_RETENTION PWR_SRAM6_PAGE3_STOP +#define PWR_SRAM6_PAGE4_STOP_RETENTION PWR_SRAM6_PAGE4_STOP +#define PWR_SRAM6_PAGE5_STOP_RETENTION PWR_SRAM6_PAGE5_STOP +#define PWR_SRAM6_PAGE6_STOP_RETENTION PWR_SRAM6_PAGE6_STOP +#define PWR_SRAM6_PAGE7_STOP_RETENTION PWR_SRAM6_PAGE7_STOP +#define PWR_SRAM6_PAGE8_STOP_RETENTION PWR_SRAM6_PAGE8_STOP +#define PWR_SRAM6_FULL_STOP_RETENTION PWR_SRAM6_FULL_STOP + + +#define PWR_ICACHE_FULL_STOP_RETENTION PWR_ICACHE_FULL_STOP +#define PWR_DCACHE1_FULL_STOP_RETENTION PWR_DCACHE1_FULL_STOP +#define PWR_DCACHE2_FULL_STOP_RETENTION PWR_DCACHE2_FULL_STOP +#define PWR_DMA2DRAM_FULL_STOP_RETENTION PWR_DMA2DRAM_FULL_STOP +#define PWR_PERIPHRAM_FULL_STOP_RETENTION PWR_PERIPHRAM_FULL_STOP +#define PWR_PKA32RAM_FULL_STOP_RETENTION PWR_PKA32RAM_FULL_STOP +#define PWR_GRAPHICPRAM_FULL_STOP_RETENTION PWR_GRAPHICPRAM_FULL_STOP +#define PWR_DSIRAM_FULL_STOP_RETENTION PWR_DSIRAM_FULL_STOP 
+#define PWR_JPEGRAM_FULL_STOP_RETENTION PWR_JPEGRAM_FULL_STOP + + +#define PWR_SRAM2_PAGE1_STANDBY_RETENTION PWR_SRAM2_PAGE1_STANDBY +#define PWR_SRAM2_PAGE2_STANDBY_RETENTION PWR_SRAM2_PAGE2_STANDBY +#define PWR_SRAM2_FULL_STANDBY_RETENTION PWR_SRAM2_FULL_STANDBY + +#define PWR_SRAM1_FULL_RUN_RETENTION PWR_SRAM1_FULL_RUN +#define PWR_SRAM2_FULL_RUN_RETENTION PWR_SRAM2_FULL_RUN +#define PWR_SRAM3_FULL_RUN_RETENTION PWR_SRAM3_FULL_RUN +#define PWR_SRAM4_FULL_RUN_RETENTION PWR_SRAM4_FULL_RUN +#define PWR_SRAM5_FULL_RUN_RETENTION PWR_SRAM5_FULL_RUN +#define PWR_SRAM6_FULL_RUN_RETENTION PWR_SRAM6_FULL_RUN + +#define PWR_ALL_RAM_RUN_RETENTION_MASK PWR_ALL_RAM_RUN_MASK +#endif + +/** + * @} + */ + +/** @defgroup HAL_RTC_Aliased_Functions HAL RTC Aliased Functions maintained for legacy purpose + * @{ + */ +#if defined(STM32H5) || defined(STM32WBA) || defined(STM32H7RS) +#define HAL_RTCEx_SetBoothardwareKey HAL_RTCEx_LockBootHardwareKey +#define HAL_RTCEx_BKUPBlock_Enable HAL_RTCEx_BKUPBlock +#define HAL_RTCEx_BKUPBlock_Disable HAL_RTCEx_BKUPUnblock +#define HAL_RTCEx_Erase_SecretDev_Conf HAL_RTCEx_ConfigEraseDeviceSecrets +#endif /* STM32H5 || STM32WBA || STM32H7RS */ + +/** * @} */ @@ -1640,7 +2035,8 @@ #define HAL_TIM_DMAError TIM_DMAError #define HAL_TIM_DMACaptureCplt TIM_DMACaptureCplt #define HAL_TIMEx_DMACommutationCplt TIMEx_DMACommutationCplt -#if defined(STM32H7) || defined(STM32G0) || defined(STM32F0) || defined(STM32F1) || defined(STM32F2) || defined(STM32F3) || defined(STM32F4) || defined(STM32F7) || defined(STM32L0) || defined(STM32L4) +#if defined(STM32H7) || defined(STM32G0) || defined(STM32F0) || defined(STM32F1) || defined(STM32F2) || \ + defined(STM32F3) || defined(STM32F4) || defined(STM32F7) || defined(STM32L0) || defined(STM32L4) #define HAL_TIM_SlaveConfigSynchronization HAL_TIM_SlaveConfigSynchro #define HAL_TIM_SlaveConfigSynchronization_IT HAL_TIM_SlaveConfigSynchro_IT #define HAL_TIMEx_CommutationCallback HAL_TIMEx_CommutCallback @@ -1862,15 +2258,15 @@ #define __HAL_FREEZE_RTC_DBGMCU __HAL_DBGMCU_FREEZE_RTC #define __HAL_UNFREEZE_RTC_DBGMCU __HAL_DBGMCU_UNFREEZE_RTC #if defined(STM32H7) - #define __HAL_FREEZE_WWDG_DBGMCU __HAL_DBGMCU_FREEZE_WWDG1 - #define __HAL_UNFREEZE_WWDG_DBGMCU __HAL_DBGMCU_UnFreeze_WWDG1 - #define __HAL_FREEZE_IWDG_DBGMCU __HAL_DBGMCU_FREEZE_IWDG1 - #define __HAL_UNFREEZE_IWDG_DBGMCU __HAL_DBGMCU_UnFreeze_IWDG1 +#define __HAL_FREEZE_WWDG_DBGMCU __HAL_DBGMCU_FREEZE_WWDG1 +#define __HAL_UNFREEZE_WWDG_DBGMCU __HAL_DBGMCU_UnFreeze_WWDG1 +#define __HAL_FREEZE_IWDG_DBGMCU __HAL_DBGMCU_FREEZE_IWDG1 +#define __HAL_UNFREEZE_IWDG_DBGMCU __HAL_DBGMCU_UnFreeze_IWDG1 #else - #define __HAL_FREEZE_WWDG_DBGMCU __HAL_DBGMCU_FREEZE_WWDG - #define __HAL_UNFREEZE_WWDG_DBGMCU __HAL_DBGMCU_UNFREEZE_WWDG - #define __HAL_FREEZE_IWDG_DBGMCU __HAL_DBGMCU_FREEZE_IWDG - #define __HAL_UNFREEZE_IWDG_DBGMCU __HAL_DBGMCU_UNFREEZE_IWDG +#define __HAL_FREEZE_WWDG_DBGMCU __HAL_DBGMCU_FREEZE_WWDG +#define __HAL_UNFREEZE_WWDG_DBGMCU __HAL_DBGMCU_UNFREEZE_WWDG +#define __HAL_FREEZE_IWDG_DBGMCU __HAL_DBGMCU_FREEZE_IWDG +#define __HAL_UNFREEZE_IWDG_DBGMCU __HAL_DBGMCU_UNFREEZE_IWDG #endif /* STM32H7 */ #define __HAL_FREEZE_I2C1_TIMEOUT_DBGMCU __HAL_DBGMCU_FREEZE_I2C1_TIMEOUT #define __HAL_UNFREEZE_I2C1_TIMEOUT_DBGMCU __HAL_DBGMCU_UNFREEZE_I2C1_TIMEOUT @@ -1897,7 +2293,8 @@ #define COMP_STOP __HAL_COMP_DISABLE #define COMP_LOCK __HAL_COMP_LOCK -#if defined(STM32F301x8) || defined(STM32F302x8) || defined(STM32F318xx) || defined(STM32F303x8) || defined(STM32F334x8) || 
defined(STM32F328xx) +#if defined(STM32F301x8) || defined(STM32F302x8) || defined(STM32F318xx) || defined(STM32F303x8) || \ + defined(STM32F334x8) || defined(STM32F328xx) #define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE() : \ ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_RISING_EDGE() : \ __HAL_COMP_COMP6_EXTI_ENABLE_RISING_EDGE()) @@ -1922,8 +2319,8 @@ #define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_CLEAR_FLAG() : \ ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_CLEAR_FLAG() : \ __HAL_COMP_COMP6_EXTI_CLEAR_FLAG()) -# endif -# if defined(STM32F302xE) || defined(STM32F302xC) +#endif +#if defined(STM32F302xE) || defined(STM32F302xC) #define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE() : \ ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_RISING_EDGE() : \ @@ -1956,8 +2353,8 @@ ((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_CLEAR_FLAG() : \ ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_CLEAR_FLAG() : \ __HAL_COMP_COMP6_EXTI_CLEAR_FLAG()) -# endif -# if defined(STM32F303xE) || defined(STM32F398xx) || defined(STM32F303xC) || defined(STM32F358xx) +#endif +#if defined(STM32F303xE) || defined(STM32F398xx) || defined(STM32F303xC) || defined(STM32F358xx) #define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE() : \ ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_ENABLE_RISING_EDGE() : \ @@ -2014,8 +2411,8 @@ ((__FLAG__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_CLEAR_FLAG() : \ ((__FLAG__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_CLEAR_FLAG() : \ __HAL_COMP_COMP7_EXTI_CLEAR_FLAG()) -# endif -# if defined(STM32F373xC) ||defined(STM32F378xx) +#endif +#if defined(STM32F373xC) ||defined(STM32F378xx) #define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE()) #define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_RISING_EDGE() : \ @@ -2032,7 +2429,7 @@ __HAL_COMP_COMP2_EXTI_GET_FLAG()) #define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_CLEAR_FLAG() : \ __HAL_COMP_COMP2_EXTI_CLEAR_FLAG()) -# endif +#endif #else #define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? 
__HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE()) @@ -2069,8 +2466,10 @@ /** @defgroup HAL_COMP_Aliased_Functions HAL COMP Aliased Functions maintained for legacy purpose * @{ */ -#define HAL_COMP_Start_IT HAL_COMP_Start /* Function considered as legacy as EXTI event or IT configuration is done into HAL_COMP_Init() */ -#define HAL_COMP_Stop_IT HAL_COMP_Stop /* Function considered as legacy as EXTI event or IT configuration is done into HAL_COMP_Init() */ +#define HAL_COMP_Start_IT HAL_COMP_Start /* Function considered as legacy as EXTI event or IT configuration is + done into HAL_COMP_Init() */ +#define HAL_COMP_Stop_IT HAL_COMP_Stop /* Function considered as legacy as EXTI event or IT configuration is + done into HAL_COMP_Init() */ /** * @} */ @@ -2081,8 +2480,8 @@ */ #define IS_DAC_WAVE(WAVE) (((WAVE) == DAC_WAVE_NONE) || \ - ((WAVE) == DAC_WAVE_NOISE)|| \ - ((WAVE) == DAC_WAVE_TRIANGLE)) + ((WAVE) == DAC_WAVE_NOISE)|| \ + ((WAVE) == DAC_WAVE_TRIANGLE)) /** * @} @@ -2138,7 +2537,7 @@ #define IS_I2S_INSTANCE_EXT IS_I2S_ALL_INSTANCE_EXT #if defined(STM32H7) - #define __HAL_I2S_CLEAR_FREFLAG __HAL_I2S_CLEAR_TIFREFLAG +#define __HAL_I2S_CLEAR_FREFLAG __HAL_I2S_CLEAR_TIFREFLAG #endif /** @@ -2229,7 +2628,9 @@ #define __HAL_PWR_INTERNALWAKEUP_ENABLE HAL_PWREx_EnableInternalWakeUpLine #define __HAL_PWR_PULL_UP_DOWN_CONFIG_DISABLE HAL_PWREx_DisablePullUpPullDownConfig #define __HAL_PWR_PULL_UP_DOWN_CONFIG_ENABLE HAL_PWREx_EnablePullUpPullDownConfig -#define __HAL_PWR_PVD_EXTI_CLEAR_EGDE_TRIGGER() do { __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE();__HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE(); } while(0) +#define __HAL_PWR_PVD_EXTI_CLEAR_EGDE_TRIGGER() do { __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE(); \ + __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE(); \ + } while(0) #define __HAL_PWR_PVD_EXTI_EVENT_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_EVENT #define __HAL_PWR_PVD_EXTI_EVENT_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_EVENT #define __HAL_PWR_PVD_EXTI_FALLINGTRIGGER_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE @@ -2238,8 +2639,12 @@ #define __HAL_PWR_PVD_EXTI_RISINGTRIGGER_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE #define __HAL_PWR_PVD_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE #define __HAL_PWR_PVD_EXTI_SET_RISING_EDGE_TRIGGER __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE -#define __HAL_PWR_PVM_DISABLE() do { HAL_PWREx_DisablePVM1();HAL_PWREx_DisablePVM2();HAL_PWREx_DisablePVM3();HAL_PWREx_DisablePVM4(); } while(0) -#define __HAL_PWR_PVM_ENABLE() do { HAL_PWREx_EnablePVM1();HAL_PWREx_EnablePVM2();HAL_PWREx_EnablePVM3();HAL_PWREx_EnablePVM4(); } while(0) +#define __HAL_PWR_PVM_DISABLE() do { HAL_PWREx_DisablePVM1();HAL_PWREx_DisablePVM2(); \ + HAL_PWREx_DisablePVM3();HAL_PWREx_DisablePVM4(); \ + } while(0) +#define __HAL_PWR_PVM_ENABLE() do { HAL_PWREx_EnablePVM1();HAL_PWREx_EnablePVM2(); \ + HAL_PWREx_EnablePVM3();HAL_PWREx_EnablePVM4(); \ + } while(0) #define __HAL_PWR_SRAM2CONTENT_PRESERVE_DISABLE HAL_PWREx_DisableSRAM2ContentRetention #define __HAL_PWR_SRAM2CONTENT_PRESERVE_ENABLE HAL_PWREx_EnableSRAM2ContentRetention #define __HAL_PWR_VDDIO2_DISABLE HAL_PWREx_DisableVddIO2 @@ -2275,7 +2680,8 @@ #define RCC_StopWakeUpClock_HSI RCC_STOP_WAKEUPCLOCK_HSI #define HAL_RCC_CCSCallback HAL_RCC_CSSCallback -#define HAL_RC48_EnableBuffer_Cmd(cmd) (((cmd)==ENABLE) ? HAL_RCCEx_EnableHSI48_VREFINT() : HAL_RCCEx_DisableHSI48_VREFINT()) +#define HAL_RC48_EnableBuffer_Cmd(cmd) (((cmd)==ENABLE) ? 
\ + HAL_RCCEx_EnableHSI48_VREFINT() : HAL_RCCEx_DisableHSI48_VREFINT()) #define __ADC_CLK_DISABLE __HAL_RCC_ADC_CLK_DISABLE #define __ADC_CLK_ENABLE __HAL_RCC_ADC_CLK_ENABLE @@ -2325,6 +2731,12 @@ #define __APB1_RELEASE_RESET __HAL_RCC_APB1_RELEASE_RESET #define __APB2_FORCE_RESET __HAL_RCC_APB2_FORCE_RESET #define __APB2_RELEASE_RESET __HAL_RCC_APB2_RELEASE_RESET +#if defined(STM32C0) +#define __HAL_RCC_APB1_FORCE_RESET __HAL_RCC_APB1_GRP1_FORCE_RESET +#define __HAL_RCC_APB1_RELEASE_RESET __HAL_RCC_APB1_GRP1_RELEASE_RESET +#define __HAL_RCC_APB2_FORCE_RESET __HAL_RCC_APB1_GRP2_FORCE_RESET +#define __HAL_RCC_APB2_RELEASE_RESET __HAL_RCC_APB1_GRP2_RELEASE_RESET +#endif /* STM32C0 */ #define __BKP_CLK_DISABLE __HAL_RCC_BKP_CLK_DISABLE #define __BKP_CLK_ENABLE __HAL_RCC_BKP_CLK_ENABLE #define __BKP_FORCE_RESET __HAL_RCC_BKP_FORCE_RESET @@ -2779,6 +3191,11 @@ #define __HAL_RCC_WWDG_IS_CLK_ENABLED __HAL_RCC_WWDG1_IS_CLK_ENABLED #define __HAL_RCC_WWDG_IS_CLK_DISABLED __HAL_RCC_WWDG1_IS_CLK_DISABLED +#define RCC_SPI4CLKSOURCE_D2PCLK1 RCC_SPI4CLKSOURCE_D2PCLK2 +#define RCC_SPI5CLKSOURCE_D2PCLK1 RCC_SPI5CLKSOURCE_D2PCLK2 +#define RCC_SPI45CLKSOURCE_D2PCLK1 RCC_SPI45CLKSOURCE_D2PCLK2 +#define RCC_SPI45CLKSOURCE_CDPCLK1 RCC_SPI45CLKSOURCE_CDPCLK2 +#define RCC_SPI45CLKSOURCE_PCLK1 RCC_SPI45CLKSOURCE_PCLK2 #endif #define __WWDG_CLK_DISABLE __HAL_RCC_WWDG_CLK_DISABLE @@ -3243,9 +3660,13 @@ #define RCC_MCOSOURCE_PLLCLK_NODIV RCC_MCO1SOURCE_PLLCLK #define RCC_MCOSOURCE_PLLCLK_DIV2 RCC_MCO1SOURCE_PLLCLK_DIV2 -#if defined(STM32L4) +#if defined(STM32U0) +#define RCC_SYSCLKSOURCE_STATUS_PLLR RCC_SYSCLKSOURCE_STATUS_PLLCLK +#endif + +#if defined(STM32L4) || defined(STM32WB) || defined(STM32G0) || defined(STM32G4) || defined(STM32L5) || \ + defined(STM32WL) || defined(STM32C0) || defined(STM32H7RS) || defined(STM32U0) #define RCC_RTCCLKSOURCE_NO_CLK RCC_RTCCLKSOURCE_NONE -#elif defined(STM32WB) || defined(STM32G0) || defined(STM32G4) || defined(STM32L5) #else #define RCC_RTCCLKSOURCE_NONE RCC_RTCCLKSOURCE_NO_CLK #endif @@ -3346,8 +3767,10 @@ #define __HAL_RCC_GET_DFSDM_SOURCE __HAL_RCC_GET_DFSDM1_SOURCE #define RCC_DFSDM1CLKSOURCE_PCLK RCC_DFSDM1CLKSOURCE_PCLK2 #define RCC_SWPMI1CLKSOURCE_PCLK RCC_SWPMI1CLKSOURCE_PCLK1 +#if !defined(STM32U0) #define RCC_LPTIM1CLKSOURCE_PCLK RCC_LPTIM1CLKSOURCE_PCLK1 #define RCC_LPTIM2CLKSOURCE_PCLK RCC_LPTIM2CLKSOURCE_PCLK1 +#endif #define RCC_DFSDM1AUDIOCLKSOURCE_I2SAPB1 RCC_DFSDM1AUDIOCLKSOURCE_I2S1 #define RCC_DFSDM1AUDIOCLKSOURCE_I2SAPB2 RCC_DFSDM1AUDIOCLKSOURCE_I2S2 @@ -3356,6 +3779,124 @@ #define RCC_DFSDM1CLKSOURCE_APB2 RCC_DFSDM1CLKSOURCE_PCLK2 #define RCC_DFSDM2CLKSOURCE_APB2 RCC_DFSDM2CLKSOURCE_PCLK2 #define RCC_FMPI2C1CLKSOURCE_APB RCC_FMPI2C1CLKSOURCE_PCLK1 +#if defined(STM32U5) +#define MSIKPLLModeSEL RCC_MSIKPLL_MODE_SEL +#define MSISPLLModeSEL RCC_MSISPLL_MODE_SEL +#define __HAL_RCC_AHB21_CLK_DISABLE __HAL_RCC_AHB2_1_CLK_DISABLE +#define __HAL_RCC_AHB22_CLK_DISABLE __HAL_RCC_AHB2_2_CLK_DISABLE +#define __HAL_RCC_AHB1_CLK_Disable_Clear __HAL_RCC_AHB1_CLK_ENABLE +#define __HAL_RCC_AHB21_CLK_Disable_Clear __HAL_RCC_AHB2_1_CLK_ENABLE +#define __HAL_RCC_AHB22_CLK_Disable_Clear __HAL_RCC_AHB2_2_CLK_ENABLE +#define __HAL_RCC_AHB3_CLK_Disable_Clear __HAL_RCC_AHB3_CLK_ENABLE +#define __HAL_RCC_APB1_CLK_Disable_Clear __HAL_RCC_APB1_CLK_ENABLE +#define __HAL_RCC_APB2_CLK_Disable_Clear __HAL_RCC_APB2_CLK_ENABLE +#define __HAL_RCC_APB3_CLK_Disable_Clear __HAL_RCC_APB3_CLK_ENABLE +#define IS_RCC_MSIPLLModeSelection IS_RCC_MSIPLLMODE_SELECT +#define RCC_PERIPHCLK_CLK48 
RCC_PERIPHCLK_ICLK +#define RCC_CLK48CLKSOURCE_HSI48 RCC_ICLK_CLKSOURCE_HSI48 +#define RCC_CLK48CLKSOURCE_PLL2 RCC_ICLK_CLKSOURCE_PLL2 +#define RCC_CLK48CLKSOURCE_PLL1 RCC_ICLK_CLKSOURCE_PLL1 +#define RCC_CLK48CLKSOURCE_MSIK RCC_ICLK_CLKSOURCE_MSIK +#define __HAL_RCC_ADC1_CLK_ENABLE __HAL_RCC_ADC12_CLK_ENABLE +#define __HAL_RCC_ADC1_CLK_DISABLE __HAL_RCC_ADC12_CLK_DISABLE +#define __HAL_RCC_ADC1_IS_CLK_ENABLED __HAL_RCC_ADC12_IS_CLK_ENABLED +#define __HAL_RCC_ADC1_IS_CLK_DISABLED __HAL_RCC_ADC12_IS_CLK_DISABLED +#define __HAL_RCC_ADC1_FORCE_RESET __HAL_RCC_ADC12_FORCE_RESET +#define __HAL_RCC_ADC1_RELEASE_RESET __HAL_RCC_ADC12_RELEASE_RESET +#define __HAL_RCC_ADC1_CLK_SLEEP_ENABLE __HAL_RCC_ADC12_CLK_SLEEP_ENABLE +#define __HAL_RCC_ADC1_CLK_SLEEP_DISABLE __HAL_RCC_ADC12_CLK_SLEEP_DISABLE +#define __HAL_RCC_GET_CLK48_SOURCE __HAL_RCC_GET_ICLK_SOURCE +#define __HAL_RCC_PLLFRACN_ENABLE __HAL_RCC_PLL_FRACN_ENABLE +#define __HAL_RCC_PLLFRACN_DISABLE __HAL_RCC_PLL_FRACN_DISABLE +#define __HAL_RCC_PLLFRACN_CONFIG __HAL_RCC_PLL_FRACN_CONFIG +#define IS_RCC_PLLFRACN_VALUE IS_RCC_PLL_FRACN_VALUE +#endif /* STM32U5 */ + +#if defined(STM32H5) +#define __HAL_RCC_PLLFRACN_ENABLE __HAL_RCC_PLL_FRACN_ENABLE +#define __HAL_RCC_PLLFRACN_DISABLE __HAL_RCC_PLL_FRACN_DISABLE +#define __HAL_RCC_PLLFRACN_CONFIG __HAL_RCC_PLL_FRACN_CONFIG +#define IS_RCC_PLLFRACN_VALUE IS_RCC_PLL_FRACN_VALUE + +#define RCC_PLLSOURCE_NONE RCC_PLL1_SOURCE_NONE +#define RCC_PLLSOURCE_HSI RCC_PLL1_SOURCE_HSI +#define RCC_PLLSOURCE_CSI RCC_PLL1_SOURCE_CSI +#define RCC_PLLSOURCE_HSE RCC_PLL1_SOURCE_HSE +#define RCC_PLLVCIRANGE_0 RCC_PLL1_VCIRANGE_0 +#define RCC_PLLVCIRANGE_1 RCC_PLL1_VCIRANGE_1 +#define RCC_PLLVCIRANGE_2 RCC_PLL1_VCIRANGE_2 +#define RCC_PLLVCIRANGE_3 RCC_PLL1_VCIRANGE_3 +#define RCC_PLL1VCOWIDE RCC_PLL1_VCORANGE_WIDE +#define RCC_PLL1VCOMEDIUM RCC_PLL1_VCORANGE_MEDIUM + +#define IS_RCC_PLLSOURCE IS_RCC_PLL1_SOURCE +#define IS_RCC_PLLRGE_VALUE IS_RCC_PLL1_VCIRGE_VALUE +#define IS_RCC_PLLVCORGE_VALUE IS_RCC_PLL1_VCORGE_VALUE +#define IS_RCC_PLLCLOCKOUT_VALUE IS_RCC_PLL1_CLOCKOUT_VALUE +#define IS_RCC_PLL_FRACN_VALUE IS_RCC_PLL1_FRACN_VALUE +#define IS_RCC_PLLM_VALUE IS_RCC_PLL1_DIVM_VALUE +#define IS_RCC_PLLN_VALUE IS_RCC_PLL1_MULN_VALUE +#define IS_RCC_PLLP_VALUE IS_RCC_PLL1_DIVP_VALUE +#define IS_RCC_PLLQ_VALUE IS_RCC_PLL1_DIVQ_VALUE +#define IS_RCC_PLLR_VALUE IS_RCC_PLL1_DIVR_VALUE + +#define __HAL_RCC_PLL_ENABLE __HAL_RCC_PLL1_ENABLE +#define __HAL_RCC_PLL_DISABLE __HAL_RCC_PLL1_DISABLE +#define __HAL_RCC_PLL_FRACN_ENABLE __HAL_RCC_PLL1_FRACN_ENABLE +#define __HAL_RCC_PLL_FRACN_DISABLE __HAL_RCC_PLL1_FRACN_DISABLE +#define __HAL_RCC_PLL_CONFIG __HAL_RCC_PLL1_CONFIG +#define __HAL_RCC_PLL_PLLSOURCE_CONFIG __HAL_RCC_PLL1_PLLSOURCE_CONFIG +#define __HAL_RCC_PLL_DIVM_CONFIG __HAL_RCC_PLL1_DIVM_CONFIG +#define __HAL_RCC_PLL_FRACN_CONFIG __HAL_RCC_PLL1_FRACN_CONFIG +#define __HAL_RCC_PLL_VCIRANGE __HAL_RCC_PLL1_VCIRANGE +#define __HAL_RCC_PLL_VCORANGE __HAL_RCC_PLL1_VCORANGE +#define __HAL_RCC_GET_PLL_OSCSOURCE __HAL_RCC_GET_PLL1_OSCSOURCE +#define __HAL_RCC_PLLCLKOUT_ENABLE __HAL_RCC_PLL1_CLKOUT_ENABLE +#define __HAL_RCC_PLLCLKOUT_DISABLE __HAL_RCC_PLL1_CLKOUT_DISABLE +#define __HAL_RCC_GET_PLLCLKOUT_CONFIG __HAL_RCC_GET_PLL1_CLKOUT_CONFIG + +#define __HAL_RCC_PLL2FRACN_ENABLE __HAL_RCC_PLL2_FRACN_ENABLE +#define __HAL_RCC_PLL2FRACN_DISABLE __HAL_RCC_PLL2_FRACN_DISABLE +#define __HAL_RCC_PLL2CLKOUT_ENABLE __HAL_RCC_PLL2_CLKOUT_ENABLE +#define __HAL_RCC_PLL2CLKOUT_DISABLE __HAL_RCC_PLL2_CLKOUT_DISABLE +#define 
__HAL_RCC_PLL2FRACN_CONFIG __HAL_RCC_PLL2_FRACN_CONFIG +#define __HAL_RCC_GET_PLL2CLKOUT_CONFIG __HAL_RCC_GET_PLL2_CLKOUT_CONFIG + +#define __HAL_RCC_PLL3FRACN_ENABLE __HAL_RCC_PLL3_FRACN_ENABLE +#define __HAL_RCC_PLL3FRACN_DISABLE __HAL_RCC_PLL3_FRACN_DISABLE +#define __HAL_RCC_PLL3CLKOUT_ENABLE __HAL_RCC_PLL3_CLKOUT_ENABLE +#define __HAL_RCC_PLL3CLKOUT_DISABLE __HAL_RCC_PLL3_CLKOUT_DISABLE +#define __HAL_RCC_PLL3FRACN_CONFIG __HAL_RCC_PLL3_FRACN_CONFIG +#define __HAL_RCC_GET_PLL3CLKOUT_CONFIG __HAL_RCC_GET_PLL3_CLKOUT_CONFIG + +#define RCC_PLL2VCIRANGE_0 RCC_PLL2_VCIRANGE_0 +#define RCC_PLL2VCIRANGE_1 RCC_PLL2_VCIRANGE_1 +#define RCC_PLL2VCIRANGE_2 RCC_PLL2_VCIRANGE_2 +#define RCC_PLL2VCIRANGE_3 RCC_PLL2_VCIRANGE_3 + +#define RCC_PLL2VCOWIDE RCC_PLL2_VCORANGE_WIDE +#define RCC_PLL2VCOMEDIUM RCC_PLL2_VCORANGE_MEDIUM + +#define RCC_PLL2SOURCE_NONE RCC_PLL2_SOURCE_NONE +#define RCC_PLL2SOURCE_HSI RCC_PLL2_SOURCE_HSI +#define RCC_PLL2SOURCE_CSI RCC_PLL2_SOURCE_CSI +#define RCC_PLL2SOURCE_HSE RCC_PLL2_SOURCE_HSE + +#define RCC_PLL3VCIRANGE_0 RCC_PLL3_VCIRANGE_0 +#define RCC_PLL3VCIRANGE_1 RCC_PLL3_VCIRANGE_1 +#define RCC_PLL3VCIRANGE_2 RCC_PLL3_VCIRANGE_2 +#define RCC_PLL3VCIRANGE_3 RCC_PLL3_VCIRANGE_3 + +#define RCC_PLL3VCOWIDE RCC_PLL3_VCORANGE_WIDE +#define RCC_PLL3VCOMEDIUM RCC_PLL3_VCORANGE_MEDIUM + +#define RCC_PLL3SOURCE_NONE RCC_PLL3_SOURCE_NONE +#define RCC_PLL3SOURCE_HSI RCC_PLL3_SOURCE_HSI +#define RCC_PLL3SOURCE_CSI RCC_PLL3_SOURCE_CSI +#define RCC_PLL3SOURCE_HSE RCC_PLL3_SOURCE_HSE + + +#endif /* STM32H5 */ /** * @} @@ -3373,7 +3914,9 @@ /** @defgroup HAL_RTC_Aliased_Macros HAL RTC Aliased Macros maintained for legacy purpose * @{ */ -#if defined (STM32G0) || defined (STM32L5) || defined (STM32L412xx) || defined (STM32L422xx) || defined (STM32L4P5xx) || defined (STM32L4Q5xx) || defined (STM32G4) +#if defined (STM32G0) || defined (STM32L5) || defined (STM32L412xx) || defined (STM32L422xx) || \ + defined (STM32L4P5xx)|| defined (STM32L4Q5xx) || defined (STM32G4) || defined (STM32WL) || defined (STM32U5) || \ + defined (STM32WBA) || defined (STM32H5) || defined (STM32C0) || defined (STM32H7RS) || defined (STM32U0) #else #define __HAL_RTC_CLEAR_FLAG __HAL_RTC_EXTI_CLEAR_FLAG #endif @@ -3393,21 +3936,28 @@ #else #define __HAL_RTC_EXTI_CLEAR_FLAG(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_CLEAR_FLAG() : \ (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_CLEAR_FLAG() : \ - __HAL_RTC_TAMPER_TIMESTAMP_EXTI_CLEAR_FLAG())) + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_CLEAR_FLAG())) #define __HAL_RTC_EXTI_ENABLE_IT(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_ENABLE_IT() : \ - (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_IT() : \ - __HAL_RTC_TAMPER_TIMESTAMP_EXTI_ENABLE_IT())) + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_IT() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_ENABLE_IT())) #define __HAL_RTC_EXTI_DISABLE_IT(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_DISABLE_IT() : \ - (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_DISABLE_IT() : \ - __HAL_RTC_TAMPER_TIMESTAMP_EXTI_DISABLE_IT())) + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? 
__HAL_RTC_WAKEUPTIMER_EXTI_DISABLE_IT() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_DISABLE_IT())) #define __HAL_RTC_EXTI_GET_FLAG(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_GET_FLAG() : \ - (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_GET_FLAG() : \ - __HAL_RTC_TAMPER_TIMESTAMP_EXTI_GET_FLAG())) + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_GET_FLAG() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_GET_FLAG())) #define __HAL_RTC_EXTI_GENERATE_SWIT(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_GENERATE_SWIT() : \ - (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_GENERATE_SWIT() : \ - __HAL_RTC_TAMPER_TIMESTAMP_EXTI_GENERATE_SWIT())) + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_GENERATE_SWIT() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_GENERATE_SWIT())) #endif /* STM32F1 */ +#if defined (STM32F0) || defined (STM32F2) || defined (STM32F3) || defined (STM32F4) || defined (STM32F7) || \ + defined (STM32H7) || \ + defined (STM32L0) || defined (STM32L1) || \ + defined (STM32WB) +#define __HAL_RTC_TAMPER_GET_IT __HAL_RTC_TAMPER_GET_FLAG +#endif + #define IS_ALARM IS_RTC_ALARM #define IS_ALARM_MASK IS_RTC_ALARM_MASK #define IS_TAMPER IS_RTC_TAMPER @@ -3426,17 +3976,31 @@ #define __RTC_WRITEPROTECTION_ENABLE __HAL_RTC_WRITEPROTECTION_ENABLE #define __RTC_WRITEPROTECTION_DISABLE __HAL_RTC_WRITEPROTECTION_DISABLE +#if defined (STM32H5) +#define __HAL_RCC_RTCAPB_CLK_ENABLE __HAL_RCC_RTC_CLK_ENABLE +#define __HAL_RCC_RTCAPB_CLK_DISABLE __HAL_RCC_RTC_CLK_DISABLE +#endif /* STM32H5 */ + /** * @} */ -/** @defgroup HAL_SD_Aliased_Macros HAL SD Aliased Macros maintained for legacy purpose +/** @defgroup HAL_SD_Aliased_Macros HAL SD/MMC Aliased Macros maintained for legacy purpose * @{ */ #define SD_OCR_CID_CSD_OVERWRIETE SD_OCR_CID_CSD_OVERWRITE #define SD_CMD_SD_APP_STAUS SD_CMD_SD_APP_STATUS +#if !defined(STM32F1) && !defined(STM32F2) && !defined(STM32F4) && !defined(STM32L1) +#define eMMC_HIGH_VOLTAGE_RANGE EMMC_HIGH_VOLTAGE_RANGE +#define eMMC_DUAL_VOLTAGE_RANGE EMMC_DUAL_VOLTAGE_RANGE +#define eMMC_LOW_VOLTAGE_RANGE EMMC_LOW_VOLTAGE_RANGE + +#define SDMMC_NSpeed_CLK_DIV SDMMC_NSPEED_CLK_DIV +#define SDMMC_HSpeed_CLK_DIV SDMMC_HSPEED_CLK_DIV +#endif + #if defined(STM32F4) || defined(STM32F2) #define SD_SDMMC_DISABLED SD_SDIO_DISABLED #define SD_SDMMC_FUNCTION_BUSY SD_SDIO_FUNCTION_BUSY @@ -3481,9 +4045,9 @@ #define __HAL_SD_SDIO_CLEAR_FLAG __HAL_SD_SDMMC_CLEAR_FLAG #define __HAL_SD_SDIO_GET_IT __HAL_SD_SDMMC_GET_IT #define __HAL_SD_SDIO_CLEAR_IT __HAL_SD_SDMMC_CLEAR_IT -#define SDIO_STATIC_FLAGS SDMMC_STATIC_FLAGS -#define SDIO_CMD0TIMEOUT SDMMC_CMD0TIMEOUT -#define SD_SDIO_SEND_IF_COND SD_SDMMC_SEND_IF_COND +#define SDIO_STATIC_FLAGS SDMMC_STATIC_FLAGS +#define SDIO_CMD0TIMEOUT SDMMC_CMD0TIMEOUT +#define SD_SDIO_SEND_IF_COND SD_SDMMC_SEND_IF_COND /* alias CMSIS for compatibilities */ #define SDIO_IRQn SDMMC1_IRQn #define SDIO_IRQHandler SDMMC1_IRQHandler @@ -3589,6 +4153,13 @@ #define __HAL_USART_GETCLOCKSOURCE USART_GETCLOCKSOURCE #define __USART_GETCLOCKSOURCE USART_GETCLOCKSOURCE +#if defined(STM32F0) || defined(STM32F3) || defined(STM32F7) +#define USART_OVERSAMPLING_16 0x00000000U +#define USART_OVERSAMPLING_8 USART_CR1_OVER8 + +#define IS_USART_OVERSAMPLING(__SAMPLING__) (((__SAMPLING__) == USART_OVERSAMPLING_16) || \ + ((__SAMPLING__) == USART_OVERSAMPLING_8)) +#endif /* STM32F0 || 
STM32F3 || STM32F7 */ /** * @} */ @@ -3668,6 +4239,9 @@ #define __HAL_TIM_GetCompare __HAL_TIM_GET_COMPARE #define TIM_BREAKINPUTSOURCE_DFSDM TIM_BREAKINPUTSOURCE_DFSDM1 + +#define TIM_OCMODE_ASSYMETRIC_PWM1 TIM_OCMODE_ASYMMETRIC_PWM1 +#define TIM_OCMODE_ASSYMETRIC_PWM2 TIM_OCMODE_ASYMMETRIC_PWM2 /** * @} */ @@ -3751,13 +4325,23 @@ /** @defgroup HAL_QSPI_Aliased_Macros HAL QSPI Aliased Macros maintained for legacy purpose * @{ */ -#if defined (STM32L4) || defined (STM32F4) || defined (STM32F7) +#if defined (STM32L4) || defined (STM32F4) || defined (STM32F7) || defined(STM32H7) #define HAL_QPSI_TIMEOUT_DEFAULT_VALUE HAL_QSPI_TIMEOUT_DEFAULT_VALUE #endif /* STM32L4 || STM32F4 || STM32F7 */ /** * @} */ +/** @defgroup HAL_Generic_Aliased_Macros HAL Generic Aliased Macros maintained for legacy purpose + * @{ + */ +#if defined (STM32F7) +#define ART_ACCLERATOR_ENABLE ART_ACCELERATOR_ENABLE +#endif /* STM32F7 */ +/** + * @} + */ + /** @defgroup HAL_PPP_Aliased_Macros HAL PPP Aliased Macros maintained for legacy purpose * @{ */ @@ -3772,5 +4356,4 @@ #endif /* STM32_HAL_LEGACY */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal.h index 209864d5..f7eb847b 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal.h @@ -7,13 +7,12 @@ ****************************************************************************** * @attention * - *
<h2><center>&copy; Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -295,4 +294,4 @@ void HAL_DisableMemorySwappingBank(void); #endif /* __STM32F4xx_HAL_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_adc.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_adc.h index a041e769..c79a073e 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_adc.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_adc.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
<h2><center>&copy; Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -22,7 +21,7 @@ #define __STM32F4xx_ADC_H #ifdef __cplusplus - extern "C" { +extern "C" { #endif /* Includes ------------------------------------------------------------------*/ @@ -37,15 +36,15 @@ /** @addtogroup ADC * @{ - */ + */ /* Exported types ------------------------------------------------------------*/ /** @defgroup ADC_Exported_Types ADC Exported Types * @{ */ -/** - * @brief Structure definition of ADC and regular group initialization +/** + * @brief Structure definition of ADC and regular group initialization * @note Parameters of this structure are shared within 2 scopes: * - Scope entire ADC (affects regular and injected groups): ClockPrescaler, Resolution, ScanConvMode, DataAlign, ScanConvMode, EOCSelection, LowPowerAutoWait, LowPowerAutoPowerOff, ChannelsBank. * - Scope regular group: ContinuousConvMode, NbrOfConversion, DiscontinuousConvMode, NbrOfDiscConversion, ExternalTrigConvEdge, ExternalTrigConv. @@ -59,7 +58,7 @@ */ typedef struct { - uint32_t ClockPrescaler; /*!< Select ADC clock prescaler. The clock is common for + uint32_t ClockPrescaler; /*!< Select ADC clock prescaler. The clock is common for all the ADCs. This parameter can be a value of @ref ADC_ClockPrescaler */ uint32_t Resolution; /*!< Configures the ADC resolution. @@ -102,20 +101,20 @@ typedef struct If trigger is set to ADC_SOFTWARE_START, this parameter is discarded. This parameter can be a value of @ref ADC_External_trigger_edge_Regular */ FunctionalState DMAContinuousRequests; /*!< Specifies whether the DMA requests are performed in one shot mode (DMA transfer stop when number of conversions is reached) - or in Continuous mode (DMA transfer unlimited, whatever number of conversions). - Note: In continuous mode, DMA must be configured in circular mode. Otherwise an overrun will be triggered when DMA buffer maximum pointer is reached. - Note: This parameter must be modified when no conversion is on going on both regular and injected groups (ADC disabled, or ADC enabled without continuous mode or external trigger that could launch a conversion). - This parameter can be set to ENABLE or DISABLE. */ -}ADC_InitTypeDef; + or in Continuous mode (DMA transfer unlimited, whatever number of conversions). + Note: In continuous mode, DMA must be configured in circular mode. Otherwise an overrun will be triggered when DMA buffer maximum pointer is reached. + Note: This parameter must be modified when no conversion is on going on both regular and injected groups (ADC disabled, or ADC enabled without continuous mode or external trigger that could launch a conversion). + This parameter can be set to ENABLE or DISABLE. */ +} ADC_InitTypeDef; -/** - * @brief Structure definition of ADC channel for regular group +/** + * @brief Structure definition of ADC channel for regular group * @note The setting of these parameters with function HAL_ADC_ConfigChannel() is conditioned to ADC state. 
* ADC can be either disabled or enabled without conversion on going on regular group. - */ -typedef struct + */ +typedef struct { uint32_t Channel; /*!< Specifies the channel to configure into ADC regular group. This parameter can be a value of @ref ADC_channels */ @@ -131,31 +130,31 @@ typedef struct sampling time constraints must be respected (sampling time can be adjusted in function of ADC clock frequency and sampling time setting) Refer to device datasheet for timings values, parameters TS_vrefint, TS_temp (values rough order: 4us min). */ uint32_t Offset; /*!< Reserved for future use, can be set to 0 */ -}ADC_ChannelConfTypeDef; +} ADC_ChannelConfTypeDef; -/** - * @brief ADC Configuration multi-mode structure definition - */ +/** + * @brief ADC Configuration multi-mode structure definition + */ typedef struct { uint32_t WatchdogMode; /*!< Configures the ADC analog watchdog mode. This parameter can be a value of @ref ADC_analog_watchdog_selection */ uint32_t HighThreshold; /*!< Configures the ADC analog watchdog High threshold value. - This parameter must be a 12-bit value. */ + This parameter must be a 12-bit value. */ uint32_t LowThreshold; /*!< Configures the ADC analog watchdog High threshold value. This parameter must be a 12-bit value. */ - uint32_t Channel; /*!< Configures ADC channel for the analog watchdog. - This parameter has an effect only if watchdog mode is configured on single channel - This parameter can be a value of @ref ADC_channels */ + uint32_t Channel; /*!< Configures ADC channel for the analog watchdog. + This parameter has an effect only if watchdog mode is configured on single channel + This parameter can be a value of @ref ADC_channels */ FunctionalState ITMode; /*!< Specifies whether the analog watchdog is configured is interrupt mode or in polling mode. 
This parameter can be set to ENABLE or DISABLE */ uint32_t WatchdogNumber; /*!< Reserved for future use, can be set to 0 */ -}ADC_AnalogWDGConfTypeDef; +} ADC_AnalogWDGConfTypeDef; -/** +/** * @brief HAL ADC state machine: ADC states definition (bitfields) - */ + */ /* States of ADC global scope */ #define HAL_ADC_STATE_RESET 0x00000000U /*!< ADC not yet initialized or disabled */ #define HAL_ADC_STATE_READY 0x00000001U /*!< ADC peripheral ready for use */ @@ -187,9 +186,9 @@ typedef struct #define HAL_ADC_STATE_MULTIMODE_SLAVE 0x00100000U /*!< Not available on STM32F4 device: ADC in multimode slave state, controlled by another ADC master ( */ -/** +/** * @brief ADC handle Structure definition - */ + */ #if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) typedef struct __ADC_HandleTypeDef #else @@ -218,7 +217,7 @@ typedef struct void (* MspInitCallback)(struct __ADC_HandleTypeDef *hadc); /*!< ADC Msp Init callback */ void (* MspDeInitCallback)(struct __ADC_HandleTypeDef *hadc); /*!< ADC Msp DeInit callback */ #endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ -}ADC_HandleTypeDef; +} ADC_HandleTypeDef; #if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) /** @@ -269,18 +268,18 @@ typedef void (*pADC_CallbackTypeDef)(ADC_HandleTypeDef *hadc); /*!< pointer to /** @defgroup ADC_ClockPrescaler ADC Clock Prescaler * @{ - */ + */ #define ADC_CLOCK_SYNC_PCLK_DIV2 0x00000000U #define ADC_CLOCK_SYNC_PCLK_DIV4 ((uint32_t)ADC_CCR_ADCPRE_0) #define ADC_CLOCK_SYNC_PCLK_DIV6 ((uint32_t)ADC_CCR_ADCPRE_1) #define ADC_CLOCK_SYNC_PCLK_DIV8 ((uint32_t)ADC_CCR_ADCPRE) /** * @} - */ + */ /** @defgroup ADC_delay_between_2_sampling_phases ADC Delay Between 2 Sampling Phases * @{ - */ + */ #define ADC_TWOSAMPLINGDELAY_5CYCLES 0x00000000U #define ADC_TWOSAMPLINGDELAY_6CYCLES ((uint32_t)ADC_CCR_DELAY_0) #define ADC_TWOSAMPLINGDELAY_7CYCLES ((uint32_t)ADC_CCR_DELAY_1) @@ -299,29 +298,29 @@ typedef void (*pADC_CallbackTypeDef)(ADC_HandleTypeDef *hadc); /*!< pointer to #define ADC_TWOSAMPLINGDELAY_20CYCLES ((uint32_t)ADC_CCR_DELAY) /** * @} - */ + */ /** @defgroup ADC_Resolution ADC Resolution * @{ - */ + */ #define ADC_RESOLUTION_12B 0x00000000U #define ADC_RESOLUTION_10B ((uint32_t)ADC_CR1_RES_0) #define ADC_RESOLUTION_8B ((uint32_t)ADC_CR1_RES_1) #define ADC_RESOLUTION_6B ((uint32_t)ADC_CR1_RES) /** * @} - */ + */ /** @defgroup ADC_External_trigger_edge_Regular ADC External Trigger Edge Regular * @{ - */ + */ #define ADC_EXTERNALTRIGCONVEDGE_NONE 0x00000000U #define ADC_EXTERNALTRIGCONVEDGE_RISING ((uint32_t)ADC_CR2_EXTEN_0) #define ADC_EXTERNALTRIGCONVEDGE_FALLING ((uint32_t)ADC_CR2_EXTEN_1) #define ADC_EXTERNALTRIGCONVEDGE_RISINGFALLING ((uint32_t)ADC_CR2_EXTEN) /** * @} - */ + */ /** @defgroup ADC_External_trigger_Source_Regular ADC External Trigger Source Regular * @{ @@ -347,20 +346,20 @@ typedef void (*pADC_CallbackTypeDef)(ADC_HandleTypeDef *hadc); /*!< pointer to #define ADC_SOFTWARE_START ((uint32_t)ADC_CR2_EXTSEL + 1U) /** * @} - */ + */ /** @defgroup ADC_Data_align ADC Data Align * @{ - */ + */ #define ADC_DATAALIGN_RIGHT 0x00000000U #define ADC_DATAALIGN_LEFT ((uint32_t)ADC_CR2_ALIGN) /** * @} - */ + */ /** @defgroup ADC_channels ADC Common Channels * @{ - */ + */ #define ADC_CHANNEL_0 0x00000000U #define ADC_CHANNEL_1 ((uint32_t)ADC_CR1_AWDCH_0) #define ADC_CHANNEL_2 ((uint32_t)ADC_CR1_AWDCH_1) @@ -385,11 +384,11 @@ typedef void (*pADC_CallbackTypeDef)(ADC_HandleTypeDef *hadc); /*!< pointer to #define ADC_CHANNEL_VBAT ((uint32_t)ADC_CHANNEL_18) /** * @} - */ + */ /** @defgroup ADC_sampling_times ADC Sampling Times * @{ - */ + */ 
#define ADC_SAMPLETIME_3CYCLES 0x00000000U #define ADC_SAMPLETIME_15CYCLES ((uint32_t)ADC_SMPR1_SMP10_0) #define ADC_SAMPLETIME_28CYCLES ((uint32_t)ADC_SMPR1_SMP10_1) @@ -400,21 +399,21 @@ typedef void (*pADC_CallbackTypeDef)(ADC_HandleTypeDef *hadc); /*!< pointer to #define ADC_SAMPLETIME_480CYCLES ((uint32_t)ADC_SMPR1_SMP10) /** * @} - */ + */ - /** @defgroup ADC_EOCSelection ADC EOC Selection +/** @defgroup ADC_EOCSelection ADC EOC Selection * @{ - */ + */ #define ADC_EOC_SEQ_CONV 0x00000000U #define ADC_EOC_SINGLE_CONV 0x00000001U #define ADC_EOC_SINGLE_SEQ_CONV 0x00000002U /*!< reserved for future use */ /** * @} - */ + */ /** @defgroup ADC_Event_type ADC Event Type * @{ - */ + */ #define ADC_AWD_EVENT ((uint32_t)ADC_FLAG_AWD) #define ADC_OVR_EVENT ((uint32_t)ADC_FLAG_OVR) /** @@ -423,7 +422,7 @@ typedef void (*pADC_CallbackTypeDef)(ADC_HandleTypeDef *hadc); /*!< pointer to /** @defgroup ADC_analog_watchdog_selection ADC Analog Watchdog Selection * @{ - */ + */ #define ADC_ANALOGWATCHDOG_SINGLE_REG ((uint32_t)(ADC_CR1_AWDSGL | ADC_CR1_AWDEN)) #define ADC_ANALOGWATCHDOG_SINGLE_INJEC ((uint32_t)(ADC_CR1_AWDSGL | ADC_CR1_JAWDEN)) #define ADC_ANALOGWATCHDOG_SINGLE_REGINJEC ((uint32_t)(ADC_CR1_AWDSGL | ADC_CR1_AWDEN | ADC_CR1_JAWDEN)) @@ -433,22 +432,22 @@ typedef void (*pADC_CallbackTypeDef)(ADC_HandleTypeDef *hadc); /*!< pointer to #define ADC_ANALOGWATCHDOG_NONE 0x00000000U /** * @} - */ - + */ + /** @defgroup ADC_interrupts_definition ADC Interrupts Definition * @{ - */ + */ #define ADC_IT_EOC ((uint32_t)ADC_CR1_EOCIE) #define ADC_IT_AWD ((uint32_t)ADC_CR1_AWDIE) #define ADC_IT_JEOC ((uint32_t)ADC_CR1_JEOCIE) #define ADC_IT_OVR ((uint32_t)ADC_CR1_OVRIE) /** * @} - */ - + */ + /** @defgroup ADC_flags_definition ADC Flags Definition * @{ - */ + */ #define ADC_FLAG_AWD ((uint32_t)ADC_SR_AWD) #define ADC_FLAG_EOC ((uint32_t)ADC_SR_EOC) #define ADC_FLAG_JEOC ((uint32_t)ADC_SR_JEOC) @@ -457,11 +456,11 @@ typedef void (*pADC_CallbackTypeDef)(ADC_HandleTypeDef *hadc); /*!< pointer to #define ADC_FLAG_OVR ((uint32_t)ADC_SR_OVR) /** * @} - */ + */ /** @defgroup ADC_channels_type ADC Channels Type * @{ - */ + */ #define ADC_ALL_CHANNELS 0x00000001U #define ADC_REGULAR_CHANNELS 0x00000002U /*!< reserved for future use */ #define ADC_INJECTED_CHANNELS 0x00000003U /*!< reserved for future use */ @@ -471,7 +470,7 @@ typedef void (*pADC_CallbackTypeDef)(ADC_HandleTypeDef *hadc); /*!< pointer to /** * @} - */ + */ /* Exported macro ------------------------------------------------------------*/ /** @defgroup ADC_Exported_Macros ADC Exported Macros @@ -563,10 +562,10 @@ typedef void (*pADC_CallbackTypeDef)(ADC_HandleTypeDef *hadc); /*!< pointer to * @{ */ /* Initialization/de-initialization functions ***********************************/ -HAL_StatusTypeDef HAL_ADC_Init(ADC_HandleTypeDef* hadc); +HAL_StatusTypeDef HAL_ADC_Init(ADC_HandleTypeDef *hadc); HAL_StatusTypeDef HAL_ADC_DeInit(ADC_HandleTypeDef *hadc); -void HAL_ADC_MspInit(ADC_HandleTypeDef* hadc); -void HAL_ADC_MspDeInit(ADC_HandleTypeDef* hadc); +void HAL_ADC_MspInit(ADC_HandleTypeDef *hadc); +void HAL_ADC_MspDeInit(ADC_HandleTypeDef *hadc); #if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) /* Callbacks Register/UnRegister functions ***********************************/ @@ -581,25 +580,25 @@ HAL_StatusTypeDef HAL_ADC_UnRegisterCallback(ADC_HandleTypeDef *hadc, HAL_ADC_Ca * @{ */ /* I/O operation functions ******************************************************/ -HAL_StatusTypeDef HAL_ADC_Start(ADC_HandleTypeDef* hadc); -HAL_StatusTypeDef 
HAL_ADC_Stop(ADC_HandleTypeDef* hadc); -HAL_StatusTypeDef HAL_ADC_PollForConversion(ADC_HandleTypeDef* hadc, uint32_t Timeout); +HAL_StatusTypeDef HAL_ADC_Start(ADC_HandleTypeDef *hadc); +HAL_StatusTypeDef HAL_ADC_Stop(ADC_HandleTypeDef *hadc); +HAL_StatusTypeDef HAL_ADC_PollForConversion(ADC_HandleTypeDef *hadc, uint32_t Timeout); -HAL_StatusTypeDef HAL_ADC_PollForEvent(ADC_HandleTypeDef* hadc, uint32_t EventType, uint32_t Timeout); +HAL_StatusTypeDef HAL_ADC_PollForEvent(ADC_HandleTypeDef *hadc, uint32_t EventType, uint32_t Timeout); -HAL_StatusTypeDef HAL_ADC_Start_IT(ADC_HandleTypeDef* hadc); -HAL_StatusTypeDef HAL_ADC_Stop_IT(ADC_HandleTypeDef* hadc); +HAL_StatusTypeDef HAL_ADC_Start_IT(ADC_HandleTypeDef *hadc); +HAL_StatusTypeDef HAL_ADC_Stop_IT(ADC_HandleTypeDef *hadc); -void HAL_ADC_IRQHandler(ADC_HandleTypeDef* hadc); +void HAL_ADC_IRQHandler(ADC_HandleTypeDef *hadc); -HAL_StatusTypeDef HAL_ADC_Start_DMA(ADC_HandleTypeDef* hadc, uint32_t* pData, uint32_t Length); -HAL_StatusTypeDef HAL_ADC_Stop_DMA(ADC_HandleTypeDef* hadc); +HAL_StatusTypeDef HAL_ADC_Start_DMA(ADC_HandleTypeDef *hadc, uint32_t *pData, uint32_t Length); +HAL_StatusTypeDef HAL_ADC_Stop_DMA(ADC_HandleTypeDef *hadc); -uint32_t HAL_ADC_GetValue(ADC_HandleTypeDef* hadc); +uint32_t HAL_ADC_GetValue(ADC_HandleTypeDef *hadc); -void HAL_ADC_ConvCpltCallback(ADC_HandleTypeDef* hadc); -void HAL_ADC_ConvHalfCpltCallback(ADC_HandleTypeDef* hadc); -void HAL_ADC_LevelOutOfWindowCallback(ADC_HandleTypeDef* hadc); +void HAL_ADC_ConvCpltCallback(ADC_HandleTypeDef *hadc); +void HAL_ADC_ConvHalfCpltCallback(ADC_HandleTypeDef *hadc); +void HAL_ADC_LevelOutOfWindowCallback(ADC_HandleTypeDef *hadc); void HAL_ADC_ErrorCallback(ADC_HandleTypeDef *hadc); /** * @} @@ -609,8 +608,8 @@ void HAL_ADC_ErrorCallback(ADC_HandleTypeDef *hadc); * @{ */ /* Peripheral Control functions *************************************************/ -HAL_StatusTypeDef HAL_ADC_ConfigChannel(ADC_HandleTypeDef* hadc, ADC_ChannelConfTypeDef* sConfig); -HAL_StatusTypeDef HAL_ADC_AnalogWDGConfig(ADC_HandleTypeDef* hadc, ADC_AnalogWDGConfTypeDef* AnalogWDGConfig); +HAL_StatusTypeDef HAL_ADC_ConfigChannel(ADC_HandleTypeDef *hadc, ADC_ChannelConfTypeDef *sConfig); +HAL_StatusTypeDef HAL_ADC_AnalogWDGConfig(ADC_HandleTypeDef *hadc, ADC_AnalogWDGConfTypeDef *AnalogWDGConfig); /** * @} */ @@ -619,7 +618,7 @@ HAL_StatusTypeDef HAL_ADC_AnalogWDGConfig(ADC_HandleTypeDef* hadc, ADC_AnalogWDG * @{ */ /* Peripheral State functions ***************************************************/ -uint32_t HAL_ADC_GetState(ADC_HandleTypeDef* hadc); +uint32_t HAL_ADC_GetState(ADC_HandleTypeDef *hadc); uint32_t HAL_ADC_GetError(ADC_HandleTypeDef *hadc); /** * @} @@ -698,7 +697,7 @@ uint32_t HAL_ADC_GetError(ADC_HandleTypeDef *hadc); #define ADC_CLEAR_ERRORCODE(__HANDLE__) \ ((__HANDLE__)->ErrorCode = HAL_ADC_ERROR_NONE) - + #define IS_ADC_CLOCKPRESCALER(ADC_CLOCK) (((ADC_CLOCK) == ADC_CLOCK_SYNC_PCLK_DIV2) || \ ((ADC_CLOCK) == ADC_CLOCK_SYNC_PCLK_DIV4) || \ ((ADC_CLOCK) == ADC_CLOCK_SYNC_PCLK_DIV6) || \ @@ -782,7 +781,7 @@ uint32_t HAL_ADC_GetError(ADC_HandleTypeDef *hadc); /** * @brief Set ADC Regular channel sequence length. - * @param _NbrOfConversion_ Regular channel sequence length. + * @param _NbrOfConversion_ Regular channel sequence length. * @retval None */ #define ADC_SQR1(_NbrOfConversion_) (((_NbrOfConversion_) - (uint8_t)1U) << 20U) @@ -790,7 +789,7 @@ uint32_t HAL_ADC_GetError(ADC_HandleTypeDef *hadc); /** * @brief Set the ADC's sample time for channel numbers between 10 and 18. 
* @param _SAMPLETIME_ Sample time parameter. - * @param _CHANNELNB_ Channel number. + * @param _CHANNELNB_ Channel number. * @retval None */ #define ADC_SMPR1(_SAMPLETIME_, _CHANNELNB_) ((_SAMPLETIME_) << (3U * (((uint32_t)((uint16_t)(_CHANNELNB_))) - 10U))) @@ -798,7 +797,7 @@ uint32_t HAL_ADC_GetError(ADC_HandleTypeDef *hadc); /** * @brief Set the ADC's sample time for channel numbers between 0 and 9. * @param _SAMPLETIME_ Sample time parameter. - * @param _CHANNELNB_ Channel number. + * @param _CHANNELNB_ Channel number. * @retval None */ #define ADC_SMPR2(_SAMPLETIME_, _CHANNELNB_) ((_SAMPLETIME_) << (3U * ((uint32_t)((uint16_t)(_CHANNELNB_))))) @@ -806,7 +805,7 @@ uint32_t HAL_ADC_GetError(ADC_HandleTypeDef *hadc); /** * @brief Set the selected regular channel rank for rank between 1 and 6. * @param _CHANNELNB_ Channel number. - * @param _RANKNB_ Rank number. + * @param _RANKNB_ Rank number. * @retval None */ #define ADC_SQR3_RK(_CHANNELNB_, _RANKNB_) (((uint32_t)((uint16_t)(_CHANNELNB_))) << (5U * ((_RANKNB_) - 1U))) @@ -814,7 +813,7 @@ uint32_t HAL_ADC_GetError(ADC_HandleTypeDef *hadc); /** * @brief Set the selected regular channel rank for rank between 7 and 12. * @param _CHANNELNB_ Channel number. - * @param _RANKNB_ Rank number. + * @param _RANKNB_ Rank number. * @retval None */ #define ADC_SQR2_RK(_CHANNELNB_, _RANKNB_) (((uint32_t)((uint16_t)(_CHANNELNB_))) << (5U * ((_RANKNB_) - 7U))) @@ -822,7 +821,7 @@ uint32_t HAL_ADC_GetError(ADC_HandleTypeDef *hadc); /** * @brief Set the selected regular channel rank for rank between 13 and 16. * @param _CHANNELNB_ Channel number. - * @param _RANKNB_ Rank number. + * @param _RANKNB_ Rank number. * @retval None */ #define ADC_SQR1_RK(_CHANNELNB_, _RANKNB_) (((uint32_t)((uint16_t)(_CHANNELNB_))) << (5U * ((_RANKNB_) - 13U))) @@ -884,7 +883,7 @@ uint32_t HAL_ADC_GetError(ADC_HandleTypeDef *hadc); /** * @} - */ + */ /** * @} @@ -897,4 +896,3 @@ uint32_t HAL_ADC_GetError(ADC_HandleTypeDef *hadc); #endif /*__STM32F4xx_ADC_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_adc_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_adc_ex.h index b0cdd2ab..8ce8484d 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_adc_ex.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_adc_ex.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
<h2><center>&copy; Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -22,7 +21,7 @@ #define __STM32F4xx_ADC_EX_H #ifdef __cplusplus - extern "C" { +extern "C" { #endif /* Includes ------------------------------------------------------------------*/ @@ -34,14 +33,14 @@ /** @addtogroup ADCEx * @{ - */ + */ /* Exported types ------------------------------------------------------------*/ /** @defgroup ADCEx_Exported_Types ADC Exported Types * @{ */ - -/** + +/** * @brief ADC Configuration injected Channel structure definition * @note Parameters of this structure are shared within 2 scopes: * - Scope channel: InjectedChannel, InjectedRank, InjectedSamplingTime, InjectedOffset @@ -53,7 +52,7 @@ * - For all except parameters 'InjectedDiscontinuousConvMode' and 'AutoInjectedConv': ADC enabled without conversion on going on injected group. * - For parameters 'ExternalTrigInjecConv' and 'ExternalTrigInjecConvEdge': ADC enabled, even with conversion on going on injected group. */ -typedef struct +typedef struct { uint32_t InjectedChannel; /*!< Selection of ADC channel to configure This parameter can be a value of @ref ADC_channels @@ -77,17 +76,17 @@ typedef struct uint32_t InjectedNbrOfConversion; /*!< Specifies the number of ranks that will be converted within the injected group sequencer. To use the injected group sequencer and convert several ranks, parameter 'ScanConvMode' must be enabled. This parameter must be a number between Min_Data = 1 and Max_Data = 4. - Caution: this setting impacts the entire injected group. Therefore, call of HAL_ADCEx_InjectedConfigChannel() to + Caution: this setting impacts the entire injected group. Therefore, call of HAL_ADCEx_InjectedConfigChannel() to configure a channel on injected group can impact the configuration of other channels previously set. */ FunctionalState InjectedDiscontinuousConvMode; /*!< Specifies whether the conversions sequence of injected group is performed in Complete-sequence/Discontinuous-sequence (main sequence subdivided in successive parts). Discontinuous mode is used only if sequencer is enabled (parameter 'ScanConvMode'). If sequencer is disabled, this parameter is discarded. Discontinuous mode can be enabled only if continuous mode is disabled. If continuous mode is enabled, this parameter setting is discarded. This parameter can be set to ENABLE or DISABLE. Note: For injected group, number of discontinuous ranks increment is fixed to one-by-one. - Caution: this setting impacts the entire injected group. Therefore, call of HAL_ADCEx_InjectedConfigChannel() to + Caution: this setting impacts the entire injected group. Therefore, call of HAL_ADCEx_InjectedConfigChannel() to configure a channel on injected group can impact the configuration of other channels previously set. */ FunctionalState AutoInjectedConv; /*!< Enables or disables the selected ADC automatic injected group conversion after regular one - This parameter can be set to ENABLE or DISABLE. + This parameter can be set to ENABLE or DISABLE. 
Note: To use Automatic injected conversion, discontinuous mode must be disabled ('DiscontinuousConvMode' and 'InjectedDiscontinuousConvMode' set to DISABLE) Note: To use Automatic injected conversion, injected group external triggers must be disabled ('ExternalTrigInjecConv' set to ADC_SOFTWARE_START) Note: In case of DMA used with regular group: if DMA configured in normal mode (single shot) JAUTO will be stopped upon DMA transfer complete. @@ -105,22 +104,22 @@ typedef struct uint32_t ExternalTrigInjecConvEdge; /*!< Selects the external trigger edge of injected group. This parameter can be a value of @ref ADCEx_External_trigger_edge_Injected. If trigger is set to ADC_INJECTED_SOFTWARE_START, this parameter is discarded. - Caution: this setting impacts the entire injected group. Therefore, call of HAL_ADCEx_InjectedConfigChannel() to + Caution: this setting impacts the entire injected group. Therefore, call of HAL_ADCEx_InjectedConfigChannel() to configure a channel on injected group can impact the configuration of other channels previously set. */ -}ADC_InjectionConfTypeDef; +} ADC_InjectionConfTypeDef; -/** - * @brief ADC Configuration multi-mode structure definition - */ +/** + * @brief ADC Configuration multi-mode structure definition + */ typedef struct { - uint32_t Mode; /*!< Configures the ADC to operate in independent or multi mode. + uint32_t Mode; /*!< Configures the ADC to operate in independent or multi mode. This parameter can be a value of @ref ADCEx_Common_mode */ uint32_t DMAAccessMode; /*!< Configures the Direct memory access mode for multi ADC mode. This parameter can be a value of @ref ADCEx_Direct_memory_access_mode_for_multi_mode */ uint32_t TwoSamplingDelay; /*!< Configures the Delay between 2 sampling phases. This parameter can be a value of @ref ADC_delay_between_2_sampling_phases */ -}ADC_MultiModeTypeDef; +} ADC_MultiModeTypeDef; /** * @} @@ -133,7 +132,7 @@ typedef struct /** @defgroup ADCEx_Common_mode ADC Common Mode * @{ - */ + */ #define ADC_MODE_INDEPENDENT 0x00000000U #define ADC_DUALMODE_REGSIMULT_INJECSIMULT ((uint32_t)ADC_CCR_MULTI_0) #define ADC_DUALMODE_REGSIMULT_ALTERTRIG ((uint32_t)ADC_CCR_MULTI_1) @@ -149,33 +148,33 @@ typedef struct #define ADC_TRIPLEMODE_ALTERTRIG ((uint32_t)(ADC_CCR_MULTI_4 | ADC_CCR_MULTI_3 | ADC_CCR_MULTI_0)) /** * @} - */ + */ /** @defgroup ADCEx_Direct_memory_access_mode_for_multi_mode ADC Direct Memory Access Mode For Multi Mode * @{ - */ + */ #define ADC_DMAACCESSMODE_DISABLED 0x00000000U /*!< DMA mode disabled */ #define ADC_DMAACCESSMODE_1 ((uint32_t)ADC_CCR_DMA_0) /*!< DMA mode 1 enabled (2 / 3 half-words one by one - 1 then 2 then 3)*/ #define ADC_DMAACCESSMODE_2 ((uint32_t)ADC_CCR_DMA_1) /*!< DMA mode 2 enabled (2 / 3 half-words by pairs - 2&1 then 1&3 then 3&2)*/ #define ADC_DMAACCESSMODE_3 ((uint32_t)ADC_CCR_DMA) /*!< DMA mode 3 enabled (2 / 3 bytes by pairs - 2&1 then 1&3 then 3&2) */ /** * @} - */ + */ /** @defgroup ADCEx_External_trigger_edge_Injected ADC External Trigger Edge Injected * @{ - */ + */ #define ADC_EXTERNALTRIGINJECCONVEDGE_NONE 0x00000000U #define ADC_EXTERNALTRIGINJECCONVEDGE_RISING ((uint32_t)ADC_CR2_JEXTEN_0) #define ADC_EXTERNALTRIGINJECCONVEDGE_FALLING ((uint32_t)ADC_CR2_JEXTEN_1) #define ADC_EXTERNALTRIGINJECCONVEDGE_RISINGFALLING ((uint32_t)ADC_CR2_JEXTEN) /** * @} - */ + */ /** @defgroup ADCEx_External_trigger_Source_Injected ADC External Trigger Source Injected * @{ - */ + */ #define ADC_EXTERNALTRIGINJECCONV_T1_CC4 0x00000000U #define ADC_EXTERNALTRIGINJECCONV_T1_TRGO 
((uint32_t)ADC_CR2_JEXTSEL_0) #define ADC_EXTERNALTRIGINJECCONV_T2_CC1 ((uint32_t)ADC_CR2_JEXTSEL_1) @@ -195,11 +194,11 @@ typedef struct #define ADC_INJECTED_SOFTWARE_START ((uint32_t)ADC_CR2_JEXTSEL + 1U) /** * @} - */ + */ /** @defgroup ADCEx_injected_rank ADC Injected Rank * @{ - */ + */ #define ADC_INJECTED_RANK_1 0x00000001U #define ADC_INJECTED_RANK_2 0x00000002U #define ADC_INJECTED_RANK_3 0x00000003U @@ -220,18 +219,18 @@ typedef struct STM32F412Vx || STM32F412Rx || STM32F412Cx */ #if defined(STM32F411xE) || defined(STM32F413xx) || defined(STM32F423xx) || defined(STM32F427xx) || defined(STM32F437xx) ||\ - defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) + defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) #define ADC_CHANNEL_DIFFERENCIATION_TEMPSENSOR_VBAT 0x10000000U /* Dummy bit for driver internal usage, not used in ADC channel setting registers CR1 or SQRx */ #define ADC_CHANNEL_TEMPSENSOR ((uint32_t)ADC_CHANNEL_18 | ADC_CHANNEL_DIFFERENCIATION_TEMPSENSOR_VBAT) #endif /* STM32F411xE || STM32F413xx || STM32F423xx || STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ /** * @} - */ + */ /** * @} - */ + */ /* Exported macro ------------------------------------------------------------*/ /** @defgroup ADC_Exported_Macros ADC Exported Macros @@ -243,9 +242,9 @@ typedef struct * @note Use case of this macro: * On devices STM32F42x and STM32F43x, ADC internal channels * Vbat and VrefInt share the same internal path, only - * one of them can be enabled.This macro is to be used when ADC - * channels Vbat and VrefInt are selected, and must be called - * before starting conversion of ADC channel VrefInt in order + * one of them can be enabled.This macro is to be used when ADC + * channels Vbat and VrefInt are selected, and must be called + * before starting conversion of ADC channel VrefInt in order * to disable ADC channel Vbat. 
* @retval None */ @@ -253,7 +252,7 @@ typedef struct #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx */ /** * @} - */ + */ /* Exported functions --------------------------------------------------------*/ /** @addtogroup ADCEx_Exported_Functions @@ -265,24 +264,24 @@ typedef struct */ /* I/O operation functions ******************************************************/ -HAL_StatusTypeDef HAL_ADCEx_InjectedStart(ADC_HandleTypeDef* hadc); -HAL_StatusTypeDef HAL_ADCEx_InjectedStop(ADC_HandleTypeDef* hadc); -HAL_StatusTypeDef HAL_ADCEx_InjectedPollForConversion(ADC_HandleTypeDef* hadc, uint32_t Timeout); -HAL_StatusTypeDef HAL_ADCEx_InjectedStart_IT(ADC_HandleTypeDef* hadc); -HAL_StatusTypeDef HAL_ADCEx_InjectedStop_IT(ADC_HandleTypeDef* hadc); -uint32_t HAL_ADCEx_InjectedGetValue(ADC_HandleTypeDef* hadc, uint32_t InjectedRank); -HAL_StatusTypeDef HAL_ADCEx_MultiModeStart_DMA(ADC_HandleTypeDef* hadc, uint32_t* pData, uint32_t Length); -HAL_StatusTypeDef HAL_ADCEx_MultiModeStop_DMA(ADC_HandleTypeDef* hadc); -uint32_t HAL_ADCEx_MultiModeGetValue(ADC_HandleTypeDef* hadc); -void HAL_ADCEx_InjectedConvCpltCallback(ADC_HandleTypeDef* hadc); +HAL_StatusTypeDef HAL_ADCEx_InjectedStart(ADC_HandleTypeDef *hadc); +HAL_StatusTypeDef HAL_ADCEx_InjectedStop(ADC_HandleTypeDef *hadc); +HAL_StatusTypeDef HAL_ADCEx_InjectedPollForConversion(ADC_HandleTypeDef *hadc, uint32_t Timeout); +HAL_StatusTypeDef HAL_ADCEx_InjectedStart_IT(ADC_HandleTypeDef *hadc); +HAL_StatusTypeDef HAL_ADCEx_InjectedStop_IT(ADC_HandleTypeDef *hadc); +uint32_t HAL_ADCEx_InjectedGetValue(ADC_HandleTypeDef *hadc, uint32_t InjectedRank); +HAL_StatusTypeDef HAL_ADCEx_MultiModeStart_DMA(ADC_HandleTypeDef *hadc, uint32_t *pData, uint32_t Length); +HAL_StatusTypeDef HAL_ADCEx_MultiModeStop_DMA(ADC_HandleTypeDef *hadc); +uint32_t HAL_ADCEx_MultiModeGetValue(ADC_HandleTypeDef *hadc); +void HAL_ADCEx_InjectedConvCpltCallback(ADC_HandleTypeDef *hadc); /* Peripheral Control functions *************************************************/ -HAL_StatusTypeDef HAL_ADCEx_InjectedConfigChannel(ADC_HandleTypeDef* hadc,ADC_InjectionConfTypeDef* sConfigInjected); -HAL_StatusTypeDef HAL_ADCEx_MultiModeConfigChannel(ADC_HandleTypeDef* hadc, ADC_MultiModeTypeDef* multimode); +HAL_StatusTypeDef HAL_ADCEx_InjectedConfigChannel(ADC_HandleTypeDef *hadc, ADC_InjectionConfTypeDef *sConfigInjected); +HAL_StatusTypeDef HAL_ADCEx_MultiModeConfigChannel(ADC_HandleTypeDef *hadc, ADC_MultiModeTypeDef *multimode); /** * @} - */ + */ /** * @} @@ -309,7 +308,7 @@ HAL_StatusTypeDef HAL_ADCEx_MultiModeConfigChannel(ADC_HandleTypeDef* hadc, ADC_ #define IS_ADC_CHANNEL(CHANNEL) ((CHANNEL) <= ADC_CHANNEL_18) #endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F401xC || STM32F401xE || STM32F410xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ - + #if defined(STM32F411xE) || defined(STM32F413xx) || defined(STM32F423xx) || defined(STM32F427xx) || \ defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F446xx) || \ defined(STM32F469xx) || defined(STM32F479xx) @@ -361,7 +360,7 @@ HAL_StatusTypeDef HAL_ADCEx_MultiModeConfigChannel(ADC_HandleTypeDef* hadc, ADC_ /** * @brief Set the selected injected Channel rank. * @param _CHANNELNB_ Channel number. - * @param _RANKNB_ Rank number. + * @param _RANKNB_ Rank number. * @param _JSQR_JL_ Sequence length. 
* @retval None */ @@ -393,7 +392,7 @@ HAL_StatusTypeDef HAL_ADCEx_MultiModeConfigChannel(ADC_HandleTypeDef* hadc, ADC_ /** * @} - */ + */ /** * @} @@ -406,4 +405,3 @@ HAL_StatusTypeDef HAL_ADCEx_MultiModeConfigChannel(ADC_HandleTypeDef* hadc, ADC_ #endif /*__STM32F4xx_ADC_EX_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_cortex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_cortex.h index 95218cfb..76909305 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_cortex.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_cortex.h @@ -6,14 +6,12 @@ ****************************************************************************** * @attention * - *
<h2><center>&copy; Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.</center></h2>
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** */ @@ -286,8 +284,11 @@ void HAL_SYSTICK_Callback(void); #if (__MPU_PRESENT == 1U) void HAL_MPU_Enable(uint32_t MPU_Control); void HAL_MPU_Disable(void); +void HAL_MPU_EnableRegion(uint32_t RegionNumber); +void HAL_MPU_DisableRegion(uint32_t RegionNumber); void HAL_MPU_ConfigRegion(MPU_Region_InitTypeDef *MPU_Init); #endif /* __MPU_PRESENT */ +void HAL_CORTEX_ClearEvent(void); /** * @} */ @@ -407,4 +408,3 @@ void HAL_MPU_ConfigRegion(MPU_Region_InitTypeDef *MPU_Init); #endif /* __STM32F4xx_HAL_CORTEX_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dac.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dac.h index abd85449..3ee217af 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dac.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dac.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
<h2><center>&copy; Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -79,19 +78,19 @@ typedef struct __IO uint32_t ErrorCode; /*!< DAC Error code */ #if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) - void (* ConvCpltCallbackCh1) (struct __DAC_HandleTypeDef *hdac); - void (* ConvHalfCpltCallbackCh1) (struct __DAC_HandleTypeDef *hdac); - void (* ErrorCallbackCh1) (struct __DAC_HandleTypeDef *hdac); - void (* DMAUnderrunCallbackCh1) (struct __DAC_HandleTypeDef *hdac); + void (* ConvCpltCallbackCh1)(struct __DAC_HandleTypeDef *hdac); + void (* ConvHalfCpltCallbackCh1)(struct __DAC_HandleTypeDef *hdac); + void (* ErrorCallbackCh1)(struct __DAC_HandleTypeDef *hdac); + void (* DMAUnderrunCallbackCh1)(struct __DAC_HandleTypeDef *hdac); #if defined(DAC_CHANNEL2_SUPPORT) - void (* ConvCpltCallbackCh2) (struct __DAC_HandleTypeDef *hdac); - void (* ConvHalfCpltCallbackCh2) (struct __DAC_HandleTypeDef *hdac); - void (* ErrorCallbackCh2) (struct __DAC_HandleTypeDef *hdac); - void (* DMAUnderrunCallbackCh2) (struct __DAC_HandleTypeDef *hdac); + void (* ConvCpltCallbackCh2)(struct __DAC_HandleTypeDef *hdac); + void (* ConvHalfCpltCallbackCh2)(struct __DAC_HandleTypeDef *hdac); + void (* ErrorCallbackCh2)(struct __DAC_HandleTypeDef *hdac); + void (* DMAUnderrunCallbackCh2)(struct __DAC_HandleTypeDef *hdac); #endif /* DAC_CHANNEL2_SUPPORT */ - void (* MspInitCallback) (struct __DAC_HandleTypeDef *hdac); - void (* MspDeInitCallback) (struct __DAC_HandleTypeDef *hdac); + void (* MspInitCallback)(struct __DAC_HandleTypeDef *hdac); + void (* MspDeInitCallback)(struct __DAC_HandleTypeDef *hdac); #endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ } DAC_HandleTypeDef; @@ -405,7 +404,7 @@ void HAL_DAC_MspDeInit(DAC_HandleTypeDef *hdac); /* IO operation functions *****************************************************/ HAL_StatusTypeDef HAL_DAC_Start(DAC_HandleTypeDef *hdac, uint32_t Channel); HAL_StatusTypeDef HAL_DAC_Stop(DAC_HandleTypeDef *hdac, uint32_t Channel); -HAL_StatusTypeDef HAL_DAC_Start_DMA(DAC_HandleTypeDef *hdac, uint32_t Channel, uint32_t *pData, uint32_t Length, +HAL_StatusTypeDef HAL_DAC_Start_DMA(DAC_HandleTypeDef *hdac, uint32_t Channel, const uint32_t *pData, uint32_t Length, uint32_t Alignment); HAL_StatusTypeDef HAL_DAC_Stop_DMA(DAC_HandleTypeDef *hdac, uint32_t Channel); void HAL_DAC_IRQHandler(DAC_HandleTypeDef *hdac); @@ -431,8 +430,9 @@ HAL_StatusTypeDef HAL_DAC_UnRegisterCallback(DAC_HandleTypeDef *hdac, HAL_DA * @{ */ /* Peripheral Control functions ***********************************************/ -uint32_t HAL_DAC_GetValue(DAC_HandleTypeDef *hdac, uint32_t Channel); -HAL_StatusTypeDef HAL_DAC_ConfigChannel(DAC_HandleTypeDef *hdac, DAC_ChannelConfTypeDef *sConfig, uint32_t Channel); +uint32_t HAL_DAC_GetValue(const DAC_HandleTypeDef *hdac, uint32_t Channel); +HAL_StatusTypeDef HAL_DAC_ConfigChannel(DAC_HandleTypeDef *hdac, + const DAC_ChannelConfTypeDef *sConfig, uint32_t Channel); /** * @} */ @@ -441,8 +441,8 @@ HAL_StatusTypeDef HAL_DAC_ConfigChannel(DAC_HandleTypeDef *hdac, 
DAC_ChannelConf * @{ */ /* Peripheral State and Error functions ***************************************/ -HAL_DAC_StateTypeDef HAL_DAC_GetState(DAC_HandleTypeDef *hdac); -uint32_t HAL_DAC_GetError(DAC_HandleTypeDef *hdac); +HAL_DAC_StateTypeDef HAL_DAC_GetState(const DAC_HandleTypeDef *hdac); +uint32_t HAL_DAC_GetError(const DAC_HandleTypeDef *hdac); /** * @} @@ -478,5 +478,3 @@ void DAC_DMAHalfConvCpltCh1(DMA_HandleTypeDef *hdma); #endif /* STM32F4xx_HAL_DAC_H */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dac_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dac_ex.h index 03159d4f..1bb5ce45 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dac_ex.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dac_ex.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
<h2><center>&copy; Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -82,6 +81,7 @@ extern "C" { * @} */ + /** * @} */ @@ -148,19 +148,18 @@ HAL_StatusTypeDef HAL_DACEx_TriangleWaveGenerate(DAC_HandleTypeDef *hdac, uint32 HAL_StatusTypeDef HAL_DACEx_NoiseWaveGenerate(DAC_HandleTypeDef *hdac, uint32_t Channel, uint32_t Amplitude); #if defined(DAC_CHANNEL2_SUPPORT) -#endif HAL_StatusTypeDef HAL_DACEx_DualStart(DAC_HandleTypeDef *hdac); HAL_StatusTypeDef HAL_DACEx_DualStop(DAC_HandleTypeDef *hdac); HAL_StatusTypeDef HAL_DACEx_DualSetValue(DAC_HandleTypeDef *hdac, uint32_t Alignment, uint32_t Data1, uint32_t Data2); -uint32_t HAL_DACEx_DualGetValue(DAC_HandleTypeDef *hdac); +uint32_t HAL_DACEx_DualGetValue(const DAC_HandleTypeDef *hdac); +#endif /* DAC_CHANNEL2_SUPPORT */ #if defined(DAC_CHANNEL2_SUPPORT) -#endif void HAL_DACEx_ConvCpltCallbackCh2(DAC_HandleTypeDef *hdac); void HAL_DACEx_ConvHalfCpltCallbackCh2(DAC_HandleTypeDef *hdac); void HAL_DACEx_ErrorCallbackCh2(DAC_HandleTypeDef *hdac); void HAL_DACEx_DMAUnderrunCallbackCh2(DAC_HandleTypeDef *hdac); - +#endif /* DAC_CHANNEL2_SUPPORT */ /** * @} @@ -203,5 +202,3 @@ void DAC_DMAHalfConvCpltCh2(DMA_HandleTypeDef *hdma); #endif #endif /* STM32F4xx_HAL_DAC_EX_H */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_def.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_def.h index e32e362d..1df0d7d0 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_def.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_def.h @@ -7,13 +7,12 @@ ****************************************************************************** * @attention * - *
<h2><center>&copy; Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -55,7 +54,9 @@ typedef enum /* Exported macro ------------------------------------------------------------*/ +#if !defined(UNUSED) #define UNUSED(X) (void)X /* To avoid gcc/g++ warnings */ +#endif /* UNUSED */ #define HAL_MAX_DELAY 0xFFFFFFFFU @@ -107,7 +108,14 @@ typedef enum }while (0U) #endif /* USE_RTOS */ -#if defined ( __GNUC__ ) && !defined (__CC_ARM) /* GNU Compiler */ +#if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) /* ARM Compiler V6 */ + #ifndef __weak + #define __weak __attribute__((weak)) + #endif + #ifndef __packed + #define __packed __attribute__((packed)) + #endif +#elif defined ( __GNUC__ ) && !defined (__CC_ARM) /* GNU Compiler */ #ifndef __weak #define __weak __attribute__((weak)) #endif /* __weak */ @@ -118,7 +126,14 @@ typedef enum /* Macro to get variable aligned on 4-bytes, for __ICCARM__ the directive "#pragma data_alignment=4" must be used instead */ -#if defined ( __GNUC__ ) && !defined (__CC_ARM) /* GNU Compiler */ +#if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) /* ARM Compiler V6 */ + #ifndef __ALIGN_BEGIN + #define __ALIGN_BEGIN + #endif + #ifndef __ALIGN_END + #define __ALIGN_END __attribute__ ((aligned (4))) + #endif +#elif defined ( __GNUC__ ) && !defined (__CC_ARM) /* GNU Compiler */ #ifndef __ALIGN_END #define __ALIGN_END __attribute__ ((aligned (4))) #endif /* __ALIGN_END */ @@ -130,7 +145,7 @@ typedef enum #define __ALIGN_END #endif /* __ALIGN_END */ #ifndef __ALIGN_BEGIN - #if defined (__CC_ARM) /* ARM Compiler */ + #if defined (__CC_ARM) /* ARM Compiler V5*/ #define __ALIGN_BEGIN __align(4) #elif defined (__ICCARM__) /* IAR Compiler */ #define __ALIGN_BEGIN @@ -142,9 +157,9 @@ typedef enum /** * @brief __RAM_FUNC definition */ -#if defined ( __CC_ARM ) -/* ARM Compiler - ------------ +#if defined ( __CC_ARM ) || (defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)) +/* ARM Compiler V4/V5 and V6 + -------------------------- RAM functions are defined using the toolchain options. Functions that are executed in RAM should reside in a separate source module. 
Using the 'Options for File' dialog you can simply change the 'Code / Const' @@ -174,9 +189,9 @@ typedef enum /** * @brief __NOINLINE definition */ -#if defined ( __CC_ARM ) || defined ( __GNUC__ ) -/* ARM & GNUCompiler - ---------------- +#if defined ( __CC_ARM ) || (defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)) || defined ( __GNUC__ ) +/* ARM V4/V5 and V6 & GNU Compiler + ------------------------------- */ #define __NOINLINE __attribute__ ( (noinline) ) @@ -194,4 +209,4 @@ typedef enum #endif /* ___STM32F4xx_HAL_DEF */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dma.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dma.h index 90dd292c..7ff3836b 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dma.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dma.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
<h2><center>&copy; Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -801,4 +800,3 @@ uint32_t HAL_DMA_GetError(DMA_HandleTypeDef *hdma); #endif /* __STM32F4xx_HAL_DMA_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dma_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dma_ex.h index 2e60aff2..9858c741 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dma_ex.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_dma_ex.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
<h2><center>&copy; Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -101,4 +100,3 @@ HAL_StatusTypeDef HAL_DMAEx_ChangeMemory(DMA_HandleTypeDef *hdma, uint32_t Addre #endif /*__STM32F4xx_HAL_DMA_EX_H*/ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_eth.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_eth.h new file mode 100644 index 00000000..9024fff0 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_eth.h @@ -0,0 +1,2017 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_eth.h + * @author MCD Application Team + * @brief Header file of ETH HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F4xx_HAL_ETH_H +#define STM32F4xx_HAL_ETH_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal_def.h" + +#if defined(ETH) + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup ETH + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +#ifndef ETH_TX_DESC_CNT +#define ETH_TX_DESC_CNT 4U +#endif /* ETH_TX_DESC_CNT */ + +#ifndef ETH_RX_DESC_CNT +#define ETH_RX_DESC_CNT 4U +#endif /* ETH_RX_DESC_CNT */ + + +/*********************** Descriptors struct def section ************************/ +/** @defgroup ETH_Exported_Types ETH Exported Types + * @{ + */ + +/** + * @brief ETH DMA Descriptor structure definition + */ +typedef struct +{ + __IO uint32_t DESC0; + __IO uint32_t DESC1; + __IO uint32_t DESC2; + __IO uint32_t DESC3; + __IO uint32_t DESC4; + __IO uint32_t DESC5; + __IO uint32_t DESC6; + __IO uint32_t DESC7; + uint32_t BackupAddr0; /* used to store rx buffer 1 address */ + uint32_t BackupAddr1; /* used to store rx buffer 2 address */ +} ETH_DMADescTypeDef; +/** + * + */ + +/** + * @brief ETH Buffers List structure definition + */ +typedef struct __ETH_BufferTypeDef +{ + uint8_t *buffer; /*gState = HAL_ETH_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_ETH_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->gState = HAL_ETH_STATE_RESET; \ + } while(0) +#endif /*USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @brief Enables the specified ETHERNET DMA interrupts. 
+ * @param __HANDLE__ : ETH Handle + * @param __INTERRUPT__: specifies the ETHERNET DMA interrupt sources to be + * enabled @ref ETH_DMA_Interrupts + * @retval None + */ +#define __HAL_ETH_DMA_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DMAIER \ + |= (__INTERRUPT__)) + +/** + * @brief Disables the specified ETHERNET DMA interrupts. + * @param __HANDLE__ : ETH Handle + * @param __INTERRUPT__: specifies the ETHERNET DMA interrupt sources to be + * disabled. @ref ETH_DMA_Interrupts + * @retval None + */ +#define __HAL_ETH_DMA_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DMAIER \ + &= ~(__INTERRUPT__)) + +/** + * @brief Gets the ETHERNET DMA IT source enabled or disabled. + * @param __HANDLE__ : ETH Handle + * @param __INTERRUPT__: specifies the interrupt source to get . @ref ETH_DMA_Interrupts + * @retval The ETH DMA IT Source enabled or disabled + */ +#define __HAL_ETH_DMA_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->DMAIER &\ + (__INTERRUPT__)) == (__INTERRUPT__)) + +/** + * @brief Gets the ETHERNET DMA IT pending bit. + * @param __HANDLE__ : ETH Handle + * @param __INTERRUPT__: specifies the interrupt source to get . @ref ETH_DMA_Interrupts + * @retval The state of ETH DMA IT (SET or RESET) + */ +#define __HAL_ETH_DMA_GET_IT(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->DMASR &\ + (__INTERRUPT__)) == (__INTERRUPT__)) + +/** + * @brief Clears the ETHERNET DMA IT pending bit. + * @param __HANDLE__ : ETH Handle + * @param __INTERRUPT__: specifies the interrupt pending bit to clear. @ref ETH_DMA_Interrupts + * @retval None + */ +#define __HAL_ETH_DMA_CLEAR_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DMASR = (__INTERRUPT__)) + +/** + * @brief Checks whether the specified ETHERNET DMA flag is set or not. + * @param __HANDLE__: ETH Handle + * @param __FLAG__: specifies the flag to check. @ref ETH_DMA_Status_Flags + * @retval The state of ETH DMA FLAG (SET or RESET). + */ +#define __HAL_ETH_DMA_GET_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->DMASR &\ + ( __FLAG__)) == ( __FLAG__)) + +/** + * @brief Clears the specified ETHERNET DMA flag. + * @param __HANDLE__: ETH Handle + * @param __FLAG__: specifies the flag to check. @ref ETH_DMA_Status_Flags + * @retval The state of ETH DMA FLAG (SET or RESET). + */ +#define __HAL_ETH_DMA_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->DMASR = ( __FLAG__)) + + + +/** + * @brief Checks whether the specified ETHERNET MAC flag is set or not. + * @param __HANDLE__: ETH Handle + * @param __INTERRUPT__: specifies the flag to check. @ref ETH_MAC_Interrupts + * @retval The state of ETH MAC IT (SET or RESET). + */ +#define __HAL_ETH_MAC_GET_IT(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->MACSR &\ + ( __INTERRUPT__)) == ( __INTERRUPT__)) + +/*!< External interrupt line 19 Connected to the ETH wakeup EXTI Line */ +#define ETH_WAKEUP_EXTI_LINE 0x00080000U + +/** + * @brief Enable the ETH WAKEUP Exti Line. + * @param __EXTI_LINE__: specifies the ETH WAKEUP Exti sources to be enabled. + * @arg ETH_WAKEUP_EXTI_LINE + * @retval None. + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_IT(__EXTI_LINE__) (EXTI->IMR |= (__EXTI_LINE__)) + +/** + * @brief checks whether the specified ETH WAKEUP Exti interrupt flag is set or not. + * @param __EXTI_LINE__: specifies the ETH WAKEUP Exti sources to be cleared. + * @arg ETH_WAKEUP_EXTI_LINE + * @retval EXTI ETH WAKEUP Line Status. 
+ */ +#define __HAL_ETH_WAKEUP_EXTI_GET_FLAG(__EXTI_LINE__) (EXTI->PR & (__EXTI_LINE__)) + +/** + * @brief Clear the ETH WAKEUP Exti flag. + * @param __EXTI_LINE__: specifies the ETH WAKEUP Exti sources to be cleared. + * @arg ETH_WAKEUP_EXTI_LINE + * @retval None. + */ +#define __HAL_ETH_WAKEUP_EXTI_CLEAR_FLAG(__EXTI_LINE__) (EXTI->PR = (__EXTI_LINE__)) + +/** + * @brief enable rising edge interrupt on selected EXTI line. + * @param __EXTI_LINE__: specifies the ETH WAKEUP EXTI sources to be disabled. + * @arg ETH_WAKEUP_EXTI_LINE + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_RISING_EDGE(__EXTI_LINE__) (EXTI->FTSR &= ~(__EXTI_LINE__)); \ + (EXTI->RTSR |= (__EXTI_LINE__)) + +/** + * @brief enable falling edge interrupt on selected EXTI line. + * @param __EXTI_LINE__: specifies the ETH WAKEUP EXTI sources to be disabled. + * @arg ETH_WAKEUP_EXTI_LINE + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_FALLING_EDGE(__EXTI_LINE__) (EXTI->RTSR &= ~(__EXTI_LINE__));\ + (EXTI->FTSR |= (__EXTI_LINE__)) + +/** + * @brief enable falling edge interrupt on selected EXTI line. + * @param __EXTI_LINE__: specifies the ETH WAKEUP EXTI sources to be disabled. + * @arg ETH_WAKEUP_EXTI_LINE + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_RISING_FALLING_EDGE(__EXTI_LINE__) (EXTI->RTSR |= (__EXTI_LINE__));\ + (EXTI->FTSR |= (__EXTI_LINE__)) + +/** + * @brief Generates a Software interrupt on selected EXTI line. + * @param __EXTI_LINE__: specifies the ETH WAKEUP EXTI sources to be disabled. + * @arg ETH_WAKEUP_EXTI_LINE + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_GENERATE_SWIT(__EXTI_LINE__) (EXTI->SWIER |= (__EXTI_LINE__)) + +#define __HAL_ETH_GET_PTP_CONTROL(__HANDLE__, __FLAG__) (((((__HANDLE__)->Instance->PTPTSCR) & \ + (__FLAG__)) == (__FLAG__)) ? 
SET : RESET) + +#define __HAL_ETH_SET_PTP_CONTROL(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->PTPTSCR |= (__FLAG__)) + +/** + * @} + */ + + +/* Exported functions --------------------------------------------------------*/ + +/** @addtogroup ETH_Exported_Functions + * @{ + */ + +/** @addtogroup ETH_Exported_Functions_Group1 + * @{ + */ +/* Initialization and de initialization functions **********************************/ +HAL_StatusTypeDef HAL_ETH_Init(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_DeInit(ETH_HandleTypeDef *heth); +void HAL_ETH_MspInit(ETH_HandleTypeDef *heth); +void HAL_ETH_MspDeInit(ETH_HandleTypeDef *heth); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_ETH_RegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID, + pETH_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_ETH_UnRegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @addtogroup ETH_Exported_Functions_Group2 + * @{ + */ +/* IO operation functions *******************************************************/ +HAL_StatusTypeDef HAL_ETH_Start(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_Start_IT(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_Stop(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_Stop_IT(ETH_HandleTypeDef *heth); + +HAL_StatusTypeDef HAL_ETH_ReadData(ETH_HandleTypeDef *heth, void **pAppBuff); +HAL_StatusTypeDef HAL_ETH_RegisterRxAllocateCallback(ETH_HandleTypeDef *heth, + pETH_rxAllocateCallbackTypeDef rxAllocateCallback); +HAL_StatusTypeDef HAL_ETH_UnRegisterRxAllocateCallback(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_RegisterRxLinkCallback(ETH_HandleTypeDef *heth, pETH_rxLinkCallbackTypeDef rxLinkCallback); +HAL_StatusTypeDef HAL_ETH_UnRegisterRxLinkCallback(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_GetRxDataErrorCode(const ETH_HandleTypeDef *heth, uint32_t *pErrorCode); +HAL_StatusTypeDef HAL_ETH_RegisterTxFreeCallback(ETH_HandleTypeDef *heth, pETH_txFreeCallbackTypeDef txFreeCallback); +HAL_StatusTypeDef HAL_ETH_UnRegisterTxFreeCallback(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_ReleaseTxPacket(ETH_HandleTypeDef *heth); + +#ifdef HAL_ETH_USE_PTP +HAL_StatusTypeDef HAL_ETH_PTP_SetConfig(ETH_HandleTypeDef *heth, ETH_PTP_ConfigTypeDef *ptpconfig); +HAL_StatusTypeDef HAL_ETH_PTP_GetConfig(ETH_HandleTypeDef *heth, ETH_PTP_ConfigTypeDef *ptpconfig); +HAL_StatusTypeDef HAL_ETH_PTP_SetTime(ETH_HandleTypeDef *heth, ETH_TimeTypeDef *time); +HAL_StatusTypeDef HAL_ETH_PTP_GetTime(ETH_HandleTypeDef *heth, ETH_TimeTypeDef *time); +HAL_StatusTypeDef HAL_ETH_PTP_AddTimeOffset(ETH_HandleTypeDef *heth, ETH_PtpUpdateTypeDef ptpoffsettype, + ETH_TimeTypeDef *timeoffset); +HAL_StatusTypeDef HAL_ETH_PTP_InsertTxTimestamp(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_PTP_GetTxTimestamp(ETH_HandleTypeDef *heth, ETH_TimeStampTypeDef *timestamp); +HAL_StatusTypeDef HAL_ETH_PTP_GetRxTimestamp(ETH_HandleTypeDef *heth, ETH_TimeStampTypeDef *timestamp); +HAL_StatusTypeDef HAL_ETH_RegisterTxPtpCallback(ETH_HandleTypeDef *heth, pETH_txPtpCallbackTypeDef txPtpCallback); +HAL_StatusTypeDef HAL_ETH_UnRegisterTxPtpCallback(ETH_HandleTypeDef *heth); +#endif /* HAL_ETH_USE_PTP */ + +HAL_StatusTypeDef HAL_ETH_Transmit(ETH_HandleTypeDef *heth, ETH_TxPacketConfigTypeDef *pTxConfig, uint32_t Timeout); +HAL_StatusTypeDef 
HAL_ETH_Transmit_IT(ETH_HandleTypeDef *heth, ETH_TxPacketConfigTypeDef *pTxConfig); + +HAL_StatusTypeDef HAL_ETH_WritePHYRegister(const ETH_HandleTypeDef *heth, uint32_t PHYAddr, uint32_t PHYReg, + uint32_t RegValue); +HAL_StatusTypeDef HAL_ETH_ReadPHYRegister(ETH_HandleTypeDef *heth, uint32_t PHYAddr, uint32_t PHYReg, + uint32_t *pRegValue); + +void HAL_ETH_IRQHandler(ETH_HandleTypeDef *heth); +void HAL_ETH_TxCpltCallback(ETH_HandleTypeDef *heth); +void HAL_ETH_RxCpltCallback(ETH_HandleTypeDef *heth); +void HAL_ETH_ErrorCallback(ETH_HandleTypeDef *heth); +void HAL_ETH_PMTCallback(ETH_HandleTypeDef *heth); +void HAL_ETH_WakeUpCallback(ETH_HandleTypeDef *heth); +void HAL_ETH_RxAllocateCallback(uint8_t **buff); +void HAL_ETH_RxLinkCallback(void **pStart, void **pEnd, uint8_t *buff, uint16_t Length); +void HAL_ETH_TxFreeCallback(uint32_t *buff); +void HAL_ETH_TxPtpCallback(uint32_t *buff, ETH_TimeStampTypeDef *timestamp); +/** + * @} + */ + +/** @addtogroup ETH_Exported_Functions_Group3 + * @{ + */ +/* Peripheral Control functions **********************************************/ +/* MAC & DMA Configuration APIs **********************************************/ +HAL_StatusTypeDef HAL_ETH_GetMACConfig(const ETH_HandleTypeDef *heth, ETH_MACConfigTypeDef *macconf); +HAL_StatusTypeDef HAL_ETH_GetDMAConfig(const ETH_HandleTypeDef *heth, ETH_DMAConfigTypeDef *dmaconf); +HAL_StatusTypeDef HAL_ETH_SetMACConfig(ETH_HandleTypeDef *heth, ETH_MACConfigTypeDef *macconf); +HAL_StatusTypeDef HAL_ETH_SetDMAConfig(ETH_HandleTypeDef *heth, ETH_DMAConfigTypeDef *dmaconf); +void HAL_ETH_SetMDIOClockRange(ETH_HandleTypeDef *heth); + +/* MAC VLAN Processing APIs ************************************************/ +void HAL_ETH_SetRxVLANIdentifier(ETH_HandleTypeDef *heth, uint32_t ComparisonBits, + uint32_t VLANIdentifier); + +/* MAC L2 Packet Filtering APIs **********************************************/ +HAL_StatusTypeDef HAL_ETH_GetMACFilterConfig(const ETH_HandleTypeDef *heth, ETH_MACFilterConfigTypeDef *pFilterConfig); +HAL_StatusTypeDef HAL_ETH_SetMACFilterConfig(ETH_HandleTypeDef *heth, const ETH_MACFilterConfigTypeDef *pFilterConfig); +HAL_StatusTypeDef HAL_ETH_SetHashTable(ETH_HandleTypeDef *heth, uint32_t *pHashTable); +HAL_StatusTypeDef HAL_ETH_SetSourceMACAddrMatch(const ETH_HandleTypeDef *heth, uint32_t AddrNbr, + const uint8_t *pMACAddr); + +/* MAC Power Down APIs *****************************************************/ +void HAL_ETH_EnterPowerDownMode(ETH_HandleTypeDef *heth, + const ETH_PowerDownConfigTypeDef *pPowerDownConfig); +void HAL_ETH_ExitPowerDownMode(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_SetWakeUpFilter(ETH_HandleTypeDef *heth, uint32_t *pFilter, uint32_t Count); + +/** + * @} + */ + +/** @addtogroup ETH_Exported_Functions_Group4 + * @{ + */ +/* Peripheral State functions **************************************************/ +HAL_ETH_StateTypeDef HAL_ETH_GetState(const ETH_HandleTypeDef *heth); +uint32_t HAL_ETH_GetError(const ETH_HandleTypeDef *heth); +uint32_t HAL_ETH_GetDMAError(const ETH_HandleTypeDef *heth); +uint32_t HAL_ETH_GetMACError(const ETH_HandleTypeDef *heth); +uint32_t HAL_ETH_GetMACWakeUpSource(const ETH_HandleTypeDef *heth); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* ETH */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F4xx_HAL_ETH_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_exti.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_exti.h index ff74222e..b18a2287 100644 --- 
a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_exti.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_exti.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2018 STMicroelectronics. - * All rights reserved.
+ * Copyright (c) 2018 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS.Clause * ****************************************************************************** */ @@ -243,19 +242,19 @@ typedef struct /** @defgroup EXTI_Private_Macros EXTI Private Macros * @{ */ -#define IS_EXTI_LINE(__LINE__) ((((__LINE__) & ~(EXTI_PROPERTY_MASK | EXTI_PIN_MASK)) == 0x00u) && \ - ((((__LINE__) & EXTI_PROPERTY_MASK) == EXTI_CONFIG) || \ - (((__LINE__) & EXTI_PROPERTY_MASK) == EXTI_GPIO)) && \ - (((__LINE__) & EXTI_PIN_MASK) < EXTI_LINE_NB)) +#define IS_EXTI_LINE(__EXTI_LINE__) ((((__EXTI_LINE__) & ~(EXTI_PROPERTY_MASK | EXTI_PIN_MASK)) == 0x00u) && \ + ((((__EXTI_LINE__) & EXTI_PROPERTY_MASK) == EXTI_CONFIG) || \ + (((__EXTI_LINE__) & EXTI_PROPERTY_MASK) == EXTI_GPIO)) && \ + (((__EXTI_LINE__) & EXTI_PIN_MASK) < EXTI_LINE_NB)) -#define IS_EXTI_MODE(__LINE__) ((((__LINE__) & EXTI_MODE_MASK) != 0x00u) && \ - (((__LINE__) & ~EXTI_MODE_MASK) == 0x00u)) +#define IS_EXTI_MODE(__EXTI_LINE__) ((((__EXTI_LINE__) & EXTI_MODE_MASK) != 0x00u) && \ + (((__EXTI_LINE__) & ~EXTI_MODE_MASK) == 0x00u)) -#define IS_EXTI_TRIGGER(__LINE__) (((__LINE__) & ~EXTI_TRIGGER_MASK) == 0x00u) +#define IS_EXTI_TRIGGER(__EXTI_LINE__) (((__EXTI_LINE__) & ~EXTI_TRIGGER_MASK) == 0x00u) -#define IS_EXTI_PENDING_EDGE(__LINE__) ((__LINE__) == EXTI_TRIGGER_RISING_FALLING) +#define IS_EXTI_PENDING_EDGE(__EXTI_LINE__) ((__EXTI_LINE__) == EXTI_TRIGGER_RISING_FALLING) -#define IS_EXTI_CONFIG_LINE(__LINE__) (((__LINE__) & EXTI_CONFIG) != 0x00u) +#define IS_EXTI_CONFIG_LINE(__EXTI_LINE__) (((__EXTI_LINE__) & EXTI_CONFIG) != 0x00u) #if !defined (GPIOD) #define IS_EXTI_GPIO_PORT(__PORT__) (((__PORT__) == EXTI_GPIOA) || \ @@ -365,4 +364,3 @@ void HAL_EXTI_GenerateSWI(EXTI_HandleTypeDef *hexti); #endif /* STM32f4xx_HAL_EXTI_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash.h index b817f63b..41f77d28 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash.h @@ -6,23 +6,21 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** - */ + */ /* Define to prevent recursive inclusion -------------------------------------*/ #ifndef __STM32F4xx_HAL_FLASH_H #define __STM32F4xx_HAL_FLASH_H #ifdef __cplusplus - extern "C" { +extern "C" { #endif /* Includes ------------------------------------------------------------------*/ @@ -34,46 +32,46 @@ /** @addtogroup FLASH * @{ - */ + */ /* Exported types ------------------------------------------------------------*/ /** @defgroup FLASH_Exported_Types FLASH Exported Types * @{ */ - + /** * @brief FLASH Procedure structure definition */ -typedef enum +typedef enum { - FLASH_PROC_NONE = 0U, + FLASH_PROC_NONE = 0U, FLASH_PROC_SECTERASE, FLASH_PROC_MASSERASE, FLASH_PROC_PROGRAM } FLASH_ProcedureTypeDef; -/** - * @brief FLASH handle Structure definition +/** + * @brief FLASH handle Structure definition */ typedef struct { __IO FLASH_ProcedureTypeDef ProcedureOnGoing; /*Internal variable to indicate which procedure is ongoing or not in IT context*/ - + __IO uint32_t NbSectorsToErase; /*Internal variable to save the remaining sectors to erase in IT context*/ - + __IO uint8_t VoltageForErase; /*Internal variable to provide voltage range selected by user in IT context*/ - + __IO uint32_t Sector; /*Internal variable to define the current sector which is erasing*/ - + __IO uint32_t Bank; /*Internal variable to save current bank selected during mass erase*/ - + __IO uint32_t Address; /*Internal variable to save address selected for program*/ - + HAL_LockTypeDef Lock; /* FLASH locking object */ __IO uint32_t ErrorCode; /* FLASH error code */ -}FLASH_ProcessTypeDef; +} FLASH_ProcessTypeDef; /** * @} @@ -82,11 +80,11 @@ typedef struct /* Exported constants --------------------------------------------------------*/ /** @defgroup FLASH_Exported_Constants FLASH Exported Constants * @{ - */ + */ /** @defgroup FLASH_Error_Code FLASH Error Code - * @brief FLASH Error Code + * @brief FLASH Error Code * @{ - */ + */ #define HAL_FLASH_ERROR_NONE 0x00000000U /*!< No error */ #define HAL_FLASH_ERROR_RD 0x00000001U /*!< Read Protection error */ #define HAL_FLASH_ERROR_PGS 0x00000002U /*!< Programming Sequence error */ @@ -97,10 +95,10 @@ typedef struct /** * @} */ - + /** @defgroup FLASH_Type_Program FLASH Type Program * @{ - */ + */ #define FLASH_TYPEPROGRAM_BYTE 0x00000000U /*!< Program byte (8-bit) at a specified address */ #define FLASH_TYPEPROGRAM_HALFWORD 0x00000001U /*!< Program a half-word (16-bit) at a specified address */ #define FLASH_TYPEPROGRAM_WORD 0x00000002U /*!< Program a word (32-bit) at a specified address */ @@ -112,7 +110,7 @@ typedef struct /** @defgroup FLASH_Flag_definition FLASH Flag definition * @brief Flag definition * @{ - */ + */ #define FLASH_FLAG_EOP FLASH_SR_EOP /*!< FLASH End of Operation flag */ #define FLASH_FLAG_OPERR FLASH_SR_SOP /*!< FLASH operation Error flag */ #define FLASH_FLAG_WRPERR FLASH_SR_WRPERR /*!< FLASH Write protected error flag */ @@ -122,20 +120,20 @@ typedef struct 
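The program-type and error-code constants above are what the flash programming entry points declared later in this header consume. A minimal word-programming sequence, assuming the usual unlock/lock helpers from this driver (HAL_FLASH_Unlock, HAL_FLASH_Program, HAL_FLASH_GetError, HAL_FLASH_Lock), looks like this sketch:

/* Sketch only; the address must lie in main flash and the supply must allow 32-bit parallelism. */
static HAL_StatusTypeDef flash_write_word_sketch(uint32_t address, uint32_t data)
{
  HAL_StatusTypeDef status;

  HAL_FLASH_Unlock();                                  /* opens FLASH_CR with FLASH_KEY1/FLASH_KEY2 */

  status = HAL_FLASH_Program(FLASH_TYPEPROGRAM_WORD, address, (uint64_t)data);
  if (status != HAL_OK)
  {
    uint32_t err = HAL_FLASH_GetError();               /* combination of the HAL_FLASH_ERROR_x codes above */
    (void)err;
  }

  HAL_FLASH_Lock();                                    /* re-locks the control register */
  return status;
}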
#if defined(FLASH_SR_RDERR) #define FLASH_FLAG_RDERR FLASH_SR_RDERR /*!< Read Protection error flag (PCROP) */ #endif /* FLASH_SR_RDERR */ -#define FLASH_FLAG_BSY FLASH_SR_BSY /*!< FLASH Busy flag */ +#define FLASH_FLAG_BSY FLASH_SR_BSY /*!< FLASH Busy flag */ /** * @} */ - + /** @defgroup FLASH_Interrupt_definition FLASH Interrupt definition * @brief FLASH Interrupt definition * @{ - */ + */ #define FLASH_IT_EOP FLASH_CR_EOPIE /*!< End of FLASH Operation Interrupt source */ #define FLASH_IT_ERR 0x02000000U /*!< Error Interrupt source */ /** * @} - */ + */ /** @defgroup FLASH_Program_Parallelism FLASH Program Parallelism * @{ @@ -147,11 +145,11 @@ typedef struct #define CR_PSIZE_MASK 0xFFFFFCFFU /** * @} - */ + */ /** @defgroup FLASH_Keys FLASH Keys * @{ - */ + */ #define RDP_KEY ((uint16_t)0x00A5) #define FLASH_KEY1 0x45670123U #define FLASH_KEY2 0xCDEF89ABU @@ -159,12 +157,12 @@ typedef struct #define FLASH_OPT_KEY2 0x4C5D6E7FU /** * @} - */ + */ /** * @} - */ - + */ + /* Exported macro ------------------------------------------------------------*/ /** @defgroup FLASH_Exported_Macros FLASH Exported Macros * @{ @@ -174,102 +172,102 @@ typedef struct * @param __LATENCY__ FLASH Latency * The value of this parameter depend on device used within the same series * @retval none - */ + */ #define __HAL_FLASH_SET_LATENCY(__LATENCY__) (*(__IO uint8_t *)ACR_BYTE0_ADDRESS = (uint8_t)(__LATENCY__)) /** * @brief Get the FLASH Latency. * @retval FLASH Latency * The value of this parameter depend on device used within the same series - */ + */ #define __HAL_FLASH_GET_LATENCY() (READ_BIT((FLASH->ACR), FLASH_ACR_LATENCY)) /** * @brief Enable the FLASH prefetch buffer. * @retval none - */ + */ #define __HAL_FLASH_PREFETCH_BUFFER_ENABLE() (FLASH->ACR |= FLASH_ACR_PRFTEN) /** * @brief Disable the FLASH prefetch buffer. * @retval none - */ + */ #define __HAL_FLASH_PREFETCH_BUFFER_DISABLE() (FLASH->ACR &= (~FLASH_ACR_PRFTEN)) /** * @brief Enable the FLASH instruction cache. * @retval none - */ + */ #define __HAL_FLASH_INSTRUCTION_CACHE_ENABLE() (FLASH->ACR |= FLASH_ACR_ICEN) /** * @brief Disable the FLASH instruction cache. * @retval none - */ + */ #define __HAL_FLASH_INSTRUCTION_CACHE_DISABLE() (FLASH->ACR &= (~FLASH_ACR_ICEN)) /** * @brief Enable the FLASH data cache. * @retval none - */ + */ #define __HAL_FLASH_DATA_CACHE_ENABLE() (FLASH->ACR |= FLASH_ACR_DCEN) /** * @brief Disable the FLASH data cache. * @retval none - */ + */ #define __HAL_FLASH_DATA_CACHE_DISABLE() (FLASH->ACR &= (~FLASH_ACR_DCEN)) /** * @brief Resets the FLASH instruction Cache. - * @note This function must be used only when the Instruction Cache is disabled. + * @note This function must be used only when the Instruction Cache is disabled. * @retval None */ #define __HAL_FLASH_INSTRUCTION_CACHE_RESET() do {FLASH->ACR |= FLASH_ACR_ICRST; \ - FLASH->ACR &= ~FLASH_ACR_ICRST; \ + FLASH->ACR &= ~FLASH_ACR_ICRST; \ }while(0U) /** * @brief Resets the FLASH data Cache. - * @note This function must be used only when the data Cache is disabled. + * @note This function must be used only when the data Cache is disabled. * @retval None */ #define __HAL_FLASH_DATA_CACHE_RESET() do {FLASH->ACR |= FLASH_ACR_DCRST; \ - FLASH->ACR &= ~FLASH_ACR_DCRST; \ + FLASH->ACR &= ~FLASH_ACR_DCRST; \ }while(0U) /** * @brief Enable the specified FLASH interrupt. 
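The access-control macros above (latency, prefetch, instruction/data cache) are usually applied once during clock configuration. A hedged sketch follows; the wait-state value is only an example and depends on HCLK and the supply voltage (FLASH_LATENCY_5 suits 168 MHz at 2.7 V to 3.6 V on an STM32F407-class part), and FLASH_LATENCY_x itself comes from the extended-driver header.

/* Boot-time flash interface setup sketch. */
static void flash_acr_setup_sketch(void)
{
  __HAL_FLASH_INSTRUCTION_CACHE_ENABLE();
  __HAL_FLASH_DATA_CACHE_ENABLE();
  __HAL_FLASH_PREFETCH_BUFFER_ENABLE();

  __HAL_FLASH_SET_LATENCY(FLASH_LATENCY_5);    /* example value: 5 wait states */
  if (__HAL_FLASH_GET_LATENCY() != FLASH_LATENCY_5)
  {
    /* latency not accepted: re-check the voltage range and HCLK frequency */
  }
}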
- * @param __INTERRUPT__ FLASH interrupt + * @param __INTERRUPT__ FLASH interrupt * This parameter can be any combination of the following values: * @arg FLASH_IT_EOP: End of FLASH Operation Interrupt - * @arg FLASH_IT_ERR: Error Interrupt + * @arg FLASH_IT_ERR: Error Interrupt * @retval none - */ + */ #define __HAL_FLASH_ENABLE_IT(__INTERRUPT__) (FLASH->CR |= (__INTERRUPT__)) /** * @brief Disable the specified FLASH interrupt. - * @param __INTERRUPT__ FLASH interrupt + * @param __INTERRUPT__ FLASH interrupt * This parameter can be any combination of the following values: * @arg FLASH_IT_EOP: End of FLASH Operation Interrupt - * @arg FLASH_IT_ERR: Error Interrupt + * @arg FLASH_IT_ERR: Error Interrupt * @retval none - */ + */ #define __HAL_FLASH_DISABLE_IT(__INTERRUPT__) (FLASH->CR &= ~(uint32_t)(__INTERRUPT__)) /** - * @brief Get the specified FLASH flag status. + * @brief Get the specified FLASH flag status. * @param __FLAG__ specifies the FLASH flags to check. * This parameter can be any combination of the following values: - * @arg FLASH_FLAG_EOP : FLASH End of Operation flag - * @arg FLASH_FLAG_OPERR : FLASH operation Error flag - * @arg FLASH_FLAG_WRPERR: FLASH Write protected error flag + * @arg FLASH_FLAG_EOP : FLASH End of Operation flag + * @arg FLASH_FLAG_OPERR : FLASH operation Error flag + * @arg FLASH_FLAG_WRPERR: FLASH Write protected error flag * @arg FLASH_FLAG_PGAERR: FLASH Programming Alignment error flag * @arg FLASH_FLAG_PGPERR: FLASH Programming Parallelism error flag * @arg FLASH_FLAG_PGSERR: FLASH Programming Sequence error flag * @arg FLASH_FLAG_RDERR : FLASH Read Protection error flag (PCROP) (*) * @arg FLASH_FLAG_BSY : FLASH Busy flag - * (*) FLASH_FLAG_RDERR is not available for STM32F405xx/407xx/415xx/417xx devices + * (*) FLASH_FLAG_RDERR is not available for STM32F405xx/407xx/415xx/417xx devices * @retval The new state of __FLAG__ (SET or RESET). */ #define __HAL_FLASH_GET_FLAG(__FLAG__) ((FLASH->SR & (__FLAG__))) @@ -278,14 +276,14 @@ typedef struct * @brief Clear the specified FLASH flags. * @param __FLAG__ specifies the FLASH flags to clear. 
* This parameter can be any combination of the following values: - * @arg FLASH_FLAG_EOP : FLASH End of Operation flag - * @arg FLASH_FLAG_OPERR : FLASH operation Error flag - * @arg FLASH_FLAG_WRPERR: FLASH Write protected error flag - * @arg FLASH_FLAG_PGAERR: FLASH Programming Alignment error flag + * @arg FLASH_FLAG_EOP : FLASH End of Operation flag + * @arg FLASH_FLAG_OPERR : FLASH operation Error flag + * @arg FLASH_FLAG_WRPERR: FLASH Write protected error flag + * @arg FLASH_FLAG_PGAERR: FLASH Programming Alignment error flag * @arg FLASH_FLAG_PGPERR: FLASH Programming Parallelism error flag * @arg FLASH_FLAG_PGSERR: FLASH Programming Sequence error flag * @arg FLASH_FLAG_RDERR : FLASH Read Protection error flag (PCROP) (*) - * (*) FLASH_FLAG_RDERR is not available for STM32F405xx/407xx/415xx/417xx devices + * (*) FLASH_FLAG_RDERR is not available for STM32F405xx/407xx/415xx/417xx devices * @retval none */ #define __HAL_FLASH_CLEAR_FLAG(__FLAG__) (FLASH->SR = (__FLAG__)) @@ -309,7 +307,7 @@ HAL_StatusTypeDef HAL_FLASH_Program(uint32_t TypeProgram, uint32_t Address, uint HAL_StatusTypeDef HAL_FLASH_Program_IT(uint32_t TypeProgram, uint32_t Address, uint64_t Data); /* FLASH IRQ handler method */ void HAL_FLASH_IRQHandler(void); -/* Callbacks in non blocking modes */ +/* Callbacks in non blocking modes */ void HAL_FLASH_EndOfOperationCallback(uint32_t ReturnValue); void HAL_FLASH_OperationErrorCallback(uint32_t ReturnValue); /** @@ -342,7 +340,7 @@ HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); /** * @} - */ + */ /* Private types -------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /** @defgroup FLASH_Private_Variables FLASH Private Variables @@ -357,25 +355,25 @@ HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); * @{ */ -/** - * @brief ACR register byte 0 (Bits[7:0]) base address - */ -#define ACR_BYTE0_ADDRESS 0x40023C00U -/** - * @brief OPTCR register byte 0 (Bits[7:0]) base address - */ +/** + * @brief ACR register byte 0 (Bits[7:0]) base address + */ +#define ACR_BYTE0_ADDRESS 0x40023C00U +/** + * @brief OPTCR register byte 0 (Bits[7:0]) base address + */ #define OPTCR_BYTE0_ADDRESS 0x40023C14U -/** - * @brief OPTCR register byte 1 (Bits[15:8]) base address - */ +/** + * @brief OPTCR register byte 1 (Bits[15:8]) base address + */ #define OPTCR_BYTE1_ADDRESS 0x40023C15U -/** - * @brief OPTCR register byte 2 (Bits[23:16]) base address - */ +/** + * @brief OPTCR register byte 2 (Bits[23:16]) base address + */ #define OPTCR_BYTE2_ADDRESS 0x40023C16U -/** - * @brief OPTCR register byte 3 (Bits[31:24]) base address - */ +/** + * @brief OPTCR register byte 3 (Bits[31:24]) base address + */ #define OPTCR_BYTE3_ADDRESS 0x40023C17U /** @@ -393,7 +391,7 @@ HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); #define IS_FLASH_TYPEPROGRAM(VALUE)(((VALUE) == FLASH_TYPEPROGRAM_BYTE) || \ ((VALUE) == FLASH_TYPEPROGRAM_HALFWORD) || \ ((VALUE) == FLASH_TYPEPROGRAM_WORD) || \ - ((VALUE) == FLASH_TYPEPROGRAM_DOUBLEWORD)) + ((VALUE) == FLASH_TYPEPROGRAM_DOUBLEWORD)) /** * @} */ @@ -413,7 +411,7 @@ HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); /** * @} - */ + */ /** * @} @@ -425,4 +423,3 @@ HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); #endif /* __STM32F4xx_HAL_FLASH_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash_ex.h 
b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash_ex.h index 4dbad673..5fa89db5 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash_ex.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash_ex.h @@ -6,23 +6,21 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** - */ + */ /* Define to prevent recursive inclusion -------------------------------------*/ #ifndef __STM32F4xx_HAL_FLASH_EX_H #define __STM32F4xx_HAL_FLASH_EX_H #ifdef __cplusplus - extern "C" { +extern "C" { #endif /* Includes ------------------------------------------------------------------*/ @@ -34,7 +32,7 @@ /** @addtogroup FLASHEx * @{ - */ + */ /* Exported types ------------------------------------------------------------*/ /** @defgroup FLASHEx_Exported_Types FLASH Exported Types @@ -78,7 +76,7 @@ typedef struct The value of this parameter depend on device used within the same series */ uint32_t Banks; /*!< Select banks for WRP activation/deactivation of all sectors. - This parameter must be a value of @ref FLASHEx_Banks */ + This parameter must be a value of @ref FLASHEx_Banks */ uint32_t RDPLevel; /*!< Set the read protection level. This parameter can be a value of @ref FLASHEx_Option_Bytes_Read_Protection */ @@ -127,7 +125,7 @@ typedef struct This parameter can be a value of @ref FLASHEx_Dual_Boot */ #endif /*STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ -}FLASH_AdvOBProgramInitTypeDef; +} FLASH_AdvOBProgramInitTypeDef; #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F401xC || STM32F401xE || STM32F410xx || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ /** @@ -142,16 +140,16 @@ typedef struct /** @defgroup FLASHEx_Type_Erase FLASH Type Erase * @{ - */ + */ #define FLASH_TYPEERASE_SECTORS 0x00000000U /*!< Sectors erase only */ #define FLASH_TYPEERASE_MASSERASE 0x00000001U /*!< Flash Mass erase activation */ /** * @} */ - + /** @defgroup FLASHEx_Voltage_Range FLASH Voltage Range * @{ - */ + */ #define FLASH_VOLTAGE_RANGE_1 0x00000000U /*!< Device operating range: 1.8V to 2.1V */ #define FLASH_VOLTAGE_RANGE_2 0x00000001U /*!< Device operating range: 2.1V to 2.7V */ #define FLASH_VOLTAGE_RANGE_3 0x00000002U /*!< Device operating range: 2.7V to 3.6V */ @@ -159,19 +157,19 @@ typedef struct /** * @} */ - + /** @defgroup FLASHEx_WRP_State FLASH WRP State * @{ - */ + */ #define OB_WRPSTATE_DISABLE 0x00000000U /*!< Disable the write protection of the desired bank 1 sectors */ #define OB_WRPSTATE_ENABLE 0x00000001U /*!< Enable the write protection of the desired bank 1 sectors */ /** * @} */ - + /** @defgroup FLASHEx_Option_Type FLASH Option Type * @{ - */ + */ #define OPTIONBYTE_WRP 0x00000001U /*!< WRP option byte configuration */ #define OPTIONBYTE_RDP 0x00000002U /*!< RDP option byte configuration */ #define OPTIONBYTE_USER 0x00000004U /*!< USER option byte configuration */ @@ -179,7 +177,7 @@ typedef struct /** * @} */ - + /** @defgroup FLASHEx_Option_Bytes_Read_Protection FLASH Option Bytes Read Protection * @{ */ @@ -189,39 +187,39 @@ typedef struct it s no more possible to go back to level 1 or 0 */ /** * @} - */ - + */ 
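The option-byte groups above (option type, WRP state, read-protection levels) are read back and programmed through the FLASH_OBProgramInitTypeDef structure and the HAL_FLASHEx_OBGetConfig()/HAL_FLASHEx_OBProgram() entry points declared further down in this header. A small read-back sketch, assuming the usual field and constant names of the stock F4 HAL:

/* Sketch: inspect the current read-protection level. */
static void option_bytes_check_sketch(void)
{
  FLASH_OBProgramInitTypeDef ob_cfg = {0};

  HAL_FLASHEx_OBGetConfig(&ob_cfg);            /* fills RDPLevel, BORLevel, WRPSector, ... */

  if (ob_cfg.RDPLevel != OB_RDP_LEVEL_0)
  {
    /* device is read-protected; note that level 2 is irreversible once set */
  }
}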
+ /** @defgroup FLASHEx_Option_Bytes_IWatchdog FLASH Option Bytes IWatchdog * @{ - */ + */ #define OB_IWDG_SW ((uint8_t)0x20) /*!< Software IWDG selected */ #define OB_IWDG_HW ((uint8_t)0x00) /*!< Hardware IWDG selected */ /** * @} - */ - + */ + /** @defgroup FLASHEx_Option_Bytes_nRST_STOP FLASH Option Bytes nRST_STOP * @{ - */ + */ #define OB_STOP_NO_RST ((uint8_t)0x40) /*!< No reset generated when entering in STOP */ #define OB_STOP_RST ((uint8_t)0x00) /*!< Reset generated when entering in STOP */ /** * @} - */ + */ /** @defgroup FLASHEx_Option_Bytes_nRST_STDBY FLASH Option Bytes nRST_STDBY * @{ - */ + */ #define OB_STDBY_NO_RST ((uint8_t)0x80) /*!< No reset generated when entering in STANDBY */ #define OB_STDBY_RST ((uint8_t)0x00) /*!< Reset generated when entering in STANDBY */ /** * @} - */ + */ /** @defgroup FLASHEx_BOR_Reset_Level FLASH BOR Reset Level * @{ - */ + */ #define OB_BOR_LEVEL3 ((uint8_t)0x00) /*!< Supply voltage ranges from 2.70 to 3.60 V */ #define OB_BOR_LEVEL2 ((uint8_t)0x04) /*!< Supply voltage ranges from 2.40 to 2.70 V */ #define OB_BOR_LEVEL1 ((uint8_t)0x08) /*!< Supply voltage ranges from 2.10 to 2.40 V */ @@ -237,7 +235,7 @@ typedef struct defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) /** @defgroup FLASHEx_PCROP_State FLASH PCROP State * @{ - */ + */ #define OB_PCROP_STATE_DISABLE 0x00000000U /*!< Disable PCROP */ #define OB_PCROP_STATE_ENABLE 0x00000001U /*!< Enable PCROP */ /** @@ -249,7 +247,7 @@ typedef struct /** @defgroup FLASHEx_Advanced_Option_Type FLASH Advanced Option Type * @{ - */ + */ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ defined(STM32F469xx) || defined(STM32F479xx) #define OPTIONBYTE_PCROP 0x00000001U /*!< PCROP option byte configuration */ @@ -270,7 +268,7 @@ typedef struct /** @defgroup FLASH_Latency FLASH Latency * @{ */ -/*------------------------- STM32F42xxx/STM32F43xxx/STM32F446xx/STM32F469xx/STM32F479xx ----------------------*/ +/*------------------------- STM32F42xxx/STM32F43xxx/STM32F446xx/STM32F469xx/STM32F479xx ----------------------*/ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) ||\ defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) #define FLASH_LATENCY_0 FLASH_ACR_LATENCY_0WS /*!< FLASH Zero Latency cycle */ @@ -292,12 +290,12 @@ typedef struct #endif /* STM32F427xx || STM32F437xx || STM32F429xx|| STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ /*--------------------------------------------------------------------------------------------------------------*/ -/*-------------------------- STM32F40xxx/STM32F41xxx/STM32F401xx/STM32F411xx/STM32F423xx -----------------------*/ +/*-------------------------- STM32F40xxx/STM32F41xxx/STM32F401xx/STM32F411xx/STM32F423xx -----------------------*/ #if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) ||\ defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) ||\ defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F412Zx) || defined(STM32F412Vx) ||\ defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) - + #define FLASH_LATENCY_0 FLASH_ACR_LATENCY_0WS /*!< FLASH Zero Latency cycle */ #define FLASH_LATENCY_1 FLASH_ACR_LATENCY_1WS /*!< FLASH One Latency cycle */ #define FLASH_LATENCY_2 FLASH_ACR_LATENCY_2WS /*!< FLASH Two Latency cycles */ @@ -312,8 +310,8 @@ typedef struct /** * @} - */ - + */ + /** 
@defgroup FLASHEx_Banks FLASH Banks * @{ @@ -335,8 +333,8 @@ typedef struct STM32F413xx || STM32F423xx */ /** * @} - */ - + */ + /** @defgroup FLASHEx_MassErase_bit FLASH Mass Erase bit * @{ */ @@ -355,12 +353,12 @@ typedef struct STM32F413xx || STM32F423xx */ /** * @} - */ + */ /** @defgroup FLASHEx_Sectors FLASH Sectors * @{ */ -/*-------------------------------------- STM32F42xxx/STM32F43xxx/STM32F469xx ------------------------------------*/ +/*-------------------------------------- STM32F42xxx/STM32F43xxx/STM32F469xx ------------------------------------*/ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) ||\ defined(STM32F469xx) || defined(STM32F479xx) #define FLASH_SECTOR_0 0U /*!< Sector Number 0 */ @@ -390,7 +388,7 @@ typedef struct #endif /* STM32F427xx || STM32F437xx || STM32F429xx|| STM32F439xx || STM32F469xx || STM32F479xx */ /*-----------------------------------------------------------------------------------------------------*/ -/*-------------------------------------- STM32F413xx/STM32F423xx --------------------------------------*/ +/*-------------------------------------- STM32F413xx/STM32F423xx --------------------------------------*/ #if defined(STM32F413xx) || defined(STM32F423xx) #define FLASH_SECTOR_0 0U /*!< Sector Number 0 */ #define FLASH_SECTOR_1 1U /*!< Sector Number 1 */ @@ -409,11 +407,11 @@ typedef struct #define FLASH_SECTOR_14 14U /*!< Sector Number 14 */ #define FLASH_SECTOR_15 15U /*!< Sector Number 15 */ #endif /* STM32F413xx || STM32F423xx */ -/*-----------------------------------------------------------------------------------------------------*/ +/*-----------------------------------------------------------------------------------------------------*/ -/*--------------------------------------- STM32F40xxx/STM32F41xxx -------------------------------------*/ +/*--------------------------------------- STM32F40xxx/STM32F41xxx -------------------------------------*/ #if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) || defined(STM32F412Zx) ||\ - defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) #define FLASH_SECTOR_0 0U /*!< Sector Number 0 */ #define FLASH_SECTOR_1 1U /*!< Sector Number 1 */ #define FLASH_SECTOR_2 2U /*!< Sector Number 2 */ @@ -429,7 +427,7 @@ typedef struct #endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ /*-----------------------------------------------------------------------------------------------------*/ -/*--------------------------------------------- STM32F401xC -------------------------------------------*/ +/*--------------------------------------------- STM32F401xC -------------------------------------------*/ #if defined(STM32F401xC) #define FLASH_SECTOR_0 0U /*!< Sector Number 0 */ #define FLASH_SECTOR_1 1U /*!< Sector Number 1 */ @@ -440,7 +438,7 @@ typedef struct #endif /* STM32F401xC */ /*-----------------------------------------------------------------------------------------------------*/ -/*--------------------------------------------- STM32F410xx -------------------------------------------*/ +/*--------------------------------------------- STM32F410xx -------------------------------------------*/ #if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) #define FLASH_SECTOR_0 0U /*!< Sector Number 0 */ #define FLASH_SECTOR_1 1U /*!< Sector Number 1 */ 
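The bank and sector identifiers above feed the sector-erase path of the extended driver. A sketch of a single-sector erase, assuming the FLASH_EraseInitTypeDef structure and HAL_FLASHEx_Erase() entry point declared elsewhere in this header (field and function names as in the stock F4 HAL):

/* Sketch: erase one sector; the caller picks a FLASH_SECTOR_x value. */
static HAL_StatusTypeDef erase_sector_sketch(uint32_t sector)
{
  FLASH_EraseInitTypeDef erase = {0};
  uint32_t faulty_sector = 0xFFFFFFFFU;
  HAL_StatusTypeDef status;

  erase.TypeErase    = FLASH_TYPEERASE_SECTORS;
  erase.Banks        = FLASH_BANK_1;
  erase.Sector       = sector;                   /* e.g. FLASH_SECTOR_5 */
  erase.NbSectors    = 1U;
  erase.VoltageRange = FLASH_VOLTAGE_RANGE_3;    /* 2.7 V to 3.6 V, 32-bit parallelism */

  HAL_FLASH_Unlock();
  status = HAL_FLASHEx_Erase(&erase, &faulty_sector);   /* faulty_sector stays 0xFFFFFFFF on success */
  HAL_FLASH_Lock();

  return status;
}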
@@ -465,14 +463,14 @@ typedef struct /** * @} - */ + */ /** @defgroup FLASHEx_Option_Bytes_Write_Protection FLASH Option Bytes Write Protection * @{ */ -/*--------------------------- STM32F42xxx/STM32F43xxx/STM32F469xx/STM32F479xx -------------------------*/ +/*--------------------------- STM32F42xxx/STM32F43xxx/STM32F469xx/STM32F479xx -------------------------*/ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ - defined(STM32F469xx) || defined(STM32F479xx) + defined(STM32F469xx) || defined(STM32F479xx) #define OB_WRP_SECTOR_0 0x00000001U /*!< Write protection of Sector0 */ #define OB_WRP_SECTOR_1 0x00000002U /*!< Write protection of Sector1 */ #define OB_WRP_SECTOR_2 0x00000004U /*!< Write protection of Sector2 */ @@ -501,8 +499,8 @@ typedef struct #endif /* STM32F427xx || STM32F437xx || STM32F429xx|| STM32F439xx || STM32F469xx || STM32F479xx */ /*-----------------------------------------------------------------------------------------------------*/ -/*--------------------------------------- STM32F413xx/STM32F423xx -------------------------------------*/ -#if defined(STM32F413xx) || defined(STM32F423xx) +/*--------------------------------------- STM32F413xx/STM32F423xx -------------------------------------*/ +#if defined(STM32F413xx) || defined(STM32F423xx) #define OB_WRP_SECTOR_0 0x00000001U /*!< Write protection of Sector0 */ #define OB_WRP_SECTOR_1 0x00000002U /*!< Write protection of Sector1 */ #define OB_WRP_SECTOR_2 0x00000004U /*!< Write protection of Sector2 */ @@ -518,14 +516,14 @@ typedef struct #define OB_WRP_SECTOR_12 0x00001000U /*!< Write protection of Sector12 */ #define OB_WRP_SECTOR_13 0x00002000U /*!< Write protection of Sector13 */ #define OB_WRP_SECTOR_14 0x00004000U /*!< Write protection of Sector14 */ -#define OB_WRP_SECTOR_15 0x00004000U /*!< Write protection of Sector15 */ +#define OB_WRP_SECTOR_15 0x00004000U /*!< Write protection of Sector15 */ #define OB_WRP_SECTOR_All 0x00007FFFU /*!< Write protection of all Sectors */ #endif /* STM32F413xx || STM32F423xx */ -/*-----------------------------------------------------------------------------------------------------*/ - -/*--------------------------------------- STM32F40xxx/STM32F41xxx -------------------------------------*/ +/*-----------------------------------------------------------------------------------------------------*/ + +/*--------------------------------------- STM32F40xxx/STM32F41xxx -------------------------------------*/ #if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) || defined(STM32F412Zx) ||\ - defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) #define OB_WRP_SECTOR_0 0x00000001U /*!< Write protection of Sector0 */ #define OB_WRP_SECTOR_1 0x00000002U /*!< Write protection of Sector1 */ #define OB_WRP_SECTOR_2 0x00000004U /*!< Write protection of Sector2 */ @@ -553,7 +551,7 @@ typedef struct #define OB_WRP_SECTOR_All 0x00000FFFU /*!< Write protection of all Sectors */ #endif /* STM32F401xC */ /*-----------------------------------------------------------------------------------------------------*/ - + /*--------------------------------------------- STM32F410xx -------------------------------------------*/ #if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) #define OB_WRP_SECTOR_0 0x00000001U /*!< Write protection of Sector0 */ @@ -581,13 +579,13 @@ typedef struct /** * @} */ - + /** @defgroup 
FLASHEx_Option_Bytes_PC_ReadWrite_Protection FLASH Option Bytes PC ReadWrite Protection * @{ */ -/*-------------------------------- STM32F42xxx/STM32F43xxx/STM32F469xx/STM32F479xx ---------------------------*/ +/*-------------------------------- STM32F42xxx/STM32F43xxx/STM32F469xx/STM32F479xx ---------------------------*/ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) ||\ - defined(STM32F469xx) || defined(STM32F479xx) + defined(STM32F469xx) || defined(STM32F479xx) #define OB_PCROP_SECTOR_0 0x00000001U /*!< PC Read/Write protection of Sector0 */ #define OB_PCROP_SECTOR_1 0x00000002U /*!< PC Read/Write protection of Sector1 */ #define OB_PCROP_SECTOR_2 0x00000004U /*!< PC Read/Write protection of Sector2 */ @@ -615,9 +613,9 @@ typedef struct #define OB_PCROP_SECTOR_All 0x00000FFFU /*!< PC Read/Write protection of all Sectors */ #endif /* STM32F427xx || STM32F437xx || STM32F429xx|| STM32F439xx || STM32F469xx || STM32F479xx */ /*-----------------------------------------------------------------------------------------------------*/ - + /*------------------------------------- STM32F413xx/STM32F423xx ---------------------------------------*/ -#if defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F413xx) || defined(STM32F423xx) #define OB_PCROP_SECTOR_0 0x00000001U /*!< PC Read/Write protection of Sector0 */ #define OB_PCROP_SECTOR_1 0x00000002U /*!< PC Read/Write protection of Sector1 */ #define OB_PCROP_SECTOR_2 0x00000004U /*!< PC Read/Write protection of Sector2 */ @@ -633,10 +631,10 @@ typedef struct #define OB_PCROP_SECTOR_12 0x00001000U /*!< PC Read/Write protection of Sector12 */ #define OB_PCROP_SECTOR_13 0x00002000U /*!< PC Read/Write protection of Sector13 */ #define OB_PCROP_SECTOR_14 0x00004000U /*!< PC Read/Write protection of Sector14 */ -#define OB_PCROP_SECTOR_15 0x00004000U /*!< PC Read/Write protection of Sector15 */ +#define OB_PCROP_SECTOR_15 0x00004000U /*!< PC Read/Write protection of Sector15 */ #define OB_PCROP_SECTOR_All 0x00007FFFU /*!< PC Read/Write protection of all Sectors */ #endif /* STM32F413xx || STM32F423xx */ -/*-----------------------------------------------------------------------------------------------------*/ +/*-----------------------------------------------------------------------------------------------------*/ /*--------------------------------------------- STM32F401xC -------------------------------------------*/ #if defined(STM32F401xC) @@ -663,7 +661,7 @@ typedef struct /*-------------- STM32F401xE/STM32F411xE/STM32F412Zx/STM32F412Vx/STM32F412Rx/STM32F412Cx/STM32F446xx --*/ #if defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) ||\ - defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) #define OB_PCROP_SECTOR_0 0x00000001U /*!< PC Read/Write protection of Sector0 */ #define OB_PCROP_SECTOR_1 0x00000002U /*!< PC Read/Write protection of Sector1 */ #define OB_PCROP_SECTOR_2 0x00000004U /*!< PC Read/Write protection of Sector2 */ @@ -679,12 +677,12 @@ typedef struct /** * @} */ - + /** @defgroup FLASHEx_Dual_Boot FLASH Dual Boot * @{ */ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) ||\ - defined(STM32F469xx) || defined(STM32F479xx) + defined(STM32F469xx) || defined(STM32F479xx) #define OB_DUAL_BOOT_ENABLE ((uint8_t)0x10) /*!< Dual Bank Boot Enable */ #define OB_DUAL_BOOT_DISABLE ((uint8_t)0x00) /*!< Dual Bank Boot 
Disable, always boot on User Flash */ #endif /* STM32F427xx || STM32F437xx || STM32F429xx|| STM32F439xx || STM32F469xx || STM32F479xx */ @@ -711,8 +709,8 @@ typedef struct /** * @} - */ - + */ + /* Exported macro ------------------------------------------------------------*/ /* Exported functions --------------------------------------------------------*/ @@ -734,7 +732,7 @@ void HAL_FLASHEx_OBGetConfig(FLASH_OBProgramInitTypeDef *pOBInit); defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F469xx) ||\ defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) ||\ defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) -HAL_StatusTypeDef HAL_FLASHEx_AdvOBProgram (FLASH_AdvOBProgramInitTypeDef *pAdvOBInit); +HAL_StatusTypeDef HAL_FLASHEx_AdvOBProgram(FLASH_AdvOBProgramInitTypeDef *pAdvOBInit); void HAL_FLASHEx_AdvOBGetConfig(FLASH_AdvOBProgramInitTypeDef *pAdvOBInit); HAL_StatusTypeDef HAL_FLASHEx_OB_SelectPCROP(void); HAL_StatusTypeDef HAL_FLASHEx_OB_DeSelectPCROP(void); @@ -759,7 +757,7 @@ uint16_t HAL_FLASHEx_OB_GetBank2WRP(void); /** @defgroup FLASHEx_Private_Constants FLASH Private Constants * @{ */ -/*--------------------------------- STM32F42xxx/STM32F43xxx/STM32F469xx/STM32F479xx---------------------*/ +/*--------------------------------- STM32F42xxx/STM32F43xxx/STM32F469xx/STM32F479xx---------------------*/ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) #define FLASH_SECTOR_TOTAL 24U #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ @@ -769,18 +767,18 @@ uint16_t HAL_FLASHEx_OB_GetBank2WRP(void); #define FLASH_SECTOR_TOTAL 16U #endif /* STM32F413xx || STM32F423xx */ -/*--------------------------------------- STM32F40xxx/STM32F41xxx -------------------------------------*/ +/*--------------------------------------- STM32F40xxx/STM32F41xxx -------------------------------------*/ #if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) || defined(STM32F412Zx) ||\ - defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) #define FLASH_SECTOR_TOTAL 12U #endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ -/*--------------------------------------------- STM32F401xC -------------------------------------------*/ +/*--------------------------------------------- STM32F401xC -------------------------------------------*/ #if defined(STM32F401xC) #define FLASH_SECTOR_TOTAL 6U #endif /* STM32F401xC */ -/*--------------------------------------------- STM32F410xx -------------------------------------------*/ +/*--------------------------------------------- STM32F410xx -------------------------------------------*/ #if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) #define FLASH_SECTOR_TOTAL 5U #endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ @@ -790,10 +788,10 @@ uint16_t HAL_FLASHEx_OB_GetBank2WRP(void); #define FLASH_SECTOR_TOTAL 8U #endif /* STM32F401xE || STM32F411xE || STM32F446xx */ -/** - * @brief OPTCR1 register byte 2 (Bits[23:16]) base address - */ -#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +/** + * @brief OPTCR1 register byte 2 (Bits[23:16]) 
base address + */ +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) #define OPTCR1_BYTE2_ADDRESS 0x40023C1AU #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ @@ -811,15 +809,15 @@ uint16_t HAL_FLASHEx_OB_GetBank2WRP(void); */ #define IS_FLASH_TYPEERASE(VALUE)(((VALUE) == FLASH_TYPEERASE_SECTORS) || \ - ((VALUE) == FLASH_TYPEERASE_MASSERASE)) + ((VALUE) == FLASH_TYPEERASE_MASSERASE)) #define IS_VOLTAGERANGE(RANGE)(((RANGE) == FLASH_VOLTAGE_RANGE_1) || \ ((RANGE) == FLASH_VOLTAGE_RANGE_2) || \ ((RANGE) == FLASH_VOLTAGE_RANGE_3) || \ - ((RANGE) == FLASH_VOLTAGE_RANGE_4)) + ((RANGE) == FLASH_VOLTAGE_RANGE_4)) #define IS_WRPSTATE(VALUE)(((VALUE) == OB_WRPSTATE_DISABLE) || \ - ((VALUE) == OB_WRPSTATE_ENABLE)) + ((VALUE) == OB_WRPSTATE_ENABLE)) #define IS_OPTIONBYTE(VALUE)(((VALUE) <= (OPTIONBYTE_WRP|OPTIONBYTE_RDP|OPTIONBYTE_USER|OPTIONBYTE_BOR))) @@ -842,7 +840,7 @@ uint16_t HAL_FLASHEx_OB_GetBank2WRP(void); defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) ||\ defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) #define IS_PCROPSTATE(VALUE)(((VALUE) == OB_PCROP_STATE_DISABLE) || \ - ((VALUE) == OB_PCROP_STATE_ENABLE)) + ((VALUE) == OB_PCROP_STATE_ENABLE)) #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F401xC || STM32F401xE ||\ STM32F410xx || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx ||\ STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ @@ -850,17 +848,17 @@ uint16_t HAL_FLASHEx_OB_GetBank2WRP(void); #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ defined(STM32F469xx) || defined(STM32F479xx) #define IS_OBEX(VALUE)(((VALUE) == OPTIONBYTE_PCROP) || \ - ((VALUE) == OPTIONBYTE_BOOTCONFIG)) + ((VALUE) == OPTIONBYTE_BOOTCONFIG)) #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ #if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) ||\ defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) ||\ defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) ||\ defined(STM32F423xx) -#define IS_OBEX(VALUE)(((VALUE) == OPTIONBYTE_PCROP)) +#define IS_OBEX(VALUE)(((VALUE) == OPTIONBYTE_PCROP)) #endif /* STM32F401xC || STM32F401xE || STM32F410xx || STM32F411xE || STM32F446xx || STM32F412Zx ||\ STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ - + #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) ||\ defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) #define IS_FLASH_LATENCY(LATENCY) (((LATENCY) == FLASH_LATENCY_0) || \ @@ -910,7 +908,7 @@ uint16_t HAL_FLASHEx_OB_GetBank2WRP(void); #define IS_FLASH_BANK(BANK) (((BANK) == FLASH_BANK_1)) #endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F401xC || STM32F401xE || STM32F410xx || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx ||\ STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ - + #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) #define IS_FLASH_SECTOR(SECTOR) ( ((SECTOR) == FLASH_SECTOR_0) || ((SECTOR) == FLASH_SECTOR_1) ||\ ((SECTOR) == 
FLASH_SECTOR_2) || ((SECTOR) == FLASH_SECTOR_3) ||\ @@ -938,7 +936,7 @@ uint16_t HAL_FLASHEx_OB_GetBank2WRP(void); #endif /* STM32F413xx || STM32F423xx */ #if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) || defined(STM32F412Zx) ||\ - defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) #define IS_FLASH_SECTOR(SECTOR) (((SECTOR) == FLASH_SECTOR_0) || ((SECTOR) == FLASH_SECTOR_1) ||\ ((SECTOR) == FLASH_SECTOR_2) || ((SECTOR) == FLASH_SECTOR_3) ||\ ((SECTOR) == FLASH_SECTOR_4) || ((SECTOR) == FLASH_SECTOR_5) ||\ @@ -970,12 +968,12 @@ uint16_t HAL_FLASHEx_OB_GetBank2WRP(void); (((ADDRESS) >= FLASH_OTP_BASE) && ((ADDRESS) <= FLASH_OTP_END))) #define IS_FLASH_NBSECTORS(NBSECTORS) (((NBSECTORS) != 0) && ((NBSECTORS) <= FLASH_SECTOR_TOTAL)) - -#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) + +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) #define IS_OB_WRP_SECTOR(SECTOR)((((SECTOR) & 0xFF000000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ -#if defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F413xx) || defined(STM32F423xx) #define IS_OB_WRP_SECTOR(SECTOR)((((SECTOR) & 0xFFFF8000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) #endif /* STM32F413xx || STM32F423xx */ @@ -992,16 +990,16 @@ uint16_t HAL_FLASHEx_OB_GetBank2WRP(void); #endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ #if defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F412Vx) ||\ - defined(STM32F412Rx) || defined(STM32F412Cx) + defined(STM32F412Rx) || defined(STM32F412Cx) #define IS_OB_WRP_SECTOR(SECTOR)((((SECTOR) & 0xFFFFF000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) #endif /* STM32F401xE || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ - + #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) #define IS_OB_PCROP(SECTOR)((((SECTOR) & 0xFFFFF000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ #if defined(STM32F413xx) || defined(STM32F423xx) -#define IS_OB_PCROP(SECTOR)((((SECTOR) & 0xFFFF8000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#define IS_OB_PCROP(SECTOR)((((SECTOR) & 0xFFFF8000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) #endif /* STM32F413xx || STM32F423xx */ #if defined(STM32F401xC) @@ -1013,12 +1011,12 @@ uint16_t HAL_FLASHEx_OB_GetBank2WRP(void); #endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ #if defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F412Vx) ||\ - defined(STM32F412Rx) || defined(STM32F412Cx) + defined(STM32F412Rx) || defined(STM32F412Cx) #define IS_OB_PCROP(SECTOR)((((SECTOR) & 0xFFFFF000U) == 0x00000000U) && ((SECTOR) != 0x00000000U)) #endif /* STM32F401xE || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ - defined(STM32F469xx) || defined(STM32F479xx) + 
defined(STM32F469xx) || defined(STM32F479xx) #define IS_OB_BOOT(BOOT) (((BOOT) == OB_DUAL_BOOT_ENABLE) || ((BOOT) == OB_DUAL_BOOT_DISABLE)) #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ @@ -1047,11 +1045,11 @@ void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange); void FLASH_FlushCaches(void); /** * @} - */ + */ /** * @} - */ + */ /** * @} @@ -1063,4 +1061,3 @@ void FLASH_FlushCaches(void); #endif /* __STM32F4xx_HAL_FLASH_EX_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash_ramfunc.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash_ramfunc.h index 9fab0c98..2112e747 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash_ramfunc.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_flash_ramfunc.h @@ -6,26 +6,24 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** - */ + */ /* Define to prevent recursive inclusion -------------------------------------*/ #ifndef __STM32F4xx_FLASH_RAMFUNC_H #define __STM32F4xx_FLASH_RAMFUNC_H #ifdef __cplusplus - extern "C" { +extern "C" { #endif #if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) ||\ - defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) /* Includes ------------------------------------------------------------------*/ #include "stm32f4xx_hal_def.h" @@ -47,14 +45,14 @@ /** @addtogroup FLASH_RAMFUNC_Exported_Functions_Group1 * @{ - */ + */ __RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_StopFlashInterfaceClk(void); __RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_StartFlashInterfaceClk(void); __RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_EnableFlashSleepMode(void); __RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_DisableFlashSleepMode(void); /** * @} - */ + */ /** * @} @@ -62,13 +60,13 @@ __RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_DisableFlashSleepMode(void); /** * @} - */ + */ /** * @} */ -#endif /* STM32F410xx || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ +#endif /* STM32F410xx || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ #ifdef __cplusplus } #endif @@ -76,4 +74,3 @@ __RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_DisableFlashSleepMode(void); #endif /* __STM32F4xx_FLASH_RAMFUNC_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_gpio.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_gpio.h index 5a175384..5f3d749e 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_gpio.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_gpio.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -107,30 +106,30 @@ typedef enum */ /** @defgroup GPIO_mode_define GPIO mode define - * @brief GPIO Configuration Mode - * Elements values convention: 0xX0yz00YZ - * - X : GPIO mode or EXTI Mode - * - y : External IT or Event trigger detection - * - z : IO configuration on External IT or Event - * - Y : Output type (Push Pull or Open Drain) - * - Z : IO Direction mode (Input, Output, Alternate or Analog) + * @brief GPIO Configuration Mode + * Elements values convention: 0x00WX00YZ + * - W : EXTI trigger detection on 3 bits + * - X : EXTI mode (IT or Event) on 2 bits + * - Y : Output type (Push Pull or Open Drain) on 1 bit + * - Z : GPIO mode (Input, Output, Alternate or Analog) on 2 bits * @{ */ -#define GPIO_MODE_INPUT 0x00000000U /*!< Input Floating Mode */ -#define GPIO_MODE_OUTPUT_PP 0x00000001U /*!< Output Push Pull Mode */ -#define GPIO_MODE_OUTPUT_OD 0x00000011U /*!< Output Open Drain Mode */ -#define GPIO_MODE_AF_PP 0x00000002U /*!< Alternate Function Push Pull Mode */ -#define GPIO_MODE_AF_OD 0x00000012U /*!< Alternate Function Open Drain Mode */ +#define GPIO_MODE_INPUT MODE_INPUT /*!< Input Floating Mode */ +#define GPIO_MODE_OUTPUT_PP (MODE_OUTPUT | OUTPUT_PP) /*!< Output Push Pull Mode */ +#define GPIO_MODE_OUTPUT_OD (MODE_OUTPUT | OUTPUT_OD) /*!< Output Open Drain Mode */ +#define GPIO_MODE_AF_PP (MODE_AF | OUTPUT_PP) /*!< Alternate Function Push Pull Mode */ +#define GPIO_MODE_AF_OD (MODE_AF | OUTPUT_OD) /*!< Alternate Function Open Drain Mode */ -#define GPIO_MODE_ANALOG 0x00000003U /*!< Analog Mode */ +#define GPIO_MODE_ANALOG MODE_ANALOG /*!< Analog Mode */ -#define GPIO_MODE_IT_RISING 0x10110000U /*!< External Interrupt Mode with Rising edge trigger detection */ -#define GPIO_MODE_IT_FALLING 0x10210000U /*!< External Interrupt Mode with Falling edge trigger detection */ -#define GPIO_MODE_IT_RISING_FALLING 0x10310000U /*!< External Interrupt Mode with Rising/Falling edge trigger detection */ +#define GPIO_MODE_IT_RISING (MODE_INPUT | EXTI_IT | TRIGGER_RISING) /*!< External Interrupt Mode with Rising edge trigger detection */ +#define GPIO_MODE_IT_FALLING (MODE_INPUT | EXTI_IT | TRIGGER_FALLING) /*!< External Interrupt Mode with Falling edge trigger detection */ +#define GPIO_MODE_IT_RISING_FALLING (MODE_INPUT | EXTI_IT | TRIGGER_RISING | TRIGGER_FALLING) /*!< External Interrupt Mode with Rising/Falling edge trigger detection */ -#define GPIO_MODE_EVT_RISING 0x10120000U /*!< External Event Mode with Rising edge trigger detection */ -#define GPIO_MODE_EVT_FALLING 0x10220000U /*!< External Event Mode with Falling edge trigger detection */ -#define GPIO_MODE_EVT_RISING_FALLING 0x10320000U /*!< External Event Mode with Rising/Falling edge trigger detection */ +#define GPIO_MODE_EVT_RISING (MODE_INPUT | EXTI_EVT | TRIGGER_RISING) /*!< External Event Mode with Rising edge trigger detection */ +#define GPIO_MODE_EVT_FALLING (MODE_INPUT | EXTI_EVT | TRIGGER_FALLING) /*!< External Event 
Mode with Falling edge trigger detection */ +#define GPIO_MODE_EVT_RISING_FALLING (MODE_INPUT | EXTI_EVT | TRIGGER_RISING | TRIGGER_FALLING) /*!< External Event Mode with Rising/Falling edge trigger detection */ + /** * @} */ @@ -252,6 +251,24 @@ void HAL_GPIO_EXTI_Callback(uint16_t GPIO_Pin); /** @defgroup GPIO_Private_Constants GPIO Private Constants * @{ */ +#define GPIO_MODE_Pos 0U +#define GPIO_MODE (0x3UL << GPIO_MODE_Pos) +#define MODE_INPUT (0x0UL << GPIO_MODE_Pos) +#define MODE_OUTPUT (0x1UL << GPIO_MODE_Pos) +#define MODE_AF (0x2UL << GPIO_MODE_Pos) +#define MODE_ANALOG (0x3UL << GPIO_MODE_Pos) +#define OUTPUT_TYPE_Pos 4U +#define OUTPUT_TYPE (0x1UL << OUTPUT_TYPE_Pos) +#define OUTPUT_PP (0x0UL << OUTPUT_TYPE_Pos) +#define OUTPUT_OD (0x1UL << OUTPUT_TYPE_Pos) +#define EXTI_MODE_Pos 16U +#define EXTI_MODE (0x3UL << EXTI_MODE_Pos) +#define EXTI_IT (0x1UL << EXTI_MODE_Pos) +#define EXTI_EVT (0x2UL << EXTI_MODE_Pos) +#define TRIGGER_MODE_Pos 20U +#define TRIGGER_MODE (0x7UL << TRIGGER_MODE_Pos) +#define TRIGGER_RISING (0x1UL << TRIGGER_MODE_Pos) +#define TRIGGER_FALLING (0x2UL << TRIGGER_MODE_Pos) /** * @} @@ -306,4 +323,3 @@ void HAL_GPIO_EXTI_Callback(uint16_t GPIO_Pin); #endif /* __STM32F4xx_HAL_GPIO_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_gpio_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_gpio_ex.h index aa1e34d8..5e0b7cc6 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_gpio_ex.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_gpio_ex.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -1442,21 +1441,21 @@ /*----------------------------------------------------------------------------*/ /*---------------------------------------- STM32F401xx------------------------*/ -#if defined(STM32F401xC) || defined(STM32F401xE) +#if defined(STM32F401xC) || defined(STM32F401xE) #define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF12_SDIO) || \ ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF0_TAMPER) || \ ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ ((AF) == GPIO_AF1_TIM1) || ((AF) == GPIO_AF1_TIM2) || \ ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ - ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF4_I2C1) || \ - ((AF) == GPIO_AF4_I2C2) || ((AF) == GPIO_AF4_I2C3) || \ - ((AF) == GPIO_AF5_SPI1) || ((AF) == GPIO_AF5_SPI2) || \ - ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF5_SPI4) || \ - ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ - ((AF) == GPIO_AF8_USART6) || ((AF) == GPIO_AF10_OTG_FS) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM9) || \ + ((AF) == GPIO_AF3_TIM10) || ((AF) == GPIO_AF3_TIM11) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF5_SPI1) || \ + ((AF) == GPIO_AF5_SPI2) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF7_USART1) || \ + ((AF) == GPIO_AF7_USART2) || ((AF) == GPIO_AF8_USART6) || \ ((AF) == GPIO_AF9_I2C2) || ((AF) == GPIO_AF9_I2C3) || \ - ((AF) == GPIO_AF15_EVENTOUT)) - + ((AF) == GPIO_AF10_OTG_FS) || ((AF) == GPIO_AF15_EVENTOUT)) #endif /* STM32F401xC || STM32F401xE */ /*----------------------------------------------------------------------------*/ /*---------------------------------------- STM32F410xx------------------------*/ @@ -1589,4 +1588,3 @@ #endif /* __STM32F4xx_HAL_GPIO_EX_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2c.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2c.h index ff576558..9a7a67ef 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2c.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2c.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -85,7 +84,7 @@ typedef struct * 01 : Abort (Abort user request on going) * 10 : Timeout * 11 : Error - * b5 Peripheral initilisation status + * b5 Peripheral initialization status * 0 : Reset (Peripheral not initialized) * 1 : Init done (Peripheral initialized and ready to use. HAL I2C Init function called) * b4 (not used) @@ -740,4 +739,3 @@ uint32_t HAL_I2C_GetError(I2C_HandleTypeDef *hi2c); #endif /* __STM32F4xx_HAL_I2C_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2c_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2c_ex.h index 6864d835..31ad99c3 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2c_ex.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2c_ex.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -114,4 +113,3 @@ HAL_StatusTypeDef HAL_I2CEx_ConfigDigitalFilter(I2C_HandleTypeDef *hi2c, uint32_ #endif /* __STM32F4xx_HAL_I2C_EX_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2s.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2s.h index 203ffc1b..66cee01b 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2s.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2s.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -516,7 +515,7 @@ uint32_t HAL_I2S_GetError(I2S_HandleTypeDef *hi2s); */ /** @brief Check whether the specified SPI flag is set or not. - * @param __SR__ copy of I2S SR regsiter. + * @param __SR__ copy of I2S SR register. * @param __FLAG__ specifies the flag to check. * This parameter can be one of the following values: * @arg I2S_FLAG_RXNE: Receive buffer not empty flag @@ -531,7 +530,7 @@ uint32_t HAL_I2S_GetError(I2S_HandleTypeDef *hi2s); & ((__FLAG__) & I2S_FLAG_MASK)) == ((__FLAG__) & I2S_FLAG_MASK)) ? SET : RESET) /** @brief Check whether the specified SPI Interrupt is set or not. - * @param __CR2__ copy of I2S CR2 regsiter. + * @param __CR2__ copy of I2S CR2 register. * @param __INTERRUPT__ specifies the SPI interrupt source to check. * This parameter can be one of the following values: * @arg I2S_IT_TXE: Tx buffer empty interrupt enable @@ -617,4 +616,3 @@ uint32_t HAL_I2S_GetError(I2S_HandleTypeDef *hi2s); #endif /* STM32F4xx_HAL_I2S_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2s_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2s_ex.h index 10335f49..3aaa45b7 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2s_ex.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_i2s_ex.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -182,4 +181,3 @@ void HAL_I2SEx_TxRxCpltCallback(I2S_HandleTypeDef *hi2s); #endif /* STM32F4xx_HAL_I2S_EX_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_mmc.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_mmc.h new file mode 100644 index 00000000..30921aaa --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_mmc.h @@ -0,0 +1,747 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_mmc.h + * @author MCD Application Team + * @brief Header file of MMC HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F4xx_HAL_MMC_H +#define STM32F4xx_HAL_MMC_H + +#if defined(SDIO) + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_ll_sdmmc.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @addtogroup MMC + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup MMC_Exported_Types MMC Exported Types + * @{ + */ + +/** @defgroup MMC_Exported_Types_Group1 MMC State enumeration structure + * @{ + */ +typedef enum +{ + HAL_MMC_STATE_RESET = 0x00000000U, /*!< MMC not yet initialized or disabled */ + HAL_MMC_STATE_READY = 0x00000001U, /*!< MMC initialized and ready for use */ + HAL_MMC_STATE_TIMEOUT = 0x00000002U, /*!< MMC Timeout state */ + HAL_MMC_STATE_BUSY = 0x00000003U, /*!< MMC process ongoing */ + HAL_MMC_STATE_PROGRAMMING = 0x00000004U, /*!< MMC Programming State */ + HAL_MMC_STATE_RECEIVING = 0x00000005U, /*!< MMC Receinving State */ + HAL_MMC_STATE_TRANSFER = 0x00000006U, /*!< MMC Transfer State */ + HAL_MMC_STATE_ERROR = 0x0000000FU /*!< MMC is in error state */ +}HAL_MMC_StateTypeDef; +/** + * @} + */ + +/** @defgroup MMC_Exported_Types_Group2 MMC Card State enumeration structure + * @{ + */ +typedef uint32_t HAL_MMC_CardStateTypeDef; + +#define HAL_MMC_CARD_READY 0x00000001U /*!< Card state is ready */ +#define HAL_MMC_CARD_IDENTIFICATION 0x00000002U /*!< Card is in identification state */ +#define HAL_MMC_CARD_STANDBY 0x00000003U /*!< Card is in standby state */ +#define HAL_MMC_CARD_TRANSFER 0x00000004U /*!< Card is in transfer state */ +#define HAL_MMC_CARD_SENDING 0x00000005U /*!< Card is sending an operation */ +#define HAL_MMC_CARD_RECEIVING 
0x00000006U /*!< Card is receiving operation information */ +#define HAL_MMC_CARD_PROGRAMMING 0x00000007U /*!< Card is in programming state */ +#define HAL_MMC_CARD_DISCONNECTED 0x00000008U /*!< Card is disconnected */ +#define HAL_MMC_CARD_ERROR 0x000000FFU /*!< Card response Error */ +/** + * @} + */ + +/** @defgroup MMC_Exported_Types_Group3 MMC Handle Structure definition + * @{ + */ +#define MMC_InitTypeDef SDIO_InitTypeDef +#define MMC_TypeDef SDIO_TypeDef + +/** + * @brief MMC Card Information Structure definition + */ +typedef struct +{ + uint32_t CardType; /*!< Specifies the card Type */ + + uint32_t Class; /*!< Specifies the class of the card class */ + + uint32_t RelCardAdd; /*!< Specifies the Relative Card Address */ + + uint32_t BlockNbr; /*!< Specifies the Card Capacity in blocks */ + + uint32_t BlockSize; /*!< Specifies one block size in bytes */ + + uint32_t LogBlockNbr; /*!< Specifies the Card logical Capacity in blocks */ + + uint32_t LogBlockSize; /*!< Specifies logical block size in bytes */ + +}HAL_MMC_CardInfoTypeDef; + +/** + * @brief MMC handle Structure definition + */ +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) +typedef struct __MMC_HandleTypeDef +#else +typedef struct +#endif /* USE_HAL_MMC_REGISTER_CALLBACKS */ +{ + MMC_TypeDef *Instance; /*!< MMC registers base address */ + + MMC_InitTypeDef Init; /*!< MMC required parameters */ + + HAL_LockTypeDef Lock; /*!< MMC locking object */ + + uint8_t *pTxBuffPtr; /*!< Pointer to MMC Tx transfer Buffer */ + + uint32_t TxXferSize; /*!< MMC Tx Transfer size */ + + uint8_t *pRxBuffPtr; /*!< Pointer to MMC Rx transfer Buffer */ + + uint32_t RxXferSize; /*!< MMC Rx Transfer size */ + + __IO uint32_t Context; /*!< MMC transfer context */ + + __IO HAL_MMC_StateTypeDef State; /*!< MMC card State */ + + __IO uint32_t ErrorCode; /*!< MMC Card Error codes */ + + DMA_HandleTypeDef *hdmarx; /*!< MMC Rx DMA handle parameters */ + + DMA_HandleTypeDef *hdmatx; /*!< MMC Tx DMA handle parameters */ + + HAL_MMC_CardInfoTypeDef MmcCard; /*!< MMC Card information */ + + uint32_t CSD[4U]; /*!< MMC card specific data table */ + + uint32_t CID[4U]; /*!< MMC card identification number table */ + + uint32_t Ext_CSD[128]; + +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + void (* TxCpltCallback) (struct __MMC_HandleTypeDef *hmmc); + void (* RxCpltCallback) (struct __MMC_HandleTypeDef *hmmc); + void (* ErrorCallback) (struct __MMC_HandleTypeDef *hmmc); + void (* AbortCpltCallback) (struct __MMC_HandleTypeDef *hmmc); + + void (* MspInitCallback) (struct __MMC_HandleTypeDef *hmmc); + void (* MspDeInitCallback) (struct __MMC_HandleTypeDef *hmmc); +#endif +}MMC_HandleTypeDef; + +/** + * @} + */ + +/** @defgroup MMC_Exported_Types_Group4 Card Specific Data: CSD Register + * @{ + */ +typedef struct +{ + __IO uint8_t CSDStruct; /*!< CSD structure */ + __IO uint8_t SysSpecVersion; /*!< System specification version */ + __IO uint8_t Reserved1; /*!< Reserved */ + __IO uint8_t TAAC; /*!< Data read access time 1 */ + __IO uint8_t NSAC; /*!< Data read access time 2 in CLK cycles */ + __IO uint8_t MaxBusClkFrec; /*!< Max. bus clock frequency */ + __IO uint16_t CardComdClasses; /*!< Card command classes */ + __IO uint8_t RdBlockLen; /*!< Max. 
read data block length */ + __IO uint8_t PartBlockRead; /*!< Partial blocks for read allowed */ + __IO uint8_t WrBlockMisalign; /*!< Write block misalignment */ + __IO uint8_t RdBlockMisalign; /*!< Read block misalignment */ + __IO uint8_t DSRImpl; /*!< DSR implemented */ + __IO uint8_t Reserved2; /*!< Reserved */ + __IO uint32_t DeviceSize; /*!< Device Size */ + __IO uint8_t MaxRdCurrentVDDMin; /*!< Max. read current @ VDD min */ + __IO uint8_t MaxRdCurrentVDDMax; /*!< Max. read current @ VDD max */ + __IO uint8_t MaxWrCurrentVDDMin; /*!< Max. write current @ VDD min */ + __IO uint8_t MaxWrCurrentVDDMax; /*!< Max. write current @ VDD max */ + __IO uint8_t DeviceSizeMul; /*!< Device size multiplier */ + __IO uint8_t EraseGrSize; /*!< Erase group size */ + __IO uint8_t EraseGrMul; /*!< Erase group size multiplier */ + __IO uint8_t WrProtectGrSize; /*!< Write protect group size */ + __IO uint8_t WrProtectGrEnable; /*!< Write protect group enable */ + __IO uint8_t ManDeflECC; /*!< Manufacturer default ECC */ + __IO uint8_t WrSpeedFact; /*!< Write speed factor */ + __IO uint8_t MaxWrBlockLen; /*!< Max. write data block length */ + __IO uint8_t WriteBlockPaPartial; /*!< Partial blocks for write allowed */ + __IO uint8_t Reserved3; /*!< Reserved */ + __IO uint8_t ContentProtectAppli; /*!< Content protection application */ + __IO uint8_t FileFormatGroup; /*!< File format group */ + __IO uint8_t CopyFlag; /*!< Copy flag (OTP) */ + __IO uint8_t PermWrProtect; /*!< Permanent write protection */ + __IO uint8_t TempWrProtect; /*!< Temporary write protection */ + __IO uint8_t FileFormat; /*!< File format */ + __IO uint8_t ECC; /*!< ECC code */ + __IO uint8_t CSD_CRC; /*!< CSD CRC */ + __IO uint8_t Reserved4; /*!< Always 1 */ + +}HAL_MMC_CardCSDTypeDef; +/** + * @} + */ + +/** @defgroup MMC_Exported_Types_Group5 Card Identification Data: CID Register + * @{ + */ +typedef struct +{ + __IO uint8_t ManufacturerID; /*!< Manufacturer ID */ + __IO uint16_t OEM_AppliID; /*!< OEM/Application ID */ + __IO uint32_t ProdName1; /*!< Product Name part1 */ + __IO uint8_t ProdName2; /*!< Product Name part2 */ + __IO uint8_t ProdRev; /*!< Product Revision */ + __IO uint32_t ProdSN; /*!< Product Serial Number */ + __IO uint8_t Reserved1; /*!< Reserved1 */ + __IO uint16_t ManufactDate; /*!< Manufacturing Date */ + __IO uint8_t CID_CRC; /*!< CID CRC */ + __IO uint8_t Reserved2; /*!< Always 1 */ + +}HAL_MMC_CardCIDTypeDef; +/** + * @} + */ + +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) +/** @defgroup MMC_Exported_Types_Group6 MMC Callback ID enumeration definition + * @{ + */ +typedef enum +{ + HAL_MMC_TX_CPLT_CB_ID = 0x00U, /*!< MMC Tx Complete Callback ID */ + HAL_MMC_RX_CPLT_CB_ID = 0x01U, /*!< MMC Rx Complete Callback ID */ + HAL_MMC_ERROR_CB_ID = 0x02U, /*!< MMC Error Callback ID */ + HAL_MMC_ABORT_CB_ID = 0x03U, /*!< MMC Abort Callback ID */ + + HAL_MMC_MSP_INIT_CB_ID = 0x10U, /*!< MMC MspInit Callback ID */ + HAL_MMC_MSP_DEINIT_CB_ID = 0x11U /*!< MMC MspDeInit Callback ID */ +}HAL_MMC_CallbackIDTypeDef; +/** + * @} + */ + +/** @defgroup MMC_Exported_Types_Group7 MMC Callback pointer definition + * @{ + */ +typedef void (*pMMC_CallbackTypeDef) (MMC_HandleTypeDef *hmmc); +/** + * @} + */ +#endif +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup MMC_Exported_Constants Exported Constants + * @{ + */ + +#define MMC_BLOCKSIZE 512U /*!< Block size is 512 bytes */ + +/** @defgroup MMC_Exported_Constansts_Group1 MMC 
Error status enumeration Structure definition + * @{ + */ +#define HAL_MMC_ERROR_NONE SDMMC_ERROR_NONE /*!< No error */ +#define HAL_MMC_ERROR_CMD_CRC_FAIL SDMMC_ERROR_CMD_CRC_FAIL /*!< Command response received (but CRC check failed) */ +#define HAL_MMC_ERROR_DATA_CRC_FAIL SDMMC_ERROR_DATA_CRC_FAIL /*!< Data block sent/received (CRC check failed) */ +#define HAL_MMC_ERROR_CMD_RSP_TIMEOUT SDMMC_ERROR_CMD_RSP_TIMEOUT /*!< Command response timeout */ +#define HAL_MMC_ERROR_DATA_TIMEOUT SDMMC_ERROR_DATA_TIMEOUT /*!< Data timeout */ +#define HAL_MMC_ERROR_TX_UNDERRUN SDMMC_ERROR_TX_UNDERRUN /*!< Transmit FIFO underrun */ +#define HAL_MMC_ERROR_RX_OVERRUN SDMMC_ERROR_RX_OVERRUN /*!< Receive FIFO overrun */ +#define HAL_MMC_ERROR_ADDR_MISALIGNED SDMMC_ERROR_ADDR_MISALIGNED /*!< Misaligned address */ +#define HAL_MMC_ERROR_BLOCK_LEN_ERR SDMMC_ERROR_BLOCK_LEN_ERR /*!< Transferred block length is not allowed for the card or the + number of transferred bytes does not match the block length */ +#define HAL_MMC_ERROR_ERASE_SEQ_ERR SDMMC_ERROR_ERASE_SEQ_ERR /*!< An error in the sequence of erase command occurs */ +#define HAL_MMC_ERROR_BAD_ERASE_PARAM SDMMC_ERROR_BAD_ERASE_PARAM /*!< An invalid selection for erase groups */ +#define HAL_MMC_ERROR_WRITE_PROT_VIOLATION SDMMC_ERROR_WRITE_PROT_VIOLATION /*!< Attempt to program a write protect block */ +#define HAL_MMC_ERROR_LOCK_UNLOCK_FAILED SDMMC_ERROR_LOCK_UNLOCK_FAILED /*!< Sequence or password error has been detected in unlock + command or if there was an attempt to access a locked card */ +#define HAL_MMC_ERROR_COM_CRC_FAILED SDMMC_ERROR_COM_CRC_FAILED /*!< CRC check of the previous command failed */ +#define HAL_MMC_ERROR_ILLEGAL_CMD SDMMC_ERROR_ILLEGAL_CMD /*!< Command is not legal for the card state */ +#define HAL_MMC_ERROR_CARD_ECC_FAILED SDMMC_ERROR_CARD_ECC_FAILED /*!< Card internal ECC was applied but failed to correct the data */ +#define HAL_MMC_ERROR_CC_ERR SDMMC_ERROR_CC_ERR /*!< Internal card controller error */ +#define HAL_MMC_ERROR_GENERAL_UNKNOWN_ERR SDMMC_ERROR_GENERAL_UNKNOWN_ERR /*!< General or unknown error */ +#define HAL_MMC_ERROR_STREAM_READ_UNDERRUN SDMMC_ERROR_STREAM_READ_UNDERRUN /*!< The card could not sustain data reading in stream rmode */ +#define HAL_MMC_ERROR_STREAM_WRITE_OVERRUN SDMMC_ERROR_STREAM_WRITE_OVERRUN /*!< The card could not sustain data programming in stream mode */ +#define HAL_MMC_ERROR_CID_CSD_OVERWRITE SDMMC_ERROR_CID_CSD_OVERWRITE /*!< CID/CSD overwrite error */ +#define HAL_MMC_ERROR_WP_ERASE_SKIP SDMMC_ERROR_WP_ERASE_SKIP /*!< Only partial address space was erased */ +#define HAL_MMC_ERROR_CARD_ECC_DISABLED SDMMC_ERROR_CARD_ECC_DISABLED /*!< Command has been executed without using internal ECC */ +#define HAL_MMC_ERROR_ERASE_RESET SDMMC_ERROR_ERASE_RESET /*!< Erase sequence was cleared before executing because an out + of erase sequence command was received */ +#define HAL_MMC_ERROR_AKE_SEQ_ERR SDMMC_ERROR_AKE_SEQ_ERR /*!< Error in sequence of authentication */ +#define HAL_MMC_ERROR_INVALID_VOLTRANGE SDMMC_ERROR_INVALID_VOLTRANGE /*!< Error in case of invalid voltage range */ +#define HAL_MMC_ERROR_ADDR_OUT_OF_RANGE SDMMC_ERROR_ADDR_OUT_OF_RANGE /*!< Error when addressed block is out of range */ +#define HAL_MMC_ERROR_REQUEST_NOT_APPLICABLE SDMMC_ERROR_REQUEST_NOT_APPLICABLE /*!< Error when command request is not applicable */ +#define HAL_MMC_ERROR_PARAM SDMMC_ERROR_INVALID_PARAMETER /*!< the used parameter is not valid */ +#define HAL_MMC_ERROR_UNSUPPORTED_FEATURE SDMMC_ERROR_UNSUPPORTED_FEATURE 
/*!< Error when feature is not insupported */ +#define HAL_MMC_ERROR_BUSY SDMMC_ERROR_BUSY /*!< Error when transfer process is busy */ +#define HAL_MMC_ERROR_DMA SDMMC_ERROR_DMA /*!< Error while DMA transfer */ +#define HAL_MMC_ERROR_TIMEOUT SDMMC_ERROR_TIMEOUT /*!< Timeout error */ + +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) +#define HAL_MMC_ERROR_INVALID_CALLBACK SDMMC_ERROR_INVALID_PARAMETER /*!< Invalid callback error */ +#endif +/** + * @} + */ + +/** @defgroup MMC_Exported_Constansts_Group2 MMC context enumeration + * @{ + */ +#define MMC_CONTEXT_NONE 0x00000000U /*!< None */ +#define MMC_CONTEXT_READ_SINGLE_BLOCK 0x00000001U /*!< Read single block operation */ +#define MMC_CONTEXT_READ_MULTIPLE_BLOCK 0x00000002U /*!< Read multiple blocks operation */ +#define MMC_CONTEXT_WRITE_SINGLE_BLOCK 0x00000010U /*!< Write single block operation */ +#define MMC_CONTEXT_WRITE_MULTIPLE_BLOCK 0x00000020U /*!< Write multiple blocks operation */ +#define MMC_CONTEXT_IT 0x00000008U /*!< Process in Interrupt mode */ +#define MMC_CONTEXT_DMA 0x00000080U /*!< Process in DMA mode */ + +/** + * @} + */ + +/** @defgroup MMC_Exported_Constansts_Group3 MMC Voltage mode + * @{ + */ +/** + * @brief + */ +#define MMC_HIGH_VOLTAGE_RANGE 0x80FF8000U /*!< High voltage in byte mode */ +#define MMC_DUAL_VOLTAGE_RANGE 0x80FF8080U /*!< Dual voltage in byte mode */ +#define MMC_LOW_VOLTAGE_RANGE 0x80000080U /*!< Low voltage in byte mode */ +#define EMMC_HIGH_VOLTAGE_RANGE 0xC0FF8000U /*!< High voltage in sector mode */ +#define EMMC_DUAL_VOLTAGE_RANGE 0xC0FF8080U /*!< Dual voltage in sector mode */ +#define EMMC_LOW_VOLTAGE_RANGE 0xC0000080U /*!< Low voltage in sector mode */ +#define MMC_INVALID_VOLTAGE_RANGE 0x0001FF01U +/** + * @} + */ + +/** @defgroup MMC_Exported_Constansts_Group4 MMC Memory Cards + * @{ + */ +#define MMC_LOW_CAPACITY_CARD 0x00000000U /*!< MMC Card Capacity <=2Gbytes */ +#define MMC_HIGH_CAPACITY_CARD 0x00000001U /*!< MMC Card Capacity >2Gbytes and <2Tbytes */ + +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup MMC_Exported_macros MMC Exported Macros + * @brief macros to handle interrupts and specific clock configurations + * @{ + */ +/** @brief Reset MMC handle state. + * @param __HANDLE__ : MMC handle. + * @retval None + */ +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) +#define __HAL_MMC_RESET_HANDLE_STATE(__HANDLE__) do { \ + (__HANDLE__)->State = HAL_MMC_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_MMC_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_MMC_STATE_RESET) +#endif + +/** + * @brief Enable the MMC device. + * @retval None + */ +#define __HAL_MMC_ENABLE(__HANDLE__) __SDIO_ENABLE((__HANDLE__)->Instance) + +/** + * @brief Disable the MMC device. + * @retval None + */ +#define __HAL_MMC_DISABLE(__HANDLE__) __SDIO_DISABLE((__HANDLE__)->Instance) + +/** + * @brief Enable the SDMMC DMA transfer. + * @retval None + */ +#define __HAL_MMC_DMA_ENABLE(__HANDLE__) __SDIO_DMA_ENABLE((__HANDLE__)->Instance) + +/** + * @brief Disable the SDMMC DMA transfer. + * @retval None + */ +#define __HAL_MMC_DMA_DISABLE(__HANDLE__) __SDIO_DMA_DISABLE((__HANDLE__)->Instance) + +/** + * @brief Enable the MMC device interrupt. + * @param __HANDLE__: MMC Handle + * @param __INTERRUPT__: specifies the SDMMC interrupt sources to be enabled. 
+ * This parameter can be one or a combination of the following values: + * @arg SDIO_IT_CCRCFAIL: Command response received (CRC check failed) interrupt + * @arg SDIO_IT_DCRCFAIL: Data block sent/received (CRC check failed) interrupt + * @arg SDIO_IT_CTIMEOUT: Command response timeout interrupt + * @arg SDIO_IT_DTIMEOUT: Data timeout interrupt + * @arg SDIO_IT_TXUNDERR: Transmit FIFO underrun error interrupt + * @arg SDIO_IT_RXOVERR: Received FIFO overrun error interrupt + * @arg SDIO_IT_CMDREND: Command response received (CRC check passed) interrupt + * @arg SDIO_IT_CMDSENT: Command sent (no response required) interrupt + * @arg SDIO_IT_DATAEND: Data end (data counter, DATACOUNT, is zero) interrupt + * @arg SDIO_IT_DBCKEND: Data block sent/received (CRC check passed) interrupt + * @arg SDIO_IT_CMDACT: Command transfer in progress interrupt + * @arg SDIO_IT_TXACT: Data transmit in progress interrupt + * @arg SDIO_IT_RXACT: Data receive in progress interrupt + * @arg SDIO_IT_TXFIFOHE: Transmit FIFO Half Empty interrupt + * @arg SDIO_IT_RXFIFOHF: Receive FIFO Half Full interrupt + * @arg SDIO_IT_TXFIFOF: Transmit FIFO full interrupt + * @arg SDIO_IT_RXFIFOF: Receive FIFO full interrupt + * @arg SDIO_IT_TXFIFOE: Transmit FIFO empty interrupt + * @arg SDIO_IT_RXFIFOE: Receive FIFO empty interrupt + * @arg SDIO_IT_TXDAVL: Data available in transmit FIFO interrupt + * @arg SDIO_IT_RXDAVL: Data available in receive FIFO interrupt + * @arg SDIO_IT_SDIOIT: SD I/O interrupt received interrupt + * @retval None + */ +#define __HAL_MMC_ENABLE_IT(__HANDLE__, __INTERRUPT__) __SDIO_ENABLE_IT((__HANDLE__)->Instance, (__INTERRUPT__)) + +/** + * @brief Disable the MMC device interrupt. + * @param __HANDLE__: MMC Handle + * @param __INTERRUPT__: specifies the SDMMC interrupt sources to be disabled. + * This parameter can be one or a combination of the following values: + * @arg SDIO_IT_CCRCFAIL: Command response received (CRC check failed) interrupt + * @arg SDIO_IT_DCRCFAIL: Data block sent/received (CRC check failed) interrupt + * @arg SDIO_IT_CTIMEOUT: Command response timeout interrupt + * @arg SDIO_IT_DTIMEOUT: Data timeout interrupt + * @arg SDIO_IT_TXUNDERR: Transmit FIFO underrun error interrupt + * @arg SDIO_IT_RXOVERR: Received FIFO overrun error interrupt + * @arg SDIO_IT_CMDREND: Command response received (CRC check passed) interrupt + * @arg SDIO_IT_CMDSENT: Command sent (no response required) interrupt + * @arg SDIO_IT_DATAEND: Data end (data counter, DATACOUNT, is zero) interrupt + * @arg SDIO_IT_DBCKEND: Data block sent/received (CRC check passed) interrupt + * @arg SDIO_IT_CMDACT: Command transfer in progress interrupt + * @arg SDIO_IT_TXACT: Data transmit in progress interrupt + * @arg SDIO_IT_RXACT: Data receive in progress interrupt + * @arg SDIO_IT_TXFIFOHE: Transmit FIFO Half Empty interrupt + * @arg SDIO_IT_RXFIFOHF: Receive FIFO Half Full interrupt + * @arg SDIO_IT_TXFIFOF: Transmit FIFO full interrupt + * @arg SDIO_IT_RXFIFOF: Receive FIFO full interrupt + * @arg SDIO_IT_TXFIFOE: Transmit FIFO empty interrupt + * @arg SDIO_IT_RXFIFOE: Receive FIFO empty interrupt + * @arg SDIO_IT_TXDAVL: Data available in transmit FIFO interrupt + * @arg SDIO_IT_RXDAVL: Data available in receive FIFO interrupt + * @arg SDIO_IT_SDIOIT: SD I/O interrupt received interrupt + * @retval None + */ +#define __HAL_MMC_DISABLE_IT(__HANDLE__, __INTERRUPT__) __SDIO_DISABLE_IT((__HANDLE__)->Instance, (__INTERRUPT__)) + +/** + * @brief Check whether the specified MMC flag is set or not. 
+ * @param __HANDLE__: MMC Handle + * @param __FLAG__: specifies the flag to check. + * This parameter can be one of the following values: + * @arg SDIO_FLAG_CCRCFAIL: Command response received (CRC check failed) + * @arg SDIO_FLAG_DCRCFAIL: Data block sent/received (CRC check failed) + * @arg SDIO_FLAG_CTIMEOUT: Command response timeout + * @arg SDIO_FLAG_DTIMEOUT: Data timeout + * @arg SDIO_FLAG_TXUNDERR: Transmit FIFO underrun error + * @arg SDIO_FLAG_RXOVERR: Received FIFO overrun error + * @arg SDIO_FLAG_CMDREND: Command response received (CRC check passed) + * @arg SDIO_FLAG_CMDSENT: Command sent (no response required) + * @arg SDIO_FLAG_DATAEND: Data end (data counter, DATACOUNT, is zero) + * @arg SDIO_FLAG_DBCKEND: Data block sent/received (CRC check passed) + * @arg SDIO_FLAG_CMDACT: Command transfer in progress + * @arg SDIO_FLAG_TXACT: Data transmit in progress + * @arg SDIO_FLAG_RXACT: Data receive in progress + * @arg SDIO_FLAG_TXFIFOHE: Transmit FIFO Half Empty + * @arg SDIO_FLAG_RXFIFOHF: Receive FIFO Half Full + * @arg SDIO_FLAG_TXFIFOF: Transmit FIFO full + * @arg SDIO_FLAG_RXFIFOF: Receive FIFO full + * @arg SDIO_FLAG_TXFIFOE: Transmit FIFO empty + * @arg SDIO_FLAG_RXFIFOE: Receive FIFO empty + * @arg SDIO_FLAG_TXDAVL: Data available in transmit FIFO + * @arg SDIO_FLAG_RXDAVL: Data available in receive FIFO + * @arg SDIO_FLAG_SDIOIT: SD I/O interrupt received + * @retval The new state of MMC FLAG (SET or RESET). + */ +#define __HAL_MMC_GET_FLAG(__HANDLE__, __FLAG__) __SDIO_GET_FLAG((__HANDLE__)->Instance, (__FLAG__)) + +/** + * @brief Clear the MMC's pending flags. + * @param __HANDLE__: MMC Handle + * @param __FLAG__: specifies the flag to clear. + * This parameter can be one or a combination of the following values: + * @arg SDIO_FLAG_CCRCFAIL: Command response received (CRC check failed) + * @arg SDIO_FLAG_DCRCFAIL: Data block sent/received (CRC check failed) + * @arg SDIO_FLAG_CTIMEOUT: Command response timeout + * @arg SDIO_FLAG_DTIMEOUT: Data timeout + * @arg SDIO_FLAG_TXUNDERR: Transmit FIFO underrun error + * @arg SDIO_FLAG_RXOVERR: Received FIFO overrun error + * @arg SDIO_FLAG_CMDREND: Command response received (CRC check passed) + * @arg SDIO_FLAG_CMDSENT: Command sent (no response required) + * @arg SDIO_FLAG_DATAEND: Data end (data counter, DATACOUNT, is zero) + * @arg SDIO_FLAG_DBCKEND: Data block sent/received (CRC check passed) + * @arg SDIO_FLAG_SDIOIT: SD I/O interrupt received + * @retval None + */ +#define __HAL_MMC_CLEAR_FLAG(__HANDLE__, __FLAG__) __SDIO_CLEAR_FLAG((__HANDLE__)->Instance, (__FLAG__)) + +/** + * @brief Check whether the specified MMC interrupt has occurred or not. + * @param __HANDLE__: MMC Handle + * @param __INTERRUPT__: specifies the SDMMC interrupt source to check. 
+ * This parameter can be one of the following values: + * @arg SDIO_IT_CCRCFAIL: Command response received (CRC check failed) interrupt + * @arg SDIO_IT_DCRCFAIL: Data block sent/received (CRC check failed) interrupt + * @arg SDIO_IT_CTIMEOUT: Command response timeout interrupt + * @arg SDIO_IT_DTIMEOUT: Data timeout interrupt + * @arg SDIO_IT_TXUNDERR: Transmit FIFO underrun error interrupt + * @arg SDIO_IT_RXOVERR: Received FIFO overrun error interrupt + * @arg SDIO_IT_CMDREND: Command response received (CRC check passed) interrupt + * @arg SDIO_IT_CMDSENT: Command sent (no response required) interrupt + * @arg SDIO_IT_DATAEND: Data end (data counter, DATACOUNT, is zero) interrupt + * @arg SDIO_IT_DBCKEND: Data block sent/received (CRC check passed) interrupt + * @arg SDIO_IT_CMDACT: Command transfer in progress interrupt + * @arg SDIO_IT_TXACT: Data transmit in progress interrupt + * @arg SDIO_IT_RXACT: Data receive in progress interrupt + * @arg SDIO_IT_TXFIFOHE: Transmit FIFO Half Empty interrupt + * @arg SDIO_IT_RXFIFOHF: Receive FIFO Half Full interrupt + * @arg SDIO_IT_TXFIFOF: Transmit FIFO full interrupt + * @arg SDIO_IT_RXFIFOF: Receive FIFO full interrupt + * @arg SDIO_IT_TXFIFOE: Transmit FIFO empty interrupt + * @arg SDIO_IT_RXFIFOE: Receive FIFO empty interrupt + * @arg SDIO_IT_TXDAVL: Data available in transmit FIFO interrupt + * @arg SDIO_IT_RXDAVL: Data available in receive FIFO interrupt + * @arg SDIO_IT_SDIOIT: SD I/O interrupt received interrupt + * @retval The new state of MMC IT (SET or RESET). + */ +#define __HAL_MMC_GET_IT(__HANDLE__, __INTERRUPT__) __SDIO_GET_IT((__HANDLE__)->Instance, (__INTERRUPT__)) + +/** + * @brief Clear the MMC's interrupt pending bits. + * @param __HANDLE__: MMC Handle + * @param __INTERRUPT__: specifies the interrupt pending bit to clear. 
+ * This parameter can be one or a combination of the following values: + * @arg SDIO_IT_CCRCFAIL: Command response received (CRC check failed) interrupt + * @arg SDIO_IT_DCRCFAIL: Data block sent/received (CRC check failed) interrupt + * @arg SDIO_IT_CTIMEOUT: Command response timeout interrupt + * @arg SDIO_IT_DTIMEOUT: Data timeout interrupt + * @arg SDIO_IT_TXUNDERR: Transmit FIFO underrun error interrupt + * @arg SDIO_IT_RXOVERR: Received FIFO overrun error interrupt + * @arg SDIO_IT_CMDREND: Command response received (CRC check passed) interrupt + * @arg SDIO_IT_CMDSENT: Command sent (no response required) interrupt + * @arg SDIO_IT_DATAEND: Data end (data counter, DATACOUNT, is zero) interrupt + * @arg SDIO_IT_DBCKEND: Data block sent/received (CRC check passed) interrupt + * @arg SDIO_IT_TXFIFOHE: Transmit FIFO Half Empty interrupt + * @arg SDIO_IT_RXFIFOHF: Receive FIFO Half Full interrupt + * @arg SDIO_IT_RXFIFOF: Receive FIFO full interrupt + * @arg SDIO_IT_TXFIFOE: Transmit FIFO empty interrupt + * @arg SDIO_IT_SDIOIT: SD I/O interrupt received interrupt + * @retval None + */ +#define __HAL_MMC_CLEAR_IT(__HANDLE__, __INTERRUPT__) __SDIO_CLEAR_IT((__HANDLE__)->Instance, (__INTERRUPT__)) + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup MMC_Exported_Functions MMC Exported Functions + * @{ + */ + +/** @defgroup MMC_Exported_Functions_Group1 Initialization and de-initialization functions + * @{ + */ +HAL_StatusTypeDef HAL_MMC_Init(MMC_HandleTypeDef *hmmc); +HAL_StatusTypeDef HAL_MMC_InitCard(MMC_HandleTypeDef *hmmc); +HAL_StatusTypeDef HAL_MMC_DeInit (MMC_HandleTypeDef *hmmc); +void HAL_MMC_MspInit(MMC_HandleTypeDef *hmmc); +void HAL_MMC_MspDeInit(MMC_HandleTypeDef *hmmc); + +/** + * @} + */ + +/** @defgroup MMC_Exported_Functions_Group2 Input and Output operation functions + * @{ + */ +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_MMC_ReadBlocks(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks, uint32_t Timeout); +HAL_StatusTypeDef HAL_MMC_WriteBlocks(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks, uint32_t Timeout); +HAL_StatusTypeDef HAL_MMC_Erase(MMC_HandleTypeDef *hmmc, uint32_t BlockStartAdd, uint32_t BlockEndAdd); +/* Non-Blocking mode: IT */ +HAL_StatusTypeDef HAL_MMC_ReadBlocks_IT(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks); +HAL_StatusTypeDef HAL_MMC_WriteBlocks_IT(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_MMC_ReadBlocks_DMA(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks); +HAL_StatusTypeDef HAL_MMC_WriteBlocks_DMA(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks); + +void HAL_MMC_IRQHandler(MMC_HandleTypeDef *hmmc); + +/* Callback in non blocking modes (DMA) */ +void HAL_MMC_TxCpltCallback(MMC_HandleTypeDef *hmmc); +void HAL_MMC_RxCpltCallback(MMC_HandleTypeDef *hmmc); +void HAL_MMC_ErrorCallback(MMC_HandleTypeDef *hmmc); +void HAL_MMC_AbortCallback(MMC_HandleTypeDef *hmmc); + +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) +/* MMC callback registering/unregistering */ +HAL_StatusTypeDef HAL_MMC_RegisterCallback (MMC_HandleTypeDef *hmmc, HAL_MMC_CallbackIDTypeDef CallbackId, pMMC_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_MMC_UnRegisterCallback(MMC_HandleTypeDef *hmmc, 
HAL_MMC_CallbackIDTypeDef CallbackId); +#endif +/** + * @} + */ + +/** @defgroup MMC_Exported_Functions_Group3 Peripheral Control functions + * @{ + */ +HAL_StatusTypeDef HAL_MMC_ConfigWideBusOperation(MMC_HandleTypeDef *hmmc, uint32_t WideMode); +/** + * @} + */ + +/** @defgroup MMC_Exported_Functions_Group4 MMC card related functions + * @{ + */ +HAL_MMC_CardStateTypeDef HAL_MMC_GetCardState(MMC_HandleTypeDef *hmmc); +HAL_StatusTypeDef HAL_MMC_GetCardCID(MMC_HandleTypeDef *hmmc, HAL_MMC_CardCIDTypeDef *pCID); +HAL_StatusTypeDef HAL_MMC_GetCardCSD(MMC_HandleTypeDef *hmmc, HAL_MMC_CardCSDTypeDef *pCSD); +HAL_StatusTypeDef HAL_MMC_GetCardInfo(MMC_HandleTypeDef *hmmc, HAL_MMC_CardInfoTypeDef *pCardInfo); +HAL_StatusTypeDef HAL_MMC_GetCardExtCSD(MMC_HandleTypeDef *hmmc, uint32_t *pExtCSD, uint32_t Timeout); +/** + * @} + */ + +/** @defgroup MMC_Exported_Functions_Group5 Peripheral State and Errors functions + * @{ + */ +HAL_MMC_StateTypeDef HAL_MMC_GetState(MMC_HandleTypeDef *hmmc); +uint32_t HAL_MMC_GetError(MMC_HandleTypeDef *hmmc); +/** + * @} + */ + +/** @defgroup MMC_Exported_Functions_Group6 Peripheral Abort management + * @{ + */ +HAL_StatusTypeDef HAL_MMC_Abort(MMC_HandleTypeDef *hmmc); +HAL_StatusTypeDef HAL_MMC_Abort_IT(MMC_HandleTypeDef *hmmc); +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/** @defgroup MMC_Private_Types MMC Private Types + * @{ + */ + +/** + * @} + */ + +/* Private defines -----------------------------------------------------------*/ +/** @defgroup MMC_Private_Defines MMC Private Defines + * @{ + */ + +/** + * @} + */ + +/* Private variables ---------------------------------------------------------*/ +/** @defgroup MMC_Private_Variables MMC Private Variables + * @{ + */ + +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup MMC_Private_Constants MMC Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup MMC_Private_Macros MMC Private Macros + * @{ + */ + +/** + * @} + */ + +/* Private functions prototypes ----------------------------------------------*/ +/** @defgroup MMC_Private_Functions_Prototypes MMC Private Functions Prototypes + * @{ + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup MMC_Private_Functions MMC Private Functions + * @{ + */ + +/** + * @} + */ + + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* SDIO */ + +#endif /* STM32F4xx_HAL_MMC_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pcd.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pcd.h index 87b9efa0..de1ec241 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pcd.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pcd.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -107,6 +106,7 @@ typedef struct uint32_t Setup[12]; /*!< Setup packet buffer */ PCD_LPM_StateTypeDef LPM_State; /*!< LPM State */ uint32_t BESL; + uint32_t FrameNumber; /*!< Store Current Frame number */ uint32_t lpm_active; /*!< Enable or disable the Link Power Management . @@ -190,20 +190,24 @@ typedef struct * @brief macros to handle interrupts and specific clock configurations * @{ */ -#if defined (USB_OTG_FS) || defined (USB_OTG_HS) #define __HAL_PCD_ENABLE(__HANDLE__) (void)USB_EnableGlobalInt ((__HANDLE__)->Instance) #define __HAL_PCD_DISABLE(__HANDLE__) (void)USB_DisableGlobalInt ((__HANDLE__)->Instance) -#define __HAL_PCD_GET_FLAG(__HANDLE__, __INTERRUPT__) ((USB_ReadInterrupts((__HANDLE__)->Instance) & (__INTERRUPT__)) == (__INTERRUPT__)) -#define __HAL_PCD_CLEAR_FLAG(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->GINTSTS) &= (__INTERRUPT__)) -#define __HAL_PCD_IS_INVALID_INTERRUPT(__HANDLE__) (USB_ReadInterrupts((__HANDLE__)->Instance) == 0U) +#define __HAL_PCD_GET_FLAG(__HANDLE__, __INTERRUPT__) \ + ((USB_ReadInterrupts((__HANDLE__)->Instance) & (__INTERRUPT__)) == (__INTERRUPT__)) +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +#define __HAL_PCD_CLEAR_FLAG(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->GINTSTS) &= (__INTERRUPT__)) +#define __HAL_PCD_IS_INVALID_INTERRUPT(__HANDLE__) (USB_ReadInterrupts((__HANDLE__)->Instance) == 0U) -#define __HAL_PCD_UNGATE_PHYCLOCK(__HANDLE__) *(__IO uint32_t *)((uint32_t)((__HANDLE__)->Instance) + USB_OTG_PCGCCTL_BASE) &= ~(USB_OTG_PCGCCTL_STOPCLK) +#define __HAL_PCD_UNGATE_PHYCLOCK(__HANDLE__) \ + *(__IO uint32_t *)((uint32_t)((__HANDLE__)->Instance) + USB_OTG_PCGCCTL_BASE) &= ~(USB_OTG_PCGCCTL_STOPCLK) -#define __HAL_PCD_GATE_PHYCLOCK(__HANDLE__) *(__IO uint32_t *)((uint32_t)((__HANDLE__)->Instance) + USB_OTG_PCGCCTL_BASE) |= USB_OTG_PCGCCTL_STOPCLK +#define __HAL_PCD_GATE_PHYCLOCK(__HANDLE__) \ + *(__IO uint32_t *)((uint32_t)((__HANDLE__)->Instance) + USB_OTG_PCGCCTL_BASE) |= USB_OTG_PCGCCTL_STOPCLK -#define __HAL_PCD_IS_PHY_SUSPENDED(__HANDLE__) ((*(__IO uint32_t *)((uint32_t)((__HANDLE__)->Instance) + USB_OTG_PCGCCTL_BASE)) & 0x10U) +#define __HAL_PCD_IS_PHY_SUSPENDED(__HANDLE__) \ + ((*(__IO uint32_t *)((uint32_t)((__HANDLE__)->Instance) + USB_OTG_PCGCCTL_BASE)) & 0x10U) #define __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_IT() EXTI->IMR |= (USB_OTG_HS_WAKEUP_EXTI_LINE) #define __HAL_USB_OTG_HS_WAKEUP_EXTI_DISABLE_IT() EXTI->IMR &= ~(USB_OTG_HS_WAKEUP_EXTI_LINE) @@ -286,12 +290,10 @@ typedef void (*pPCD_BcdCallbackTypeDef)(PCD_HandleTypeDef *hpcd, PCD_BCD_MsgType * @} */ -HAL_StatusTypeDef HAL_PCD_RegisterCallback(PCD_HandleTypeDef *hpcd, - HAL_PCD_CallbackIDTypeDef CallbackID, +HAL_StatusTypeDef HAL_PCD_RegisterCallback(PCD_HandleTypeDef *hpcd, HAL_PCD_CallbackIDTypeDef CallbackID, pPCD_CallbackTypeDef pCallback); -HAL_StatusTypeDef HAL_PCD_UnRegisterCallback(PCD_HandleTypeDef *hpcd, - HAL_PCD_CallbackIDTypeDef CallbackID); 
+HAL_StatusTypeDef HAL_PCD_UnRegisterCallback(PCD_HandleTypeDef *hpcd, HAL_PCD_CallbackIDTypeDef CallbackID); HAL_StatusTypeDef HAL_PCD_RegisterDataOutStageCallback(PCD_HandleTypeDef *hpcd, pPCD_DataOutStageCallbackTypeDef pCallback); @@ -313,14 +315,10 @@ HAL_StatusTypeDef HAL_PCD_RegisterIsoInIncpltCallback(PCD_HandleTypeDef *hpcd, HAL_StatusTypeDef HAL_PCD_UnRegisterIsoInIncpltCallback(PCD_HandleTypeDef *hpcd); -HAL_StatusTypeDef HAL_PCD_RegisterBcdCallback(PCD_HandleTypeDef *hpcd, - pPCD_BcdCallbackTypeDef pCallback); - +HAL_StatusTypeDef HAL_PCD_RegisterBcdCallback(PCD_HandleTypeDef *hpcd, pPCD_BcdCallbackTypeDef pCallback); HAL_StatusTypeDef HAL_PCD_UnRegisterBcdCallback(PCD_HandleTypeDef *hpcd); -HAL_StatusTypeDef HAL_PCD_RegisterLpmCallback(PCD_HandleTypeDef *hpcd, - pPCD_LpmCallbackTypeDef pCallback); - +HAL_StatusTypeDef HAL_PCD_RegisterLpmCallback(PCD_HandleTypeDef *hpcd, pPCD_LpmCallbackTypeDef pCallback); HAL_StatusTypeDef HAL_PCD_UnRegisterLpmCallback(PCD_HandleTypeDef *hpcd); #endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ /** @@ -360,24 +358,21 @@ void HAL_PCD_ISOINIncompleteCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum); HAL_StatusTypeDef HAL_PCD_DevConnect(PCD_HandleTypeDef *hpcd); HAL_StatusTypeDef HAL_PCD_DevDisconnect(PCD_HandleTypeDef *hpcd); HAL_StatusTypeDef HAL_PCD_SetAddress(PCD_HandleTypeDef *hpcd, uint8_t address); -HAL_StatusTypeDef HAL_PCD_EP_Open(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, - uint16_t ep_mps, uint8_t ep_type); - +HAL_StatusTypeDef HAL_PCD_EP_Open(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, uint16_t ep_mps, uint8_t ep_type); HAL_StatusTypeDef HAL_PCD_EP_Close(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); -HAL_StatusTypeDef HAL_PCD_EP_Receive(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, - uint8_t *pBuf, uint32_t len); - -HAL_StatusTypeDef HAL_PCD_EP_Transmit(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, - uint8_t *pBuf, uint32_t len); - - +HAL_StatusTypeDef HAL_PCD_EP_Receive(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, uint8_t *pBuf, uint32_t len); +HAL_StatusTypeDef HAL_PCD_EP_Transmit(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, uint8_t *pBuf, uint32_t len); HAL_StatusTypeDef HAL_PCD_EP_SetStall(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); HAL_StatusTypeDef HAL_PCD_EP_ClrStall(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); HAL_StatusTypeDef HAL_PCD_EP_Flush(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); +HAL_StatusTypeDef HAL_PCD_EP_Abort(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); HAL_StatusTypeDef HAL_PCD_ActivateRemoteWakeup(PCD_HandleTypeDef *hpcd); HAL_StatusTypeDef HAL_PCD_DeActivateRemoteWakeup(PCD_HandleTypeDef *hpcd); +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +HAL_StatusTypeDef HAL_PCD_SetTestMode(const PCD_HandleTypeDef *hpcd, uint8_t testmode); +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ -uint32_t HAL_PCD_EP_GetRxCount(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); +uint32_t HAL_PCD_EP_GetRxCount(PCD_HandleTypeDef const *hpcd, uint8_t ep_addr); /** * @} */ @@ -386,7 +381,7 @@ uint32_t HAL_PCD_EP_GetRxCount(PCD_HandleTypeDef *hpcd, uint8_t ep_addr /** @addtogroup PCD_Exported_Functions_Group4 Peripheral State functions * @{ */ -PCD_StateTypeDef HAL_PCD_GetState(PCD_HandleTypeDef *hpcd); +PCD_StateTypeDef HAL_PCD_GetState(PCD_HandleTypeDef const *hpcd); /** * @} */ @@ -418,27 +413,27 @@ PCD_StateTypeDef HAL_PCD_GetState(PCD_HandleTypeDef *hpcd); #if defined (USB_OTG_FS) || defined (USB_OTG_HS) #ifndef USB_OTG_DOEPINT_OTEPSPR #define USB_OTG_DOEPINT_OTEPSPR (0x1UL << 5) /*!< Status Phase Received interrupt */ -#endif +#endif /* defined 
USB_OTG_DOEPINT_OTEPSPR */ #ifndef USB_OTG_DOEPMSK_OTEPSPRM #define USB_OTG_DOEPMSK_OTEPSPRM (0x1UL << 5) /*!< Setup Packet Received interrupt mask */ -#endif +#endif /* defined USB_OTG_DOEPMSK_OTEPSPRM */ #ifndef USB_OTG_DOEPINT_NAK #define USB_OTG_DOEPINT_NAK (0x1UL << 13) /*!< NAK interrupt */ -#endif +#endif /* defined USB_OTG_DOEPINT_NAK */ #ifndef USB_OTG_DOEPMSK_NAKM #define USB_OTG_DOEPMSK_NAKM (0x1UL << 13) /*!< OUT Packet NAK interrupt mask */ -#endif +#endif /* defined USB_OTG_DOEPMSK_NAKM */ #ifndef USB_OTG_DOEPINT_STPKTRX #define USB_OTG_DOEPINT_STPKTRX (0x1UL << 15) /*!< Setup Packet Received interrupt */ -#endif +#endif /* defined USB_OTG_DOEPINT_STPKTRX */ #ifndef USB_OTG_DOEPMSK_NYETM #define USB_OTG_DOEPMSK_NYETM (0x1UL << 14) /*!< Setup Packet Received interrupt mask */ -#endif +#endif /* defined USB_OTG_DOEPMSK_NYETM */ #endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ /* Private macros ------------------------------------------------------------*/ @@ -464,5 +459,3 @@ PCD_StateTypeDef HAL_PCD_GetState(PCD_HandleTypeDef *hpcd); #endif #endif /* STM32F4xx_HAL_PCD_H */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pcd_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pcd_ex.h index 71e9a2c1..284c9d56 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pcd_ex.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pcd_ex.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -23,7 +22,7 @@ #ifdef __cplusplus extern "C" { -#endif +#endif /* __cplusplus */ /* Includes ------------------------------------------------------------------*/ #include "stm32f4xx_hal_def.h" @@ -46,21 +45,26 @@ extern "C" { /** @addtogroup PCDEx_Exported_Functions_Group1 Peripheral Control functions * @{ */ - #if defined (USB_OTG_FS) || defined (USB_OTG_HS) HAL_StatusTypeDef HAL_PCDEx_SetTxFiFo(PCD_HandleTypeDef *hpcd, uint8_t fifo, uint16_t size); HAL_StatusTypeDef HAL_PCDEx_SetRxFiFo(PCD_HandleTypeDef *hpcd, uint16_t size); #endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ -#if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) \ + || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) \ + || defined(STM32F423xx) HAL_StatusTypeDef HAL_PCDEx_ActivateLPM(PCD_HandleTypeDef *hpcd); HAL_StatusTypeDef HAL_PCDEx_DeActivateLPM(PCD_HandleTypeDef *hpcd); -#endif /* defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) */ -#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +#endif /* defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || + defined(STM32F423xx) */ +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) \ + || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) HAL_StatusTypeDef HAL_PCDEx_ActivateBCD(PCD_HandleTypeDef *hpcd); HAL_StatusTypeDef HAL_PCDEx_DeActivateBCD(PCD_HandleTypeDef *hpcd); void HAL_PCDEx_BCD_VBUSDetect(PCD_HandleTypeDef *hpcd); -#endif /* defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) */ +#endif /* defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || + defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) */ void HAL_PCDEx_LPM_Callback(PCD_HandleTypeDef *hpcd, PCD_LPM_MsgTypeDef msg); void HAL_PCDEx_BCD_Callback(PCD_HandleTypeDef *hpcd, PCD_BCD_MsgTypeDef msg); @@ -83,9 +87,7 @@ void HAL_PCDEx_BCD_Callback(PCD_HandleTypeDef *hpcd, PCD_BCD_MsgTypeDef msg); #ifdef __cplusplus } -#endif +#endif /* __cplusplus */ #endif /* STM32F4xx_HAL_PCD_EX_H */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pwr.h 
b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pwr.h index 6f3bf714..a7273d5e 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pwr.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pwr.h @@ -6,14 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** */ @@ -116,6 +114,8 @@ typedef struct */ #define PWR_SLEEPENTRY_WFI ((uint8_t)0x01) #define PWR_SLEEPENTRY_WFE ((uint8_t)0x02) +#define PWR_SLEEPENTRY_WFE_NO_EVT_CLEAR ((uint8_t)0x03) + /** * @} */ @@ -125,6 +125,7 @@ typedef struct */ #define PWR_STOPENTRY_WFI ((uint8_t)0x01) #define PWR_STOPENTRY_WFE ((uint8_t)0x02) +#define PWR_STOPENTRY_WFE_NO_EVT_CLEAR ((uint8_t)0x03) /** * @} */ @@ -403,8 +404,14 @@ void HAL_PWR_DisableSEVOnPend(void); ((MODE) == PWR_PVD_MODE_NORMAL)) #define IS_PWR_REGULATOR(REGULATOR) (((REGULATOR) == PWR_MAINREGULATOR_ON) || \ ((REGULATOR) == PWR_LOWPOWERREGULATOR_ON)) -#define IS_PWR_SLEEP_ENTRY(ENTRY) (((ENTRY) == PWR_SLEEPENTRY_WFI) || ((ENTRY) == PWR_SLEEPENTRY_WFE)) -#define IS_PWR_STOP_ENTRY(ENTRY) (((ENTRY) == PWR_STOPENTRY_WFI) || ((ENTRY) == PWR_STOPENTRY_WFE)) + +#define IS_PWR_SLEEP_ENTRY(ENTRY) (((ENTRY) == PWR_SLEEPENTRY_WFI) || \ + ((ENTRY) == PWR_SLEEPENTRY_WFE) || \ + ((ENTRY) == PWR_SLEEPENTRY_WFE_NO_EVT_CLEAR)) + +#define IS_PWR_STOP_ENTRY(ENTRY) (((ENTRY) == PWR_STOPENTRY_WFI) || \ + ((ENTRY) == PWR_STOPENTRY_WFE) || \ + ((ENTRY) == PWR_STOPENTRY_WFE_NO_EVT_CLEAR)) /** * @} */ @@ -427,5 +434,3 @@ void HAL_PWR_DisableSEVOnPend(void); #endif /* __STM32F4xx_HAL_PWR_H */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pwr_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pwr_ex.h index 10e017a1..57fd4d93 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pwr_ex.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_pwr_ex.h @@ -6,14 +6,12 @@ ****************************************************************************** * @attention * - *
- * © Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** */ @@ -340,5 +338,3 @@ HAL_StatusTypeDef HAL_PWREx_EnterUnderDriveSTOPMode(uint32_t Regulator, uint8_t #endif /* __STM32F4xx_HAL_PWR_EX_H */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rcc.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rcc.h index 36a639b4..cf01e514 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rcc.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rcc.h @@ -6,14 +6,12 @@ ****************************************************************************** * @attention * - *
- * © Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** */ @@ -22,7 +20,7 @@ #define __STM32F4xx_HAL_RCC_H #ifdef __cplusplus - extern "C" { +extern "C" { #endif /* Includes ------------------------------------------------------------------*/ @@ -69,7 +67,7 @@ typedef struct This parameter can be a value of @ref RCC_LSI_Config */ RCC_PLLInitTypeDef PLL; /*!< PLL structure parameters */ -}RCC_OscInitTypeDef; +} RCC_OscInitTypeDef; /** * @brief RCC System, AHB and APB busses clock configuration structure definition @@ -91,7 +89,7 @@ typedef struct uint32_t APB2CLKDivider; /*!< The APB2 clock (PCLK2) divider. This clock is derived from the AHB clock (HCLK). This parameter can be a value of @ref RCC_APB1_APB2_Clock_Source */ -}RCC_ClkInitTypeDef; +} RCC_ClkInitTypeDef; /** * @} @@ -384,47 +382,47 @@ typedef struct * @{ */ #define __HAL_RCC_GPIOA_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOAEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOAEN);\ - UNUSED(tmpreg); \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOAEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOAEN);\ + UNUSED(tmpreg); \ } while(0U) #define __HAL_RCC_GPIOB_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOBEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOBEN);\ - UNUSED(tmpreg); \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOBEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOBEN);\ + UNUSED(tmpreg); \ } while(0U) #define __HAL_RCC_GPIOC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOCEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOCEN);\ - UNUSED(tmpreg); \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOCEN);\ + UNUSED(tmpreg); \ } while(0U) #define __HAL_RCC_GPIOH_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOHEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOHEN);\ - UNUSED(tmpreg); \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOHEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOHEN);\ + UNUSED(tmpreg); \ } while(0U) #define __HAL_RCC_DMA1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA1EN);\ - 
UNUSED(tmpreg); \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA1EN);\ + UNUSED(tmpreg); \ } while(0U) #define __HAL_RCC_DMA2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOA_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOAEN)) #define __HAL_RCC_GPIOB_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOBEN)) @@ -468,53 +466,53 @@ typedef struct * @{ */ #define __HAL_RCC_TIM5_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM5EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM5EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM5EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_WWDG_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_WWDGEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_WWDGEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_WWDGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_WWDGEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_USART2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_I2C1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_I2C2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C2EN);\ - /* Delay after an 
RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_PWR_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_PWREN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_PWREN);\ - UNUSED(tmpreg); \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_PWREN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_PWREN);\ + UNUSED(tmpreg); \ } while(0U) #define __HAL_RCC_TIM5_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM5EN)) @@ -562,61 +560,61 @@ typedef struct * @{ */ #define __HAL_RCC_TIM1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_USART1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_USART1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_USART1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_USART1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_USART1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_USART6_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_USART6EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_USART6EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_USART6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_USART6EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ADC1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SYSCFG_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, 
RCC_APB2ENR_SYSCFGEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SYSCFGEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SYSCFGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SYSCFGEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM9_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM9EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM9EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM9EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM9EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM11_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM11EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM11EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM11EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM11EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM1EN)) #define __HAL_RCC_USART1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_USART1EN)) @@ -662,7 +660,6 @@ typedef struct * @brief Force or release AHB1 peripheral reset. * @{ */ -#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0xFFFFFFFFU) #define __HAL_RCC_GPIOA_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOARST)) #define __HAL_RCC_GPIOB_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOBRST)) #define __HAL_RCC_GPIOC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOCRST)) @@ -685,7 +682,6 @@ typedef struct * @brief Force or release APB1 peripheral reset. * @{ */ -#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0xFFFFFFFFU) #define __HAL_RCC_TIM5_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM5RST)) #define __HAL_RCC_WWDG_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_WWDGRST)) #define __HAL_RCC_SPI2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI2RST)) @@ -710,7 +706,6 @@ typedef struct * @brief Force or release APB2 peripheral reset. * @{ */ -#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0xFFFFFFFFU) #define __HAL_RCC_TIM1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM1RST)) #define __HAL_RCC_USART1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_USART1RST)) #define __HAL_RCC_USART6_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_USART6RST)) @@ -844,7 +839,7 @@ typedef struct * This parameter must be a number between 0 and 0x1F. */ #define __HAL_RCC_HSI_CALIBRATIONVALUE_ADJUST(__HSICalibrationValue__) (MODIFY_REG(RCC->CR,\ - RCC_CR_HSITRIM, (uint32_t)(__HSICalibrationValue__) << RCC_CR_HSITRIM_Pos)) + RCC_CR_HSITRIM, (uint32_t)(__HSICalibrationValue__) << RCC_CR_HSITRIM_Pos)) /** * @} */ @@ -893,22 +888,22 @@ typedef struct * @arg RCC_HSE_BYPASS: HSE oscillator bypassed with external clock. 
*/ #define __HAL_RCC_HSE_CONFIG(__STATE__) \ - do { \ - if ((__STATE__) == RCC_HSE_ON) \ - { \ - SET_BIT(RCC->CR, RCC_CR_HSEON); \ - } \ - else if ((__STATE__) == RCC_HSE_BYPASS) \ - { \ - SET_BIT(RCC->CR, RCC_CR_HSEBYP); \ - SET_BIT(RCC->CR, RCC_CR_HSEON); \ - } \ - else \ - { \ - CLEAR_BIT(RCC->CR, RCC_CR_HSEON); \ - CLEAR_BIT(RCC->CR, RCC_CR_HSEBYP); \ - } \ - } while(0U) + do { \ + if ((__STATE__) == RCC_HSE_ON) \ + { \ + SET_BIT(RCC->CR, RCC_CR_HSEON); \ + } \ + else if ((__STATE__) == RCC_HSE_BYPASS) \ + { \ + SET_BIT(RCC->CR, RCC_CR_HSEBYP); \ + SET_BIT(RCC->CR, RCC_CR_HSEON); \ + } \ + else \ + { \ + CLEAR_BIT(RCC->CR, RCC_CR_HSEON); \ + CLEAR_BIT(RCC->CR, RCC_CR_HSEBYP); \ + } \ + } while(0U) /** * @} */ @@ -936,22 +931,22 @@ typedef struct * @arg RCC_LSE_BYPASS: LSE oscillator bypassed with external clock. */ #define __HAL_RCC_LSE_CONFIG(__STATE__) \ - do { \ - if((__STATE__) == RCC_LSE_ON) \ - { \ - SET_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ - } \ - else if((__STATE__) == RCC_LSE_BYPASS) \ - { \ - SET_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); \ - SET_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ - } \ - else \ - { \ - CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ - CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); \ - } \ - } while(0U) + do { \ + if((__STATE__) == RCC_LSE_ON) \ + { \ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ + } \ + else if((__STATE__) == RCC_LSE_BYPASS) \ + { \ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); \ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ + } \ + else \ + { \ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); \ + } \ + } while(0U) /** * @} */ @@ -976,11 +971,10 @@ typedef struct * a Power On Reset (POR). * @param __RTCCLKSource__ specifies the RTC clock source. * This parameter can be one of the following values: - @arg @ref RCC_RTCCLKSOURCE_NO_CLK: No clock selected as RTC clock. - * @arg @ref RCC_RTCCLKSOURCE_LSE: LSE selected as RTC clock. - * @arg @ref RCC_RTCCLKSOURCE_LSI: LSI selected as RTC clock. - * @arg @ref RCC_RTCCLKSOURCE_HSE_DIVX: HSE clock divided by x selected - * as RTC clock, where x:[2,31] + * @arg @ref RCC_RTCCLKSOURCE_NO_CLK : No clock selected as RTC clock. + * @arg @ref RCC_RTCCLKSOURCE_LSE : LSE selected as RTC clock. + * @arg @ref RCC_RTCCLKSOURCE_LSI : LSI selected as RTC clock. + * @arg @ref RCC_RTCCLKSOURCE_HSE_DIVX HSE divided by X selected as RTC clock (X can be retrieved thanks to @ref __HAL_RCC_GET_RTC_HSE_PRESCALER() * @note If the LSE or LSI is used as RTC clock source, the RTC continues to * work in STOP and STANDBY modes, and can be used as wake-up source. * However, when the HSE clock is used as RTC clock source, the RTC @@ -989,7 +983,7 @@ typedef struct * RTC clock source). */ #define __HAL_RCC_RTC_CLKPRESCALER(__RTCCLKSource__) (((__RTCCLKSource__) & RCC_BDCR_RTCSEL) == RCC_BDCR_RTCSEL) ? \ - MODIFY_REG(RCC->CFGR, RCC_CFGR_RTCPRE, ((__RTCCLKSource__) & 0xFFFFCFFU)) : CLEAR_BIT(RCC->CFGR, RCC_CFGR_RTCPRE) + MODIFY_REG(RCC->CFGR, RCC_CFGR_RTCPRE, ((__RTCCLKSource__) & 0xFFFFCFFU)) : CLEAR_BIT(RCC->CFGR, RCC_CFGR_RTCPRE) #define __HAL_RCC_RTC_CONFIG(__RTCCLKSource__) do { __HAL_RCC_RTC_CLKPRESCALER(__RTCCLKSource__); \ RCC->BDCR |= ((__RTCCLKSource__) & 0x00000FFFU); \ @@ -1007,8 +1001,7 @@ typedef struct /** * @brief Get the RTC and HSE clock divider (RTCPRE). 
* @retval Returned value can be one of the following values: - * @arg @ref RCC_RTCCLKSOURCE_HSE_DIVX: HSE clock divided by x selected - * as RTC clock, where x:[2,31] + * @arg @ref RCC_RTCCLKSOURCE_HSE_DIVX HSE divided by X selected as RTC clock (X can be retrieved thanks to @ref __HAL_RCC_GET_RTC_HSE_PRESCALER() */ #define __HAL_RCC_GET_RTC_HSE_PRESCALER() (READ_BIT(RCC->CFGR, RCC_CFGR_RTCPRE) | RCC_BDCR_RTCSEL) @@ -1118,7 +1111,7 @@ typedef struct * @arg RCC_MCODIV_5: division by 5 applied to MCOx clock */ #define __HAL_RCC_MCO1_CONFIG(__MCOCLKSOURCE__, __MCODIV__) \ - MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO1 | RCC_CFGR_MCO1PRE), ((__MCOCLKSOURCE__) | (__MCODIV__))) + MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO1 | RCC_CFGR_MCO1PRE), ((__MCOCLKSOURCE__) | (__MCODIV__))) /** @brief Macro to configure the MCO2 clock. * @param __MCOCLKSOURCE__ specifies the MCO clock source. @@ -1139,7 +1132,7 @@ typedef struct * at least one of the SPI clocks enabled (SPI1, SPI2 or SPI5). */ #define __HAL_RCC_MCO2_CONFIG(__MCOCLKSOURCE__, __MCODIV__) \ - MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO2 | RCC_CFGR_MCO2PRE), ((__MCOCLKSOURCE__) | ((__MCODIV__) << 3U))); + MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO2 | RCC_CFGR_MCO2PRE), ((__MCOCLKSOURCE__) | ((__MCODIV__) << 3U))); /** * @} */ @@ -1227,7 +1220,9 @@ typedef struct * @retval The new state of __FLAG__ (TRUE or FALSE). */ #define RCC_FLAG_MASK ((uint8_t)0x1FU) -#define __HAL_RCC_GET_FLAG(__FLAG__) (((((((__FLAG__) >> 5U) == 1U)? RCC->CR :((((__FLAG__) >> 5U) == 2U) ? RCC->BDCR :((((__FLAG__) >> 5U) == 3U)? RCC->CSR :RCC->CIR))) & (1U << ((__FLAG__) & RCC_FLAG_MASK)))!= 0U)? 1U : 0U) +#define __HAL_RCC_GET_FLAG(__FLAG__) (((((((__FLAG__) >> 5U)\ + == 1U)? RCC->CR :((((__FLAG__) >> 5U) == 2U) ? RCC->BDCR :((((__FLAG__) >> 5U) == 3U)? RCC->CSR :RCC->CIR))) &\ + (1U << ((__FLAG__) & RCC_FLAG_MASK)))!= 0U)? 1U : 0U) /** * @} @@ -1238,9 +1233,9 @@ typedef struct */ /* Exported functions --------------------------------------------------------*/ - /** @addtogroup RCC_Exported_Functions +/** @addtogroup RCC_Exported_Functions * @{ - */ + */ /** @addtogroup RCC_Exported_Functions_Group1 * @{ @@ -1411,7 +1406,7 @@ void HAL_RCC_CSSCallback(void); ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV30) || \ ((__SOURCE__) == RCC_RTCCLKSOURCE_HSE_DIV31)) -#define IS_RCC_PLLM_VALUE(VALUE) ((VALUE) <= 63U) +#define IS_RCC_PLLM_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 63U)) #define IS_RCC_PLLP_VALUE(VALUE) (((VALUE) == 2U) || ((VALUE) == 4U) || ((VALUE) == 6U) || ((VALUE) == 8U)) @@ -1435,8 +1430,8 @@ void HAL_RCC_CSSCallback(void); ((SOURCE) == RCC_MCO1SOURCE_HSE) || ((SOURCE) == RCC_MCO1SOURCE_PLLCLK)) #define IS_RCC_MCODIV(DIV) (((DIV) == RCC_MCODIV_1) || ((DIV) == RCC_MCODIV_2) || \ - ((DIV) == RCC_MCODIV_3) || ((DIV) == RCC_MCODIV_4) || \ - ((DIV) == RCC_MCODIV_5)) + ((DIV) == RCC_MCODIV_3) || ((DIV) == RCC_MCODIV_4) || \ + ((DIV) == RCC_MCODIV_5)) #define IS_RCC_CALIBRATION_VALUE(VALUE) ((VALUE) <= 0x1FU) /** @@ -1461,4 +1456,3 @@ void HAL_RCC_CSSCallback(void); #endif /* __STM32F4xx_HAL_RCC_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rcc_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rcc_ex.h index 0b8152b1..3b621340 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rcc_ex.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rcc_ex.h @@ -6,23 +6,21 @@ ****************************************************************************** * @attention * - *
- * © Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** - */ + */ /* Define to prevent recursive inclusion -------------------------------------*/ #ifndef __STM32F4xx_HAL_RCC_EX_H #define __STM32F4xx_HAL_RCC_EX_H #ifdef __cplusplus - extern "C" { +extern "C" { #endif /* Includes ------------------------------------------------------------------*/ @@ -34,7 +32,7 @@ /** @addtogroup RCCEx * @{ - */ + */ /* Exported types ------------------------------------------------------------*/ /** @defgroup RCCEx_Exported_Types RCCEx Exported Types @@ -56,7 +54,7 @@ typedef struct This parameter must be a number between Min_Data = 0 and Max_Data = 63 */ uint32_t PLLN; /*!< PLLN: Multiplication factor for PLL VCO output clock. - This parameter must be a number between Min_Data = 50 and Max_Data = 432 + This parameter must be a number between Min_Data = 50 and Max_Data = 432 except for STM32F411xE devices where the Min_Data = 192 */ uint32_t PLLP; /*!< PLLP: Division factor for main system clock (SYSCLK). @@ -69,14 +67,14 @@ typedef struct defined(STM32F413xx) || defined(STM32F423xx) uint32_t PLLR; /*!< PLLR: PLL division factor for I2S, SAI, SYSTEM, SPDIFRX clocks. This parameter is only available in STM32F410xx/STM32F446xx/STM32F469xx/STM32F479xx - and STM32F412Zx/STM32F412Vx/STM32F412Rx/STM32F412Cx/STM32F413xx/STM32F423xx devices. + and STM32F412Zx/STM32F412Vx/STM32F412Rx/STM32F412Cx/STM32F413xx/STM32F423xx devices. This parameter must be a number between Min_Data = 2 and Max_Data = 7 */ -#endif /* STM32F410xx || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ -}RCC_PLLInitTypeDef; +#endif /* STM32F410xx || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ +} RCC_PLLInitTypeDef; #if defined(STM32F446xx) -/** - * @brief PLLI2S Clock structure definition +/** + * @brief PLLI2S Clock structure definition */ typedef struct { @@ -90,20 +88,20 @@ typedef struct This parameter must be a value of @ref RCCEx_PLLI2SP_Clock_Divider */ uint32_t PLLI2SQ; /*!< Specifies the division factor for SAI clock. - This parameter must be a number between Min_Data = 2 and Max_Data = 15. + This parameter must be a number between Min_Data = 2 and Max_Data = 15. This parameter will be used only when PLLI2S is selected as Clock Source SAI */ - + uint32_t PLLI2SR; /*!< Specifies the division factor for I2S clock. - This parameter must be a number between Min_Data = 2 and Max_Data = 7. + This parameter must be a number between Min_Data = 2 and Max_Data = 7. This parameter will be used only when PLLI2S is selected as Clock Source I2S */ -}RCC_PLLI2SInitTypeDef; +} RCC_PLLI2SInitTypeDef; -/** - * @brief PLLSAI Clock structure definition +/** + * @brief PLLSAI Clock structure definition */ typedef struct { - uint32_t PLLSAIM; /*!< Spcifies division factor for PLL VCO input clock. 
+ uint32_t PLLSAIM; /*!< Specifies division factor for PLL VCO input clock. This parameter must be a number between Min_Data = 2 and Max_Data = 63 */ uint32_t PLLSAIN; /*!< Specifies the multiplication factor for PLLI2S VCO output clock. @@ -111,24 +109,24 @@ typedef struct uint32_t PLLSAIP; /*!< Specifies division factor for OTG FS, SDIO and RNG clocks. This parameter must be a value of @ref RCCEx_PLLSAIP_Clock_Divider */ - + uint32_t PLLSAIQ; /*!< Specifies the division factor for SAI clock. This parameter must be a number between Min_Data = 2 and Max_Data = 15. This parameter will be used only when PLLSAI is selected as Clock Source SAI */ -}RCC_PLLSAIInitTypeDef; +} RCC_PLLSAIInitTypeDef; -/** - * @brief RCC extended clocks structure definition +/** + * @brief RCC extended clocks structure definition */ typedef struct { uint32_t PeriphClockSelection; /*!< The Extended Clock to be configured. This parameter can be a value of @ref RCCEx_Periph_Clock_Selection */ - RCC_PLLI2SInitTypeDef PLLI2S; /*!< PLL I2S structure parameters. + RCC_PLLI2SInitTypeDef PLLI2S; /*!< PLL I2S structure parameters. This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ - RCC_PLLSAIInitTypeDef PLLSAI; /*!< PLL SAI structure parameters. + RCC_PLLSAIInitTypeDef PLLSAI; /*!< PLL SAI structure parameters. This parameter will be used only when PLLI2S is selected as Clock Source SAI or LTDC */ uint32_t PLLI2SDivQ; /*!< Specifies the PLLI2S division factor for SAI1 clock. @@ -139,43 +137,43 @@ typedef struct This parameter must be a number between Min_Data = 1 and Max_Data = 32 This parameter will be used only when PLLSAI is selected as Clock Source SAI */ - uint32_t Sai1ClockSelection; /*!< Specifies SAI1 Clock Source Selection. + uint32_t Sai1ClockSelection; /*!< Specifies SAI1 Clock Source Selection. This parameter can be a value of @ref RCCEx_SAI1_Clock_Source */ - uint32_t Sai2ClockSelection; /*!< Specifies SAI2 Clock Source Selection. + uint32_t Sai2ClockSelection; /*!< Specifies SAI2 Clock Source Selection. This parameter can be a value of @ref RCCEx_SAI2_Clock_Source */ - - uint32_t I2sApb1ClockSelection; /*!< Specifies I2S APB1 Clock Source Selection. + + uint32_t I2sApb1ClockSelection; /*!< Specifies I2S APB1 Clock Source Selection. This parameter can be a value of @ref RCCEx_I2SAPB1_Clock_Source */ - uint32_t I2sApb2ClockSelection; /*!< Specifies I2S APB2 Clock Source Selection. + uint32_t I2sApb2ClockSelection; /*!< Specifies I2S APB2 Clock Source Selection. This parameter can be a value of @ref RCCEx_I2SAPB2_Clock_Source */ - uint32_t RTCClockSelection; /*!< Specifies RTC Clock Source Selection. + uint32_t RTCClockSelection; /*!< Specifies RTC Clock Source Selection. This parameter can be a value of @ref RCC_RTC_Clock_Source */ - uint32_t SdioClockSelection; /*!< Specifies SDIO Clock Source Selection. + uint32_t SdioClockSelection; /*!< Specifies SDIO Clock Source Selection. This parameter can be a value of @ref RCCEx_SDIO_Clock_Source */ - uint32_t CecClockSelection; /*!< Specifies CEC Clock Source Selection. + uint32_t CecClockSelection; /*!< Specifies CEC Clock Source Selection. This parameter can be a value of @ref RCCEx_CEC_Clock_Source */ - uint32_t Fmpi2c1ClockSelection; /*!< Specifies FMPI2C1 Clock Source Selection. + uint32_t Fmpi2c1ClockSelection; /*!< Specifies FMPI2C1 Clock Source Selection. This parameter can be a value of @ref RCCEx_FMPI2C1_Clock_Source */ - uint32_t SpdifClockSelection; /*!< Specifies SPDIFRX Clock Source Selection. 
+ uint32_t SpdifClockSelection; /*!< Specifies SPDIFRX Clock Source Selection. This parameter can be a value of @ref RCCEx_SPDIFRX_Clock_Source */ - uint32_t Clk48ClockSelection; /*!< Specifies CLK48 Clock Selection this clock used OTG FS, SDIO and RNG clocks. + uint32_t Clk48ClockSelection; /*!< Specifies CLK48 Clock Selection this clock used OTG FS, SDIO and RNG clocks. This parameter can be a value of @ref RCCEx_CLK48_Clock_Source */ - - uint8_t TIMPresSelection; /*!< Specifies TIM Clock Source Selection. + + uint8_t TIMPresSelection; /*!< Specifies TIM Clock Source Selection. This parameter can be a value of @ref RCCEx_TIM_PRescaler_Selection */ -}RCC_PeriphCLKInitTypeDef; -#endif /* STM32F446xx */ +} RCC_PeriphCLKInitTypeDef; +#endif /* STM32F446xx */ #if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) -/** +/** * @brief RCC extended clocks structure definition */ typedef struct @@ -183,26 +181,26 @@ typedef struct uint32_t PeriphClockSelection; /*!< The Extended Clock to be configured. This parameter can be a value of @ref RCCEx_Periph_Clock_Selection */ - uint32_t I2SClockSelection; /*!< Specifies RTC Clock Source Selection. + uint32_t I2SClockSelection; /*!< Specifies RTC Clock Source Selection. This parameter can be a value of @ref RCCEx_I2S_APB_Clock_Source */ - - uint32_t RTCClockSelection; /*!< Specifies RTC Clock Source Selection. + + uint32_t RTCClockSelection; /*!< Specifies RTC Clock Source Selection. This parameter can be a value of @ref RCC_RTC_Clock_Source */ - uint32_t Lptim1ClockSelection; /*!< Specifies LPTIM1 Clock Source Selection. + uint32_t Lptim1ClockSelection; /*!< Specifies LPTIM1 Clock Source Selection. This parameter can be a value of @ref RCCEx_LPTIM1_Clock_Source */ - - uint32_t Fmpi2c1ClockSelection; /*!< Specifies FMPI2C1 Clock Source Selection. + + uint32_t Fmpi2c1ClockSelection; /*!< Specifies FMPI2C1 Clock Source Selection. This parameter can be a value of @ref RCCEx_FMPI2C1_Clock_Source */ - uint8_t TIMPresSelection; /*!< Specifies TIM Clock Source Selection. + uint8_t TIMPresSelection; /*!< Specifies TIM Clock Source Selection. This parameter can be a value of @ref RCCEx_TIM_PRescaler_Selection */ -}RCC_PeriphCLKInitTypeDef; +} RCC_PeriphCLKInitTypeDef; #endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ #if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) -/** - * @brief PLLI2S Clock structure definition +/** + * @brief PLLI2S Clock structure definition */ typedef struct { @@ -213,15 +211,15 @@ typedef struct This parameter must be a number between Min_Data = 50 and Max_Data = 432 */ uint32_t PLLI2SQ; /*!< Specifies the division factor for SAI clock. - This parameter must be a number between Min_Data = 2 and Max_Data = 15. + This parameter must be a number between Min_Data = 2 and Max_Data = 15. This parameter will be used only when PLLI2S is selected as Clock Source SAI */ - + uint32_t PLLI2SR; /*!< Specifies the division factor for I2S clock. - This parameter must be a number between Min_Data = 2 and Max_Data = 7. + This parameter must be a number between Min_Data = 2 and Max_Data = 7. This parameter will be used only when PLLI2S is selected as Clock Source I2S */ -}RCC_PLLI2SInitTypeDef; +} RCC_PLLI2SInitTypeDef; -/** +/** * @brief RCC extended clocks structure definition */ typedef struct @@ -229,9 +227,9 @@ typedef struct uint32_t PeriphClockSelection; /*!< The Extended Clock to be configured. 
This parameter can be a value of @ref RCCEx_Periph_Clock_Selection */ - RCC_PLLI2SInitTypeDef PLLI2S; /*!< PLL I2S structure parameters. + RCC_PLLI2SInitTypeDef PLLI2S; /*!< PLL I2S structure parameters. This parameter will be used only when PLLI2S is selected as Clock Source I2S */ - + #if defined(STM32F413xx) || defined(STM32F423xx) uint32_t PLLDivR; /*!< Specifies the PLL division factor for SAI1 clock. This parameter must be a number between Min_Data = 1 and Max_Data = 32 @@ -240,42 +238,42 @@ typedef struct uint32_t PLLI2SDivR; /*!< Specifies the PLLI2S division factor for SAI1 clock. This parameter must be a number between Min_Data = 1 and Max_Data = 32 This parameter will be used only when PLLI2S is selected as Clock Source SAI */ -#endif /* STM32F413xx || STM32F423xx */ - - uint32_t I2sApb1ClockSelection; /*!< Specifies I2S APB1 Clock Source Selection. +#endif /* STM32F413xx || STM32F423xx */ + + uint32_t I2sApb1ClockSelection; /*!< Specifies I2S APB1 Clock Source Selection. This parameter can be a value of @ref RCCEx_I2SAPB1_Clock_Source */ - uint32_t I2sApb2ClockSelection; /*!< Specifies I2S APB2 Clock Source Selection. + uint32_t I2sApb2ClockSelection; /*!< Specifies I2S APB2 Clock Source Selection. This parameter can be a value of @ref RCCEx_I2SAPB2_Clock_Source */ - uint32_t RTCClockSelection; /*!< Specifies RTC Clock Source Selection. + uint32_t RTCClockSelection; /*!< Specifies RTC Clock Source Selection. This parameter can be a value of @ref RCC_RTC_Clock_Source */ - uint32_t SdioClockSelection; /*!< Specifies SDIO Clock Source Selection. + uint32_t SdioClockSelection; /*!< Specifies SDIO Clock Source Selection. This parameter can be a value of @ref RCCEx_SDIO_Clock_Source */ - uint32_t Fmpi2c1ClockSelection; /*!< Specifies FMPI2C1 Clock Source Selection. + uint32_t Fmpi2c1ClockSelection; /*!< Specifies FMPI2C1 Clock Source Selection. This parameter can be a value of @ref RCCEx_FMPI2C1_Clock_Source */ uint32_t Clk48ClockSelection; /*!< Specifies CLK48 Clock Selection this clock used OTG FS, SDIO and RNG clocks. This parameter can be a value of @ref RCCEx_CLK48_Clock_Source */ - + uint32_t Dfsdm1ClockSelection; /*!< Specifies DFSDM1 Clock Selection. This parameter can be a value of @ref RCCEx_DFSDM1_Kernel_Clock_Source */ uint32_t Dfsdm1AudioClockSelection;/*!< Specifies DFSDM1 Audio Clock Selection. This parameter can be a value of @ref RCCEx_DFSDM1_Audio_Clock_Source */ - + #if defined(STM32F413xx) || defined(STM32F423xx) uint32_t Dfsdm2ClockSelection; /*!< Specifies DFSDM2 Clock Selection. This parameter can be a value of @ref RCCEx_DFSDM2_Kernel_Clock_Source */ uint32_t Dfsdm2AudioClockSelection;/*!< Specifies DFSDM2 Audio Clock Selection. This parameter can be a value of @ref RCCEx_DFSDM2_Audio_Clock_Source */ - - uint32_t Lptim1ClockSelection; /*!< Specifies LPTIM1 Clock Source Selection. + + uint32_t Lptim1ClockSelection; /*!< Specifies LPTIM1 Clock Source Selection. This parameter can be a value of @ref RCCEx_LPTIM1_Clock_Source */ - + uint32_t SaiAClockSelection; /*!< Specifies SAI1_A Clock Prescalers Selection This parameter can be a value of @ref RCCEx_SAI1_BlockA_Clock_Source */ @@ -283,18 +281,18 @@ typedef struct This parameter can be a value of @ref RCCEx_SAI1_BlockB_Clock_Source */ #endif /* STM32F413xx || STM32F423xx */ - uint32_t PLLI2SSelection; /*!< Specifies PLL I2S Clock Source Selection. + uint32_t PLLI2SSelection; /*!< Specifies PLL I2S Clock Source Selection. 
This parameter can be a value of @ref RCCEx_PLL_I2S_Clock_Source */ - uint8_t TIMPresSelection; /*!< Specifies TIM Clock Source Selection. + uint8_t TIMPresSelection; /*!< Specifies TIM Clock Source Selection. This parameter can be a value of @ref RCCEx_TIM_PRescaler_Selection */ -}RCC_PeriphCLKInitTypeDef; +} RCC_PeriphCLKInitTypeDef; #endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) -/** - * @brief PLLI2S Clock structure definition +/** + * @brief PLLI2S Clock structure definition */ typedef struct { @@ -303,50 +301,50 @@ typedef struct This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ uint32_t PLLI2SR; /*!< Specifies the division factor for I2S clock. - This parameter must be a number between Min_Data = 2 and Max_Data = 7. + This parameter must be a number between Min_Data = 2 and Max_Data = 7. This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ uint32_t PLLI2SQ; /*!< Specifies the division factor for SAI1 clock. - This parameter must be a number between Min_Data = 2 and Max_Data = 15. + This parameter must be a number between Min_Data = 2 and Max_Data = 15. This parameter will be used only when PLLI2S is selected as Clock Source SAI */ -}RCC_PLLI2SInitTypeDef; +} RCC_PLLI2SInitTypeDef; -/** - * @brief PLLSAI Clock structure definition +/** + * @brief PLLSAI Clock structure definition */ typedef struct { uint32_t PLLSAIN; /*!< Specifies the multiplication factor for PLLI2S VCO output clock. This parameter must be a number between Min_Data = 50 and Max_Data = 432. - This parameter will be used only when PLLSAI is selected as Clock Source SAI or LTDC */ + This parameter will be used only when PLLSAI is selected as Clock Source SAI or LTDC */ #if defined(STM32F469xx) || defined(STM32F479xx) uint32_t PLLSAIP; /*!< Specifies division factor for OTG FS and SDIO clocks. This parameter is only available in STM32F469xx/STM32F479xx devices. - This parameter must be a value of @ref RCCEx_PLLSAIP_Clock_Divider */ + This parameter must be a value of @ref RCCEx_PLLSAIP_Clock_Divider */ #endif /* STM32F469xx || STM32F479xx */ - + uint32_t PLLSAIQ; /*!< Specifies the division factor for SAI1 clock. This parameter must be a number between Min_Data = 2 and Max_Data = 15. This parameter will be used only when PLLSAI is selected as Clock Source SAI or LTDC */ - + uint32_t PLLSAIR; /*!< specifies the division factor for LTDC clock This parameter must be a number between Min_Data = 2 and Max_Data = 7. This parameter will be used only when PLLSAI is selected as Clock Source LTDC */ -}RCC_PLLSAIInitTypeDef; +} RCC_PLLSAIInitTypeDef; -/** - * @brief RCC extended clocks structure definition +/** + * @brief RCC extended clocks structure definition */ typedef struct { uint32_t PeriphClockSelection; /*!< The Extended Clock to be configured. This parameter can be a value of @ref RCCEx_Periph_Clock_Selection */ - RCC_PLLI2SInitTypeDef PLLI2S; /*!< PLL I2S structure parameters. + RCC_PLLI2SInitTypeDef PLLI2S; /*!< PLL I2S structure parameters. This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ - RCC_PLLSAIInitTypeDef PLLSAI; /*!< PLL SAI structure parameters. + RCC_PLLSAIInitTypeDef PLLSAI; /*!< PLL SAI structure parameters. 
This parameter will be used only when PLLI2S is selected as Clock Source SAI or LTDC */ uint32_t PLLI2SDivQ; /*!< Specifies the PLLI2S division factor for SAI1 clock. @@ -360,26 +358,26 @@ typedef struct uint32_t PLLSAIDivR; /*!< Specifies the PLLSAI division factor for LTDC clock. This parameter must be one value of @ref RCCEx_PLLSAI_DIVR */ - uint32_t RTCClockSelection; /*!< Specifies RTC Clock Prescalers Selection. + uint32_t RTCClockSelection; /*!< Specifies RTC Clock Prescalers Selection. This parameter can be a value of @ref RCC_RTC_Clock_Source */ - uint8_t TIMPresSelection; /*!< Specifies TIM Clock Prescalers Selection. + uint8_t TIMPresSelection; /*!< Specifies TIM Clock Prescalers Selection. This parameter can be a value of @ref RCCEx_TIM_PRescaler_Selection */ #if defined(STM32F469xx) || defined(STM32F479xx) - uint32_t Clk48ClockSelection; /*!< Specifies CLK48 Clock Selection this clock used OTG FS, SDIO and RNG clocks. + uint32_t Clk48ClockSelection; /*!< Specifies CLK48 Clock Selection this clock used OTG FS, SDIO and RNG clocks. This parameter can be a value of @ref RCCEx_CLK48_Clock_Source */ - uint32_t SdioClockSelection; /*!< Specifies SDIO Clock Source Selection. - This parameter can be a value of @ref RCCEx_SDIO_Clock_Source */ -#endif /* STM32F469xx || STM32F479xx */ -}RCC_PeriphCLKInitTypeDef; + uint32_t SdioClockSelection; /*!< Specifies SDIO Clock Source Selection. + This parameter can be a value of @ref RCCEx_SDIO_Clock_Source */ +#endif /* STM32F469xx || STM32F479xx */ +} RCC_PeriphCLKInitTypeDef; #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ #if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) ||\ defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) -/** - * @brief PLLI2S Clock structure definition +/** + * @brief PLLI2S Clock structure definition */ typedef struct { @@ -387,20 +385,20 @@ typedef struct uint32_t PLLI2SM; /*!< PLLM: Division factor for PLLI2S VCO input clock. This parameter must be a number between Min_Data = 2 and Max_Data = 62 */ #endif /* STM32F411xE */ - + uint32_t PLLI2SN; /*!< Specifies the multiplication factor for PLLI2S VCO output clock. This parameter must be a number between Min_Data = 50 and Max_Data = 432 - Except for STM32F411xE devices where the Min_Data = 192. + Except for STM32F411xE devices where the Min_Data = 192. This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ uint32_t PLLI2SR; /*!< Specifies the division factor for I2S clock. - This parameter must be a number between Min_Data = 2 and Max_Data = 7. + This parameter must be a number between Min_Data = 2 and Max_Data = 7. This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ -}RCC_PLLI2SInitTypeDef; - -/** - * @brief RCC extended clocks structure definition +} RCC_PLLI2SInitTypeDef; + +/** + * @brief RCC extended clocks structure definition */ typedef struct { @@ -412,15 +410,15 @@ typedef struct uint32_t RTCClockSelection; /*!< Specifies RTC Clock Prescalers Selection. This parameter can be a value of @ref RCC_RTC_Clock_Source */ -#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) - uint8_t TIMPresSelection; /*!< Specifies TIM Clock Source Selection. +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) + uint8_t TIMPresSelection; /*!< Specifies TIM Clock Source Selection. 
This parameter can be a value of @ref RCCEx_TIM_PRescaler_Selection */ #endif /* STM32F401xC || STM32F401xE || STM32F411xE */ -}RCC_PeriphCLKInitTypeDef; +} RCC_PeriphCLKInitTypeDef; #endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F401xC || STM32F401xE || STM32F411xE */ /** * @} - */ + */ /* Exported constants --------------------------------------------------------*/ /** @defgroup RCCEx_Exported_Constants RCCEx Exported Constants @@ -479,7 +477,7 @@ typedef struct #define RCC_PERIPHCLK_PLLI2S 0x00000800U #endif /* STM32F446xx */ /*-----------------------------------------------------------------------------*/ - + /*----------- Peripheral Clock source for STM32F469xx/STM32F479xx -------------*/ #if defined(STM32F469xx) || defined(STM32F479xx) #define RCC_PERIPHCLK_I2S 0x00000001U @@ -508,14 +506,14 @@ typedef struct /*-------- Peripheral Clock source for STM32F40xxx/STM32F41xxx ---------------*/ #if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx)|| defined(STM32F417xx) ||\ - defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) + defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) #define RCC_PERIPHCLK_I2S 0x00000001U #define RCC_PERIPHCLK_RTC 0x00000002U #define RCC_PERIPHCLK_PLLI2S 0x00000004U #endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F401xC || STM32F401xE || STM32F411xE */ #if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) #define RCC_PERIPHCLK_TIM 0x00000008U -#endif /* STM32F401xC || STM32F401xE || STM32F411xE */ +#endif /* STM32F401xC || STM32F401xE || STM32F411xE */ /*----------------------------------------------------------------------------*/ /** * @} @@ -523,12 +521,12 @@ typedef struct #if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) || \ defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || \ defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F469xx) || \ - defined(STM32F479xx) + defined(STM32F479xx) /** @defgroup RCCEx_I2S_Clock_Source I2S Clock Source * @{ */ #define RCC_I2SCLKSOURCE_PLLI2S 0x00000000U -#define RCC_I2SCLKSOURCE_EXT 0x00000001U +#define RCC_I2SCLKSOURCE_EXT RCC_CFGR_I2SSRC /** * @} */ @@ -539,7 +537,7 @@ typedef struct * @{ */ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F446xx) ||\ - defined(STM32F469xx) || defined(STM32F479xx) + defined(STM32F469xx) || defined(STM32F479xx) #define RCC_PLLSAIDIVR_2 0x00000000U #define RCC_PLLSAIDIVR_4 0x00010000U #define RCC_PLLSAIDIVR_8 0x00020000U @@ -566,7 +564,7 @@ typedef struct /** @defgroup RCCEx_PLLSAIP_Clock_Divider RCC PLLSAIP Clock Divider * @{ */ -#if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) +#if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) #define RCC_PLLSAIP_DIV2 0x00000002U #define RCC_PLLSAIP_DIV4 0x00000004U #define RCC_PLLSAIP_DIV6 0x00000006U @@ -585,7 +583,7 @@ typedef struct #define RCC_SAIACLKSOURCE_EXT 0x00200000U /** * @} - */ + */ /** @defgroup RCCEx_SAI_BlockB_Clock_Source RCC SAI BlockB Clock Source * @{ @@ -595,9 +593,9 @@ typedef struct #define RCC_SAIBCLKSOURCE_EXT 0x00800000U /** * @} - */ + */ #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ - + #if defined(STM32F469xx) || defined(STM32F479xx) /** @defgroup RCCEx_CLK48_Clock_Source RCC CLK48 Clock Source * @{ @@ -615,8 
+613,8 @@ typedef struct #define RCC_SDIOCLKSOURCE_SYSCLK ((uint32_t)RCC_DCKCFGR_SDIOSEL) /** * @} - */ - + */ + /** @defgroup RCCEx_DSI_Clock_Source RCC DSI Clock Source * @{ */ @@ -628,7 +626,7 @@ typedef struct #endif /* STM32F469xx || STM32F479xx */ #if defined(STM32F446xx) -/** @defgroup RCCEx_SAI1_Clock_Source RCC SAI1 Clock Source +/** @defgroup RCCEx_SAI1_Clock_Source RCC SAI1 Clock Source * @{ */ #define RCC_SAI1CLKSOURCE_PLLSAI 0x00000000U @@ -637,7 +635,7 @@ typedef struct #define RCC_SAI1CLKSOURCE_EXT ((uint32_t)RCC_DCKCFGR_SAI1SRC) /** * @} - */ + */ /** @defgroup RCCEx_SAI2_Clock_Source RCC SAI2 Clock Source * @{ @@ -659,7 +657,7 @@ typedef struct #define RCC_I2SAPB1CLKSOURCE_PLLSRC ((uint32_t)RCC_DCKCFGR_I2S1SRC) /** * @} - */ + */ /** @defgroup RCCEx_I2SAPB2_Clock_Source RCC I2S APB2 Clock Source * @{ @@ -730,7 +728,7 @@ typedef struct #define RCC_SAIACLKSOURCE_PLLSRC ((uint32_t)RCC_DCKCFGR_SAI1ASRC_0 | RCC_DCKCFGR_SAI1ASRC_1) /** * @} - */ + */ /** @defgroup RCCEx_SAI1_BlockB_Clock_Source RCC SAI BlockB Clock Source * @{ @@ -741,8 +739,8 @@ typedef struct #define RCC_SAIBCLKSOURCE_PLLSRC ((uint32_t)RCC_DCKCFGR_SAI1BSRC_0 | RCC_DCKCFGR_SAI1BSRC_1) /** * @} - */ - + */ + /** @defgroup RCCEx_LPTIM1_Clock_Source RCC LPTIM1 Clock Source * @{ */ @@ -753,7 +751,7 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_DFSDM2_Audio_Clock_Source RCC DFSDM2 Audio Clock Source * @{ @@ -779,7 +777,7 @@ typedef struct /** @defgroup RCCEx_PLL_I2S_Clock_Source PLL I2S Clock Source * @{ */ -#define RCC_PLLI2SCLKSOURCE_PLLSRC 0x00000000U +#define RCC_PLLI2SCLKSOURCE_PLLSRC 0x00000000U #define RCC_PLLI2SCLKSOURCE_EXT ((uint32_t)RCC_PLLI2SCFGR_PLLI2SSRC) /** * @} @@ -924,7 +922,7 @@ typedef struct defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || \ defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F446xx) || \ defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || \ - defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) + defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) /** @defgroup RCC_MCO2_Clock_Source MCO2 Clock Source * @{ */ @@ -955,7 +953,7 @@ typedef struct /** * @} */ - + /* Exported macro ------------------------------------------------------------*/ /** @defgroup RCCEx_Exported_Macros RCCEx Exported Macros * @{ @@ -965,129 +963,129 @@ typedef struct /** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable * @brief Enables or disables the AHB1 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. 
* @{ */ #define __HAL_RCC_BKPSRAM_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CCMDATARAMEN_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CRC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOD_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOE_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOI_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOF_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + UNUSED(tmpreg); \ + } while(0U) #define 
__HAL_RCC_GPIOG_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOJ_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOJEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOJEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOJEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOJEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOK_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOKEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOKEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOKEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOKEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_DMA2D_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2DEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2DEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2DEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2DEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ETHMAC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ETHMACTX_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ETHMACRX_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ETHMACPTP_CLK_ENABLE() 
do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_USB_OTG_HS_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOD_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIODEN)) #define __HAL_RCC_GPIOE_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOEEN)) #define __HAL_RCC_GPIOF_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOFEN)) @@ -1125,7 +1123,7 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_AHB1_Peripheral_Clock_Enable_Disable_Status AHB1 Peripheral Clock Enable Disable Status * @brief Get the enable or disable status of the AHB1 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) @@ -1133,43 +1131,43 @@ typedef struct * using it. 
* @{ */ -#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) -#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) -#define __HAL_RCC_GPIOF_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) != RESET) +#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) +#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) +#define __HAL_RCC_GPIOF_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) != RESET) #define __HAL_RCC_GPIOG_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) != RESET) -#define __HAL_RCC_GPIOI_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOIEN)) != RESET) -#define __HAL_RCC_GPIOJ_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOJEN)) != RESET) +#define __HAL_RCC_GPIOI_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOIEN)) != RESET) +#define __HAL_RCC_GPIOJ_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOJEN)) != RESET) #define __HAL_RCC_GPIOK_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOKEN)) != RESET) -#define __HAL_RCC_DMA2D_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA2DEN)) != RESET) -#define __HAL_RCC_ETHMAC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACEN)) != RESET) +#define __HAL_RCC_DMA2D_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA2DEN)) != RESET) +#define __HAL_RCC_ETHMAC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACEN)) != RESET) #define __HAL_RCC_ETHMACTX_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACTXEN)) != RESET) #define __HAL_RCC_ETHMACRX_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACRXEN)) != RESET) #define __HAL_RCC_ETHMACPTP_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACPTPEN)) != RESET) #define __HAL_RCC_USB_OTG_HS_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) != RESET) #define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) != RESET) #define __HAL_RCC_BKPSRAM_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) != RESET) -#define __HAL_RCC_CCMDATARAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) != RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) != RESET) #define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) #define __HAL_RCC_ETH_IS_CLK_ENABLED() (__HAL_RCC_ETHMAC_IS_CLK_ENABLED() && \ __HAL_RCC_ETHMACTX_IS_CLK_ENABLED() && \ - __HAL_RCC_ETHMACRX_IS_CLK_ENABLED()) + __HAL_RCC_ETHMACRX_IS_CLK_ENABLED()) -#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) -#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) -#define __HAL_RCC_GPIOF_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) == RESET) +#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) +#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) +#define __HAL_RCC_GPIOF_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) == RESET) #define __HAL_RCC_GPIOG_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) == RESET) -#define __HAL_RCC_GPIOI_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOIEN)) == RESET) -#define __HAL_RCC_GPIOJ_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOJEN)) == RESET) +#define __HAL_RCC_GPIOI_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOIEN)) == RESET) +#define __HAL_RCC_GPIOJ_IS_CLK_DISABLED() ((RCC->AHB1ENR & 
(RCC_AHB1ENR_GPIOJEN)) == RESET) #define __HAL_RCC_GPIOK_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOKEN)) == RESET) -#define __HAL_RCC_DMA2D_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA2DEN)) == RESET) -#define __HAL_RCC_ETHMAC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACEN)) == RESET) +#define __HAL_RCC_DMA2D_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA2DEN)) == RESET) +#define __HAL_RCC_ETHMAC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACEN)) == RESET) #define __HAL_RCC_ETHMACTX_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACTXEN)) == RESET) #define __HAL_RCC_ETHMACRX_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACRXEN)) == RESET) #define __HAL_RCC_ETHMACPTP_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACPTPEN)) == RESET) #define __HAL_RCC_USB_OTG_HS_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) == RESET) #define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) == RESET) #define __HAL_RCC_BKPSRAM_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) == RESET) -#define __HAL_RCC_CCMDATARAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) == RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) == RESET) #define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) #define __HAL_RCC_ETH_IS_CLK_DISABLED() (__HAL_RCC_ETHMAC_IS_CLK_DISABLED() && \ __HAL_RCC_ETHMACTX_IS_CLK_DISABLED() && \ @@ -1177,68 +1175,68 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_AHB2_Clock_Enable_Disable AHB2 Peripheral Clock Enable Disable * @brief Enable or disable the AHB2 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. 
* @{ */ - #define __HAL_RCC_DCMI_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ - UNUSED(tmpreg); \ - } while(0U) +#define __HAL_RCC_DCMI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_DCMI_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_DCMIEN)) #if defined(STM32F437xx)|| defined(STM32F439xx) || defined(STM32F479xx) #define __HAL_RCC_CRYP_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_HASH_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CRYP_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_CRYPEN)) #define __HAL_RCC_HASH_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_HASHEN)) #endif /* STM32F437xx || STM32F439xx || STM32F479xx */ #define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() do {(RCC->AHB2ENR |= (RCC_AHB2ENR_OTGFSEN));\ - __HAL_RCC_SYSCFG_CLK_ENABLE();\ + __HAL_RCC_SYSCFG_CLK_ENABLE();\ }while(0U) - + #define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_OTGFSEN)) #define __HAL_RCC_RNG_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_RNG_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_RNGEN)) /** * @} */ - + /** @defgroup RCCEx_AHB2_Peripheral_Clock_Enable_Disable_Status AHB2 Peripheral Clock Enable Disable Status * @brief Get the enable or disable status of the AHB1 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) * is disabled and the application software has to enable this clock before * using it. 
* @{ - */ + */ #define __HAL_RCC_DCMI_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) != RESET) #define __HAL_RCC_DCMI_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) == RESET) @@ -1253,35 +1251,35 @@ typedef struct #define __HAL_RCC_USB_OTG_FS_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) != RESET) #define __HAL_RCC_USB_OTG_FS_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) -#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) != RESET) -#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) == RESET) +#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) != RESET) +#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) == RESET) /** * @} - */ + */ /** @defgroup RCCEx_AHB3_Clock_Enable_Disable AHB3 Peripheral Clock Enable Disable * @brief Enables or disables the AHB3 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. - * @{ + * @{ */ #define __HAL_RCC_FMC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_FMC_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_FMCEN)) #if defined(STM32F469xx) || defined(STM32F479xx) #define __HAL_RCC_QSPI_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_QSPI_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_QSPIEN)) #endif /* STM32F469xx || STM32F479xx */ /** @@ -1301,151 +1299,151 @@ typedef struct #if defined(STM32F469xx) || defined(STM32F479xx) #define __HAL_RCC_QSPI_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) != RESET) #define __HAL_RCC_QSPI_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) == RESET) -#endif /* STM32F469xx || STM32F479xx */ +#endif /* STM32F469xx || STM32F479xx */ /** * @} */ - + /** @defgroup RCCEx_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. 
* @{ */ #define __HAL_RCC_TIM6_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM7_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM12_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM13_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM14_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM14_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_USART3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_UART4_CLK_ENABLE() do { \ - __IO uint32_t 
tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_UART5_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CAN1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CAN2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_DAC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_UART7_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_UART8_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ - /* Delay after an RCC 
peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM4_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_I2C3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM2EN)) #define __HAL_RCC_TIM3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM3EN)) #define __HAL_RCC_TIM4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM4EN)) @@ -1475,117 +1473,117 @@ typedef struct * using it. 
* @{ */ -#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) -#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) +#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) +#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) #define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) -#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) +#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) #define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) -#define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) -#define __HAL_RCC_TIM7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) != RESET) -#define __HAL_RCC_TIM12_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) != RESET) -#define __HAL_RCC_TIM13_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) != RESET) -#define __HAL_RCC_TIM14_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) != RESET) -#define __HAL_RCC_USART3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) != RESET) -#define __HAL_RCC_UART4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) != RESET) -#define __HAL_RCC_UART5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) != RESET) +#define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) +#define __HAL_RCC_TIM7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) != RESET) +#define __HAL_RCC_TIM12_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) != RESET) +#define __HAL_RCC_TIM13_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) != RESET) +#define __HAL_RCC_TIM14_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) != RESET) +#define __HAL_RCC_USART3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) != RESET) +#define __HAL_RCC_UART4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) != RESET) +#define __HAL_RCC_UART5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) != RESET) #define __HAL_RCC_CAN1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) != RESET) #define __HAL_RCC_CAN2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) != RESET) -#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) +#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) #define __HAL_RCC_UART7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART7EN)) != RESET) -#define __HAL_RCC_UART8_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) != RESET) +#define __HAL_RCC_UART8_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) != RESET) -#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) -#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) +#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) +#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) #define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) -#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) +#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) #define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) 
-#define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == RESET) -#define __HAL_RCC_TIM7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) == RESET) -#define __HAL_RCC_TIM12_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) == RESET) -#define __HAL_RCC_TIM13_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) == RESET) -#define __HAL_RCC_TIM14_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) == RESET) -#define __HAL_RCC_USART3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) == RESET) -#define __HAL_RCC_UART4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) == RESET) -#define __HAL_RCC_UART5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) == RESET) +#define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == RESET) +#define __HAL_RCC_TIM7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) == RESET) +#define __HAL_RCC_TIM12_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) == RESET) +#define __HAL_RCC_TIM13_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) == RESET) +#define __HAL_RCC_TIM14_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) == RESET) +#define __HAL_RCC_USART3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) == RESET) +#define __HAL_RCC_UART4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) == RESET) +#define __HAL_RCC_UART5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) == RESET) #define __HAL_RCC_CAN1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) == RESET) #define __HAL_RCC_CAN2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) == RESET) -#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) +#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) #define __HAL_RCC_UART7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART7EN)) == RESET) -#define __HAL_RCC_UART8_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) == RESET) +#define __HAL_RCC_UART8_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) == RESET) /** * @} */ - + /** @defgroup RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable * @brief Enable or disable the High Speed APB (APB2) peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. 
* @{ */ #define __HAL_RCC_TIM8_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ADC2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ADC3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI5_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI6_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI6EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI6EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI6EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SAI1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SDIO_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI4_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - 
SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM10_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SDIO_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDIOEN)) #define __HAL_RCC_SPI4_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI4EN)) #define __HAL_RCC_TIM10_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM10EN)) @@ -1598,47 +1596,47 @@ typedef struct #if defined(STM32F429xx)|| defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) #define __HAL_RCC_LTDC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_LTDCEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_LTDCEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_LTDCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_LTDCEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_LTDC_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_LTDCEN)) #endif /* STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ #if defined(STM32F469xx) || defined(STM32F479xx) #define __HAL_RCC_DSI_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_DSIEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_DSIEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_DSIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_DSIEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_DSI_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_DSIEN)) #endif /* STM32F469xx || STM32F479xx */ /** * @} */ - + /** @defgroup RCCEx_APB2_Peripheral_Clock_Enable_Disable_Status APB2 Peripheral Clock Enable Disable Status * @brief Get the enable or disable status of the APB2 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) * is disabled and the application software has to enable this clock before * using it. 
* @{ - */ + */ #define __HAL_RCC_TIM8_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) != RESET) #define __HAL_RCC_ADC2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) != RESET) -#define __HAL_RCC_ADC3_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) != RESET) -#define __HAL_RCC_SPI5_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) != RESET) -#define __HAL_RCC_SPI6_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI6EN)) != RESET) -#define __HAL_RCC_SAI1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) != RESET) +#define __HAL_RCC_ADC3_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) != RESET) +#define __HAL_RCC_SPI5_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) != RESET) +#define __HAL_RCC_SPI6_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI6EN)) != RESET) +#define __HAL_RCC_SAI1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) != RESET) #define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) #define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) -#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN))!= RESET) +#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN))!= RESET) #define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) #define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) @@ -1663,10 +1661,11 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset +/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset * @brief Force or release AHB1 peripheral reset. * @{ */ +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x22E017FFU) #define __HAL_RCC_GPIOD_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIODRST)) #define __HAL_RCC_GPIOE_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOERST)) #define __HAL_RCC_GPIOF_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOFRST)) @@ -1694,11 +1693,16 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset +/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset * @brief Force or release AHB2 peripheral reset. 
* @{ */ -#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0xFFFFFFFFU) +#if defined(STM32F427xx) || defined(STM32F429xx) || defined(STM32F469xx) +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x000000C1U) +#endif /* STM32F427xx || STM32F429xx || STM32F469xx */ +#if defined(STM32F437xx) || defined(STM32F439xx) || defined(STM32F479xx) +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x000000F1U) +#endif /* STM32F437xx || STM32F439xx || STM32F479xx */ #define __HAL_RCC_USB_OTG_FS_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_OTGFSRST)) #define __HAL_RCC_RNG_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_RNGRST)) #define __HAL_RCC_DCMI_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_DCMIRST)) @@ -1708,7 +1712,7 @@ typedef struct #define __HAL_RCC_RNG_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_RNGRST)) #define __HAL_RCC_DCMI_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_DCMIRST)) -#if defined(STM32F437xx)|| defined(STM32F439xx) || defined(STM32F479xx) +#if defined(STM32F437xx)|| defined(STM32F439xx) || defined(STM32F479xx) #define __HAL_RCC_CRYP_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_CRYPRST)) #define __HAL_RCC_HASH_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_HASHRST)) @@ -1719,27 +1723,33 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset +/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset * @brief Force or release AHB3 peripheral reset. * @{ - */ -#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0xFFFFFFFFU) -#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) + */ +#if defined(STM32F427xx) || defined(STM32F429xx) || defined(STM32F437xx) || defined(STM32F439xx) +#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0x00000001U) +#endif /* STM32F427xx || STM32F429xx || STM32F437xx || STM32F439xx */ +#if defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0x00000003U) +#endif /* STM32F469xx || STM32F479xx */ +#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) #define __HAL_RCC_FMC_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_FMCRST)) #define __HAL_RCC_FMC_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_FMCRST)) #if defined(STM32F469xx) || defined(STM32F479xx) #define __HAL_RCC_QSPI_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_QSPIRST)) -#define __HAL_RCC_QSPI_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_QSPIRST)) +#define __HAL_RCC_QSPI_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_QSPIRST)) #endif /* STM32F469xx || STM32F479xx */ /** * @} */ -/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset +/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset * @brief Force or release APB1 peripheral reset. * @{ - */ + */ +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0xF6FEC9FFU) #define __HAL_RCC_TIM6_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM6RST)) #define __HAL_RCC_TIM7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM7RST)) #define __HAL_RCC_TIM12_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM12RST)) @@ -1781,10 +1791,19 @@ typedef struct * @} */ -/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset +/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset * @brief Force or release APB2 peripheral reset. 
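The hunks above swap the blanket 0xFFFFFFFF reset values for per-family masks and keep the per-peripheral FORCE/RELEASE pairs. The usual pattern is to force the reset and then release it immediately, either for a single peripheral or for a whole bus. A hedged sketch using the AHB3/FMC macros from this hunk; the function names are illustrative:

#include "stm32f4xx_hal.h"   /* assumed umbrella HAL header */

/* Illustrative only: pulse the reset of the FMC controller via RCC->AHB3RSTR. */
static void example_fmc_reset(void)
{
  __HAL_RCC_FMC_FORCE_RESET();     /* sets FMCRST */
  __HAL_RCC_FMC_RELEASE_RESET();   /* clears FMCRST again */
}

/* Illustrative only: reset every peripheral on AHB3 at once. The mask written by
   __HAL_RCC_AHB3_FORCE_RESET() is device-dependent, as the per-family definitions above show. */
static void example_ahb3_bus_reset(void)
{
  __HAL_RCC_AHB3_FORCE_RESET();
  __HAL_RCC_AHB3_RELEASE_RESET();
}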
* @{ */ +#if defined(STM32F469xx) || defined(STM32F479xx) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x0C777933U) +#endif /* STM32F469xx || STM32F479xx */ +#if defined(STM32F429xx) || defined(STM32F439xx) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x04777933U) +#endif /* STM32F429xx || STM32F439xx */ +#if defined(STM32F427xx) || defined(STM32F437xx) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x00777933U) +#endif /* STM32F427xx || STM32F437xx */ #define __HAL_RCC_TIM8_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM8RST)) #define __HAL_RCC_SPI5_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI5RST)) #define __HAL_RCC_SPI6_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI6RST)) @@ -1883,7 +1902,7 @@ typedef struct #define __HAL_RCC_DCMI_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_DCMILPEN)) #define __HAL_RCC_DCMI_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_DCMILPEN)) -#if defined(STM32F437xx)|| defined(STM32F439xx) || defined(STM32F479xx) +#if defined(STM32F437xx)|| defined(STM32F439xx) || defined(STM32F479xx) #define __HAL_RCC_CRYP_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_CRYPLPEN)) #define __HAL_RCC_HASH_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_HASHLPEN)) @@ -1920,7 +1939,7 @@ typedef struct * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. * @note By default, all peripheral clocks are enabled during SLEEP mode. * @{ - */ + */ #define __HAL_RCC_TIM6_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM6LPEN)) #define __HAL_RCC_TIM7_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM7LPEN)) #define __HAL_RCC_TIM12_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM12LPEN)) @@ -1961,7 +1980,7 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_APB2_LowPower_Enable_Disable APB2 Peripheral Low Power Enable Disable * @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. * @note Peripheral clock gating in SLEEP mode can be used to further reduce @@ -1969,7 +1988,7 @@ typedef struct * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. * @note By default, all peripheral clocks are enabled during SLEEP mode. * @{ - */ + */ #define __HAL_RCC_TIM8_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM8LPEN)) #define __HAL_RCC_ADC2_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC2LPEN)) #define __HAL_RCC_ADC3_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC3LPEN)) @@ -2011,80 +2030,80 @@ typedef struct /** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable * @brief Enables or disables the AHB1 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. 
* @{ */ #define __HAL_RCC_BKPSRAM_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CCMDATARAMEN_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CRC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOD_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOE_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOI_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOF_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + UNUSED(tmpreg); \ + } while(0U) #define 
__HAL_RCC_GPIOG_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_USB_OTG_HS_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOD_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIODEN)) #define __HAL_RCC_GPIOE_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOEEN)) #define __HAL_RCC_GPIOF_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOFEN)) @@ -2100,38 +2119,38 @@ typedef struct * @brief Enable ETHERNET clock. 
*/ #define __HAL_RCC_ETHMAC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ETHMACTX_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ETHMACRX_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ETHMACPTP_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ETH_CLK_ENABLE() do { \ - __HAL_RCC_ETHMAC_CLK_ENABLE(); \ - __HAL_RCC_ETHMACTX_CLK_ENABLE(); \ - __HAL_RCC_ETHMACRX_CLK_ENABLE(); \ - } while(0U) + __HAL_RCC_ETHMAC_CLK_ENABLE(); \ + __HAL_RCC_ETHMACTX_CLK_ENABLE(); \ + __HAL_RCC_ETHMACRX_CLK_ENABLE(); \ + } while(0U) /** * @brief Disable ETHERNET clock. @@ -2139,24 +2158,24 @@ typedef struct #define __HAL_RCC_ETHMAC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACEN)) #define __HAL_RCC_ETHMACTX_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACTXEN)) #define __HAL_RCC_ETHMACRX_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACRXEN)) -#define __HAL_RCC_ETHMACPTP_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACPTPEN)) +#define __HAL_RCC_ETHMACPTP_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACPTPEN)) #define __HAL_RCC_ETH_CLK_DISABLE() do { \ - __HAL_RCC_ETHMACTX_CLK_DISABLE(); \ - __HAL_RCC_ETHMACRX_CLK_DISABLE(); \ - __HAL_RCC_ETHMAC_CLK_DISABLE(); \ - } while(0U) + __HAL_RCC_ETHMACTX_CLK_DISABLE(); \ + __HAL_RCC_ETHMACRX_CLK_DISABLE(); \ + __HAL_RCC_ETHMAC_CLK_DISABLE(); \ + } while(0U) #endif /* STM32F407xx || STM32F417xx */ /** * @} */ - + /** @defgroup RCCEx_AHB1_Peripheral_Clock_Enable_Disable_Status AHB1 Peripheral Clock Enable Disable Status * @brief Get the enable or disable status of the AHB1 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) * is disabled and the application software has to enable this clock before * using it. 
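Since this patch exists to bring up an external Ethernet PHY, the composite __HAL_RCC_ETH_CLK_ENABLE()/__HAL_RCC_ETH_CLK_DISABLE() macros shown above (which expand to the MAC, MAC-TX and MAC-RX enables) are what the MAC init hook would normally call on devices that have the Ethernet MAC. A minimal sketch, assuming the standard weak HAL_ETH_MspInit() callback; the RMII/MII pin configuration a real MSP init would also need is deliberately omitted:

#include "stm32f4xx_hal.h"   /* assumed umbrella HAL header; ETH module enabled in stm32f4xx_hal_conf.h */

/* Illustrative only: clock-gating half of an Ethernet MSP init.
   A complete HAL_ETH_MspInit() would also enable the GPIO clocks and configure the PHY pins. */
void HAL_ETH_MspInit(ETH_HandleTypeDef *heth)
{
  (void)heth;

  __HAL_RCC_ETH_CLK_ENABLE();   /* MAC + MACTX + MACRX clock enables, as defined above */
}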
* @{ - */ + */ #define __HAL_RCC_BKPSRAM_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) != RESET) #define __HAL_RCC_CCMDATARAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) != RESET) #define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) @@ -2203,55 +2222,55 @@ typedef struct /** * @} */ - -/** @defgroup RCCEx_AHB2_Clock_Enable_Disable AHB2 Peripheral Clock Enable Disable + +/** @defgroup RCCEx_AHB2_Clock_Enable_Disable AHB2 Peripheral Clock Enable Disable * @brief Enable or disable the AHB2 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. * @{ */ #define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() do {(RCC->AHB2ENR |= (RCC_AHB2ENR_OTGFSEN));\ - __HAL_RCC_SYSCFG_CLK_ENABLE();\ + __HAL_RCC_SYSCFG_CLK_ENABLE();\ }while(0U) - + #define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_OTGFSEN)) #define __HAL_RCC_RNG_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_RNG_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_RNGEN)) -#if defined(STM32F407xx)|| defined(STM32F417xx) +#if defined(STM32F407xx)|| defined(STM32F417xx) #define __HAL_RCC_DCMI_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_DCMI_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_DCMIEN)) #endif /* STM32F407xx || STM32F417xx */ #if defined(STM32F415xx) || defined(STM32F417xx) #define __HAL_RCC_CRYP_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_HASH_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CRYP_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_CRYPEN)) #define __HAL_RCC_HASH_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_HASHEN)) #endif 
/* STM32F415xx || STM32F417xx */ @@ -2268,41 +2287,41 @@ typedef struct * @{ */ #define __HAL_RCC_USB_OTG_FS_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) != RESET) -#define __HAL_RCC_USB_OTG_FS_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) +#define __HAL_RCC_USB_OTG_FS_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) -#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) != RESET) -#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) == RESET) +#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) != RESET) +#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) == RESET) -#if defined(STM32F407xx)|| defined(STM32F417xx) -#define __HAL_RCC_DCMI_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) != RESET) -#define __HAL_RCC_DCMI_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) == RESET) +#if defined(STM32F407xx)|| defined(STM32F417xx) +#define __HAL_RCC_DCMI_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) != RESET) +#define __HAL_RCC_DCMI_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) == RESET) #endif /* STM32F407xx || STM32F417xx */ #if defined(STM32F415xx) || defined(STM32F417xx) -#define __HAL_RCC_CRYP_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_CRYPEN)) != RESET) -#define __HAL_RCC_HASH_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_HASHEN)) != RESET) +#define __HAL_RCC_CRYP_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_CRYPEN)) != RESET) +#define __HAL_RCC_HASH_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_HASHEN)) != RESET) -#define __HAL_RCC_CRYP_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_CRYPEN)) == RESET) -#define __HAL_RCC_HASH_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_HASHEN)) == RESET) -#endif /* STM32F415xx || STM32F417xx */ +#define __HAL_RCC_CRYP_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_CRYPEN)) == RESET) +#define __HAL_RCC_HASH_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_HASHEN)) == RESET) +#endif /* STM32F415xx || STM32F417xx */ /** * @} - */ - + */ + /** @defgroup RCCEx_AHB3_Clock_Enable_Disable AHB3 Peripheral Clock Enable Disable * @brief Enables or disables the AHB3 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. - * @{ + * @{ */ #define __HAL_RCC_FSMC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FSMCEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FSMCEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FSMCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FSMCEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_FSMC_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_FSMCEN)) /** * @} @@ -2315,131 +2334,131 @@ typedef struct * using it. 
* @{ */ -#define __HAL_RCC_FSMC_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FSMCEN)) != RESET) -#define __HAL_RCC_FSMC_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FSMCEN)) == RESET) +#define __HAL_RCC_FSMC_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FSMCEN)) != RESET) +#define __HAL_RCC_FSMC_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FSMCEN)) == RESET) /** * @} - */ - + */ + /** @defgroup RCCEx_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. - * @{ + * @{ */ #define __HAL_RCC_TIM6_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM7_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM12_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM13_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM14_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_USART3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ - UNUSED(tmpreg); \ - } 
while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_UART4_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_UART5_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CAN1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CAN2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_DAC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + /* 
Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM4_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_I2C3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM2EN)) #define __HAL_RCC_TIM3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM3EN)) #define __HAL_RCC_TIM4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM4EN)) @@ -2459,100 +2478,100 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_APB1_Peripheral_Clock_Enable_Disable_Status APB1 Peripheral Clock Enable Disable Status * @brief Get the enable or disable status of the APB1 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) * is disabled and the application software has to enable this clock before * using it. 
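The enable macros above wrap the ENR bit set in a do/while block and immediately read the bit back into a throwaway variable; that dummy read covers the documented delay between enabling an RCC peripheral clock and the peripheral registers becoming accessible. A minimal usage sketch, not part of this patch (the timer settings and the helper name are placeholders):

#include "stm32f4xx_hal.h"

/* Hypothetical helper: start TIM3 as a simple time base.
 * The APB1 clock must be gated on before any TIM3 register is written. */
static void timebase_start(void)
{
  static TIM_HandleTypeDef htim3;         /* placeholder handle */

  __HAL_RCC_TIM3_CLK_ENABLE();            /* sets TIM3EN, then reads it back once */

  htim3.Instance           = TIM3;
  htim3.Init.Prescaler     = 15999U;      /* placeholder: 16 MHz / 16000 = 1 kHz tick */
  htim3.Init.CounterMode   = TIM_COUNTERMODE_UP;
  htim3.Init.Period        = 999U;        /* placeholder: 1 s update period */
  htim3.Init.ClockDivision = TIM_CLOCKDIVISION_DIV1;
  (void)HAL_TIM_Base_Init(&htim3);
  (void)HAL_TIM_Base_Start(&htim3);
}

Without the clock enable, the register writes performed by HAL_TIM_Base_Init would target a peripheral whose bus clock is still off.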
* @{ - */ -#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) -#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) + */ +#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) +#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) #define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) -#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) -#define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) -#define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) -#define __HAL_RCC_TIM7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) != RESET) -#define __HAL_RCC_TIM12_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) != RESET) -#define __HAL_RCC_TIM13_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) != RESET) -#define __HAL_RCC_TIM14_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) != RESET) -#define __HAL_RCC_USART3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) != RESET) -#define __HAL_RCC_UART4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) != RESET) -#define __HAL_RCC_UART5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) != RESET) -#define __HAL_RCC_CAN1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) != RESET) -#define __HAL_RCC_CAN2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) != RESET) -#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) - -#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) -#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) +#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) +#define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) +#define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) +#define __HAL_RCC_TIM7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) != RESET) +#define __HAL_RCC_TIM12_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) != RESET) +#define __HAL_RCC_TIM13_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) != RESET) +#define __HAL_RCC_TIM14_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) != RESET) +#define __HAL_RCC_USART3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) != RESET) +#define __HAL_RCC_UART4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) != RESET) +#define __HAL_RCC_UART5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) != RESET) +#define __HAL_RCC_CAN1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) != RESET) +#define __HAL_RCC_CAN2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) != RESET) +#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) + +#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) +#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) #define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) -#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) -#define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) -#define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == 
RESET) -#define __HAL_RCC_TIM7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) == RESET) -#define __HAL_RCC_TIM12_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) == RESET) -#define __HAL_RCC_TIM13_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) == RESET) -#define __HAL_RCC_TIM14_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) == RESET) -#define __HAL_RCC_USART3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) == RESET) -#define __HAL_RCC_UART4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) == RESET) -#define __HAL_RCC_UART5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) == RESET) -#define __HAL_RCC_CAN1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) == RESET) -#define __HAL_RCC_CAN2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) == RESET) -#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) - /** - * @} - */ - +#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) +#define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) +#define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == RESET) +#define __HAL_RCC_TIM7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) == RESET) +#define __HAL_RCC_TIM12_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) == RESET) +#define __HAL_RCC_TIM13_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) == RESET) +#define __HAL_RCC_TIM14_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) == RESET) +#define __HAL_RCC_USART3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) == RESET) +#define __HAL_RCC_UART4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) == RESET) +#define __HAL_RCC_UART5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) == RESET) +#define __HAL_RCC_CAN1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) == RESET) +#define __HAL_RCC_CAN2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) == RESET) +#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) +/** + * @} + */ + /** @defgroup RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable * @brief Enable or disable the High Speed APB (APB2) peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. 
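The *_IS_CLK_ENABLED()/*_IS_CLK_DISABLED() macros above only read the ENR bit and never modify it. One possible (purely illustrative) use is enabling a shared clock exactly once from independent modules; the helper name is hypothetical:

#include "stm32f4xx_hal.h"

/* Enable the UART4 bus clock only if no other module did it yet. */
static void uart4_clock_on_once(void)
{
  if (__HAL_RCC_UART4_IS_CLK_DISABLED())
  {
    __HAL_RCC_UART4_CLK_ENABLE();
  }
}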
* @{ - */ + */ #define __HAL_RCC_TIM8_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ADC2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ADC3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SDIO_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI4_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM10_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SDIO_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDIOEN)) #define __HAL_RCC_SPI4_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI4EN)) @@ -2571,27 +2590,33 @@ typedef struct * using it. 
* @{ */ -#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) -#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) -#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) -#define __HAL_RCC_TIM8_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) != RESET) -#define __HAL_RCC_ADC2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) != RESET) +#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) +#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) +#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) +#define __HAL_RCC_TIM8_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) != RESET) +#define __HAL_RCC_ADC2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) != RESET) #define __HAL_RCC_ADC3_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) != RESET) - -#define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) -#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) -#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) -#define __HAL_RCC_TIM8_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) == RESET) -#define __HAL_RCC_ADC2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) == RESET) + +#define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) +#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) +#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) +#define __HAL_RCC_TIM8_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) == RESET) +#define __HAL_RCC_ADC2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) == RESET) #define __HAL_RCC_ADC3_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) == RESET) /** * @} */ - -/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset + +/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset * @brief Force or release AHB1 peripheral reset. * @{ */ +#if defined (STM32F405xx) || defined (STM32F415xx) +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x206011FFU) +#endif /* STM32F405xx || STM32F415xx */ +#if defined (STM32F407xx) || defined (STM32F417xx) +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x226011FFU) +#endif /* STM32F407xx || STM32F417xx */ #define __HAL_RCC_GPIOD_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIODRST)) #define __HAL_RCC_GPIOE_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOERST)) #define __HAL_RCC_GPIOF_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOFRST)) @@ -2613,26 +2638,31 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset +/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset * @brief Force or release AHB2 peripheral reset. 
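The hunk above replaces the blanket 0xFFFFFFFFU AHB1 reset value with per-device masks that assert only the reset bits implemented on each part; callers keep the usual force-then-release pattern. A hedged sketch of that pattern, assuming the matching __HAL_RCC_AHB1_RELEASE_RESET() macro from the base RCC header (the surrounding function is hypothetical):

#include "stm32f4xx_hal.h"

/* Return every AHB1 peripheral to its reset state, e.g. before
 * handing control to a bootloader. Illustrative only. */
static void ahb1_bus_reset(void)
{
  __HAL_RCC_AHB1_FORCE_RESET();    /* assert reset for the implemented AHB1 peripherals */
  __HAL_RCC_AHB1_RELEASE_RESET();  /* release it so they can be re-initialised */
}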
* @{ */ -#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0xFFFFFFFFU) +#if defined (STM32F415xx) || defined (STM32F417xx) +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x000000F1U) +#endif /* STM32F415xx || STM32F417xx */ +#if defined (STM32F405xx) || defined (STM32F407xx) +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x000000C1U) +#endif /* STM32F405xx || STM32F407xx */ #define __HAL_RCC_AHB2_RELEASE_RESET() (RCC->AHB2RSTR = 0x00U) -#if defined(STM32F407xx)|| defined(STM32F417xx) +#if defined(STM32F407xx)|| defined(STM32F417xx) #define __HAL_RCC_DCMI_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_DCMIRST)) #define __HAL_RCC_DCMI_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_DCMIRST)) #endif /* STM32F407xx || STM32F417xx */ -#if defined(STM32F415xx) || defined(STM32F417xx) +#if defined(STM32F415xx) || defined(STM32F417xx) #define __HAL_RCC_CRYP_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_CRYPRST)) #define __HAL_RCC_HASH_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_HASHRST)) #define __HAL_RCC_CRYP_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_CRYPRST)) #define __HAL_RCC_HASH_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_HASHRST)) #endif /* STM32F415xx || STM32F417xx */ - + #define __HAL_RCC_USB_OTG_FS_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_OTGFSRST)) #define __HAL_RCC_USB_OTG_FS_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_OTGFSRST)) @@ -2642,12 +2672,12 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset +/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset * @brief Force or release AHB3 peripheral reset. * @{ - */ -#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0xFFFFFFFFU) -#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) + */ +#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0x00000001U) +#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) #define __HAL_RCC_FSMC_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_FSMCRST)) #define __HAL_RCC_FSMC_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_FSMCRST)) @@ -2655,10 +2685,11 @@ typedef struct * @} */ -/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset +/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset * @brief Force or release APB1 peripheral reset. * @{ */ +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0xF6FEC9FFU) #define __HAL_RCC_TIM6_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM6RST)) #define __HAL_RCC_TIM7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM7RST)) #define __HAL_RCC_TIM12_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM12RST)) @@ -2696,15 +2727,16 @@ typedef struct * @} */ -/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset +/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset * @brief Force or release APB2 peripheral reset. 
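Individual peripherals can be returned to their register defaults with the per-peripheral force/release pairs in these groups; a typical recovery sequence pulses the reset bit and then re-enables and re-configures the block. A minimal sketch using SPI3 (illustrative, not part of the patch; the helper name is a placeholder):

#include "stm32f4xx_hal.h"

/* Hard-reset SPI3 after a bus fault, then hand it back to the driver. */
static void spi3_recover(void)
{
  __HAL_RCC_SPI3_FORCE_RESET();    /* set SPI3RST: SPI3 registers go back to defaults */
  __HAL_RCC_SPI3_RELEASE_RESET();  /* clear SPI3RST: peripheral usable again */
  __HAL_RCC_SPI3_CLK_ENABLE();     /* make sure its APB1 clock is (still) gated on */
  /* ...re-run HAL_SPI_Init() on the associated handle here... */
}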
* @{ */ +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x04777933U) #define __HAL_RCC_TIM8_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM8RST)) #define __HAL_RCC_SDIO_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDIORST)) #define __HAL_RCC_SPI4_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI4RST)) #define __HAL_RCC_TIM10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM10RST)) - + #define __HAL_RCC_SDIO_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SDIORST)) #define __HAL_RCC_SPI4_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI4RST)) #define __HAL_RCC_TIM10_RELEASE_RESET()(RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM10RST)) @@ -2712,7 +2744,7 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable * @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. * @note Peripheral clock gating in SLEEP mode can be used to further reduce @@ -2772,12 +2804,12 @@ typedef struct #define __HAL_RCC_RNG_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_RNGLPEN)) #define __HAL_RCC_RNG_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_RNGLPEN)) -#if defined(STM32F407xx)|| defined(STM32F417xx) +#if defined(STM32F407xx)|| defined(STM32F417xx) #define __HAL_RCC_DCMI_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_DCMILPEN)) #define __HAL_RCC_DCMI_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_DCMILPEN)) #endif /* STM32F407xx || STM32F417xx */ -#if defined(STM32F415xx) || defined(STM32F417xx) +#if defined(STM32F415xx) || defined(STM32F417xx) #define __HAL_RCC_CRYP_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_CRYPLPEN)) #define __HAL_RCC_HASH_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_HASHLPEN)) @@ -2787,7 +2819,7 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_AHB3_LowPower_Enable_Disable AHB3 Peripheral Low Power Enable Disable * @brief Enable or disable the AHB3 peripheral clock during Low Power (Sleep) mode. * @note Peripheral clock gating in SLEEP mode can be used to further reduce @@ -2801,7 +2833,7 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_APB1_LowPower_Enable_Disable APB1 Peripheral Low Power Enable Disable * @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. * @note Peripheral clock gating in SLEEP mode can be used to further reduce @@ -2846,7 +2878,7 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_APB2_LowPower_Enable_Disable APB2 Peripheral Low Power Enable Disable * @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. * @note Peripheral clock gating in SLEEP mode can be used to further reduce @@ -2879,38 +2911,38 @@ typedef struct /** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable * @brief Enable or disable the AHB1 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before - * using it. + * is disabled and the application software has to enable this clock before + * using it. 
* @{ */ #define __HAL_RCC_GPIOD_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOE_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CRC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CCMDATARAMEN_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOD_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIODEN)) #define __HAL_RCC_GPIOE_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOEEN)) @@ -2927,30 +2959,30 @@ typedef struct * using it. 
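The same enable-before-use rule applies to the GPIO ports on AHB1: the port clock has to be gated on before HAL_GPIO_Init touches the port registers. A small sketch (the pin choice is only an example):

#include "stm32f4xx_hal.h"

/* Drive PD12 high; PD12 is a placeholder pin. */
static void pd12_high(void)
{
  GPIO_InitTypeDef io = {0};

  __HAL_RCC_GPIOD_CLK_ENABLE();            /* AHB1 clock to port D first */

  io.Pin   = GPIO_PIN_12;
  io.Mode  = GPIO_MODE_OUTPUT_PP;
  io.Pull  = GPIO_NOPULL;
  io.Speed = GPIO_SPEED_FREQ_LOW;
  HAL_GPIO_Init(GPIOD, &io);

  HAL_GPIO_WritePin(GPIOD, GPIO_PIN_12, GPIO_PIN_SET);
}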
* @{ */ -#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) -#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) -#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) -#define __HAL_RCC_CCMDATARAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) != RESET) +#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) +#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) +#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) != RESET) -#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) -#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) -#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) -#define __HAL_RCC_CCMDATARAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) == RESET) +#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) +#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) +#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) == RESET) /** * @} - */ - + */ + /** @defgroup RCCEx_AHB2_Clock_Enable_Disable AHB2 Peripheral Clock Enable Disable * @brief Enable or disable the AHB2 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. * @{ */ #define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() do {(RCC->AHB2ENR |= (RCC_AHB2ENR_OTGFSEN));\ - __HAL_RCC_SYSCFG_CLK_ENABLE();\ + __HAL_RCC_SYSCFG_CLK_ENABLE();\ }while(0U) - + #define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_OTGFSEN)) /** * @} @@ -2967,8 +2999,8 @@ typedef struct #define __HAL_RCC_USB_OTG_FS_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) /** * @} - */ - + */ + /** @defgroup RCC_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. 
* @note After reset, the peripheral clock (used for registers read/write access) @@ -2977,40 +3009,40 @@ typedef struct * @{ */ #define __HAL_RCC_TIM2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM4_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_I2C3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM2EN)) #define __HAL_RCC_TIM3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM3EN)) #define __HAL_RCC_TIM4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM4EN)) @@ -3027,49 +3059,49 @@ typedef struct * using it. 
* @{ */ -#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) -#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) -#define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) -#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) +#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) +#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) +#define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) +#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) #define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) -#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) -#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) -#define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) -#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) +#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) +#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) +#define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) +#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) #define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) /** * @} - */ - + */ + /** @defgroup RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable * @brief Enable or disable the High Speed APB (APB2) peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. 
* @{ */ #define __HAL_RCC_SDIO_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI4_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM10_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SDIO_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDIOEN)) #define __HAL_RCC_SPI4_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI4EN)) @@ -3077,7 +3109,7 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_APB2_Peripheral_Clock_Enable_Disable_Status APB2 Peripheral Clock Enable Disable Status * @brief Get the enable or disable status of the APB2 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) @@ -3085,21 +3117,21 @@ typedef struct * using it. * @{ */ -#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) -#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) -#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) +#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) +#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) +#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) #define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) -#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) -#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) +#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) +#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) /** * @} */ -/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset +/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset * @brief Force or release AHB1 peripheral reset. 
* @{ - */ -#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0xFFFFFFFFU) + */ +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x0060109FU) #define __HAL_RCC_GPIOD_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIODRST)) #define __HAL_RCC_GPIOE_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOERST)) #define __HAL_RCC_CRC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_CRCRST)) @@ -3112,11 +3144,11 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset +/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset * @brief Force or release AHB2 peripheral reset. * @{ */ -#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x00000080U) #define __HAL_RCC_USB_OTG_FS_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_OTGFSRST)) #define __HAL_RCC_AHB2_RELEASE_RESET() (RCC->AHB2RSTR = 0x00U) @@ -3125,18 +3157,18 @@ typedef struct * @} */ -/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset +/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset * @brief Force or release APB1 peripheral reset. * @{ */ -#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0x10E2C80FU) #define __HAL_RCC_TIM2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM2RST)) #define __HAL_RCC_TIM3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM3RST)) #define __HAL_RCC_TIM4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM4RST)) #define __HAL_RCC_SPI3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI3RST)) #define __HAL_RCC_I2C3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C3RST)) -#define __HAL_RCC_APB1_RELEASE_RESET() (RCC->APB1RSTR = 0x00U) +#define __HAL_RCC_APB1_RELEASE_RESET() (RCC->APB1RSTR = 0x00U) #define __HAL_RCC_TIM2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM2RST)) #define __HAL_RCC_TIM3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM3RST)) #define __HAL_RCC_TIM4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM4RST)) @@ -3146,11 +3178,11 @@ typedef struct * @} */ -/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset +/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset * @brief Force or release APB2 peripheral reset. * @{ */ -#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x00077931U) #define __HAL_RCC_SDIO_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDIORST)) #define __HAL_RCC_SPI4_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI4RST)) #define __HAL_RCC_TIM10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM10RST)) @@ -3163,17 +3195,17 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset +/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset * @brief Force or release AHB3 peripheral reset. * @{ - */ + */ #define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0xFFFFFFFFU) -#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) +#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) /** * @} */ -/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable +/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable * @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. * @note Peripheral clock gating in SLEEP mode can be used to further reduce * power consumption. 
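The *_CLK_SLEEP_ENABLE()/*_CLK_SLEEP_DISABLE() macros in the low-power groups only decide whether a peripheral keeps its clock while the core is in SLEEP mode; they do not gate the clock in RUN mode. A short sketch of trimming sleep current (illustrative; the chosen peripheral is arbitrary):

#include "stm32f4xx_hal.h"

/* Stop clocking the RNG while the CPU sleeps; its RUN-mode clock is untouched. */
static void enter_low_power_wait(void)
{
  __HAL_RCC_RNG_CLK_SLEEP_DISABLE();                 /* no RNG clock during SLEEP */
  HAL_PWR_EnterSLEEPMode(PWR_MAINREGULATOR_ON, PWR_SLEEPENTRY_WFI);
  __HAL_RCC_RNG_CLK_SLEEP_ENABLE();                  /* restore the default gating */
}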
@@ -3257,27 +3289,27 @@ typedef struct /*-------------------------------- STM32F410xx -------------------------------*/ #if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) -/** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable +/** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable * @brief Enables or disables the AHB1 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. * @{ */ #define __HAL_RCC_CRC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_RNG_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_RNGEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_RNGEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_RNGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_RNGEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CRC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CRCEN)) #define __HAL_RCC_RNG_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_RNGEN)) /** @@ -3293,53 +3325,53 @@ typedef struct */ #define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) #define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_RNGEN)) != RESET) - + #define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) #define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_RNGEN)) == RESET) /** * @} */ - -/** @defgroup RCCEx_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable + +/** @defgroup RCCEx_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable * @brief Enable or disable the High Speed APB (APB1) peripheral clock. 
* @{ */ #define __HAL_RCC_TIM6_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_LPTIM1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_RTCAPB_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCAPBEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCAPBEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCAPBEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCAPBEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_FMPI2C1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_DAC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ - UNUSED(tmpreg); \ - } while(0U) - + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + UNUSED(tmpreg); \ + } while(0U) + #define __HAL_RCC_TIM6_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM6EN)) #define __HAL_RCC_RTCAPB_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_RTCAPBEN)) #define __HAL_RCC_LPTIM1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_LPTIM1EN)) @@ -3348,53 +3380,53 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_APB1_Peripheral_Clock_Enable_Disable_Status APB1 Peripheral Clock Enable Disable Status * @brief Get the enable or disable status of the APB1 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) * is disabled and the application software has to enable this clock before * using it. 
* @{ - */ -#define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) + */ +#define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) #define __HAL_RCC_RTCAPB_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_RTCAPBEN)) != RESET) -#define __HAL_RCC_LPTIM1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) != RESET) -#define __HAL_RCC_FMPI2C1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_FMPI2C1EN)) != RESET) -#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) +#define __HAL_RCC_LPTIM1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) != RESET) +#define __HAL_RCC_FMPI2C1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_FMPI2C1EN)) != RESET) +#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) -#define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == RESET) +#define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == RESET) #define __HAL_RCC_RTCAPB_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_RTCAPBEN)) == RESET) -#define __HAL_RCC_LPTIM1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) == RESET) -#define __HAL_RCC_FMPI2C1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_FMPI2C1EN)) == RESET) -#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) +#define __HAL_RCC_LPTIM1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) == RESET) +#define __HAL_RCC_FMPI2C1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_FMPI2C1EN)) == RESET) +#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) /** * @} */ - -/** @defgroup RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable + +/** @defgroup RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable * @brief Enable or disable the High Speed APB (APB2) peripheral clock. * @{ - */ + */ #define __HAL_RCC_SPI5_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_EXTIT_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_EXTITEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_EXTITEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_EXTITEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_EXTITEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI5_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI5EN)) #define __HAL_RCC_EXTIT_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_EXTITEN)) /** * @} */ - + /** @defgroup RCCEx_APB2_Peripheral_Clock_Enable_Disable_Status APB2 Peripheral Clock Enable Disable Status * @brief Get the enable or disable status of the APB2 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) @@ -3402,19 +3434,20 @@ typedef struct * using it. 
* @{ */ -#define __HAL_RCC_SPI5_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) != RESET) -#define __HAL_RCC_EXTIT_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_EXTITEN)) != RESET) - -#define __HAL_RCC_SPI5_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) == RESET) -#define __HAL_RCC_EXTIT_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_EXTITEN)) == RESET) +#define __HAL_RCC_SPI5_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) != RESET) +#define __HAL_RCC_EXTIT_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_EXTITEN)) != RESET) + +#define __HAL_RCC_SPI5_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) == RESET) +#define __HAL_RCC_EXTIT_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_EXTITEN)) == RESET) /** * @} */ - -/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset + +/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset * @brief Force or release AHB1 peripheral reset. * @{ */ +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x80601087U) #define __HAL_RCC_CRC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_CRCRST)) #define __HAL_RCC_RNG_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_RNGRST)) #define __HAL_RCC_CRC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_CRCRST)) @@ -3423,7 +3456,7 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset +/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset * @brief Force or release AHB2 peripheral reset. * @{ */ @@ -3433,20 +3466,26 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset +/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset * @brief Force or release AHB3 peripheral reset. * @{ - */ + */ #define __HAL_RCC_AHB3_FORCE_RESET() #define __HAL_RCC_AHB3_RELEASE_RESET() /** * @} */ -/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset +/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset * @brief Force or release APB1 peripheral reset. * @{ */ +#if defined (STM32F410Rx) || defined (STM32F410Cx) +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0x31624A18U) +#endif /* STM32F410Rx || STM32F410Cx */ +#if defined (STM32F410Tx) +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0x31620A18U) +#endif /* STM32F410Tx */ #define __HAL_RCC_TIM6_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM6RST)) #define __HAL_RCC_LPTIM1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_LPTIM1RST)) #define __HAL_RCC_FMPI2C1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_FMPI2C1RST)) @@ -3460,17 +3499,23 @@ typedef struct * @} */ -/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset +/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset * @brief Force or release APB2 peripheral reset. 
* @{ */ +#if defined (STM32F410Rx) || defined (STM32F410Cx) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x00155131U) +#endif /* STM32F410Rx || STM32F410Cx */ +#if defined (STM32F410Tx) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x00055111U) +#endif /* STM32F410Tx */ #define __HAL_RCC_SPI5_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI5RST)) -#define __HAL_RCC_SPI5_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI5RST)) +#define __HAL_RCC_SPI5_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI5RST)) /** * @} */ -/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable +/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable * @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. * @note Peripheral clock gating in SLEEP mode can be used to further reduce * power consumption. @@ -3491,7 +3536,7 @@ typedef struct * @} */ -/** @defgroup RCCEx_APB1_LowPower_Enable_Disable APB1 Peripheral Low Power Enable Disable +/** @defgroup RCCEx_APB1_LowPower_Enable_Disable APB1 Peripheral Low Power Enable Disable * @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. * @{ */ @@ -3510,13 +3555,13 @@ typedef struct * @} */ -/** @defgroup RCCEx_APB2_LowPower_Enable_Disable APB2 Peripheral Low Power Enable Disable +/** @defgroup RCCEx_APB2_LowPower_Enable_Disable APB2 Peripheral Low Power Enable Disable * @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. * @{ */ #define __HAL_RCC_SPI5_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI5LPEN)) -#define __HAL_RCC_EXTIT_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_EXTITLPEN)) -#define __HAL_RCC_SPI5_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI5LPEN)) +#define __HAL_RCC_EXTIT_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_EXTITLPEN)) +#define __HAL_RCC_SPI5_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI5LPEN)) #define __HAL_RCC_EXTIT_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_EXTITLPEN)) /** * @} @@ -3530,38 +3575,38 @@ typedef struct /** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable * @brief Enables or disables the AHB1 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. 
* @{ */ #define __HAL_RCC_CCMDATARAMEN_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOD_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOE_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CRC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOD_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIODEN)) #define __HAL_RCC_GPIOE_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOEEN)) #define __HAL_RCC_CCMDATARAMEN_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CCMDATARAMEN)) @@ -3576,29 +3621,29 @@ typedef struct * is disabled and the application software has to enable this clock before * using it. 
* @{ - */ -#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) -#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) -#define __HAL_RCC_CCMDATARAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) != RESET) -#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) + */ +#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) +#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) != RESET) +#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) -#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) -#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) -#define __HAL_RCC_CCMDATARAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) == RESET) -#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) +#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) +#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) == RESET) +#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) /** * @} */ - + /** @defgroup RCCEX_AHB2_Clock_Enable_Disable AHB2 Peripheral Clock Enable Disable * @brief Enable or disable the AHB2 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. * @{ */ #define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() do {(RCC->AHB2ENR |= (RCC_AHB2ENR_OTGFSEN));\ - __HAL_RCC_SYSCFG_CLK_ENABLE();\ + __HAL_RCC_SYSCFG_CLK_ENABLE();\ }while(0U) #define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_OTGFSEN)) @@ -3617,50 +3662,50 @@ typedef struct #define __HAL_RCC_USB_OTG_FS_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) /** * @} - */ + */ /** @defgroup RCCEx_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before - * using it. + * is disabled and the application software has to enable this clock before + * using it. 
* @{ */ #define __HAL_RCC_TIM2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM4_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_I2C3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM2EN)) #define __HAL_RCC_TIM3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM3EN)) #define __HAL_RCC_TIM4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM4EN)) @@ -3668,8 +3713,8 @@ typedef struct #define __HAL_RCC_I2C3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C3EN)) /** * @} - */ - + */ + /** @defgroup RCCEx_APB1_Peripheral_Clock_Enable_Disable_Status APB1 Peripheral Clock Enable Disable Status * @brief Get the enable or disable status of the APB1 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) @@ -3677,53 +3722,53 @@ typedef struct * using it. 
* @{ */ -#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) -#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) -#define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) -#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) +#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) +#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) +#define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) +#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) #define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) -#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) -#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) -#define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) -#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) -#define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) +#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) +#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) +#define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) +#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) +#define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) /** * @} - */ - + */ + /** @defgroup RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable * @brief Enable or disable the High Speed APB (APB2) peripheral clock. 
* @{ */ #define __HAL_RCC_SPI5_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SDIO_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI4_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM10_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SDIO_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDIOEN)) #define __HAL_RCC_SPI4_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI4EN)) #define __HAL_RCC_TIM10_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM10EN)) @@ -3731,7 +3776,7 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_APB2_Peripheral_Clock_Enable_Disable_Status APB2 Peripheral Clock Enable Disable Status * @brief Get the enable or disable status of the APB2 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) @@ -3739,23 +3784,24 @@ typedef struct * using it. 
* @{ */ -#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) -#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) -#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) -#define __HAL_RCC_SPI5_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) != RESET) +#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) +#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) +#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) +#define __HAL_RCC_SPI5_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) != RESET) -#define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) -#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) -#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) -#define __HAL_RCC_SPI5_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) == RESET) +#define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) +#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) +#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) +#define __HAL_RCC_SPI5_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) == RESET) /** * @} - */ - -/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset + */ + +/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset * @brief Force or release AHB1 peripheral reset. * @{ - */ + */ +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x0060109FU) #define __HAL_RCC_GPIOD_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIODRST)) #define __HAL_RCC_GPIOE_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOERST)) #define __HAL_RCC_CRC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_CRCRST)) @@ -3767,11 +3813,11 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset +/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset * @brief Force or release AHB2 peripheral reset. * @{ */ -#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x00000080U) #define __HAL_RCC_USB_OTG_FS_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_OTGFSRST)) #define __HAL_RCC_AHB2_RELEASE_RESET() (RCC->AHB2RSTR = 0x00U) @@ -3780,20 +3826,21 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset +/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset * @brief Force or release AHB3 peripheral reset. * @{ - */ + */ #define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0xFFFFFFFFU) -#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) +#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) /** * @} */ -/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset +/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset * @brief Force or release APB1 peripheral reset. 
* @{ */ +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0x10E2C80FU) #define __HAL_RCC_TIM2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM2RST)) #define __HAL_RCC_TIM3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM3RST)) #define __HAL_RCC_TIM4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM4RST)) @@ -3809,10 +3856,11 @@ typedef struct * @} */ -/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset +/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset * @brief Force or release APB2 peripheral reset. * @{ */ +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x00177931U) #define __HAL_RCC_SPI5_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI5RST)) #define __HAL_RCC_SDIO_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDIORST)) #define __HAL_RCC_SPI4_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI4RST)) @@ -3826,7 +3874,7 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable +/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable * @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. * @note Peripheral clock gating in SLEEP mode can be used to further reduce * power consumption. @@ -3839,8 +3887,8 @@ typedef struct #define __HAL_RCC_CRC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_CRCLPEN)) #define __HAL_RCC_FLITF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_FLITFLPEN)) #define __HAL_RCC_SRAM1_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM1LPEN)) - -#define __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIODLPEN)) + +#define __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIODLPEN)) #define __HAL_RCC_GPIOE_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOELPEN)) #define __HAL_RCC_CRC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_CRCLPEN)) #define __HAL_RCC_FLITF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_FLITFLPEN)) @@ -3863,7 +3911,7 @@ typedef struct * @} */ -/** @defgroup RCCEx_APB1_LowPower_Enable_Disable APB1 Peripheral Low Power Enable Disable +/** @defgroup RCCEx_APB1_LowPower_Enable_Disable APB1 Peripheral Low Power Enable Disable * @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. * @{ */ @@ -3882,7 +3930,7 @@ typedef struct * @} */ -/** @defgroup RCCEx_APB2_LowPower_Enable_Disable APB2 Peripheral Low Power Enable Disable +/** @defgroup RCCEx_APB2_LowPower_Enable_Disable APB2 Peripheral Low Power Enable Disable * @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. * @{ */ @@ -3906,73 +3954,73 @@ typedef struct /** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable * @brief Enables or disables the AHB1 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. 
* @{ */ #define __HAL_RCC_BKPSRAM_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CCMDATARAMEN_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CCMDATARAMEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CRC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOD_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOE_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOF_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOG_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + UNUSED(tmpreg); \ + } while(0U) #define 
__HAL_RCC_USB_OTG_HS_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOD_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIODEN)) #define __HAL_RCC_GPIOE_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOEEN)) #define __HAL_RCC_GPIOF_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOFEN)) @@ -3993,62 +4041,62 @@ typedef struct * using it. * @{ */ -#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) -#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) -#define __HAL_RCC_GPIOF_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) != RESET) -#define __HAL_RCC_GPIOG_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) != RESET) -#define __HAL_RCC_USB_OTG_HS_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) != RESET) -#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) != RESET) -#define __HAL_RCC_BKPSRAM_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) != RESET) -#define __HAL_RCC_CCMDATARAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN))!= RESET) -#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) - -#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) -#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) -#define __HAL_RCC_GPIOF_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) == RESET) -#define __HAL_RCC_GPIOG_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) == RESET) -#define __HAL_RCC_USB_OTG_HS_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) == RESET) -#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) == RESET) -#define __HAL_RCC_BKPSRAM_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) == RESET) -#define __HAL_RCC_CCMDATARAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) == RESET) -#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) -/** - * @} - */ - +#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) +#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) +#define __HAL_RCC_GPIOF_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) != RESET) +#define __HAL_RCC_GPIOG_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) != RESET) +#define __HAL_RCC_USB_OTG_HS_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) != RESET) +#define 
__HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) != RESET) +#define __HAL_RCC_BKPSRAM_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) != RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN))!= RESET) +#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) + +#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) +#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) +#define __HAL_RCC_GPIOF_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) == RESET) +#define __HAL_RCC_GPIOG_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) == RESET) +#define __HAL_RCC_USB_OTG_HS_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) == RESET) +#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) == RESET) +#define __HAL_RCC_BKPSRAM_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) == RESET) +#define __HAL_RCC_CCMDATARAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CCMDATARAMEN)) == RESET) +#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) +/** + * @} + */ + /** @defgroup RCCEx_AHB2_Clock_Enable_Disable AHB2 Peripheral Clock Enable Disable * @brief Enable or disable the AHB2 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. * @{ */ #define __HAL_RCC_DCMI_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_DCMI_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_DCMIEN)) #define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() do {(RCC->AHB2ENR |= (RCC_AHB2ENR_OTGFSEN));\ - __HAL_RCC_SYSCFG_CLK_ENABLE();\ + __HAL_RCC_SYSCFG_CLK_ENABLE();\ }while(0U) - + #define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_OTGFSEN)) #define __HAL_RCC_RNG_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_RNG_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_RNGEN)) /** * @} */ - + /** @defgroup RCCEx_AHB2_Peripheral_Clock_Enable_Disable_Status AHB2 Peripheral Clock Enable Disable Status * @brief Get the enable or disable status of the AHB2 peripheral clock. 
* @note After reset, the peripheral clock (used for registers read/write access) @@ -4062,33 +4110,33 @@ typedef struct #define __HAL_RCC_USB_OTG_FS_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) != RESET) #define __HAL_RCC_USB_OTG_FS_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) -#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) != RESET) -#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) == RESET) +#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) != RESET) +#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) == RESET) /** * @} */ - + /** @defgroup RCCEx_AHB3_Clock_Enable_Disable AHB3 Peripheral Clock Enable Disable * @brief Enables or disables the AHB3 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before - * using it. + * is disabled and the application software has to enable this clock before + * using it. * @{ */ #define __HAL_RCC_FMC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_QSPI_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_FMC_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_FMCEN)) #define __HAL_RCC_QSPI_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_QSPIEN)) @@ -4111,147 +4159,147 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before - * using it. + * is disabled and the application software has to enable this clock before + * using it. 
* @{ */ #define __HAL_RCC_TIM6_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM7_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM12_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM13_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM14_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPDIFRX_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPDIFRXEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPDIFRXEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPDIFRXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPDIFRXEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_USART3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_UART4_CLK_ENABLE() do { \ - __IO 
uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_UART5_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_FMPI2C1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CAN1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CAN2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CEC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CECEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CECEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CECEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CECEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_DAC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ - /* Delay after 
an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM4_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_I2C3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM2EN)) #define __HAL_RCC_TIM3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM3EN)) #define __HAL_RCC_TIM4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM4EN)) @@ -4282,9 +4330,9 @@ typedef struct * using it. 
* @{ */ -#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) +#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) #define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) -#define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) +#define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) #define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) #define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) #define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) @@ -4302,9 +4350,9 @@ typedef struct #define __HAL_RCC_CEC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CECEN)) != RESET) #define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) -#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) +#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) #define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) -#define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) +#define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) #define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) #define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) #define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == RESET) @@ -4324,70 +4372,70 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable * @brief Enable or disable the High Speed APB (APB2) peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. 
* @{ */ #define __HAL_RCC_TIM8_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ADC2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_ADC3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SAI1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SAI2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SDIO_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI4_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM10_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - 
SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SDIO_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDIOEN)) #define __HAL_RCC_SPI4_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI4EN)) #define __HAL_RCC_TIM10_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM10EN)) @@ -4407,31 +4455,32 @@ typedef struct * using it. * @{ */ -#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) -#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) -#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) +#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) +#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) +#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) #define __HAL_RCC_TIM8_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) != RESET) -#define __HAL_RCC_ADC2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) != RESET) -#define __HAL_RCC_ADC3_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) != RESET) +#define __HAL_RCC_ADC2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) != RESET) +#define __HAL_RCC_ADC3_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) != RESET) #define __HAL_RCC_SAI1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) != RESET) #define __HAL_RCC_SAI2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI2EN)) != RESET) -#define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) -#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) -#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) +#define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) +#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) +#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) #define __HAL_RCC_TIM8_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) == RESET) -#define __HAL_RCC_ADC2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) == RESET) -#define __HAL_RCC_ADC3_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) == RESET) +#define __HAL_RCC_ADC2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) == RESET) +#define __HAL_RCC_ADC3_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) == RESET) #define __HAL_RCC_SAI1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) == RESET) -#define __HAL_RCC_SAI2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI2EN)) == RESET) +#define __HAL_RCC_SAI2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI2EN)) == RESET) /** * @} */ - -/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset + +/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset * @brief Force or release AHB1 peripheral reset. 
* @{ */ +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x206010FFU) #define __HAL_RCC_GPIOD_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIODRST)) #define __HAL_RCC_GPIOE_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOERST)) #define __HAL_RCC_GPIOF_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOFRST)) @@ -4449,11 +4498,11 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset +/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset * @brief Force or release AHB2 peripheral reset. * @{ */ -#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x00000081U) #define __HAL_RCC_USB_OTG_FS_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_OTGFSRST)) #define __HAL_RCC_RNG_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_RNGRST)) #define __HAL_RCC_DCMI_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_DCMIRST)) @@ -4466,12 +4515,12 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset +/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset * @brief Force or release AHB3 peripheral reset. * @{ - */ -#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0xFFFFFFFFU) -#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) + */ +#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0x00000003U) +#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) #define __HAL_RCC_FMC_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_FMCRST)) #define __HAL_RCC_QSPI_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_QSPIRST)) @@ -4482,10 +4531,11 @@ typedef struct * @} */ -/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset +/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset * @brief Force or release APB1 peripheral reset. * @{ */ +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0x3FFFC9FFU) #define __HAL_RCC_TIM6_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM6RST)) #define __HAL_RCC_TIM7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM7RST)) #define __HAL_RCC_TIM12_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM12RST)) @@ -4505,7 +4555,7 @@ typedef struct #define __HAL_RCC_TIM4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM4RST)) #define __HAL_RCC_SPI3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI3RST)) #define __HAL_RCC_I2C3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C3RST)) - + #define __HAL_RCC_TIM2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM2RST)) #define __HAL_RCC_TIM3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM3RST)) #define __HAL_RCC_TIM4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM4RST)) @@ -4529,12 +4579,13 @@ typedef struct * @} */ -/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset +/** @defgroup RCCEx_APB2_Force_Release_Reset APB2 Force Release Reset * @brief Force or release APB2 peripheral reset. 
* @{ */ +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x00C77933U) #define __HAL_RCC_TIM8_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM8RST)) -#define __HAL_RCC_SAI1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SAI1RST)) +#define __HAL_RCC_SAI1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SAI1RST)) #define __HAL_RCC_SAI2_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SAI2RST)) #define __HAL_RCC_SDIO_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDIORST)) #define __HAL_RCC_SPI4_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI4RST)) @@ -4545,12 +4596,12 @@ typedef struct #define __HAL_RCC_TIM10_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM10RST)) #define __HAL_RCC_TIM8_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM8RST)) #define __HAL_RCC_SAI1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SAI1RST)) -#define __HAL_RCC_SAI2_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SAI2RST)) +#define __HAL_RCC_SAI2_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SAI2RST)) /** * @} */ -/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable +/** @defgroup RCCEx_AHB1_LowPower_Enable_Disable AHB1 Peripheral Low Power Enable Disable * @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. * @note Peripheral clock gating in SLEEP mode can be used to further reduce * power consumption. @@ -4629,7 +4680,7 @@ typedef struct * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. * @note By default, all peripheral clocks are enabled during SLEEP mode. * @{ - */ + */ #define __HAL_RCC_TIM6_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM6LPEN)) #define __HAL_RCC_TIM7_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM7LPEN)) #define __HAL_RCC_TIM12_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM12LPEN)) @@ -4706,59 +4757,59 @@ typedef struct /*----------------------------------------------------------------------------*/ /*-------STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx-------*/ -#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) /** @defgroup RCCEx_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable * @brief Enables or disables the AHB1 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. 
* @{ */ -#if defined(STM32F412Rx) || defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F412Rx) || defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_GPIOD_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + UNUSED(tmpreg); \ + } while(0U) #endif /* STM32F412Rx || STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ -#if defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_GPIOE_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + UNUSED(tmpreg); \ + } while(0U) #endif /* STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ -#if defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_GPIOF_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_GPIOG_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ - UNUSED(tmpreg); \ - } while(0U) -#endif /* STM32F412Zx || STM32F413xx || STM32F423xx */ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F412Zx || STM32F413xx || STM32F423xx */ #define __HAL_RCC_CRC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ - UNUSED(tmpreg); \ - } while(0U) -#if defined(STM32F412Rx) || defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0U) +#if defined(STM32F412Rx) || defined(STM32F412Vx) || defined(STM32F412Zx) || 
defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_GPIOD_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIODEN)) #endif /* STM32F412Rx || STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ -#if defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_GPIOE_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOEEN)) #endif /* STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ #if defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) @@ -4807,35 +4858,35 @@ typedef struct /** @defgroup RCCEx_AHB2_Clock_Enable_Disable AHB2 Peripheral Clock Enable Disable * @brief Enable or disable the AHB2 peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. * @{ */ #if defined(STM32F423xx) #define __HAL_RCC_AES_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_AESEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_AESEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_AESEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_AESEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_AES_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_AESEN)) #endif /* STM32F423xx */ - + #define __HAL_RCC_RNG_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_RNG_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_RNGEN)) - + #define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() do {(RCC->AHB2ENR |= (RCC_AHB2ENR_OTGFSEN));\ - __HAL_RCC_SYSCFG_CLK_ENABLE();\ + __HAL_RCC_SYSCFG_CLK_ENABLE();\ }while(0U) - + #define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_OTGFSEN)) /** * @} @@ -4852,42 +4903,42 @@ typedef struct #define __HAL_RCC_AES_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_AESEN)) != RESET) #define __HAL_RCC_AES_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_AESEN)) == RESET) #endif /* STM32F423xx */ - + #define __HAL_RCC_USB_OTG_FS_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) != RESET) #define __HAL_RCC_USB_OTG_FS_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) - -#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) != RESET) -#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) == RESET) + +#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) != RESET) +#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) == RESET) /** * @} - */ + */ /** @defgroup RCCEx_AHB3_Clock_Enable_Disable AHB3 Peripheral Clock Enable Disable * @brief Enables or disables the AHB3 peripheral clock. 
* @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before - * using it. + * is disabled and the application software has to enable this clock before + * using it. * @{ */ #if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_FSMC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FSMCEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FSMCEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FSMCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FSMCEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_QSPI_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_FSMC_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_FSMCEN)) #define __HAL_RCC_QSPI_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_QSPIEN)) -#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx || STM32F423xx */ +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx || STM32F423xx */ /** * @} */ @@ -4900,8 +4951,8 @@ typedef struct * @{ */ #if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_FSMC_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FSMCEN)) != RESET) -#define __HAL_RCC_QSPI_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) != RESET) +#define __HAL_RCC_FSMC_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FSMCEN)) != RESET) +#define __HAL_RCC_QSPI_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) != RESET) #define __HAL_RCC_FSMC_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FSMCEN)) == RESET) #define __HAL_RCC_QSPI_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) == RESET) @@ -4910,181 +4961,179 @@ typedef struct /** * @} */ - + /** @defgroup RCCEx_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before - * using it. + * is disabled and the application software has to enable this clock before + * using it. 
* @{ */ #define __HAL_RCC_TIM6_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM7_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM12_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM13_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM14_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ - UNUSED(tmpreg); \ - } while(0U) -#if defined(STM32F413xx) || defined(STM32F423xx) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + UNUSED(tmpreg); \ + } while(0U) +#if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_LPTIM1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ - UNUSED(tmpreg); \ - } while(0U) -#endif /* STM32F413xx || STM32F423xx */ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_RTCAPB_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCAPBEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCAPBEN);\ - UNUSED(tmpreg); \ - } while(0U) -#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || 
defined(STM32F423xx) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCAPBEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCAPBEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_USART3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ - UNUSED(tmpreg); \ - } while(0U) -#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx || STM32F423xx */ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + UNUSED(tmpreg); \ + } while(0U) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_UART4_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_UART5_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + UNUSED(tmpreg); \ + } while(0U) #endif /* STM32F413xx || STM32F423xx */ - + #define __HAL_RCC_FMPI2C1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_FMPI2C1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CAN1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_CAN2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + UNUSED(tmpreg); \ + } while(0U) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_CAN3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - 
SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN3EN);\ + UNUSED(tmpreg); \ + } while(0U) #endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_TIM2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM4_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_I2C3_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + UNUSED(tmpreg); \ + } while(0U) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_DAC_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_UART7_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - 
SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_UART8_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ + UNUSED(tmpreg); \ + } while(0U) #endif /* STM32F413xx || STM32F423xx */ - + #define __HAL_RCC_TIM2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM2EN)) #define __HAL_RCC_TIM3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM3EN)) #define __HAL_RCC_TIM4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM4EN)) @@ -5092,30 +5141,28 @@ typedef struct #define __HAL_RCC_TIM7_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM7EN)) #define __HAL_RCC_TIM12_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM12EN)) #define __HAL_RCC_TIM13_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM13EN)) -#define __HAL_RCC_TIM14_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM14EN)) -#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_TIM14_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM14EN)) +#if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_LPTIM1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_LPTIM1EN)) #endif /* STM32F413xx || STM32F423xx */ -#define __HAL_RCC_RTCAPB_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_RTCAPBEN)) +#define __HAL_RCC_RTCAPB_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_RTCAPBEN)) #define __HAL_RCC_SPI3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_SPI3EN)) -#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_USART3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_USART3EN)) -#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx || STM32F423xx */ -#if defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_UART4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART4EN)) #define __HAL_RCC_UART5_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART5EN)) -#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_I2C3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C3EN)) #define __HAL_RCC_FMPI2C1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_FMPI2C1EN)) #define __HAL_RCC_CAN1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN1EN)) #define __HAL_RCC_CAN2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN2EN)) #if defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_CAN3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN3EN)) +#define __HAL_RCC_CAN3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN3EN)) #define __HAL_RCC_DAC_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_DACEN)) #define __HAL_RCC_UART7_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART7EN)) #define __HAL_RCC_UART8_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART8EN)) -#endif /* STM32F413xx || STM32F423xx */ - +#endif /* STM32F413xx || STM32F423xx */ + 
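A minimal usage sketch of the clock-enable macros above, assuming an STM32F412/F413 target where GPIOD and USART3 are present; the PD8/PD9 pins and the AF7 routing are illustrative assumptions and are not taken from this header. Each __HAL_RCC_x_CLK_ENABLE() sets the ENR bit and immediately reads it back, so the enable has propagated before the first register access to the peripheral.

/* Sketch only: enable bus clocks before touching the peripheral.
   Assumes "stm32f4xx_hal.h" and an STM32F412/F413 device header. */
#include "stm32f4xx_hal.h"

void HAL_UART_MspInit(UART_HandleTypeDef *huart)
{
  GPIO_InitTypeDef gpio = {0};

  if (huart->Instance == USART3)
  {
    __HAL_RCC_GPIOD_CLK_ENABLE();   /* AHB1ENR: set GPIODEN, then read it back (delay) */
    __HAL_RCC_USART3_CLK_ENABLE();  /* APB1ENR: set USART3EN, same read-back pattern   */

    gpio.Pin       = GPIO_PIN_8 | GPIO_PIN_9;  /* assumed USART3 TX/RX pins */
    gpio.Mode      = GPIO_MODE_AF_PP;
    gpio.Pull      = GPIO_PULLUP;
    gpio.Speed     = GPIO_SPEED_FREQ_HIGH;
    gpio.Alternate = GPIO_AF7_USART3;          /* assumed alternate-function mapping */
    HAL_GPIO_Init(GPIOD, &gpio);
  }
}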
/** * @} */ @@ -5127,38 +5174,36 @@ typedef struct * using it. * @{ */ -#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) +#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) #define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) #define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) #define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) #define __HAL_RCC_TIM7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) != RESET) #define __HAL_RCC_TIM12_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) != RESET) #define __HAL_RCC_TIM13_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) != RESET) -#define __HAL_RCC_TIM14_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) != RESET) +#define __HAL_RCC_TIM14_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) != RESET) #if defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_LPTIM1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) != RESET) -#endif /* STM32F413xx || STM32F423xx */ -#define __HAL_RCC_RTCAPB_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_RTCAPBEN)) != RESET) +#define __HAL_RCC_LPTIM1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) != RESET) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_RTCAPB_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_RTCAPBEN)) != RESET) #define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) -#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_USART3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) != RESET) -#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx | STM32F423xx */ #if defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_UART4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) != RESET) -#define __HAL_RCC_UART5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) != RESET) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_UART4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) != RESET) +#define __HAL_RCC_UART5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) != RESET) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) #define __HAL_RCC_FMPI2C1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_FMPI2C1EN)) != RESET) #define __HAL_RCC_CAN1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN))!= RESET) #define __HAL_RCC_CAN2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) != RESET) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_CAN3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN3EN)) != RESET) -#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) +#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) #define __HAL_RCC_UART7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART7EN)) != RESET) -#define __HAL_RCC_UART8_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) != RESET) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_UART8_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) != RESET) +#endif /* STM32F413xx || STM32F423xx */ -#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) +#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & 
(RCC_APB1ENR_TIM2EN)) == RESET) #define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) #define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) #define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == RESET) @@ -5166,139 +5211,137 @@ typedef struct #define __HAL_RCC_TIM12_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) == RESET) #define __HAL_RCC_TIM13_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) == RESET) #define __HAL_RCC_TIM14_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) == RESET) -#if defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_LPTIM1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) == RESET) -#endif /* STM32F413xx || STM32F423xx */ -#define __HAL_RCC_RTCAPB_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_RTCAPBEN)) == RESET) +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_LPTIM1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) == RESET) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_RTCAPB_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_RTCAPBEN)) == RESET) #define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) -#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_USART3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) == RESET) -#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx | STM32F423xx */ -#if defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_UART4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) == RESET) -#define __HAL_RCC_UART5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) == RESET) -#endif /* STM32F413xx || STM32F423xx */ +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_UART4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) == RESET) +#define __HAL_RCC_UART5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) == RESET) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) #define __HAL_RCC_FMPI2C1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_FMPI2C1EN)) == RESET) #define __HAL_RCC_CAN1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) == RESET) #define __HAL_RCC_CAN2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) == RESET) -#if defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_CAN3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN3EN)) == RESET) -#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) +#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) #define __HAL_RCC_UART7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART7EN)) == RESET) #define __HAL_RCC_UART8_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) == RESET) -#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F413xx || STM32F423xx */ /** * @} - */ + */ /** @defgroup RCCEx_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable * @brief Enable or disable the High Speed APB (APB2) peripheral clock. * @note After reset, the peripheral clock (used for registers read/write access) - * is disabled and the application software has to enable this clock before + * is disabled and the application software has to enable this clock before * using it. 
* @{ */ #define __HAL_RCC_TIM8_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + UNUSED(tmpreg); \ + } while(0U) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_UART9_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_UART9EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_UART9EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_UART9EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_UART9EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_UART10_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_UART10EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_UART10EN);\ - UNUSED(tmpreg); \ - } while(0U) -#endif /* STM32F413xx || STM32F423xx */ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_UART10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_UART10EN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_SDIO_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDIOEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI4_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_EXTIT_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_EXTITEN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_EXTITEN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_EXTITEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_EXTITEN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_TIM10_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = 
READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + UNUSED(tmpreg); \ + } while(0U) #define __HAL_RCC_SPI5_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + UNUSED(tmpreg); \ + } while(0U) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_SAI1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ - UNUSED(tmpreg); \ - } while(0U) -#endif /* STM32F413xx || STM32F423xx */ + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_DFSDM1_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM1EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM1EN);\ - UNUSED(tmpreg); \ - } while(0U) + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM1EN);\ + UNUSED(tmpreg); \ + } while(0U) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_DFSDM2_CLK_ENABLE() do { \ - __IO uint32_t tmpreg = 0x00U; \ - SET_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM2EN);\ - /* Delay after an RCC peripheral clock enabling */ \ - tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM2EN);\ - UNUSED(tmpreg); \ - } while(0U) -#endif /* STM32F413xx || STM32F423xx */ - + __IO uint32_t tmpreg = 0x00U; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM2EN);\ + UNUSED(tmpreg); \ + } while(0U) +#endif /* STM32F413xx || STM32F423xx */ + #define __HAL_RCC_TIM8_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM8EN)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_UART9_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_UART9EN)) -#define __HAL_RCC_UART10_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_UART10EN)) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_UART10_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_UART10EN)) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_SDIO_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDIOEN)) #define __HAL_RCC_SPI4_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI4EN)) #define __HAL_RCC_EXTIT_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_EXTITEN)) #define __HAL_RCC_TIM10_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM10EN)) #define __HAL_RCC_SPI5_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI5EN)) #if defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_SAI1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SAI1EN)) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SAI1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SAI1EN)) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_DFSDM1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_DFSDM1EN)) #if 
defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_DFSDM2_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_DFSDM2EN)) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_DFSDM2_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_DFSDM2EN)) +#endif /* STM32F413xx || STM32F423xx */ /** * @} */ @@ -5313,46 +5356,58 @@ typedef struct #define __HAL_RCC_TIM8_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) != RESET) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_UART9_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_UART9EN)) != RESET) -#define __HAL_RCC_UART10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_UART10EN)) != RESET) -#endif /* STM32F413xx || STM32F423xx */ -#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) +#define __HAL_RCC_UART10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_UART10EN)) != RESET) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) != RESET) #define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) #define __HAL_RCC_EXTIT_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_EXTITEN)) != RESET) #define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) #define __HAL_RCC_SPI5_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) != RESET) #if defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_SAI1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) != RESET) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SAI1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) != RESET) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_DFSDM1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DFSDM1EN)) != RESET) #if defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_DFSDM2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DFSDM2EN)) != RESET) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_DFSDM2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DFSDM2EN)) != RESET) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_TIM8_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) == RESET) #if defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_UART9_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_UART9EN)) == RESET) -#define __HAL_RCC_UART10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_UART10EN)) == RESET) -#endif /* STM32F413xx || STM32F423xx */ -#define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) +#define __HAL_RCC_UART9_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_UART9EN)) == RESET) +#define __HAL_RCC_UART10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_UART10EN)) == RESET) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDIOEN)) == RESET) #define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) #define __HAL_RCC_EXTIT_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_EXTITEN)) == RESET) #define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) #define __HAL_RCC_SPI5_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) == RESET) #if defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_SAI1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) == RESET) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SAI1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) == RESET) +#endif /* STM32F413xx || 
STM32F423xx */ #define __HAL_RCC_DFSDM1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DFSDM1EN)) == RESET) #if defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_DFSDM2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DFSDM2EN)) == RESET) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_DFSDM2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DFSDM2EN)) == RESET) +#endif /* STM32F413xx || STM32F423xx */ /** * @} */ - -/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset + +/** @defgroup RCCEx_AHB1_Force_Release_Reset AHB1 Force Release Reset * @brief Force or release AHB1 peripheral reset. * @{ */ +#if defined (STM32F412Zx) || defined(STM32F413xx) || defined (STM32F423xx) +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x006010FFU) +#endif /* STM32F412Zx || STM32F413xx || STM32F423xx */ +#if defined (STM32F412Cx) +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x00601087U) +#endif /* STM32F412Cx */ +#if defined (STM32F412Vx) +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x0060109FU) +#endif /* STM32F412Vx */ +#if defined (STM32F412Rx) +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0x0060108FU) +#endif /* STM32F412Rx */ #if defined(STM32F412Rx) || defined(STM32F412Vx) || defined(STM32F412Zx) || defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_GPIOD_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIODRST)) #endif /* STM32F412Rx || STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ @@ -5380,18 +5435,19 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset +/** @defgroup RCCEx_AHB2_Force_Release_Reset AHB2 Force Release Reset * @brief Force or release AHB2 peripheral reset. * @{ */ -#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0xFFFFFFFFU) -#define __HAL_RCC_AHB2_RELEASE_RESET() (RCC->AHB2RSTR = 0x00U) - #if defined(STM32F423xx) +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x000000D0U) #define __HAL_RCC_AES_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_AESRST)) -#define __HAL_RCC_AES_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_AESRST)) -#endif /* STM32F423xx */ - +#define __HAL_RCC_AES_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_AESRST)) +#else +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0x000000C0U) +#endif /* STM32F423xx */ +#define __HAL_RCC_AHB2_RELEASE_RESET() (RCC->AHB2RSTR = 0x00U) + #define __HAL_RCC_USB_OTG_FS_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_OTGFSRST)) #define __HAL_RCC_USB_OTG_FS_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_OTGFSRST)) @@ -5401,20 +5457,20 @@ typedef struct * @} */ -/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset +/** @defgroup RCCEx_AHB3_Force_Release_Reset AHB3 Force Release Reset * @brief Force or release AHB3 peripheral reset. 
* @{ - */ + */ #if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0xFFFFFFFFU) -#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) +#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0x00000003U) +#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) #define __HAL_RCC_FSMC_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_FSMCRST)) #define __HAL_RCC_QSPI_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_QSPIRST)) #define __HAL_RCC_FSMC_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_FSMCRST)) #define __HAL_RCC_QSPI_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_QSPIRST)) -#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx || STM32F423xx */ +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx || STM32F423xx */ #if defined(STM32F412Cx) #define __HAL_RCC_AHB3_FORCE_RESET() #define __HAL_RCC_AHB3_RELEASE_RESET() @@ -5429,30 +5485,34 @@ typedef struct * @} */ -/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset +/** @defgroup RCCEx_APB1_Force_Release_Reset APB1 Force Release Reset * @brief Force or release APB1 peripheral reset. * @{ */ +#if defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0xFFFECBFFU) +#endif /* STM32F413xx || STM32F423xx */ +#if defined (STM32F412Zx) || defined (STM32F412Vx) || defined (STM32F412Rx) || defined (STM32F412Cx) +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0x17E6C9FFU) +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ #define __HAL_RCC_TIM2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM2RST)) -#define __HAL_RCC_TIM3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM3RST)) -#define __HAL_RCC_TIM4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_TIM3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM4RST)) #define __HAL_RCC_TIM6_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM6RST)) #define __HAL_RCC_TIM7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM7RST)) #define __HAL_RCC_TIM12_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM12RST)) #define __HAL_RCC_TIM13_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM13RST)) -#define __HAL_RCC_TIM14_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM14RST)) +#define __HAL_RCC_TIM14_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM14RST)) #if defined(STM32F413xx) || defined(STM32F423xx) -#define __HAL_RCC_LPTIM1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_LPTIM1RST)) -#endif /* STM32F413xx || STM32F423xx */ -#define __HAL_RCC_SPI3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI3RST)) -#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_LPTIM1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_LPTIM1RST)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_SPI3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI3RST)) #define __HAL_RCC_USART3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_USART3RST)) -#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx || STM32F423xx */ #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_UART4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART4RST)) -#define __HAL_RCC_UART5_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART5RST)) -#endif /* STM32F413xx || STM32F423xx */ -#define 
__HAL_RCC_I2C3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C3RST)) +#define __HAL_RCC_UART5_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART5RST)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_I2C3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C3RST)) #define __HAL_RCC_FMPI2C1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_FMPI2C1RST)) #define __HAL_RCC_CAN1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN1RST)) #define __HAL_RCC_CAN2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN2RST)) @@ -5460,8 +5520,8 @@ typedef struct #define __HAL_RCC_CAN3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN3RST)) #define __HAL_RCC_DAC_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_DACRST)) #define __HAL_RCC_UART7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART7RST)) -#define __HAL_RCC_UART8_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART8RST)) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_UART8_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART8RST)) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_TIM2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM2RST)) #define __HAL_RCC_TIM3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM3RST)) @@ -5470,19 +5530,17 @@ typedef struct #define __HAL_RCC_TIM7_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM7RST)) #define __HAL_RCC_TIM12_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM12RST)) #define __HAL_RCC_TIM13_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM13RST)) -#define __HAL_RCC_TIM14_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM14RST)) +#define __HAL_RCC_TIM14_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM14RST)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_LPTIM1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_LPTIM1RST)) -#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_SPI3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPI3RST)) -#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_USART3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_USART3RST)) -#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx || STM32F423xx */ #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_UART4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART4RST)) #define __HAL_RCC_UART5_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART5RST)) -#endif /* STM32F413xx || STM32F423xx */ -#define __HAL_RCC_I2C3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C3RST)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_I2C3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C3RST)) #define __HAL_RCC_FMPI2C1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_FMPI2C1RST)) #define __HAL_RCC_CAN1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN1RST)) #define __HAL_RCC_CAN2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN2RST)) @@ -5490,8 +5548,8 @@ typedef struct #define __HAL_RCC_CAN3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN3RST)) #define __HAL_RCC_DAC_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_DACRST)) #define __HAL_RCC_UART7_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART7RST)) -#define __HAL_RCC_UART8_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART8RST)) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_UART8_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART8RST)) +#endif /* STM32F413xx || STM32F423xx */ /** * @} */ @@ -5500,39 +5558,45 @@ typedef 
struct * @brief Force or release APB2 peripheral reset. * @{ */ +#if defined(STM32F413xx)|| defined(STM32F423xx) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x035779F3U) +#endif /* STM32F413xx || STM32F423xx */ +#if defined (STM32F412Zx) || defined (STM32F412Vx) || defined (STM32F412Rx) || defined (STM32F412Cx) +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0x01177933U) +#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ #define __HAL_RCC_TIM8_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM8RST)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_UART9_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_UART9RST)) -#define __HAL_RCC_UART10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_UART10RST)) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_UART10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_UART10RST)) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_SDIO_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDIORST)) #define __HAL_RCC_SPI4_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI4RST)) -#define __HAL_RCC_TIM10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM10RST)) +#define __HAL_RCC_TIM10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM10RST)) #define __HAL_RCC_SPI5_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI5RST)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_SAI1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SAI1RST)) -#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_DFSDM1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_DFSDM1RST)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_DFSDM2_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_DFSDM2RST)) -#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_TIM8_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM8RST)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_UART9_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_UART9RST)) -#define __HAL_RCC_UART10_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_UART10RST)) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_UART10_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_UART10RST)) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_SDIO_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SDIORST)) #define __HAL_RCC_SPI4_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI4RST)) #define __HAL_RCC_TIM10_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM10RST)) #define __HAL_RCC_SPI5_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI5RST)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_SAI1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SAI1RST)) -#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_DFSDM1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_DFSDM1RST)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_DFSDM2_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_DFSDM2RST)) -#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F413xx || STM32F423xx */ /** * @} */ @@ -5554,7 +5618,7 @@ typedef struct #define __HAL_RCC_SRAM1_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM1LPEN)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_SRAM2_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM2LPEN)) -#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE() 
(RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIODLPEN)) #define __HAL_RCC_GPIOE_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOELPEN)) @@ -5565,7 +5629,7 @@ typedef struct #define __HAL_RCC_SRAM1_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM1LPEN)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_SRAM2_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM2LPEN)) -#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F413xx || STM32F423xx */ /** * @} */ @@ -5579,10 +5643,10 @@ typedef struct * @{ */ #if defined(STM32F423xx) -#define __HAL_RCC_AES_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_AESLPEN)) +#define __HAL_RCC_AES_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_AESLPEN)) #define __HAL_RCC_AES_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_AESLPEN)) #endif /* STM32F423xx */ - + #define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_OTGFSLPEN)) #define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_OTGFSLPEN)) @@ -5622,7 +5686,7 @@ typedef struct */ #define __HAL_RCC_TIM2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM2LPEN)) #define __HAL_RCC_TIM3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM3LPEN)) -#define __HAL_RCC_TIM4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM4LPEN)) #define __HAL_RCC_TIM6_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM6LPEN)) #define __HAL_RCC_TIM7_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM7LPEN)) #define __HAL_RCC_TIM12_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM12LPEN)) @@ -5630,17 +5694,15 @@ typedef struct #define __HAL_RCC_TIM14_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM14LPEN)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_LPTIM1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_LPTIM1LPEN)) -#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_RTCAPB_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_RTCAPBLPEN)) -#define __HAL_RCC_SPI3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPI3LPEN)) -#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_SPI3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPI3LPEN)) #define __HAL_RCC_USART3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_USART3LPEN)) -#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx || STM32F423xx */ #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_UART4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART4LPEN)) #define __HAL_RCC_UART5_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART5LPEN)) -#endif /* STM32F413xx || STM32F423xx */ -#define __HAL_RCC_I2C3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C3LPEN)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_I2C3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C3LPEN)) #define __HAL_RCC_FMPI2C1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_FMPI2C1LPEN)) #define __HAL_RCC_CAN1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN1LPEN)) #define __HAL_RCC_CAN2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN2LPEN)) @@ -5648,8 +5710,8 @@ typedef struct #define __HAL_RCC_CAN3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN3LPEN)) #define __HAL_RCC_DAC_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= 
(RCC_APB1LPENR_DACLPEN)) #define __HAL_RCC_UART7_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART7LPEN)) -#define __HAL_RCC_UART8_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART8LPEN)) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_UART8_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART8LPEN)) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_TIM2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM2LPEN)) #define __HAL_RCC_TIM3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM3LPEN)) @@ -5661,17 +5723,15 @@ typedef struct #define __HAL_RCC_TIM14_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM14LPEN)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_LPTIM1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_LPTIM1LPEN)) -#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_RTCAPB_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_RTCAPBLPEN)) -#define __HAL_RCC_SPI3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPI3LPEN)) -#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) +#define __HAL_RCC_SPI3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPI3LPEN)) #define __HAL_RCC_USART3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_USART3LPEN)) -#endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F413xx || STM32F423xx */ #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_UART4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART4LPEN)) #define __HAL_RCC_UART5_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART5LPEN)) -#endif /* STM32F413xx || STM32F423xx */ -#define __HAL_RCC_I2C3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C3LPEN)) +#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_I2C3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C3LPEN)) #define __HAL_RCC_FMPI2C1_CLK_SLEEP_DISABLE()(RCC->APB1LPENR &= ~(RCC_APB1LPENR_FMPI2C1LPEN)) #define __HAL_RCC_CAN1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN1LPEN)) #define __HAL_RCC_CAN2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN2LPEN)) @@ -5679,8 +5739,8 @@ typedef struct #define __HAL_RCC_CAN3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN3LPEN)) #define __HAL_RCC_DAC_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_DACLPEN)) #define __HAL_RCC_UART7_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART7LPEN)) -#define __HAL_RCC_UART8_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART8LPEN)) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_UART8_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART8LPEN)) +#endif /* STM32F413xx || STM32F423xx */ /** * @} */ @@ -5696,38 +5756,38 @@ typedef struct #define __HAL_RCC_TIM8_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM8LPEN)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_UART9_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_UART9LPEN)) -#define __HAL_RCC_UART10_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_UART10LPEN)) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_UART10_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_UART10LPEN)) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_SDIO_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SDIOLPEN)) -#define __HAL_RCC_SPI4_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI4LPEN)) -#define 
__HAL_RCC_EXTIT_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_EXTITLPEN)) -#define __HAL_RCC_TIM10_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM10LPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_EXTIT_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_EXTITLPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM10LPEN)) #define __HAL_RCC_SPI5_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI5LPEN)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_SAI1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SAI1LPEN)) -#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_DFSDM1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_DFSDM1LPEN)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_DFSDM2_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_DFSDM2LPEN)) #endif /* STM32F413xx || STM32F423xx */ - + #define __HAL_RCC_TIM8_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM8LPEN)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_UART9_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_UART9LPEN)) -#define __HAL_RCC_UART10_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_UART10LPEN)) -#endif /* STM32F413xx || STM32F423xx */ +#define __HAL_RCC_UART10_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_UART10LPEN)) +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_SDIO_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SDIOLPEN)) #define __HAL_RCC_SPI4_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI4LPEN)) #define __HAL_RCC_EXTIT_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_EXTITLPEN)) -#define __HAL_RCC_TIM10_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM10LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM10LPEN)) #define __HAL_RCC_SPI5_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI5LPEN)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_SAI1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SAI1LPEN)) -#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F413xx || STM32F423xx */ #define __HAL_RCC_DFSDM1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_DFSDM1LPEN)) #if defined(STM32F413xx) || defined(STM32F423xx) #define __HAL_RCC_DFSDM2_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_DFSDM2LPEN)) -#endif /* STM32F413xx || STM32F423xx */ +#endif /* STM32F413xx || STM32F423xx */ /** * @} */ @@ -5744,7 +5804,7 @@ typedef struct * This parameter can be one of the following values: * @arg RCC_PLLSOURCE_HSI: HSI oscillator clock selected as PLL clock entry * @arg RCC_PLLSOURCE_HSE: HSE oscillator clock selected as PLL clock entry - * @note This clock source (RCC_PLLSource) is common for the main PLL and PLLI2S. + * @note This clock source (RCC_PLLSource) is common for the main PLL and PLLI2S. * @param __PLLM__ specifies the division factor for PLL VCO input clock * This parameter must be a number between Min_Data = 2 and Max_Data = 63. * @note You have to set the PLLM parameter correctly to ensure that the VCO input @@ -5754,29 +5814,29 @@ typedef struct * This parameter must be a number between Min_Data = 50 and Max_Data = 432. * @note You have to set the PLLN parameter correctly to ensure that the VCO * output frequency is between 100 and 432 MHz. 
- * + * * @param __PLLP__ specifies the division factor for main system clock (SYSCLK) * This parameter must be a number in the range {2, 4, 6, or 8}. - * + * * @param __PLLQ__ specifies the division factor for OTG FS, SDIO and RNG clocks * This parameter must be a number between Min_Data = 2 and Max_Data = 15. * @note If the USB OTG FS is used in your application, you have to set the * PLLQ parameter correctly to have 48 MHz clock for the USB. However, * the SDIO and RNG need a frequency lower than or equal to 48 MHz to work * correctly. - * + * * @param __PLLR__ PLL division factor for I2S, SAI, SYSTEM, SPDIFRX clocks. * This parameter must be a number between Min_Data = 2 and Max_Data = 7. * @note This parameter is only available in STM32F446xx/STM32F469xx/STM32F479xx/ STM32F412Zx/STM32F412Vx/STM32F412Rx/STM32F412Cx/STM32F413xx/STM32F423xx devices. - * + * */ #define __HAL_RCC_PLL_CONFIG(__RCC_PLLSource__, __PLLM__, __PLLN__, __PLLP__, __PLLQ__,__PLLR__) \ - (RCC->PLLCFGR = ((__RCC_PLLSource__) | (__PLLM__) | \ - ((__PLLN__) << RCC_PLLCFGR_PLLN_Pos) | \ - ((((__PLLP__) >> 1U) -1U) << RCC_PLLCFGR_PLLP_Pos) | \ - ((__PLLQ__) << RCC_PLLCFGR_PLLQ_Pos) | \ - ((__PLLR__) << RCC_PLLCFGR_PLLR_Pos))) + (RCC->PLLCFGR = ((__RCC_PLLSource__) | (__PLLM__) | \ + ((__PLLN__) << RCC_PLLCFGR_PLLN_Pos) | \ + ((((__PLLP__) >> 1U) -1U) << RCC_PLLCFGR_PLLP_Pos) | \ + ((__PLLQ__) << RCC_PLLCFGR_PLLQ_Pos) | \ + ((__PLLR__) << RCC_PLLCFGR_PLLR_Pos))) #else /** @brief Macro to configure the main PLL clock source, multiplication and division factors. * @note This function must be used only when the main PLL is disabled. @@ -5784,7 +5844,7 @@ typedef struct * This parameter can be one of the following values: * @arg RCC_PLLSOURCE_HSI: HSI oscillator clock selected as PLL clock entry * @arg RCC_PLLSOURCE_HSE: HSE oscillator clock selected as PLL clock entry - * @note This clock source (RCC_PLLSource) is common for the main PLL and PLLI2S. + * @note This clock source (RCC_PLLSource) is common for the main PLL and PLLI2S. * @param __PLLM__ specifies the division factor for PLL VCO input clock * This parameter must be a number between Min_Data = 2 and Max_Data = 63. * @note You have to set the PLLM parameter correctly to ensure that the VCO input @@ -5798,23 +5858,23 @@ typedef struct * where frequency is between 192 and 432 MHz. * @param __PLLP__ specifies the division factor for main system clock (SYSCLK) * This parameter must be a number in the range {2, 4, 6, or 8}. - * + * * @param __PLLQ__ specifies the division factor for OTG FS, SDIO and RNG clocks * This parameter must be a number between Min_Data = 2 and Max_Data = 15. * @note If the USB OTG FS is used in your application, you have to set the * PLLQ parameter correctly to have 48 MHz clock for the USB. However, * the SDIO and RNG need a frequency lower than or equal to 48 MHz to work * correctly. 
- * + * */ #define __HAL_RCC_PLL_CONFIG(__RCC_PLLSource__, __PLLM__, __PLLN__, __PLLP__, __PLLQ__) \ - (RCC->PLLCFGR = (0x20000000U | (__RCC_PLLSource__) | (__PLLM__)| \ - ((__PLLN__) << RCC_PLLCFGR_PLLN_Pos) | \ - ((((__PLLP__) >> 1U) -1U) << RCC_PLLCFGR_PLLP_Pos) | \ - ((__PLLQ__) << RCC_PLLCFGR_PLLQ_Pos))) - #endif /* STM32F410xx || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ + (RCC->PLLCFGR = (0x20000000U | (__RCC_PLLSource__) | (__PLLM__)| \ + ((__PLLN__) << RCC_PLLCFGR_PLLN_Pos) | \ + ((((__PLLP__) >> 1U) -1U) << RCC_PLLCFGR_PLLP_Pos) | \ + ((__PLLQ__) << RCC_PLLCFGR_PLLQ_Pos))) +#endif /* STM32F410xx || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ /*----------------------------------------------------------------------------*/ - + /*----------------------------PLLI2S Configuration ---------------------------*/ #if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx) || defined(STM32F417xx) || \ defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || \ @@ -5822,7 +5882,7 @@ typedef struct defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || \ defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) -/** @brief Macros to enable or disable the PLLI2S. +/** @brief Macros to enable or disable the PLLI2S. * @note The PLLI2S is disabled by hardware when entering STOP and STANDBY modes. */ #define __HAL_RCC_PLLI2S_ENABLE() (*(__IO uint32_t *) RCC_CR_PLLI2SON_BB = ENABLE) @@ -5834,7 +5894,7 @@ typedef struct #if defined(STM32F446xx) /** @brief Macro to configure the PLLI2S clock multiplication and division factors . * @note This macro must be used only when the PLLI2S is disabled. - * @note PLLI2S clock source is common with the main PLL (configured in + * @note PLLI2S clock source is common with the main PLL (configured in * HAL_RCC_ClockConfig() API). * @param __PLLI2SM__ specifies the division factor for PLLI2S VCO input clock * This parameter must be a number between Min_Data = 2 and Max_Data = 63. @@ -5844,32 +5904,32 @@ typedef struct * * @param __PLLI2SN__ specifies the multiplication factor for PLLI2S VCO output clock * This parameter must be a number between Min_Data = 50 and Max_Data = 432. - * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO + * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. * * @param __PLLI2SP__ specifies division factor for SPDIFRX Clock. * This parameter must be a number in the range {2, 4, 6, or 8}. * @note the PLLI2SP parameter is only available with STM32F446xx Devices - * + * * @param __PLLI2SR__ specifies the division factor for I2S clock * This parameter must be a number between Min_Data = 2 and Max_Data = 7. * @note You have to set the PLLI2SR parameter correctly to not exceed 192 MHz * on the I2S clock frequency. - * + * * @param __PLLI2SQ__ specifies the division factor for SAI clock * This parameter must be a number between Min_Data = 2 and Max_Data = 15. 
*/ #define __HAL_RCC_PLLI2S_CONFIG(__PLLI2SM__, __PLLI2SN__, __PLLI2SP__, __PLLI2SQ__, __PLLI2SR__) \ - (RCC->PLLI2SCFGR = ((__PLLI2SM__) |\ - ((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ - ((((__PLLI2SP__) >> 1U) -1U) << RCC_PLLI2SCFGR_PLLI2SP_Pos) |\ - ((__PLLI2SQ__) << RCC_PLLI2SCFGR_PLLI2SQ_Pos) |\ - ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos))) + (RCC->PLLI2SCFGR = ((__PLLI2SM__) |\ + ((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ + ((((__PLLI2SP__) >> 1U) -1U) << RCC_PLLI2SCFGR_PLLI2SP_Pos) |\ + ((__PLLI2SQ__) << RCC_PLLI2SCFGR_PLLI2SQ_Pos) |\ + ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos))) #elif defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) ||\ defined(STM32F413xx) || defined(STM32F423xx) /** @brief Macro to configure the PLLI2S clock multiplication and division factors . * @note This macro must be used only when the PLLI2S is disabled. - * @note PLLI2S clock source is common with the main PLL (configured in + * @note PLLI2S clock source is common with the main PLL (configured in * HAL_RCC_ClockConfig() API). * @param __PLLI2SM__ specifies the division factor for PLLI2S VCO input clock * This parameter must be a number between Min_Data = 2 and Max_Data = 63. @@ -5879,7 +5939,7 @@ typedef struct * * @param __PLLI2SN__ specifies the multiplication factor for PLLI2S VCO output clock * This parameter must be a number between Min_Data = 50 and Max_Data = 432. - * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO + * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. * * @param __PLLI2SR__ specifies the division factor for I2S clock @@ -5891,18 +5951,18 @@ typedef struct * This parameter must be a number between Min_Data = 2 and Max_Data = 15. */ #define __HAL_RCC_PLLI2S_CONFIG(__PLLI2SM__, __PLLI2SN__, __PLLI2SQ__, __PLLI2SR__) \ - (RCC->PLLI2SCFGR = ((__PLLI2SM__) |\ - ((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ - ((__PLLI2SQ__) << RCC_PLLI2SCFGR_PLLI2SQ_Pos) |\ - ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos))) + (RCC->PLLI2SCFGR = ((__PLLI2SM__) |\ + ((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ + ((__PLLI2SQ__) << RCC_PLLI2SCFGR_PLLI2SQ_Pos) |\ + ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos))) #else /** @brief Macro to configure the PLLI2S clock multiplication and division factors . * @note This macro must be used only when the PLLI2S is disabled. - * @note PLLI2S clock source is common with the main PLL (configured in + * @note PLLI2S clock source is common with the main PLL (configured in * HAL_RCC_ClockConfig() API). * @param __PLLI2SN__ specifies the multiplication factor for PLLI2S VCO output clock * This parameter must be a number between Min_Data = 50 and Max_Data = 432. - * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO + * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. 
* * @param __PLLI2SR__ specifies the division factor for I2S clock @@ -5912,25 +5972,25 @@ typedef struct * */ #define __HAL_RCC_PLLI2S_CONFIG(__PLLI2SN__, __PLLI2SR__) \ - (RCC->PLLI2SCFGR = (((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ - ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos))) + (RCC->PLLI2SCFGR = (((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ + ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos))) #endif /* STM32F446xx */ #if defined(STM32F411xE) /** @brief Macro to configure the PLLI2S clock multiplication and division factors . * @note This macro must be used only when the PLLI2S is disabled. * @note This macro must be used only when the PLLI2S is disabled. - * @note PLLI2S clock source is common with the main PLL (configured in + * @note PLLI2S clock source is common with the main PLL (configured in * HAL_RCC_ClockConfig() API). * @param __PLLI2SM__ specifies the division factor for PLLI2S VCO input clock * This parameter must be a number between Min_Data = 2 and Max_Data = 63. * @note The PLLI2SM parameter is only used with STM32F411xE/STM32F410xx Devices * @note You have to set the PLLI2SM parameter correctly to ensure that the VCO input * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency - * of 2 MHz to limit PLLI2S jitter. + * of 2 MHz to limit PLLI2S jitter. * @param __PLLI2SN__ specifies the multiplication factor for PLLI2S VCO output clock * This parameter must be a number between Min_Data = 192 and Max_Data = 432. - * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO + * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO * output frequency is between Min_Data = 192 and Max_Data = 432 MHz. * @param __PLLI2SR__ specifies the division factor for I2S clock * This parameter must be a number between Min_Data = 2 and Max_Data = 7. @@ -5938,22 +5998,22 @@ typedef struct * on the I2S clock frequency. */ #define __HAL_RCC_PLLI2S_I2SCLK_CONFIG(__PLLI2SM__, __PLLI2SN__, __PLLI2SR__) (RCC->PLLI2SCFGR = ((__PLLI2SM__) |\ - ((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ - ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos))) + ((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ + ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos))) #endif /* STM32F411xE */ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) /** @brief Macro used by the SAI HAL driver to configure the PLLI2S clock multiplication and division factors. * @note This macro must be used only when the PLLI2S is disabled. - * @note PLLI2S clock source is common with the main PLL (configured in - * HAL_RCC_ClockConfig() API) + * @note PLLI2S clock source is common with the main PLL (configured in + * HAL_RCC_ClockConfig() API) * @param __PLLI2SN__ specifies the multiplication factor for PLLI2S VCO output clock. * This parameter must be a number between Min_Data = 50 and Max_Data = 432. - * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO + * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. * @param __PLLI2SQ__ specifies the division factor for SAI1 clock. - * This parameter must be a number between Min_Data = 2 and Max_Data = 15. - * @note the PLLI2SQ parameter is only available with STM32F427xx/437xx/429xx/439xx/469xx/479xx + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. 
+ * @note the PLLI2SQ parameter is only available with STM32F427xx/437xx/429xx/439xx/469xx/479xx * Devices and can be configured using the __HAL_RCC_PLLI2S_PLLSAICLK_CONFIG() macro * @param __PLLI2SR__ specifies the division factor for I2S clock * This parameter must be a number between Min_Data = 2 and Max_Data = 7. @@ -5961,16 +6021,16 @@ typedef struct * on the I2S clock frequency. */ #define __HAL_RCC_PLLI2S_SAICLK_CONFIG(__PLLI2SN__, __PLLI2SQ__, __PLLI2SR__) (RCC->PLLI2SCFGR = ((__PLLI2SN__) << 6U) |\ - ((__PLLI2SQ__) << 24U) |\ - ((__PLLI2SR__) << 28U)) -#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ + ((__PLLI2SQ__) << 24U) |\ + ((__PLLI2SR__) << 28U)) +#endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ /*----------------------------------------------------------------------------*/ /*------------------------------ PLLSAI Configuration ------------------------*/ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) -/** @brief Macros to Enable or Disable the PLLISAI. +/** @brief Macros to Enable or Disable the PLLISAI. * @note The PLLSAI is only available with STM32F429x/439x Devices. - * @note The PLLSAI is disabled by hardware when entering STOP and STANDBY modes. + * @note The PLLSAI is disabled by hardware when entering STOP and STANDBY modes. */ #define __HAL_RCC_PLLSAI_ENABLE() (*(__IO uint32_t *) RCC_CR_PLLSAION_BB = ENABLE) #define __HAL_RCC_PLLSAI_DISABLE() (*(__IO uint32_t *) RCC_CR_PLLSAION_BB = DISABLE) @@ -5984,73 +6044,73 @@ typedef struct * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency * of 1 MHz to limit PLLI2S jitter. * @note The PLLSAIM parameter is only used with STM32F446xx Devices - * + * * @param __PLLSAIN__ specifies the multiplication factor for PLLSAI VCO output clock. * This parameter must be a number between Min_Data = 50 and Max_Data = 432. - * @note You have to set the PLLSAIN parameter correctly to ensure that the VCO + * @note You have to set the PLLSAIN parameter correctly to ensure that the VCO * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. * * @param __PLLSAIP__ specifies division factor for OTG FS, SDIO and RNG clocks. * This parameter must be a number in the range {2, 4, 6, or 8}. * @note the PLLSAIP parameter is only available with STM32F446xx Devices - * + * * @param __PLLSAIQ__ specifies the division factor for SAI clock * This parameter must be a number between Min_Data = 2 and Max_Data = 15. - * + * * @param __PLLSAIR__ specifies the division factor for LTDC clock * This parameter must be a number between Min_Data = 2 and Max_Data = 7. 
- * @note the PLLI2SR parameter is only available with STM32F427/437/429/439xx Devices + * @note the PLLI2SR parameter is only available with STM32F427/437/429/439xx Devices */ #define __HAL_RCC_PLLSAI_CONFIG(__PLLSAIM__, __PLLSAIN__, __PLLSAIP__, __PLLSAIQ__, __PLLSAIR__) \ - (RCC->PLLSAICFGR = ((__PLLSAIM__) | \ - ((__PLLSAIN__) << RCC_PLLSAICFGR_PLLSAIN_Pos) | \ - ((((__PLLSAIP__) >> 1U) -1U) << RCC_PLLSAICFGR_PLLSAIP_Pos) | \ - ((__PLLSAIQ__) << RCC_PLLSAICFGR_PLLSAIQ_Pos))) + (RCC->PLLSAICFGR = ((__PLLSAIM__) | \ + ((__PLLSAIN__) << RCC_PLLSAICFGR_PLLSAIN_Pos) | \ + ((((__PLLSAIP__) >> 1U) -1U) << RCC_PLLSAICFGR_PLLSAIP_Pos) | \ + ((__PLLSAIQ__) << RCC_PLLSAICFGR_PLLSAIQ_Pos))) #endif /* STM32F446xx */ - + #if defined(STM32F469xx) || defined(STM32F479xx) /** @brief Macro to configure the PLLSAI clock multiplication and division factors. - * + * * @param __PLLSAIN__ specifies the multiplication factor for PLLSAI VCO output clock. * This parameter must be a number between Min_Data = 50 and Max_Data = 432. - * @note You have to set the PLLSAIN parameter correctly to ensure that the VCO + * @note You have to set the PLLSAIN parameter correctly to ensure that the VCO * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. * * @param __PLLSAIP__ specifies division factor for SDIO and CLK48 clocks. * This parameter must be a number in the range {2, 4, 6, or 8}. - * + * * @param __PLLSAIQ__ specifies the division factor for SAI clock * This parameter must be a number between Min_Data = 2 and Max_Data = 15. - * + * * @param __PLLSAIR__ specifies the division factor for LTDC clock - * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. */ #define __HAL_RCC_PLLSAI_CONFIG(__PLLSAIN__, __PLLSAIP__, __PLLSAIQ__, __PLLSAIR__) \ - (RCC->PLLSAICFGR = (((__PLLSAIN__) << RCC_PLLSAICFGR_PLLSAIN_Pos) |\ - ((((__PLLSAIP__) >> 1U) -1U) << RCC_PLLSAICFGR_PLLSAIP_Pos) |\ - ((__PLLSAIQ__) << RCC_PLLSAICFGR_PLLSAIQ_Pos) |\ - ((__PLLSAIR__) << RCC_PLLSAICFGR_PLLSAIR_Pos))) -#endif /* STM32F469xx || STM32F479xx */ + (RCC->PLLSAICFGR = (((__PLLSAIN__) << RCC_PLLSAICFGR_PLLSAIN_Pos) |\ + ((((__PLLSAIP__) >> 1U) -1U) << RCC_PLLSAICFGR_PLLSAIP_Pos) |\ + ((__PLLSAIQ__) << RCC_PLLSAICFGR_PLLSAIQ_Pos) |\ + ((__PLLSAIR__) << RCC_PLLSAICFGR_PLLSAIR_Pos))) +#endif /* STM32F469xx || STM32F479xx */ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) /** @brief Macro to configure the PLLSAI clock multiplication and division factors. - * + * * @param __PLLSAIN__ specifies the multiplication factor for PLLSAI VCO output clock. * This parameter must be a number between Min_Data = 50 and Max_Data = 432. - * @note You have to set the PLLSAIN parameter correctly to ensure that the VCO + * @note You have to set the PLLSAIN parameter correctly to ensure that the VCO * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. * * @param __PLLSAIQ__ specifies the division factor for SAI clock * This parameter must be a number between Min_Data = 2 and Max_Data = 15. - * + * * @param __PLLSAIR__ specifies the division factor for LTDC clock * This parameter must be a number between Min_Data = 2 and Max_Data = 7. 
- * @note the PLLI2SR parameter is only available with STM32F427/437/429/439xx Devices + * @note the PLLI2SR parameter is only available with STM32F427/437/429/439xx Devices */ #define __HAL_RCC_PLLSAI_CONFIG(__PLLSAIN__, __PLLSAIQ__, __PLLSAIR__) \ - (RCC->PLLSAICFGR = (((__PLLSAIN__) << RCC_PLLSAICFGR_PLLSAIN_Pos) | \ - ((__PLLSAIQ__) << RCC_PLLSAICFGR_PLLSAIQ_Pos) | \ - ((__PLLSAIR__) << RCC_PLLSAICFGR_PLLSAIR_Pos))) + (RCC->PLLSAICFGR = (((__PLLSAIN__) << RCC_PLLSAICFGR_PLLSAIN_Pos) | \ + ((__PLLSAIQ__) << RCC_PLLSAICFGR_PLLSAIQ_Pos) | \ + ((__PLLSAIR__) << RCC_PLLSAICFGR_PLLSAIR_Pos))) #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx */ #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ @@ -6062,25 +6122,25 @@ typedef struct * @note This function must be called before enabling the PLLI2S. * @param __PLLI2SDivR__ specifies the PLLI2S division factor for SAI1 clock. * This parameter must be a number between 1 and 32. - * SAI1 clock frequency = f(PLLI2SR) / __PLLI2SDivR__ + * SAI1 clock frequency = f(PLLI2SR) / __PLLI2SDivR__ */ #define __HAL_RCC_PLLI2S_PLLSAICLKDIVR_CONFIG(__PLLI2SDivR__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLI2SDIVR, (__PLLI2SDivR__)-1U)) /** @brief Macro to configure the SAI clock Divider coming from PLL. * @param __PLLDivR__ specifies the PLL division factor for SAI1 clock. * This parameter must be a number between 1 and 32. - * SAI1 clock frequency = f(PLLR) / __PLLDivR__ + * SAI1 clock frequency = f(PLLR) / __PLLDivR__ */ -#define __HAL_RCC_PLL_PLLSAICLKDIVR_CONFIG(__PLLDivR__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLDIVR, ((__PLLDivR__)-1U)<<8U)) -#endif /* STM32F413xx || STM32F423xx */ - +#define __HAL_RCC_PLL_PLLSAICLKDIVR_CONFIG(__PLLDivR__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLDIVR, ((__PLLDivR__)-1U)<<8U)) +#endif /* STM32F413xx || STM32F423xx */ + #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F446xx) ||\ defined(STM32F469xx) || defined(STM32F479xx) /** @brief Macro to configure the SAI clock Divider coming from PLLI2S. * @note This function must be called before enabling the PLLI2S. * @param __PLLI2SDivQ__ specifies the PLLI2S division factor for SAI1 clock. * This parameter must be a number between 1 and 32. - * SAI1 clock frequency = f(PLLI2SQ) / __PLLI2SDivQ__ + * SAI1 clock frequency = f(PLLI2SQ) / __PLLI2SDivQ__ */ #define __HAL_RCC_PLLI2S_PLLSAICLKDIVQ_CONFIG(__PLLI2SDivQ__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLI2SDIVQ, (__PLLI2SDivQ__)-1U)) @@ -6088,19 +6148,19 @@ typedef struct * @note This function must be called before enabling the PLLSAI. * @param __PLLSAIDivQ__ specifies the PLLSAI division factor for SAI1 clock . * This parameter must be a number between Min_Data = 1 and Max_Data = 32. - * SAI1 clock frequency = f(PLLSAIQ) / __PLLSAIDivQ__ + * SAI1 clock frequency = f(PLLSAIQ) / __PLLSAIDivQ__ */ #define __HAL_RCC_PLLSAI_PLLSAICLKDIVQ_CONFIG(__PLLSAIDivQ__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLSAIDIVQ, ((__PLLSAIDivQ__)-1U)<<8U)) #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F446xx || STM32F469xx || STM32F479xx */ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) /** @brief Macro to configure the LTDC clock Divider coming from PLLSAI. - * + * * @note The LTDC peripheral is only available with STM32F427/437/429/439/469/479xx Devices. 
- * @note This function must be called before enabling the PLLSAI. + * @note This function must be called before enabling the PLLSAI. * @param __PLLSAIDivR__ specifies the PLLSAI division factor for LTDC clock . * This parameter must be a number between Min_Data = 2 and Max_Data = 16. - * LTDC clock frequency = f(PLLSAIR) / __PLLSAIDivR__ + * LTDC clock frequency = f(PLLSAIR) / __PLLSAIDivR__ */ #define __HAL_RCC_PLLSAI_PLLSAICLKDIVR_CONFIG(__PLLSAIDivR__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLSAIDIVR, (__PLLSAIDivR__))) #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F469xx || STM32F479xx */ @@ -6119,7 +6179,7 @@ typedef struct * @arg RCC_I2SCLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin * used as I2S clock source. */ -#define __HAL_RCC_I2S_CONFIG(__SOURCE__) (*(__IO uint32_t *) RCC_CFGR_I2SSRC_BB = (__SOURCE__)) +#define __HAL_RCC_I2S_CONFIG(__SOURCE__) (MODIFY_REG(RCC->CFGR, RCC_CFGR_I2SSRC, (__SOURCE__))) /** @brief Macro to get the I2S clock source (I2SCLK). @@ -6130,18 +6190,18 @@ typedef struct */ #define __HAL_RCC_GET_I2S_SOURCE() ((uint32_t)(READ_BIT(RCC->CFGR, RCC_CFGR_I2SSRC))) #endif /* STM32F40xxx || STM32F41xxx || STM32F42xxx || STM32F43xxx || STM32F469xx || STM32F479xx */ - + #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) - + /** @brief Macro to configure SAI1BlockA clock source selection. - * @note The SAI peripheral is only available with STM32F427/437/429/439/469/479xx Devices. - * @note This function must be called before enabling PLLSAI, PLLI2S and + * @note The SAI peripheral is only available with STM32F427/437/429/439/469/479xx Devices. + * @note This function must be called before enabling PLLSAI, PLLI2S and * the SAI clock. * @param __SOURCE__ specifies the SAI Block A clock source. * This parameter can be one of the following values: - * @arg RCC_SAIACLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used - * as SAI1 Block A clock. - * @arg RCC_SAIACLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used + * @arg RCC_SAIACLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used + * as SAI1 Block A clock. + * @arg RCC_SAIACLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used * as SAI1 Block A clock. * @arg RCC_SAIACLKSOURCE_Ext: External clock mapped on the I2S_CKIN pin * used as SAI1 Block A clock. @@ -6150,14 +6210,14 @@ typedef struct /** @brief Macro to configure SAI1BlockB clock source selection. * @note The SAI peripheral is only available with STM32F427/437/429/439/469/479xx Devices. - * @note This function must be called before enabling PLLSAI, PLLI2S and + * @note This function must be called before enabling PLLSAI, PLLI2S and * the SAI clock. * @param __SOURCE__ specifies the SAI Block B clock source. * This parameter can be one of the following values: - * @arg RCC_SAIBCLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used - * as SAI1 Block B clock. - * @arg RCC_SAIBCLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used - * as SAI1 Block B clock. + * @arg RCC_SAIBCLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used + * as SAI1 Block B clock. + * @arg RCC_SAIBCLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used + * as SAI1 Block B clock. * @arg RCC_SAIBCLKSOURCE_Ext: External clock mapped on the I2S_CKIN pin * used as SAI1 Block B clock. */ @@ -6167,46 +6227,46 @@ typedef struct #if defined(STM32F446xx) /** @brief Macro to configure SAI1 clock source selection. 
* @note This configuration is only available with STM32F446xx Devices. - * @note This function must be called before enabling PLL, PLLSAI, PLLI2S and + * @note This function must be called before enabling PLL, PLLSAI, PLLI2S and * the SAI clock. * @param __SOURCE__ specifies the SAI1 clock source. * This parameter can be one of the following values: - * @arg RCC_SAI1CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used as SAI1 clock. * @arg RCC_SAI1CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used as SAI1 clock. - * @arg RCC_SAI1CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SAI1 clock. * @arg RCC_SAI1CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as SAI1 clock. */ #define __HAL_RCC_SAI1_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_SAI1SRC, (__SOURCE__))) /** @brief Macro to Get SAI1 clock source selection. - * @note This configuration is only available with STM32F446xx Devices. + * @note This configuration is only available with STM32F446xx Devices. * @retval The clock source can be one of the following values: - * @arg RCC_SAI1CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used as SAI1 clock. * @arg RCC_SAI1CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used as SAI1 clock. - * @arg RCC_SAI1CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SAI1 clock. * @arg RCC_SAI1CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as SAI1 clock. */ #define __HAL_RCC_GET_SAI1_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_SAI1SRC)) /** @brief Macro to configure SAI2 clock source selection. - * @note This configuration is only available with STM32F446xx Devices. - * @note This function must be called before enabling PLL, PLLSAI, PLLI2S and + * @note This configuration is only available with STM32F446xx Devices. + * @note This function must be called before enabling PLL, PLLSAI, PLLI2S and * the SAI clock. * @param __SOURCE__ specifies the SAI2 clock source. * This parameter can be one of the following values: - * @arg RCC_SAI2CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used as SAI2 clock. * @arg RCC_SAI2CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used as SAI2 clock. - * @arg RCC_SAI2CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SAI2 clock. * @arg RCC_SAI2CLKSOURCE_PLLSRC: HSI or HSE depending from PLL Source clock used as SAI2 clock. */ #define __HAL_RCC_SAI2_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_SAI2SRC, (__SOURCE__))) /** @brief Macro to Get SAI2 clock source selection. - * @note This configuration is only available with STM32F446xx Devices. + * @note This configuration is only available with STM32F446xx Devices. * @retval The clock source can be one of the following values: - * @arg RCC_SAI2CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used as SAI2 clock. * @arg RCC_SAI2CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used as SAI2 clock. 
- * @arg RCC_SAI2CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SAI2 clock. * @arg RCC_SAI2CLKSOURCE_PLLSRC: HSI or HSE depending from PLL Source clock used as SAI2 clock. */ #define __HAL_RCC_GET_SAI2_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_SAI2SRC)) @@ -6215,18 +6275,18 @@ typedef struct * @note This function must be called before enabling PLL, PLLI2S and the I2S clock. * @param __SOURCE__ specifies the I2S APB1 clock source. * This parameter can be one of the following values: - * @arg RCC_I2SAPB1CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR used as I2S clock. + * @arg RCC_I2SAPB1CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR used as I2S clock. * @arg RCC_I2SAPB1CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as I2S APB1 clock. - * @arg RCC_I2SAPB1CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as I2S APB1 clock. + * @arg RCC_I2SAPB1CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as I2S APB1 clock. * @arg RCC_I2SAPB1CLKSOURCE_PLLSRC: HSI or HSE depending from PLL source Clock. */ #define __HAL_RCC_I2S_APB1_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_I2S1SRC, (__SOURCE__))) /** @brief Macro to Get I2S APB1 clock source selection. * @retval The clock source can be one of the following values: - * @arg RCC_I2SAPB1CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR used as I2S clock. + * @arg RCC_I2SAPB1CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR used as I2S clock. * @arg RCC_I2SAPB1CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as I2S APB1 clock. - * @arg RCC_I2SAPB1CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as I2S APB1 clock. + * @arg RCC_I2SAPB1CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as I2S APB1 clock. * @arg RCC_I2SAPB1CLKSOURCE_PLLSRC: HSI or HSE depending from PLL source Clock. */ #define __HAL_RCC_GET_I2S_APB1_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_I2S1SRC)) @@ -6235,18 +6295,18 @@ typedef struct * @note This function must be called before enabling PLL, PLLI2S and the I2S clock. * @param __SOURCE__ specifies the SAI Block A clock source. * This parameter can be one of the following values: - * @arg RCC_I2SAPB2CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR used as I2S clock. + * @arg RCC_I2SAPB2CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR used as I2S clock. * @arg RCC_I2SAPB2CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as I2S APB2 clock. - * @arg RCC_I2SAPB2CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as I2S APB2 clock. + * @arg RCC_I2SAPB2CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as I2S APB2 clock. * @arg RCC_I2SAPB2CLKSOURCE_PLLSRC: HSI or HSE depending from PLL source Clock. */ #define __HAL_RCC_I2S_APB2_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_I2S2SRC, (__SOURCE__))) /** @brief Macro to Get I2S APB2 clock source selection. * @retval The clock source can be one of the following values: - * @arg RCC_I2SAPB2CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR used as I2S clock. + * @arg RCC_I2SAPB2CLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR used as I2S clock. * @arg RCC_I2SAPB2CLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as I2S APB2 clock. - * @arg RCC_I2SAPB2CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as I2S APB2 clock. + * @arg RCC_I2SAPB2CLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as I2S APB2 clock. 
* @arg RCC_I2SAPB2CLKSOURCE_PLLSRC: HSI or HSE depending from PLL source Clock. */ #define __HAL_RCC_GET_I2S_APB2_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_I2S2SRC)) @@ -6286,113 +6346,113 @@ typedef struct /** @brief Macro to configure the CLK48 clock. * @param __SOURCE__ specifies the CLK48 clock source. * This parameter can be one of the following values: - * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. - * @arg RCC_CLK48CLKSOURCE_PLLSAIP: PLLSAI VCO Output divided by PLLSAIP used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLSAIP: PLLSAI VCO Output divided by PLLSAIP used as CLK48 clock. */ #define __HAL_RCC_CLK48_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL, (uint32_t)(__SOURCE__))) /** @brief Macro to Get the CLK48 clock. * @retval The clock source can be one of the following values: - * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. - * @arg RCC_CLK48CLKSOURCE_PLLSAIP: PLLSAI VCO Output divided by PLLSAIP used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLSAIP: PLLSAI VCO Output divided by PLLSAIP used as CLK48 clock. */ #define __HAL_RCC_GET_CLK48_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL)) /** @brief Macro to configure the SDIO clock. * @param __SOURCE__ specifies the SDIO clock source. * This parameter can be one of the following values: - * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. - * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. */ #define __HAL_RCC_SDIO_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_SDIOSEL, (uint32_t)(__SOURCE__))) /** @brief Macro to Get the SDIO clock. * @retval The clock source can be one of the following values: - * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. - * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. */ #define __HAL_RCC_GET_SDIO_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_SDIOSEL)) /** @brief Macro to configure the SPDIFRX clock. * @param __SOURCE__ specifies the SPDIFRX clock source. * This parameter can be one of the following values: - * @arg RCC_SPDIFRXCLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SPDIFRX clock. - * @arg RCC_SPDIFRXCLKSOURCE_PLLI2SP: PLLI2S VCO Output divided by PLLI2SP used as SPDIFRX clock. + * @arg RCC_SPDIFRXCLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SPDIFRX clock. + * @arg RCC_SPDIFRXCLKSOURCE_PLLI2SP: PLLI2S VCO Output divided by PLLI2SP used as SPDIFRX clock. */ #define __HAL_RCC_SPDIFRX_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_SPDIFRXSEL, (uint32_t)(__SOURCE__))) /** @brief Macro to Get the SPDIFRX clock. * @retval The clock source can be one of the following values: - * @arg RCC_SPDIFRXCLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SPDIFRX clock. - * @arg RCC_SPDIFRXCLKSOURCE_PLLI2SP: PLLI2S VCO Output divided by PLLI2SP used as SPDIFRX clock. + * @arg RCC_SPDIFRXCLKSOURCE_PLLR: PLL VCO Output divided by PLLR used as SPDIFRX clock. 
+ * @arg RCC_SPDIFRXCLKSOURCE_PLLI2SP: PLLI2S VCO Output divided by PLLI2SP used as SPDIFRX clock. */ #define __HAL_RCC_GET_SPDIFRX_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_SPDIFRXSEL)) #endif /* STM32F446xx */ - + #if defined(STM32F469xx) || defined(STM32F479xx) - + /** @brief Macro to configure the CLK48 clock. * @param __SOURCE__ specifies the CLK48 clock source. * This parameter can be one of the following values: - * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. - * @arg RCC_CLK48CLKSOURCE_PLLSAIP: PLLSAI VCO Output divided by PLLSAIP used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLSAIP: PLLSAI VCO Output divided by PLLSAIP used as CLK48 clock. */ #define __HAL_RCC_CLK48_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CK48MSEL, (uint32_t)(__SOURCE__))) /** @brief Macro to Get the CLK48 clock. * @retval The clock source can be one of the following values: - * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. - * @arg RCC_CLK48CLKSOURCE_PLLSAIP: PLLSAI VCO Output divided by PLLSAIP used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLSAIP: PLLSAI VCO Output divided by PLLSAIP used as CLK48 clock. */ #define __HAL_RCC_GET_CLK48_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_CK48MSEL)) /** @brief Macro to configure the SDIO clock. * @param __SOURCE__ specifies the SDIO clock source. * This parameter can be one of the following values: - * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. - * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. */ #define __HAL_RCC_SDIO_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_SDIOSEL, (uint32_t)(__SOURCE__))) /** @brief Macro to Get the SDIO clock. * @retval The clock source can be one of the following values: - * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. - * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. */ -#define __HAL_RCC_GET_SDIO_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_SDIOSEL)) - +#define __HAL_RCC_GET_SDIO_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_SDIOSEL)) + /** @brief Macro to configure the DSI clock. * @param __SOURCE__ specifies the DSI clock source. * This parameter can be one of the following values: - * @arg RCC_DSICLKSOURCE_PLLR: PLLR output used as DSI clock. - * @arg RCC_DSICLKSOURCE_DSIPHY: DSI-PHY output used as DSI clock. + * @arg RCC_DSICLKSOURCE_PLLR: PLLR output used as DSI clock. + * @arg RCC_DSICLKSOURCE_DSIPHY: DSI-PHY output used as DSI clock. */ #define __HAL_RCC_DSI_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_DSISEL, (uint32_t)(__SOURCE__))) /** @brief Macro to Get the DSI clock. * @retval The clock source can be one of the following values: - * @arg RCC_DSICLKSOURCE_PLLR: PLLR output used as DSI clock. - * @arg RCC_DSICLKSOURCE_DSIPHY: DSI-PHY output used as DSI clock. + * @arg RCC_DSICLKSOURCE_PLLR: PLLR output used as DSI clock. + * @arg RCC_DSICLKSOURCE_DSIPHY: DSI-PHY output used as DSI clock. 
*/ -#define __HAL_RCC_GET_DSI_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_DSISEL)) - +#define __HAL_RCC_GET_DSI_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_DSISEL)) + #endif /* STM32F469xx || STM32F479xx */ #if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) ||\ defined(STM32F413xx) || defined(STM32F423xx) - /** @brief Macro to configure the DFSDM1 clock. +/** @brief Macro to configure the DFSDM1 clock. * @param __DFSDM1_CLKSOURCE__ specifies the DFSDM1 clock source. * This parameter can be one of the following values: - * @arg RCC_DFSDM1CLKSOURCE_PCLK2: PCLK2 clock used as kernel clock. - * @arg RCC_DFSDM1CLKSOURCE_SYSCLK: System clock used as kernal clock. + * @arg RCC_DFSDM1CLKSOURCE_PCLK2: PCLK2 clock used as kernel clock. + * @arg RCC_DFSDM1CLKSOURCE_SYSCLK: System clock used as kernel clock. * @retval None - */ + */ #define __HAL_RCC_DFSDM1_CONFIG(__DFSDM1_CLKSOURCE__) MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM1SEL, (__DFSDM1_CLKSOURCE__)) /** @brief Macro to get the DFSDM1 clock source. * @retval The clock source can be one of the following values: - * @arg RCC_DFSDM1CLKSOURCE_PCLK2: PCLK2 clock used as kernel clock. - * @arg RCC_DFSDM1CLKSOURCE_SYSCLK: System clock used as kernal clock. + * @arg RCC_DFSDM1CLKSOURCE_PCLK2: PCLK2 clock used as kernel clock. + * @arg RCC_DFSDM1CLKSOURCE_SYSCLK: System clock used as kernel clock. */ #define __HAL_RCC_GET_DFSDM1_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM1SEL))) @@ -6416,19 +6476,19 @@ typedef struct #define __HAL_RCC_GET_DFSDM1AUDIO_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM1ASEL)) #if defined(STM32F413xx) || defined(STM32F423xx) - /** @brief Macro to configure the DFSDM2 clock. +/** @brief Macro to configure the DFSDM2 clock. * @param __DFSDM2_CLKSOURCE__ specifies the DFSDM1 clock source. * This parameter can be one of the following values: - * @arg RCC_DFSDM2CLKSOURCE_PCLK2: PCLK2 clock used as kernel clock. - * @arg RCC_DFSDM2CLKSOURCE_SYSCLK: System clock used as kernal clock. + * @arg RCC_DFSDM2CLKSOURCE_PCLK2: PCLK2 clock used as kernel clock. + * @arg RCC_DFSDM2CLKSOURCE_SYSCLK: System clock used as kernel clock. * @retval None - */ + */ #define __HAL_RCC_DFSDM2_CONFIG(__DFSDM2_CLKSOURCE__) MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM1SEL, (__DFSDM2_CLKSOURCE__)) /** @brief Macro to get the DFSDM2 clock source. * @retval The clock source can be one of the following values: - * @arg RCC_DFSDM2CLKSOURCE_PCLK2: PCLK2 clock used as kernel clock. - * @arg RCC_DFSDM2CLKSOURCE_SYSCLK: System clock used as kernal clock. + * @arg RCC_DFSDM2CLKSOURCE_PCLK2: PCLK2 clock used as kernel clock. + * @arg RCC_DFSDM2CLKSOURCE_SYSCLK: System clock used as kernel clock. */ #define __HAL_RCC_GET_DFSDM2_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM1SEL))) @@ -6448,10 +6508,10 @@ typedef struct * @arg RCC_DFSDM2AUDIOCLKSOURCE_I2S2: CK_I2S_PCLK2 selected as audio clock */ #define __HAL_RCC_GET_DFSDM2AUDIO_SOURCE() (READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM2ASEL)) - + /** @brief Macro to configure SAI1BlockA clock source selection. - * @note The SAI peripheral is only available with STM32F413xx/STM32F423xx Devices. - * @note This function must be called before enabling PLLSAI, PLLI2S and + * @note The SAI peripheral is only available with STM32F413xx/STM32F423xx Devices. + * @note This function must be called before enabling PLLSAI, PLLI2S and * the SAI clock. * @param __SOURCE__ specifies the SAI Block A clock source. 
* This parameter can be one of the following values: @@ -6461,9 +6521,9 @@ typedef struct * @arg RCC_SAIACLKSOURCE_PLLSRC: HSI or HSE depending from PLL source Clock. */ #define __HAL_RCC_SAI_BLOCKACLKSOURCE_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_SAI1ASRC, (__SOURCE__))) - + /** @brief Macro to Get SAI1 BlockA clock source selection. - * @note This configuration is only available with STM32F413xx/STM32F423xx Devices. + * @note This configuration is only available with STM32F413xx/STM32F423xx Devices. * @retval The clock source can be one of the following values: * @arg RCC_SAIACLKSOURCE_PLLI2SR: PLLI2S_R clock divided (R2) used as SAI1 Block A clock. * @arg RCC_SAIACLKSOURCE_EXT: External clock mapped on the I2S_CKIN pinused as SAI1 Block A clock. @@ -6474,7 +6534,7 @@ typedef struct /** @brief Macro to configure SAI1 BlockB clock source selection. * @note The SAI peripheral is only available with STM32F413xx/STM32F423xx Devices. - * @note This function must be called before enabling PLLSAI, PLLI2S and + * @note This function must be called before enabling PLLSAI, PLLI2S and * the SAI clock. * @param __SOURCE__ specifies the SAI Block B clock source. * This parameter can be one of the following values: @@ -6484,9 +6544,9 @@ typedef struct * @arg RCC_SAIBCLKSOURCE_PLLSRC: HSI or HSE depending from PLL source Clock. */ #define __HAL_RCC_SAI_BLOCKBCLKSOURCE_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_SAI1BSRC, (__SOURCE__))) - + /** @brief Macro to Get SAI1 BlockB clock source selection. - * @note This configuration is only available with STM32F413xx/STM32F423xx Devices. + * @note This configuration is only available with STM32F413xx/STM32F423xx Devices. * @retval The clock source can be one of the following values: * @arg RCC_SAIBCLKSOURCE_PLLI2SR: PLLI2S_R clock divided (R2) used as SAI1 Block A clock. * @arg RCC_SAIBCLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as SAI1 Block A clock. @@ -6512,9 +6572,9 @@ typedef struct * @arg RCC_LPTIM1CLKSOURCE_LSI: LSI selected as LPTIM1 clock * @arg RCC_LPTIM1CLKSOURCE_LSE: LSE selected as LPTIM1 clock */ -#define __HAL_RCC_GET_LPTIM1_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL)) +#define __HAL_RCC_GET_LPTIM1_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL)) #endif /* STM32F413xx || STM32F423xx */ - + /** @brief Macro to configure I2S APB1 clock source selection. * @param __SOURCE__ specifies the I2S APB1 clock source. * This parameter can be one of the following values: @@ -6562,7 +6622,7 @@ typedef struct * used as I2S clock source. */ #define __HAL_RCC_PLL_I2S_CONFIG(__SOURCE__) (*(__IO uint32_t *) RCC_PLLI2SCFGR_PLLI2SSRC_BB = (__SOURCE__)) - + /** @brief Macro to configure the FMPI2C1 clock. * @param __SOURCE__ specifies the FMPI2C1 clock source. * This parameter can be one of the following values: @@ -6583,14 +6643,14 @@ typedef struct /** @brief Macro to configure the CLK48 clock. * @param __SOURCE__ specifies the CLK48 clock source. * This parameter can be one of the following values: - * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. * @arg RCC_CLK48CLKSOURCE_PLLI2SQ: PLLI2S VCO Output divided by PLLI2SQ used as CLK48 clock. */ #define __HAL_RCC_CLK48_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL, (uint32_t)(__SOURCE__))) /** @brief Macro to Get the CLK48 clock. 
* @retval The clock source can be one of the following values: - * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. + * @arg RCC_CLK48CLKSOURCE_PLLQ: PLL VCO Output divided by PLLQ used as CLK48 clock. * @arg RCC_CLK48CLKSOURCE_PLLI2SQ: PLLI2S VCO Output divided by PLLI2SQ used as CLK48 clock */ #define __HAL_RCC_GET_CLK48_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL)) @@ -6598,15 +6658,15 @@ typedef struct /** @brief Macro to configure the SDIO clock. * @param __SOURCE__ specifies the SDIO clock source. * This parameter can be one of the following values: - * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. - * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. */ #define __HAL_RCC_SDIO_CONFIG(__SOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_SDIOSEL, (uint32_t)(__SOURCE__))) /** @brief Macro to Get the SDIO clock. * @retval The clock source can be one of the following values: - * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. - * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_CLK48: CLK48 output used as SDIO clock. + * @arg RCC_SDIOCLKSOURCE_SYSCLK: System clock output used as SDIO clock. */ #define __HAL_RCC_GET_SDIO_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_SDIOSEL)) @@ -6666,25 +6726,25 @@ typedef struct */ #define __HAL_RCC_GET_LPTIM1_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL)) #endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ - + #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) ||\ defined(STM32F410Rx) || defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F469xx) ||\ defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) ||\ defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) -/** @brief Macro to configure the Timers clocks prescalers - * @note This feature is only available with STM32F429x/439x Devices. +/** @brief Macro to configure the Timers clocks prescalers + * @note This feature is only available with STM32F429x/439x Devices. * @param __PRESC__ specifies the Timers clocks prescalers selection * This parameter can be one of the following values: - * @arg RCC_TIMPRES_DESACTIVATED: The Timers kernels clocks prescaler is - * equal to HPRE if PPREx is corresponding to division by 1 or 2, - * else it is equal to [(HPRE * PPREx) / 2] if PPREx is corresponding to - * division by 4 or more. - * @arg RCC_TIMPRES_ACTIVATED: The Timers kernels clocks prescaler is - * equal to HPRE if PPREx is corresponding to division by 1, 2 or 4, - * else it is equal to [(HPRE * PPREx) / 4] if PPREx is corresponding + * @arg RCC_TIMPRES_DESACTIVATED: The Timers kernels clocks prescaler is + * equal to HPRE if PPREx is corresponding to division by 1 or 2, + * else it is equal to [(HPRE * PPREx) / 2] if PPREx is corresponding to + * division by 4 or more. + * @arg RCC_TIMPRES_ACTIVATED: The Timers kernels clocks prescaler is + * equal to HPRE if PPREx is corresponding to division by 1, 2 or 4, + * else it is equal to [(HPRE * PPREx) / 4] if PPREx is corresponding * to division by 8 or more. 
- */ + */ #define __HAL_RCC_TIMCLKPRESCALER(__PRESC__) (*(__IO uint32_t *) RCC_DCKCFGR_TIMPRE_BB = (__PRESC__)) #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx) || STM32F401xC || STM32F401xE || STM32F410xx || STM32F411xE ||\ @@ -6764,7 +6824,7 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); #endif /* RCC_PLLSAI_SUPPORT */ /** * @} - */ + */ /** * @} @@ -6780,7 +6840,7 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); * @brief RCC registers bit address in the alias region * @{ */ -/* --- CR Register ---*/ +/* --- CR Register ---*/ #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) ||\ defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) /* Alias word address of PLLSAION bit */ @@ -6825,19 +6885,20 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); /* Alias word address of I2SSRC bit */ #define RCC_I2SSRC_BIT_NUMBER 0x17U #define RCC_CFGR_I2SSRC_BB (PERIPH_BB_BASE + (RCC_CFGR_OFFSET * 32U) + (RCC_I2SSRC_BIT_NUMBER * 4U)) - + #define PLLI2S_TIMEOUT_VALUE 2U /* Timeout value fixed to 2 ms */ #endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F401xC || STM32F401xE || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx */ - + #if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) ||\ defined(STM32F413xx) || defined(STM32F423xx) /* --- PLLI2SCFGR Register ---*/ #define RCC_PLLI2SCFGR_OFFSET (RCC_OFFSET + 0x84U) /* Alias word address of PLLI2SSRC bit */ #define RCC_PLLI2SSRC_BIT_NUMBER 0x16U -#define RCC_PLLI2SCFGR_PLLI2SSRC_BB (PERIPH_BB_BASE + (RCC_PLLI2SCFGR_OFFSET * 32U) + (RCC_PLLI2SSRC_BIT_NUMBER * 4U)) - +#define RCC_PLLI2SCFGR_PLLI2SSRC_BB (PERIPH_BB_BASE\ + + (RCC_PLLI2SCFGR_OFFSET * 32U) + (RCC_PLLI2SSRC_BIT_NUMBER * 4U)) + #define PLLI2S_TIMEOUT_VALUE 2U /* Timeout value fixed to 2 ms */ #endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx | STM32F423xx */ @@ -6867,26 +6928,18 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); /** @defgroup RCCEx_IS_RCC_Definitions RCC Private macros to check input parameters * @{ */ -#if defined(STM32F411xE) -#define IS_RCC_PLLN_VALUE(VALUE) ((192U <= (VALUE)) && ((VALUE) <= 432U)) -#define IS_RCC_PLLI2SN_VALUE(VALUE) ((192U <= (VALUE)) && ((VALUE) <= 432U)) -#else /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx || STM32F427xx || STM32F437xx || - STM32F429xx || STM32F439xx || STM32F401xC || STM32F401xE || STM32F410Tx || STM32F410Cx || - STM32F410Rx || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Cx || STM32F412Rx || - STM32F412Vx || STM32F412Zx || STM32F413xx || STM32F423xx */ #define IS_RCC_PLLN_VALUE(VALUE) ((50U <= (VALUE)) && ((VALUE) <= 432U)) #define IS_RCC_PLLI2SN_VALUE(VALUE) ((50U <= (VALUE)) && ((VALUE) <= 432U)) -#endif /* STM32F411xE */ - + #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) #define IS_RCC_PERIPHCLOCK(SELECTION) ((1U <= (SELECTION)) && ((SELECTION) <= 0x0000007FU)) #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx */ -#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx)|| defined(STM32F417xx) +#if defined(STM32F405xx) || defined(STM32F415xx) || defined(STM32F407xx)|| defined(STM32F417xx) #define IS_RCC_PERIPHCLOCK(SELECTION) ((1U <= (SELECTION)) && ((SELECTION) <= 0x00000007U)) #endif /* STM32F405xx || STM32F415xx || STM32F407xx || STM32F417xx */ -#if defined(STM32F401xC) || 
defined(STM32F401xE) || defined(STM32F411xE) +#if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) #define IS_RCC_PERIPHCLOCK(SELECTION) ((1U <= (SELECTION)) && ((SELECTION) <= 0x0000000FU)) #endif /* STM32F401xC || STM32F401xE || STM32F411xE */ @@ -6905,11 +6958,11 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); #if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) #define IS_RCC_PERIPHCLOCK(SELECTION) ((1U <= (SELECTION)) && ((SELECTION) <= 0x000003FFU)) #endif /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ - + #if defined(STM32F413xx) || defined(STM32F423xx) #define IS_RCC_PERIPHCLOCK(SELECTION) ((1U <= (SELECTION)) && ((SELECTION) <= 0x00007FFFU)) #endif /* STM32F413xx || STM32F423xx */ - + #define IS_RCC_PLLI2SR_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 7U)) #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) ||\ @@ -6935,7 +6988,7 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); #if defined(STM32F411xE) || defined(STM32F446xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || \ defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) #define IS_RCC_PLLI2SM_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 63U)) - + #define IS_RCC_LSE_MODE(MODE) (((MODE) == RCC_LSE_LOWPOWER_MODE) ||\ ((MODE) == RCC_LSE_HIGHDRIVE_MODE)) #endif /* STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ @@ -6962,14 +7015,14 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); #if defined(STM32F446xx) #define IS_RCC_PLLR_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 7U)) - + #define IS_RCC_PLLI2SP_VALUE(VALUE) (((VALUE) == RCC_PLLI2SP_DIV2) ||\ ((VALUE) == RCC_PLLI2SP_DIV4) ||\ ((VALUE) == RCC_PLLI2SP_DIV6) ||\ ((VALUE) == RCC_PLLI2SP_DIV8)) #define IS_RCC_PLLSAIM_VALUE(VALUE) ((VALUE) <= 63U) - + #define IS_RCC_PLLSAIP_VALUE(VALUE) (((VALUE) == RCC_PLLSAIP_DIV2) ||\ ((VALUE) == RCC_PLLSAIP_DIV4) ||\ ((VALUE) == RCC_PLLSAIP_DIV6) ||\ @@ -6984,16 +7037,16 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); ((SOURCE) == RCC_SAI2CLKSOURCE_PLLI2S) ||\ ((SOURCE) == RCC_SAI2CLKSOURCE_PLLR) ||\ ((SOURCE) == RCC_SAI2CLKSOURCE_PLLSRC)) - + #define IS_RCC_I2SAPB1CLKSOURCE(SOURCE) (((SOURCE) == RCC_I2SAPB1CLKSOURCE_PLLI2S) ||\ ((SOURCE) == RCC_I2SAPB1CLKSOURCE_EXT) ||\ ((SOURCE) == RCC_I2SAPB1CLKSOURCE_PLLR) ||\ ((SOURCE) == RCC_I2SAPB1CLKSOURCE_PLLSRC)) - - #define IS_RCC_I2SAPB2CLKSOURCE(SOURCE) (((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLI2S) ||\ - ((SOURCE) == RCC_I2SAPB2CLKSOURCE_EXT) ||\ - ((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLR) ||\ - ((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLSRC)) + +#define IS_RCC_I2SAPB2CLKSOURCE(SOURCE) (((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLI2S) ||\ + ((SOURCE) == RCC_I2SAPB2CLKSOURCE_EXT) ||\ + ((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLSRC)) #define IS_RCC_FMPI2C1CLKSOURCE(SOURCE) (((SOURCE) == RCC_FMPI2C1CLKSOURCE_PCLK1) ||\ ((SOURCE) == RCC_FMPI2C1CLKSOURCE_SYSCLK) ||\ @@ -7009,7 +7062,7 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); ((SOURCE) == RCC_SDIOCLKSOURCE_SYSCLK)) #define IS_RCC_SPDIFRXCLKSOURCE(SOURCE) (((SOURCE) == RCC_SPDIFRXCLKSOURCE_PLLR) ||\ - ((SOURCE) == RCC_SPDIFRXCLKSOURCE_PLLI2SP)) + ((SOURCE) == RCC_SPDIFRXCLKSOURCE_PLLI2SP)) #endif /* STM32F446xx */ #if defined(STM32F469xx) || defined(STM32F479xx) @@ -7019,7 +7072,7 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); ((VALUE) == RCC_PLLSAIP_DIV4) ||\ 
((VALUE) == RCC_PLLSAIP_DIV6) ||\ ((VALUE) == RCC_PLLSAIP_DIV8)) - + #define IS_RCC_CLK48CLKSOURCE(SOURCE) (((SOURCE) == RCC_CLK48CLKSOURCE_PLLQ) ||\ ((SOURCE) == RCC_CLK48CLKSOURCE_PLLSAIP)) @@ -7036,21 +7089,21 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); #if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) ||\ defined(STM32F413xx) || defined(STM32F423xx) #define IS_RCC_PLLI2SQ_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 15U)) - + #define IS_RCC_PLLR_VALUE(VALUE) ((2U <= (VALUE)) && ((VALUE) <= 7U)) #define IS_RCC_PLLI2SCLKSOURCE(__SOURCE__) (((__SOURCE__) == RCC_PLLI2SCLKSOURCE_PLLSRC) || \ ((__SOURCE__) == RCC_PLLI2SCLKSOURCE_EXT)) - + #define IS_RCC_I2SAPB1CLKSOURCE(SOURCE) (((SOURCE) == RCC_I2SAPB1CLKSOURCE_PLLI2S) ||\ ((SOURCE) == RCC_I2SAPB1CLKSOURCE_EXT) ||\ ((SOURCE) == RCC_I2SAPB1CLKSOURCE_PLLR) ||\ ((SOURCE) == RCC_I2SAPB1CLKSOURCE_PLLSRC)) - - #define IS_RCC_I2SAPB2CLKSOURCE(SOURCE) (((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLI2S) ||\ - ((SOURCE) == RCC_I2SAPB2CLKSOURCE_EXT) ||\ - ((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLR) ||\ - ((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLSRC)) + +#define IS_RCC_I2SAPB2CLKSOURCE(SOURCE) (((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLI2S) ||\ + ((SOURCE) == RCC_I2SAPB2CLKSOURCE_EXT) ||\ + ((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_I2SAPB2CLKSOURCE_PLLSRC)) #define IS_RCC_FMPI2C1CLKSOURCE(SOURCE) (((SOURCE) == RCC_FMPI2C1CLKSOURCE_PCLK1) ||\ ((SOURCE) == RCC_FMPI2C1CLKSOURCE_SYSCLK) ||\ @@ -7101,8 +7154,8 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || \ defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) || defined(STM32F446xx) || \ defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || \ - defined(STM32F412Rx) || defined(STM32F413xx) || defined(STM32F423xx) - + defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) + #define IS_RCC_MCO2SOURCE(SOURCE) (((SOURCE) == RCC_MCO2SOURCE_SYSCLK) || ((SOURCE) == RCC_MCO2SOURCE_PLLI2SCLK)|| \ ((SOURCE) == RCC_MCO2SOURCE_HSE) || ((SOURCE) == RCC_MCO2SOURCE_PLLCLK)) @@ -7110,7 +7163,7 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); STM32F401xC || STM32F401xE || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || \ STM32F412Rx */ -#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) +#if defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) #define IS_RCC_MCO2SOURCE(SOURCE) (((SOURCE) == RCC_MCO2SOURCE_SYSCLK) || ((SOURCE) == RCC_MCO2SOURCE_I2SCLK)|| \ ((SOURCE) == RCC_MCO2SOURCE_HSE) || ((SOURCE) == RCC_MCO2SOURCE_PLLCLK)) #endif /* STM32F410Tx || STM32F410Cx || STM32F410Rx */ @@ -7124,15 +7177,14 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); /** * @} - */ + */ /** * @} - */ + */ #ifdef __cplusplus } #endif #endif /* __STM32F4xx_HAL_RCC_EX_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rtc.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rtc.h index 24affc54..9d58161f 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rtc.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rtc.h @@ -6,7 +6,7 @@ ****************************************************************************** * @attention * - * Copyright (c) 2017 STMicroelectronics. + * Copyright (c) 2016 STMicroelectronics. 
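For reference, the IS_RCC_MCO2SOURCE checks above are what gate HAL_RCC_MCOConfig(); a minimal usage sketch (source and divider chosen purely for illustration) might be:

#include "stm32f4xx_hal.h"

/* Illustrative only: output SYSCLK, divided by 4, on MCO2 (PC9). */
void App_EnableMco2(void)
{
  HAL_RCC_MCOConfig(RCC_MCO2, RCC_MCO2SOURCE_SYSCLK, RCC_MCODIV_4);
}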
* All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file @@ -489,6 +489,12 @@ typedef void (*pRTC_CallbackTypeDef)(RTC_HandleTypeDef *hrtc); /*!< pointer to (__HANDLE__)->Instance->WPR = 0xFFU; \ } while(0U) +/** + * @brief Check whether the RTC Calendar is initialized. + * @param __HANDLE__ specifies the RTC handle. + * @retval None + */ +#define __HAL_RTC_IS_CALENDAR_INITIALIZED(__HANDLE__) (((((__HANDLE__)->Instance->ISR) & (RTC_FLAG_INITS)) == RTC_FLAG_INITS) ? 1U : 0U) /** * @brief Enable the RTC ALARMA peripheral. @@ -772,6 +778,7 @@ HAL_RTCStateTypeDef HAL_RTC_GetState(RTC_HandleTypeDef *hrtc); RTC_DR_MT | RTC_DR_MU | \ RTC_DR_DT | RTC_DR_DU | \ RTC_DR_WDU)) +#define RTC_ISR_RESERVED_MASK ((uint32_t)(RTC_FLAGS_MASK | RTC_ISR_INIT)) #define RTC_INIT_MASK 0xFFFFFFFFU #define RTC_RSF_MASK ((uint32_t)~(RTC_ISR_INIT | RTC_ISR_RSF)) #define RTC_FLAGS_MASK ((uint32_t)(RTC_FLAG_INITF | RTC_FLAG_INITS | \ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rtc_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rtc_ex.h index ff5f14d4..bee4e320 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rtc_ex.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_rtc_ex.h @@ -6,7 +6,7 @@ ****************************************************************************** * @attention * - * Copyright (c) 2017 STMicroelectronics. + * Copyright (c) 2016 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file @@ -162,7 +162,7 @@ typedef struct * @{ */ #define RTC_TAMPERTRIGGER_RISINGEDGE 0x00000000U -#define RTC_TAMPERTRIGGER_FALLINGEDGE RTC_TAFCR_TAMP1TRG +#define RTC_TAMPERTRIGGER_FALLINGEDGE 0x00000002U #define RTC_TAMPERTRIGGER_LOWLEVEL RTC_TAMPERTRIGGER_RISINGEDGE #define RTC_TAMPERTRIGGER_HIGHLEVEL RTC_TAMPERTRIGGER_FALLINGEDGE /** @@ -635,18 +635,6 @@ typedef struct */ #define __HAL_RTC_TAMPER_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->TAFCR &= ~(__INTERRUPT__)) -/** - * @brief Check whether the specified RTC Tamper interrupt has occurred or not. - * @param __HANDLE__ specifies the RTC handle. - * @param __INTERRUPT__ specifies the RTC Tamper interrupt to check. - * This parameter can be: - * @arg RTC_IT_TAMP1: Tamper 1 interrupt - * @arg RTC_IT_TAMP2: Tamper 2 interrupt - * @note RTC_IT_TAMP2 is not applicable to all devices. - * @retval None - */ -#define __HAL_RTC_TAMPER_GET_IT(__HANDLE__, __INTERRUPT__) (((((__HANDLE__)->Instance->ISR) & ((__INTERRUPT__) >> 4U)) != 0U) ? 1U : 0U) - /** * @brief Check whether the specified RTC Tamper interrupt has been enabled or not. * @param __HANDLE__ specifies the RTC handle. diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_spi.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_spi.h index 41903da1..4e2de041 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_spi.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_spi.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
<h2><center>&copy; Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -108,7 +107,7 @@ typedef struct __SPI_HandleTypeDef SPI_InitTypeDef Init; /*!< SPI communication parameters */ - uint8_t *pTxBuffPtr; /*!< Pointer to SPI Tx transfer Buffer */ + const uint8_t *pTxBuffPtr; /*!< Pointer to SPI Tx transfer Buffer */ uint16_t TxXferSize; /*!< SPI Tx Transfer size */ @@ -493,7 +492,7 @@ typedef void (*pSPI_CallbackTypeDef)(SPI_HandleTypeDef *hspi); /*!< pointer to SET_BIT((__HANDLE__)->Instance->CR1, SPI_CR1_CRCEN);}while(0U) /** @brief Check whether the specified SPI flag is set or not. - * @param __SR__ copy of SPI SR regsiter. + * @param __SR__ copy of SPI SR register. * @param __FLAG__ specifies the flag to check. * This parameter can be one of the following values: * @arg SPI_FLAG_RXNE: Receive buffer not empty flag @@ -505,10 +504,11 @@ typedef void (*pSPI_CallbackTypeDef)(SPI_HandleTypeDef *hspi); /*!< pointer to * @arg SPI_FLAG_FRE: Frame format error flag * @retval SET or RESET. */ -#define SPI_CHECK_FLAG(__SR__, __FLAG__) ((((__SR__) & ((__FLAG__) & SPI_FLAG_MASK)) == ((__FLAG__) & SPI_FLAG_MASK)) ? SET : RESET) +#define SPI_CHECK_FLAG(__SR__, __FLAG__) ((((__SR__) & ((__FLAG__) & SPI_FLAG_MASK)) == \ + ((__FLAG__) & SPI_FLAG_MASK)) ? SET : RESET) /** @brief Check whether the specified SPI Interrupt is set or not. - * @param __CR2__ copy of SPI CR2 regsiter. + * @param __CR2__ copy of SPI CR2 register. * @param __INTERRUPT__ specifies the SPI interrupt source to check. * This parameter can be one of the following values: * @arg SPI_IT_TXE: Tx buffer empty interrupt enable @@ -516,15 +516,16 @@ typedef void (*pSPI_CallbackTypeDef)(SPI_HandleTypeDef *hspi); /*!< pointer to * @arg SPI_IT_ERR: Error interrupt enable * @retval SET or RESET. */ -#define SPI_CHECK_IT_SOURCE(__CR2__, __INTERRUPT__) ((((__CR2__) & (__INTERRUPT__)) == (__INTERRUPT__)) ? SET : RESET) +#define SPI_CHECK_IT_SOURCE(__CR2__, __INTERRUPT__) ((((__CR2__) & (__INTERRUPT__)) == \ + (__INTERRUPT__)) ? SET : RESET) /** @brief Checks if SPI Mode parameter is in allowed range. * @param __MODE__ specifies the SPI Mode. * This parameter can be a value of @ref SPI_Mode * @retval None */ -#define IS_SPI_MODE(__MODE__) (((__MODE__) == SPI_MODE_SLAVE) || \ - ((__MODE__) == SPI_MODE_MASTER)) +#define IS_SPI_MODE(__MODE__) (((__MODE__) == SPI_MODE_SLAVE) || \ + ((__MODE__) == SPI_MODE_MASTER)) /** @brief Checks if SPI Direction Mode parameter is in allowed range. * @param __MODE__ specifies the SPI Direction Mode. @@ -561,25 +562,25 @@ typedef void (*pSPI_CallbackTypeDef)(SPI_HandleTypeDef *hspi); /*!< pointer to * This parameter can be a value of @ref SPI_Clock_Polarity * @retval None */ -#define IS_SPI_CPOL(__CPOL__) (((__CPOL__) == SPI_POLARITY_LOW) || \ - ((__CPOL__) == SPI_POLARITY_HIGH)) +#define IS_SPI_CPOL(__CPOL__) (((__CPOL__) == SPI_POLARITY_LOW) || \ + ((__CPOL__) == SPI_POLARITY_HIGH)) /** @brief Checks if SPI Clock Phase parameter is in allowed range. 
* @param __CPHA__ specifies the SPI Clock Phase. * This parameter can be a value of @ref SPI_Clock_Phase * @retval None */ -#define IS_SPI_CPHA(__CPHA__) (((__CPHA__) == SPI_PHASE_1EDGE) || \ - ((__CPHA__) == SPI_PHASE_2EDGE)) +#define IS_SPI_CPHA(__CPHA__) (((__CPHA__) == SPI_PHASE_1EDGE) || \ + ((__CPHA__) == SPI_PHASE_2EDGE)) /** @brief Checks if SPI Slave Select parameter is in allowed range. * @param __NSS__ specifies the SPI Slave Select management parameter. * This parameter can be a value of @ref SPI_Slave_Select_management * @retval None */ -#define IS_SPI_NSS(__NSS__) (((__NSS__) == SPI_NSS_SOFT) || \ - ((__NSS__) == SPI_NSS_HARD_INPUT) || \ - ((__NSS__) == SPI_NSS_HARD_OUTPUT)) +#define IS_SPI_NSS(__NSS__) (((__NSS__) == SPI_NSS_SOFT) || \ + ((__NSS__) == SPI_NSS_HARD_INPUT) || \ + ((__NSS__) == SPI_NSS_HARD_OUTPUT)) /** @brief Checks if SPI Baudrate prescaler parameter is in allowed range. * @param __PRESCALER__ specifies the SPI Baudrate prescaler. @@ -600,16 +601,16 @@ typedef void (*pSPI_CallbackTypeDef)(SPI_HandleTypeDef *hspi); /*!< pointer to * This parameter can be a value of @ref SPI_MSB_LSB_transmission * @retval None */ -#define IS_SPI_FIRST_BIT(__BIT__) (((__BIT__) == SPI_FIRSTBIT_MSB) || \ - ((__BIT__) == SPI_FIRSTBIT_LSB)) +#define IS_SPI_FIRST_BIT(__BIT__) (((__BIT__) == SPI_FIRSTBIT_MSB) || \ + ((__BIT__) == SPI_FIRSTBIT_LSB)) /** @brief Checks if SPI TI mode parameter is in allowed range. * @param __MODE__ specifies the SPI TI mode. * This parameter can be a value of @ref SPI_TI_mode * @retval None */ -#define IS_SPI_TIMODE(__MODE__) (((__MODE__) == SPI_TIMODE_DISABLE) || \ - ((__MODE__) == SPI_TIMODE_ENABLE)) +#define IS_SPI_TIMODE(__MODE__) (((__MODE__) == SPI_TIMODE_DISABLE) || \ + ((__MODE__) == SPI_TIMODE_ENABLE)) /** @brief Checks if SPI CRC calculation enabled state is in allowed range. * @param __CALCULATION__ specifies the SPI CRC calculation enable state. @@ -624,7 +625,9 @@ typedef void (*pSPI_CallbackTypeDef)(SPI_HandleTypeDef *hspi); /*!< pointer to * This parameter must be a number between Min_Data = 0 and Max_Data = 65535 * @retval None */ -#define IS_SPI_CRC_POLYNOMIAL(__POLYNOMIAL__) (((__POLYNOMIAL__) >= 0x1U) && ((__POLYNOMIAL__) <= 0xFFFFU) && (((__POLYNOMIAL__)&0x1U) != 0U)) +#define IS_SPI_CRC_POLYNOMIAL(__POLYNOMIAL__) (((__POLYNOMIAL__) >= 0x1U) && \ + ((__POLYNOMIAL__) <= 0xFFFFU) && \ + (((__POLYNOMIAL__)&0x1U) != 0U)) /** @brief Checks if DMA handle is valid. * @param __HANDLE__ specifies a DMA Handle. 
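The IS_SPI_* macros above are the parameter checks applied (through assert_param) when an SPI handle is initialized; a minimal configuration that satisfies them might look like the sketch below (handle name, instance and prescaler are illustrative assumptions):

#include "stm32f4xx_hal.h"

SPI_HandleTypeDef hspi1;   /* illustrative handle name */

void App_Spi1Config(void)
{
  hspi1.Instance               = SPI1;
  hspi1.Init.Mode              = SPI_MODE_MASTER;            /* IS_SPI_MODE      */
  hspi1.Init.Direction         = SPI_DIRECTION_2LINES;
  hspi1.Init.DataSize          = SPI_DATASIZE_8BIT;
  hspi1.Init.CLKPolarity       = SPI_POLARITY_LOW;           /* IS_SPI_CPOL      */
  hspi1.Init.CLKPhase          = SPI_PHASE_1EDGE;            /* IS_SPI_CPHA      */
  hspi1.Init.NSS               = SPI_NSS_SOFT;               /* IS_SPI_NSS       */
  hspi1.Init.BaudRatePrescaler = SPI_BAUDRATEPRESCALER_16;
  hspi1.Init.FirstBit          = SPI_FIRSTBIT_MSB;           /* IS_SPI_FIRST_BIT */
  hspi1.Init.TIMode            = SPI_TIMODE_DISABLE;         /* IS_SPI_TIMODE    */
  hspi1.Init.CRCCalculation    = SPI_CRCCALCULATION_DISABLE;
  hspi1.Init.CRCPolynomial     = 7U;  /* odd value, as IS_SPI_CRC_POLYNOMIAL requires */

  if (HAL_SPI_Init(&hspi1) != HAL_OK)
  {
    /* Error handling is application specific. */
  }
}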
@@ -652,7 +655,8 @@ void HAL_SPI_MspDeInit(SPI_HandleTypeDef *hspi); /* Callbacks Register/UnRegister functions ***********************************/ #if (USE_HAL_SPI_REGISTER_CALLBACKS == 1U) -HAL_StatusTypeDef HAL_SPI_RegisterCallback(SPI_HandleTypeDef *hspi, HAL_SPI_CallbackIDTypeDef CallbackID, pSPI_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_SPI_RegisterCallback(SPI_HandleTypeDef *hspi, HAL_SPI_CallbackIDTypeDef CallbackID, + pSPI_CallbackTypeDef pCallback); HAL_StatusTypeDef HAL_SPI_UnRegisterCallback(SPI_HandleTypeDef *hspi, HAL_SPI_CallbackIDTypeDef CallbackID); #endif /* USE_HAL_SPI_REGISTER_CALLBACKS */ /** @@ -663,17 +667,17 @@ HAL_StatusTypeDef HAL_SPI_UnRegisterCallback(SPI_HandleTypeDef *hspi, HAL_SPI_Ca * @{ */ /* I/O operation functions ***************************************************/ -HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, const uint8_t *pData, uint16_t Size, uint32_t Timeout); HAL_StatusTypeDef HAL_SPI_Receive(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size, uint32_t Timeout); -HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, uint8_t *pTxData, uint8_t *pRxData, uint16_t Size, - uint32_t Timeout); -HAL_StatusTypeDef HAL_SPI_Transmit_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, const uint8_t *pTxData, uint8_t *pRxData, + uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_SPI_Transmit_IT(SPI_HandleTypeDef *hspi, const uint8_t *pData, uint16_t Size); HAL_StatusTypeDef HAL_SPI_Receive_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size); -HAL_StatusTypeDef HAL_SPI_TransmitReceive_IT(SPI_HandleTypeDef *hspi, uint8_t *pTxData, uint8_t *pRxData, +HAL_StatusTypeDef HAL_SPI_TransmitReceive_IT(SPI_HandleTypeDef *hspi, const uint8_t *pTxData, uint8_t *pRxData, uint16_t Size); -HAL_StatusTypeDef HAL_SPI_Transmit_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_SPI_Transmit_DMA(SPI_HandleTypeDef *hspi, const uint8_t *pData, uint16_t Size); HAL_StatusTypeDef HAL_SPI_Receive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size); -HAL_StatusTypeDef HAL_SPI_TransmitReceive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pTxData, uint8_t *pRxData, +HAL_StatusTypeDef HAL_SPI_TransmitReceive_DMA(SPI_HandleTypeDef *hspi, const uint8_t *pTxData, uint8_t *pRxData, uint16_t Size); HAL_StatusTypeDef HAL_SPI_DMAPause(SPI_HandleTypeDef *hspi); HAL_StatusTypeDef HAL_SPI_DMAResume(SPI_HandleTypeDef *hspi); @@ -699,8 +703,8 @@ void HAL_SPI_AbortCpltCallback(SPI_HandleTypeDef *hspi); * @{ */ /* Peripheral State and Error functions ***************************************/ -HAL_SPI_StateTypeDef HAL_SPI_GetState(SPI_HandleTypeDef *hspi); -uint32_t HAL_SPI_GetError(SPI_HandleTypeDef *hspi); +HAL_SPI_StateTypeDef HAL_SPI_GetState(const SPI_HandleTypeDef *hspi); +uint32_t HAL_SPI_GetError(const SPI_HandleTypeDef *hspi); /** * @} */ @@ -723,4 +727,3 @@ uint32_t HAL_SPI_GetError(SPI_HandleTypeDef *hspi); #endif /* STM32F4xx_HAL_SPI_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_tim.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_tim.h index d6f1a1a6..1a8357cd 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_tim.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_tim.h @@ -6,13 +6,12 @@ 
****************************************************************************** * @attention * - *
<h2><center>&copy; Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -65,8 +64,10 @@ typedef struct This means in PWM mode that (N+1) corresponds to: - the number of PWM periods in edge-aligned mode - the number of half PWM period in center-aligned mode - GP timers: this parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFF. - Advanced timers: this parameter must be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF. */ + GP timers: this parameter must be a number between Min_Data = 0x00 and + Max_Data = 0xFF. + Advanced timers: this parameter must be a number between Min_Data = 0x0000 and + Max_Data = 0xFFFF. */ uint32_t AutoReloadPreload; /*!< Specifies the auto-reload preload. This parameter can be a value of @ref TIM_AutoReloadPreload */ @@ -218,7 +219,8 @@ typedef struct uint32_t ClearInputPolarity; /*!< TIM Clear Input polarity This parameter can be a value of @ref TIM_ClearInput_Polarity */ uint32_t ClearInputPrescaler; /*!< TIM Clear Input prescaler - This parameter must be 0: When OCRef clear feature is used with ETR source, ETR prescaler must be off */ + This parameter must be 0: When OCRef clear feature is used with ETR source, + ETR prescaler must be off */ uint32_t ClearInputFilter; /*!< TIM Clear Input filter This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ } TIM_ClearInputConfigTypeDef; @@ -264,22 +266,22 @@ typedef struct */ typedef struct { - uint32_t OffStateRunMode; /*!< TIM off state in run mode - This parameter can be a value of @ref TIM_OSSR_Off_State_Selection_for_Run_mode_state */ - uint32_t OffStateIDLEMode; /*!< TIM off state in IDLE mode - This parameter can be a value of @ref TIM_OSSI_Off_State_Selection_for_Idle_mode_state */ - uint32_t LockLevel; /*!< TIM Lock level - This parameter can be a value of @ref TIM_Lock_level */ - uint32_t DeadTime; /*!< TIM dead Time - This parameter can be a number between Min_Data = 0x00 and Max_Data = 0xFF */ - uint32_t BreakState; /*!< TIM Break State - This parameter can be a value of @ref TIM_Break_Input_enable_disable */ - uint32_t BreakPolarity; /*!< TIM Break input polarity - This parameter can be a value of @ref TIM_Break_Polarity */ - uint32_t BreakFilter; /*!< Specifies the break input filter. 
- This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ - uint32_t AutomaticOutput; /*!< TIM Automatic Output Enable state - This parameter can be a value of @ref TIM_AOE_Bit_Set_Reset */ + uint32_t OffStateRunMode; /*!< TIM off state in run mode, This parameter can be a value of @ref TIM_OSSR_Off_State_Selection_for_Run_mode_state */ + + uint32_t OffStateIDLEMode; /*!< TIM off state in IDLE mode, This parameter can be a value of @ref TIM_OSSI_Off_State_Selection_for_Idle_mode_state */ + + uint32_t LockLevel; /*!< TIM Lock level, This parameter can be a value of @ref TIM_Lock_level */ + + uint32_t DeadTime; /*!< TIM dead Time, This parameter can be a number between Min_Data = 0x00 and Max_Data = 0xFF */ + + uint32_t BreakState; /*!< TIM Break State, This parameter can be a value of @ref TIM_Break_Input_enable_disable */ + + uint32_t BreakPolarity; /*!< TIM Break input polarity, This parameter can be a value of @ref TIM_Break_Polarity */ + + uint32_t BreakFilter; /*!< Specifies the break input filter.This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + + uint32_t AutomaticOutput; /*!< TIM Automatic Output Enable state, This parameter can be a value of @ref TIM_AOE_Bit_Set_Reset */ + } TIM_BreakDeadTimeConfigTypeDef; /** @@ -294,6 +296,26 @@ typedef enum HAL_TIM_STATE_ERROR = 0x04U /*!< Reception process is ongoing */ } HAL_TIM_StateTypeDef; +/** + * @brief TIM Channel States definition + */ +typedef enum +{ + HAL_TIM_CHANNEL_STATE_RESET = 0x00U, /*!< TIM Channel initial state */ + HAL_TIM_CHANNEL_STATE_READY = 0x01U, /*!< TIM Channel ready for use */ + HAL_TIM_CHANNEL_STATE_BUSY = 0x02U, /*!< An internal process is ongoing on the TIM channel */ +} HAL_TIM_ChannelStateTypeDef; + +/** + * @brief DMA Burst States definition + */ +typedef enum +{ + HAL_DMA_BURST_STATE_RESET = 0x00U, /*!< DMA Burst initial state */ + HAL_DMA_BURST_STATE_READY = 0x01U, /*!< DMA Burst ready for use */ + HAL_DMA_BURST_STATE_BUSY = 0x02U, /*!< Ongoing DMA Burst */ +} HAL_TIM_DMABurstStateTypeDef; + /** * @brief HAL Active channel structures definition */ @@ -315,13 +337,16 @@ typedef struct __TIM_HandleTypeDef typedef struct #endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ { - TIM_TypeDef *Instance; /*!< Register base address */ - TIM_Base_InitTypeDef Init; /*!< TIM Time Base required parameters */ - HAL_TIM_ActiveChannel Channel; /*!< Active channel */ - DMA_HandleTypeDef *hdma[7]; /*!< DMA Handlers array - This array is accessed by a @ref DMA_Handle_index */ - HAL_LockTypeDef Lock; /*!< Locking object */ - __IO HAL_TIM_StateTypeDef State; /*!< TIM operation state */ + TIM_TypeDef *Instance; /*!< Register base address */ + TIM_Base_InitTypeDef Init; /*!< TIM Time Base required parameters */ + HAL_TIM_ActiveChannel Channel; /*!< Active channel */ + DMA_HandleTypeDef *hdma[7]; /*!< DMA Handlers array + This array is accessed by a @ref DMA_Handle_index */ + HAL_LockTypeDef Lock; /*!< Locking object */ + __IO HAL_TIM_StateTypeDef State; /*!< TIM operation state */ + __IO HAL_TIM_ChannelStateTypeDef ChannelState[4]; /*!< TIM channel operation state */ + __IO HAL_TIM_ChannelStateTypeDef ChannelNState[4]; /*!< TIM complementary channel operation state */ + __IO HAL_TIM_DMABurstStateTypeDef DMABurstState; /*!< DMA burst operation state */ #if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) void (* Base_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Base Msp Init Callback */ @@ -360,34 +385,33 @@ typedef struct */ typedef enum { - HAL_TIM_BASE_MSPINIT_CB_ID = 0x00U /*!< TIM Base 
MspInit Callback ID */ - ,HAL_TIM_BASE_MSPDEINIT_CB_ID = 0x01U /*!< TIM Base MspDeInit Callback ID */ - ,HAL_TIM_IC_MSPINIT_CB_ID = 0x02U /*!< TIM IC MspInit Callback ID */ - ,HAL_TIM_IC_MSPDEINIT_CB_ID = 0x03U /*!< TIM IC MspDeInit Callback ID */ - ,HAL_TIM_OC_MSPINIT_CB_ID = 0x04U /*!< TIM OC MspInit Callback ID */ - ,HAL_TIM_OC_MSPDEINIT_CB_ID = 0x05U /*!< TIM OC MspDeInit Callback ID */ - ,HAL_TIM_PWM_MSPINIT_CB_ID = 0x06U /*!< TIM PWM MspInit Callback ID */ - ,HAL_TIM_PWM_MSPDEINIT_CB_ID = 0x07U /*!< TIM PWM MspDeInit Callback ID */ - ,HAL_TIM_ONE_PULSE_MSPINIT_CB_ID = 0x08U /*!< TIM One Pulse MspInit Callback ID */ - ,HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID = 0x09U /*!< TIM One Pulse MspDeInit Callback ID */ - ,HAL_TIM_ENCODER_MSPINIT_CB_ID = 0x0AU /*!< TIM Encoder MspInit Callback ID */ - ,HAL_TIM_ENCODER_MSPDEINIT_CB_ID = 0x0BU /*!< TIM Encoder MspDeInit Callback ID */ - ,HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID = 0x0CU /*!< TIM Hall Sensor MspDeInit Callback ID */ - ,HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID = 0x0DU /*!< TIM Hall Sensor MspDeInit Callback ID */ - ,HAL_TIM_PERIOD_ELAPSED_CB_ID = 0x0EU /*!< TIM Period Elapsed Callback ID */ - ,HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID = 0x0FU /*!< TIM Period Elapsed half complete Callback ID */ - ,HAL_TIM_TRIGGER_CB_ID = 0x10U /*!< TIM Trigger Callback ID */ - ,HAL_TIM_TRIGGER_HALF_CB_ID = 0x11U /*!< TIM Trigger half complete Callback ID */ - - ,HAL_TIM_IC_CAPTURE_CB_ID = 0x12U /*!< TIM Input Capture Callback ID */ - ,HAL_TIM_IC_CAPTURE_HALF_CB_ID = 0x13U /*!< TIM Input Capture half complete Callback ID */ - ,HAL_TIM_OC_DELAY_ELAPSED_CB_ID = 0x14U /*!< TIM Output Compare Delay Elapsed Callback ID */ - ,HAL_TIM_PWM_PULSE_FINISHED_CB_ID = 0x15U /*!< TIM PWM Pulse Finished Callback ID */ - ,HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID = 0x16U /*!< TIM PWM Pulse Finished half complete Callback ID */ - ,HAL_TIM_ERROR_CB_ID = 0x17U /*!< TIM Error Callback ID */ - ,HAL_TIM_COMMUTATION_CB_ID = 0x18U /*!< TIM Commutation Callback ID */ - ,HAL_TIM_COMMUTATION_HALF_CB_ID = 0x19U /*!< TIM Commutation half complete Callback ID */ - ,HAL_TIM_BREAK_CB_ID = 0x1AU /*!< TIM Break Callback ID */ + HAL_TIM_BASE_MSPINIT_CB_ID = 0x00U /*!< TIM Base MspInit Callback ID */ + , HAL_TIM_BASE_MSPDEINIT_CB_ID = 0x01U /*!< TIM Base MspDeInit Callback ID */ + , HAL_TIM_IC_MSPINIT_CB_ID = 0x02U /*!< TIM IC MspInit Callback ID */ + , HAL_TIM_IC_MSPDEINIT_CB_ID = 0x03U /*!< TIM IC MspDeInit Callback ID */ + , HAL_TIM_OC_MSPINIT_CB_ID = 0x04U /*!< TIM OC MspInit Callback ID */ + , HAL_TIM_OC_MSPDEINIT_CB_ID = 0x05U /*!< TIM OC MspDeInit Callback ID */ + , HAL_TIM_PWM_MSPINIT_CB_ID = 0x06U /*!< TIM PWM MspInit Callback ID */ + , HAL_TIM_PWM_MSPDEINIT_CB_ID = 0x07U /*!< TIM PWM MspDeInit Callback ID */ + , HAL_TIM_ONE_PULSE_MSPINIT_CB_ID = 0x08U /*!< TIM One Pulse MspInit Callback ID */ + , HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID = 0x09U /*!< TIM One Pulse MspDeInit Callback ID */ + , HAL_TIM_ENCODER_MSPINIT_CB_ID = 0x0AU /*!< TIM Encoder MspInit Callback ID */ + , HAL_TIM_ENCODER_MSPDEINIT_CB_ID = 0x0BU /*!< TIM Encoder MspDeInit Callback ID */ + , HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID = 0x0CU /*!< TIM Hall Sensor MspDeInit Callback ID */ + , HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID = 0x0DU /*!< TIM Hall Sensor MspDeInit Callback ID */ + , HAL_TIM_PERIOD_ELAPSED_CB_ID = 0x0EU /*!< TIM Period Elapsed Callback ID */ + , HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID = 0x0FU /*!< TIM Period Elapsed half complete Callback ID */ + , HAL_TIM_TRIGGER_CB_ID = 0x10U /*!< TIM Trigger Callback ID */ + , HAL_TIM_TRIGGER_HALF_CB_ID = 0x11U 
/*!< TIM Trigger half complete Callback ID */ + , HAL_TIM_IC_CAPTURE_CB_ID = 0x12U /*!< TIM Input Capture Callback ID */ + , HAL_TIM_IC_CAPTURE_HALF_CB_ID = 0x13U /*!< TIM Input Capture half complete Callback ID */ + , HAL_TIM_OC_DELAY_ELAPSED_CB_ID = 0x14U /*!< TIM Output Compare Delay Elapsed Callback ID */ + , HAL_TIM_PWM_PULSE_FINISHED_CB_ID = 0x15U /*!< TIM PWM Pulse Finished Callback ID */ + , HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID = 0x16U /*!< TIM PWM Pulse Finished half complete Callback ID */ + , HAL_TIM_ERROR_CB_ID = 0x17U /*!< TIM Error Callback ID */ + , HAL_TIM_COMMUTATION_CB_ID = 0x18U /*!< TIM Commutation Callback ID */ + , HAL_TIM_COMMUTATION_HALF_CB_ID = 0x19U /*!< TIM Commutation half complete Callback ID */ + , HAL_TIM_BREAK_CB_ID = 0x1AU /*!< TIM Break Callback ID */ } HAL_TIM_CallbackIDTypeDef; /** @@ -605,10 +629,8 @@ typedef void (*pTIM_CallbackTypeDef)(TIM_HandleTypeDef *htim); /*!< pointer to /** @defgroup TIM_Input_Capture_Selection TIM Input Capture Selection * @{ */ -#define TIM_ICSELECTION_DIRECTTI TIM_CCMR1_CC1S_0 /*!< TIM Input 1, 2, 3 or 4 is selected to be - connected to IC1, IC2, IC3 or IC4, respectively */ -#define TIM_ICSELECTION_INDIRECTTI TIM_CCMR1_CC1S_1 /*!< TIM Input 1, 2, 3 or 4 is selected to be - connected to IC2, IC1, IC4 or IC3, respectively */ +#define TIM_ICSELECTION_DIRECTTI TIM_CCMR1_CC1S_0 /*!< TIM Input 1, 2, 3 or 4 is selected to be connected to IC1, IC2, IC3 or IC4, respectively */ +#define TIM_ICSELECTION_INDIRECTTI TIM_CCMR1_CC1S_1 /*!< TIM Input 1, 2, 3 or 4 is selected to be connected to IC2, IC1, IC4 or IC3, respectively */ #define TIM_ICSELECTION_TRC TIM_CCMR1_CC1S /*!< TIM Input 1, 2, 3 or 4 is selected to be connected to TRC */ /** * @} @@ -682,6 +704,15 @@ typedef void (*pTIM_CallbackTypeDef)(TIM_HandleTypeDef *htim); /*!< pointer to * @} */ +/** @defgroup TIM_CC_DMA_Request CCx DMA request selection + * @{ + */ +#define TIM_CCDMAREQUEST_CC 0x00000000U /*!< CCx DMA request sent when capture or compare match event occurs */ +#define TIM_CCDMAREQUEST_UPDATE TIM_CR2_CCDS /*!< CCx DMA requests sent when update event occurs */ +/** + * @} + */ + /** @defgroup TIM_Flag_definition TIM Flag Definition * @{ */ @@ -716,16 +747,16 @@ typedef void (*pTIM_CallbackTypeDef)(TIM_HandleTypeDef *htim); /*!< pointer to /** @defgroup TIM_Clock_Source TIM Clock Source * @{ */ -#define TIM_CLOCKSOURCE_ETRMODE2 TIM_SMCR_ETPS_1 /*!< External clock source mode 2 */ #define TIM_CLOCKSOURCE_INTERNAL TIM_SMCR_ETPS_0 /*!< Internal clock source */ +#define TIM_CLOCKSOURCE_ETRMODE1 TIM_TS_ETRF /*!< External clock source mode 1 (ETRF) */ +#define TIM_CLOCKSOURCE_ETRMODE2 TIM_SMCR_ETPS_1 /*!< External clock source mode 2 */ +#define TIM_CLOCKSOURCE_TI1ED TIM_TS_TI1F_ED /*!< External clock source mode 1 (TTI1FP1 + edge detect.) */ +#define TIM_CLOCKSOURCE_TI1 TIM_TS_TI1FP1 /*!< External clock source mode 1 (TTI1FP1) */ +#define TIM_CLOCKSOURCE_TI2 TIM_TS_TI2FP2 /*!< External clock source mode 1 (TTI2FP2) */ #define TIM_CLOCKSOURCE_ITR0 TIM_TS_ITR0 /*!< External clock source mode 1 (ITR0) */ #define TIM_CLOCKSOURCE_ITR1 TIM_TS_ITR1 /*!< External clock source mode 1 (ITR1) */ #define TIM_CLOCKSOURCE_ITR2 TIM_TS_ITR2 /*!< External clock source mode 1 (ITR2) */ #define TIM_CLOCKSOURCE_ITR3 TIM_TS_ITR3 /*!< External clock source mode 1 (ITR3) */ -#define TIM_CLOCKSOURCE_TI1ED TIM_TS_TI1F_ED /*!< External clock source mode 1 (TTI1FP1 + edge detect.) 
*/ -#define TIM_CLOCKSOURCE_TI1 TIM_TS_TI1FP1 /*!< External clock source mode 1 (TTI1FP1) */ -#define TIM_CLOCKSOURCE_TI2 TIM_TS_TI2FP2 /*!< External clock source mode 1 (TTI2FP2) */ -#define TIM_CLOCKSOURCE_ETRMODE1 TIM_TS_ETRF /*!< External clock source mode 1 (ETRF) */ /** * @} */ @@ -823,8 +854,7 @@ typedef void (*pTIM_CallbackTypeDef)(TIM_HandleTypeDef *htim); /*!< pointer to * @{ */ #define TIM_AUTOMATICOUTPUT_DISABLE 0x00000000U /*!< MOE can be set only by software */ -#define TIM_AUTOMATICOUTPUT_ENABLE TIM_BDTR_AOE /*!< MOE can be set by software or automatically at the next update event - (if none of the break inputs BRK and BRK2 is active) */ +#define TIM_AUTOMATICOUTPUT_ENABLE TIM_BDTR_AOE /*!< MOE can be set by software or automatically at the next update event (if none of the break inputs BRK and BRK2 is active) */ /** * @} */ @@ -931,24 +961,24 @@ typedef void (*pTIM_CallbackTypeDef)(TIM_HandleTypeDef *htim); /*!< pointer to /** @defgroup TIM_DMA_Burst_Length TIM DMA Burst Length * @{ */ -#define TIM_DMABURSTLENGTH_1TRANSFER 0x00000000U /*!< The transfer is done to 1 register starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_2TRANSFERS 0x00000100U /*!< The transfer is done to 2 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_3TRANSFERS 0x00000200U /*!< The transfer is done to 3 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_4TRANSFERS 0x00000300U /*!< The transfer is done to 4 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_5TRANSFERS 0x00000400U /*!< The transfer is done to 5 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_6TRANSFERS 0x00000500U /*!< The transfer is done to 6 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_7TRANSFERS 0x00000600U /*!< The transfer is done to 7 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_8TRANSFERS 0x00000700U /*!< The transfer is done to 8 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_9TRANSFERS 0x00000800U /*!< The transfer is done to 9 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_10TRANSFERS 0x00000900U /*!< The transfer is done to 10 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_11TRANSFERS 0x00000A00U /*!< The transfer is done to 11 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_12TRANSFERS 0x00000B00U /*!< The transfer is done to 12 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_13TRANSFERS 0x00000C00U /*!< The transfer is done to 13 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_14TRANSFERS 0x00000D00U /*!< The transfer is done to 14 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_15TRANSFERS 0x00000E00U /*!< The transfer is done to 15 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_16TRANSFERS 0x00000F00U /*!< The transfer is done to 16 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_17TRANSFERS 0x00001000U /*!< The transfer is done to 17 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ -#define TIM_DMABURSTLENGTH_18TRANSFERS 0x00001100U /*!< The transfer is done to 18 registers starting trom TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_1TRANSFER 0x00000000U /*!< The transfer is done to 1 register starting from TIMx_CR1 + TIMx_DCR.DBA 
*/ +#define TIM_DMABURSTLENGTH_2TRANSFERS 0x00000100U /*!< The transfer is done to 2 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_3TRANSFERS 0x00000200U /*!< The transfer is done to 3 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_4TRANSFERS 0x00000300U /*!< The transfer is done to 4 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_5TRANSFERS 0x00000400U /*!< The transfer is done to 5 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_6TRANSFERS 0x00000500U /*!< The transfer is done to 6 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_7TRANSFERS 0x00000600U /*!< The transfer is done to 7 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_8TRANSFERS 0x00000700U /*!< The transfer is done to 8 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_9TRANSFERS 0x00000800U /*!< The transfer is done to 9 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_10TRANSFERS 0x00000900U /*!< The transfer is done to 10 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_11TRANSFERS 0x00000A00U /*!< The transfer is done to 11 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_12TRANSFERS 0x00000B00U /*!< The transfer is done to 12 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_13TRANSFERS 0x00000C00U /*!< The transfer is done to 13 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_14TRANSFERS 0x00000D00U /*!< The transfer is done to 14 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_15TRANSFERS 0x00000E00U /*!< The transfer is done to 15 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_16TRANSFERS 0x00000F00U /*!< The transfer is done to 16 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_17TRANSFERS 0x00001000U /*!< The transfer is done to 17 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_18TRANSFERS 0x00001100U /*!< The transfer is done to 18 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ /** * @} */ @@ -993,25 +1023,45 @@ typedef void (*pTIM_CallbackTypeDef)(TIM_HandleTypeDef *htim); /*!< pointer to * @retval None */ #if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) -#define __HAL_TIM_RESET_HANDLE_STATE(__HANDLE__) do { \ - (__HANDLE__)->State = HAL_TIM_STATE_RESET; \ - (__HANDLE__)->Base_MspInitCallback = NULL; \ - (__HANDLE__)->Base_MspDeInitCallback = NULL; \ - (__HANDLE__)->IC_MspInitCallback = NULL; \ - (__HANDLE__)->IC_MspDeInitCallback = NULL; \ - (__HANDLE__)->OC_MspInitCallback = NULL; \ - (__HANDLE__)->OC_MspDeInitCallback = NULL; \ - (__HANDLE__)->PWM_MspInitCallback = NULL; \ - (__HANDLE__)->PWM_MspDeInitCallback = NULL; \ - (__HANDLE__)->OnePulse_MspInitCallback = NULL; \ - (__HANDLE__)->OnePulse_MspDeInitCallback = NULL; \ - (__HANDLE__)->Encoder_MspInitCallback = NULL; \ - (__HANDLE__)->Encoder_MspDeInitCallback = NULL; \ - (__HANDLE__)->HallSensor_MspInitCallback = NULL; \ - (__HANDLE__)->HallSensor_MspDeInitCallback = NULL; \ +#define __HAL_TIM_RESET_HANDLE_STATE(__HANDLE__) do { \ + (__HANDLE__)->State = HAL_TIM_STATE_RESET; \ + (__HANDLE__)->ChannelState[0] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[1] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[2] = HAL_TIM_CHANNEL_STATE_RESET; \ + 
(__HANDLE__)->ChannelState[3] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[0] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[1] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[2] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[3] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->DMABurstState = HAL_DMA_BURST_STATE_RESET; \ + (__HANDLE__)->Base_MspInitCallback = NULL; \ + (__HANDLE__)->Base_MspDeInitCallback = NULL; \ + (__HANDLE__)->IC_MspInitCallback = NULL; \ + (__HANDLE__)->IC_MspDeInitCallback = NULL; \ + (__HANDLE__)->OC_MspInitCallback = NULL; \ + (__HANDLE__)->OC_MspDeInitCallback = NULL; \ + (__HANDLE__)->PWM_MspInitCallback = NULL; \ + (__HANDLE__)->PWM_MspDeInitCallback = NULL; \ + (__HANDLE__)->OnePulse_MspInitCallback = NULL; \ + (__HANDLE__)->OnePulse_MspDeInitCallback = NULL; \ + (__HANDLE__)->Encoder_MspInitCallback = NULL; \ + (__HANDLE__)->Encoder_MspDeInitCallback = NULL; \ + (__HANDLE__)->HallSensor_MspInitCallback = NULL; \ + (__HANDLE__)->HallSensor_MspDeInitCallback = NULL; \ } while(0) #else -#define __HAL_TIM_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_TIM_STATE_RESET) +#define __HAL_TIM_RESET_HANDLE_STATE(__HANDLE__) do { \ + (__HANDLE__)->State = HAL_TIM_STATE_RESET; \ + (__HANDLE__)->ChannelState[0] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[1] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[2] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[3] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[0] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[1] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[2] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[3] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->DMABurstState = HAL_DMA_BURST_STATE_RESET; \ + } while(0) #endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ /** @@ -1048,7 +1098,8 @@ typedef void (*pTIM_CallbackTypeDef)(TIM_HandleTypeDef *htim); /*!< pointer to * @brief Disable the TIM main Output. * @param __HANDLE__ TIM handle * @retval None - * @note The Main Output Enable of a timer instance is disabled only if all the CCx and CCxN channels have been disabled + * @note The Main Output Enable of a timer instance is disabled only if all the CCx and CCxN channels have been + * disabled */ #define __HAL_TIM_MOE_DISABLE(__HANDLE__) \ do { \ @@ -1209,8 +1260,8 @@ typedef void (*pTIM_CallbackTypeDef)(TIM_HandleTypeDef *htim); /*!< pointer to * @brief Indicates whether or not the TIM Counter is used as downcounter. * @param __HANDLE__ TIM handle. * @retval False (Counter used as upcounter) or True (Counter used as downcounter) - * @note This macro is particularly useful to get the counting mode when the timer operates in Center-aligned mode or Encoder -mode. + * @note This macro is particularly useful to get the counting mode when the timer operates in Center-aligned mode + * or Encoder mode. */ #define __HAL_TIM_IS_TIM_COUNTING_DOWN(__HANDLE__) (((__HANDLE__)->Instance->CR1 &(TIM_CR1_DIR)) == (TIM_CR1_DIR)) @@ -1284,7 +1335,8 @@ mode. #define __HAL_TIM_GET_CLOCKDIVISION(__HANDLE__) ((__HANDLE__)->Instance->CR1 & TIM_CR1_CKD) /** - * @brief Set the TIM Input Capture prescaler on runtime without calling another time HAL_TIM_IC_ConfigChannel() function. + * @brief Set the TIM Input Capture prescaler on runtime without calling another time HAL_TIM_IC_ConfigChannel() + * function. * @param __HANDLE__ TIM handle. 
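Since __HAL_TIM_RESET_HANDLE_STATE above now also clears the per-channel and DMA-burst state fields, a typical call site is right before (re)initializing a timer handle; a minimal sketch (handle name, instance and numeric values are illustrative) follows:

#include "stm32f4xx_hal.h"

TIM_HandleTypeDef htim3;   /* illustrative handle */

void App_Tim3Reinit(void)
{
  /* Return the handle software state to reset, including the new ChannelState[],
     ChannelNState[] and DMABurstState fields, before re-running the init. */
  __HAL_TIM_RESET_HANDLE_STATE(&htim3);

  htim3.Instance               = TIM3;
  htim3.Init.Prescaler         = 83U;                          /* illustrative */
  htim3.Init.CounterMode       = TIM_COUNTERMODE_UP;
  htim3.Init.Period            = 999U;                         /* non-zero, fits 16 bits */
  htim3.Init.ClockDivision     = TIM_CLOCKDIVISION_DIV1;
  htim3.Init.AutoReloadPreload = TIM_AUTORELOAD_PRELOAD_DISABLE;

  if (HAL_TIM_Base_Init(&htim3) != HAL_OK)
  {
    /* Error handling is application specific. */
  }
}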
* @param __CHANNEL__ TIM Channels to be configured. * This parameter can be one of the following values: @@ -1482,6 +1534,17 @@ mode. TIM_SET_CAPTUREPOLARITY((__HANDLE__), (__CHANNEL__), (__POLARITY__)); \ }while(0) +/** @brief Select the Capture/compare DMA request source. + * @param __HANDLE__ specifies the TIM Handle. + * @param __CCDMA__ specifies Capture/compare DMA request source + * This parameter can be one of the following values: + * @arg TIM_CCDMAREQUEST_CC: CCx DMA request generated on Capture/Compare event + * @arg TIM_CCDMAREQUEST_UPDATE: CCx DMA request generated on Update event + * @retval None + */ +#define __HAL_TIM_SELECT_CCDMAREQUEST(__HANDLE__, __CCDMA__) \ + MODIFY_REG((__HANDLE__)->Instance->CR2, TIM_CR2_CCDS, (__CCDMA__)) + /** * @} */ @@ -1590,20 +1653,24 @@ mode. #define IS_TIM_OPM_CHANNELS(__CHANNEL__) (((__CHANNEL__) == TIM_CHANNEL_1) || \ ((__CHANNEL__) == TIM_CHANNEL_2)) +#define IS_TIM_PERIOD(__HANDLE__, __PERIOD__) ((IS_TIM_32B_COUNTER_INSTANCE(((__HANDLE__)->Instance)) == 0U) ? \ + (((__PERIOD__) > 0U) && ((__PERIOD__) <= 0x0000FFFFU)) : \ + ((__PERIOD__) > 0U)) + #define IS_TIM_COMPLEMENTARY_CHANNELS(__CHANNEL__) (((__CHANNEL__) == TIM_CHANNEL_1) || \ ((__CHANNEL__) == TIM_CHANNEL_2) || \ ((__CHANNEL__) == TIM_CHANNEL_3)) #define IS_TIM_CLOCKSOURCE(__CLOCK__) (((__CLOCK__) == TIM_CLOCKSOURCE_INTERNAL) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ETRMODE1) || \ ((__CLOCK__) == TIM_CLOCKSOURCE_ETRMODE2) || \ - ((__CLOCK__) == TIM_CLOCKSOURCE_ITR0) || \ - ((__CLOCK__) == TIM_CLOCKSOURCE_ITR1) || \ - ((__CLOCK__) == TIM_CLOCKSOURCE_ITR2) || \ - ((__CLOCK__) == TIM_CLOCKSOURCE_ITR3) || \ ((__CLOCK__) == TIM_CLOCKSOURCE_TI1ED) || \ ((__CLOCK__) == TIM_CLOCKSOURCE_TI1) || \ ((__CLOCK__) == TIM_CLOCKSOURCE_TI2) || \ - ((__CLOCK__) == TIM_CLOCKSOURCE_ETRMODE1)) + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR0) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR1) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR2) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR3)) #define IS_TIM_CLOCKPOLARITY(__POLARITY__) (((__POLARITY__) == TIM_CLOCKPOLARITY_INVERTED) || \ ((__POLARITY__) == TIM_CLOCKPOLARITY_NONINVERTED) || \ @@ -1641,7 +1708,6 @@ mode. #define IS_TIM_BREAK_FILTER(__BRKFILTER__) ((__BRKFILTER__) <= 0xFUL) - #define IS_TIM_BREAK_STATE(__STATE__) (((__STATE__) == TIM_BREAK_ENABLE) || \ ((__STATE__) == TIM_BREAK_DISABLE)) @@ -1679,13 +1745,13 @@ mode. ((__MODE__) == TIM_OCMODE_FORCED_ACTIVE) || \ ((__MODE__) == TIM_OCMODE_FORCED_INACTIVE)) -#define IS_TIM_TRIGGER_SELECTION(__SELECTION__) (((__SELECTION__) == TIM_TS_ITR0) || \ - ((__SELECTION__) == TIM_TS_ITR1) || \ - ((__SELECTION__) == TIM_TS_ITR2) || \ - ((__SELECTION__) == TIM_TS_ITR3) || \ +#define IS_TIM_TRIGGER_SELECTION(__SELECTION__) (((__SELECTION__) == TIM_TS_ITR0) || \ + ((__SELECTION__) == TIM_TS_ITR1) || \ + ((__SELECTION__) == TIM_TS_ITR2) || \ + ((__SELECTION__) == TIM_TS_ITR3) || \ ((__SELECTION__) == TIM_TS_TI1F_ED) || \ - ((__SELECTION__) == TIM_TS_TI1FP1) || \ - ((__SELECTION__) == TIM_TS_TI2FP2) || \ + ((__SELECTION__) == TIM_TS_TI1FP1) || \ + ((__SELECTION__) == TIM_TS_TI2FP2) || \ ((__SELECTION__) == TIM_TS_ETRF)) #define IS_TIM_INTERNAL_TRIGGEREVENT_SELECTION(__SELECTION__) (((__SELECTION__) == TIM_TS_ITR0) || \ @@ -1710,15 +1776,15 @@ mode. 
#define IS_TIM_TI1SELECTION(__TI1SELECTION__) (((__TI1SELECTION__) == TIM_TI1SELECTION_CH1) || \ ((__TI1SELECTION__) == TIM_TI1SELECTION_XORCOMBINATION)) -#define IS_TIM_DMA_LENGTH(__LENGTH__) (((__LENGTH__) == TIM_DMABURSTLENGTH_1TRANSFER) || \ - ((__LENGTH__) == TIM_DMABURSTLENGTH_2TRANSFERS) || \ - ((__LENGTH__) == TIM_DMABURSTLENGTH_3TRANSFERS) || \ - ((__LENGTH__) == TIM_DMABURSTLENGTH_4TRANSFERS) || \ - ((__LENGTH__) == TIM_DMABURSTLENGTH_5TRANSFERS) || \ - ((__LENGTH__) == TIM_DMABURSTLENGTH_6TRANSFERS) || \ - ((__LENGTH__) == TIM_DMABURSTLENGTH_7TRANSFERS) || \ - ((__LENGTH__) == TIM_DMABURSTLENGTH_8TRANSFERS) || \ - ((__LENGTH__) == TIM_DMABURSTLENGTH_9TRANSFERS) || \ +#define IS_TIM_DMA_LENGTH(__LENGTH__) (((__LENGTH__) == TIM_DMABURSTLENGTH_1TRANSFER) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_2TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_3TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_4TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_5TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_6TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_7TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_8TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_9TRANSFERS) || \ ((__LENGTH__) == TIM_DMABURSTLENGTH_10TRANSFERS) || \ ((__LENGTH__) == TIM_DMABURSTLENGTH_11TRANSFERS) || \ ((__LENGTH__) == TIM_DMABURSTLENGTH_12TRANSFERS) || \ @@ -1729,6 +1795,8 @@ mode. ((__LENGTH__) == TIM_DMABURSTLENGTH_17TRANSFERS) || \ ((__LENGTH__) == TIM_DMABURSTLENGTH_18TRANSFERS)) +#define IS_TIM_DMA_DATA_LENGTH(LENGTH) (((LENGTH) >= 0x1U) && ((LENGTH) < 0x10000U)) + #define IS_TIM_IC_FILTER(__ICFILTER__) ((__ICFILTER__) <= 0xFU) #define IS_TIM_DEADTIME(__DEADTIME__) ((__DEADTIME__) <= 0xFFU) @@ -1759,6 +1827,48 @@ mode. ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCER &= ~(TIM_CCER_CC3P | TIM_CCER_CC3NP)) :\ ((__HANDLE__)->Instance->CCER &= ~(TIM_CCER_CC4P | TIM_CCER_CC4NP))) +#define TIM_CHANNEL_STATE_GET(__HANDLE__, __CHANNEL__)\ + (((__CHANNEL__) == TIM_CHANNEL_1) ? (__HANDLE__)->ChannelState[0] :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? (__HANDLE__)->ChannelState[1] :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? (__HANDLE__)->ChannelState[2] :\ + (__HANDLE__)->ChannelState[3]) + +#define TIM_CHANNEL_STATE_SET(__HANDLE__, __CHANNEL__, __CHANNEL_STATE__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->ChannelState[0] = (__CHANNEL_STATE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->ChannelState[1] = (__CHANNEL_STATE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->ChannelState[2] = (__CHANNEL_STATE__)) :\ + ((__HANDLE__)->ChannelState[3] = (__CHANNEL_STATE__))) + +#define TIM_CHANNEL_STATE_SET_ALL(__HANDLE__, __CHANNEL_STATE__) do { \ + (__HANDLE__)->ChannelState[0] = (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelState[1] = (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelState[2] = (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelState[3] = (__CHANNEL_STATE__); \ + } while(0) + +#define TIM_CHANNEL_N_STATE_GET(__HANDLE__, __CHANNEL__)\ + (((__CHANNEL__) == TIM_CHANNEL_1) ? (__HANDLE__)->ChannelNState[0] :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? (__HANDLE__)->ChannelNState[1] :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? (__HANDLE__)->ChannelNState[2] :\ + (__HANDLE__)->ChannelNState[3]) + +#define TIM_CHANNEL_N_STATE_SET(__HANDLE__, __CHANNEL__, __CHANNEL_STATE__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->ChannelNState[0] = (__CHANNEL_STATE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? 
((__HANDLE__)->ChannelNState[1] = (__CHANNEL_STATE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->ChannelNState[2] = (__CHANNEL_STATE__)) :\ + ((__HANDLE__)->ChannelNState[3] = (__CHANNEL_STATE__))) + +#define TIM_CHANNEL_N_STATE_SET_ALL(__HANDLE__, __CHANNEL_STATE__) do { \ + (__HANDLE__)->ChannelNState[0] = \ + (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelNState[1] = \ + (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelNState[2] = \ + (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelNState[3] = \ + (__CHANNEL_STATE__); \ + } while(0) + /** * @} */ @@ -1788,7 +1898,7 @@ HAL_StatusTypeDef HAL_TIM_Base_Stop(TIM_HandleTypeDef *htim); HAL_StatusTypeDef HAL_TIM_Base_Start_IT(TIM_HandleTypeDef *htim); HAL_StatusTypeDef HAL_TIM_Base_Stop_IT(TIM_HandleTypeDef *htim); /* Non-Blocking mode: DMA */ -HAL_StatusTypeDef HAL_TIM_Base_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_Base_Start_DMA(TIM_HandleTypeDef *htim, const uint32_t *pData, uint16_t Length); HAL_StatusTypeDef HAL_TIM_Base_Stop_DMA(TIM_HandleTypeDef *htim); /** * @} @@ -1810,7 +1920,8 @@ HAL_StatusTypeDef HAL_TIM_OC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); HAL_StatusTypeDef HAL_TIM_OC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); HAL_StatusTypeDef HAL_TIM_OC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); /* Non-Blocking mode: DMA */ -HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length); HAL_StatusTypeDef HAL_TIM_OC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); /** * @} @@ -1832,7 +1943,8 @@ HAL_StatusTypeDef HAL_TIM_PWM_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); HAL_StatusTypeDef HAL_TIM_PWM_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); HAL_StatusTypeDef HAL_TIM_PWM_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); /* Non-Blocking mode: DMA */ -HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length); HAL_StatusTypeDef HAL_TIM_PWM_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); /** * @} @@ -1884,7 +1996,7 @@ HAL_StatusTypeDef HAL_TIM_OnePulse_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Out * @{ */ /* Timer Encoder functions ****************************************************/ -HAL_StatusTypeDef HAL_TIM_Encoder_Init(TIM_HandleTypeDef *htim, TIM_Encoder_InitTypeDef *sConfig); +HAL_StatusTypeDef HAL_TIM_Encoder_Init(TIM_HandleTypeDef *htim, const TIM_Encoder_InitTypeDef *sConfig); HAL_StatusTypeDef HAL_TIM_Encoder_DeInit(TIM_HandleTypeDef *htim); void HAL_TIM_Encoder_MspInit(TIM_HandleTypeDef *htim); void HAL_TIM_Encoder_MspDeInit(TIM_HandleTypeDef *htim); @@ -1917,25 +2029,36 @@ void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim); * @{ */ /* Control functions *********************************************************/ -HAL_StatusTypeDef HAL_TIM_OC_ConfigChannel(TIM_HandleTypeDef *htim, TIM_OC_InitTypeDef *sConfig, uint32_t Channel); -HAL_StatusTypeDef HAL_TIM_PWM_ConfigChannel(TIM_HandleTypeDef *htim, TIM_OC_InitTypeDef *sConfig, uint32_t Channel); -HAL_StatusTypeDef HAL_TIM_IC_ConfigChannel(TIM_HandleTypeDef *htim, TIM_IC_InitTypeDef *sConfig, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_OC_ConfigChannel(TIM_HandleTypeDef *htim, const TIM_OC_InitTypeDef *sConfig, + 
uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_PWM_ConfigChannel(TIM_HandleTypeDef *htim, const TIM_OC_InitTypeDef *sConfig, + uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_IC_ConfigChannel(TIM_HandleTypeDef *htim, const TIM_IC_InitTypeDef *sConfig, + uint32_t Channel); HAL_StatusTypeDef HAL_TIM_OnePulse_ConfigChannel(TIM_HandleTypeDef *htim, TIM_OnePulse_InitTypeDef *sConfig, uint32_t OutputChannel, uint32_t InputChannel); -HAL_StatusTypeDef HAL_TIM_ConfigOCrefClear(TIM_HandleTypeDef *htim, TIM_ClearInputConfigTypeDef *sClearInputConfig, +HAL_StatusTypeDef HAL_TIM_ConfigOCrefClear(TIM_HandleTypeDef *htim, + const TIM_ClearInputConfigTypeDef *sClearInputConfig, uint32_t Channel); -HAL_StatusTypeDef HAL_TIM_ConfigClockSource(TIM_HandleTypeDef *htim, TIM_ClockConfigTypeDef *sClockSourceConfig); +HAL_StatusTypeDef HAL_TIM_ConfigClockSource(TIM_HandleTypeDef *htim, const TIM_ClockConfigTypeDef *sClockSourceConfig); HAL_StatusTypeDef HAL_TIM_ConfigTI1Input(TIM_HandleTypeDef *htim, uint32_t TI1_Selection); -HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro(TIM_HandleTypeDef *htim, TIM_SlaveConfigTypeDef *sSlaveConfig); -HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro_IT(TIM_HandleTypeDef *htim, TIM_SlaveConfigTypeDef *sSlaveConfig); +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro(TIM_HandleTypeDef *htim, const TIM_SlaveConfigTypeDef *sSlaveConfig); +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro_IT(TIM_HandleTypeDef *htim, const TIM_SlaveConfigTypeDef *sSlaveConfig); HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, - uint32_t BurstRequestSrc, uint32_t *BurstBuffer, uint32_t BurstLength); + uint32_t BurstRequestSrc, const uint32_t *BurstBuffer, + uint32_t BurstLength); +HAL_StatusTypeDef HAL_TIM_DMABurst_MultiWriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, const uint32_t *BurstBuffer, + uint32_t BurstLength, uint32_t DataLength); HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc); HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, uint32_t BurstRequestSrc, uint32_t *BurstBuffer, uint32_t BurstLength); +HAL_StatusTypeDef HAL_TIM_DMABurst_MultiReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, + uint32_t BurstLength, uint32_t DataLength); HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc); HAL_StatusTypeDef HAL_TIM_GenerateEvent(TIM_HandleTypeDef *htim, uint32_t EventSource); -uint32_t HAL_TIM_ReadCapturedValue(TIM_HandleTypeDef *htim, uint32_t Channel); +uint32_t HAL_TIM_ReadCapturedValue(const TIM_HandleTypeDef *htim, uint32_t Channel); /** * @} */ @@ -1972,12 +2095,17 @@ HAL_StatusTypeDef HAL_TIM_UnRegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_Ca * @{ */ /* Peripheral State functions ************************************************/ -HAL_TIM_StateTypeDef HAL_TIM_Base_GetState(TIM_HandleTypeDef *htim); -HAL_TIM_StateTypeDef HAL_TIM_OC_GetState(TIM_HandleTypeDef *htim); -HAL_TIM_StateTypeDef HAL_TIM_PWM_GetState(TIM_HandleTypeDef *htim); -HAL_TIM_StateTypeDef HAL_TIM_IC_GetState(TIM_HandleTypeDef *htim); -HAL_TIM_StateTypeDef HAL_TIM_OnePulse_GetState(TIM_HandleTypeDef *htim); -HAL_TIM_StateTypeDef HAL_TIM_Encoder_GetState(TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_Base_GetState(const TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_OC_GetState(const TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef 
HAL_TIM_PWM_GetState(const TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_IC_GetState(const TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_OnePulse_GetState(const TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_Encoder_GetState(const TIM_HandleTypeDef *htim); + +/* Peripheral Channel state functions ************************************************/ +HAL_TIM_ActiveChannel HAL_TIM_GetActiveChannel(const TIM_HandleTypeDef *htim); +HAL_TIM_ChannelStateTypeDef HAL_TIM_GetChannelState(const TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_TIM_DMABurstStateTypeDef HAL_TIM_DMABurstState(const TIM_HandleTypeDef *htim); /** * @} */ @@ -1991,13 +2119,12 @@ HAL_TIM_StateTypeDef HAL_TIM_Encoder_GetState(TIM_HandleTypeDef *htim); /** @defgroup TIM_Private_Functions TIM Private Functions * @{ */ -void TIM_Base_SetConfig(TIM_TypeDef *TIMx, TIM_Base_InitTypeDef *Structure); +void TIM_Base_SetConfig(TIM_TypeDef *TIMx, const TIM_Base_InitTypeDef *Structure); void TIM_TI1_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, uint32_t TIM_ICFilter); -void TIM_OC2_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); +void TIM_OC2_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config); void TIM_ETR_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ExtTRGPrescaler, uint32_t TIM_ExtTRGPolarity, uint32_t ExtTRGFilter); -void TIM_DMADelayPulseCplt(DMA_HandleTypeDef *hdma); void TIM_DMADelayPulseHalfCplt(DMA_HandleTypeDef *hdma); void TIM_DMAError(DMA_HandleTypeDef *hdma); void TIM_DMACaptureCplt(DMA_HandleTypeDef *hdma); @@ -2026,5 +2153,3 @@ void TIM_ResetCallback(TIM_HandleTypeDef *htim); #endif #endif /* STM32F4xx_HAL_TIM_H */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_tim_ex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_tim_ex.h index 88ce2817..561e9bbe 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_tim_ex.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_tim_ex.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
<h2><center>&copy; Copyright (c) 2016 STMicroelectronics. - * All rights reserved.</center></h2>
+ * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -75,9 +74,8 @@ typedef struct #if defined (TIM2) #if defined(TIM8) #define TIM_TIM2_TIM8_TRGO 0x00000000U /*!< TIM2 ITR1 is connected to TIM8 TRGO */ -#else -#define TIM_TIM2_ETH_PTP TIM_OR_ITR1_RMP_0 /*!< TIM2 ITR1 is connected to PTP trigger output */ #endif /* TIM8 */ +#define TIM_TIM2_ETH_PTP TIM_OR_ITR1_RMP_0 /*!< TIM2 ITR1 is connected to PTP trigger output */ #define TIM_TIM2_USBFS_SOF TIM_OR_ITR1_RMP_1 /*!< TIM2 ITR1 is connected to OTG FS SOF */ #define TIM_TIM2_USBHS_SOF (TIM_OR_ITR1_RMP_1 | TIM_OR_ITR1_RMP_0) /*!< TIM2 ITR1 is connected to OTG HS SOF */ #endif /* TIM2 */ @@ -129,66 +127,66 @@ typedef struct * @{ */ #if defined(SPDIFRX) -#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ - ((((INSTANCE) == TIM2) && (((TIM_REMAP) == TIM_TIM2_TIM8_TRGO) || \ - ((TIM_REMAP) == TIM_TIM2_USBFS_SOF) || \ - ((TIM_REMAP) == TIM_TIM2_USBHS_SOF))) || \ - (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_GPIO) || \ - ((TIM_REMAP) == TIM_TIM5_LSI) || \ - ((TIM_REMAP) == TIM_TIM5_LSE) || \ - ((TIM_REMAP) == TIM_TIM5_RTC))) || \ - (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ - ((TIM_REMAP) == TIM_TIM11_SPDIFRX) || \ - ((TIM_REMAP) == TIM_TIM11_HSE)))) +#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ + ((((INSTANCE) == TIM2) && (((TIM_REMAP) == TIM_TIM2_TIM8_TRGO) || \ + ((TIM_REMAP) == TIM_TIM2_USBFS_SOF) || \ + ((TIM_REMAP) == TIM_TIM2_USBHS_SOF))) || \ + (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_GPIO) || \ + ((TIM_REMAP) == TIM_TIM5_LSI) || \ + ((TIM_REMAP) == TIM_TIM5_LSE) || \ + ((TIM_REMAP) == TIM_TIM5_RTC))) || \ + (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ + ((TIM_REMAP) == TIM_TIM11_SPDIFRX) || \ + ((TIM_REMAP) == TIM_TIM11_HSE)))) #elif defined(TIM2) #if defined(LPTIM_OR_TIM1_ITR2_RMP) && defined(LPTIM_OR_TIM5_ITR1_RMP) && defined(LPTIM_OR_TIM5_ITR1_RMP) -#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ - ((((INSTANCE) == TIM2) && (((TIM_REMAP) == TIM_TIM2_TIM8_TRGO) || \ - ((TIM_REMAP) == TIM_TIM2_USBFS_SOF) || \ - ((TIM_REMAP) == TIM_TIM2_USBHS_SOF))) || \ - (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_GPIO) || \ - ((TIM_REMAP) == TIM_TIM5_LSI) || \ - ((TIM_REMAP) == TIM_TIM5_LSE) || \ - ((TIM_REMAP) == TIM_TIM5_RTC))) || \ - (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ - ((TIM_REMAP) == TIM_TIM11_HSE))) || \ - (((INSTANCE) == TIM1) && (((TIM_REMAP) == TIM_TIM1_TIM3_TRGO) || \ - ((TIM_REMAP) == TIM_TIM1_LPTIM))) || \ - (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_TIM3_TRGO) || \ - ((TIM_REMAP) == TIM_TIM5_LPTIM))) || \ - (((INSTANCE) == TIM9) && (((TIM_REMAP) == TIM_TIM9_TIM3_TRGO) || \ - ((TIM_REMAP) == TIM_TIM9_LPTIM)))) +#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ + ((((INSTANCE) == TIM2) && (((TIM_REMAP) == TIM_TIM2_TIM8_TRGO) || \ + ((TIM_REMAP) == TIM_TIM2_USBFS_SOF) || \ + ((TIM_REMAP) == TIM_TIM2_USBHS_SOF))) || \ + (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_GPIO) || \ + ((TIM_REMAP) == TIM_TIM5_LSI) || \ + 
((TIM_REMAP) == TIM_TIM5_LSE) || \ + ((TIM_REMAP) == TIM_TIM5_RTC))) || \ + (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ + ((TIM_REMAP) == TIM_TIM11_HSE))) || \ + (((INSTANCE) == TIM1) && (((TIM_REMAP) == TIM_TIM1_TIM3_TRGO) || \ + ((TIM_REMAP) == TIM_TIM1_LPTIM))) || \ + (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_TIM3_TRGO) || \ + ((TIM_REMAP) == TIM_TIM5_LPTIM))) || \ + (((INSTANCE) == TIM9) && (((TIM_REMAP) == TIM_TIM9_TIM3_TRGO) || \ + ((TIM_REMAP) == TIM_TIM9_LPTIM)))) #elif defined(TIM8) -#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ - ((((INSTANCE) == TIM2) && (((TIM_REMAP) == TIM_TIM2_TIM8_TRGO) || \ - ((TIM_REMAP) == TIM_TIM2_USBFS_SOF) || \ - ((TIM_REMAP) == TIM_TIM2_USBHS_SOF))) || \ - (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_GPIO) || \ - ((TIM_REMAP) == TIM_TIM5_LSI) || \ - ((TIM_REMAP) == TIM_TIM5_LSE) || \ - ((TIM_REMAP) == TIM_TIM5_RTC))) || \ - (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ - ((TIM_REMAP) == TIM_TIM11_HSE)))) +#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ + ((((INSTANCE) == TIM2) && (((TIM_REMAP) == TIM_TIM2_TIM8_TRGO) || \ + ((TIM_REMAP) == TIM_TIM2_USBFS_SOF) || \ + ((TIM_REMAP) == TIM_TIM2_USBHS_SOF))) || \ + (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_GPIO) || \ + ((TIM_REMAP) == TIM_TIM5_LSI) || \ + ((TIM_REMAP) == TIM_TIM5_LSE) || \ + ((TIM_REMAP) == TIM_TIM5_RTC))) || \ + (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ + ((TIM_REMAP) == TIM_TIM11_HSE)))) #else -#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ - ((((INSTANCE) == TIM2) && (((TIM_REMAP) == TIM_TIM2_ETH_PTP) || \ - ((TIM_REMAP) == TIM_TIM2_USBFS_SOF) || \ - ((TIM_REMAP) == TIM_TIM2_USBHS_SOF))) || \ - (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_GPIO) || \ - ((TIM_REMAP) == TIM_TIM5_LSI) || \ - ((TIM_REMAP) == TIM_TIM5_LSE) || \ - ((TIM_REMAP) == TIM_TIM5_RTC))) || \ - (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ - ((TIM_REMAP) == TIM_TIM11_HSE)))) +#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ + ((((INSTANCE) == TIM2) && (((TIM_REMAP) == TIM_TIM2_ETH_PTP) || \ + ((TIM_REMAP) == TIM_TIM2_USBFS_SOF) || \ + ((TIM_REMAP) == TIM_TIM2_USBHS_SOF))) || \ + (((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_GPIO) || \ + ((TIM_REMAP) == TIM_TIM5_LSI) || \ + ((TIM_REMAP) == TIM_TIM5_LSE) || \ + ((TIM_REMAP) == TIM_TIM5_RTC))) || \ + (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ + ((TIM_REMAP) == TIM_TIM11_HSE)))) #endif /* LPTIM_OR_TIM1_ITR2_RMP && LPTIM_OR_TIM5_ITR1_RMP && LPTIM_OR_TIM5_ITR1_RMP */ #else -#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ - ((((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_GPIO) || \ - ((TIM_REMAP) == TIM_TIM5_LSI) || \ - ((TIM_REMAP) == TIM_TIM5_LSE) || \ - ((TIM_REMAP) == TIM_TIM5_RTC))) || \ - (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ - ((TIM_REMAP) == TIM_TIM11_HSE)))) +#define IS_TIM_REMAP(INSTANCE, TIM_REMAP) \ + ((((INSTANCE) == TIM5) && (((TIM_REMAP) == TIM_TIM5_GPIO) || \ + ((TIM_REMAP) == TIM_TIM5_LSI) || \ + ((TIM_REMAP) == TIM_TIM5_LSE) || \ + ((TIM_REMAP) == TIM_TIM5_RTC))) || \ + (((INSTANCE) == TIM11) && (((TIM_REMAP) == TIM_TIM11_GPIO) || \ + ((TIM_REMAP) == TIM_TIM11_HSE)))) #endif /* SPDIFRX */ /** @@ -206,7 +204,7 @@ typedef struct * @{ */ /* Timer Hall Sensor functions **********************************************/ -HAL_StatusTypeDef HAL_TIMEx_HallSensor_Init(TIM_HandleTypeDef *htim, TIM_HallSensor_InitTypeDef *sConfig); +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Init(TIM_HandleTypeDef *htim, const 
TIM_HallSensor_InitTypeDef *sConfig); HAL_StatusTypeDef HAL_TIMEx_HallSensor_DeInit(TIM_HandleTypeDef *htim); void HAL_TIMEx_HallSensor_MspInit(TIM_HandleTypeDef *htim); @@ -239,7 +237,8 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Chann HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); /* Non-Blocking mode: DMA */ -HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length); HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); /** * @} @@ -258,7 +257,8 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); /* Non-Blocking mode: DMA */ -HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length); HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); /** * @} @@ -292,9 +292,9 @@ HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_IT(TIM_HandleTypeDef *htim, uint32 HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_DMA(TIM_HandleTypeDef *htim, uint32_t InputTrigger, uint32_t CommutationSource); HAL_StatusTypeDef HAL_TIMEx_MasterConfigSynchronization(TIM_HandleTypeDef *htim, - TIM_MasterConfigTypeDef *sMasterConfig); + const TIM_MasterConfigTypeDef *sMasterConfig); HAL_StatusTypeDef HAL_TIMEx_ConfigBreakDeadTime(TIM_HandleTypeDef *htim, - TIM_BreakDeadTimeConfigTypeDef *sBreakDeadTimeConfig); + const TIM_BreakDeadTimeConfigTypeDef *sBreakDeadTimeConfig); HAL_StatusTypeDef HAL_TIMEx_RemapConfig(TIM_HandleTypeDef *htim, uint32_t Remap); /** * @} @@ -317,7 +317,8 @@ void HAL_TIMEx_BreakCallback(TIM_HandleTypeDef *htim); * @{ */ /* Extended Peripheral State functions ***************************************/ -HAL_TIM_StateTypeDef HAL_TIMEx_HallSensor_GetState(TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIMEx_HallSensor_GetState(const TIM_HandleTypeDef *htim); +HAL_TIM_ChannelStateTypeDef HAL_TIMEx_GetChannelNState(const TIM_HandleTypeDef *htim, uint32_t ChannelN); /** * @} */ @@ -328,7 +329,7 @@ HAL_TIM_StateTypeDef HAL_TIMEx_HallSensor_GetState(TIM_HandleTypeDef *htim); /* End of exported functions -------------------------------------------------*/ /* Private functions----------------------------------------------------------*/ -/** @addtogroup TIMEx_Private_Functions TIMEx Private Functions +/** @addtogroup TIMEx_Private_Functions TIM Extended Private Functions * @{ */ void TIMEx_DMACommutationCplt(DMA_HandleTypeDef *hdma); @@ -352,5 +353,3 @@ void TIMEx_DMACommutationHalfCplt(DMA_HandleTypeDef *hdma); #endif /* STM32F4xx_HAL_TIM_EX_H */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_uart.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_uart.h index 2154a02f..e6ce82fc 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_uart.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_hal_uart.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
<h2><center>&copy; Copyright (c) 2016 STMicroelectronics. - * All rights reserved.</center></h2>
+ * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -88,7 +87,7 @@ typedef struct * 11 : Error * b5 Peripheral initialization status * 0 : Reset (Peripheral not initialized) - * 1 : Init done (Peripheral not initialized. HAL UART Init function already called) + * 1 : Init done (Peripheral initialized. HAL UART Init function already called) * b4-b3 (not used) * xx : Should be set to 00 * b2 Intrinsic process state @@ -105,7 +104,7 @@ typedef struct * xx : Should be set to 00 * b5 Peripheral initialization status * 0 : Reset (Peripheral not initialized) - * 1 : Init done (Peripheral not initialized) + * 1 : Init done (Peripheral initialized) * b4-b2 (not used) * xxx : Should be set to 000 * b1 Rx state @@ -135,6 +134,26 @@ typedef enum Value is allowed for gState only */ } HAL_UART_StateTypeDef; +/** + * @brief HAL UART Reception type definition + * @note HAL UART Reception type value aims to identify which type of Reception is ongoing. + * This parameter can be a value of @ref UART_Reception_Type_Values : + * HAL_UART_RECEPTION_STANDARD = 0x00U, + * HAL_UART_RECEPTION_TOIDLE = 0x01U, + */ +typedef uint32_t HAL_UART_RxTypeTypeDef; + +/** + * @brief HAL UART Rx Event type definition + * @note HAL UART Rx Event type value aims to identify which type of Event has occurred + * leading to call of the RxEvent callback. 
+ * This parameter can be a value of @ref UART_RxEvent_Type_Values : + * HAL_UART_RXEVENT_TC = 0x00U, + * HAL_UART_RXEVENT_HT = 0x01U, + * HAL_UART_RXEVENT_IDLE = 0x02U, + */ +typedef uint32_t HAL_UART_RxEventTypeTypeDef; + /** * @brief UART handle Structure definition */ @@ -144,7 +163,7 @@ typedef struct __UART_HandleTypeDef UART_InitTypeDef Init; /*!< UART communication parameters */ - uint8_t *pTxBuffPtr; /*!< Pointer to UART Tx transfer Buffer */ + const uint8_t *pTxBuffPtr; /*!< Pointer to UART Tx transfer Buffer */ uint16_t TxXferSize; /*!< UART Tx Transfer size */ @@ -156,6 +175,10 @@ typedef struct __UART_HandleTypeDef __IO uint16_t RxXferCount; /*!< UART Rx Transfer Counter */ + __IO HAL_UART_RxTypeTypeDef ReceptionType; /*!< Type of ongoing reception */ + + __IO HAL_UART_RxEventTypeTypeDef RxEventType; /*!< Type of Rx Event */ + DMA_HandleTypeDef *hdmatx; /*!< UART Tx DMA Handle parameters */ DMA_HandleTypeDef *hdmarx; /*!< UART Rx DMA Handle parameters */ @@ -181,6 +204,7 @@ typedef struct __UART_HandleTypeDef void (* AbortTransmitCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Abort Transmit Complete Callback */ void (* AbortReceiveCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Abort Receive Complete Callback */ void (* WakeupCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Wakeup Callback */ + void (* RxEventCallback)(struct __UART_HandleTypeDef *huart, uint16_t Pos); /*!< UART Reception Event Callback */ void (* MspInitCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Msp Init callback */ void (* MspDeInitCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Msp DeInit callback */ @@ -213,6 +237,7 @@ typedef enum * @brief HAL UART Callback pointer definition */ typedef void (*pUART_CallbackTypeDef)(UART_HandleTypeDef *huart); /*!< pointer to an UART callback function */ +typedef void (*pUART_RxEventCallbackTypeDef)(struct __UART_HandleTypeDef *huart, uint16_t Pos); /*!< pointer to a UART Rx Event specific callback function */ #endif /* USE_HAL_UART_REGISTER_CALLBACKS */ @@ -369,6 +394,25 @@ typedef void (*pUART_CallbackTypeDef)(UART_HandleTypeDef *huart); /*!< pointer * @} */ +/** @defgroup UART_Reception_Type_Values UART Reception type values + * @{ + */ +#define HAL_UART_RECEPTION_STANDARD (0x00000000U) /*!< Standard reception */ +#define HAL_UART_RECEPTION_TOIDLE (0x00000001U) /*!< Reception till completion or IDLE event */ +/** + * @} + */ + +/** @defgroup UART_RxEvent_Type_Values UART RxEvent type values + * @{ + */ +#define HAL_UART_RXEVENT_TC (0x00000000U) /*!< RxEvent linked to Transfer Complete event */ +#define HAL_UART_RXEVENT_HT (0x00000001U) /*!< RxEvent linked to Half Transfer event */ +#define HAL_UART_RXEVENT_IDLE (0x00000002U) +/** + * @} + */ + /** * @} */ @@ -551,7 +595,7 @@ typedef void (*pUART_CallbackTypeDef)(UART_HandleTypeDef *huart); /*!< pointer * @retval The new state of __IT__ (TRUE or FALSE). */ #define __HAL_UART_GET_IT_SOURCE(__HANDLE__, __IT__) (((((__IT__) >> 28U) == UART_CR1_REG_INDEX)? (__HANDLE__)->Instance->CR1:(((((uint32_t)(__IT__)) >> 28U) == UART_CR2_REG_INDEX)? 
\ - (__HANDLE__)->Instance->CR2 : (__HANDLE__)->Instance->CR3)) & (((uint32_t)(__IT__)) & UART_IT_MASK)) + (__HANDLE__)->Instance->CR2 : (__HANDLE__)->Instance->CR3)) & (((uint32_t)(__IT__)) & UART_IT_MASK)) /** @brief Enable CTS flow control * @note This macro allows to enable CTS hardware flow control for a given UART instance, @@ -569,7 +613,7 @@ typedef void (*pUART_CallbackTypeDef)(UART_HandleTypeDef *huart); /*!< pointer */ #define __HAL_UART_HWCONTROL_CTS_ENABLE(__HANDLE__) \ do{ \ - SET_BIT((__HANDLE__)->Instance->CR3, USART_CR3_CTSE); \ + ATOMIC_SET_BIT((__HANDLE__)->Instance->CR3, USART_CR3_CTSE); \ (__HANDLE__)->Init.HwFlowCtl |= USART_CR3_CTSE; \ } while(0U) @@ -589,7 +633,7 @@ typedef void (*pUART_CallbackTypeDef)(UART_HandleTypeDef *huart); /*!< pointer */ #define __HAL_UART_HWCONTROL_CTS_DISABLE(__HANDLE__) \ do{ \ - CLEAR_BIT((__HANDLE__)->Instance->CR3, USART_CR3_CTSE); \ + ATOMIC_CLEAR_BIT((__HANDLE__)->Instance->CR3, USART_CR3_CTSE); \ (__HANDLE__)->Init.HwFlowCtl &= ~(USART_CR3_CTSE); \ } while(0U) @@ -609,7 +653,7 @@ typedef void (*pUART_CallbackTypeDef)(UART_HandleTypeDef *huart); /*!< pointer */ #define __HAL_UART_HWCONTROL_RTS_ENABLE(__HANDLE__) \ do{ \ - SET_BIT((__HANDLE__)->Instance->CR3, USART_CR3_RTSE); \ + ATOMIC_SET_BIT((__HANDLE__)->Instance->CR3, USART_CR3_RTSE); \ (__HANDLE__)->Init.HwFlowCtl |= USART_CR3_RTSE; \ } while(0U) @@ -629,7 +673,7 @@ typedef void (*pUART_CallbackTypeDef)(UART_HandleTypeDef *huart); /*!< pointer */ #define __HAL_UART_HWCONTROL_RTS_DISABLE(__HANDLE__) \ do{ \ - CLEAR_BIT((__HANDLE__)->Instance->CR3, USART_CR3_RTSE);\ + ATOMIC_CLEAR_BIT((__HANDLE__)->Instance->CR3, USART_CR3_RTSE);\ (__HANDLE__)->Init.HwFlowCtl &= ~(USART_CR3_RTSE); \ } while(0U) @@ -643,7 +687,8 @@ typedef void (*pUART_CallbackTypeDef)(UART_HandleTypeDef *huart); /*!< pointer * @param __HANDLE__ specifies the UART Handle. * @retval None */ -#define __HAL_UART_ONE_BIT_SAMPLE_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR3 &= (uint16_t)~((uint16_t)USART_CR3_ONEBIT)) +#define __HAL_UART_ONE_BIT_SAMPLE_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR3\ + &= (uint16_t)~((uint16_t)USART_CR3_ONEBIT)) /** @brief Enable UART * @param __HANDLE__ specifies the UART Handle. 
@@ -680,8 +725,12 @@ void HAL_UART_MspDeInit(UART_HandleTypeDef *huart); /* Callbacks Register/UnRegister functions ***********************************/ #if (USE_HAL_UART_REGISTER_CALLBACKS == 1) -HAL_StatusTypeDef HAL_UART_RegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID, pUART_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_UART_RegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID, + pUART_CallbackTypeDef pCallback); HAL_StatusTypeDef HAL_UART_UnRegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID); + +HAL_StatusTypeDef HAL_UART_RegisterRxEventCallback(UART_HandleTypeDef *huart, pUART_RxEventCallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_UART_UnRegisterRxEventCallback(UART_HandleTypeDef *huart); #endif /* USE_HAL_UART_REGISTER_CALLBACKS */ /** @@ -693,15 +742,23 @@ HAL_StatusTypeDef HAL_UART_UnRegisterCallback(UART_HandleTypeDef *huart, HAL_UAR */ /* IO operation functions *******************************************************/ -HAL_StatusTypeDef HAL_UART_Transmit(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_UART_Transmit(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size, uint32_t Timeout); HAL_StatusTypeDef HAL_UART_Receive(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint32_t Timeout); -HAL_StatusTypeDef HAL_UART_Transmit_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UART_Transmit_IT(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size); HAL_StatusTypeDef HAL_UART_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); -HAL_StatusTypeDef HAL_UART_Transmit_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UART_Transmit_DMA(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size); HAL_StatusTypeDef HAL_UART_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); HAL_StatusTypeDef HAL_UART_DMAPause(UART_HandleTypeDef *huart); HAL_StatusTypeDef HAL_UART_DMAResume(UART_HandleTypeDef *huart); HAL_StatusTypeDef HAL_UART_DMAStop(UART_HandleTypeDef *huart); + +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint16_t *RxLen, + uint32_t Timeout); +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); + +HAL_UART_RxEventTypeTypeDef HAL_UARTEx_GetRxEventType(UART_HandleTypeDef *huart); + /* Transfer Abort functions */ HAL_StatusTypeDef HAL_UART_Abort(UART_HandleTypeDef *huart); HAL_StatusTypeDef HAL_UART_AbortTransmit(UART_HandleTypeDef *huart); @@ -720,6 +777,8 @@ void HAL_UART_AbortCpltCallback(UART_HandleTypeDef *huart); void HAL_UART_AbortTransmitCpltCallback(UART_HandleTypeDef *huart); void HAL_UART_AbortReceiveCpltCallback(UART_HandleTypeDef *huart); +void HAL_UARTEx_RxEventCallback(UART_HandleTypeDef *huart, uint16_t Size); + /** * @} */ @@ -741,8 +800,8 @@ HAL_StatusTypeDef HAL_HalfDuplex_EnableReceiver(UART_HandleTypeDef *huart); * @{ */ /* Peripheral State functions **************************************************/ -HAL_UART_StateTypeDef HAL_UART_GetState(UART_HandleTypeDef *huart); -uint32_t HAL_UART_GetError(UART_HandleTypeDef *huart); +HAL_UART_StateTypeDef HAL_UART_GetState(const UART_HandleTypeDef *huart); +uint32_t HAL_UART_GetError(const UART_HandleTypeDef *huart); /** * @} */ 
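Usage sketch (not part of the diff): the reception-to-idle API declared in the hunks above is typically driven as below. huart2, RX_BUF_SIZE and rx_buf are placeholder application objects; the HAL calls and defines are the ones added in this header, and the weak HAL_UARTEx_RxEventCallback is assumed to be overridable as usual for HAL callbacks.

#include "stm32f4xx_hal.h"

#define RX_BUF_SIZE 128U

extern UART_HandleTypeDef huart2;      /* assumed: initialized elsewhere */
static uint8_t rx_buf[RX_BUF_SIZE];

void App_StartReception(void)
{
  /* Receive until RX_BUF_SIZE bytes arrive or the RX line goes idle;
     progress is reported through HAL_UARTEx_RxEventCallback(). */
  if (HAL_UARTEx_ReceiveToIdle_DMA(&huart2, rx_buf, RX_BUF_SIZE) != HAL_OK)
  {
    /* application error handling */
  }
}

void HAL_UARTEx_RxEventCallback(UART_HandleTypeDef *huart, uint16_t Size)
{
  /* Size is the number of bytes available in rx_buf at the time of the event. */
  if (huart == &huart2)
  {
    HAL_UART_RxEventTypeTypeDef evt = HAL_UARTEx_GetRxEventType(huart);
    (void)evt; /* e.g. distinguish HAL_UART_RXEVENT_IDLE from _TC / _HT */
    /* consume Size bytes from rx_buf, then re-arm reception if needed */
  }
}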
@@ -800,7 +859,8 @@ uint32_t HAL_UART_GetError(UART_HandleTypeDef *huart); #define UART_DIV_SAMPLING16(_PCLK_, _BAUD_) ((uint32_t)((((uint64_t)(_PCLK_))*25U)/(4U*((uint64_t)(_BAUD_))))) #define UART_DIVMANT_SAMPLING16(_PCLK_, _BAUD_) (UART_DIV_SAMPLING16((_PCLK_), (_BAUD_))/100U) -#define UART_DIVFRAQ_SAMPLING16(_PCLK_, _BAUD_) ((((UART_DIV_SAMPLING16((_PCLK_), (_BAUD_)) - (UART_DIVMANT_SAMPLING16((_PCLK_), (_BAUD_)) * 100U)) * 16U) + 50U) / 100U) +#define UART_DIVFRAQ_SAMPLING16(_PCLK_, _BAUD_) ((((UART_DIV_SAMPLING16((_PCLK_), (_BAUD_)) - (UART_DIVMANT_SAMPLING16((_PCLK_), (_BAUD_)) * 100U)) * 16U)\ + + 50U) / 100U) /* UART BRR = mantissa + overflow + fraction = (UART DIVMANT << 4) + (UART DIVFRAQ & 0xF0) + (UART DIVFRAQ & 0x0FU) */ #define UART_BRR_SAMPLING16(_PCLK_, _BAUD_) ((UART_DIVMANT_SAMPLING16((_PCLK_), (_BAUD_)) << 4U) + \ @@ -809,7 +869,8 @@ uint32_t HAL_UART_GetError(UART_HandleTypeDef *huart); #define UART_DIV_SAMPLING8(_PCLK_, _BAUD_) ((uint32_t)((((uint64_t)(_PCLK_))*25U)/(2U*((uint64_t)(_BAUD_))))) #define UART_DIVMANT_SAMPLING8(_PCLK_, _BAUD_) (UART_DIV_SAMPLING8((_PCLK_), (_BAUD_))/100U) -#define UART_DIVFRAQ_SAMPLING8(_PCLK_, _BAUD_) ((((UART_DIV_SAMPLING8((_PCLK_), (_BAUD_)) - (UART_DIVMANT_SAMPLING8((_PCLK_), (_BAUD_)) * 100U)) * 8U) + 50U) / 100U) +#define UART_DIVFRAQ_SAMPLING8(_PCLK_, _BAUD_) ((((UART_DIV_SAMPLING8((_PCLK_), (_BAUD_)) - (UART_DIVMANT_SAMPLING8((_PCLK_), (_BAUD_)) * 100U)) * 8U)\ + + 50U) / 100U) /* UART BRR = mantissa + overflow + fraction = (UART DIVMANT << 4) + ((UART DIVFRAQ & 0xF8) << 1) + (UART DIVFRAQ & 0x07U) */ #define UART_BRR_SAMPLING8(_PCLK_, _BAUD_) ((UART_DIVMANT_SAMPLING8((_PCLK_), (_BAUD_)) << 4U) + \ @@ -825,6 +886,9 @@ uint32_t HAL_UART_GetError(UART_HandleTypeDef *huart); * @{ */ +HAL_StatusTypeDef UART_Start_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef UART_Start_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); + /** * @} */ @@ -843,4 +907,3 @@ uint32_t HAL_UART_GetError(UART_HandleTypeDef *huart); #endif /* __STM32F4xx_HAL_UART_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_adc.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_adc.h index 93489114..13f6751b 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_adc.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_adc.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
<h2><center>&copy; Copyright (c) 2017 STMicroelectronics. - * All rights reserved.</center></h2>
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -322,23 +321,23 @@ typedef struct { uint32_t CommonClock; /*!< Set parameter common to several ADC: Clock source and prescaler. This parameter can be a value of @ref ADC_LL_EC_COMMON_CLOCK_SOURCE - + This feature can be modified afterwards using unitary function @ref LL_ADC_SetCommonClock(). */ #if defined(ADC_MULTIMODE_SUPPORT) uint32_t Multimode; /*!< Set ADC multimode configuration to operate in independent mode or multimode (for devices with several ADC instances). This parameter can be a value of @ref ADC_LL_EC_MULTI_MODE - + This feature can be modified afterwards using unitary function @ref LL_ADC_SetMultimode(). */ uint32_t MultiDMATransfer; /*!< Set ADC multimode conversion data transfer: no transfer or transfer by DMA. This parameter can be a value of @ref ADC_LL_EC_MULTI_DMA_TRANSFER - + This feature can be modified afterwards using unitary function @ref LL_ADC_SetMultiDMATransfer(). */ uint32_t MultiTwoSamplingDelay; /*!< Set ADC multimode delay between 2 sampling phases. This parameter can be a value of @ref ADC_LL_EC_MULTI_TWOSMP_DELAY - + This feature can be modified afterwards using unitary function @ref LL_ADC_SetMultiTwoSamplingDelay(). */ #endif /* ADC_MULTIMODE_SUPPORT */ @@ -368,17 +367,17 @@ typedef struct { uint32_t Resolution; /*!< Set ADC resolution. This parameter can be a value of @ref ADC_LL_EC_RESOLUTION - + This feature can be modified afterwards using unitary function @ref LL_ADC_SetResolution(). */ uint32_t DataAlignment; /*!< Set ADC conversion data alignment. This parameter can be a value of @ref ADC_LL_EC_DATA_ALIGN - + This feature can be modified afterwards using unitary function @ref LL_ADC_SetDataAlignment(). */ uint32_t SequencersScanMode; /*!< Set ADC scan selection. This parameter can be a value of @ref ADC_LL_EC_SCAN_SELECTION - + This feature can be modified afterwards using unitary function @ref LL_ADC_SetSequencersScanMode(). */ } LL_ADC_InitTypeDef; @@ -408,31 +407,31 @@ typedef struct This parameter can be a value of @ref ADC_LL_EC_REG_TRIGGER_SOURCE @note On this STM32 series, setting of external trigger edge is performed using function @ref LL_ADC_REG_StartConversionExtTrig(). - + This feature can be modified afterwards using unitary function @ref LL_ADC_REG_SetTriggerSource(). */ uint32_t SequencerLength; /*!< Set ADC group regular sequencer length. This parameter can be a value of @ref ADC_LL_EC_REG_SEQ_SCAN_LENGTH @note This parameter is discarded if scan mode is disabled (refer to parameter 'ADC_SequencersScanMode'). - + This feature can be modified afterwards using unitary function @ref LL_ADC_REG_SetSequencerLength(). */ uint32_t SequencerDiscont; /*!< Set ADC group regular sequencer discontinuous mode: sequence subdivided and scan conversions interrupted every selected number of ranks. 
This parameter can be a value of @ref ADC_LL_EC_REG_SEQ_DISCONT_MODE @note This parameter has an effect only if group regular sequencer is enabled (scan length of 2 ranks or more). - + This feature can be modified afterwards using unitary function @ref LL_ADC_REG_SetSequencerDiscont(). */ uint32_t ContinuousMode; /*!< Set ADC continuous conversion mode on ADC group regular, whether ADC conversions are performed in single mode (one conversion per trigger) or in continuous mode (after the first trigger, following conversions launched successively automatically). This parameter can be a value of @ref ADC_LL_EC_REG_CONTINUOUS_MODE Note: It is not possible to enable both ADC group regular continuous mode and discontinuous mode. - + This feature can be modified afterwards using unitary function @ref LL_ADC_REG_SetContinuousMode(). */ uint32_t DMATransfer; /*!< Set ADC group regular conversion data transfer: no transfer or transfer by DMA, and DMA requests mode. This parameter can be a value of @ref ADC_LL_EC_REG_DMA_TRANSFER - + This feature can be modified afterwards using unitary function @ref LL_ADC_REG_SetDMATransfer(). */ } LL_ADC_REG_InitTypeDef; @@ -462,26 +461,26 @@ typedef struct This parameter can be a value of @ref ADC_LL_EC_INJ_TRIGGER_SOURCE @note On this STM32 series, setting of external trigger edge is performed using function @ref LL_ADC_INJ_StartConversionExtTrig(). - + This feature can be modified afterwards using unitary function @ref LL_ADC_INJ_SetTriggerSource(). */ uint32_t SequencerLength; /*!< Set ADC group injected sequencer length. This parameter can be a value of @ref ADC_LL_EC_INJ_SEQ_SCAN_LENGTH @note This parameter is discarded if scan mode is disabled (refer to parameter 'ADC_SequencersScanMode'). - + This feature can be modified afterwards using unitary function @ref LL_ADC_INJ_SetSequencerLength(). */ uint32_t SequencerDiscont; /*!< Set ADC group injected sequencer discontinuous mode: sequence subdivided and scan conversions interrupted every selected number of ranks. This parameter can be a value of @ref ADC_LL_EC_INJ_SEQ_DISCONT_MODE @note This parameter has an effect only if group injected sequencer is enabled (scan length of 2 ranks or more). - + This feature can be modified afterwards using unitary function @ref LL_ADC_INJ_SetSequencerDiscont(). */ uint32_t TrigAuto; /*!< Set ADC group injected conversion trigger: independent or from ADC group regular. This parameter can be a value of @ref ADC_LL_EC_INJ_TRIG_AUTO - Note: This parameter must be set to set to independent trigger if injected trigger source is set to an external trigger. - + Note: This parameter must be set to set to independent trigger if injected trigger source is set to an external trigger. + This feature can be modified afterwards using unitary function @ref LL_ADC_INJ_SetTrigAuto(). 
*/ } LL_ADC_INJ_InitTypeDef; @@ -510,7 +509,7 @@ typedef struct #define LL_ADC_FLAG_EOCS_MST ADC_CSR_EOC1 /*!< ADC flag ADC multimode master group regular end of unitary conversion or sequence conversions (to configure flag of end of conversion, use function @ref LL_ADC_REG_SetFlagEndOfConversion() ) */ #define LL_ADC_FLAG_EOCS_SLV1 ADC_CSR_EOC2 /*!< ADC flag ADC multimode slave 1 group regular end of unitary conversion or sequence conversions (to configure flag of end of conversion, use function @ref LL_ADC_REG_SetFlagEndOfConversion() ) */ #define LL_ADC_FLAG_EOCS_SLV2 ADC_CSR_EOC3 /*!< ADC flag ADC multimode slave 2 group regular end of unitary conversion or sequence conversions (to configure flag of end of conversion, use function @ref LL_ADC_REG_SetFlagEndOfConversion() ) */ -#define LL_ADC_FLAG_OVR_MST ADC_CSR_OVR1 /*!< ADC flag ADC multimode master group regular overrun */ +#define LL_ADC_FLAG_OVR_MST ADC_CSR_OVR1 /*!< ADC flag ADC multimode master group regular overrun */ #define LL_ADC_FLAG_OVR_SLV1 ADC_CSR_OVR2 /*!< ADC flag ADC multimode slave 1 group regular overrun */ #define LL_ADC_FLAG_OVR_SLV2 ADC_CSR_OVR3 /*!< ADC flag ADC multimode slave 2 group regular overrun */ #define LL_ADC_FLAG_JEOS_MST ADC_CSR_JEOC1 /*!< ADC flag ADC multimode master group injected end of sequence conversions (Note: on this STM32 series, there is no flag ADC group injected end of unitary conversion. Flag noted as "JEOC" is corresponding to flag "JEOS" in other STM32 families) */ @@ -685,8 +684,8 @@ typedef struct */ /** @defgroup ADC_LL_EC_REG_CONTINUOUS_MODE ADC group regular - Continuous mode -* @{ -*/ + * @{ + */ #define LL_ADC_REG_CONV_SINGLE 0x00000000UL /*!< ADC conversions are performed in single mode: one conversion per trigger */ #define LL_ADC_REG_CONV_CONTINUOUS (ADC_CR2_CONT) /*!< ADC conversions are performed in continuous mode: after the first trigger, following conversions launched successively automatically */ /** @@ -809,8 +808,8 @@ typedef struct */ /** @defgroup ADC_LL_EC_INJ_TRIG_AUTO ADC group injected - Automatic trigger mode -* @{ -*/ + * @{ + */ #define LL_ADC_INJ_TRIG_INDEPENDENT 0x00000000UL /*!< ADC group injected conversion trigger independent. Setting mandatory if ADC group injected injected trigger source is set to an external trigger. */ #define LL_ADC_INJ_TRIG_FROM_GRP_REGULAR (ADC_CR1_JAUTO) /*!< ADC group injected conversion trigger from ADC group regular. Setting compliant only with group injected trigger source set to SW start, without any further action on ADC group injected conversion start or stop: in this case, ADC group injected is controlled only from ADC group regular. */ /** @@ -1046,7 +1045,7 @@ typedef struct * above each literal definition. * @{ */ - + /* Note: Only ADC IP HW delays are defined in ADC LL driver driver, */ /* not timeout values. */ /* Timeout values for ADC operations are dependent to device clock */ @@ -1146,7 +1145,7 @@ typedef struct * @arg @ref LL_ADC_CHANNEL_VREFINT (1) * @arg @ref LL_ADC_CHANNEL_TEMPSENSOR (1)(2) * @arg @ref LL_ADC_CHANNEL_VBAT (1) - * + * * (1) On STM32F4, parameter available only on ADC instance: ADC1.\n * (2) On devices STM32F42x and STM32F43x, limitation: this internal channel is shared between temperature sensor and Vbat, only 1 measurement path must be enabled. 
* @retval Value between Min_Data=0 and Max_Data=18 @@ -1184,7 +1183,7 @@ typedef struct * @arg @ref LL_ADC_CHANNEL_VREFINT (1) * @arg @ref LL_ADC_CHANNEL_TEMPSENSOR (1)(2) * @arg @ref LL_ADC_CHANNEL_VBAT (1) - * + * * (1) On STM32F4, parameter available only on ADC instance: ADC1.\n * (2) On devices STM32F42x and STM32F43x, limitation: this internal channel is shared between temperature sensor and Vbat, only 1 measurement path must be enabled.\n * (1) For ADC channel read back from ADC register, @@ -1244,7 +1243,7 @@ typedef struct * @arg @ref LL_ADC_CHANNEL_VREFINT (1) * @arg @ref LL_ADC_CHANNEL_TEMPSENSOR (1)(2) * @arg @ref LL_ADC_CHANNEL_VBAT (1) - * + * * (1) On STM32F4, parameter available only on ADC instance: ADC1.\n * (2) On devices STM32F42x and STM32F43x, limitation: this internal channel is shared between temperature sensor and Vbat, only 1 measurement path must be enabled. * @retval Value "0" if the channel corresponds to a parameter definition of a ADC external channel (channel connected to a GPIO pin). @@ -1289,7 +1288,7 @@ typedef struct * @arg @ref LL_ADC_CHANNEL_VREFINT (1) * @arg @ref LL_ADC_CHANNEL_TEMPSENSOR (1)(2) * @arg @ref LL_ADC_CHANNEL_VBAT (1) - * + * * (1) On STM32F4, parameter available only on ADC instance: ADC1.\n * (2) On devices STM32F42x and STM32F43x, limitation: this internal channel is shared between temperature sensor and Vbat, only 1 measurement path must be enabled. * @retval Returned value can be one of the following values: @@ -1334,7 +1333,7 @@ typedef struct * @arg @ref LL_ADC_CHANNEL_VREFINT (1) * @arg @ref LL_ADC_CHANNEL_TEMPSENSOR (1)(2) * @arg @ref LL_ADC_CHANNEL_VBAT (1) - * + * * (1) On STM32F4, parameter available only on ADC instance: ADC1. * (2) On devices STM32F42x and STM32F43x, limitation: this internal channel is shared between temperature sensor and Vbat, only 1 measurement path must be enabled. * @retval Value "0" if the internal channel selected is not available on the ADC instance selected. 
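Usage sketch (not part of the diff), illustrating the internal-channel notes touched in the hunks above: VREFINT, the temperature sensor and VBAT are available on ADC1 only, and their measurement path must first be enabled on the common instance. ADC1 and the wrapper name are placeholders; the LL calls and defines are those referenced in these doc comments.

#include "stm32f4xx_ll_adc.h"

void App_EnableTempSensorPath(void)
{
  /* Route the internal temperature sensor to ADC1; on STM32F42x/43x this
     path is shared with VBAT, so only one of the two may be enabled. */
  LL_ADC_SetCommonPathInternalCh(__LL_ADC_COMMON_INSTANCE(ADC1),
                                 LL_ADC_PATH_INTERNAL_TEMPSENSOR);

  /* LL_ADC_CHANNEL_TEMPSENSOR can now be assigned to a sequencer rank with
     LL_ADC_REG_SetSequencerRanks(); a channel read back from the register is
     compared against it via __LL_ADC_CHANNEL_INTERNAL_TO_EXTERNAL(), as the
     notes above require. */
}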
@@ -1378,7 +1377,7 @@ typedef struct * @arg @ref LL_ADC_CHANNEL_VREFINT (1) * @arg @ref LL_ADC_CHANNEL_TEMPSENSOR (1)(2) * @arg @ref LL_ADC_CHANNEL_VBAT (1) - * + * * (1) On STM32F4, parameter available only on ADC instance: ADC1.\n * (2) On devices STM32F42x and STM32F43x, limitation: this internal channel is shared between temperature sensor and Vbat, only 1 measurement path must be enabled.\n * (1) For ADC channel read back from ADC register, @@ -1393,35 +1392,35 @@ typedef struct * @arg @ref LL_ADC_AWD_ALL_CHANNELS_REG * @arg @ref LL_ADC_AWD_ALL_CHANNELS_INJ * @arg @ref LL_ADC_AWD_ALL_CHANNELS_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_0_REG - * @arg @ref LL_ADC_AWD_CHANNEL_0_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_0_REG + * @arg @ref LL_ADC_AWD_CHANNEL_0_INJ * @arg @ref LL_ADC_AWD_CHANNEL_0_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_1_REG - * @arg @ref LL_ADC_AWD_CHANNEL_1_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_1_REG + * @arg @ref LL_ADC_AWD_CHANNEL_1_INJ * @arg @ref LL_ADC_AWD_CHANNEL_1_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_2_REG - * @arg @ref LL_ADC_AWD_CHANNEL_2_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_2_REG + * @arg @ref LL_ADC_AWD_CHANNEL_2_INJ * @arg @ref LL_ADC_AWD_CHANNEL_2_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_3_REG - * @arg @ref LL_ADC_AWD_CHANNEL_3_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_3_REG + * @arg @ref LL_ADC_AWD_CHANNEL_3_INJ * @arg @ref LL_ADC_AWD_CHANNEL_3_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_4_REG - * @arg @ref LL_ADC_AWD_CHANNEL_4_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_4_REG + * @arg @ref LL_ADC_AWD_CHANNEL_4_INJ * @arg @ref LL_ADC_AWD_CHANNEL_4_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_5_REG - * @arg @ref LL_ADC_AWD_CHANNEL_5_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_5_REG + * @arg @ref LL_ADC_AWD_CHANNEL_5_INJ * @arg @ref LL_ADC_AWD_CHANNEL_5_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_6_REG - * @arg @ref LL_ADC_AWD_CHANNEL_6_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_6_REG + * @arg @ref LL_ADC_AWD_CHANNEL_6_INJ * @arg @ref LL_ADC_AWD_CHANNEL_6_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_7_REG - * @arg @ref LL_ADC_AWD_CHANNEL_7_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_7_REG + * @arg @ref LL_ADC_AWD_CHANNEL_7_INJ * @arg @ref LL_ADC_AWD_CHANNEL_7_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_8_REG - * @arg @ref LL_ADC_AWD_CHANNEL_8_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_8_REG + * @arg @ref LL_ADC_AWD_CHANNEL_8_INJ * @arg @ref LL_ADC_AWD_CHANNEL_8_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_9_REG - * @arg @ref LL_ADC_AWD_CHANNEL_9_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_9_REG + * @arg @ref LL_ADC_AWD_CHANNEL_9_INJ * @arg @ref LL_ADC_AWD_CHANNEL_9_REG_INJ * @arg @ref LL_ADC_AWD_CHANNEL_10_REG * @arg @ref LL_ADC_AWD_CHANNEL_10_INJ @@ -1459,7 +1458,7 @@ typedef struct * @arg @ref LL_ADC_AWD_CH_VBAT_REG (1) * @arg @ref LL_ADC_AWD_CH_VBAT_INJ (1) * @arg @ref LL_ADC_AWD_CH_VBAT_REG_INJ (1) - * + * * (1) On STM32F4, parameter available only on ADC instance: ADC1.\n * (2) On devices STM32F42x and STM32F43x, limitation: this internal channel is shared between temperature sensor and Vbat, only 1 measurement path must be enabled. */ @@ -1497,7 +1496,7 @@ typedef struct /** * @brief Helper macro to get the value of ADC analog watchdog threshold high - * or low in function of ADC resolution, when ADC resolution is + * or low in function of ADC resolution, when ADC resolution is * different of 12 bits. * @note To be used with function @ref LL_ADC_GetAnalogWDThresholds(). 
* Example, with a ADC resolution of 8 bits, to get the value of @@ -1606,7 +1605,7 @@ typedef struct /** * @brief Helper macro to convert the ADC conversion data from * a resolution to another resolution. - * @param __DATA__ ADC conversion data to be converted + * @param __DATA__ ADC conversion data to be converted * @param __ADC_RESOLUTION_CURRENT__ Resolution of to the data to be converted * This parameter can be one of the following values: * @arg @ref LL_ADC_RESOLUTION_12B @@ -1855,33 +1854,36 @@ typedef struct * @param Register This parameter can be one of the following values: * @arg @ref LL_ADC_DMA_REG_REGULAR_DATA * @arg @ref LL_ADC_DMA_REG_REGULAR_DATA_MULTI (1) - * + * * (1) Available on devices with several ADC instances. * @retval ADC register address */ #if defined(ADC_MULTIMODE_SUPPORT) -__STATIC_INLINE uint32_t LL_ADC_DMA_GetRegAddr(ADC_TypeDef *ADCx, uint32_t Register) +__STATIC_INLINE uint32_t LL_ADC_DMA_GetRegAddr(const ADC_TypeDef *ADCx, uint32_t Register) { uint32_t data_reg_addr = 0UL; - + if (Register == LL_ADC_DMA_REG_REGULAR_DATA) { /* Retrieve address of register DR */ - data_reg_addr = (uint32_t)&(ADCx->DR); + data_reg_addr = (uint32_t) & (ADCx->DR); } else /* (Register == LL_ADC_DMA_REG_REGULAR_DATA_MULTI) */ { /* Retrieve address of register CDR */ - data_reg_addr = (uint32_t)&((__LL_ADC_COMMON_INSTANCE(ADCx))->CDR); + data_reg_addr = (uint32_t) & ((__LL_ADC_COMMON_INSTANCE(ADCx))->CDR); } - + return data_reg_addr; } #else __STATIC_INLINE uint32_t LL_ADC_DMA_GetRegAddr(ADC_TypeDef *ADCx, uint32_t Register) { + /* Prevent unused argument compilation warning */ + (void)Register; + /* Retrieve address of register DR */ - return (uint32_t)&(ADCx->DR); + return (uint32_t) & (ADCx->DR); } #endif @@ -1921,7 +1923,7 @@ __STATIC_INLINE void LL_ADC_SetCommonClock(ADC_Common_TypeDef *ADCxy_COMMON, uin * @arg @ref LL_ADC_CLOCK_SYNC_PCLK_DIV6 * @arg @ref LL_ADC_CLOCK_SYNC_PCLK_DIV8 */ -__STATIC_INLINE uint32_t LL_ADC_GetCommonClock(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_GetCommonClock(const ADC_Common_TypeDef *ADCxy_COMMON) { return (uint32_t)(READ_BIT(ADCxy_COMMON->CCR, ADC_CCR_ADCPRE)); } @@ -1975,7 +1977,7 @@ __STATIC_INLINE void LL_ADC_SetCommonPathInternalCh(ADC_Common_TypeDef *ADCxy_CO * @arg @ref LL_ADC_PATH_INTERNAL_TEMPSENSOR * @arg @ref LL_ADC_PATH_INTERNAL_VBAT */ -__STATIC_INLINE uint32_t LL_ADC_GetCommonPathInternalCh(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_GetCommonPathInternalCh(const ADC_Common_TypeDef *ADCxy_COMMON) { return (uint32_t)(READ_BIT(ADCxy_COMMON->CCR, ADC_CCR_TSVREFE | ADC_CCR_VBATE)); } @@ -2018,7 +2020,7 @@ __STATIC_INLINE void LL_ADC_SetResolution(ADC_TypeDef *ADCx, uint32_t Resolution * @arg @ref LL_ADC_RESOLUTION_8B * @arg @ref LL_ADC_RESOLUTION_6B */ -__STATIC_INLINE uint32_t LL_ADC_GetResolution(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_GetResolution(const ADC_TypeDef *ADCx) { return (uint32_t)(READ_BIT(ADCx->CR1, ADC_CR1_RES)); } @@ -2049,7 +2051,7 @@ __STATIC_INLINE void LL_ADC_SetDataAlignment(ADC_TypeDef *ADCx, uint32_t DataAli * @arg @ref LL_ADC_DATA_ALIGN_RIGHT * @arg @ref LL_ADC_DATA_ALIGN_LEFT */ -__STATIC_INLINE uint32_t LL_ADC_GetDataAlignment(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_GetDataAlignment(const ADC_TypeDef *ADCx) { return (uint32_t)(READ_BIT(ADCx->CR2, ADC_CR2_ALIGN)); } @@ -2100,7 +2102,7 @@ __STATIC_INLINE void LL_ADC_SetSequencersScanMode(ADC_TypeDef *ADCx, uint32_t Sc * @arg @ref LL_ADC_SEQ_SCAN_DISABLE * @arg @ref LL_ADC_SEQ_SCAN_ENABLE 
*/ -__STATIC_INLINE uint32_t LL_ADC_GetSequencersScanMode(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_GetSequencersScanMode(const ADC_TypeDef *ADCx) { return (uint32_t)(READ_BIT(ADCx->CR1, ADC_CR1_SCAN)); } @@ -2119,7 +2121,7 @@ __STATIC_INLINE uint32_t LL_ADC_GetSequencersScanMode(ADC_TypeDef *ADCx) * external interrupt line). * @note On this STM32 series, setting of external trigger edge is performed * using function @ref LL_ADC_REG_StartConversionExtTrig(). - * @note Availability of parameters of trigger sources from timer + * @note Availability of parameters of trigger sources from timer * depends on timers availability on the selected device. * @rmtoll CR2 EXTSEL LL_ADC_REG_SetTriggerSource\n * CR2 EXTEN LL_ADC_REG_SetTriggerSource @@ -2146,11 +2148,11 @@ __STATIC_INLINE uint32_t LL_ADC_GetSequencersScanMode(ADC_TypeDef *ADCx) */ __STATIC_INLINE void LL_ADC_REG_SetTriggerSource(ADC_TypeDef *ADCx, uint32_t TriggerSource) { -/* Note: On this STM32 series, ADC group regular external trigger edge */ -/* is used to perform a ADC conversion start. */ -/* This function does not set external trigger edge. */ -/* This feature is set using function */ -/* @ref LL_ADC_REG_StartConversionExtTrig(). */ + /* Note: On this STM32 series, ADC group regular external trigger edge */ + /* is used to perform a ADC conversion start. */ + /* This function does not set external trigger edge. */ + /* This feature is set using function */ + /* @ref LL_ADC_REG_StartConversionExtTrig(). */ MODIFY_REG(ADCx->CR2, ADC_CR2_EXTSEL, (TriggerSource & ADC_CR2_EXTSEL)); } @@ -2161,10 +2163,10 @@ __STATIC_INLINE void LL_ADC_REG_SetTriggerSource(ADC_TypeDef *ADCx, uint32_t Tri * @note To determine whether group regular trigger source is * internal (SW start) or external, without detail * of which peripheral is selected as external trigger, - * (equivalent to + * (equivalent to * "if(LL_ADC_REG_GetTriggerSource(ADC1) == LL_ADC_REG_TRIG_SOFTWARE)") * use function @ref LL_ADC_REG_IsTriggerSourceSWStart. - * @note Availability of parameters of trigger sources from timer + * @note Availability of parameters of trigger sources from timer * depends on timers availability on the selected device. * @rmtoll CR2 EXTSEL LL_ADC_REG_GetTriggerSource\n * CR2 EXTEN LL_ADC_REG_GetTriggerSource @@ -2188,14 +2190,14 @@ __STATIC_INLINE void LL_ADC_REG_SetTriggerSource(ADC_TypeDef *ADCx, uint32_t Tri * @arg @ref LL_ADC_REG_TRIG_EXT_TIM8_TRGO * @arg @ref LL_ADC_REG_TRIG_EXT_EXTI_LINE11 */ -__STATIC_INLINE uint32_t LL_ADC_REG_GetTriggerSource(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_REG_GetTriggerSource(const ADC_TypeDef *ADCx) { uint32_t TriggerSource = READ_BIT(ADCx->CR2, ADC_CR2_EXTSEL | ADC_CR2_EXTEN); - + /* Value for shift of {0; 4; 8; 12} depending on value of bitfield */ /* corresponding to ADC_CR2_EXTEN {0; 1; 2; 3}. */ uint32_t ShiftExten = ((TriggerSource & ADC_CR2_EXTEN) >> (ADC_REG_TRIG_EXTEN_BITOFFSET_POS - 2UL)); - + /* Set bitfield corresponding to ADC_CR2_EXTEN and ADC_CR2_EXTSEL */ /* to match with triggers literals definition. */ return ((TriggerSource @@ -2215,7 +2217,7 @@ __STATIC_INLINE uint32_t LL_ADC_REG_GetTriggerSource(ADC_TypeDef *ADCx) * @retval Value "0" if trigger source external trigger * Value "1" if trigger source SW start. 
*/ -__STATIC_INLINE uint32_t LL_ADC_REG_IsTriggerSourceSWStart(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_REG_IsTriggerSourceSWStart(const ADC_TypeDef *ADCx) { return (READ_BIT(ADCx->CR2, ADC_CR2_EXTEN) == (LL_ADC_REG_TRIG_SOFTWARE & ADC_CR2_EXTEN)); } @@ -2232,7 +2234,7 @@ __STATIC_INLINE uint32_t LL_ADC_REG_IsTriggerSourceSWStart(ADC_TypeDef *ADCx) * @arg @ref LL_ADC_REG_TRIG_EXT_FALLING * @arg @ref LL_ADC_REG_TRIG_EXT_RISINGFALLING */ -__STATIC_INLINE uint32_t LL_ADC_REG_GetTriggerEdge(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_REG_GetTriggerEdge(const ADC_TypeDef *ADCx) { return (uint32_t)(READ_BIT(ADCx->CR2, ADC_CR2_EXTEN)); } @@ -2354,7 +2356,7 @@ __STATIC_INLINE void LL_ADC_REG_SetSequencerLength(ADC_TypeDef *ADCx, uint32_t S * @arg @ref LL_ADC_REG_SEQ_SCAN_ENABLE_15RANKS * @arg @ref LL_ADC_REG_SEQ_SCAN_ENABLE_16RANKS */ -__STATIC_INLINE uint32_t LL_ADC_REG_GetSequencerLength(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_REG_GetSequencerLength(const ADC_TypeDef *ADCx) { return (uint32_t)(READ_BIT(ADCx->SQR1, ADC_SQR1_L)); } @@ -2363,7 +2365,7 @@ __STATIC_INLINE uint32_t LL_ADC_REG_GetSequencerLength(ADC_TypeDef *ADCx) * @brief Set ADC group regular sequencer discontinuous mode: * sequence subdivided and scan conversions interrupted every selected * number of ranks. - * @note It is not possible to enable both ADC group regular + * @note It is not possible to enable both ADC group regular * continuous mode and sequencer discontinuous mode. * @note It is not possible to enable both ADC auto-injected mode * and ADC group regular sequencer discontinuous mode. @@ -2405,7 +2407,7 @@ __STATIC_INLINE void LL_ADC_REG_SetSequencerDiscont(ADC_TypeDef *ADCx, uint32_t * @arg @ref LL_ADC_REG_SEQ_DISCONT_7RANKS * @arg @ref LL_ADC_REG_SEQ_DISCONT_8RANKS */ -__STATIC_INLINE uint32_t LL_ADC_REG_GetSequencerDiscont(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_REG_GetSequencerDiscont(const ADC_TypeDef *ADCx) { return (uint32_t)(READ_BIT(ADCx->CR1, ADC_CR1_DISCEN | ADC_CR1_DISCNUM)); } @@ -2483,7 +2485,7 @@ __STATIC_INLINE uint32_t LL_ADC_REG_GetSequencerDiscont(ADC_TypeDef *ADCx) * @arg @ref LL_ADC_CHANNEL_VREFINT (1) * @arg @ref LL_ADC_CHANNEL_TEMPSENSOR (1)(2) * @arg @ref LL_ADC_CHANNEL_VBAT (1) - * + * * (1) On STM32F4, parameter available only on ADC instance: ADC1.\n * (2) On devices STM32F42x and STM32F43x, limitation: this internal channel is shared between temperature sensor and Vbat, only 1 measurement path must be enabled. * @retval None @@ -2495,7 +2497,7 @@ __STATIC_INLINE void LL_ADC_REG_SetSequencerRanks(ADC_TypeDef *ADCx, uint32_t Ra /* Parameters "Rank" and "Channel" are used with masks because containing */ /* other bits reserved for other purpose. 
*/ __IO uint32_t *preg = __ADC_PTR_REG_OFFSET(ADCx->SQR1, __ADC_MASK_SHIFT(Rank, ADC_REG_SQRX_REGOFFSET_MASK)); - + MODIFY_REG(*preg, ADC_CHANNEL_ID_NUMBER_MASK << (Rank & ADC_REG_RANK_ID_SQRX_MASK), (Channel & ADC_CHANNEL_ID_NUMBER_MASK) << (Rank & ADC_REG_RANK_ID_SQRX_MASK)); @@ -2578,21 +2580,21 @@ __STATIC_INLINE void LL_ADC_REG_SetSequencerRanks(ADC_TypeDef *ADCx, uint32_t Ra * @arg @ref LL_ADC_CHANNEL_VREFINT (1) * @arg @ref LL_ADC_CHANNEL_TEMPSENSOR (1)(2) * @arg @ref LL_ADC_CHANNEL_VBAT (1) - * + * * (1) On STM32F4, parameter available only on ADC instance: ADC1.\n * (2) On devices STM32F42x and STM32F43x, limitation: this internal channel is shared between temperature sensor and Vbat, only 1 measurement path must be enabled.\n * (1) For ADC channel read back from ADC register, * comparison with internal channel parameter to be done * using helper macro @ref __LL_ADC_CHANNEL_INTERNAL_TO_EXTERNAL(). */ -__STATIC_INLINE uint32_t LL_ADC_REG_GetSequencerRanks(ADC_TypeDef *ADCx, uint32_t Rank) +__STATIC_INLINE uint32_t LL_ADC_REG_GetSequencerRanks(const ADC_TypeDef *ADCx, uint32_t Rank) { - __IO uint32_t *preg = __ADC_PTR_REG_OFFSET(ADCx->SQR1, __ADC_MASK_SHIFT(Rank, ADC_REG_SQRX_REGOFFSET_MASK)); - - return (uint32_t) (READ_BIT(*preg, - ADC_CHANNEL_ID_NUMBER_MASK << (Rank & ADC_REG_RANK_ID_SQRX_MASK)) - >> (Rank & ADC_REG_RANK_ID_SQRX_MASK) - ); + __IO uint32_t const *preg = __ADC_PTR_REG_OFFSET(ADCx->SQR1, __ADC_MASK_SHIFT(Rank, ADC_REG_SQRX_REGOFFSET_MASK)); + + return (uint32_t)(READ_BIT(*preg, + ADC_CHANNEL_ID_NUMBER_MASK << (Rank & ADC_REG_RANK_ID_SQRX_MASK)) + >> (Rank & ADC_REG_RANK_ID_SQRX_MASK) + ); } /** @@ -2601,7 +2603,7 @@ __STATIC_INLINE uint32_t LL_ADC_REG_GetSequencerRanks(ADC_TypeDef *ADCx, uint32_ * - single mode: one conversion per trigger * - continuous mode: after the first trigger, following * conversions launched successively automatically. - * @note It is not possible to enable both ADC group regular + * @note It is not possible to enable both ADC group regular * continuous mode and sequencer discontinuous mode. * @rmtoll CR2 CONT LL_ADC_REG_SetContinuousMode * @param ADCx ADC instance @@ -2627,7 +2629,7 @@ __STATIC_INLINE void LL_ADC_REG_SetContinuousMode(ADC_TypeDef *ADCx, uint32_t Co * @arg @ref LL_ADC_REG_CONV_SINGLE * @arg @ref LL_ADC_REG_CONV_CONTINUOUS */ -__STATIC_INLINE uint32_t LL_ADC_REG_GetContinuousMode(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_REG_GetContinuousMode(const ADC_TypeDef *ADCx) { return (uint32_t)(READ_BIT(ADCx->CR2, ADC_CR2_CONT)); } @@ -2698,7 +2700,7 @@ __STATIC_INLINE void LL_ADC_REG_SetDMATransfer(ADC_TypeDef *ADCx, uint32_t DMATr * @arg @ref LL_ADC_REG_DMA_TRANSFER_LIMITED * @arg @ref LL_ADC_REG_DMA_TRANSFER_UNLIMITED */ -__STATIC_INLINE uint32_t LL_ADC_REG_GetDMATransfer(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_REG_GetDMATransfer(const ADC_TypeDef *ADCx) { return (uint32_t)(READ_BIT(ADCx->CR2, ADC_CR2_DMA | ADC_CR2_DDS)); } @@ -2735,7 +2737,7 @@ __STATIC_INLINE void LL_ADC_REG_SetFlagEndOfConversion(ADC_TypeDef *ADCx, uint32 * @arg @ref LL_ADC_REG_FLAG_EOC_SEQUENCE_CONV * @arg @ref LL_ADC_REG_FLAG_EOC_UNITARY_CONV */ -__STATIC_INLINE uint32_t LL_ADC_REG_GetFlagEndOfConversion(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_REG_GetFlagEndOfConversion(const ADC_TypeDef *ADCx) { return (uint32_t)(READ_BIT(ADCx->CR2, ADC_CR2_EOCS)); } @@ -2754,7 +2756,7 @@ __STATIC_INLINE uint32_t LL_ADC_REG_GetFlagEndOfConversion(ADC_TypeDef *ADCx) * external interrupt line). 
* @note On this STM32 series, setting of external trigger edge is performed * using function @ref LL_ADC_INJ_StartConversionExtTrig(). - * @note Availability of parameters of trigger sources from timer + * @note Availability of parameters of trigger sources from timer * depends on timers availability on the selected device. * @rmtoll CR2 JEXTSEL LL_ADC_INJ_SetTriggerSource\n * CR2 JEXTEN LL_ADC_INJ_SetTriggerSource @@ -2781,11 +2783,11 @@ __STATIC_INLINE uint32_t LL_ADC_REG_GetFlagEndOfConversion(ADC_TypeDef *ADCx) */ __STATIC_INLINE void LL_ADC_INJ_SetTriggerSource(ADC_TypeDef *ADCx, uint32_t TriggerSource) { -/* Note: On this STM32 series, ADC group injected external trigger edge */ -/* is used to perform a ADC conversion start. */ -/* This function does not set external trigger edge. */ -/* This feature is set using function */ -/* @ref LL_ADC_INJ_StartConversionExtTrig(). */ + /* Note: On this STM32 series, ADC group injected external trigger edge */ + /* is used to perform a ADC conversion start. */ + /* This function does not set external trigger edge. */ + /* This feature is set using function */ + /* @ref LL_ADC_INJ_StartConversionExtTrig(). */ MODIFY_REG(ADCx->CR2, ADC_CR2_JEXTSEL, (TriggerSource & ADC_CR2_JEXTSEL)); } @@ -2796,10 +2798,10 @@ __STATIC_INLINE void LL_ADC_INJ_SetTriggerSource(ADC_TypeDef *ADCx, uint32_t Tri * @note To determine whether group injected trigger source is * internal (SW start) or external, without detail * of which peripheral is selected as external trigger, - * (equivalent to + * (equivalent to * "if(LL_ADC_INJ_GetTriggerSource(ADC1) == LL_ADC_INJ_TRIG_SOFTWARE)") * use function @ref LL_ADC_INJ_IsTriggerSourceSWStart. - * @note Availability of parameters of trigger sources from timer + * @note Availability of parameters of trigger sources from timer * depends on timers availability on the selected device. * @rmtoll CR2 JEXTSEL LL_ADC_INJ_GetTriggerSource\n * CR2 JEXTEN LL_ADC_INJ_GetTriggerSource @@ -2823,14 +2825,14 @@ __STATIC_INLINE void LL_ADC_INJ_SetTriggerSource(ADC_TypeDef *ADCx, uint32_t Tri * @arg @ref LL_ADC_INJ_TRIG_EXT_TIM8_CH4 * @arg @ref LL_ADC_INJ_TRIG_EXT_EXTI_LINE15 */ -__STATIC_INLINE uint32_t LL_ADC_INJ_GetTriggerSource(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_INJ_GetTriggerSource(const ADC_TypeDef *ADCx) { uint32_t TriggerSource = READ_BIT(ADCx->CR2, ADC_CR2_JEXTSEL | ADC_CR2_JEXTEN); - + /* Value for shift of {0; 4; 8; 12} depending on value of bitfield */ /* corresponding to ADC_CR2_JEXTEN {0; 1; 2; 3}. */ uint32_t ShiftExten = ((TriggerSource & ADC_CR2_JEXTEN) >> (ADC_INJ_TRIG_EXTEN_BITOFFSET_POS - 2UL)); - + /* Set bitfield corresponding to ADC_CR2_JEXTEN and ADC_CR2_JEXTSEL */ /* to match with triggers literals definition. */ return ((TriggerSource @@ -2850,7 +2852,7 @@ __STATIC_INLINE uint32_t LL_ADC_INJ_GetTriggerSource(ADC_TypeDef *ADCx) * @retval Value "0" if trigger source external trigger * Value "1" if trigger source SW start. 
*/ -__STATIC_INLINE uint32_t LL_ADC_INJ_IsTriggerSourceSWStart(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_INJ_IsTriggerSourceSWStart(const ADC_TypeDef *ADCx) { return (READ_BIT(ADCx->CR2, ADC_CR2_JEXTEN) == (LL_ADC_INJ_TRIG_SOFTWARE & ADC_CR2_JEXTEN)); } @@ -2865,7 +2867,7 @@ __STATIC_INLINE uint32_t LL_ADC_INJ_IsTriggerSourceSWStart(ADC_TypeDef *ADCx) * @arg @ref LL_ADC_INJ_TRIG_EXT_FALLING * @arg @ref LL_ADC_INJ_TRIG_EXT_RISINGFALLING */ -__STATIC_INLINE uint32_t LL_ADC_INJ_GetTriggerEdge(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_INJ_GetTriggerEdge(const ADC_TypeDef *ADCx) { return (uint32_t)(READ_BIT(ADCx->CR2, ADC_CR2_JEXTEN)); } @@ -2920,7 +2922,7 @@ __STATIC_INLINE void LL_ADC_INJ_SetSequencerLength(ADC_TypeDef *ADCx, uint32_t S * @arg @ref LL_ADC_INJ_SEQ_SCAN_ENABLE_3RANKS * @arg @ref LL_ADC_INJ_SEQ_SCAN_ENABLE_4RANKS */ -__STATIC_INLINE uint32_t LL_ADC_INJ_GetSequencerLength(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_INJ_GetSequencerLength(const ADC_TypeDef *ADCx) { return (uint32_t)(READ_BIT(ADCx->JSQR, ADC_JSQR_JL)); } @@ -2953,7 +2955,7 @@ __STATIC_INLINE void LL_ADC_INJ_SetSequencerDiscont(ADC_TypeDef *ADCx, uint32_t * @arg @ref LL_ADC_INJ_SEQ_DISCONT_DISABLE * @arg @ref LL_ADC_INJ_SEQ_DISCONT_1RANK */ -__STATIC_INLINE uint32_t LL_ADC_INJ_GetSequencerDiscont(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_INJ_GetSequencerDiscont(const ADC_TypeDef *ADCx) { return (uint32_t)(READ_BIT(ADCx->CR1, ADC_CR1_JDISCEN)); } @@ -3000,7 +3002,7 @@ __STATIC_INLINE uint32_t LL_ADC_INJ_GetSequencerDiscont(ADC_TypeDef *ADCx) * @arg @ref LL_ADC_CHANNEL_VREFINT (1) * @arg @ref LL_ADC_CHANNEL_TEMPSENSOR (1)(2) * @arg @ref LL_ADC_CHANNEL_VBAT (1) - * + * * (1) On STM32F4, parameter available only on ADC instance: ADC1.\n * (2) On devices STM32F42x and STM32F43x, limitation: this internal channel is shared between temperature sensor and Vbat, only 1 measurement path must be enabled. * @retval None @@ -3012,7 +3014,7 @@ __STATIC_INLINE void LL_ADC_INJ_SetSequencerRanks(ADC_TypeDef *ADCx, uint32_t Ra /* Parameters "Rank" and "Channel" are used with masks because containing */ /* other bits reserved for other purpose. */ uint32_t tmpreg1 = (READ_BIT(ADCx->JSQR, ADC_JSQR_JL) >> ADC_JSQR_JL_Pos) + 1UL; - + MODIFY_REG(ADCx->JSQR, ADC_CHANNEL_ID_NUMBER_MASK << (5UL * (uint8_t)(((Rank) + 3UL) - (tmpreg1))), (Channel & ADC_CHANNEL_ID_NUMBER_MASK) << (5UL * (uint8_t)(((Rank) + 3UL) - (tmpreg1)))); @@ -3067,17 +3069,17 @@ __STATIC_INLINE void LL_ADC_INJ_SetSequencerRanks(ADC_TypeDef *ADCx, uint32_t Ra * @arg @ref LL_ADC_CHANNEL_VREFINT (1) * @arg @ref LL_ADC_CHANNEL_TEMPSENSOR (1)(2) * @arg @ref LL_ADC_CHANNEL_VBAT (1) - * + * * (1) On STM32F4, parameter available only on ADC instance: ADC1.\n * (2) On devices STM32F42x and STM32F43x, limitation: this internal channel is shared between temperature sensor and Vbat, only 1 measurement path must be enabled.\n * (1) For ADC channel read back from ADC register, * comparison with internal channel parameter to be done * using helper macro @ref __LL_ADC_CHANNEL_INTERNAL_TO_EXTERNAL(). 
*/ -__STATIC_INLINE uint32_t LL_ADC_INJ_GetSequencerRanks(ADC_TypeDef *ADCx, uint32_t Rank) +__STATIC_INLINE uint32_t LL_ADC_INJ_GetSequencerRanks(const ADC_TypeDef *ADCx, uint32_t Rank) { uint32_t tmpreg1 = (READ_BIT(ADCx->JSQR, ADC_JSQR_JL) >> ADC_JSQR_JL_Pos) + 1UL; - + return (uint32_t)(READ_BIT(ADCx->JSQR, ADC_CHANNEL_ID_NUMBER_MASK << (5UL * (uint8_t)(((Rank) + 3UL) - (tmpreg1)))) >> (5UL * (uint8_t)(((Rank) + 3UL) - (tmpreg1))) @@ -3088,18 +3090,18 @@ __STATIC_INLINE uint32_t LL_ADC_INJ_GetSequencerRanks(ADC_TypeDef *ADCx, uint32_ * @brief Set ADC group injected conversion trigger: * independent or from ADC group regular. * @note This mode can be used to extend number of data registers - * updated after one ADC conversion trigger and with data + * updated after one ADC conversion trigger and with data * permanently kept (not erased by successive conversions of scan of * ADC sequencer ranks), up to 5 data registers: * 1 data register on ADC group regular, 4 data registers - * on ADC group injected. + * on ADC group injected. * @note If ADC group injected injected trigger source is set to an * external trigger, this feature must be must be set to * independent trigger. - * ADC group injected automatic trigger is compliant only with - * group injected trigger source set to SW start, without any - * further action on ADC group injected conversion start or stop: - * in this case, ADC group injected is controlled only + * ADC group injected automatic trigger is compliant only with + * group injected trigger source set to SW start, without any + * further action on ADC group injected conversion start or stop: + * in this case, ADC group injected is controlled only * from ADC group regular. * @note It is not possible to enable both ADC group injected * auto-injected mode and sequencer discontinuous mode. 
@@ -3124,7 +3126,7 @@ __STATIC_INLINE void LL_ADC_INJ_SetTrigAuto(ADC_TypeDef *ADCx, uint32_t TrigAuto * @arg @ref LL_ADC_INJ_TRIG_INDEPENDENT * @arg @ref LL_ADC_INJ_TRIG_FROM_GRP_REGULAR */ -__STATIC_INLINE uint32_t LL_ADC_INJ_GetTrigAuto(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_INJ_GetTrigAuto(const ADC_TypeDef *ADCx) { return (uint32_t)(READ_BIT(ADCx->CR1, ADC_CR1_JAUTO)); } @@ -3157,7 +3159,7 @@ __STATIC_INLINE uint32_t LL_ADC_INJ_GetTrigAuto(ADC_TypeDef *ADCx) __STATIC_INLINE void LL_ADC_INJ_SetOffset(ADC_TypeDef *ADCx, uint32_t Rank, uint32_t OffsetLevel) { __IO uint32_t *preg = __ADC_PTR_REG_OFFSET(ADCx->JOFR1, __ADC_MASK_SHIFT(Rank, ADC_INJ_JOFRX_REGOFFSET_MASK)); - + MODIFY_REG(*preg, ADC_JOFR1_JOFFSET1, OffsetLevel); @@ -3181,10 +3183,10 @@ __STATIC_INLINE void LL_ADC_INJ_SetOffset(ADC_TypeDef *ADCx, uint32_t Rank, uint * @arg @ref LL_ADC_INJ_RANK_4 * @retval Value between Min_Data=0x000 and Max_Data=0xFFF */ -__STATIC_INLINE uint32_t LL_ADC_INJ_GetOffset(ADC_TypeDef *ADCx, uint32_t Rank) +__STATIC_INLINE uint32_t LL_ADC_INJ_GetOffset(const ADC_TypeDef *ADCx, uint32_t Rank) { - __IO uint32_t *preg = __ADC_PTR_REG_OFFSET(ADCx->JOFR1, __ADC_MASK_SHIFT(Rank, ADC_INJ_JOFRX_REGOFFSET_MASK)); - + __IO uint32_t const *preg = __ADC_PTR_REG_OFFSET(ADCx->JOFR1, __ADC_MASK_SHIFT(Rank, ADC_INJ_JOFRX_REGOFFSET_MASK)); + return (uint32_t)(READ_BIT(*preg, ADC_JOFR1_JOFFSET1) ); @@ -3260,7 +3262,7 @@ __STATIC_INLINE uint32_t LL_ADC_INJ_GetOffset(ADC_TypeDef *ADCx, uint32_t Rank) * @arg @ref LL_ADC_CHANNEL_VREFINT (1) * @arg @ref LL_ADC_CHANNEL_TEMPSENSOR (1)(2) * @arg @ref LL_ADC_CHANNEL_VBAT (1) - * + * * (1) On STM32F4, parameter available only on ADC instance: ADC1.\n * (2) On devices STM32F42x and STM32F43x, limitation: this internal channel is shared between temperature sensor and Vbat, only 1 measurement path must be enabled. * @param SamplingTime This parameter can be one of the following values: @@ -3281,7 +3283,7 @@ __STATIC_INLINE void LL_ADC_SetChannelSamplingTime(ADC_TypeDef *ADCx, uint32_t C /* Parameter "Channel" is used with masks because containing */ /* other bits reserved for other purpose. */ __IO uint32_t *preg = __ADC_PTR_REG_OFFSET(ADCx->SMPR1, __ADC_MASK_SHIFT(Channel, ADC_CHANNEL_SMPRX_REGOFFSET_MASK)); - + MODIFY_REG(*preg, ADC_SMPR2_SMP0 << __ADC_MASK_SHIFT(Channel, ADC_CHANNEL_SMPx_BITOFFSET_MASK), SamplingTime << __ADC_MASK_SHIFT(Channel, ADC_CHANNEL_SMPx_BITOFFSET_MASK)); @@ -3338,7 +3340,7 @@ __STATIC_INLINE void LL_ADC_SetChannelSamplingTime(ADC_TypeDef *ADCx, uint32_t C * @arg @ref LL_ADC_CHANNEL_VREFINT (1) * @arg @ref LL_ADC_CHANNEL_TEMPSENSOR (1)(2) * @arg @ref LL_ADC_CHANNEL_VBAT (1) - * + * * (1) On STM32F4, parameter available only on ADC instance: ADC1.\n * (2) On devices STM32F42x and STM32F43x, limitation: this internal channel is shared between temperature sensor and Vbat, only 1 measurement path must be enabled. 
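/* Illustrative usage sketch (not part of the header shown in this diff): one injected
   rank auto-triggered from group regular, with an offset and sampling time chosen
   purely as examples on ADC1 channel 5. */
LL_ADC_INJ_SetSequencerRanks(ADC1, LL_ADC_INJ_RANK_1, LL_ADC_CHANNEL_5);
LL_ADC_INJ_SetOffset(ADC1, LL_ADC_INJ_RANK_1, 0x100);
LL_ADC_SetChannelSamplingTime(ADC1, LL_ADC_CHANNEL_5, LL_ADC_SAMPLINGTIME_144CYCLES);
LL_ADC_INJ_SetTrigAuto(ADC1, LL_ADC_INJ_TRIG_FROM_GRP_REGULAR);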
* @retval Returned value can be one of the following values: @@ -3351,10 +3353,10 @@ __STATIC_INLINE void LL_ADC_SetChannelSamplingTime(ADC_TypeDef *ADCx, uint32_t C * @arg @ref LL_ADC_SAMPLINGTIME_144CYCLES * @arg @ref LL_ADC_SAMPLINGTIME_480CYCLES */ -__STATIC_INLINE uint32_t LL_ADC_GetChannelSamplingTime(ADC_TypeDef *ADCx, uint32_t Channel) +__STATIC_INLINE uint32_t LL_ADC_GetChannelSamplingTime(const ADC_TypeDef *ADCx, uint32_t Channel) { - __IO uint32_t *preg = __ADC_PTR_REG_OFFSET(ADCx->SMPR1, __ADC_MASK_SHIFT(Channel, ADC_CHANNEL_SMPRX_REGOFFSET_MASK)); - + __IO uint32_t const *preg = __ADC_PTR_REG_OFFSET(ADCx->SMPR1, __ADC_MASK_SHIFT(Channel, ADC_CHANNEL_SMPRX_REGOFFSET_MASK)); + return (uint32_t)(READ_BIT(*preg, ADC_SMPR2_SMP0 << __ADC_MASK_SHIFT(Channel, ADC_CHANNEL_SMPx_BITOFFSET_MASK)) >> __ADC_MASK_SHIFT(Channel, ADC_CHANNEL_SMPx_BITOFFSET_MASK) @@ -3394,35 +3396,35 @@ __STATIC_INLINE uint32_t LL_ADC_GetChannelSamplingTime(ADC_TypeDef *ADCx, uint32 * @arg @ref LL_ADC_AWD_ALL_CHANNELS_REG * @arg @ref LL_ADC_AWD_ALL_CHANNELS_INJ * @arg @ref LL_ADC_AWD_ALL_CHANNELS_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_0_REG - * @arg @ref LL_ADC_AWD_CHANNEL_0_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_0_REG + * @arg @ref LL_ADC_AWD_CHANNEL_0_INJ * @arg @ref LL_ADC_AWD_CHANNEL_0_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_1_REG - * @arg @ref LL_ADC_AWD_CHANNEL_1_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_1_REG + * @arg @ref LL_ADC_AWD_CHANNEL_1_INJ * @arg @ref LL_ADC_AWD_CHANNEL_1_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_2_REG - * @arg @ref LL_ADC_AWD_CHANNEL_2_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_2_REG + * @arg @ref LL_ADC_AWD_CHANNEL_2_INJ * @arg @ref LL_ADC_AWD_CHANNEL_2_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_3_REG - * @arg @ref LL_ADC_AWD_CHANNEL_3_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_3_REG + * @arg @ref LL_ADC_AWD_CHANNEL_3_INJ * @arg @ref LL_ADC_AWD_CHANNEL_3_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_4_REG - * @arg @ref LL_ADC_AWD_CHANNEL_4_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_4_REG + * @arg @ref LL_ADC_AWD_CHANNEL_4_INJ * @arg @ref LL_ADC_AWD_CHANNEL_4_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_5_REG - * @arg @ref LL_ADC_AWD_CHANNEL_5_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_5_REG + * @arg @ref LL_ADC_AWD_CHANNEL_5_INJ * @arg @ref LL_ADC_AWD_CHANNEL_5_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_6_REG - * @arg @ref LL_ADC_AWD_CHANNEL_6_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_6_REG + * @arg @ref LL_ADC_AWD_CHANNEL_6_INJ * @arg @ref LL_ADC_AWD_CHANNEL_6_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_7_REG - * @arg @ref LL_ADC_AWD_CHANNEL_7_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_7_REG + * @arg @ref LL_ADC_AWD_CHANNEL_7_INJ * @arg @ref LL_ADC_AWD_CHANNEL_7_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_8_REG - * @arg @ref LL_ADC_AWD_CHANNEL_8_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_8_REG + * @arg @ref LL_ADC_AWD_CHANNEL_8_INJ * @arg @ref LL_ADC_AWD_CHANNEL_8_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_9_REG - * @arg @ref LL_ADC_AWD_CHANNEL_9_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_9_REG + * @arg @ref LL_ADC_AWD_CHANNEL_9_INJ * @arg @ref LL_ADC_AWD_CHANNEL_9_REG_INJ * @arg @ref LL_ADC_AWD_CHANNEL_10_REG * @arg @ref LL_ADC_AWD_CHANNEL_10_INJ @@ -3460,7 +3462,7 @@ __STATIC_INLINE uint32_t LL_ADC_GetChannelSamplingTime(ADC_TypeDef *ADCx, uint32 * @arg @ref LL_ADC_AWD_CH_VBAT_REG (1) * @arg @ref LL_ADC_AWD_CH_VBAT_INJ (1) * @arg @ref LL_ADC_AWD_CH_VBAT_REG_INJ (1) - * + * * (1) On STM32F4, parameter available only on ADC instance: ADC1.\n * (2) On devices STM32F42x and STM32F43x, limitation: this internal channel is shared between temperature 
sensor and Vbat, only 1 measurement path must be enabled. * @retval None @@ -3503,35 +3505,35 @@ __STATIC_INLINE void LL_ADC_SetAnalogWDMonitChannels(ADC_TypeDef *ADCx, uint32_t * @arg @ref LL_ADC_AWD_ALL_CHANNELS_REG * @arg @ref LL_ADC_AWD_ALL_CHANNELS_INJ * @arg @ref LL_ADC_AWD_ALL_CHANNELS_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_0_REG - * @arg @ref LL_ADC_AWD_CHANNEL_0_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_0_REG + * @arg @ref LL_ADC_AWD_CHANNEL_0_INJ * @arg @ref LL_ADC_AWD_CHANNEL_0_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_1_REG - * @arg @ref LL_ADC_AWD_CHANNEL_1_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_1_REG + * @arg @ref LL_ADC_AWD_CHANNEL_1_INJ * @arg @ref LL_ADC_AWD_CHANNEL_1_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_2_REG - * @arg @ref LL_ADC_AWD_CHANNEL_2_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_2_REG + * @arg @ref LL_ADC_AWD_CHANNEL_2_INJ * @arg @ref LL_ADC_AWD_CHANNEL_2_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_3_REG - * @arg @ref LL_ADC_AWD_CHANNEL_3_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_3_REG + * @arg @ref LL_ADC_AWD_CHANNEL_3_INJ * @arg @ref LL_ADC_AWD_CHANNEL_3_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_4_REG - * @arg @ref LL_ADC_AWD_CHANNEL_4_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_4_REG + * @arg @ref LL_ADC_AWD_CHANNEL_4_INJ * @arg @ref LL_ADC_AWD_CHANNEL_4_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_5_REG - * @arg @ref LL_ADC_AWD_CHANNEL_5_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_5_REG + * @arg @ref LL_ADC_AWD_CHANNEL_5_INJ * @arg @ref LL_ADC_AWD_CHANNEL_5_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_6_REG - * @arg @ref LL_ADC_AWD_CHANNEL_6_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_6_REG + * @arg @ref LL_ADC_AWD_CHANNEL_6_INJ * @arg @ref LL_ADC_AWD_CHANNEL_6_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_7_REG - * @arg @ref LL_ADC_AWD_CHANNEL_7_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_7_REG + * @arg @ref LL_ADC_AWD_CHANNEL_7_INJ * @arg @ref LL_ADC_AWD_CHANNEL_7_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_8_REG - * @arg @ref LL_ADC_AWD_CHANNEL_8_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_8_REG + * @arg @ref LL_ADC_AWD_CHANNEL_8_INJ * @arg @ref LL_ADC_AWD_CHANNEL_8_REG_INJ - * @arg @ref LL_ADC_AWD_CHANNEL_9_REG - * @arg @ref LL_ADC_AWD_CHANNEL_9_INJ + * @arg @ref LL_ADC_AWD_CHANNEL_9_REG + * @arg @ref LL_ADC_AWD_CHANNEL_9_INJ * @arg @ref LL_ADC_AWD_CHANNEL_9_REG_INJ * @arg @ref LL_ADC_AWD_CHANNEL_10_REG * @arg @ref LL_ADC_AWD_CHANNEL_10_INJ @@ -3561,7 +3563,7 @@ __STATIC_INLINE void LL_ADC_SetAnalogWDMonitChannels(ADC_TypeDef *ADCx, uint32_t * @arg @ref LL_ADC_AWD_CHANNEL_18_INJ * @arg @ref LL_ADC_AWD_CHANNEL_18_REG_INJ */ -__STATIC_INLINE uint32_t LL_ADC_GetAnalogWDMonitChannels(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_GetAnalogWDMonitChannels(const ADC_TypeDef *ADCx) { return (uint32_t)(READ_BIT(ADCx->CR1, (ADC_CR1_AWDEN | ADC_CR1_JAWDEN | ADC_CR1_AWDSGL | ADC_CR1_AWDCH))); } @@ -3591,7 +3593,7 @@ __STATIC_INLINE uint32_t LL_ADC_GetAnalogWDMonitChannels(ADC_TypeDef *ADCx) __STATIC_INLINE void LL_ADC_SetAnalogWDThresholds(ADC_TypeDef *ADCx, uint32_t AWDThresholdsHighLow, uint32_t AWDThresholdValue) { __IO uint32_t *preg = __ADC_PTR_REG_OFFSET(ADCx->HTR, AWDThresholdsHighLow); - + MODIFY_REG(*preg, ADC_HTR_HT, AWDThresholdValue); @@ -3611,10 +3613,10 @@ __STATIC_INLINE void LL_ADC_SetAnalogWDThresholds(ADC_TypeDef *ADCx, uint32_t AW * @arg @ref LL_ADC_AWD_THRESHOLD_LOW * @retval Value between Min_Data=0x000 and Max_Data=0xFFF */ -__STATIC_INLINE uint32_t LL_ADC_GetAnalogWDThresholds(ADC_TypeDef *ADCx, uint32_t AWDThresholdsHighLow) +__STATIC_INLINE uint32_t LL_ADC_GetAnalogWDThresholds(const ADC_TypeDef *ADCx, 
uint32_t AWDThresholdsHighLow) { - __IO uint32_t *preg = __ADC_PTR_REG_OFFSET(ADCx->HTR, AWDThresholdsHighLow); - + __IO uint32_t const *preg = __ADC_PTR_REG_OFFSET(ADCx->HTR, AWDThresholdsHighLow); + return (uint32_t)(READ_BIT(*preg, ADC_HTR_HT)); } @@ -3683,7 +3685,7 @@ __STATIC_INLINE void LL_ADC_SetMultimode(ADC_Common_TypeDef *ADCxy_COMMON, uint3 * @arg @ref LL_ADC_MULTI_TRIPLE_REG_INTERL * @arg @ref LL_ADC_MULTI_TRIPLE_INJ_ALTERN */ -__STATIC_INLINE uint32_t LL_ADC_GetMultimode(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_GetMultimode(const ADC_Common_TypeDef *ADCxy_COMMON) { return (uint32_t)(READ_BIT(ADCxy_COMMON->CCR, ADC_CCR_MULTI)); } @@ -3780,7 +3782,7 @@ __STATIC_INLINE void LL_ADC_SetMultiDMATransfer(ADC_Common_TypeDef *ADCxy_COMMON * @arg @ref LL_ADC_MULTI_REG_DMA_UNLMT_2 * @arg @ref LL_ADC_MULTI_REG_DMA_UNLMT_3 */ -__STATIC_INLINE uint32_t LL_ADC_GetMultiDMATransfer(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_GetMultiDMATransfer(const ADC_Common_TypeDef *ADCxy_COMMON) { return (uint32_t)(READ_BIT(ADCxy_COMMON->CCR, ADC_CCR_DMA | ADC_CCR_DDS)); } @@ -3842,7 +3844,7 @@ __STATIC_INLINE void LL_ADC_SetMultiTwoSamplingDelay(ADC_Common_TypeDef *ADCxy_C * @arg @ref LL_ADC_MULTI_TWOSMP_DELAY_19CYCLES * @arg @ref LL_ADC_MULTI_TWOSMP_DELAY_20CYCLES */ -__STATIC_INLINE uint32_t LL_ADC_GetMultiTwoSamplingDelay(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_GetMultiTwoSamplingDelay(const ADC_Common_TypeDef *ADCxy_COMMON) { return (uint32_t)(READ_BIT(ADCxy_COMMON->CCR, ADC_CCR_DELAY)); } @@ -3857,7 +3859,7 @@ __STATIC_INLINE uint32_t LL_ADC_GetMultiTwoSamplingDelay(ADC_Common_TypeDef *ADC /** * @brief Enable the selected ADC instance. - * @note On this STM32 series, after ADC enable, a delay for + * @note On this STM32 series, after ADC enable, a delay for * ADC internal analog stabilization is required before performing a * ADC conversion start. * Refer to device datasheet, parameter tSTAB. @@ -3887,7 +3889,7 @@ __STATIC_INLINE void LL_ADC_Disable(ADC_TypeDef *ADCx) * @param ADCx ADC instance * @retval 0: ADC is disabled, 1: ADC is enabled. */ -__STATIC_INLINE uint32_t LL_ADC_IsEnabled(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_IsEnabled(const ADC_TypeDef *ADCx) { return (READ_BIT(ADCx->CR2, ADC_CR2_ADON) == (ADC_CR2_ADON)); } @@ -3907,9 +3909,9 @@ __STATIC_INLINE uint32_t LL_ADC_IsEnabled(ADC_TypeDef *ADCx) * - If ADC trigger has been set to software start, ADC conversion * starts immediately. * - If ADC trigger has been set to external trigger, ADC conversion - * start must be performed using function + * start must be performed using function * @ref LL_ADC_REG_StartConversionExtTrig(). - * (if external trigger edge would have been set during ADC other + * (if external trigger edge would have been set during ADC other * settings, ADC conversion would start at trigger event * as soon as ADC is enabled). * @rmtoll CR2 SWSTART LL_ADC_REG_StartConversionSWStart @@ -3925,7 +3927,7 @@ __STATIC_INLINE void LL_ADC_REG_StartConversionSWStart(ADC_TypeDef *ADCx) * @brief Start ADC group regular conversion from external trigger. * @note ADC conversion will start at next trigger event (on the selected * trigger edge) following the ADC start conversion command. - * @note On this STM32 series, this function is relevant for + * @note On this STM32 series, this function is relevant for * ADC conversion start from external trigger. 
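/* Illustrative usage sketch (not part of the header shown in this diff): guard regular
   conversions of channel 4 with analog watchdog 1; the 12-bit threshold values are
   arbitrary examples. */
LL_ADC_SetAnalogWDMonitChannels(ADC1, LL_ADC_AWD_CHANNEL_4_REG);
LL_ADC_SetAnalogWDThresholds(ADC1, LL_ADC_AWD_THRESHOLD_HIGH, 0xC00);
LL_ADC_SetAnalogWDThresholds(ADC1, LL_ADC_AWD_THRESHOLD_LOW, 0x200);
LL_ADC_EnableIT_AWD1(ADC1);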
* If internal trigger (SW start) is needed, perform ADC conversion * start using function @ref LL_ADC_REG_StartConversionSWStart(). @@ -3969,7 +3971,7 @@ __STATIC_INLINE void LL_ADC_REG_StopConversionExtTrig(ADC_TypeDef *ADCx) * @param ADCx ADC instance * @retval Value between Min_Data=0x00000000 and Max_Data=0xFFFFFFFF */ -__STATIC_INLINE uint32_t LL_ADC_REG_ReadConversionData32(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_REG_ReadConversionData32(const ADC_TypeDef *ADCx) { return (uint16_t)(READ_BIT(ADCx->DR, ADC_DR_DATA)); } @@ -3984,7 +3986,7 @@ __STATIC_INLINE uint32_t LL_ADC_REG_ReadConversionData32(ADC_TypeDef *ADCx) * @param ADCx ADC instance * @retval Value between Min_Data=0x000 and Max_Data=0xFFF */ -__STATIC_INLINE uint16_t LL_ADC_REG_ReadConversionData12(ADC_TypeDef *ADCx) +__STATIC_INLINE uint16_t LL_ADC_REG_ReadConversionData12(const ADC_TypeDef *ADCx) { return (uint16_t)(READ_BIT(ADCx->DR, ADC_DR_DATA)); } @@ -3999,7 +4001,7 @@ __STATIC_INLINE uint16_t LL_ADC_REG_ReadConversionData12(ADC_TypeDef *ADCx) * @param ADCx ADC instance * @retval Value between Min_Data=0x000 and Max_Data=0x3FF */ -__STATIC_INLINE uint16_t LL_ADC_REG_ReadConversionData10(ADC_TypeDef *ADCx) +__STATIC_INLINE uint16_t LL_ADC_REG_ReadConversionData10(const ADC_TypeDef *ADCx) { return (uint16_t)(READ_BIT(ADCx->DR, ADC_DR_DATA)); } @@ -4014,9 +4016,9 @@ __STATIC_INLINE uint16_t LL_ADC_REG_ReadConversionData10(ADC_TypeDef *ADCx) * @param ADCx ADC instance * @retval Value between Min_Data=0x00 and Max_Data=0xFF */ -__STATIC_INLINE uint8_t LL_ADC_REG_ReadConversionData8(ADC_TypeDef *ADCx) +__STATIC_INLINE uint8_t LL_ADC_REG_ReadConversionData8(const ADC_TypeDef *ADCx) { - return (uint16_t)(READ_BIT(ADCx->DR, ADC_DR_DATA)); + return (uint8_t)(READ_BIT(ADCx->DR, ADC_DR_DATA)); } /** @@ -4029,9 +4031,9 @@ __STATIC_INLINE uint8_t LL_ADC_REG_ReadConversionData8(ADC_TypeDef *ADCx) * @param ADCx ADC instance * @retval Value between Min_Data=0x00 and Max_Data=0x3F */ -__STATIC_INLINE uint8_t LL_ADC_REG_ReadConversionData6(ADC_TypeDef *ADCx) +__STATIC_INLINE uint8_t LL_ADC_REG_ReadConversionData6(const ADC_TypeDef *ADCx) { - return (uint16_t)(READ_BIT(ADCx->DR, ADC_DR_DATA)); + return (uint8_t)(READ_BIT(ADCx->DR, ADC_DR_DATA)); } #if defined(ADC_MULTIMODE_SUPPORT) @@ -4056,7 +4058,7 @@ __STATIC_INLINE uint8_t LL_ADC_REG_ReadConversionData6(ADC_TypeDef *ADCx) * @arg @ref LL_ADC_MULTI_MASTER_SLAVE * @retval Value between Min_Data=0x00000000 and Max_Data=0xFFFFFFFF */ -__STATIC_INLINE uint32_t LL_ADC_REG_ReadMultiConversionData32(ADC_Common_TypeDef *ADCxy_COMMON, uint32_t ConversionData) +__STATIC_INLINE uint32_t LL_ADC_REG_ReadMultiConversionData32(const ADC_Common_TypeDef *ADCxy_COMMON, uint32_t ConversionData) { return (uint32_t)(READ_BIT(ADCxy_COMMON->CDR, ADC_DR_ADC2DATA) @@ -4080,9 +4082,9 @@ __STATIC_INLINE uint32_t LL_ADC_REG_ReadMultiConversionData32(ADC_Common_TypeDef * - If ADC trigger has been set to software start, ADC conversion * starts immediately. * - If ADC trigger has been set to external trigger, ADC conversion - * start must be performed using function + * start must be performed using function * @ref LL_ADC_INJ_StartConversionExtTrig(). - * (if external trigger edge would have been set during ADC other + * (if external trigger edge would have been set during ADC other * settings, ADC conversion would start at trigger event * as soon as ADC is enabled). 
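/* Illustrative usage sketch (not part of the header shown in this diff): blocking
   software-start conversion and 12-bit read-back on ADC1, assuming the ADC is already
   enabled and the tSTAB stabilization delay noted above has elapsed. */
LL_ADC_REG_StartConversionSWStart(ADC1);
while (LL_ADC_IsActiveFlag_EOCS(ADC1) == 0UL)
{
  /* wait for end of conversion */
}
uint16_t adc_data = LL_ADC_REG_ReadConversionData12(ADC1);
LL_ADC_ClearFlag_EOCS(ADC1);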
* @rmtoll CR2 JSWSTART LL_ADC_INJ_StartConversionSWStart @@ -4098,7 +4100,7 @@ __STATIC_INLINE void LL_ADC_INJ_StartConversionSWStart(ADC_TypeDef *ADCx) * @brief Start ADC group injected conversion from external trigger. * @note ADC conversion will start at next trigger event (on the selected * trigger edge) following the ADC start conversion command. - * @note On this STM32 series, this function is relevant for + * @note On this STM32 series, this function is relevant for * ADC conversion start from external trigger. * If internal trigger (SW start) is needed, perform ADC conversion * start using function @ref LL_ADC_INJ_StartConversionSWStart(). @@ -4150,10 +4152,10 @@ __STATIC_INLINE void LL_ADC_INJ_StopConversionExtTrig(ADC_TypeDef *ADCx) * @arg @ref LL_ADC_INJ_RANK_4 * @retval Value between Min_Data=0x00000000 and Max_Data=0xFFFFFFFF */ -__STATIC_INLINE uint32_t LL_ADC_INJ_ReadConversionData32(ADC_TypeDef *ADCx, uint32_t Rank) +__STATIC_INLINE uint32_t LL_ADC_INJ_ReadConversionData32(const ADC_TypeDef *ADCx, uint32_t Rank) { - __IO uint32_t *preg = __ADC_PTR_REG_OFFSET(ADCx->JDR1, __ADC_MASK_SHIFT(Rank, ADC_INJ_JDRX_REGOFFSET_MASK)); - + __IO uint32_t const *preg = __ADC_PTR_REG_OFFSET(ADCx->JDR1, __ADC_MASK_SHIFT(Rank, ADC_INJ_JDRX_REGOFFSET_MASK)); + return (uint32_t)(READ_BIT(*preg, ADC_JDR1_JDATA) ); @@ -4177,10 +4179,10 @@ __STATIC_INLINE uint32_t LL_ADC_INJ_ReadConversionData32(ADC_TypeDef *ADCx, uint * @arg @ref LL_ADC_INJ_RANK_4 * @retval Value between Min_Data=0x000 and Max_Data=0xFFF */ -__STATIC_INLINE uint16_t LL_ADC_INJ_ReadConversionData12(ADC_TypeDef *ADCx, uint32_t Rank) +__STATIC_INLINE uint16_t LL_ADC_INJ_ReadConversionData12(const ADC_TypeDef *ADCx, uint32_t Rank) { - __IO uint32_t *preg = __ADC_PTR_REG_OFFSET(ADCx->JDR1, __ADC_MASK_SHIFT(Rank, ADC_INJ_JDRX_REGOFFSET_MASK)); - + __IO uint32_t const *preg = __ADC_PTR_REG_OFFSET(ADCx->JDR1, __ADC_MASK_SHIFT(Rank, ADC_INJ_JDRX_REGOFFSET_MASK)); + return (uint16_t)(READ_BIT(*preg, ADC_JDR1_JDATA) ); @@ -4204,10 +4206,10 @@ __STATIC_INLINE uint16_t LL_ADC_INJ_ReadConversionData12(ADC_TypeDef *ADCx, uint * @arg @ref LL_ADC_INJ_RANK_4 * @retval Value between Min_Data=0x000 and Max_Data=0x3FF */ -__STATIC_INLINE uint16_t LL_ADC_INJ_ReadConversionData10(ADC_TypeDef *ADCx, uint32_t Rank) +__STATIC_INLINE uint16_t LL_ADC_INJ_ReadConversionData10(const ADC_TypeDef *ADCx, uint32_t Rank) { - __IO uint32_t *preg = __ADC_PTR_REG_OFFSET(ADCx->JDR1, __ADC_MASK_SHIFT(Rank, ADC_INJ_JDRX_REGOFFSET_MASK)); - + __IO uint32_t const *preg = __ADC_PTR_REG_OFFSET(ADCx->JDR1, __ADC_MASK_SHIFT(Rank, ADC_INJ_JDRX_REGOFFSET_MASK)); + return (uint16_t)(READ_BIT(*preg, ADC_JDR1_JDATA) ); @@ -4231,10 +4233,10 @@ __STATIC_INLINE uint16_t LL_ADC_INJ_ReadConversionData10(ADC_TypeDef *ADCx, uint * @arg @ref LL_ADC_INJ_RANK_4 * @retval Value between Min_Data=0x00 and Max_Data=0xFF */ -__STATIC_INLINE uint8_t LL_ADC_INJ_ReadConversionData8(ADC_TypeDef *ADCx, uint32_t Rank) +__STATIC_INLINE uint8_t LL_ADC_INJ_ReadConversionData8(const ADC_TypeDef *ADCx, uint32_t Rank) { - __IO uint32_t *preg = __ADC_PTR_REG_OFFSET(ADCx->JDR1, __ADC_MASK_SHIFT(Rank, ADC_INJ_JDRX_REGOFFSET_MASK)); - + __IO uint32_t const *preg = __ADC_PTR_REG_OFFSET(ADCx->JDR1, __ADC_MASK_SHIFT(Rank, ADC_INJ_JDRX_REGOFFSET_MASK)); + return (uint8_t)(READ_BIT(*preg, ADC_JDR1_JDATA) ); @@ -4258,10 +4260,10 @@ __STATIC_INLINE uint8_t LL_ADC_INJ_ReadConversionData8(ADC_TypeDef *ADCx, uint32 * @arg @ref LL_ADC_INJ_RANK_4 * @retval Value between Min_Data=0x00 and Max_Data=0x3F */ -__STATIC_INLINE 
uint8_t LL_ADC_INJ_ReadConversionData6(ADC_TypeDef *ADCx, uint32_t Rank) +__STATIC_INLINE uint8_t LL_ADC_INJ_ReadConversionData6(const ADC_TypeDef *ADCx, uint32_t Rank) { - __IO uint32_t *preg = __ADC_PTR_REG_OFFSET(ADCx->JDR1, __ADC_MASK_SHIFT(Rank, ADC_INJ_JDRX_REGOFFSET_MASK)); - + __IO uint32_t const *preg = __ADC_PTR_REG_OFFSET(ADCx->JDR1, __ADC_MASK_SHIFT(Rank, ADC_INJ_JDRX_REGOFFSET_MASK)); + return (uint8_t)(READ_BIT(*preg, ADC_JDR1_JDATA) ); @@ -4285,7 +4287,7 @@ __STATIC_INLINE uint8_t LL_ADC_INJ_ReadConversionData6(ADC_TypeDef *ADCx, uint32 * @param ADCx ADC instance * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_EOCS(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_EOCS(const ADC_TypeDef *ADCx) { return (READ_BIT(ADCx->SR, LL_ADC_FLAG_EOCS) == (LL_ADC_FLAG_EOCS)); } @@ -4296,7 +4298,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_EOCS(ADC_TypeDef *ADCx) * @param ADCx ADC instance * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_OVR(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_OVR(const ADC_TypeDef *ADCx) { return (READ_BIT(ADCx->SR, LL_ADC_FLAG_OVR) == (LL_ADC_FLAG_OVR)); } @@ -4308,7 +4310,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_OVR(ADC_TypeDef *ADCx) * @param ADCx ADC instance * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_JEOS(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_JEOS(const ADC_TypeDef *ADCx) { /* Note: on this STM32 series, there is no flag ADC group injected */ /* end of unitary conversion. */ @@ -4323,7 +4325,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_JEOS(ADC_TypeDef *ADCx) * @param ADCx ADC instance * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_AWD1(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_AWD1(const ADC_TypeDef *ADCx) { return (READ_BIT(ADCx->SR, LL_ADC_FLAG_AWD1) == (LL_ADC_FLAG_AWD1)); } @@ -4393,9 +4395,9 @@ __STATIC_INLINE void LL_ADC_ClearFlag_AWD1(ADC_TypeDef *ADCx) * (can be set directly from CMSIS definition or by using helper macro @ref __LL_ADC_COMMON_INSTANCE() ) * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_MST_EOCS(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_MST_EOCS(const ADC_Common_TypeDef *ADCxy_COMMON) { - return (READ_BIT(ADC1->SR, LL_ADC_FLAG_EOCS) == (LL_ADC_FLAG_EOCS)); + return (READ_BIT(ADCxy_COMMON->CSR, LL_ADC_FLAG_EOCS_MST) == (LL_ADC_FLAG_EOCS_MST)); } /** @@ -4409,7 +4411,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_MST_EOCS(ADC_Common_TypeDef *ADCxy_ * (can be set directly from CMSIS definition or by using helper macro @ref __LL_ADC_COMMON_INSTANCE() ) * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV1_EOCS(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV1_EOCS(const ADC_Common_TypeDef *ADCxy_COMMON) { return (READ_BIT(ADCxy_COMMON->CSR, LL_ADC_FLAG_EOCS_SLV1) == (LL_ADC_FLAG_EOCS_SLV1)); } @@ -4425,7 +4427,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV1_EOCS(ADC_Common_TypeDef *ADCxy * (can be set directly from CMSIS definition or by using helper macro @ref __LL_ADC_COMMON_INSTANCE() ) * @retval State of bit (1 or 0). 
*/ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV2_EOCS(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV2_EOCS(const ADC_Common_TypeDef *ADCxy_COMMON) { return (READ_BIT(ADCxy_COMMON->CSR, LL_ADC_FLAG_EOCS_SLV2) == (LL_ADC_FLAG_EOCS_SLV2)); } @@ -4436,7 +4438,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV2_EOCS(ADC_Common_TypeDef *ADCxy * (can be set directly from CMSIS definition or by using helper macro @ref __LL_ADC_COMMON_INSTANCE() ) * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_MST_OVR(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_MST_OVR(const ADC_Common_TypeDef *ADCxy_COMMON) { return (READ_BIT(ADCxy_COMMON->CSR, LL_ADC_FLAG_OVR_MST) == (LL_ADC_FLAG_OVR_MST)); } @@ -4448,7 +4450,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_MST_OVR(ADC_Common_TypeDef *ADCxy_C * (can be set directly from CMSIS definition or by using helper macro @ref __LL_ADC_COMMON_INSTANCE() ) * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV1_OVR(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV1_OVR(const ADC_Common_TypeDef *ADCxy_COMMON) { return (READ_BIT(ADCxy_COMMON->CSR, LL_ADC_FLAG_OVR_SLV1) == (LL_ADC_FLAG_OVR_SLV1)); } @@ -4460,7 +4462,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV1_OVR(ADC_Common_TypeDef *ADCxy_ * (can be set directly from CMSIS definition or by using helper macro @ref __LL_ADC_COMMON_INSTANCE() ) * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV2_OVR(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV2_OVR(const ADC_Common_TypeDef *ADCxy_COMMON) { return (READ_BIT(ADCxy_COMMON->CSR, LL_ADC_FLAG_OVR_SLV2) == (LL_ADC_FLAG_OVR_SLV2)); } @@ -4468,12 +4470,12 @@ __STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV2_OVR(ADC_Common_TypeDef *ADCxy_ /** * @brief Get flag multimode ADC group injected end of sequence conversions of the ADC master. - * @rmtoll CSR JEOC LL_ADC_IsActiveFlag_MST_EOCS + * @rmtoll CSR JEOC LL_ADC_IsActiveFlag_MST_JEOS * @param ADCxy_COMMON ADC common instance * (can be set directly from CMSIS definition or by using helper macro @ref __LL_ADC_COMMON_INSTANCE() ) * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_MST_JEOS(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_MST_JEOS(const ADC_Common_TypeDef *ADCxy_COMMON) { /* Note: on this STM32 series, there is no flag ADC group injected */ /* end of unitary conversion. */ @@ -4489,7 +4491,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_MST_JEOS(ADC_Common_TypeDef *ADCxy_ * (can be set directly from CMSIS definition or by using helper macro @ref __LL_ADC_COMMON_INSTANCE() ) * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV1_JEOS(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV1_JEOS(const ADC_Common_TypeDef *ADCxy_COMMON) { /* Note: on this STM32 series, there is no flag ADC group injected */ /* end of unitary conversion. */ @@ -4505,7 +4507,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV1_JEOS(ADC_Common_TypeDef *ADCxy * (can be set directly from CMSIS definition or by using helper macro @ref __LL_ADC_COMMON_INSTANCE() ) * @retval State of bit (1 or 0). 
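/* Illustrative usage sketch (not part of the header shown in this diff): polling
   multimode status flags through the ADC common instance, assuming a device with
   ADC_MULTIMODE_SUPPORT; note the MST_EOCS accessor above now reads the CSR of the
   common instance passed as parameter. */
ADC_Common_TypeDef *adc_common = __LL_ADC_COMMON_INSTANCE(ADC1);
if (LL_ADC_IsActiveFlag_MST_EOCS(adc_common) != 0UL)
{
  /* master end of conversion reached */
}
if (LL_ADC_IsActiveFlag_MST_OVR(adc_common) != 0UL)
{
  /* master overrun to be handled */
}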
*/ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV2_JEOS(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV2_JEOS(const ADC_Common_TypeDef *ADCxy_COMMON) { /* Note: on this STM32 series, there is no flag ADC group injected */ /* end of unitary conversion. */ @@ -4521,7 +4523,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV2_JEOS(ADC_Common_TypeDef *ADCxy * (can be set directly from CMSIS definition or by using helper macro @ref __LL_ADC_COMMON_INSTANCE() ) * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_MST_AWD1(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_MST_AWD1(const ADC_Common_TypeDef *ADCxy_COMMON) { return (READ_BIT(ADCxy_COMMON->CSR, LL_ADC_FLAG_AWD1_MST) == (LL_ADC_FLAG_AWD1_MST)); } @@ -4533,7 +4535,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_MST_AWD1(ADC_Common_TypeDef *ADCxy_ * (can be set directly from CMSIS definition or by using helper macro @ref __LL_ADC_COMMON_INSTANCE() ) * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV1_AWD1(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV1_AWD1(const ADC_Common_TypeDef *ADCxy_COMMON) { return (READ_BIT(ADCxy_COMMON->CSR, LL_ADC_FLAG_AWD1_SLV1) == (LL_ADC_FLAG_AWD1_SLV1)); } @@ -4545,9 +4547,9 @@ __STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV1_AWD1(ADC_Common_TypeDef *ADCxy * (can be set directly from CMSIS definition or by using helper macro @ref __LL_ADC_COMMON_INSTANCE() ) * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV2_AWD1(ADC_Common_TypeDef *ADCxy_COMMON) +__STATIC_INLINE uint32_t LL_ADC_IsActiveFlag_SLV2_AWD1(const ADC_Common_TypeDef *ADCxy_COMMON) { - return (READ_BIT(ADCxy_COMMON->CSR, LL_ADC_FLAG_AWD1_SLV2) == (LL_ADC_FLAG_AWD1_SLV2)); + return (READ_BIT(ADCxy_COMMON->CSR, LL_ADC_FLAG_AWD1_SLV2) == (LL_ADC_FLAG_AWD1_SLV2)); } #endif /* ADC_MULTIMODE_SUPPORT */ @@ -4677,7 +4679,7 @@ __STATIC_INLINE void LL_ADC_DisableIT_AWD1(ADC_TypeDef *ADCx) * @param ADCx ADC instance * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsEnabledIT_EOCS(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_IsEnabledIT_EOCS(const ADC_TypeDef *ADCx) { return (READ_BIT(ADCx->CR1, LL_ADC_IT_EOCS) == (LL_ADC_IT_EOCS)); } @@ -4689,7 +4691,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsEnabledIT_EOCS(ADC_TypeDef *ADCx) * @param ADCx ADC instance * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsEnabledIT_OVR(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_IsEnabledIT_OVR(const ADC_TypeDef *ADCx) { return (READ_BIT(ADCx->CR1, LL_ADC_IT_OVR) == (LL_ADC_IT_OVR)); } @@ -4702,7 +4704,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsEnabledIT_OVR(ADC_TypeDef *ADCx) * @param ADCx ADC instance * @retval State of bit (1 or 0). */ -__STATIC_INLINE uint32_t LL_ADC_IsEnabledIT_JEOS(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_IsEnabledIT_JEOS(const ADC_TypeDef *ADCx) { /* Note: on this STM32 series, there is no flag ADC group injected */ /* end of unitary conversion. */ @@ -4718,7 +4720,7 @@ __STATIC_INLINE uint32_t LL_ADC_IsEnabledIT_JEOS(ADC_TypeDef *ADCx) * @param ADCx ADC instance * @retval State of bit (1 or 0). 
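/* Illustrative usage sketch (not part of the header shown in this diff): arming the
   end-of-conversion and overrun interrupt sources and reading their state back with
   the IsEnabledIT accessors above. */
LL_ADC_EnableIT_EOCS(ADC1);
LL_ADC_EnableIT_OVR(ADC1);
if (LL_ADC_IsEnabledIT_OVR(ADC1) != 0UL)
{
  /* overrun interrupt is armed; LL_ADC_FLAG_OVR is then serviced in the ADC IRQ handler */
}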
*/ -__STATIC_INLINE uint32_t LL_ADC_IsEnabledIT_AWD1(ADC_TypeDef *ADCx) +__STATIC_INLINE uint32_t LL_ADC_IsEnabledIT_AWD1(const ADC_TypeDef *ADCx) { return (READ_BIT(ADCx->CR1, LL_ADC_IT_AWD1) == (LL_ADC_IT_AWD1)); } @@ -4778,4 +4780,3 @@ void LL_ADC_INJ_StructInit(LL_ADC_INJ_InitTypeDef *ADC_INJ_InitStruct); #endif /* __STM32F4xx_LL_ADC_H */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_bus.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_bus.h new file mode 100644 index 00000000..ce19d4d9 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_bus.h @@ -0,0 +1,2105 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_bus.h + * @author MCD Application Team + * @brief Header file of BUS LL module. + + @verbatim + ##### RCC Limitations ##### + ============================================================================== + [..] + A delay between an RCC peripheral clock enable and the effective peripheral + enabling should be taken into account in order to manage the peripheral read/write + from/to registers. + (+) This delay depends on the peripheral mapping. + (++) AHB & APB peripherals, 1 dummy read is necessary + + [..] + Workarounds: + (#) For AHB & APB peripherals, a dummy read to the peripheral register has been + inserted in each LL_{BUS}_GRP{x}_EnableClock() function. + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_BUS_H +#define __STM32F4xx_LL_BUS_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined(RCC) + +/** @defgroup BUS_LL BUS + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup BUS_LL_Exported_Constants BUS Exported Constants + * @{ + */ + +/** @defgroup BUS_LL_EC_AHB1_GRP1_PERIPH AHB1 GRP1 PERIPH + * @{ + */ +#define LL_AHB1_GRP1_PERIPH_ALL 0xFFFFFFFFU +#define LL_AHB1_GRP1_PERIPH_GPIOA RCC_AHB1ENR_GPIOAEN +#define LL_AHB1_GRP1_PERIPH_GPIOB RCC_AHB1ENR_GPIOBEN +#define LL_AHB1_GRP1_PERIPH_GPIOC RCC_AHB1ENR_GPIOCEN +#if defined(GPIOD) +#define LL_AHB1_GRP1_PERIPH_GPIOD RCC_AHB1ENR_GPIODEN +#endif /* GPIOD */ +#if defined(GPIOE) +#define LL_AHB1_GRP1_PERIPH_GPIOE RCC_AHB1ENR_GPIOEEN +#endif /* GPIOE */ +#if defined(GPIOF) +#define LL_AHB1_GRP1_PERIPH_GPIOF RCC_AHB1ENR_GPIOFEN +#endif /* GPIOF */ +#if defined(GPIOG) +#define LL_AHB1_GRP1_PERIPH_GPIOG RCC_AHB1ENR_GPIOGEN +#endif /* GPIOG */ +#if defined(GPIOH) +#define LL_AHB1_GRP1_PERIPH_GPIOH RCC_AHB1ENR_GPIOHEN +#endif /* GPIOH */ +#if defined(GPIOI) +#define LL_AHB1_GRP1_PERIPH_GPIOI RCC_AHB1ENR_GPIOIEN +#endif /* GPIOI */ +#if defined(GPIOJ) +#define LL_AHB1_GRP1_PERIPH_GPIOJ RCC_AHB1ENR_GPIOJEN +#endif /* GPIOJ */ +#if defined(GPIOK) +#define LL_AHB1_GRP1_PERIPH_GPIOK RCC_AHB1ENR_GPIOKEN +#endif /* GPIOK */ +#define LL_AHB1_GRP1_PERIPH_CRC RCC_AHB1ENR_CRCEN +#if defined(RCC_AHB1ENR_BKPSRAMEN) +#define LL_AHB1_GRP1_PERIPH_BKPSRAM RCC_AHB1ENR_BKPSRAMEN +#endif /* RCC_AHB1ENR_BKPSRAMEN */ +#if defined(RCC_AHB1ENR_CCMDATARAMEN) +#define LL_AHB1_GRP1_PERIPH_CCMDATARAM RCC_AHB1ENR_CCMDATARAMEN +#endif /* RCC_AHB1ENR_CCMDATARAMEN */ +#define LL_AHB1_GRP1_PERIPH_DMA1 RCC_AHB1ENR_DMA1EN +#define LL_AHB1_GRP1_PERIPH_DMA2 RCC_AHB1ENR_DMA2EN +#if defined(RCC_AHB1ENR_RNGEN) +#define LL_AHB1_GRP1_PERIPH_RNG RCC_AHB1ENR_RNGEN +#endif /* RCC_AHB1ENR_RNGEN */ +#if defined(DMA2D) +#define LL_AHB1_GRP1_PERIPH_DMA2D RCC_AHB1ENR_DMA2DEN +#endif /* DMA2D */ +#if defined(ETH) +#define LL_AHB1_GRP1_PERIPH_ETHMAC RCC_AHB1ENR_ETHMACEN +#define LL_AHB1_GRP1_PERIPH_ETHMACTX RCC_AHB1ENR_ETHMACTXEN +#define LL_AHB1_GRP1_PERIPH_ETHMACRX RCC_AHB1ENR_ETHMACRXEN +#define LL_AHB1_GRP1_PERIPH_ETHMACPTP RCC_AHB1ENR_ETHMACPTPEN +#endif /* ETH */ +#if defined(USB_OTG_HS) +#define LL_AHB1_GRP1_PERIPH_OTGHS RCC_AHB1ENR_OTGHSEN +#define LL_AHB1_GRP1_PERIPH_OTGHSULPI RCC_AHB1ENR_OTGHSULPIEN +#endif /* USB_OTG_HS */ +#define LL_AHB1_GRP1_PERIPH_FLITF RCC_AHB1LPENR_FLITFLPEN +#define LL_AHB1_GRP1_PERIPH_SRAM1 RCC_AHB1LPENR_SRAM1LPEN +#if defined(RCC_AHB1LPENR_SRAM2LPEN) +#define LL_AHB1_GRP1_PERIPH_SRAM2 RCC_AHB1LPENR_SRAM2LPEN +#endif /* RCC_AHB1LPENR_SRAM2LPEN */ +#if defined(RCC_AHB1LPENR_SRAM3LPEN) +#define LL_AHB1_GRP1_PERIPH_SRAM3 RCC_AHB1LPENR_SRAM3LPEN +#endif /* 
RCC_AHB1LPENR_SRAM3LPEN */ +/** + * @} + */ + +#if defined(RCC_AHB2_SUPPORT) +/** @defgroup BUS_LL_EC_AHB2_GRP1_PERIPH AHB2 GRP1 PERIPH + * @{ + */ +#define LL_AHB2_GRP1_PERIPH_ALL 0xFFFFFFFFU +#if defined(DCMI) +#define LL_AHB2_GRP1_PERIPH_DCMI RCC_AHB2ENR_DCMIEN +#endif /* DCMI */ +#if defined(CRYP) +#define LL_AHB2_GRP1_PERIPH_CRYP RCC_AHB2ENR_CRYPEN +#endif /* CRYP */ +#if defined(AES) +#define LL_AHB2_GRP1_PERIPH_AES RCC_AHB2ENR_AESEN +#endif /* AES */ +#if defined(HASH) +#define LL_AHB2_GRP1_PERIPH_HASH RCC_AHB2ENR_HASHEN +#endif /* HASH */ +#if defined(RCC_AHB2ENR_RNGEN) +#define LL_AHB2_GRP1_PERIPH_RNG RCC_AHB2ENR_RNGEN +#endif /* RCC_AHB2ENR_RNGEN */ +#if defined(USB_OTG_FS) +#define LL_AHB2_GRP1_PERIPH_OTGFS RCC_AHB2ENR_OTGFSEN +#endif /* USB_OTG_FS */ +/** + * @} + */ +#endif /* RCC_AHB2_SUPPORT */ + +#if defined(RCC_AHB3_SUPPORT) +/** @defgroup BUS_LL_EC_AHB3_GRP1_PERIPH AHB3 GRP1 PERIPH + * @{ + */ +#define LL_AHB3_GRP1_PERIPH_ALL 0xFFFFFFFFU +#if defined(FSMC_Bank1) +#define LL_AHB3_GRP1_PERIPH_FSMC RCC_AHB3ENR_FSMCEN +#endif /* FSMC_Bank1 */ +#if defined(FMC_Bank1) +#define LL_AHB3_GRP1_PERIPH_FMC RCC_AHB3ENR_FMCEN +#endif /* FMC_Bank1 */ +#if defined(QUADSPI) +#define LL_AHB3_GRP1_PERIPH_QSPI RCC_AHB3ENR_QSPIEN +#endif /* QUADSPI */ +/** + * @} + */ +#endif /* RCC_AHB3_SUPPORT */ + +/** @defgroup BUS_LL_EC_APB1_GRP1_PERIPH APB1 GRP1 PERIPH + * @{ + */ +#define LL_APB1_GRP1_PERIPH_ALL 0xFFFFFFFFU +#if defined(TIM2) +#define LL_APB1_GRP1_PERIPH_TIM2 RCC_APB1ENR_TIM2EN +#endif /* TIM2 */ +#if defined(TIM3) +#define LL_APB1_GRP1_PERIPH_TIM3 RCC_APB1ENR_TIM3EN +#endif /* TIM3 */ +#if defined(TIM4) +#define LL_APB1_GRP1_PERIPH_TIM4 RCC_APB1ENR_TIM4EN +#endif /* TIM4 */ +#define LL_APB1_GRP1_PERIPH_TIM5 RCC_APB1ENR_TIM5EN +#if defined(TIM6) +#define LL_APB1_GRP1_PERIPH_TIM6 RCC_APB1ENR_TIM6EN +#endif /* TIM6 */ +#if defined(TIM7) +#define LL_APB1_GRP1_PERIPH_TIM7 RCC_APB1ENR_TIM7EN +#endif /* TIM7 */ +#if defined(TIM12) +#define LL_APB1_GRP1_PERIPH_TIM12 RCC_APB1ENR_TIM12EN +#endif /* TIM12 */ +#if defined(TIM13) +#define LL_APB1_GRP1_PERIPH_TIM13 RCC_APB1ENR_TIM13EN +#endif /* TIM13 */ +#if defined(TIM14) +#define LL_APB1_GRP1_PERIPH_TIM14 RCC_APB1ENR_TIM14EN +#endif /* TIM14 */ +#if defined(LPTIM1) +#define LL_APB1_GRP1_PERIPH_LPTIM1 RCC_APB1ENR_LPTIM1EN +#endif /* LPTIM1 */ +#if defined(RCC_APB1ENR_RTCAPBEN) +#define LL_APB1_GRP1_PERIPH_RTCAPB RCC_APB1ENR_RTCAPBEN +#endif /* RCC_APB1ENR_RTCAPBEN */ +#define LL_APB1_GRP1_PERIPH_WWDG RCC_APB1ENR_WWDGEN +#if defined(SPI2) +#define LL_APB1_GRP1_PERIPH_SPI2 RCC_APB1ENR_SPI2EN +#endif /* SPI2 */ +#if defined(SPI3) +#define LL_APB1_GRP1_PERIPH_SPI3 RCC_APB1ENR_SPI3EN +#endif /* SPI3 */ +#if defined(SPDIFRX) +#define LL_APB1_GRP1_PERIPH_SPDIFRX RCC_APB1ENR_SPDIFRXEN +#endif /* SPDIFRX */ +#define LL_APB1_GRP1_PERIPH_USART2 RCC_APB1ENR_USART2EN +#if defined(USART3) +#define LL_APB1_GRP1_PERIPH_USART3 RCC_APB1ENR_USART3EN +#endif /* USART3 */ +#if defined(UART4) +#define LL_APB1_GRP1_PERIPH_UART4 RCC_APB1ENR_UART4EN +#endif /* UART4 */ +#if defined(UART5) +#define LL_APB1_GRP1_PERIPH_UART5 RCC_APB1ENR_UART5EN +#endif /* UART5 */ +#define LL_APB1_GRP1_PERIPH_I2C1 RCC_APB1ENR_I2C1EN +#define LL_APB1_GRP1_PERIPH_I2C2 RCC_APB1ENR_I2C2EN +#if defined(I2C3) +#define LL_APB1_GRP1_PERIPH_I2C3 RCC_APB1ENR_I2C3EN +#endif /* I2C3 */ +#if defined(FMPI2C1) +#define LL_APB1_GRP1_PERIPH_FMPI2C1 RCC_APB1ENR_FMPI2C1EN +#endif /* FMPI2C1 */ +#if defined(CAN1) +#define LL_APB1_GRP1_PERIPH_CAN1 RCC_APB1ENR_CAN1EN +#endif /* CAN1 */ +#if defined(CAN2) 
+#define LL_APB1_GRP1_PERIPH_CAN2 RCC_APB1ENR_CAN2EN +#endif /* CAN2 */ +#if defined(CAN3) +#define LL_APB1_GRP1_PERIPH_CAN3 RCC_APB1ENR_CAN3EN +#endif /* CAN3 */ +#if defined(CEC) +#define LL_APB1_GRP1_PERIPH_CEC RCC_APB1ENR_CECEN +#endif /* CEC */ +#define LL_APB1_GRP1_PERIPH_PWR RCC_APB1ENR_PWREN +#if defined(DAC1) +#define LL_APB1_GRP1_PERIPH_DAC1 RCC_APB1ENR_DACEN +#endif /* DAC1 */ +#if defined(UART7) +#define LL_APB1_GRP1_PERIPH_UART7 RCC_APB1ENR_UART7EN +#endif /* UART7 */ +#if defined(UART8) +#define LL_APB1_GRP1_PERIPH_UART8 RCC_APB1ENR_UART8EN +#endif /* UART8 */ +/** + * @} + */ + +/** @defgroup BUS_LL_EC_APB2_GRP1_PERIPH APB2 GRP1 PERIPH + * @{ + */ +#define LL_APB2_GRP1_PERIPH_ALL 0xFFFFFFFFU +#define LL_APB2_GRP1_PERIPH_TIM1 RCC_APB2ENR_TIM1EN +#if defined(TIM8) +#define LL_APB2_GRP1_PERIPH_TIM8 RCC_APB2ENR_TIM8EN +#endif /* TIM8 */ +#define LL_APB2_GRP1_PERIPH_USART1 RCC_APB2ENR_USART1EN +#if defined(USART6) +#define LL_APB2_GRP1_PERIPH_USART6 RCC_APB2ENR_USART6EN +#endif /* USART6 */ +#if defined(UART9) +#define LL_APB2_GRP1_PERIPH_UART9 RCC_APB2ENR_UART9EN +#endif /* UART9 */ +#if defined(UART10) +#define LL_APB2_GRP1_PERIPH_UART10 RCC_APB2ENR_UART10EN +#endif /* UART10 */ +#define LL_APB2_GRP1_PERIPH_ADC1 RCC_APB2ENR_ADC1EN +#if defined(ADC2) +#define LL_APB2_GRP1_PERIPH_ADC2 RCC_APB2ENR_ADC2EN +#endif /* ADC2 */ +#if defined(ADC3) +#define LL_APB2_GRP1_PERIPH_ADC3 RCC_APB2ENR_ADC3EN +#endif /* ADC3 */ +#if defined(SDIO) +#define LL_APB2_GRP1_PERIPH_SDIO RCC_APB2ENR_SDIOEN +#endif /* SDIO */ +#define LL_APB2_GRP1_PERIPH_SPI1 RCC_APB2ENR_SPI1EN +#if defined(SPI4) +#define LL_APB2_GRP1_PERIPH_SPI4 RCC_APB2ENR_SPI4EN +#endif /* SPI4 */ +#define LL_APB2_GRP1_PERIPH_SYSCFG RCC_APB2ENR_SYSCFGEN +#if defined(RCC_APB2ENR_EXTITEN) +#define LL_APB2_GRP1_PERIPH_EXTI RCC_APB2ENR_EXTITEN +#endif /* RCC_APB2ENR_EXTITEN */ +#define LL_APB2_GRP1_PERIPH_TIM9 RCC_APB2ENR_TIM9EN +#if defined(TIM10) +#define LL_APB2_GRP1_PERIPH_TIM10 RCC_APB2ENR_TIM10EN +#endif /* TIM10 */ +#define LL_APB2_GRP1_PERIPH_TIM11 RCC_APB2ENR_TIM11EN +#if defined(SPI5) +#define LL_APB2_GRP1_PERIPH_SPI5 RCC_APB2ENR_SPI5EN +#endif /* SPI5 */ +#if defined(SPI6) +#define LL_APB2_GRP1_PERIPH_SPI6 RCC_APB2ENR_SPI6EN +#endif /* SPI6 */ +#if defined(SAI1) +#define LL_APB2_GRP1_PERIPH_SAI1 RCC_APB2ENR_SAI1EN +#endif /* SAI1 */ +#if defined(SAI2) +#define LL_APB2_GRP1_PERIPH_SAI2 RCC_APB2ENR_SAI2EN +#endif /* SAI2 */ +#if defined(LTDC) +#define LL_APB2_GRP1_PERIPH_LTDC RCC_APB2ENR_LTDCEN +#endif /* LTDC */ +#if defined(DSI) +#define LL_APB2_GRP1_PERIPH_DSI RCC_APB2ENR_DSIEN +#endif /* DSI */ +#if defined(DFSDM1_Channel0) +#define LL_APB2_GRP1_PERIPH_DFSDM1 RCC_APB2ENR_DFSDM1EN +#endif /* DFSDM1_Channel0 */ +#if defined(DFSDM2_Channel0) +#define LL_APB2_GRP1_PERIPH_DFSDM2 RCC_APB2ENR_DFSDM2EN +#endif /* DFSDM2_Channel0 */ +#define LL_APB2_GRP1_PERIPH_ADC RCC_APB2RSTR_ADCRST +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @defgroup BUS_LL_Exported_Functions BUS Exported Functions + * @{ + */ + +/** @defgroup BUS_LL_EF_AHB1 AHB1 + * @{ + */ + +/** + * @brief Enable AHB1 peripherals clock. 
+ * @rmtoll AHB1ENR GPIOAEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOBEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOCEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIODEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOEEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOFEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOGEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOHEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOIEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOJEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR GPIOKEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR CRCEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR BKPSRAMEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR CCMDATARAMEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR DMA1EN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR DMA2EN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR RNGEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR DMA2DEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR ETHMACEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR ETHMACTXEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR ETHMACRXEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR ETHMACPTPEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR OTGHSEN LL_AHB1_GRP1_EnableClock\n + * AHB1ENR OTGHSULPIEN LL_AHB1_GRP1_EnableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOA + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOB + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOC + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOD (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOE (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOF (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOH (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOI (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOJ (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOK (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CRC + * @arg @ref LL_AHB1_GRP1_PERIPH_BKPSRAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CCMDATARAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA1 + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2 + * @arg @ref LL_AHB1_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2D (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMAC (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACTX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACRX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACPTP (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHS (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHSULPI (*) + * + * (*) value not defined in all devices. 
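/* Illustrative usage sketch (not part of the header shown in this diff): typical clock
   gating with the AHB1 group 1 helpers; the dummy read inside LL_AHB1_GRP1_EnableClock()
   (defined just below) provides the one-read delay described in the RCC Limitations
   note of this file header. */
LL_AHB1_GRP1_EnableClock(LL_AHB1_GRP1_PERIPH_GPIOA | LL_AHB1_GRP1_PERIPH_DMA2);
if (LL_AHB1_GRP1_IsEnabledClock(LL_AHB1_GRP1_PERIPH_GPIOA) != 0U)
{
  /* GPIOA registers can now be accessed */
}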
+ * @retval None + */ +__STATIC_INLINE void LL_AHB1_GRP1_EnableClock(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->AHB1ENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->AHB1ENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Check if AHB1 peripheral clock is enabled or not + * @rmtoll AHB1ENR GPIOAEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOBEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOCEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIODEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOEEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOFEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOGEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOHEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOIEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOJEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR GPIOKEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR CRCEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR BKPSRAMEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR CCMDATARAMEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR DMA1EN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR DMA2EN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR RNGEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR DMA2DEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR ETHMACEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR ETHMACTXEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR ETHMACRXEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR ETHMACPTPEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR OTGHSEN LL_AHB1_GRP1_IsEnabledClock\n + * AHB1ENR OTGHSULPIEN LL_AHB1_GRP1_IsEnabledClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOA + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOB + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOC + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOD (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOE (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOF (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOH (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOI (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOJ (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOK (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CRC + * @arg @ref LL_AHB1_GRP1_PERIPH_BKPSRAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CCMDATARAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA1 + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2 + * @arg @ref LL_AHB1_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2D (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMAC (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACTX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACRX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACPTP (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHS (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHSULPI (*) + * + * (*) value not defined in all devices. + * @retval State of Periphs (1 or 0). + */ +__STATIC_INLINE uint32_t LL_AHB1_GRP1_IsEnabledClock(uint32_t Periphs) +{ + return (READ_BIT(RCC->AHB1ENR, Periphs) == Periphs); +} + +/** + * @brief Disable AHB1 peripherals clock. 
+ * @rmtoll AHB1ENR GPIOAEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOBEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOCEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIODEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOEEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOFEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOGEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOHEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOIEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOJEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR GPIOKEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR CRCEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR BKPSRAMEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR CCMDATARAMEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR DMA1EN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR DMA2EN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR RNGEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR DMA2DEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR ETHMACEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR ETHMACTXEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR ETHMACRXEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR ETHMACPTPEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR OTGHSEN LL_AHB1_GRP1_DisableClock\n + * AHB1ENR OTGHSULPIEN LL_AHB1_GRP1_DisableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOA + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOB + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOC + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOD (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOE (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOF (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOH (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOI (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOJ (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOK (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CRC + * @arg @ref LL_AHB1_GRP1_PERIPH_BKPSRAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CCMDATARAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA1 + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2 + * @arg @ref LL_AHB1_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2D (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMAC (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACTX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACRX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACPTP (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHS (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHSULPI (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB1_GRP1_DisableClock(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB1ENR, Periphs); +} + +/** + * @brief Force AHB1 peripherals reset. 
+ * @rmtoll AHB1RSTR GPIOARST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOBRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOCRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIODRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOERST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOFRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOGRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOHRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOIRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOJRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR GPIOKRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR CRCRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR DMA1RST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR DMA2RST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR RNGRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR DMA2DRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR ETHMACRST LL_AHB1_GRP1_ForceReset\n + * AHB1RSTR OTGHSRST LL_AHB1_GRP1_ForceReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB1_GRP1_PERIPH_ALL + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOA + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOB + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOC + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOD (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOE (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOF (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOH (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOI (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOJ (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOK (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CRC + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA1 + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2 + * @arg @ref LL_AHB1_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2D (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMAC (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHS (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB1_GRP1_ForceReset(uint32_t Periphs) +{ + SET_BIT(RCC->AHB1RSTR, Periphs); +} + +/** + * @brief Release AHB1 peripherals reset. 
+ * @rmtoll AHB1RSTR GPIOARST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOBRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOCRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIODRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOERST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOFRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOGRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOHRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOIRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOJRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR GPIOKRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR CRCRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR DMA1RST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR DMA2RST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR RNGRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR DMA2DRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR ETHMACRST LL_AHB1_GRP1_ReleaseReset\n + * AHB1RSTR OTGHSRST LL_AHB1_GRP1_ReleaseReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB1_GRP1_PERIPH_ALL + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOA + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOB + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOC + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOD (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOE (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOF (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOH (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOI (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOJ (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOK (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CRC + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA1 + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2 + * @arg @ref LL_AHB1_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2D (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMAC (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHS (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_AHB1_GRP1_ReleaseReset(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB1RSTR, Periphs); +} + +/** + * @brief Enable AHB1 peripheral clocks in low-power mode + * @rmtoll AHB1LPENR GPIOALPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOBLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOCLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIODLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOELPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOFLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOGLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOHLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOILPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOJLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR GPIOKLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR CRCLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR BKPSRAMLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR FLITFLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR SRAM1LPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR SRAM2LPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR SRAM3LPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR BKPSRAMLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR DMA1LPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR DMA2LPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR DMA2DLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR RNGLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR ETHMACLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR ETHMACTXLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR ETHMACRXLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR ETHMACPTPLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR OTGHSLPEN LL_AHB1_GRP1_EnableClockLowPower\n + * AHB1LPENR OTGHSULPILPEN LL_AHB1_GRP1_EnableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOA + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOB + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOC + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOD (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOE (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOF (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOH (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOI (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOJ (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOK (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CRC + * @arg @ref LL_AHB1_GRP1_PERIPH_BKPSRAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_FLITF + * @arg @ref LL_AHB1_GRP1_PERIPH_SRAM1 + * @arg @ref LL_AHB1_GRP1_PERIPH_SRAM2 (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_SRAM3 (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA1 + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2 + * @arg @ref LL_AHB1_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2D (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMAC (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACTX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACRX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACPTP (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHS (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHSULPI (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_AHB1_GRP1_EnableClockLowPower(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->AHB1LPENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->AHB1LPENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Disable AHB1 peripheral clocks in low-power mode + * @rmtoll AHB1LPENR GPIOALPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOBLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOCLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIODLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOELPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOFLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOGLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOHLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOILPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOJLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR GPIOKLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR CRCLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR BKPSRAMLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR FLITFLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR SRAM1LPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR SRAM2LPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR SRAM3LPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR BKPSRAMLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR DMA1LPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR DMA2LPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR DMA2DLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR RNGLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR ETHMACLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR ETHMACTXLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR ETHMACRXLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR ETHMACPTPLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR OTGHSLPEN LL_AHB1_GRP1_DisableClockLowPower\n + * AHB1LPENR OTGHSULPILPEN LL_AHB1_GRP1_DisableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOA + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOB + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOC + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOD (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOE (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOF (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOH (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOI (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOJ (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_GPIOK (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_CRC + * @arg @ref LL_AHB1_GRP1_PERIPH_BKPSRAM (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_FLITF + * @arg @ref LL_AHB1_GRP1_PERIPH_SRAM1 + * @arg @ref LL_AHB1_GRP1_PERIPH_SRAM2 (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_SRAM3 (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA1 + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2 + * @arg @ref LL_AHB1_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_DMA2D (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMAC (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACTX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACRX (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_ETHMACPTP (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHS (*) + * @arg @ref LL_AHB1_GRP1_PERIPH_OTGHSULPI (*) + * + * (*) value not defined in all devices. 
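The xLPENR variants only decide which clocks keep running while the core is in Sleep mode; they do not enable a peripheral in Run mode. A sketch of a Sleep-mode clock policy, assuming DMA1 transfers should continue while the core sleeps and the CRC unit should stop (the matching disable function is defined just below):

#include "stm32f4xx_ll_bus.h"

static void ahb1_sleep_clock_policy(void)
{
  /* Keep DMA1 and GPIOA clocked during Sleep mode. */
  LL_AHB1_GRP1_EnableClockLowPower(LL_AHB1_GRP1_PERIPH_DMA1 |
                                   LL_AHB1_GRP1_PERIPH_GPIOA);
  /* Stop the CRC clock whenever the core sleeps. */
  LL_AHB1_GRP1_DisableClockLowPower(LL_AHB1_GRP1_PERIPH_CRC);
}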
+ * @retval None + */ +__STATIC_INLINE void LL_AHB1_GRP1_DisableClockLowPower(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB1LPENR, Periphs); +} + +/** + * @} + */ + +#if defined(RCC_AHB2_SUPPORT) +/** @defgroup BUS_LL_EF_AHB2 AHB2 + * @{ + */ + +/** + * @brief Enable AHB2 peripherals clock. + * @rmtoll AHB2ENR DCMIEN LL_AHB2_GRP1_EnableClock\n + * AHB2ENR CRYPEN LL_AHB2_GRP1_EnableClock\n + * AHB2ENR AESEN LL_AHB2_GRP1_EnableClock\n + * AHB2ENR HASHEN LL_AHB2_GRP1_EnableClock\n + * AHB2ENR RNGEN LL_AHB2_GRP1_EnableClock\n + * AHB2ENR OTGFSEN LL_AHB2_GRP1_EnableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_DCMI (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_CRYP (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_AES (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_HASH (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_OTGFS (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB2_GRP1_EnableClock(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->AHB2ENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->AHB2ENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Check if AHB2 peripheral clock is enabled or not + * @rmtoll AHB2ENR DCMIEN LL_AHB2_GRP1_IsEnabledClock\n + * AHB2ENR CRYPEN LL_AHB2_GRP1_IsEnabledClock\n + * AHB2ENR AESEN LL_AHB2_GRP1_IsEnabledClock\n + * AHB2ENR HASHEN LL_AHB2_GRP1_IsEnabledClock\n + * AHB2ENR RNGEN LL_AHB2_GRP1_IsEnabledClock\n + * AHB2ENR OTGFSEN LL_AHB2_GRP1_IsEnabledClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_DCMI (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_CRYP (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_AES (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_HASH (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_OTGFS (*) + * + * (*) value not defined in all devices. + * @retval State of Periphs (1 or 0). + */ +__STATIC_INLINE uint32_t LL_AHB2_GRP1_IsEnabledClock(uint32_t Periphs) +{ + return (READ_BIT(RCC->AHB2ENR, Periphs) == Periphs); +} + +/** + * @brief Disable AHB2 peripherals clock. + * @rmtoll AHB2ENR DCMIEN LL_AHB2_GRP1_DisableClock\n + * AHB2ENR CRYPEN LL_AHB2_GRP1_DisableClock\n + * AHB2ENR AESEN LL_AHB2_GRP1_DisableClock\n + * AHB2ENR HASHEN LL_AHB2_GRP1_DisableClock\n + * AHB2ENR RNGEN LL_AHB2_GRP1_DisableClock\n + * AHB2ENR OTGFSEN LL_AHB2_GRP1_DisableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_DCMI (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_CRYP (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_AES (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_HASH (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_OTGFS (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB2_GRP1_DisableClock(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB2ENR, Periphs); +} + +/** + * @brief Force AHB2 peripherals reset. 
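AHB2 offers the same enable/check/disable trio; IsEnabledClock compares the read-back against the full requested mask, so it also works when several flags are OR-ed. A sketch, assuming a device that has the OTG_FS controller (marked (*) above):

#include "stm32f4xx_ll_bus.h"

static uint32_t otgfs_clock_on(void)
{
  LL_AHB2_GRP1_EnableClock(LL_AHB2_GRP1_PERIPH_OTGFS);
  /* The dummy read-back inside EnableClock already provides the required
     post-enable delay; this check is only a sanity test. */
  return LL_AHB2_GRP1_IsEnabledClock(LL_AHB2_GRP1_PERIPH_OTGFS);
}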
+ * @rmtoll AHB2RSTR DCMIRST LL_AHB2_GRP1_ForceReset\n + * AHB2RSTR CRYPRST LL_AHB2_GRP1_ForceReset\n + * AHB2RSTR AESRST LL_AHB2_GRP1_ForceReset\n + * AHB2RSTR HASHRST LL_AHB2_GRP1_ForceReset\n + * AHB2RSTR RNGRST LL_AHB2_GRP1_ForceReset\n + * AHB2RSTR OTGFSRST LL_AHB2_GRP1_ForceReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_ALL + * @arg @ref LL_AHB2_GRP1_PERIPH_DCMI (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_CRYP (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_AES (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_HASH (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_OTGFS (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB2_GRP1_ForceReset(uint32_t Periphs) +{ + SET_BIT(RCC->AHB2RSTR, Periphs); +} + +/** + * @brief Release AHB2 peripherals reset. + * @rmtoll AHB2RSTR DCMIRST LL_AHB2_GRP1_ReleaseReset\n + * AHB2RSTR CRYPRST LL_AHB2_GRP1_ReleaseReset\n + * AHB2RSTR AESRST LL_AHB2_GRP1_ReleaseReset\n + * AHB2RSTR HASHRST LL_AHB2_GRP1_ReleaseReset\n + * AHB2RSTR RNGRST LL_AHB2_GRP1_ReleaseReset\n + * AHB2RSTR OTGFSRST LL_AHB2_GRP1_ReleaseReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_ALL + * @arg @ref LL_AHB2_GRP1_PERIPH_DCMI (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_CRYP (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_AES (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_HASH (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_OTGFS (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB2_GRP1_ReleaseReset(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB2RSTR, Periphs); +} + +/** + * @brief Enable AHB2 peripheral clocks in low-power mode + * @rmtoll AHB2LPENR DCMILPEN LL_AHB2_GRP1_EnableClockLowPower\n + * AHB2LPENR CRYPLPEN LL_AHB2_GRP1_EnableClockLowPower\n + * AHB2LPENR AESLPEN LL_AHB2_GRP1_EnableClockLowPower\n + * AHB2LPENR HASHLPEN LL_AHB2_GRP1_EnableClockLowPower\n + * AHB2LPENR RNGLPEN LL_AHB2_GRP1_EnableClockLowPower\n + * AHB2LPENR OTGFSLPEN LL_AHB2_GRP1_EnableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_DCMI (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_CRYP (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_AES (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_HASH (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_OTGFS (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_AHB2_GRP1_EnableClockLowPower(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->AHB2LPENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->AHB2LPENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Disable AHB2 peripheral clocks in low-power mode + * @rmtoll AHB2LPENR DCMILPEN LL_AHB2_GRP1_DisableClockLowPower\n + * AHB2LPENR CRYPLPEN LL_AHB2_GRP1_DisableClockLowPower\n + * AHB2LPENR AESLPEN LL_AHB2_GRP1_DisableClockLowPower\n + * AHB2LPENR HASHLPEN LL_AHB2_GRP1_DisableClockLowPower\n + * AHB2LPENR RNGLPEN LL_AHB2_GRP1_DisableClockLowPower\n + * AHB2LPENR OTGFSLPEN LL_AHB2_GRP1_DisableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_DCMI (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_CRYP (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_AES (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_HASH (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_RNG (*) + * @arg @ref LL_AHB2_GRP1_PERIPH_OTGFS (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB2_GRP1_DisableClockLowPower(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB2LPENR, Periphs); +} + +/** + * @} + */ +#endif /* RCC_AHB2_SUPPORT */ + +#if defined(RCC_AHB3_SUPPORT) +/** @defgroup BUS_LL_EF_AHB3 AHB3 + * @{ + */ + +/** + * @brief Enable AHB3 peripherals clock. + * @rmtoll AHB3ENR FMCEN LL_AHB3_GRP1_EnableClock\n + * AHB3ENR FSMCEN LL_AHB3_GRP1_EnableClock\n + * AHB3ENR QSPIEN LL_AHB3_GRP1_EnableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB3_GRP1_PERIPH_FMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_FSMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_QSPI (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB3_GRP1_EnableClock(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->AHB3ENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->AHB3ENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Check if AHB3 peripheral clock is enabled or not + * @rmtoll AHB3ENR FMCEN LL_AHB3_GRP1_IsEnabledClock\n + * AHB3ENR FSMCEN LL_AHB3_GRP1_IsEnabledClock\n + * AHB3ENR QSPIEN LL_AHB3_GRP1_IsEnabledClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB3_GRP1_PERIPH_FMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_FSMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_QSPI (*) + * + * (*) value not defined in all devices. + * @retval State of Periphs (1 or 0). + */ +__STATIC_INLINE uint32_t LL_AHB3_GRP1_IsEnabledClock(uint32_t Periphs) +{ + return (READ_BIT(RCC->AHB3ENR, Periphs) == Periphs); +} + +/** + * @brief Disable AHB3 peripherals clock. + * @rmtoll AHB3ENR FMCEN LL_AHB3_GRP1_DisableClock\n + * AHB3ENR FSMCEN LL_AHB3_GRP1_DisableClock\n + * AHB3ENR QSPIEN LL_AHB3_GRP1_DisableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB3_GRP1_PERIPH_FMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_FSMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_QSPI (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB3_GRP1_DisableClock(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB3ENR, Periphs); +} + +/** + * @brief Force AHB3 peripherals reset. 
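AHB3 carries the external-memory controllers, so its clock must be enabled before any FMC/FSMC/QUADSPI register is touched. A sketch, assuming a part on which the QUADSPI block exists (it is marked (*) above):

#include "stm32f4xx_ll_bus.h"

static void qspi_bus_clock_on(void)
{
  LL_AHB3_GRP1_EnableClock(LL_AHB3_GRP1_PERIPH_QSPI);
  /* QUADSPI registers can be programmed from this point on; calling
     LL_AHB3_GRP1_DisableClock again would gate its register interface. */
}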
+ * @rmtoll AHB3RSTR FMCRST LL_AHB3_GRP1_ForceReset\n + * AHB3RSTR FSMCRST LL_AHB3_GRP1_ForceReset\n + * AHB3RSTR QSPIRST LL_AHB3_GRP1_ForceReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB3_GRP1_PERIPH_ALL + * @arg @ref LL_AHB3_GRP1_PERIPH_FMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_FSMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_QSPI (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB3_GRP1_ForceReset(uint32_t Periphs) +{ + SET_BIT(RCC->AHB3RSTR, Periphs); +} + +/** + * @brief Release AHB3 peripherals reset. + * @rmtoll AHB3RSTR FMCRST LL_AHB3_GRP1_ReleaseReset\n + * AHB3RSTR FSMCRST LL_AHB3_GRP1_ReleaseReset\n + * AHB3RSTR QSPIRST LL_AHB3_GRP1_ReleaseReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB2_GRP1_PERIPH_ALL + * @arg @ref LL_AHB3_GRP1_PERIPH_FMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_FSMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_QSPI (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB3_GRP1_ReleaseReset(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB3RSTR, Periphs); +} + +/** + * @brief Enable AHB3 peripheral clocks in low-power mode + * @rmtoll AHB3LPENR FMCLPEN LL_AHB3_GRP1_EnableClockLowPower\n + * AHB3LPENR FSMCLPEN LL_AHB3_GRP1_EnableClockLowPower\n + * AHB3LPENR QSPILPEN LL_AHB3_GRP1_EnableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB3_GRP1_PERIPH_FMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_FSMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_QSPI (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB3_GRP1_EnableClockLowPower(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->AHB3LPENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->AHB3LPENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Disable AHB3 peripheral clocks in low-power mode + * @rmtoll AHB3LPENR FMCLPEN LL_AHB3_GRP1_DisableClockLowPower\n + * AHB3LPENR FSMCLPEN LL_AHB3_GRP1_DisableClockLowPower\n + * AHB3LPENR QSPILPEN LL_AHB3_GRP1_DisableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_AHB3_GRP1_PERIPH_FMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_FSMC (*) + * @arg @ref LL_AHB3_GRP1_PERIPH_QSPI (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_AHB3_GRP1_DisableClockLowPower(uint32_t Periphs) +{ + CLEAR_BIT(RCC->AHB3LPENR, Periphs); +} + +/** + * @} + */ +#endif /* RCC_AHB3_SUPPORT */ + +/** @defgroup BUS_LL_EF_APB1 APB1 + * @{ + */ + +/** + * @brief Enable APB1 peripherals clock. 
+ * @rmtoll APB1ENR TIM2EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM3EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM4EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM5EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM6EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM7EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM12EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM13EN LL_APB1_GRP1_EnableClock\n + * APB1ENR TIM14EN LL_APB1_GRP1_EnableClock\n + * APB1ENR LPTIM1EN LL_APB1_GRP1_EnableClock\n + * APB1ENR WWDGEN LL_APB1_GRP1_EnableClock\n + * APB1ENR SPI2EN LL_APB1_GRP1_EnableClock\n + * APB1ENR SPI3EN LL_APB1_GRP1_EnableClock\n + * APB1ENR SPDIFRXEN LL_APB1_GRP1_EnableClock\n + * APB1ENR USART2EN LL_APB1_GRP1_EnableClock\n + * APB1ENR USART3EN LL_APB1_GRP1_EnableClock\n + * APB1ENR UART4EN LL_APB1_GRP1_EnableClock\n + * APB1ENR UART5EN LL_APB1_GRP1_EnableClock\n + * APB1ENR I2C1EN LL_APB1_GRP1_EnableClock\n + * APB1ENR I2C2EN LL_APB1_GRP1_EnableClock\n + * APB1ENR I2C3EN LL_APB1_GRP1_EnableClock\n + * APB1ENR FMPI2C1EN LL_APB1_GRP1_EnableClock\n + * APB1ENR CAN1EN LL_APB1_GRP1_EnableClock\n + * APB1ENR CAN2EN LL_APB1_GRP1_EnableClock\n + * APB1ENR CAN3EN LL_APB1_GRP1_EnableClock\n + * APB1ENR CECEN LL_APB1_GRP1_EnableClock\n + * APB1ENR PWREN LL_APB1_GRP1_EnableClock\n + * APB1ENR DACEN LL_APB1_GRP1_EnableClock\n + * APB1ENR UART7EN LL_APB1_GRP1_EnableClock\n + * APB1ENR UART8EN LL_APB1_GRP1_EnableClock\n + * APB1ENR RTCAPBEN LL_APB1_GRP1_EnableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB1_GRP1_PERIPH_TIM2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM5 + * @arg @ref LL_APB1_GRP1_PERIPH_TIM6 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM12 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM13 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM14 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_LPTIM1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_WWDG + * @arg @ref LL_APB1_GRP1_PERIPH_SPI2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPI3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPDIFRX (*) + * @arg @ref LL_APB1_GRP1_PERIPH_USART2 + * @arg @ref LL_APB1_GRP1_PERIPH_USART3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART5 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_I2C1 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C2 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_FMPI2C1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CEC (*) + * @arg @ref LL_APB1_GRP1_PERIPH_PWR + * @arg @ref LL_APB1_GRP1_PERIPH_DAC1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART8 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_RTCAPB (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_APB1_GRP1_EnableClock(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->APB1ENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->APB1ENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Check if APB1 peripheral clock is enabled or not + * @rmtoll APB1ENR TIM2EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM3EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM4EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM5EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM6EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM7EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM12EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM13EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR TIM14EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR LPTIM1EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR WWDGEN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR SPI2EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR SPI3EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR SPDIFRXEN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR USART2EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR USART3EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR UART4EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR UART5EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR I2C1EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR I2C2EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR I2C3EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR FMPI2C1EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR CAN1EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR CAN2EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR CAN3EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR CECEN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR PWREN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR DACEN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR UART7EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR UART8EN LL_APB1_GRP1_IsEnabledClock\n + * APB1ENR RTCAPBEN LL_APB1_GRP1_IsEnabledClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB1_GRP1_PERIPH_TIM2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM5 + * @arg @ref LL_APB1_GRP1_PERIPH_TIM6 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM12 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM13 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM14 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_LPTIM1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_WWDG + * @arg @ref LL_APB1_GRP1_PERIPH_SPI2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPI3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPDIFRX (*) + * @arg @ref LL_APB1_GRP1_PERIPH_USART2 + * @arg @ref LL_APB1_GRP1_PERIPH_USART3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART5 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_I2C1 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C2 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_FMPI2C1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CEC (*) + * @arg @ref LL_APB1_GRP1_PERIPH_PWR + * @arg @ref LL_APB1_GRP1_PERIPH_DAC1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART8 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_RTCAPB (*) + * + * (*) value not defined in all devices. + * @retval State of Periphs (1 or 0). + */ +__STATIC_INLINE uint32_t LL_APB1_GRP1_IsEnabledClock(uint32_t Periphs) +{ + return (READ_BIT(RCC->APB1ENR, Periphs) == Periphs); +} + +/** + * @brief Disable APB1 peripherals clock. 
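The dummy read-back in the body above is ST's usual workaround for the delay required after setting an RCC enable bit, so callers do not need to add one themselves. A sketch of a start-up routine, assuming the application uses USART2, I2C1 and the PWR controller (none of which is marked (*) above):

#include "stm32f4xx_ll_bus.h"

static void apb1_clocks_init(void)
{
  LL_APB1_GRP1_EnableClock(LL_APB1_GRP1_PERIPH_USART2 |
                           LL_APB1_GRP1_PERIPH_I2C1   |
                           LL_APB1_GRP1_PERIPH_PWR);
}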
+ * @rmtoll APB1ENR TIM2EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM3EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM4EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM5EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM6EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM7EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM12EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM13EN LL_APB1_GRP1_DisableClock\n + * APB1ENR TIM14EN LL_APB1_GRP1_DisableClock\n + * APB1ENR LPTIM1EN LL_APB1_GRP1_DisableClock\n + * APB1ENR WWDGEN LL_APB1_GRP1_DisableClock\n + * APB1ENR SPI2EN LL_APB1_GRP1_DisableClock\n + * APB1ENR SPI3EN LL_APB1_GRP1_DisableClock\n + * APB1ENR SPDIFRXEN LL_APB1_GRP1_DisableClock\n + * APB1ENR USART2EN LL_APB1_GRP1_DisableClock\n + * APB1ENR USART3EN LL_APB1_GRP1_DisableClock\n + * APB1ENR UART4EN LL_APB1_GRP1_DisableClock\n + * APB1ENR UART5EN LL_APB1_GRP1_DisableClock\n + * APB1ENR I2C1EN LL_APB1_GRP1_DisableClock\n + * APB1ENR I2C2EN LL_APB1_GRP1_DisableClock\n + * APB1ENR I2C3EN LL_APB1_GRP1_DisableClock\n + * APB1ENR FMPI2C1EN LL_APB1_GRP1_DisableClock\n + * APB1ENR CAN1EN LL_APB1_GRP1_DisableClock\n + * APB1ENR CAN2EN LL_APB1_GRP1_DisableClock\n + * APB1ENR CAN3EN LL_APB1_GRP1_DisableClock\n + * APB1ENR CECEN LL_APB1_GRP1_DisableClock\n + * APB1ENR PWREN LL_APB1_GRP1_DisableClock\n + * APB1ENR DACEN LL_APB1_GRP1_DisableClock\n + * APB1ENR UART7EN LL_APB1_GRP1_DisableClock\n + * APB1ENR UART8EN LL_APB1_GRP1_DisableClock\n + * APB1ENR RTCAPBEN LL_APB1_GRP1_DisableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB1_GRP1_PERIPH_TIM2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM5 + * @arg @ref LL_APB1_GRP1_PERIPH_TIM6 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM12 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM13 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM14 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_LPTIM1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_WWDG + * @arg @ref LL_APB1_GRP1_PERIPH_SPI2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPI3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPDIFRX (*) + * @arg @ref LL_APB1_GRP1_PERIPH_USART2 + * @arg @ref LL_APB1_GRP1_PERIPH_USART3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART5 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_I2C1 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C2 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_FMPI2C1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CEC (*) + * @arg @ref LL_APB1_GRP1_PERIPH_PWR + * @arg @ref LL_APB1_GRP1_PERIPH_DAC1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART8 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_RTCAPB (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_APB1_GRP1_DisableClock(uint32_t Periphs) +{ + CLEAR_BIT(RCC->APB1ENR, Periphs); +} + +/** + * @brief Force APB1 peripherals reset. 
+ * @rmtoll APB1RSTR TIM2RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM3RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM4RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM5RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM6RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM7RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM12RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM13RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR TIM14RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR LPTIM1RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR WWDGRST LL_APB1_GRP1_ForceReset\n + * APB1RSTR SPI2RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR SPI3RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR SPDIFRXRST LL_APB1_GRP1_ForceReset\n + * APB1RSTR USART2RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR USART3RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR UART4RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR UART5RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR I2C1RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR I2C2RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR I2C3RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR FMPI2C1RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR CAN1RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR CAN2RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR CAN3RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR CECRST LL_APB1_GRP1_ForceReset\n + * APB1RSTR PWRRST LL_APB1_GRP1_ForceReset\n + * APB1RSTR DACRST LL_APB1_GRP1_ForceReset\n + * APB1RSTR UART7RST LL_APB1_GRP1_ForceReset\n + * APB1RSTR UART8RST LL_APB1_GRP1_ForceReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB1_GRP1_PERIPH_TIM2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM5 + * @arg @ref LL_APB1_GRP1_PERIPH_TIM6 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM12 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM13 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM14 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_LPTIM1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_WWDG + * @arg @ref LL_APB1_GRP1_PERIPH_SPI2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPI3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPDIFRX (*) + * @arg @ref LL_APB1_GRP1_PERIPH_USART2 + * @arg @ref LL_APB1_GRP1_PERIPH_USART3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART5 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_I2C1 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C2 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_FMPI2C1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CEC (*) + * @arg @ref LL_APB1_GRP1_PERIPH_PWR + * @arg @ref LL_APB1_GRP1_PERIPH_DAC1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART8 (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_APB1_GRP1_ForceReset(uint32_t Periphs) +{ + SET_BIT(RCC->APB1RSTR, Periphs); +} + +/** + * @brief Release APB1 peripherals reset. 
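A common use of the APB1 reset pair (the release half follows below) is recovering an I2C peripheral after a bus lock-up, since a peripheral reset clears its internal state machine without disturbing its clock. A sketch, assuming I2C1:

#include "stm32f4xx_ll_bus.h"

static void i2c1_recover(void)
{
  LL_APB1_GRP1_ForceReset(LL_APB1_GRP1_PERIPH_I2C1);
  LL_APB1_GRP1_ReleaseReset(LL_APB1_GRP1_PERIPH_I2C1);
  /* I2C1 must now be fully re-initialised (timing, own address, ...). */
}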
+ * @rmtoll APB1RSTR TIM2RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM3RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM4RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM5RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM6RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM7RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM12RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM13RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR TIM14RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR LPTIM1RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR WWDGRST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR SPI2RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR SPI3RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR SPDIFRXRST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR USART2RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR USART3RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR UART4RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR UART5RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR I2C1RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR I2C2RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR I2C3RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR FMPI2C1RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR CAN1RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR CAN2RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR CAN3RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR CECRST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR PWRRST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR DACRST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR UART7RST LL_APB1_GRP1_ReleaseReset\n + * APB1RSTR UART8RST LL_APB1_GRP1_ReleaseReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB1_GRP1_PERIPH_TIM2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM5 + * @arg @ref LL_APB1_GRP1_PERIPH_TIM6 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM12 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM13 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM14 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_LPTIM1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_WWDG + * @arg @ref LL_APB1_GRP1_PERIPH_SPI2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPI3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPDIFRX (*) + * @arg @ref LL_APB1_GRP1_PERIPH_USART2 + * @arg @ref LL_APB1_GRP1_PERIPH_USART3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART5 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_I2C1 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C2 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_FMPI2C1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CEC (*) + * @arg @ref LL_APB1_GRP1_PERIPH_PWR + * @arg @ref LL_APB1_GRP1_PERIPH_DAC1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART8 (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_APB1_GRP1_ReleaseReset(uint32_t Periphs) +{ + CLEAR_BIT(RCC->APB1RSTR, Periphs); +} + +/** + * @brief Enable APB1 peripheral clocks in low-power mode + * @rmtoll APB1LPENR TIM2LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM3LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM4LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM5LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM6LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM7LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM12LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM13LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR TIM14LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR LPTIM1LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR WWDGLPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR SPI2LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR SPI3LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR SPDIFRXLPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR USART2LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR USART3LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR UART4LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR UART5LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR I2C1LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR I2C2LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR I2C3LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR FMPI2C1LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR CAN1LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR CAN2LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR CAN3LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR CECLPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR PWRLPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR DACLPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR UART7LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR UART8LPEN LL_APB1_GRP1_EnableClockLowPower\n + * APB1LPENR RTCAPBLPEN LL_APB1_GRP1_EnableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB1_GRP1_PERIPH_TIM2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM5 + * @arg @ref LL_APB1_GRP1_PERIPH_TIM6 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM12 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM13 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM14 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_LPTIM1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_WWDG + * @arg @ref LL_APB1_GRP1_PERIPH_SPI2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPI3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPDIFRX (*) + * @arg @ref LL_APB1_GRP1_PERIPH_USART2 + * @arg @ref LL_APB1_GRP1_PERIPH_USART3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART5 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_I2C1 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C2 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_FMPI2C1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CEC (*) + * @arg @ref LL_APB1_GRP1_PERIPH_PWR + * @arg @ref LL_APB1_GRP1_PERIPH_DAC1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART8 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_RTCAPB (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_APB1_GRP1_EnableClockLowPower(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->APB1LPENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->APB1LPENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Disable APB1 peripheral clocks in low-power mode + * @rmtoll APB1LPENR TIM2LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM3LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM4LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM5LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM6LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM7LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM12LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM13LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR TIM14LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR LPTIM1LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR WWDGLPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR SPI2LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR SPI3LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR SPDIFRXLPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR USART2LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR USART3LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR UART4LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR UART5LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR I2C1LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR I2C2LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR I2C3LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR FMPI2C1LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR CAN1LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR CAN2LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR CAN3LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR CECLPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR PWRLPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR DACLPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR UART7LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR UART8LPEN LL_APB1_GRP1_DisableClockLowPower\n + * APB1LPENR RTCAPBLPEN LL_APB1_GRP1_DisableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB1_GRP1_PERIPH_TIM2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM5 + * @arg @ref LL_APB1_GRP1_PERIPH_TIM6 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM12 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM13 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_TIM14 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_LPTIM1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_WWDG + * @arg @ref LL_APB1_GRP1_PERIPH_SPI2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPI3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_SPDIFRX (*) + * @arg @ref LL_APB1_GRP1_PERIPH_USART2 + * @arg @ref LL_APB1_GRP1_PERIPH_USART3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART4 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART5 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_I2C1 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C2 + * @arg @ref LL_APB1_GRP1_PERIPH_I2C3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_FMPI2C1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN2 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CAN3 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_CEC (*) + * @arg @ref LL_APB1_GRP1_PERIPH_PWR + * @arg @ref LL_APB1_GRP1_PERIPH_DAC1 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART7 (*) + * @arg @ref LL_APB1_GRP1_PERIPH_UART8 
(*) + * @arg @ref LL_APB1_GRP1_PERIPH_RTCAPB (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_APB1_GRP1_DisableClockLowPower(uint32_t Periphs) +{ + CLEAR_BIT(RCC->APB1LPENR, Periphs); +} + +/** + * @} + */ + +/** @defgroup BUS_LL_EF_APB2 APB2 + * @{ + */ + +/** + * @brief Enable APB2 peripherals clock. + * @rmtoll APB2ENR TIM1EN LL_APB2_GRP1_EnableClock\n + * APB2ENR TIM8EN LL_APB2_GRP1_EnableClock\n + * APB2ENR USART1EN LL_APB2_GRP1_EnableClock\n + * APB2ENR USART6EN LL_APB2_GRP1_EnableClock\n + * APB2ENR UART9EN LL_APB2_GRP1_EnableClock\n + * APB2ENR UART10EN LL_APB2_GRP1_EnableClock\n + * APB2ENR ADC1EN LL_APB2_GRP1_EnableClock\n + * APB2ENR ADC2EN LL_APB2_GRP1_EnableClock\n + * APB2ENR ADC3EN LL_APB2_GRP1_EnableClock\n + * APB2ENR SDIOEN LL_APB2_GRP1_EnableClock\n + * APB2ENR SPI1EN LL_APB2_GRP1_EnableClock\n + * APB2ENR SPI4EN LL_APB2_GRP1_EnableClock\n + * APB2ENR SYSCFGEN LL_APB2_GRP1_EnableClock\n + * APB2ENR EXTITEN LL_APB2_GRP1_EnableClock\n + * APB2ENR TIM9EN LL_APB2_GRP1_EnableClock\n + * APB2ENR TIM10EN LL_APB2_GRP1_EnableClock\n + * APB2ENR TIM11EN LL_APB2_GRP1_EnableClock\n + * APB2ENR SPI5EN LL_APB2_GRP1_EnableClock\n + * APB2ENR SPI6EN LL_APB2_GRP1_EnableClock\n + * APB2ENR SAI1EN LL_APB2_GRP1_EnableClock\n + * APB2ENR SAI2EN LL_APB2_GRP1_EnableClock\n + * APB2ENR LTDCEN LL_APB2_GRP1_EnableClock\n + * APB2ENR DSIEN LL_APB2_GRP1_EnableClock\n + * APB2ENR DFSDM1EN LL_APB2_GRP1_EnableClock\n + * APB2ENR DFSDM2EN LL_APB2_GRP1_EnableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB2_GRP1_PERIPH_TIM1 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM8 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_USART1 + * @arg @ref LL_APB2_GRP1_PERIPH_USART6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART9 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC1 + * @arg @ref LL_APB2_GRP1_PERIPH_ADC2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC3 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SDIO (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI1 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI4 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SYSCFG + * @arg @ref LL_APB2_GRP1_PERIPH_EXTI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM9 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM11 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI5 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_LTDC (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DSI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM2 (*) + + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_APB2_GRP1_EnableClock(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->APB2ENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->APB2ENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Check if APB2 peripheral clock is enabled or not + * @rmtoll APB2ENR TIM1EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR TIM8EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR USART1EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR USART6EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR UART9EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR UART10EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR ADC1EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR ADC2EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR ADC3EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SDIOEN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SPI1EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SPI4EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SYSCFGEN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR EXTITEN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR TIM9EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR TIM10EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR TIM11EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SPI5EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SPI6EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SAI1EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR SAI2EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR LTDCEN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR DSIEN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR DFSDM1EN LL_APB2_GRP1_IsEnabledClock\n + * APB2ENR DFSDM2EN LL_APB2_GRP1_IsEnabledClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB2_GRP1_PERIPH_TIM1 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM8 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_USART1 + * @arg @ref LL_APB2_GRP1_PERIPH_USART6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART9 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC1 + * @arg @ref LL_APB2_GRP1_PERIPH_ADC2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC3 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SDIO (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI1 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI4 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SYSCFG + * @arg @ref LL_APB2_GRP1_PERIPH_EXTI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM9 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM11 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI5 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_LTDC (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DSI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM2 (*) + * + * (*) value not defined in all devices. + * @retval State of Periphs (1 or 0). + */ +__STATIC_INLINE uint32_t LL_APB2_GRP1_IsEnabledClock(uint32_t Periphs) +{ + return (READ_BIT(RCC->APB2ENR, Periphs) == Periphs); +} + +/** + * @brief Disable APB2 peripherals clock. 
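APB2 hosts the fast peripherals (USART1, SPI1, the ADCs, SYSCFG); enabling them is again a set-then-readback on APB2ENR. A sketch, assuming USART1 and SYSCFG are needed, SYSCFG because it must be clocked before EXTI line mapping can be changed:

#include "stm32f4xx_ll_bus.h"

static void apb2_clocks_init(void)
{
  LL_APB2_GRP1_EnableClock(LL_APB2_GRP1_PERIPH_USART1 |
                           LL_APB2_GRP1_PERIPH_SYSCFG);
}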
+ * @rmtoll APB2ENR TIM1EN LL_APB2_GRP1_DisableClock\n + * APB2ENR TIM8EN LL_APB2_GRP1_DisableClock\n + * APB2ENR USART1EN LL_APB2_GRP1_DisableClock\n + * APB2ENR USART6EN LL_APB2_GRP1_DisableClock\n + * APB2ENR UART9EN LL_APB2_GRP1_DisableClock\n + * APB2ENR UART10EN LL_APB2_GRP1_DisableClock\n + * APB2ENR ADC1EN LL_APB2_GRP1_DisableClock\n + * APB2ENR ADC2EN LL_APB2_GRP1_DisableClock\n + * APB2ENR ADC3EN LL_APB2_GRP1_DisableClock\n + * APB2ENR SDIOEN LL_APB2_GRP1_DisableClock\n + * APB2ENR SPI1EN LL_APB2_GRP1_DisableClock\n + * APB2ENR SPI4EN LL_APB2_GRP1_DisableClock\n + * APB2ENR SYSCFGEN LL_APB2_GRP1_DisableClock\n + * APB2ENR EXTITEN LL_APB2_GRP1_DisableClock\n + * APB2ENR TIM9EN LL_APB2_GRP1_DisableClock\n + * APB2ENR TIM10EN LL_APB2_GRP1_DisableClock\n + * APB2ENR TIM11EN LL_APB2_GRP1_DisableClock\n + * APB2ENR SPI5EN LL_APB2_GRP1_DisableClock\n + * APB2ENR SPI6EN LL_APB2_GRP1_DisableClock\n + * APB2ENR SAI1EN LL_APB2_GRP1_DisableClock\n + * APB2ENR SAI2EN LL_APB2_GRP1_DisableClock\n + * APB2ENR LTDCEN LL_APB2_GRP1_DisableClock\n + * APB2ENR DSIEN LL_APB2_GRP1_DisableClock\n + * APB2ENR DFSDM1EN LL_APB2_GRP1_DisableClock\n + * APB2ENR DFSDM2EN LL_APB2_GRP1_DisableClock + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB2_GRP1_PERIPH_TIM1 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM8 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_USART1 + * @arg @ref LL_APB2_GRP1_PERIPH_USART6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART9 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC1 + * @arg @ref LL_APB2_GRP1_PERIPH_ADC2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC3 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SDIO (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI1 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI4 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SYSCFG + * @arg @ref LL_APB2_GRP1_PERIPH_EXTI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM9 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM11 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI5 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_LTDC (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DSI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM2 (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_APB2_GRP1_DisableClock(uint32_t Periphs) +{ + CLEAR_BIT(RCC->APB2ENR, Periphs); +} + +/** + * @brief Force APB2 peripherals reset. 
+ * @rmtoll APB2RSTR TIM1RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR TIM8RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR USART1RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR USART6RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR UART9RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR UART10RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR ADCRST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SDIORST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SPI1RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SPI4RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SYSCFGRST LL_APB2_GRP1_ForceReset\n + * APB2RSTR TIM9RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR TIM10RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR TIM11RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SPI5RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SPI6RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SAI1RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR SAI2RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR LTDCRST LL_APB2_GRP1_ForceReset\n + * APB2RSTR DSIRST LL_APB2_GRP1_ForceReset\n + * APB2RSTR DFSDM1RST LL_APB2_GRP1_ForceReset\n + * APB2RSTR DFSDM2RST LL_APB2_GRP1_ForceReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB2_GRP1_PERIPH_ALL + * @arg @ref LL_APB2_GRP1_PERIPH_TIM1 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM8 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_USART1 + * @arg @ref LL_APB2_GRP1_PERIPH_USART6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART9 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC + * @arg @ref LL_APB2_GRP1_PERIPH_SDIO (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI1 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI4 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SYSCFG + * @arg @ref LL_APB2_GRP1_PERIPH_TIM9 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM11 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI5 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_LTDC (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DSI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM2 (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_APB2_GRP1_ForceReset(uint32_t Periphs) +{ + SET_BIT(RCC->APB2RSTR, Periphs); +} + +/** + * @brief Release APB2 peripherals reset. 
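Resetting an APB2 peripheral follows the same two-call pattern; note that the reset register exposes a single ADCRST bit (LL_APB2_GRP1_PERIPH_ADC above), so the ADCs can only be reset as a group even though their clocks are enabled individually. A sketch, assuming SPI1 should be returned to its default register state:

#include "stm32f4xx_ll_bus.h"

static void spi1_reset(void)
{
  LL_APB2_GRP1_ForceReset(LL_APB2_GRP1_PERIPH_SPI1);
  LL_APB2_GRP1_ReleaseReset(LL_APB2_GRP1_PERIPH_SPI1);
}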
+ * @rmtoll APB2RSTR TIM1RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR TIM8RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR USART1RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR USART6RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR UART9RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR UART10RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR ADCRST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SDIORST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SPI1RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SPI4RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SYSCFGRST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR TIM9RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR TIM10RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR TIM11RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SPI5RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SPI6RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SAI1RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR SAI2RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR LTDCRST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR DSIRST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR DFSDM1RST LL_APB2_GRP1_ReleaseReset\n + * APB2RSTR DFSDM2RST LL_APB2_GRP1_ReleaseReset + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB2_GRP1_PERIPH_ALL + * @arg @ref LL_APB2_GRP1_PERIPH_TIM1 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM8 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_USART1 + * @arg @ref LL_APB2_GRP1_PERIPH_USART6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART9 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC + * @arg @ref LL_APB2_GRP1_PERIPH_SDIO (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI1 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI4 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SYSCFG + * @arg @ref LL_APB2_GRP1_PERIPH_EXTI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM9 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM11 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI5 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_LTDC (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DSI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM2 (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_APB2_GRP1_ReleaseReset(uint32_t Periphs) +{ + CLEAR_BIT(RCC->APB2RSTR, Periphs); +} + +/** + * @brief Enable APB2 peripheral clocks in low-power mode + * @rmtoll APB2LPENR TIM1LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR TIM8LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR USART1LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR USART6LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR UART9LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR UART10LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR ADC1LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR ADC2LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR ADC3LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SDIOLPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SPI1LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SPI4LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SYSCFGLPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR EXTITLPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR TIM9LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR TIM10LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR TIM11LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SPI5LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SPI6LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SAI1LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR SAI2LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR LTDCLPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR DSILPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR DFSDM1LPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR DSILPEN LL_APB2_GRP1_EnableClockLowPower\n + * APB2LPENR DFSDM2LPEN LL_APB2_GRP1_EnableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB2_GRP1_PERIPH_TIM1 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM8 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_USART1 + * @arg @ref LL_APB2_GRP1_PERIPH_USART6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART9 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC1 + * @arg @ref LL_APB2_GRP1_PERIPH_ADC2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC3 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SDIO (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI1 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI4 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SYSCFG + * @arg @ref LL_APB2_GRP1_PERIPH_EXTI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM9 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM11 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI5 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_LTDC (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DSI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM2 (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_APB2_GRP1_EnableClockLowPower(uint32_t Periphs) +{ + __IO uint32_t tmpreg; + SET_BIT(RCC->APB2LPENR, Periphs); + /* Delay after an RCC peripheral clock enabling */ + tmpreg = READ_BIT(RCC->APB2LPENR, Periphs); + (void)tmpreg; +} + +/** + * @brief Disable APB2 peripheral clocks in low-power mode + * @rmtoll APB2LPENR TIM1LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR TIM8LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR USART1LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR USART6LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR UART9LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR UART10LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR ADC1LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR ADC2LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR ADC3LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SDIOLPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SPI1LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SPI4LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SYSCFGLPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR EXTITLPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR TIM9LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR TIM10LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR TIM11LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SPI5LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SPI6LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SAI1LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR SAI2LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR LTDCLPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR DSILPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR DFSDM1LPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR DSILPEN LL_APB2_GRP1_DisableClockLowPower\n + * APB2LPENR DFSDM2LPEN LL_APB2_GRP1_DisableClockLowPower + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_APB2_GRP1_PERIPH_TIM1 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM8 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_USART1 + * @arg @ref LL_APB2_GRP1_PERIPH_USART6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART9 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_UART10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC1 + * @arg @ref LL_APB2_GRP1_PERIPH_ADC2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_ADC3 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SDIO (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI1 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI4 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SYSCFG + * @arg @ref LL_APB2_GRP1_PERIPH_EXTI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM9 + * @arg @ref LL_APB2_GRP1_PERIPH_TIM10 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_TIM11 + * @arg @ref LL_APB2_GRP1_PERIPH_SPI5 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SPI6 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_SAI2 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_LTDC (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DSI (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM1 (*) + * @arg @ref LL_APB2_GRP1_PERIPH_DFSDM2 (*) + * + * (*) value not defined in all devices. 
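+ * @note  Usage sketch: Periphs is a bit mask, so several peripherals that are not
+ *        needed during Sleep mode can be gated with a single call (TIM9 and TIM11
+ *        are only examples).
+ * @code
+ *        LL_APB2_GRP1_DisableClockLowPower(LL_APB2_GRP1_PERIPH_TIM9
+ *                                          | LL_APB2_GRP1_PERIPH_TIM11);
+ * @endcode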
+ * @retval None + */ +__STATIC_INLINE void LL_APB2_GRP1_DisableClockLowPower(uint32_t Periphs) +{ + CLEAR_BIT(RCC->APB2LPENR, Periphs); +} + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* defined(RCC) */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_BUS_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_cortex.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_cortex.h new file mode 100644 index 00000000..9a183ea7 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_cortex.h @@ -0,0 +1,647 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_cortex.h + * @author MCD Application Team + * @brief Header file of CORTEX LL module. + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The LL CORTEX driver contains a set of generic APIs that can be + used by user: + (+) SYSTICK configuration used by LL_mDelay and LL_Init1msTick + functions + (+) Low power mode configuration (SCB register of Cortex-MCU) + (+) MPU API to configure and enable regions + (MPU services provided only on some devices) + (+) API to access to MCU info (CPUID register) + (+) API to enable fault handler (SHCSR accesses) + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_CORTEX_H +#define __STM32F4xx_LL_CORTEX_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +/** @defgroup CORTEX_LL CORTEX + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ + +/* Private constants ---------------------------------------------------------*/ + +/* Private macros ------------------------------------------------------------*/ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup CORTEX_LL_Exported_Constants CORTEX Exported Constants + * @{ + */ + +/** @defgroup CORTEX_LL_EC_CLKSOURCE_HCLK SYSTICK Clock Source + * @{ + */ +#define LL_SYSTICK_CLKSOURCE_HCLK_DIV8 0x00000000U /*!< AHB clock divided by 8 selected as SysTick clock source.*/ +#define LL_SYSTICK_CLKSOURCE_HCLK SysTick_CTRL_CLKSOURCE_Msk /*!< AHB clock selected as SysTick clock source. 
*/ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_FAULT Handler Fault type + * @{ + */ +#define LL_HANDLER_FAULT_USG SCB_SHCSR_USGFAULTENA_Msk /*!< Usage fault */ +#define LL_HANDLER_FAULT_BUS SCB_SHCSR_BUSFAULTENA_Msk /*!< Bus fault */ +#define LL_HANDLER_FAULT_MEM SCB_SHCSR_MEMFAULTENA_Msk /*!< Memory management fault */ +/** + * @} + */ + +#if __MPU_PRESENT + +/** @defgroup CORTEX_LL_EC_CTRL_HFNMI_PRIVDEF MPU Control + * @{ + */ +#define LL_MPU_CTRL_HFNMI_PRIVDEF_NONE 0x00000000U /*!< Disable NMI and privileged SW access */ +#define LL_MPU_CTRL_HARDFAULT_NMI MPU_CTRL_HFNMIENA_Msk /*!< Enables the operation of MPU during hard fault, NMI, and FAULTMASK handlers */ +#define LL_MPU_CTRL_PRIVILEGED_DEFAULT MPU_CTRL_PRIVDEFENA_Msk /*!< Enable privileged software access to default memory map */ +#define LL_MPU_CTRL_HFNMI_PRIVDEF (MPU_CTRL_HFNMIENA_Msk | MPU_CTRL_PRIVDEFENA_Msk) /*!< Enable NMI and privileged SW access */ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_REGION MPU Region Number + * @{ + */ +#define LL_MPU_REGION_NUMBER0 0x00U /*!< REGION Number 0 */ +#define LL_MPU_REGION_NUMBER1 0x01U /*!< REGION Number 1 */ +#define LL_MPU_REGION_NUMBER2 0x02U /*!< REGION Number 2 */ +#define LL_MPU_REGION_NUMBER3 0x03U /*!< REGION Number 3 */ +#define LL_MPU_REGION_NUMBER4 0x04U /*!< REGION Number 4 */ +#define LL_MPU_REGION_NUMBER5 0x05U /*!< REGION Number 5 */ +#define LL_MPU_REGION_NUMBER6 0x06U /*!< REGION Number 6 */ +#define LL_MPU_REGION_NUMBER7 0x07U /*!< REGION Number 7 */ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_REGION_SIZE MPU Region Size + * @{ + */ +#define LL_MPU_REGION_SIZE_32B (0x04U << MPU_RASR_SIZE_Pos) /*!< 32B Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_64B (0x05U << MPU_RASR_SIZE_Pos) /*!< 64B Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_128B (0x06U << MPU_RASR_SIZE_Pos) /*!< 128B Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_256B (0x07U << MPU_RASR_SIZE_Pos) /*!< 256B Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_512B (0x08U << MPU_RASR_SIZE_Pos) /*!< 512B Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_1KB (0x09U << MPU_RASR_SIZE_Pos) /*!< 1KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_2KB (0x0AU << MPU_RASR_SIZE_Pos) /*!< 2KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_4KB (0x0BU << MPU_RASR_SIZE_Pos) /*!< 4KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_8KB (0x0CU << MPU_RASR_SIZE_Pos) /*!< 8KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_16KB (0x0DU << MPU_RASR_SIZE_Pos) /*!< 16KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_32KB (0x0EU << MPU_RASR_SIZE_Pos) /*!< 32KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_64KB (0x0FU << MPU_RASR_SIZE_Pos) /*!< 64KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_128KB (0x10U << MPU_RASR_SIZE_Pos) /*!< 128KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_256KB (0x11U << MPU_RASR_SIZE_Pos) /*!< 256KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_512KB (0x12U << MPU_RASR_SIZE_Pos) /*!< 512KB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_1MB (0x13U << MPU_RASR_SIZE_Pos) /*!< 1MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_2MB (0x14U << MPU_RASR_SIZE_Pos) /*!< 2MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_4MB (0x15U << MPU_RASR_SIZE_Pos) /*!< 4MB Size of the MPU protection region */ +#define 
LL_MPU_REGION_SIZE_8MB (0x16U << MPU_RASR_SIZE_Pos) /*!< 8MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_16MB (0x17U << MPU_RASR_SIZE_Pos) /*!< 16MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_32MB (0x18U << MPU_RASR_SIZE_Pos) /*!< 32MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_64MB (0x19U << MPU_RASR_SIZE_Pos) /*!< 64MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_128MB (0x1AU << MPU_RASR_SIZE_Pos) /*!< 128MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_256MB (0x1BU << MPU_RASR_SIZE_Pos) /*!< 256MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_512MB (0x1CU << MPU_RASR_SIZE_Pos) /*!< 512MB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_1GB (0x1DU << MPU_RASR_SIZE_Pos) /*!< 1GB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_2GB (0x1EU << MPU_RASR_SIZE_Pos) /*!< 2GB Size of the MPU protection region */ +#define LL_MPU_REGION_SIZE_4GB (0x1FU << MPU_RASR_SIZE_Pos) /*!< 4GB Size of the MPU protection region */ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_REGION_PRIVILEDGES MPU Region Privileges + * @{ + */ +#define LL_MPU_REGION_NO_ACCESS (0x00U << MPU_RASR_AP_Pos) /*!< No access*/ +#define LL_MPU_REGION_PRIV_RW (0x01U << MPU_RASR_AP_Pos) /*!< RW privileged (privileged access only)*/ +#define LL_MPU_REGION_PRIV_RW_URO (0x02U << MPU_RASR_AP_Pos) /*!< RW privileged - RO user (Write in a user program generates a fault) */ +#define LL_MPU_REGION_FULL_ACCESS (0x03U << MPU_RASR_AP_Pos) /*!< RW privileged & user (Full access) */ +#define LL_MPU_REGION_PRIV_RO (0x05U << MPU_RASR_AP_Pos) /*!< RO privileged (privileged read only)*/ +#define LL_MPU_REGION_PRIV_RO_URO (0x06U << MPU_RASR_AP_Pos) /*!< RO privileged & user (read only) */ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_TEX MPU TEX Level + * @{ + */ +#define LL_MPU_TEX_LEVEL0 (0x00U << MPU_RASR_TEX_Pos) /*!< b000 for TEX bits */ +#define LL_MPU_TEX_LEVEL1 (0x01U << MPU_RASR_TEX_Pos) /*!< b001 for TEX bits */ +#define LL_MPU_TEX_LEVEL2 (0x02U << MPU_RASR_TEX_Pos) /*!< b010 for TEX bits */ +#define LL_MPU_TEX_LEVEL4 (0x04U << MPU_RASR_TEX_Pos) /*!< b100 for TEX bits */ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_INSTRUCTION_ACCESS MPU Instruction Access + * @{ + */ +#define LL_MPU_INSTRUCTION_ACCESS_ENABLE 0x00U /*!< Instruction fetches enabled */ +#define LL_MPU_INSTRUCTION_ACCESS_DISABLE MPU_RASR_XN_Msk /*!< Instruction fetches disabled*/ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_SHAREABLE_ACCESS MPU Shareable Access + * @{ + */ +#define LL_MPU_ACCESS_SHAREABLE MPU_RASR_S_Msk /*!< Shareable memory attribute */ +#define LL_MPU_ACCESS_NOT_SHAREABLE 0x00U /*!< Not Shareable memory attribute */ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_CACHEABLE_ACCESS MPU Cacheable Access + * @{ + */ +#define LL_MPU_ACCESS_CACHEABLE MPU_RASR_C_Msk /*!< Cacheable memory attribute */ +#define LL_MPU_ACCESS_NOT_CACHEABLE 0x00U /*!< Not Cacheable memory attribute */ +/** + * @} + */ + +/** @defgroup CORTEX_LL_EC_BUFFERABLE_ACCESS MPU Bufferable Access + * @{ + */ +#define LL_MPU_ACCESS_BUFFERABLE MPU_RASR_B_Msk /*!< Bufferable memory attribute */ +#define LL_MPU_ACCESS_NOT_BUFFERABLE 0x00U /*!< Not Bufferable memory attribute */ +/** + * @} + */ +#endif /* __MPU_PRESENT */ +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup CORTEX_LL_Exported_Functions 
CORTEX Exported Functions + * @{ + */ + +/** @defgroup CORTEX_LL_EF_SYSTICK SYSTICK + * @{ + */ + +/** + * @brief This function checks if the Systick counter flag is active or not. + * @note It can be used in timeout function on application side. + * @rmtoll STK_CTRL COUNTFLAG LL_SYSTICK_IsActiveCounterFlag + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SYSTICK_IsActiveCounterFlag(void) +{ + return ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) == (SysTick_CTRL_COUNTFLAG_Msk)); +} + +/** + * @brief Configures the SysTick clock source + * @rmtoll STK_CTRL CLKSOURCE LL_SYSTICK_SetClkSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSTICK_CLKSOURCE_HCLK_DIV8 + * @arg @ref LL_SYSTICK_CLKSOURCE_HCLK + * @retval None + */ +__STATIC_INLINE void LL_SYSTICK_SetClkSource(uint32_t Source) +{ + if (Source == LL_SYSTICK_CLKSOURCE_HCLK) + { + SET_BIT(SysTick->CTRL, LL_SYSTICK_CLKSOURCE_HCLK); + } + else + { + CLEAR_BIT(SysTick->CTRL, LL_SYSTICK_CLKSOURCE_HCLK); + } +} + +/** + * @brief Get the SysTick clock source + * @rmtoll STK_CTRL CLKSOURCE LL_SYSTICK_GetClkSource + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSTICK_CLKSOURCE_HCLK_DIV8 + * @arg @ref LL_SYSTICK_CLKSOURCE_HCLK + */ +__STATIC_INLINE uint32_t LL_SYSTICK_GetClkSource(void) +{ + return READ_BIT(SysTick->CTRL, LL_SYSTICK_CLKSOURCE_HCLK); +} + +/** + * @brief Enable SysTick exception request + * @rmtoll STK_CTRL TICKINT LL_SYSTICK_EnableIT + * @retval None + */ +__STATIC_INLINE void LL_SYSTICK_EnableIT(void) +{ + SET_BIT(SysTick->CTRL, SysTick_CTRL_TICKINT_Msk); +} + +/** + * @brief Disable SysTick exception request + * @rmtoll STK_CTRL TICKINT LL_SYSTICK_DisableIT + * @retval None + */ +__STATIC_INLINE void LL_SYSTICK_DisableIT(void) +{ + CLEAR_BIT(SysTick->CTRL, SysTick_CTRL_TICKINT_Msk); +} + +/** + * @brief Checks if the SYSTICK interrupt is enabled or disabled. + * @rmtoll STK_CTRL TICKINT LL_SYSTICK_IsEnabledIT + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SYSTICK_IsEnabledIT(void) +{ + return (READ_BIT(SysTick->CTRL, SysTick_CTRL_TICKINT_Msk) == (SysTick_CTRL_TICKINT_Msk)); +} + +/** + * @} + */ + +/** @defgroup CORTEX_LL_EF_LOW_POWER_MODE LOW POWER MODE + * @{ + */ + +/** + * @brief Processor uses sleep as its low power mode + * @rmtoll SCB_SCR SLEEPDEEP LL_LPM_EnableSleep + * @retval None + */ +__STATIC_INLINE void LL_LPM_EnableSleep(void) +{ + /* Clear SLEEPDEEP bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPDEEP_Msk)); +} + +/** + * @brief Processor uses deep sleep as its low power mode + * @rmtoll SCB_SCR SLEEPDEEP LL_LPM_EnableDeepSleep + * @retval None + */ +__STATIC_INLINE void LL_LPM_EnableDeepSleep(void) +{ + /* Set SLEEPDEEP bit of Cortex System Control Register */ + SET_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPDEEP_Msk)); +} + +/** + * @brief Configures sleep-on-exit when returning from Handler mode to Thread mode. + * @note Setting this bit to 1 enables an interrupt-driven application to avoid returning to an + * empty main application. + * @rmtoll SCB_SCR SLEEPONEXIT LL_LPM_EnableSleepOnExit + * @retval None + */ +__STATIC_INLINE void LL_LPM_EnableSleepOnExit(void) +{ + /* Set SLEEPONEXIT bit of Cortex System Control Register */ + SET_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPONEXIT_Msk)); +} + +/** + * @brief Do not sleep when returning to Thread mode. 
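+ * @note  Usage sketch of the sleep-on-exit pattern for a purely interrupt-driven
+ *        application (illustrative sequence, to be adapted): the core re-enters
+ *        Sleep automatically at the end of each interrupt handler until
+ *        LL_LPM_DisableSleepOnExit() is called.
+ * @code
+ *        LL_LPM_EnableSleep();        /* select Sleep (not Deep Sleep)         */
+ *        LL_LPM_EnableSleepOnExit();
+ *        __WFI();                     /* from now on, wake up only to run ISRs */
+ * @endcode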
+ * @rmtoll SCB_SCR SLEEPONEXIT LL_LPM_DisableSleepOnExit + * @retval None + */ +__STATIC_INLINE void LL_LPM_DisableSleepOnExit(void) +{ + /* Clear SLEEPONEXIT bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPONEXIT_Msk)); +} + +/** + * @brief Enabled events and all interrupts, including disabled interrupts, can wakeup the + * processor. + * @rmtoll SCB_SCR SEVEONPEND LL_LPM_EnableEventOnPend + * @retval None + */ +__STATIC_INLINE void LL_LPM_EnableEventOnPend(void) +{ + /* Set SEVEONPEND bit of Cortex System Control Register */ + SET_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SEVONPEND_Msk)); +} + +/** + * @brief Only enabled interrupts or events can wakeup the processor, disabled interrupts are + * excluded + * @rmtoll SCB_SCR SEVEONPEND LL_LPM_DisableEventOnPend + * @retval None + */ +__STATIC_INLINE void LL_LPM_DisableEventOnPend(void) +{ + /* Clear SEVEONPEND bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SEVONPEND_Msk)); +} + +/** + * @brief Clear pending events. + * @retval None + */ +__STATIC_INLINE void LL_LPM_ClearEvent(void) +{ + __SEV(); + __WFE(); +} + +/** + * @} + */ + +/** @defgroup CORTEX_LL_EF_HANDLER HANDLER + * @{ + */ + +/** + * @brief Enable a fault in System handler control register (SHCSR) + * @rmtoll SCB_SHCSR MEMFAULTENA LL_HANDLER_EnableFault + * @param Fault This parameter can be a combination of the following values: + * @arg @ref LL_HANDLER_FAULT_USG + * @arg @ref LL_HANDLER_FAULT_BUS + * @arg @ref LL_HANDLER_FAULT_MEM + * @retval None + */ +__STATIC_INLINE void LL_HANDLER_EnableFault(uint32_t Fault) +{ + /* Enable the system handler fault */ + SET_BIT(SCB->SHCSR, Fault); +} + +/** + * @brief Disable a fault in System handler control register (SHCSR) + * @rmtoll SCB_SHCSR MEMFAULTENA LL_HANDLER_DisableFault + * @param Fault This parameter can be a combination of the following values: + * @arg @ref LL_HANDLER_FAULT_USG + * @arg @ref LL_HANDLER_FAULT_BUS + * @arg @ref LL_HANDLER_FAULT_MEM + * @retval None + */ +__STATIC_INLINE void LL_HANDLER_DisableFault(uint32_t Fault) +{ + /* Disable the system handler fault */ + CLEAR_BIT(SCB->SHCSR, Fault); +} + +/** + * @} + */ + +/** @defgroup CORTEX_LL_EF_MCU_INFO MCU INFO + * @{ + */ + +/** + * @brief Get Implementer code + * @rmtoll SCB_CPUID IMPLEMENTER LL_CPUID_GetImplementer + * @retval Value should be equal to 0x41 for ARM + */ +__STATIC_INLINE uint32_t LL_CPUID_GetImplementer(void) +{ + return (uint32_t)(READ_BIT(SCB->CPUID, SCB_CPUID_IMPLEMENTER_Msk) >> SCB_CPUID_IMPLEMENTER_Pos); +} + +/** + * @brief Get Variant number (The r value in the rnpn product revision identifier) + * @rmtoll SCB_CPUID VARIANT LL_CPUID_GetVariant + * @retval Value between 0 and 255 (0x0: revision 0) + */ +__STATIC_INLINE uint32_t LL_CPUID_GetVariant(void) +{ + return (uint32_t)(READ_BIT(SCB->CPUID, SCB_CPUID_VARIANT_Msk) >> SCB_CPUID_VARIANT_Pos); +} + +/** + * @brief Get Constant number + * @rmtoll SCB_CPUID ARCHITECTURE LL_CPUID_GetConstant + * @retval Value should be equal to 0xF for Cortex-M4 devices + */ +__STATIC_INLINE uint32_t LL_CPUID_GetConstant(void) +{ + return (uint32_t)(READ_BIT(SCB->CPUID, SCB_CPUID_ARCHITECTURE_Msk) >> SCB_CPUID_ARCHITECTURE_Pos); +} + +/** + * @brief Get Part number + * @rmtoll SCB_CPUID PARTNO LL_CPUID_GetParNo + * @retval Value should be equal to 0xC24 for Cortex-M4 + */ +__STATIC_INLINE uint32_t LL_CPUID_GetParNo(void) +{ + return (uint32_t)(READ_BIT(SCB->CPUID, SCB_CPUID_PARTNO_Msk) >> SCB_CPUID_PARTNO_Pos); +} + +/** + * @brief Get 
Revision number (The p value in the rnpn product revision identifier, indicates patch release) + * @rmtoll SCB_CPUID REVISION LL_CPUID_GetRevision + * @retval Value between 0 and 255 (0x1: patch 1) + */ +__STATIC_INLINE uint32_t LL_CPUID_GetRevision(void) +{ + return (uint32_t)(READ_BIT(SCB->CPUID, SCB_CPUID_REVISION_Msk) >> SCB_CPUID_REVISION_Pos); +} + +/** + * @} + */ + +#if __MPU_PRESENT +/** @defgroup CORTEX_LL_EF_MPU MPU + * @{ + */ + +/** + * @brief Enable MPU with input options + * @rmtoll MPU_CTRL ENABLE LL_MPU_Enable + * @param Options This parameter can be one of the following values: + * @arg @ref LL_MPU_CTRL_HFNMI_PRIVDEF_NONE + * @arg @ref LL_MPU_CTRL_HARDFAULT_NMI + * @arg @ref LL_MPU_CTRL_PRIVILEGED_DEFAULT + * @arg @ref LL_MPU_CTRL_HFNMI_PRIVDEF + * @retval None + */ +__STATIC_INLINE void LL_MPU_Enable(uint32_t Options) +{ + /* Enable the MPU*/ + WRITE_REG(MPU->CTRL, (MPU_CTRL_ENABLE_Msk | Options)); + /* Ensure MPU settings take effects */ + __DSB(); + /* Sequence instruction fetches using update settings */ + __ISB(); +} + +/** + * @brief Disable MPU + * @rmtoll MPU_CTRL ENABLE LL_MPU_Disable + * @retval None + */ +__STATIC_INLINE void LL_MPU_Disable(void) +{ + /* Make sure outstanding transfers are done */ + __DMB(); + /* Disable MPU*/ + WRITE_REG(MPU->CTRL, 0U); +} + +/** + * @brief Check if MPU is enabled or not + * @rmtoll MPU_CTRL ENABLE LL_MPU_IsEnabled + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_MPU_IsEnabled(void) +{ + return (READ_BIT(MPU->CTRL, MPU_CTRL_ENABLE_Msk) == (MPU_CTRL_ENABLE_Msk)); +} + +/** + * @brief Enable a MPU region + * @rmtoll MPU_RASR ENABLE LL_MPU_EnableRegion + * @param Region This parameter can be one of the following values: + * @arg @ref LL_MPU_REGION_NUMBER0 + * @arg @ref LL_MPU_REGION_NUMBER1 + * @arg @ref LL_MPU_REGION_NUMBER2 + * @arg @ref LL_MPU_REGION_NUMBER3 + * @arg @ref LL_MPU_REGION_NUMBER4 + * @arg @ref LL_MPU_REGION_NUMBER5 + * @arg @ref LL_MPU_REGION_NUMBER6 + * @arg @ref LL_MPU_REGION_NUMBER7 + * @retval None + */ +__STATIC_INLINE void LL_MPU_EnableRegion(uint32_t Region) +{ + /* Set Region number */ + WRITE_REG(MPU->RNR, Region); + /* Enable the MPU region */ + SET_BIT(MPU->RASR, MPU_RASR_ENABLE_Msk); +} + +/** + * @brief Configure and enable a region + * @rmtoll MPU_RNR REGION LL_MPU_ConfigRegion\n + * MPU_RBAR REGION LL_MPU_ConfigRegion\n + * MPU_RBAR ADDR LL_MPU_ConfigRegion\n + * MPU_RASR XN LL_MPU_ConfigRegion\n + * MPU_RASR AP LL_MPU_ConfigRegion\n + * MPU_RASR S LL_MPU_ConfigRegion\n + * MPU_RASR C LL_MPU_ConfigRegion\n + * MPU_RASR B LL_MPU_ConfigRegion\n + * MPU_RASR SIZE LL_MPU_ConfigRegion + * @param Region This parameter can be one of the following values: + * @arg @ref LL_MPU_REGION_NUMBER0 + * @arg @ref LL_MPU_REGION_NUMBER1 + * @arg @ref LL_MPU_REGION_NUMBER2 + * @arg @ref LL_MPU_REGION_NUMBER3 + * @arg @ref LL_MPU_REGION_NUMBER4 + * @arg @ref LL_MPU_REGION_NUMBER5 + * @arg @ref LL_MPU_REGION_NUMBER6 + * @arg @ref LL_MPU_REGION_NUMBER7 + * @param Address Value of region base address + * @param SubRegionDisable Sub-region disable value between Min_Data = 0x00 and Max_Data = 0xFF + * @param Attributes This parameter can be a combination of the following values: + * @arg @ref LL_MPU_REGION_SIZE_32B or @ref LL_MPU_REGION_SIZE_64B or @ref LL_MPU_REGION_SIZE_128B or @ref LL_MPU_REGION_SIZE_256B or @ref LL_MPU_REGION_SIZE_512B + * or @ref LL_MPU_REGION_SIZE_1KB or @ref LL_MPU_REGION_SIZE_2KB or @ref LL_MPU_REGION_SIZE_4KB or @ref LL_MPU_REGION_SIZE_8KB or @ref LL_MPU_REGION_SIZE_16KB + * or 
@ref LL_MPU_REGION_SIZE_32KB or @ref LL_MPU_REGION_SIZE_64KB or @ref LL_MPU_REGION_SIZE_128KB or @ref LL_MPU_REGION_SIZE_256KB or @ref LL_MPU_REGION_SIZE_512KB + * or @ref LL_MPU_REGION_SIZE_1MB or @ref LL_MPU_REGION_SIZE_2MB or @ref LL_MPU_REGION_SIZE_4MB or @ref LL_MPU_REGION_SIZE_8MB or @ref LL_MPU_REGION_SIZE_16MB + * or @ref LL_MPU_REGION_SIZE_32MB or @ref LL_MPU_REGION_SIZE_64MB or @ref LL_MPU_REGION_SIZE_128MB or @ref LL_MPU_REGION_SIZE_256MB or @ref LL_MPU_REGION_SIZE_512MB + * or @ref LL_MPU_REGION_SIZE_1GB or @ref LL_MPU_REGION_SIZE_2GB or @ref LL_MPU_REGION_SIZE_4GB + * @arg @ref LL_MPU_REGION_NO_ACCESS or @ref LL_MPU_REGION_PRIV_RW or @ref LL_MPU_REGION_PRIV_RW_URO or @ref LL_MPU_REGION_FULL_ACCESS + * or @ref LL_MPU_REGION_PRIV_RO or @ref LL_MPU_REGION_PRIV_RO_URO + * @arg @ref LL_MPU_TEX_LEVEL0 or @ref LL_MPU_TEX_LEVEL1 or @ref LL_MPU_TEX_LEVEL2 or @ref LL_MPU_TEX_LEVEL4 + * @arg @ref LL_MPU_INSTRUCTION_ACCESS_ENABLE or @ref LL_MPU_INSTRUCTION_ACCESS_DISABLE + * @arg @ref LL_MPU_ACCESS_SHAREABLE or @ref LL_MPU_ACCESS_NOT_SHAREABLE + * @arg @ref LL_MPU_ACCESS_CACHEABLE or @ref LL_MPU_ACCESS_NOT_CACHEABLE + * @arg @ref LL_MPU_ACCESS_BUFFERABLE or @ref LL_MPU_ACCESS_NOT_BUFFERABLE + * @retval None + */ +__STATIC_INLINE void LL_MPU_ConfigRegion(uint32_t Region, uint32_t SubRegionDisable, uint32_t Address, uint32_t Attributes) +{ + /* Set Region number */ + WRITE_REG(MPU->RNR, Region); + /* Set base address */ + WRITE_REG(MPU->RBAR, (Address & 0xFFFFFFE0U)); + /* Configure MPU */ + WRITE_REG(MPU->RASR, (MPU_RASR_ENABLE_Msk | Attributes | (SubRegionDisable << MPU_RASR_SRD_Pos))); +} + +/** + * @brief Disable a region + * @rmtoll MPU_RNR REGION LL_MPU_DisableRegion\n + * MPU_RASR ENABLE LL_MPU_DisableRegion + * @param Region This parameter can be one of the following values: + * @arg @ref LL_MPU_REGION_NUMBER0 + * @arg @ref LL_MPU_REGION_NUMBER1 + * @arg @ref LL_MPU_REGION_NUMBER2 + * @arg @ref LL_MPU_REGION_NUMBER3 + * @arg @ref LL_MPU_REGION_NUMBER4 + * @arg @ref LL_MPU_REGION_NUMBER5 + * @arg @ref LL_MPU_REGION_NUMBER6 + * @arg @ref LL_MPU_REGION_NUMBER7 + * @retval None + */ +__STATIC_INLINE void LL_MPU_DisableRegion(uint32_t Region) +{ + /* Set Region number */ + WRITE_REG(MPU->RNR, Region); + /* Disable the MPU region */ + CLEAR_BIT(MPU->RASR, MPU_RASR_ENABLE_Msk); +} + +/** + * @} + */ + +#endif /* __MPU_PRESENT */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_CORTEX_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_dac.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_dac.h new file mode 100644 index 00000000..4bff015f --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_dac.h @@ -0,0 +1,1443 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_dac.h + * @author MCD Application Team + * @brief Header file of DAC LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F4xx_LL_DAC_H +#define STM32F4xx_LL_DAC_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined(DAC) + +/** @defgroup DAC_LL DAC + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup DAC_LL_Private_Constants DAC Private Constants + * @{ + */ + +/* Internal masks for DAC channels definition */ +/* To select into literal LL_DAC_CHANNEL_x the relevant bits for: */ +/* - channel bits position into registers CR, MCR, CCR, SHHR, SHRR */ +/* - channel bits position into register SWTRIG */ +/* - channel register offset of data holding register DHRx */ +/* - channel register offset of data output register DORx */ +#define DAC_CR_CH1_BITOFFSET 0UL /* Position of channel bits into registers + CR, MCR, CCR, SHHR, SHRR of channel 1 */ +#if defined(DAC_CHANNEL2_SUPPORT) +#define DAC_CR_CH2_BITOFFSET 16UL /* Position of channel bits into registers + CR, MCR, CCR, SHHR, SHRR of channel 2 */ +#define DAC_CR_CHX_BITOFFSET_MASK (DAC_CR_CH1_BITOFFSET | DAC_CR_CH2_BITOFFSET) +#else +#define DAC_CR_CHX_BITOFFSET_MASK (DAC_CR_CH1_BITOFFSET) +#endif /* DAC_CHANNEL2_SUPPORT */ + +#define DAC_SWTR_CH1 (DAC_SWTRIGR_SWTRIG1) /* Channel bit into register SWTRIGR of channel 1. */ +#if defined(DAC_CHANNEL2_SUPPORT) +#define DAC_SWTR_CH2 (DAC_SWTRIGR_SWTRIG2) /* Channel bit into register SWTRIGR of channel 2. 
*/ +#define DAC_SWTR_CHX_MASK (DAC_SWTR_CH1 | DAC_SWTR_CH2) +#else +#define DAC_SWTR_CHX_MASK (DAC_SWTR_CH1) +#endif /* DAC_CHANNEL2_SUPPORT */ + +#define DAC_REG_DHR12R1_REGOFFSET 0x00000000UL /* Register DHR12Rx channel 1 taken as reference */ +#define DAC_REG_DHR12L1_REGOFFSET 0x00100000UL /* Register offset of DHR12Lx channel 1 versus + DHR12Rx channel 1 (shifted left of 20 bits) */ +#define DAC_REG_DHR8R1_REGOFFSET 0x02000000UL /* Register offset of DHR8Rx channel 1 versus + DHR12Rx channel 1 (shifted left of 24 bits) */ +#if defined(DAC_CHANNEL2_SUPPORT) +#define DAC_REG_DHR12R2_REGOFFSET 0x00030000UL /* Register offset of DHR12Rx channel 2 versus + DHR12Rx channel 1 (shifted left of 16 bits) */ +#define DAC_REG_DHR12L2_REGOFFSET 0x00400000UL /* Register offset of DHR12Lx channel 2 versus + DHR12Rx channel 1 (shifted left of 20 bits) */ +#define DAC_REG_DHR8R2_REGOFFSET 0x05000000UL /* Register offset of DHR8Rx channel 2 versus + DHR12Rx channel 1 (shifted left of 24 bits) */ +#endif /* DAC_CHANNEL2_SUPPORT */ +#define DAC_REG_DHR12RX_REGOFFSET_MASK 0x000F0000UL +#define DAC_REG_DHR12LX_REGOFFSET_MASK 0x00F00000UL +#define DAC_REG_DHR8RX_REGOFFSET_MASK 0x0F000000UL +#define DAC_REG_DHRX_REGOFFSET_MASK (DAC_REG_DHR12RX_REGOFFSET_MASK\ + | DAC_REG_DHR12LX_REGOFFSET_MASK | DAC_REG_DHR8RX_REGOFFSET_MASK) + +#define DAC_REG_DOR1_REGOFFSET 0x00000000UL /* Register DORx channel 1 taken as reference */ +#if defined(DAC_CHANNEL2_SUPPORT) +#define DAC_REG_DOR2_REGOFFSET 0x10000000UL /* Register offset of DORx channel 1 versus + DORx channel 2 (shifted left of 28 bits) */ +#define DAC_REG_DORX_REGOFFSET_MASK (DAC_REG_DOR1_REGOFFSET | DAC_REG_DOR2_REGOFFSET) +#endif /* DAC_CHANNEL2_SUPPORT */ + + +#define DAC_REG_DHR_REGOFFSET_MASK_POSBIT0 0x0000000FUL /* Mask of data hold registers offset (DHR12Rx, + DHR12Lx, DHR8Rx, ...) 
when shifted to position 0 */ +#define DAC_REG_DORX_REGOFFSET_MASK_POSBIT0 0x00000001UL /* Mask of DORx registers offset when shifted + to position 0 */ +#define DAC_REG_SHSRX_REGOFFSET_MASK_POSBIT0 0x00000001UL /* Mask of SHSRx registers offset when shifted + to position 0 */ + +#define DAC_REG_DHR12RX_REGOFFSET_BITOFFSET_POS 16UL /* Position of bits register offset of DHR12Rx + channel 1 or 2 versus DHR12Rx channel 1 + (shifted left of 16 bits) */ +#define DAC_REG_DHR12LX_REGOFFSET_BITOFFSET_POS 20UL /* Position of bits register offset of DHR12Lx + channel 1 or 2 versus DHR12Rx channel 1 + (shifted left of 20 bits) */ +#define DAC_REG_DHR8RX_REGOFFSET_BITOFFSET_POS 24UL /* Position of bits register offset of DHR8Rx + channel 1 or 2 versus DHR12Rx channel 1 + (shifted left of 24 bits) */ +#define DAC_REG_DORX_REGOFFSET_BITOFFSET_POS 28UL /* Position of bits register offset of DORx + channel 1 or 2 versus DORx channel 1 + (shifted left of 28 bits) */ + +/* DAC registers bits positions */ +#if defined(DAC_CHANNEL2_SUPPORT) +#define DAC_DHR12RD_DACC2DHR_BITOFFSET_POS DAC_DHR12RD_DACC2DHR_Pos +#define DAC_DHR12LD_DACC2DHR_BITOFFSET_POS DAC_DHR12LD_DACC2DHR_Pos +#define DAC_DHR8RD_DACC2DHR_BITOFFSET_POS DAC_DHR8RD_DACC2DHR_Pos +#endif /* DAC_CHANNEL2_SUPPORT */ + +/* Miscellaneous data */ +#define DAC_DIGITAL_SCALE_12BITS 4095UL /* Full-scale digital value with a resolution of 12 + bits (voltage range determined by analog voltage + references Vref+ and Vref-, refer to reference manual) */ + +/** + * @} + */ + + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup DAC_LL_Private_Macros DAC Private Macros + * @{ + */ + +/** + * @brief Driver macro reserved for internal use: set a pointer to + * a register from a register basis from which an offset + * is applied. + * @param __REG__ Register basis from which the offset is applied. + * @param __REG_OFFFSET__ Offset to be applied (unit: number of registers). + * @retval Pointer to register address + */ +#define __DAC_PTR_REG_OFFSET(__REG__, __REG_OFFFSET__) \ + ((uint32_t *)((uint32_t) ((uint32_t)(&(__REG__)) + ((__REG_OFFFSET__) << 2UL)))) + +/** + * @} + */ + + +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup DAC_LL_ES_INIT DAC Exported Init structure + * @{ + */ + +/** + * @brief Structure definition of some features of DAC instance. + */ +typedef struct +{ + uint32_t TriggerSource; /*!< Set the conversion trigger source for the selected DAC channel: + internal (SW start) or from external peripheral + (timer event, external interrupt line). + This parameter can be a value of @ref DAC_LL_EC_TRIGGER_SOURCE + + This feature can be modified afterwards using unitary + function @ref LL_DAC_SetTriggerSource(). */ + + uint32_t WaveAutoGeneration; /*!< Set the waveform automatic generation mode for the selected DAC channel. + This parameter can be a value of @ref DAC_LL_EC_WAVE_AUTO_GENERATION_MODE + + This feature can be modified afterwards using unitary + function @ref LL_DAC_SetWaveAutoGeneration(). */ + + uint32_t WaveAutoGenerationConfig; /*!< Set the waveform automatic generation mode for the selected DAC channel. 
+ If waveform automatic generation mode is set to noise, this parameter + can be a value of @ref DAC_LL_EC_WAVE_NOISE_LFSR_UNMASK_BITS + If waveform automatic generation mode is set to triangle, + this parameter can be a value of @ref DAC_LL_EC_WAVE_TRIANGLE_AMPLITUDE + @note If waveform automatic generation mode is disabled, + this parameter is discarded. + + This feature can be modified afterwards using unitary + function @ref LL_DAC_SetWaveNoiseLFSR(), + @ref LL_DAC_SetWaveTriangleAmplitude() + depending on the wave automatic generation selected. */ + + uint32_t OutputBuffer; /*!< Set the output buffer for the selected DAC channel. + This parameter can be a value of @ref DAC_LL_EC_OUTPUT_BUFFER + + This feature can be modified afterwards using unitary + function @ref LL_DAC_SetOutputBuffer(). */ +} LL_DAC_InitTypeDef; + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup DAC_LL_Exported_Constants DAC Exported Constants + * @{ + */ + +/** @defgroup DAC_LL_EC_GET_FLAG DAC flags + * @brief Flags defines which can be used with LL_DAC_ReadReg function + * @{ + */ +/* DAC channel 1 flags */ +#define LL_DAC_FLAG_DMAUDR1 (DAC_SR_DMAUDR1) /*!< DAC channel 1 flag DMA underrun */ +#if defined(DAC_CHANNEL2_SUPPORT) +/* DAC channel 2 flags */ +#define LL_DAC_FLAG_DMAUDR2 (DAC_SR_DMAUDR2) /*!< DAC channel 2 flag DMA underrun */ +#endif /* DAC_CHANNEL2_SUPPORT */ +/** + * @} + */ + +/** @defgroup DAC_LL_EC_IT DAC interruptions + * @brief IT defines which can be used with LL_DAC_ReadReg and LL_DAC_WriteReg functions + * @{ + */ +#define LL_DAC_IT_DMAUDRIE1 (DAC_CR_DMAUDRIE1) /*!< DAC channel 1 interruption DMA underrun */ +#if defined(DAC_CHANNEL2_SUPPORT) +#define LL_DAC_IT_DMAUDRIE2 (DAC_CR_DMAUDRIE2) /*!< DAC channel 2 interruption DMA underrun */ +#endif /* DAC_CHANNEL2_SUPPORT */ +/** + * @} + */ + +/** @defgroup DAC_LL_EC_CHANNEL DAC channels + * @{ + */ +#define LL_DAC_CHANNEL_1 (DAC_REG_DOR1_REGOFFSET | DAC_REG_DHR12R1_REGOFFSET | DAC_REG_DHR12L1_REGOFFSET | DAC_REG_DHR8R1_REGOFFSET | DAC_CR_CH1_BITOFFSET | DAC_SWTR_CH1) /*!< DAC channel 1 */ +#if defined(DAC_CHANNEL2_SUPPORT) +#define LL_DAC_CHANNEL_2 (DAC_REG_DOR2_REGOFFSET | DAC_REG_DHR12R2_REGOFFSET | DAC_REG_DHR12L2_REGOFFSET | DAC_REG_DHR8R2_REGOFFSET | DAC_CR_CH2_BITOFFSET | DAC_SWTR_CH2) /*!< DAC channel 2 */ +#endif /* DAC_CHANNEL2_SUPPORT */ +/** + * @} + */ + +/** @defgroup DAC_LL_EC_TRIGGER_SOURCE DAC trigger source + * @{ + */ +#define LL_DAC_TRIG_SOFTWARE (DAC_CR_TSEL1_2 | DAC_CR_TSEL1_1 | DAC_CR_TSEL1_0) /*!< DAC channel conversion trigger internal (SW start) */ +#define LL_DAC_TRIG_EXT_TIM2_TRGO (DAC_CR_TSEL1_2 ) /*!< DAC channel conversion trigger from external peripheral: TIM2 TRGO. */ +#define LL_DAC_TRIG_EXT_TIM8_TRGO ( DAC_CR_TSEL1_0) /*!< DAC channel conversion trigger from external peripheral: TIM8 TRGO. */ +#define LL_DAC_TRIG_EXT_TIM4_TRGO (DAC_CR_TSEL1_2 | DAC_CR_TSEL1_0) /*!< DAC channel conversion trigger from external peripheral: TIM4 TRGO. */ +#define LL_DAC_TRIG_EXT_TIM6_TRGO 0x00000000UL /*!< DAC channel conversion trigger from external peripheral: TIM6 TRGO. */ +#define LL_DAC_TRIG_EXT_TIM7_TRGO ( DAC_CR_TSEL1_1 ) /*!< DAC channel conversion trigger from external peripheral: TIM7 TRGO. */ +#define LL_DAC_TRIG_EXT_TIM5_TRGO ( DAC_CR_TSEL1_1 | DAC_CR_TSEL1_0) /*!< DAC channel conversion trigger from external peripheral: TIM5 TRGO. 
*/ +#define LL_DAC_TRIG_EXT_EXTI_LINE9 (DAC_CR_TSEL1_2 | DAC_CR_TSEL1_1 ) /*!< DAC channel conversion trigger from external peripheral: external interrupt line 9. */ +/** + * @} + */ + +/** @defgroup DAC_LL_EC_WAVE_AUTO_GENERATION_MODE DAC waveform automatic generation mode + * @{ + */ +#define LL_DAC_WAVE_AUTO_GENERATION_NONE 0x00000000UL /*!< DAC channel wave auto generation mode disabled. */ +#define LL_DAC_WAVE_AUTO_GENERATION_NOISE ( DAC_CR_WAVE1_0) /*!< DAC channel wave auto generation mode enabled, set generated noise waveform. */ +#define LL_DAC_WAVE_AUTO_GENERATION_TRIANGLE (DAC_CR_WAVE1_1 ) /*!< DAC channel wave auto generation mode enabled, set generated triangle waveform. */ +/** + * @} + */ + +/** @defgroup DAC_LL_EC_WAVE_NOISE_LFSR_UNMASK_BITS DAC wave generation - Noise LFSR unmask bits + * @{ + */ +#define LL_DAC_NOISE_LFSR_UNMASK_BIT0 0x00000000UL /*!< Noise wave generation, unmask LFSR bit0, for the selected DAC channel */ +#define LL_DAC_NOISE_LFSR_UNMASK_BITS1_0 ( DAC_CR_MAMP1_0) /*!< Noise wave generation, unmask LFSR bits[1:0], for the selected DAC channel */ +#define LL_DAC_NOISE_LFSR_UNMASK_BITS2_0 ( DAC_CR_MAMP1_1 ) /*!< Noise wave generation, unmask LFSR bits[2:0], for the selected DAC channel */ +#define LL_DAC_NOISE_LFSR_UNMASK_BITS3_0 ( DAC_CR_MAMP1_1 | DAC_CR_MAMP1_0) /*!< Noise wave generation, unmask LFSR bits[3:0], for the selected DAC channel */ +#define LL_DAC_NOISE_LFSR_UNMASK_BITS4_0 ( DAC_CR_MAMP1_2 ) /*!< Noise wave generation, unmask LFSR bits[4:0], for the selected DAC channel */ +#define LL_DAC_NOISE_LFSR_UNMASK_BITS5_0 ( DAC_CR_MAMP1_2 | DAC_CR_MAMP1_0) /*!< Noise wave generation, unmask LFSR bits[5:0], for the selected DAC channel */ +#define LL_DAC_NOISE_LFSR_UNMASK_BITS6_0 ( DAC_CR_MAMP1_2 | DAC_CR_MAMP1_1 ) /*!< Noise wave generation, unmask LFSR bits[6:0], for the selected DAC channel */ +#define LL_DAC_NOISE_LFSR_UNMASK_BITS7_0 ( DAC_CR_MAMP1_2 | DAC_CR_MAMP1_1 | DAC_CR_MAMP1_0) /*!< Noise wave generation, unmask LFSR bits[7:0], for the selected DAC channel */ +#define LL_DAC_NOISE_LFSR_UNMASK_BITS8_0 (DAC_CR_MAMP1_3 ) /*!< Noise wave generation, unmask LFSR bits[8:0], for the selected DAC channel */ +#define LL_DAC_NOISE_LFSR_UNMASK_BITS9_0 (DAC_CR_MAMP1_3 | DAC_CR_MAMP1_0) /*!< Noise wave generation, unmask LFSR bits[9:0], for the selected DAC channel */ +#define LL_DAC_NOISE_LFSR_UNMASK_BITS10_0 (DAC_CR_MAMP1_3 | DAC_CR_MAMP1_1 ) /*!< Noise wave generation, unmask LFSR bits[10:0], for the selected DAC channel */ +#define LL_DAC_NOISE_LFSR_UNMASK_BITS11_0 (DAC_CR_MAMP1_3 | DAC_CR_MAMP1_1 | DAC_CR_MAMP1_0) /*!< Noise wave generation, unmask LFSR bits[11:0], for the selected DAC channel */ +/** + * @} + */ + +/** @defgroup DAC_LL_EC_WAVE_TRIANGLE_AMPLITUDE DAC wave generation - Triangle amplitude + * @{ + */ +#define LL_DAC_TRIANGLE_AMPLITUDE_1 0x00000000UL /*!< Triangle wave generation, amplitude of 1 LSB of DAC output range, for the selected DAC channel */ +#define LL_DAC_TRIANGLE_AMPLITUDE_3 ( DAC_CR_MAMP1_0) /*!< Triangle wave generation, amplitude of 3 LSB of DAC output range, for the selected DAC channel */ +#define LL_DAC_TRIANGLE_AMPLITUDE_7 ( DAC_CR_MAMP1_1 ) /*!< Triangle wave generation, amplitude of 7 LSB of DAC output range, for the selected DAC channel */ +#define LL_DAC_TRIANGLE_AMPLITUDE_15 ( DAC_CR_MAMP1_1 | DAC_CR_MAMP1_0) /*!< Triangle wave generation, amplitude of 15 LSB of DAC output range, for the selected DAC channel */ +#define LL_DAC_TRIANGLE_AMPLITUDE_31 ( DAC_CR_MAMP1_2 ) /*!< Triangle wave generation, amplitude of 
31 LSB of DAC output range, for the selected DAC channel */ +#define LL_DAC_TRIANGLE_AMPLITUDE_63 ( DAC_CR_MAMP1_2 | DAC_CR_MAMP1_0) /*!< Triangle wave generation, amplitude of 63 LSB of DAC output range, for the selected DAC channel */ +#define LL_DAC_TRIANGLE_AMPLITUDE_127 ( DAC_CR_MAMP1_2 | DAC_CR_MAMP1_1 ) /*!< Triangle wave generation, amplitude of 127 LSB of DAC output range, for the selected DAC channel */ +#define LL_DAC_TRIANGLE_AMPLITUDE_255 ( DAC_CR_MAMP1_2 | DAC_CR_MAMP1_1 | DAC_CR_MAMP1_0) /*!< Triangle wave generation, amplitude of 255 LSB of DAC output range, for the selected DAC channel */ +#define LL_DAC_TRIANGLE_AMPLITUDE_511 (DAC_CR_MAMP1_3 ) /*!< Triangle wave generation, amplitude of 511 LSB of DAC output range, for the selected DAC channel */ +#define LL_DAC_TRIANGLE_AMPLITUDE_1023 (DAC_CR_MAMP1_3 | DAC_CR_MAMP1_0) /*!< Triangle wave generation, amplitude of 1023 LSB of DAC output range, for the selected DAC channel */ +#define LL_DAC_TRIANGLE_AMPLITUDE_2047 (DAC_CR_MAMP1_3 | DAC_CR_MAMP1_1 ) /*!< Triangle wave generation, amplitude of 2047 LSB of DAC output range, for the selected DAC channel */ +#define LL_DAC_TRIANGLE_AMPLITUDE_4095 (DAC_CR_MAMP1_3 | DAC_CR_MAMP1_1 | DAC_CR_MAMP1_0) /*!< Triangle wave generation, amplitude of 4095 LSB of DAC output range, for the selected DAC channel */ +/** + * @} + */ + +/** @defgroup DAC_LL_EC_OUTPUT_BUFFER DAC channel output buffer + * @{ + */ +#define LL_DAC_OUTPUT_BUFFER_ENABLE 0x00000000UL /*!< The selected DAC channel output is buffered: higher drive current capability, but also higher current consumption */ +#define LL_DAC_OUTPUT_BUFFER_DISABLE (DAC_CR_BOFF1) /*!< The selected DAC channel output is not buffered: lower drive current capability, but also lower current consumption */ +/** + * @} + */ + +/** @defgroup DAC_LL_EC_RESOLUTION DAC channel output resolution + * @{ + */ +#define LL_DAC_RESOLUTION_12B 0x00000000UL /*!< DAC channel resolution 12 bits */ +#define LL_DAC_RESOLUTION_8B 0x00000002UL /*!< DAC channel resolution 8 bits */ +/** + * @} + */ + +/** @defgroup DAC_LL_EC_REGISTERS DAC registers compliant with specific purpose + * @{ + */ +/* List of DAC registers intended to be used (most commonly) with */ +/* DMA transfer. */ +/* Refer to function @ref LL_DAC_DMA_GetRegAddr(). */ +#define LL_DAC_DMA_REG_DATA_12BITS_RIGHT_ALIGNED DAC_REG_DHR12RX_REGOFFSET_BITOFFSET_POS /*!< DAC channel data holding register 12 bits right aligned */ +#define LL_DAC_DMA_REG_DATA_12BITS_LEFT_ALIGNED DAC_REG_DHR12LX_REGOFFSET_BITOFFSET_POS /*!< DAC channel data holding register 12 bits left aligned */ +#define LL_DAC_DMA_REG_DATA_8BITS_RIGHT_ALIGNED DAC_REG_DHR8RX_REGOFFSET_BITOFFSET_POS /*!< DAC channel data holding register 8 bits right aligned */ +/** + * @} + */ + +/** @defgroup DAC_LL_EC_HW_DELAYS Definitions of DAC hardware constraints delays + * @note Only DAC peripheral HW delays are defined in DAC LL driver, + * not timeout values. + * For details on delays values, refer to descriptions in source code + * above each literal definition. + * @{ + */ + +/* Delay for DAC channel voltage settling time from DAC channel startup */ +/* (transition from disable to enable). */ +/* Note: DAC channel startup time depends on board application environment: */ +/* impedance connected to DAC channel output. 
*/ +/* The delay below is specified under conditions: */ +/* - voltage maximum transition (lowest to highest value) */ +/* - until voltage reaches final value +-1LSB */ +/* - DAC channel output buffer enabled */ +/* - load impedance of 5kOhm (min), 50pF (max) */ +/* Literal set to maximum value (refer to device datasheet, */ +/* parameter "tWAKEUP"). */ +/* Unit: us */ +#define LL_DAC_DELAY_STARTUP_VOLTAGE_SETTLING_US 15UL /*!< Delay for DAC channel voltage settling time from DAC channel startup (transition from disable to enable) */ + +/* Delay for DAC channel voltage settling time. */ +/* Note: DAC channel startup time depends on board application environment: */ +/* impedance connected to DAC channel output. */ +/* The delay below is specified under conditions: */ +/* - voltage maximum transition (lowest to highest value) */ +/* - until voltage reaches final value +-1LSB */ +/* - DAC channel output buffer enabled */ +/* - load impedance of 5kOhm min, 50pF max */ +/* Literal set to maximum value (refer to device datasheet, */ +/* parameter "tSETTLING"). */ +/* Unit: us */ +#define LL_DAC_DELAY_VOLTAGE_SETTLING_US 12UL /*!< Delay for DAC channel voltage settling time */ + +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup DAC_LL_Exported_Macros DAC Exported Macros + * @{ + */ + +/** @defgroup DAC_LL_EM_WRITE_READ Common write and read registers macros + * @{ + */ + +/** + * @brief Write a value in DAC register + * @param __INSTANCE__ DAC Instance + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_DAC_WriteReg(__INSTANCE__, __REG__, __VALUE__) WRITE_REG(__INSTANCE__->__REG__, (__VALUE__)) + +/** + * @brief Read a value in DAC register + * @param __INSTANCE__ DAC Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_DAC_ReadReg(__INSTANCE__, __REG__) READ_REG(__INSTANCE__->__REG__) + +/** + * @} + */ + +/** @defgroup DAC_LL_EM_HELPER_MACRO DAC helper macro + * @{ + */ + +/** + * @brief Helper macro to get DAC channel number in decimal format + * from literals LL_DAC_CHANNEL_x. + * Example: + * __LL_DAC_CHANNEL_TO_DECIMAL_NB(LL_DAC_CHANNEL_1) + * will return decimal number "1". + * @note The input can be a value from functions where a channel + * number is returned. + * @param __CHANNEL__ This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @retval 1...2 (value "2" depending on DAC channel 2 availability) + */ +#define __LL_DAC_CHANNEL_TO_DECIMAL_NB(__CHANNEL__) \ + ((__CHANNEL__) & DAC_SWTR_CHX_MASK) + +/** + * @brief Helper macro to get DAC channel in literal format LL_DAC_CHANNEL_x + * from number in decimal format. + * Example: + * __LL_DAC_DECIMAL_NB_TO_CHANNEL(1) + * will return a data equivalent to "LL_DAC_CHANNEL_1". + * @note If the input parameter does not correspond to a DAC channel, + * this macro returns value '0'. + * @param __DECIMAL_NB__ 1...2 (value "2" depending on DAC channel 2 availability) + * @retval Returned value can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. 
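+ * @note  Usage sketch of the two channel helper macros (the values follow from the
+ *        definitions above):
+ * @code
+ *        uint32_t channel = __LL_DAC_DECIMAL_NB_TO_CHANNEL(1UL);     /* == LL_DAC_CHANNEL_1 */
+ *        uint32_t nb      = __LL_DAC_CHANNEL_TO_DECIMAL_NB(channel); /* == 1                */
+ * @endcode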
+ */ +#if defined(DAC_CHANNEL2_SUPPORT) +#define __LL_DAC_DECIMAL_NB_TO_CHANNEL(__DECIMAL_NB__) \ + (((__DECIMAL_NB__) == 1UL) \ + ? (LL_DAC_CHANNEL_1) \ + : \ + (((__DECIMAL_NB__) == 2UL) \ + ? (LL_DAC_CHANNEL_2) \ + : \ + (0UL) \ + ) \ + ) +#else +#define __LL_DAC_DECIMAL_NB_TO_CHANNEL(__DECIMAL_NB__) \ + (((__DECIMAL_NB__) == 1UL) \ + ? (LL_DAC_CHANNEL_1) \ + : \ + (0UL) \ + ) +#endif /* DAC_CHANNEL2_SUPPORT */ + +/** + * @brief Helper macro to define the DAC conversion data full-scale digital + * value corresponding to the selected DAC resolution. + * @note DAC conversion data full-scale corresponds to voltage range + * determined by analog voltage references Vref+ and Vref- + * (refer to reference manual). + * @param __DAC_RESOLUTION__ This parameter can be one of the following values: + * @arg @ref LL_DAC_RESOLUTION_12B + * @arg @ref LL_DAC_RESOLUTION_8B + * @retval ADC conversion data equivalent voltage value (unit: mVolt) + */ +#define __LL_DAC_DIGITAL_SCALE(__DAC_RESOLUTION__) \ + ((0x00000FFFUL) >> ((__DAC_RESOLUTION__) << 1UL)) + +/** + * @brief Helper macro to calculate the DAC conversion data (unit: digital + * value) corresponding to a voltage (unit: mVolt). + * @note This helper macro is intended to provide input data in voltage + * rather than digital value, + * to be used with LL DAC functions such as + * @ref LL_DAC_ConvertData12RightAligned(). + * @note Analog reference voltage (Vref+) must be either known from + * user board environment or can be calculated using ADC measurement + * and ADC helper macro __LL_ADC_CALC_VREFANALOG_VOLTAGE(). + * @param __VREFANALOG_VOLTAGE__ Analog reference voltage (unit: mV) + * @param __DAC_VOLTAGE__ Voltage to be generated by DAC channel + * (unit: mVolt). + * @param __DAC_RESOLUTION__ This parameter can be one of the following values: + * @arg @ref LL_DAC_RESOLUTION_12B + * @arg @ref LL_DAC_RESOLUTION_8B + * @retval DAC conversion data (unit: digital value) + */ +#define __LL_DAC_CALC_VOLTAGE_TO_DATA(__VREFANALOG_VOLTAGE__, __DAC_VOLTAGE__, __DAC_RESOLUTION__) \ + ((__DAC_VOLTAGE__) * __LL_DAC_DIGITAL_SCALE(__DAC_RESOLUTION__) \ + / (__VREFANALOG_VOLTAGE__) \ + ) + +/** + * @} + */ + +/** + * @} + */ + + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup DAC_LL_Exported_Functions DAC Exported Functions + * @{ + */ + +/** + * @brief Set the conversion trigger source for the selected DAC channel. + * @note For conversion trigger source to be effective, DAC trigger + * must be enabled using function @ref LL_DAC_EnableTrigger(). + * @note To set conversion trigger source, DAC channel must be disabled. + * Otherwise, the setting is discarded. + * @note Availability of parameters of trigger sources from timer + * depends on timers availability on the selected device. + * @rmtoll CR TSEL1 LL_DAC_SetTriggerSource\n + * CR TSEL2 LL_DAC_SetTriggerSource + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. 
+ * @param TriggerSource This parameter can be one of the following values: + * @arg @ref LL_DAC_TRIG_SOFTWARE + * @arg @ref LL_DAC_TRIG_EXT_TIM8_TRGO + * @arg @ref LL_DAC_TRIG_EXT_TIM7_TRGO + * @arg @ref LL_DAC_TRIG_EXT_TIM6_TRGO + * @arg @ref LL_DAC_TRIG_EXT_TIM5_TRGO + * @arg @ref LL_DAC_TRIG_EXT_TIM4_TRGO + * @arg @ref LL_DAC_TRIG_EXT_TIM2_TRGO + * @arg @ref LL_DAC_TRIG_EXT_EXTI_LINE9 + * @retval None + */ +__STATIC_INLINE void LL_DAC_SetTriggerSource(DAC_TypeDef *DACx, uint32_t DAC_Channel, uint32_t TriggerSource) +{ + MODIFY_REG(DACx->CR, + DAC_CR_TSEL1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK), + TriggerSource << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)); +} + +/** + * @brief Get the conversion trigger source for the selected DAC channel. + * @note For conversion trigger source to be effective, DAC trigger + * must be enabled using function @ref LL_DAC_EnableTrigger(). + * @note Availability of parameters of trigger sources from timer + * depends on timers availability on the selected device. + * @rmtoll CR TSEL1 LL_DAC_GetTriggerSource\n + * CR TSEL2 LL_DAC_GetTriggerSource + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @retval Returned value can be one of the following values: + * @arg @ref LL_DAC_TRIG_SOFTWARE + * @arg @ref LL_DAC_TRIG_EXT_TIM8_TRGO + * @arg @ref LL_DAC_TRIG_EXT_TIM7_TRGO + * @arg @ref LL_DAC_TRIG_EXT_TIM6_TRGO + * @arg @ref LL_DAC_TRIG_EXT_TIM5_TRGO + * @arg @ref LL_DAC_TRIG_EXT_TIM4_TRGO + * @arg @ref LL_DAC_TRIG_EXT_TIM2_TRGO + * @arg @ref LL_DAC_TRIG_EXT_EXTI_LINE9 + */ +__STATIC_INLINE uint32_t LL_DAC_GetTriggerSource(const DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + return (uint32_t)(READ_BIT(DACx->CR, DAC_CR_TSEL1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)) + >> (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK) + ); +} + +/** + * @brief Set the waveform automatic generation mode + * for the selected DAC channel. + * @rmtoll CR WAVE1 LL_DAC_SetWaveAutoGeneration\n + * CR WAVE2 LL_DAC_SetWaveAutoGeneration + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @param WaveAutoGeneration This parameter can be one of the following values: + * @arg @ref LL_DAC_WAVE_AUTO_GENERATION_NONE + * @arg @ref LL_DAC_WAVE_AUTO_GENERATION_NOISE + * @arg @ref LL_DAC_WAVE_AUTO_GENERATION_TRIANGLE + * @retval None + */ +__STATIC_INLINE void LL_DAC_SetWaveAutoGeneration(DAC_TypeDef *DACx, uint32_t DAC_Channel, uint32_t WaveAutoGeneration) +{ + MODIFY_REG(DACx->CR, + DAC_CR_WAVE1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK), + WaveAutoGeneration << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)); +} + +/** + * @brief Get the waveform automatic generation mode + * for the selected DAC channel. + * @rmtoll CR WAVE1 LL_DAC_GetWaveAutoGeneration\n + * CR WAVE2 LL_DAC_GetWaveAutoGeneration + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. 
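+ * @note  Configuration sketch (channel still disabled at this point, trigger to be
+ *        enabled afterwards with @ref LL_DAC_EnableTrigger() for the wave to be
+ *        generated; DAC instance name assumed to be DAC):
+ * @code
+ *        LL_DAC_SetTriggerSource(DAC, LL_DAC_CHANNEL_1, LL_DAC_TRIG_EXT_TIM6_TRGO);
+ *        LL_DAC_SetWaveAutoGeneration(DAC, LL_DAC_CHANNEL_1, LL_DAC_WAVE_AUTO_GENERATION_TRIANGLE);
+ * @endcode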
+ * @retval Returned value can be one of the following values: + * @arg @ref LL_DAC_WAVE_AUTO_GENERATION_NONE + * @arg @ref LL_DAC_WAVE_AUTO_GENERATION_NOISE + * @arg @ref LL_DAC_WAVE_AUTO_GENERATION_TRIANGLE + */ +__STATIC_INLINE uint32_t LL_DAC_GetWaveAutoGeneration(const DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + return (uint32_t)(READ_BIT(DACx->CR, DAC_CR_WAVE1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)) + >> (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK) + ); +} + +/** + * @brief Set the noise waveform generation for the selected DAC channel: + * Noise mode and parameters LFSR (linear feedback shift register). + * @note For wave generation to be effective, DAC channel + * wave generation mode must be enabled using + * function @ref LL_DAC_SetWaveAutoGeneration(). + * @note This setting can be set when the selected DAC channel is disabled + * (otherwise, the setting operation is ignored). + * @rmtoll CR MAMP1 LL_DAC_SetWaveNoiseLFSR\n + * CR MAMP2 LL_DAC_SetWaveNoiseLFSR + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @param NoiseLFSRMask This parameter can be one of the following values: + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BIT0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS1_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS2_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS3_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS4_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS5_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS6_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS7_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS8_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS9_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS10_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS11_0 + * @retval None + */ +__STATIC_INLINE void LL_DAC_SetWaveNoiseLFSR(DAC_TypeDef *DACx, uint32_t DAC_Channel, uint32_t NoiseLFSRMask) +{ + MODIFY_REG(DACx->CR, + DAC_CR_MAMP1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK), + NoiseLFSRMask << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)); +} + +/** + * @brief Get the noise waveform generation for the selected DAC channel: + * Noise mode and parameters LFSR (linear feedback shift register). + * @rmtoll CR MAMP1 LL_DAC_GetWaveNoiseLFSR\n + * CR MAMP2 LL_DAC_GetWaveNoiseLFSR + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. 
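+ * @note  Configuration sketch for noise generation on channel 1 (DAC instance name
+ *        assumed to be DAC; channel disabled while configuring):
+ * @code
+ *        LL_DAC_SetWaveAutoGeneration(DAC, LL_DAC_CHANNEL_1, LL_DAC_WAVE_AUTO_GENERATION_NOISE);
+ *        LL_DAC_SetWaveNoiseLFSR(DAC, LL_DAC_CHANNEL_1, LL_DAC_NOISE_LFSR_UNMASK_BITS11_0);
+ * @endcode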
+ * @retval Returned value can be one of the following values: + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BIT0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS1_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS2_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS3_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS4_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS5_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS6_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS7_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS8_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS9_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS10_0 + * @arg @ref LL_DAC_NOISE_LFSR_UNMASK_BITS11_0 + */ +__STATIC_INLINE uint32_t LL_DAC_GetWaveNoiseLFSR(const DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + return (uint32_t)(READ_BIT(DACx->CR, DAC_CR_MAMP1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)) + >> (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK) + ); +} + +/** + * @brief Set the triangle waveform generation for the selected DAC channel: + * triangle mode and amplitude. + * @note For wave generation to be effective, DAC channel + * wave generation mode must be enabled using + * function @ref LL_DAC_SetWaveAutoGeneration(). + * @note This setting can be set when the selected DAC channel is disabled + * (otherwise, the setting operation is ignored). + * @rmtoll CR MAMP1 LL_DAC_SetWaveTriangleAmplitude\n + * CR MAMP2 LL_DAC_SetWaveTriangleAmplitude + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @param TriangleAmplitude This parameter can be one of the following values: + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_1 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_3 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_7 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_15 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_31 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_63 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_127 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_255 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_511 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_1023 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_2047 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_4095 + * @retval None + */ +__STATIC_INLINE void LL_DAC_SetWaveTriangleAmplitude(DAC_TypeDef *DACx, uint32_t DAC_Channel, + uint32_t TriangleAmplitude) +{ + MODIFY_REG(DACx->CR, + DAC_CR_MAMP1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK), + TriangleAmplitude << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)); +} + +/** + * @brief Get the triangle waveform generation for the selected DAC channel: + * triangle mode and amplitude. + * @rmtoll CR MAMP1 LL_DAC_GetWaveTriangleAmplitude\n + * CR MAMP2 LL_DAC_GetWaveTriangleAmplitude + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. 
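+  * @note   Illustrative configuration sketch for triangle generation (example
+  *         only, assuming DAC1 and channel 1; performed while the channel is
+  *         disabled):
+  *         LL_DAC_SetWaveAutoGeneration(DAC1, LL_DAC_CHANNEL_1,
+  *                                      LL_DAC_WAVE_AUTO_GENERATION_TRIANGLE);
+  *         LL_DAC_SetWaveTriangleAmplitude(DAC1, LL_DAC_CHANNEL_1,
+  *                                         LL_DAC_TRIANGLE_AMPLITUDE_1023);
+  *         LL_DAC_EnableTrigger(DAC1, LL_DAC_CHANNEL_1);
+  *         Each trigger event then increments the internal triangle counter.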
+ * @retval Returned value can be one of the following values: + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_1 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_3 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_7 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_15 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_31 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_63 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_127 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_255 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_511 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_1023 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_2047 + * @arg @ref LL_DAC_TRIANGLE_AMPLITUDE_4095 + */ +__STATIC_INLINE uint32_t LL_DAC_GetWaveTriangleAmplitude(const DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + return (uint32_t)(READ_BIT(DACx->CR, DAC_CR_MAMP1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)) + >> (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK) + ); +} + +/** + * @brief Set the output buffer for the selected DAC channel. + * @rmtoll CR BOFF1 LL_DAC_SetOutputBuffer\n + * CR BOFF2 LL_DAC_SetOutputBuffer + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @param OutputBuffer This parameter can be one of the following values: + * @arg @ref LL_DAC_OUTPUT_BUFFER_ENABLE + * @arg @ref LL_DAC_OUTPUT_BUFFER_DISABLE + * @retval None + */ +__STATIC_INLINE void LL_DAC_SetOutputBuffer(DAC_TypeDef *DACx, uint32_t DAC_Channel, uint32_t OutputBuffer) +{ + MODIFY_REG(DACx->CR, + DAC_CR_BOFF1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK), + OutputBuffer << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)); +} + +/** + * @brief Get the output buffer state for the selected DAC channel. + * @rmtoll CR BOFF1 LL_DAC_GetOutputBuffer\n + * CR BOFF2 LL_DAC_GetOutputBuffer + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @retval Returned value can be one of the following values: + * @arg @ref LL_DAC_OUTPUT_BUFFER_ENABLE + * @arg @ref LL_DAC_OUTPUT_BUFFER_DISABLE + */ +__STATIC_INLINE uint32_t LL_DAC_GetOutputBuffer(const DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + return (uint32_t)(READ_BIT(DACx->CR, DAC_CR_BOFF1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)) + >> (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK) + ); +} + +/** + * @} + */ + +/** @defgroup DAC_LL_EF_DMA_Management DMA Management + * @{ + */ + +/** + * @brief Enable DAC DMA transfer request of the selected channel. + * @note To configure DMA source address (peripheral address), + * use function @ref LL_DAC_DMA_GetRegAddr(). + * @rmtoll CR DMAEN1 LL_DAC_EnableDMAReq\n + * CR DMAEN2 LL_DAC_EnableDMAReq + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @retval None + */ +__STATIC_INLINE void LL_DAC_EnableDMAReq(DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + SET_BIT(DACx->CR, + DAC_CR_DMAEN1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)); +} + +/** + * @brief Disable DAC DMA transfer request of the selected channel. 
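+  * @note   Illustrative stop sketch (example only, assuming DAC1 channel 1 is
+  *         fed by a DMA stream configured with the DMA LL driver; the stream
+  *         itself is stopped separately, e.g. with LL_DMA_DisableStream()):
+  *         LL_DAC_DisableDMAReq(DAC1, LL_DAC_CHANNEL_1);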
+ * @note To configure DMA source address (peripheral address), + * use function @ref LL_DAC_DMA_GetRegAddr(). + * @rmtoll CR DMAEN1 LL_DAC_DisableDMAReq\n + * CR DMAEN2 LL_DAC_DisableDMAReq + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @retval None + */ +__STATIC_INLINE void LL_DAC_DisableDMAReq(DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + CLEAR_BIT(DACx->CR, + DAC_CR_DMAEN1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)); +} + +/** + * @brief Get DAC DMA transfer request state of the selected channel. + * (0: DAC DMA transfer request is disabled, 1: DAC DMA transfer request is enabled) + * @rmtoll CR DMAEN1 LL_DAC_IsDMAReqEnabled\n + * CR DMAEN2 LL_DAC_IsDMAReqEnabled + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DAC_IsDMAReqEnabled(const DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + return ((READ_BIT(DACx->CR, + DAC_CR_DMAEN1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)) + == (DAC_CR_DMAEN1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK))) ? 1UL : 0UL); +} + +/** + * @brief Function to help to configure DMA transfer to DAC: retrieve the + * DAC register address from DAC instance and a list of DAC registers + * intended to be used (most commonly) with DMA transfer. + * @note These DAC registers are data holding registers: + * when DAC conversion is requested, DAC generates a DMA transfer + * request to have data available in DAC data holding registers. + * @note This macro is intended to be used with LL DMA driver, refer to + * function "LL_DMA_ConfigAddresses()". + * Example: + * LL_DMA_ConfigAddresses(DMA1, + * LL_DMA_CHANNEL_1, + * (uint32_t)&< array or variable >, + * LL_DAC_DMA_GetRegAddr(DAC1, LL_DAC_CHANNEL_1, + * LL_DAC_DMA_REG_DATA_12BITS_RIGHT_ALIGNED), + * LL_DMA_DIRECTION_MEMORY_TO_PERIPH); + * @rmtoll DHR12R1 DACC1DHR LL_DAC_DMA_GetRegAddr\n + * DHR12L1 DACC1DHR LL_DAC_DMA_GetRegAddr\n + * DHR8R1 DACC1DHR LL_DAC_DMA_GetRegAddr\n + * DHR12R2 DACC2DHR LL_DAC_DMA_GetRegAddr\n + * DHR12L2 DACC2DHR LL_DAC_DMA_GetRegAddr\n + * DHR8R2 DACC2DHR LL_DAC_DMA_GetRegAddr + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @param Register This parameter can be one of the following values: + * @arg @ref LL_DAC_DMA_REG_DATA_12BITS_RIGHT_ALIGNED + * @arg @ref LL_DAC_DMA_REG_DATA_12BITS_LEFT_ALIGNED + * @arg @ref LL_DAC_DMA_REG_DATA_8BITS_RIGHT_ALIGNED + * @retval DAC register address + */ +__STATIC_INLINE uint32_t LL_DAC_DMA_GetRegAddr(const DAC_TypeDef *DACx, uint32_t DAC_Channel, uint32_t Register) +{ + /* Retrieve address of register DHR12Rx, DHR12Lx or DHR8Rx depending on */ + /* DAC channel selected. 
*/ + return ((uint32_t)(__DAC_PTR_REG_OFFSET((DACx)->DHR12R1, ((DAC_Channel >> (Register & 0x1FUL)) + & DAC_REG_DHR_REGOFFSET_MASK_POSBIT0)))); +} +/** + * @} + */ + +/** @defgroup DAC_LL_EF_Operation Operation on DAC channels + * @{ + */ + +/** + * @brief Enable DAC selected channel. + * @rmtoll CR EN1 LL_DAC_Enable\n + * CR EN2 LL_DAC_Enable + * @note After enable from off state, DAC channel requires a delay + * for output voltage to reach accuracy +/- 1 LSB. + * Refer to device datasheet, parameter "tWAKEUP". + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @retval None + */ +__STATIC_INLINE void LL_DAC_Enable(DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + SET_BIT(DACx->CR, + DAC_CR_EN1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)); +} + +/** + * @brief Disable DAC selected channel. + * @rmtoll CR EN1 LL_DAC_Disable\n + * CR EN2 LL_DAC_Disable + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @retval None + */ +__STATIC_INLINE void LL_DAC_Disable(DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + CLEAR_BIT(DACx->CR, + DAC_CR_EN1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)); +} + +/** + * @brief Get DAC enable state of the selected channel. + * (0: DAC channel is disabled, 1: DAC channel is enabled) + * @rmtoll CR EN1 LL_DAC_IsEnabled\n + * CR EN2 LL_DAC_IsEnabled + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DAC_IsEnabled(const DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + return ((READ_BIT(DACx->CR, + DAC_CR_EN1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)) + == (DAC_CR_EN1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK))) ? 1UL : 0UL); +} + +/** + * @brief Enable DAC trigger of the selected channel. + * @note - If DAC trigger is disabled, DAC conversion is performed + * automatically once the data holding register is updated, + * using functions "LL_DAC_ConvertData{8; 12}{Right; Left} Aligned()": + * @ref LL_DAC_ConvertData12RightAligned(), ... + * - If DAC trigger is enabled, DAC conversion is performed + * only when a hardware of software trigger event is occurring. + * Select trigger source using + * function @ref LL_DAC_SetTriggerSource(). + * @rmtoll CR TEN1 LL_DAC_EnableTrigger\n + * CR TEN2 LL_DAC_EnableTrigger + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @retval None + */ +__STATIC_INLINE void LL_DAC_EnableTrigger(DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + SET_BIT(DACx->CR, + DAC_CR_TEN1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)); +} + +/** + * @brief Disable DAC trigger of the selected channel. 
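+  * @note   Illustrative trigger setup sketch for the counterpart function
+  *         @ref LL_DAC_EnableTrigger() (example only, assuming DAC1, channel 1
+  *         and TIM6 TRGO are available on the device):
+  *         LL_DAC_SetTriggerSource(DAC1, LL_DAC_CHANNEL_1, LL_DAC_TRIG_EXT_TIM6_TRGO);
+  *         LL_DAC_EnableTrigger(DAC1, LL_DAC_CHANNEL_1);
+  *         LL_DAC_Enable(DAC1, LL_DAC_CHANNEL_1);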
+ * @rmtoll CR TEN1 LL_DAC_DisableTrigger\n + * CR TEN2 LL_DAC_DisableTrigger + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @retval None + */ +__STATIC_INLINE void LL_DAC_DisableTrigger(DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + CLEAR_BIT(DACx->CR, + DAC_CR_TEN1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)); +} + +/** + * @brief Get DAC trigger state of the selected channel. + * (0: DAC trigger is disabled, 1: DAC trigger is enabled) + * @rmtoll CR TEN1 LL_DAC_IsTriggerEnabled\n + * CR TEN2 LL_DAC_IsTriggerEnabled + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DAC_IsTriggerEnabled(const DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + return ((READ_BIT(DACx->CR, + DAC_CR_TEN1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK)) + == (DAC_CR_TEN1 << (DAC_Channel & DAC_CR_CHX_BITOFFSET_MASK))) ? 1UL : 0UL); +} + +/** + * @brief Trig DAC conversion by software for the selected DAC channel. + * @note Preliminarily, DAC trigger must be set to software trigger + * using function + * @ref LL_DAC_Init() + * @ref LL_DAC_SetTriggerSource() + * with parameter "LL_DAC_TRIGGER_SOFTWARE". + * and DAC trigger must be enabled using + * function @ref LL_DAC_EnableTrigger(). + * @note For devices featuring DAC with 2 channels: this function + * can perform a SW start of both DAC channels simultaneously. + * Two channels can be selected as parameter. + * Example: (LL_DAC_CHANNEL_1 | LL_DAC_CHANNEL_2) + * @rmtoll SWTRIGR SWTRIG1 LL_DAC_TrigSWConversion\n + * SWTRIGR SWTRIG2 LL_DAC_TrigSWConversion + * @param DACx DAC instance + * @param DAC_Channel This parameter can a combination of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @retval None + */ +__STATIC_INLINE void LL_DAC_TrigSWConversion(DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + SET_BIT(DACx->SWTRIGR, + (DAC_Channel & DAC_SWTR_CHX_MASK)); +} + +/** + * @brief Set the data to be loaded in the data holding register + * in format 12 bits left alignment (LSB aligned on bit 0), + * for the selected DAC channel. + * @rmtoll DHR12R1 DACC1DHR LL_DAC_ConvertData12RightAligned\n + * DHR12R2 DACC2DHR LL_DAC_ConvertData12RightAligned + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. 
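+  * @note   Illustrative software-triggered conversion sketch (example only,
+  *         assuming DAC1 channel 1 is enabled with trigger source
+  *         LL_DAC_TRIG_SOFTWARE and trigger enabled):
+  *         LL_DAC_ConvertData12RightAligned(DAC1, LL_DAC_CHANNEL_1, 0x800U);
+  *         LL_DAC_TrigSWConversion(DAC1, LL_DAC_CHANNEL_1);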
+ * @param Data Value between Min_Data=0x000 and Max_Data=0xFFF + * @retval None + */ +__STATIC_INLINE void LL_DAC_ConvertData12RightAligned(DAC_TypeDef *DACx, uint32_t DAC_Channel, uint32_t Data) +{ + __IO uint32_t *preg = __DAC_PTR_REG_OFFSET(DACx->DHR12R1, (DAC_Channel >> DAC_REG_DHR12RX_REGOFFSET_BITOFFSET_POS) + & DAC_REG_DHR_REGOFFSET_MASK_POSBIT0); + + MODIFY_REG(*preg, DAC_DHR12R1_DACC1DHR, Data); +} + +/** + * @brief Set the data to be loaded in the data holding register + * in format 12 bits left alignment (MSB aligned on bit 15), + * for the selected DAC channel. + * @rmtoll DHR12L1 DACC1DHR LL_DAC_ConvertData12LeftAligned\n + * DHR12L2 DACC2DHR LL_DAC_ConvertData12LeftAligned + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @param Data Value between Min_Data=0x000 and Max_Data=0xFFF + * @retval None + */ +__STATIC_INLINE void LL_DAC_ConvertData12LeftAligned(DAC_TypeDef *DACx, uint32_t DAC_Channel, uint32_t Data) +{ + __IO uint32_t *preg = __DAC_PTR_REG_OFFSET(DACx->DHR12R1, (DAC_Channel >> DAC_REG_DHR12LX_REGOFFSET_BITOFFSET_POS) + & DAC_REG_DHR_REGOFFSET_MASK_POSBIT0); + + MODIFY_REG(*preg, DAC_DHR12L1_DACC1DHR, Data); +} + +/** + * @brief Set the data to be loaded in the data holding register + * in format 8 bits left alignment (LSB aligned on bit 0), + * for the selected DAC channel. + * @rmtoll DHR8R1 DACC1DHR LL_DAC_ConvertData8RightAligned\n + * DHR8R2 DACC2DHR LL_DAC_ConvertData8RightAligned + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @param Data Value between Min_Data=0x00 and Max_Data=0xFF + * @retval None + */ +__STATIC_INLINE void LL_DAC_ConvertData8RightAligned(DAC_TypeDef *DACx, uint32_t DAC_Channel, uint32_t Data) +{ + __IO uint32_t *preg = __DAC_PTR_REG_OFFSET(DACx->DHR12R1, (DAC_Channel >> DAC_REG_DHR8RX_REGOFFSET_BITOFFSET_POS) + & DAC_REG_DHR_REGOFFSET_MASK_POSBIT0); + + MODIFY_REG(*preg, DAC_DHR8R1_DACC1DHR, Data); +} + +#if defined(DAC_CHANNEL2_SUPPORT) +/** + * @brief Set the data to be loaded in the data holding register + * in format 12 bits left alignment (LSB aligned on bit 0), + * for both DAC channels. + * @rmtoll DHR12RD DACC1DHR LL_DAC_ConvertDualData12RightAligned\n + * DHR12RD DACC2DHR LL_DAC_ConvertDualData12RightAligned + * @param DACx DAC instance + * @param DataChannel1 Value between Min_Data=0x000 and Max_Data=0xFFF + * @param DataChannel2 Value between Min_Data=0x000 and Max_Data=0xFFF + * @retval None + */ +__STATIC_INLINE void LL_DAC_ConvertDualData12RightAligned(DAC_TypeDef *DACx, uint32_t DataChannel1, + uint32_t DataChannel2) +{ + MODIFY_REG(DACx->DHR12RD, + (DAC_DHR12RD_DACC2DHR | DAC_DHR12RD_DACC1DHR), + ((DataChannel2 << DAC_DHR12RD_DACC2DHR_BITOFFSET_POS) | DataChannel1)); +} + +/** + * @brief Set the data to be loaded in the data holding register + * in format 12 bits left alignment (MSB aligned on bit 15), + * for both DAC channels. 
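+  * @note   Illustrative dual-channel update sketch using the right-aligned
+  *         variant defined above (example only, assuming both channels are
+  *         configured and enabled):
+  *         LL_DAC_ConvertDualData12RightAligned(DAC1, 0x000U, 0xFFFU);
+  *         A single register write then updates both data holding registers.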
+ * @rmtoll DHR12LD DACC1DHR LL_DAC_ConvertDualData12LeftAligned\n + * DHR12LD DACC2DHR LL_DAC_ConvertDualData12LeftAligned + * @param DACx DAC instance + * @param DataChannel1 Value between Min_Data=0x000 and Max_Data=0xFFF + * @param DataChannel2 Value between Min_Data=0x000 and Max_Data=0xFFF + * @retval None + */ +__STATIC_INLINE void LL_DAC_ConvertDualData12LeftAligned(DAC_TypeDef *DACx, uint32_t DataChannel1, + uint32_t DataChannel2) +{ + /* Note: Data of DAC channel 2 shift value subtracted of 4 because */ + /* data on 16 bits and DAC channel 2 bits field is on the 12 MSB, */ + /* the 4 LSB must be taken into account for the shift value. */ + MODIFY_REG(DACx->DHR12LD, + (DAC_DHR12LD_DACC2DHR | DAC_DHR12LD_DACC1DHR), + ((DataChannel2 << (DAC_DHR12LD_DACC2DHR_BITOFFSET_POS - 4U)) | DataChannel1)); +} + +/** + * @brief Set the data to be loaded in the data holding register + * in format 8 bits left alignment (LSB aligned on bit 0), + * for both DAC channels. + * @rmtoll DHR8RD DACC1DHR LL_DAC_ConvertDualData8RightAligned\n + * DHR8RD DACC2DHR LL_DAC_ConvertDualData8RightAligned + * @param DACx DAC instance + * @param DataChannel1 Value between Min_Data=0x00 and Max_Data=0xFF + * @param DataChannel2 Value between Min_Data=0x00 and Max_Data=0xFF + * @retval None + */ +__STATIC_INLINE void LL_DAC_ConvertDualData8RightAligned(DAC_TypeDef *DACx, uint32_t DataChannel1, + uint32_t DataChannel2) +{ + MODIFY_REG(DACx->DHR8RD, + (DAC_DHR8RD_DACC2DHR | DAC_DHR8RD_DACC1DHR), + ((DataChannel2 << DAC_DHR8RD_DACC2DHR_BITOFFSET_POS) | DataChannel1)); +} +#endif /* DAC_CHANNEL2_SUPPORT */ + +/** + * @brief Retrieve output data currently generated for the selected DAC channel. + * @note Whatever alignment and resolution settings + * (using functions "LL_DAC_ConvertData{8; 12}{Right; Left} Aligned()": + * @ref LL_DAC_ConvertData12RightAligned(), ...), + * output data format is 12 bits right aligned (LSB aligned on bit 0). + * @rmtoll DOR1 DACC1DOR LL_DAC_RetrieveOutputData\n + * DOR2 DACC2DOR LL_DAC_RetrieveOutputData + * @param DACx DAC instance + * @param DAC_Channel This parameter can be one of the following values: + * @arg @ref LL_DAC_CHANNEL_1 + * @arg @ref LL_DAC_CHANNEL_2 (1) + * + * (1) On this STM32 series, parameter not available on all devices. + * Refer to device datasheet for channels availability. + * @retval Value between Min_Data=0x000 and Max_Data=0xFFF + */ +__STATIC_INLINE uint32_t LL_DAC_RetrieveOutputData(const DAC_TypeDef *DACx, uint32_t DAC_Channel) +{ + __IO uint32_t const *preg = __DAC_PTR_REG_OFFSET(DACx->DOR1, (DAC_Channel >> DAC_REG_DORX_REGOFFSET_BITOFFSET_POS) + & DAC_REG_DORX_REGOFFSET_MASK_POSBIT0); + + return (uint16_t) READ_BIT(*preg, DAC_DOR1_DACC1DOR); +} + +/** + * @} + */ + +/** @defgroup DAC_LL_EF_FLAG_Management FLAG Management + * @{ + */ + +/** + * @brief Get DAC underrun flag for DAC channel 1 + * @rmtoll SR DMAUDR1 LL_DAC_IsActiveFlag_DMAUDR1 + * @param DACx DAC instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DAC_IsActiveFlag_DMAUDR1(const DAC_TypeDef *DACx) +{ + return ((READ_BIT(DACx->SR, LL_DAC_FLAG_DMAUDR1) == (LL_DAC_FLAG_DMAUDR1)) ? 1UL : 0UL); +} + +#if defined(DAC_CHANNEL2_SUPPORT) +/** + * @brief Get DAC underrun flag for DAC channel 2 + * @rmtoll SR DMAUDR2 LL_DAC_IsActiveFlag_DMAUDR2 + * @param DACx DAC instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DAC_IsActiveFlag_DMAUDR2(const DAC_TypeDef *DACx) +{ + return ((READ_BIT(DACx->SR, LL_DAC_FLAG_DMAUDR2) == (LL_DAC_FLAG_DMAUDR2)) ? 
1UL : 0UL); +} +#endif /* DAC_CHANNEL2_SUPPORT */ + +/** + * @brief Clear DAC underrun flag for DAC channel 1 + * @rmtoll SR DMAUDR1 LL_DAC_ClearFlag_DMAUDR1 + * @param DACx DAC instance + * @retval None + */ +__STATIC_INLINE void LL_DAC_ClearFlag_DMAUDR1(DAC_TypeDef *DACx) +{ + WRITE_REG(DACx->SR, LL_DAC_FLAG_DMAUDR1); +} + +#if defined(DAC_CHANNEL2_SUPPORT) +/** + * @brief Clear DAC underrun flag for DAC channel 2 + * @rmtoll SR DMAUDR2 LL_DAC_ClearFlag_DMAUDR2 + * @param DACx DAC instance + * @retval None + */ +__STATIC_INLINE void LL_DAC_ClearFlag_DMAUDR2(DAC_TypeDef *DACx) +{ + WRITE_REG(DACx->SR, LL_DAC_FLAG_DMAUDR2); +} +#endif /* DAC_CHANNEL2_SUPPORT */ + +/** + * @} + */ + +/** @defgroup DAC_LL_EF_IT_Management IT management + * @{ + */ + +/** + * @brief Enable DMA underrun interrupt for DAC channel 1 + * @rmtoll CR DMAUDRIE1 LL_DAC_EnableIT_DMAUDR1 + * @param DACx DAC instance + * @retval None + */ +__STATIC_INLINE void LL_DAC_EnableIT_DMAUDR1(DAC_TypeDef *DACx) +{ + SET_BIT(DACx->CR, LL_DAC_IT_DMAUDRIE1); +} + +#if defined(DAC_CHANNEL2_SUPPORT) +/** + * @brief Enable DMA underrun interrupt for DAC channel 2 + * @rmtoll CR DMAUDRIE2 LL_DAC_EnableIT_DMAUDR2 + * @param DACx DAC instance + * @retval None + */ +__STATIC_INLINE void LL_DAC_EnableIT_DMAUDR2(DAC_TypeDef *DACx) +{ + SET_BIT(DACx->CR, LL_DAC_IT_DMAUDRIE2); +} +#endif /* DAC_CHANNEL2_SUPPORT */ + +/** + * @brief Disable DMA underrun interrupt for DAC channel 1 + * @rmtoll CR DMAUDRIE1 LL_DAC_DisableIT_DMAUDR1 + * @param DACx DAC instance + * @retval None + */ +__STATIC_INLINE void LL_DAC_DisableIT_DMAUDR1(DAC_TypeDef *DACx) +{ + CLEAR_BIT(DACx->CR, LL_DAC_IT_DMAUDRIE1); +} + +#if defined(DAC_CHANNEL2_SUPPORT) +/** + * @brief Disable DMA underrun interrupt for DAC channel 2 + * @rmtoll CR DMAUDRIE2 LL_DAC_DisableIT_DMAUDR2 + * @param DACx DAC instance + * @retval None + */ +__STATIC_INLINE void LL_DAC_DisableIT_DMAUDR2(DAC_TypeDef *DACx) +{ + CLEAR_BIT(DACx->CR, LL_DAC_IT_DMAUDRIE2); +} +#endif /* DAC_CHANNEL2_SUPPORT */ + +/** + * @brief Get DMA underrun interrupt for DAC channel 1 + * @rmtoll CR DMAUDRIE1 LL_DAC_IsEnabledIT_DMAUDR1 + * @param DACx DAC instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DAC_IsEnabledIT_DMAUDR1(const DAC_TypeDef *DACx) +{ + return ((READ_BIT(DACx->CR, LL_DAC_IT_DMAUDRIE1) == (LL_DAC_IT_DMAUDRIE1)) ? 1UL : 0UL); +} + +#if defined(DAC_CHANNEL2_SUPPORT) +/** + * @brief Get DMA underrun interrupt for DAC channel 2 + * @rmtoll CR DMAUDRIE2 LL_DAC_IsEnabledIT_DMAUDR2 + * @param DACx DAC instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DAC_IsEnabledIT_DMAUDR2(const DAC_TypeDef *DACx) +{ + return ((READ_BIT(DACx->CR, LL_DAC_IT_DMAUDRIE2) == (LL_DAC_IT_DMAUDRIE2)) ? 
1UL : 0UL); +} +#endif /* DAC_CHANNEL2_SUPPORT */ + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup DAC_LL_EF_Init Initialization and de-initialization functions + * @{ + */ + +ErrorStatus LL_DAC_DeInit(const DAC_TypeDef *DACx); +ErrorStatus LL_DAC_Init(DAC_TypeDef *DACx, uint32_t DAC_Channel, const LL_DAC_InitTypeDef *DAC_InitStruct); +void LL_DAC_StructInit(LL_DAC_InitTypeDef *DAC_InitStruct); + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* DAC */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F4xx_LL_DAC_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_dma.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_dma.h new file mode 100644 index 00000000..c0182def --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_dma.h @@ -0,0 +1,2868 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_dma.h + * @author MCD Application Team + * @brief Header file of DMA LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_DMA_H +#define __STM32F4xx_LL_DMA_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (DMA1) || defined (DMA2) + +/** @defgroup DMA_LL DMA + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @defgroup DMA_LL_Private_Variables DMA Private Variables + * @{ + */ +/* Array used to get the DMA stream register offset versus stream index LL_DMA_STREAM_x */ +static const uint8_t STREAM_OFFSET_TAB[] = +{ + (uint8_t)(DMA1_Stream0_BASE - DMA1_BASE), + (uint8_t)(DMA1_Stream1_BASE - DMA1_BASE), + (uint8_t)(DMA1_Stream2_BASE - DMA1_BASE), + (uint8_t)(DMA1_Stream3_BASE - DMA1_BASE), + (uint8_t)(DMA1_Stream4_BASE - DMA1_BASE), + (uint8_t)(DMA1_Stream5_BASE - DMA1_BASE), + (uint8_t)(DMA1_Stream6_BASE - DMA1_BASE), + (uint8_t)(DMA1_Stream7_BASE - DMA1_BASE) +}; + +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup DMA_LL_Private_Constants DMA Private Constants + * @{ + */ +/** + * @} + */ + + +/* Private macros ------------------------------------------------------------*/ +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup DMA_LL_ES_INIT DMA Exported Init structure + * @{ + */ +typedef struct +{ + uint32_t PeriphOrM2MSrcAddress; /*!< Specifies the peripheral base address for DMA transfer + or as Source base address in case of memory to memory transfer direction. + + This parameter must be a value between Min_Data = 0 and Max_Data = 0xFFFFFFFF. 
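+                                        For a transfer to or from a peripheral, this is typically the
+                                        address of the peripheral data register (for instance, the address
+                                        returned by a peripheral LL helper such as LL_DAC_DMA_GetRegAddr(),
+                                        when one is provided); this pairing is given for illustration only.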
*/ + + uint32_t MemoryOrM2MDstAddress; /*!< Specifies the memory base address for DMA transfer + or as Destination base address in case of memory to memory transfer direction. + + This parameter must be a value between Min_Data = 0 and Max_Data = 0xFFFFFFFF. */ + + uint32_t Direction; /*!< Specifies if the data will be transferred from memory to peripheral, + from memory to memory or from peripheral to memory. + This parameter can be a value of @ref DMA_LL_EC_DIRECTION + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetDataTransferDirection(). */ + + uint32_t Mode; /*!< Specifies the normal or circular operation mode. + This parameter can be a value of @ref DMA_LL_EC_MODE + @note The circular buffer mode cannot be used if the memory to memory + data transfer direction is configured on the selected Stream + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetMode(). */ + + uint32_t PeriphOrM2MSrcIncMode; /*!< Specifies whether the Peripheral address or Source address in case of memory to memory transfer direction + is incremented or not. + This parameter can be a value of @ref DMA_LL_EC_PERIPH + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetPeriphIncMode(). */ + + uint32_t MemoryOrM2MDstIncMode; /*!< Specifies whether the Memory address or Destination address in case of memory to memory transfer direction + is incremented or not. + This parameter can be a value of @ref DMA_LL_EC_MEMORY + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetMemoryIncMode(). */ + + uint32_t PeriphOrM2MSrcDataSize; /*!< Specifies the Peripheral data size alignment or Source data size alignment (byte, half word, word) + in case of memory to memory transfer direction. + This parameter can be a value of @ref DMA_LL_EC_PDATAALIGN + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetPeriphSize(). */ + + uint32_t MemoryOrM2MDstDataSize; /*!< Specifies the Memory data size alignment or Destination data size alignment (byte, half word, word) + in case of memory to memory transfer direction. + This parameter can be a value of @ref DMA_LL_EC_MDATAALIGN + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetMemorySize(). */ + + uint32_t NbData; /*!< Specifies the number of data to transfer, in data unit. + The data unit is equal to the source buffer configuration set in PeripheralSize + or MemorySize parameters depending in the transfer direction. + This parameter must be a value between Min_Data = 0 and Max_Data = 0x0000FFFF + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetDataLength(). */ + + uint32_t Channel; /*!< Specifies the peripheral channel. + This parameter can be a value of @ref DMA_LL_EC_CHANNEL + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetChannelSelection(). */ + + uint32_t Priority; /*!< Specifies the channel priority level. + This parameter can be a value of @ref DMA_LL_EC_PRIORITY + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetStreamPriorityLevel(). */ + + uint32_t FIFOMode; /*!< Specifies if the FIFO mode or Direct mode will be used for the specified stream. 
+ This parameter can be a value of @ref DMA_LL_FIFOMODE + @note The Direct mode (FIFO mode disabled) cannot be used if the + memory-to-memory data transfer is configured on the selected stream + + This feature can be modified afterwards using unitary functions @ref LL_DMA_EnableFifoMode() or @ref LL_DMA_EnableFifoMode() . */ + + uint32_t FIFOThreshold; /*!< Specifies the FIFO threshold level. + This parameter can be a value of @ref DMA_LL_EC_FIFOTHRESHOLD + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetFIFOThreshold(). */ + + uint32_t MemBurst; /*!< Specifies the Burst transfer configuration for the memory transfers. + It specifies the amount of data to be transferred in a single non interruptible + transaction. + This parameter can be a value of @ref DMA_LL_EC_MBURST + @note The burst mode is possible only if the address Increment mode is enabled. + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetMemoryBurstxfer(). */ + + uint32_t PeriphBurst; /*!< Specifies the Burst transfer configuration for the peripheral transfers. + It specifies the amount of data to be transferred in a single non interruptible + transaction. + This parameter can be a value of @ref DMA_LL_EC_PBURST + @note The burst mode is possible only if the address Increment mode is enabled. + + This feature can be modified afterwards using unitary function @ref LL_DMA_SetPeriphBurstxfer(). */ + +} LL_DMA_InitTypeDef; +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup DMA_LL_Exported_Constants DMA Exported Constants + * @{ + */ + +/** @defgroup DMA_LL_EC_STREAM STREAM + * @{ + */ +#define LL_DMA_STREAM_0 0x00000000U +#define LL_DMA_STREAM_1 0x00000001U +#define LL_DMA_STREAM_2 0x00000002U +#define LL_DMA_STREAM_3 0x00000003U +#define LL_DMA_STREAM_4 0x00000004U +#define LL_DMA_STREAM_5 0x00000005U +#define LL_DMA_STREAM_6 0x00000006U +#define LL_DMA_STREAM_7 0x00000007U +#define LL_DMA_STREAM_ALL 0xFFFF0000U +/** + * @} + */ + +/** @defgroup DMA_LL_EC_DIRECTION DIRECTION + * @{ + */ +#define LL_DMA_DIRECTION_PERIPH_TO_MEMORY 0x00000000U /*!< Peripheral to memory direction */ +#define LL_DMA_DIRECTION_MEMORY_TO_PERIPH DMA_SxCR_DIR_0 /*!< Memory to peripheral direction */ +#define LL_DMA_DIRECTION_MEMORY_TO_MEMORY DMA_SxCR_DIR_1 /*!< Memory to memory direction */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_MODE MODE + * @{ + */ +#define LL_DMA_MODE_NORMAL 0x00000000U /*!< Normal Mode */ +#define LL_DMA_MODE_CIRCULAR DMA_SxCR_CIRC /*!< Circular Mode */ +#define LL_DMA_MODE_PFCTRL DMA_SxCR_PFCTRL /*!< Peripheral flow control mode */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_DOUBLEBUFFER_MODE DOUBLEBUFFER MODE + * @{ + */ +#define LL_DMA_DOUBLEBUFFER_MODE_DISABLE 0x00000000U /*!< Disable double buffering mode */ +#define LL_DMA_DOUBLEBUFFER_MODE_ENABLE DMA_SxCR_DBM /*!< Enable double buffering mode */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_PERIPH PERIPH + * @{ + */ +#define LL_DMA_PERIPH_NOINCREMENT 0x00000000U /*!< Peripheral increment mode Disable */ +#define LL_DMA_PERIPH_INCREMENT DMA_SxCR_PINC /*!< Peripheral increment mode Enable */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_MEMORY MEMORY + * @{ + */ +#define LL_DMA_MEMORY_NOINCREMENT 0x00000000U /*!< Memory increment mode Disable */ +#define LL_DMA_MEMORY_INCREMENT DMA_SxCR_MINC /*!< Memory increment mode Enable */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_PDATAALIGN PDATAALIGN + * @{ + */ +#define 
LL_DMA_PDATAALIGN_BYTE 0x00000000U /*!< Peripheral data alignment : Byte */ +#define LL_DMA_PDATAALIGN_HALFWORD DMA_SxCR_PSIZE_0 /*!< Peripheral data alignment : HalfWord */ +#define LL_DMA_PDATAALIGN_WORD DMA_SxCR_PSIZE_1 /*!< Peripheral data alignment : Word */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_MDATAALIGN MDATAALIGN + * @{ + */ +#define LL_DMA_MDATAALIGN_BYTE 0x00000000U /*!< Memory data alignment : Byte */ +#define LL_DMA_MDATAALIGN_HALFWORD DMA_SxCR_MSIZE_0 /*!< Memory data alignment : HalfWord */ +#define LL_DMA_MDATAALIGN_WORD DMA_SxCR_MSIZE_1 /*!< Memory data alignment : Word */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_OFFSETSIZE OFFSETSIZE + * @{ + */ +#define LL_DMA_OFFSETSIZE_PSIZE 0x00000000U /*!< Peripheral increment offset size is linked to the PSIZE */ +#define LL_DMA_OFFSETSIZE_FIXEDTO4 DMA_SxCR_PINCOS /*!< Peripheral increment offset size is fixed to 4 (32-bit alignment) */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_PRIORITY PRIORITY + * @{ + */ +#define LL_DMA_PRIORITY_LOW 0x00000000U /*!< Priority level : Low */ +#define LL_DMA_PRIORITY_MEDIUM DMA_SxCR_PL_0 /*!< Priority level : Medium */ +#define LL_DMA_PRIORITY_HIGH DMA_SxCR_PL_1 /*!< Priority level : High */ +#define LL_DMA_PRIORITY_VERYHIGH DMA_SxCR_PL /*!< Priority level : Very_High */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_CHANNEL CHANNEL + * @{ + */ +#define LL_DMA_CHANNEL_0 0x00000000U /* Select Channel0 of DMA Instance */ +#define LL_DMA_CHANNEL_1 DMA_SxCR_CHSEL_0 /* Select Channel1 of DMA Instance */ +#define LL_DMA_CHANNEL_2 DMA_SxCR_CHSEL_1 /* Select Channel2 of DMA Instance */ +#define LL_DMA_CHANNEL_3 (DMA_SxCR_CHSEL_0 | DMA_SxCR_CHSEL_1) /* Select Channel3 of DMA Instance */ +#define LL_DMA_CHANNEL_4 DMA_SxCR_CHSEL_2 /* Select Channel4 of DMA Instance */ +#define LL_DMA_CHANNEL_5 (DMA_SxCR_CHSEL_2 | DMA_SxCR_CHSEL_0) /* Select Channel5 of DMA Instance */ +#define LL_DMA_CHANNEL_6 (DMA_SxCR_CHSEL_2 | DMA_SxCR_CHSEL_1) /* Select Channel6 of DMA Instance */ +#define LL_DMA_CHANNEL_7 (DMA_SxCR_CHSEL_2 | DMA_SxCR_CHSEL_1 | DMA_SxCR_CHSEL_0) /* Select Channel7 of DMA Instance */ +#if defined (DMA_SxCR_CHSEL_3) +#define LL_DMA_CHANNEL_8 DMA_SxCR_CHSEL_3 /* Select Channel8 of DMA Instance */ +#define LL_DMA_CHANNEL_9 (DMA_SxCR_CHSEL_3 | DMA_SxCR_CHSEL_0) /* Select Channel9 of DMA Instance */ +#define LL_DMA_CHANNEL_10 (DMA_SxCR_CHSEL_3 | DMA_SxCR_CHSEL_1) /* Select Channel10 of DMA Instance */ +#define LL_DMA_CHANNEL_11 (DMA_SxCR_CHSEL_3 | DMA_SxCR_CHSEL_1 | DMA_SxCR_CHSEL_0) /* Select Channel11 of DMA Instance */ +#define LL_DMA_CHANNEL_12 (DMA_SxCR_CHSEL_3 | DMA_SxCR_CHSEL_2) /* Select Channel12 of DMA Instance */ +#define LL_DMA_CHANNEL_13 (DMA_SxCR_CHSEL_3 | DMA_SxCR_CHSEL_2 | DMA_SxCR_CHSEL_0) /* Select Channel13 of DMA Instance */ +#define LL_DMA_CHANNEL_14 (DMA_SxCR_CHSEL_3 | DMA_SxCR_CHSEL_2 | DMA_SxCR_CHSEL_1) /* Select Channel14 of DMA Instance */ +#define LL_DMA_CHANNEL_15 (DMA_SxCR_CHSEL_3 | DMA_SxCR_CHSEL_2 | DMA_SxCR_CHSEL_1 | DMA_SxCR_CHSEL_0) /* Select Channel15 of DMA Instance */ +#endif /* DMA_SxCR_CHSEL_3 */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_MBURST MBURST + * @{ + */ +#define LL_DMA_MBURST_SINGLE 0x00000000U /*!< Memory burst single transfer configuration */ +#define LL_DMA_MBURST_INC4 DMA_SxCR_MBURST_0 /*!< Memory burst of 4 beats transfer configuration */ +#define LL_DMA_MBURST_INC8 DMA_SxCR_MBURST_1 /*!< Memory burst of 8 beats transfer configuration */ +#define LL_DMA_MBURST_INC16 (DMA_SxCR_MBURST_0 | DMA_SxCR_MBURST_1) /*!< Memory burst of 16 beats transfer 
configuration */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_PBURST PBURST + * @{ + */ +#define LL_DMA_PBURST_SINGLE 0x00000000U /*!< Peripheral burst single transfer configuration */ +#define LL_DMA_PBURST_INC4 DMA_SxCR_PBURST_0 /*!< Peripheral burst of 4 beats transfer configuration */ +#define LL_DMA_PBURST_INC8 DMA_SxCR_PBURST_1 /*!< Peripheral burst of 8 beats transfer configuration */ +#define LL_DMA_PBURST_INC16 (DMA_SxCR_PBURST_0 | DMA_SxCR_PBURST_1) /*!< Peripheral burst of 16 beats transfer configuration */ +/** + * @} + */ + +/** @defgroup DMA_LL_FIFOMODE DMA_LL_FIFOMODE + * @{ + */ +#define LL_DMA_FIFOMODE_DISABLE 0x00000000U /*!< FIFO mode disable (direct mode is enabled) */ +#define LL_DMA_FIFOMODE_ENABLE DMA_SxFCR_DMDIS /*!< FIFO mode enable */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_FIFOSTATUS_0 FIFOSTATUS 0 + * @{ + */ +#define LL_DMA_FIFOSTATUS_0_25 0x00000000U /*!< 0 < fifo_level < 1/4 */ +#define LL_DMA_FIFOSTATUS_25_50 DMA_SxFCR_FS_0 /*!< 1/4 < fifo_level < 1/2 */ +#define LL_DMA_FIFOSTATUS_50_75 DMA_SxFCR_FS_1 /*!< 1/2 < fifo_level < 3/4 */ +#define LL_DMA_FIFOSTATUS_75_100 (DMA_SxFCR_FS_1 | DMA_SxFCR_FS_0) /*!< 3/4 < fifo_level < full */ +#define LL_DMA_FIFOSTATUS_EMPTY DMA_SxFCR_FS_2 /*!< FIFO is empty */ +#define LL_DMA_FIFOSTATUS_FULL (DMA_SxFCR_FS_2 | DMA_SxFCR_FS_0) /*!< FIFO is full */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_FIFOTHRESHOLD FIFOTHRESHOLD + * @{ + */ +#define LL_DMA_FIFOTHRESHOLD_1_4 0x00000000U /*!< FIFO threshold 1 quart full configuration */ +#define LL_DMA_FIFOTHRESHOLD_1_2 DMA_SxFCR_FTH_0 /*!< FIFO threshold half full configuration */ +#define LL_DMA_FIFOTHRESHOLD_3_4 DMA_SxFCR_FTH_1 /*!< FIFO threshold 3 quarts full configuration */ +#define LL_DMA_FIFOTHRESHOLD_FULL DMA_SxFCR_FTH /*!< FIFO threshold full configuration */ +/** + * @} + */ + +/** @defgroup DMA_LL_EC_CURRENTTARGETMEM CURRENTTARGETMEM + * @{ + */ +#define LL_DMA_CURRENTTARGETMEM0 0x00000000U /*!< Set CurrentTarget Memory to Memory 0 */ +#define LL_DMA_CURRENTTARGETMEM1 DMA_SxCR_CT /*!< Set CurrentTarget Memory to Memory 1 */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup DMA_LL_Exported_Macros DMA Exported Macros + * @{ + */ + +/** @defgroup DMA_LL_EM_WRITE_READ Common Write and read registers macros + * @{ + */ +/** + * @brief Write a value in DMA register + * @param __INSTANCE__ DMA Instance + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_DMA_WriteReg(__INSTANCE__, __REG__, __VALUE__) WRITE_REG(__INSTANCE__->__REG__, (__VALUE__)) + +/** + * @brief Read a value in DMA register + * @param __INSTANCE__ DMA Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_DMA_ReadReg(__INSTANCE__, __REG__) READ_REG(__INSTANCE__->__REG__) +/** + * @} + */ + +/** @defgroup DMA_LL_EM_CONVERT_DMAxCHANNELy Convert DMAxStreamy + * @{ + */ +/** + * @brief Convert DMAx_Streamy into DMAx + * @param __STREAM_INSTANCE__ DMAx_Streamy + * @retval DMAx + */ +#define __LL_DMA_GET_INSTANCE(__STREAM_INSTANCE__) \ +(((uint32_t)(__STREAM_INSTANCE__) > ((uint32_t)DMA1_Stream7)) ? DMA2 : DMA1) + +/** + * @brief Convert DMAx_Streamy into LL_DMA_STREAM_y + * @param __STREAM_INSTANCE__ DMAx_Streamy + * @retval LL_DMA_CHANNEL_y + */ +#define __LL_DMA_GET_STREAM(__STREAM_INSTANCE__) \ +(((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA1_Stream0)) ? 
LL_DMA_STREAM_0 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA2_Stream0)) ? LL_DMA_STREAM_0 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA1_Stream1)) ? LL_DMA_STREAM_1 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA2_Stream1)) ? LL_DMA_STREAM_1 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA1_Stream2)) ? LL_DMA_STREAM_2 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA2_Stream2)) ? LL_DMA_STREAM_2 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA1_Stream3)) ? LL_DMA_STREAM_3 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA2_Stream3)) ? LL_DMA_STREAM_3 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA1_Stream4)) ? LL_DMA_STREAM_4 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA2_Stream4)) ? LL_DMA_STREAM_4 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA1_Stream5)) ? LL_DMA_STREAM_5 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA2_Stream5)) ? LL_DMA_STREAM_5 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA1_Stream6)) ? LL_DMA_STREAM_6 : \ + ((uint32_t)(__STREAM_INSTANCE__) == ((uint32_t)DMA2_Stream6)) ? LL_DMA_STREAM_6 : \ + LL_DMA_STREAM_7) + +/** + * @brief Convert DMA Instance DMAx and LL_DMA_STREAM_y into DMAx_Streamy + * @param __DMA_INSTANCE__ DMAx + * @param __STREAM__ LL_DMA_STREAM_y + * @retval DMAx_Streamy + */ +#define __LL_DMA_GET_STREAM_INSTANCE(__DMA_INSTANCE__, __STREAM__) \ +((((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_0))) ? DMA1_Stream0 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA2)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_0))) ? DMA2_Stream0 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_1))) ? DMA1_Stream1 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA2)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_1))) ? DMA2_Stream1 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_2))) ? DMA1_Stream2 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA2)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_2))) ? DMA2_Stream2 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_3))) ? DMA1_Stream3 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA2)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_3))) ? DMA2_Stream3 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_4))) ? DMA1_Stream4 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA2)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_4))) ? DMA2_Stream4 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_5))) ? DMA1_Stream5 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA2)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_5))) ? DMA2_Stream5 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_6))) ? DMA1_Stream6 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA2)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_6))) ? DMA2_Stream6 : \ + (((uint32_t)(__DMA_INSTANCE__) == ((uint32_t)DMA1)) && ((uint32_t)(__STREAM__) == ((uint32_t)LL_DMA_STREAM_7))) ? 
DMA1_Stream7 : \ + DMA2_Stream7) + +/** + * @} + */ + +/** + * @} + */ + + +/* Exported functions --------------------------------------------------------*/ + /** @defgroup DMA_LL_Exported_Functions DMA Exported Functions + * @{ + */ + +/** @defgroup DMA_LL_EF_Configuration Configuration + * @{ + */ +/** + * @brief Enable DMA stream. + * @rmtoll CR EN LL_DMA_EnableStream + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableStream(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_EN); +} + +/** + * @brief Disable DMA stream. + * @rmtoll CR EN LL_DMA_DisableStream + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableStream(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_EN); +} + +/** + * @brief Check if DMA stream is enabled or disabled. + * @rmtoll CR EN LL_DMA_IsEnabledStream + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsEnabledStream(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_EN) == (DMA_SxCR_EN)); +} + +/** + * @brief Configure all parameters linked to DMA transfer. 
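+  * @note   Illustrative one-call configuration sketch for a circular
+  *         memory-to-peripheral transfer (DMA1 stream 5 is used purely as an
+  *         example; the stream and channel actually wired to a given
+  *         peripheral request are device-specific, refer to the reference
+  *         manual):
+  *         LL_DMA_ConfigTransfer(DMA1, LL_DMA_STREAM_5,
+  *                               LL_DMA_DIRECTION_MEMORY_TO_PERIPH |
+  *                               LL_DMA_MODE_CIRCULAR |
+  *                               LL_DMA_PERIPH_NOINCREMENT |
+  *                               LL_DMA_MEMORY_INCREMENT |
+  *                               LL_DMA_PDATAALIGN_HALFWORD |
+  *                               LL_DMA_MDATAALIGN_HALFWORD |
+  *                               LL_DMA_PRIORITY_HIGH);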
+ * @rmtoll CR DIR LL_DMA_ConfigTransfer\n + * CR CIRC LL_DMA_ConfigTransfer\n + * CR PINC LL_DMA_ConfigTransfer\n + * CR MINC LL_DMA_ConfigTransfer\n + * CR PSIZE LL_DMA_ConfigTransfer\n + * CR MSIZE LL_DMA_ConfigTransfer\n + * CR PL LL_DMA_ConfigTransfer\n + * CR PFCTRL LL_DMA_ConfigTransfer + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Configuration This parameter must be a combination of all the following values: + * @arg @ref LL_DMA_DIRECTION_PERIPH_TO_MEMORY or @ref LL_DMA_DIRECTION_MEMORY_TO_PERIPH or @ref LL_DMA_DIRECTION_MEMORY_TO_MEMORY + * @arg @ref LL_DMA_MODE_NORMAL or @ref LL_DMA_MODE_CIRCULAR or @ref LL_DMA_MODE_PFCTRL + * @arg @ref LL_DMA_PERIPH_INCREMENT or @ref LL_DMA_PERIPH_NOINCREMENT + * @arg @ref LL_DMA_MEMORY_INCREMENT or @ref LL_DMA_MEMORY_NOINCREMENT + * @arg @ref LL_DMA_PDATAALIGN_BYTE or @ref LL_DMA_PDATAALIGN_HALFWORD or @ref LL_DMA_PDATAALIGN_WORD + * @arg @ref LL_DMA_MDATAALIGN_BYTE or @ref LL_DMA_MDATAALIGN_HALFWORD or @ref LL_DMA_MDATAALIGN_WORD + * @arg @ref LL_DMA_PRIORITY_LOW or @ref LL_DMA_PRIORITY_MEDIUM or @ref LL_DMA_PRIORITY_HIGH or @ref LL_DMA_PRIORITY_VERYHIGH + *@retval None + */ +__STATIC_INLINE void LL_DMA_ConfigTransfer(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Configuration) +{ + MODIFY_REG(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, + DMA_SxCR_DIR | DMA_SxCR_CIRC | DMA_SxCR_PINC | DMA_SxCR_MINC | DMA_SxCR_PSIZE | DMA_SxCR_MSIZE | DMA_SxCR_PL | DMA_SxCR_PFCTRL, + Configuration); +} + +/** + * @brief Set Data transfer direction (read from peripheral or from memory). + * @rmtoll CR DIR LL_DMA_SetDataTransferDirection + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Direction This parameter can be one of the following values: + * @arg @ref LL_DMA_DIRECTION_PERIPH_TO_MEMORY + * @arg @ref LL_DMA_DIRECTION_MEMORY_TO_PERIPH + * @arg @ref LL_DMA_DIRECTION_MEMORY_TO_MEMORY + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetDataTransferDirection(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Direction) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_DIR, Direction); +} + +/** + * @brief Get Data transfer direction (read from peripheral or from memory). 
+ * @rmtoll CR DIR LL_DMA_GetDataTransferDirection + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_DIRECTION_PERIPH_TO_MEMORY + * @arg @ref LL_DMA_DIRECTION_MEMORY_TO_PERIPH + * @arg @ref LL_DMA_DIRECTION_MEMORY_TO_MEMORY + */ +__STATIC_INLINE uint32_t LL_DMA_GetDataTransferDirection(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_DIR)); +} + +/** + * @brief Set DMA mode normal, circular or peripheral flow control. + * @rmtoll CR CIRC LL_DMA_SetMode\n + * CR PFCTRL LL_DMA_SetMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Mode This parameter can be one of the following values: + * @arg @ref LL_DMA_MODE_NORMAL + * @arg @ref LL_DMA_MODE_CIRCULAR + * @arg @ref LL_DMA_MODE_PFCTRL + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetMode(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Mode) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_CIRC | DMA_SxCR_PFCTRL, Mode); +} + +/** + * @brief Get DMA mode normal, circular or peripheral flow control. + * @rmtoll CR CIRC LL_DMA_GetMode\n + * CR PFCTRL LL_DMA_GetMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_MODE_NORMAL + * @arg @ref LL_DMA_MODE_CIRCULAR + * @arg @ref LL_DMA_MODE_PFCTRL + */ +__STATIC_INLINE uint32_t LL_DMA_GetMode(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_CIRC | DMA_SxCR_PFCTRL)); +} + +/** + * @brief Set Peripheral increment mode. + * @rmtoll CR PINC LL_DMA_SetPeriphIncMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param IncrementMode This parameter can be one of the following values: + * @arg @ref LL_DMA_PERIPH_NOINCREMENT + * @arg @ref LL_DMA_PERIPH_INCREMENT + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetPeriphIncMode(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t IncrementMode) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PINC, IncrementMode); +} + +/** + * @brief Get Peripheral increment mode. 
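+  * @note   In memory-to-memory transfers the peripheral port acts as the
+  *         source, so this setting then controls the source address increment.
+  *         Illustrative sketch (example only; on this series memory-to-memory
+  *         transfers are supported by DMA2):
+  *         LL_DMA_SetPeriphIncMode(DMA2, LL_DMA_STREAM_0, LL_DMA_PERIPH_INCREMENT);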
+ * @rmtoll CR PINC LL_DMA_GetPeriphIncMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_PERIPH_NOINCREMENT + * @arg @ref LL_DMA_PERIPH_INCREMENT + */ +__STATIC_INLINE uint32_t LL_DMA_GetPeriphIncMode(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PINC)); +} + +/** + * @brief Set Memory increment mode. + * @rmtoll CR MINC LL_DMA_SetMemoryIncMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param IncrementMode This parameter can be one of the following values: + * @arg @ref LL_DMA_MEMORY_NOINCREMENT + * @arg @ref LL_DMA_MEMORY_INCREMENT + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetMemoryIncMode(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t IncrementMode) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_MINC, IncrementMode); +} + +/** + * @brief Get Memory increment mode. + * @rmtoll CR MINC LL_DMA_GetMemoryIncMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_MEMORY_NOINCREMENT + * @arg @ref LL_DMA_MEMORY_INCREMENT + */ +__STATIC_INLINE uint32_t LL_DMA_GetMemoryIncMode(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_MINC)); +} + +/** + * @brief Set Peripheral size. + * @rmtoll CR PSIZE LL_DMA_SetPeriphSize + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Size This parameter can be one of the following values: + * @arg @ref LL_DMA_PDATAALIGN_BYTE + * @arg @ref LL_DMA_PDATAALIGN_HALFWORD + * @arg @ref LL_DMA_PDATAALIGN_WORD + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetPeriphSize(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Size) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PSIZE, Size); +} + +/** + * @brief Get Peripheral size. 
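+  * @note   Illustrative sketch matching half-word accesses on both ports when
+  *         serving a 16-bit wide peripheral data register (DMA1 stream 5 is an
+  *         example value only):
+  *         LL_DMA_SetPeriphSize(DMA1, LL_DMA_STREAM_5, LL_DMA_PDATAALIGN_HALFWORD);
+  *         LL_DMA_SetMemorySize(DMA1, LL_DMA_STREAM_5, LL_DMA_MDATAALIGN_HALFWORD);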
+ * @rmtoll CR PSIZE LL_DMA_GetPeriphSize + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_PDATAALIGN_BYTE + * @arg @ref LL_DMA_PDATAALIGN_HALFWORD + * @arg @ref LL_DMA_PDATAALIGN_WORD + */ +__STATIC_INLINE uint32_t LL_DMA_GetPeriphSize(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PSIZE)); +} + +/** + * @brief Set Memory size. + * @rmtoll CR MSIZE LL_DMA_SetMemorySize + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Size This parameter can be one of the following values: + * @arg @ref LL_DMA_MDATAALIGN_BYTE + * @arg @ref LL_DMA_MDATAALIGN_HALFWORD + * @arg @ref LL_DMA_MDATAALIGN_WORD + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetMemorySize(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Size) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_MSIZE, Size); +} + +/** + * @brief Get Memory size. + * @rmtoll CR MSIZE LL_DMA_GetMemorySize + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_MDATAALIGN_BYTE + * @arg @ref LL_DMA_MDATAALIGN_HALFWORD + * @arg @ref LL_DMA_MDATAALIGN_WORD + */ +__STATIC_INLINE uint32_t LL_DMA_GetMemorySize(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_MSIZE)); +} + +/** + * @brief Set Peripheral increment offset size. + * @rmtoll CR PINCOS LL_DMA_SetIncOffsetSize + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param OffsetSize This parameter can be one of the following values: + * @arg @ref LL_DMA_OFFSETSIZE_PSIZE + * @arg @ref LL_DMA_OFFSETSIZE_FIXEDTO4 + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetIncOffsetSize(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t OffsetSize) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PINCOS, OffsetSize); +} + +/** + * @brief Get Peripheral increment offset size. 
+ * @rmtoll CR PINCOS LL_DMA_GetIncOffsetSize + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_OFFSETSIZE_PSIZE + * @arg @ref LL_DMA_OFFSETSIZE_FIXEDTO4 + */ +__STATIC_INLINE uint32_t LL_DMA_GetIncOffsetSize(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PINCOS)); +} + +/** + * @brief Set Stream priority level. + * @rmtoll CR PL LL_DMA_SetStreamPriorityLevel + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Priority This parameter can be one of the following values: + * @arg @ref LL_DMA_PRIORITY_LOW + * @arg @ref LL_DMA_PRIORITY_MEDIUM + * @arg @ref LL_DMA_PRIORITY_HIGH + * @arg @ref LL_DMA_PRIORITY_VERYHIGH + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetStreamPriorityLevel(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Priority) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PL, Priority); +} + +/** + * @brief Get Stream priority level. + * @rmtoll CR PL LL_DMA_GetStreamPriorityLevel + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_PRIORITY_LOW + * @arg @ref LL_DMA_PRIORITY_MEDIUM + * @arg @ref LL_DMA_PRIORITY_HIGH + * @arg @ref LL_DMA_PRIORITY_VERYHIGH + */ +__STATIC_INLINE uint32_t LL_DMA_GetStreamPriorityLevel(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PL)); +} + +/** + * @brief Set Number of data to transfer. + * @rmtoll NDTR NDT LL_DMA_SetDataLength + * @note This action has no effect if + * stream is enabled. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param NbData Between 0 to 0xFFFFFFFF + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetDataLength(DMA_TypeDef* DMAx, uint32_t Stream, uint32_t NbData) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->NDTR, DMA_SxNDT, NbData); +} + +/** + * @brief Get Number of data to transfer. + * @rmtoll NDTR NDT LL_DMA_GetDataLength + * @note Once the stream is enabled, the return value indicate the + * remaining bytes to be transmitted. 
+ * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Between 0 to 0xFFFFFFFF + */ +__STATIC_INLINE uint32_t LL_DMA_GetDataLength(DMA_TypeDef* DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->NDTR, DMA_SxNDT)); +} + +/** + * @brief Select Channel number associated to the Stream. + * @rmtoll CR CHSEL LL_DMA_SetChannelSelection + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_DMA_CHANNEL_0 + * @arg @ref LL_DMA_CHANNEL_1 + * @arg @ref LL_DMA_CHANNEL_2 + * @arg @ref LL_DMA_CHANNEL_3 + * @arg @ref LL_DMA_CHANNEL_4 + * @arg @ref LL_DMA_CHANNEL_5 + * @arg @ref LL_DMA_CHANNEL_6 + * @arg @ref LL_DMA_CHANNEL_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetChannelSelection(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Channel) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_CHSEL, Channel); +} + +/** + * @brief Get the Channel number associated to the Stream. + * @rmtoll CR CHSEL LL_DMA_GetChannelSelection + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_CHANNEL_0 + * @arg @ref LL_DMA_CHANNEL_1 + * @arg @ref LL_DMA_CHANNEL_2 + * @arg @ref LL_DMA_CHANNEL_3 + * @arg @ref LL_DMA_CHANNEL_4 + * @arg @ref LL_DMA_CHANNEL_5 + * @arg @ref LL_DMA_CHANNEL_6 + * @arg @ref LL_DMA_CHANNEL_7 + */ +__STATIC_INLINE uint32_t LL_DMA_GetChannelSelection(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_CHSEL)); +} + +/** + * @brief Set Memory burst transfer configuration. 
+ * @rmtoll CR MBURST LL_DMA_SetMemoryBurstxfer + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Mburst This parameter can be one of the following values: + * @arg @ref LL_DMA_MBURST_SINGLE + * @arg @ref LL_DMA_MBURST_INC4 + * @arg @ref LL_DMA_MBURST_INC8 + * @arg @ref LL_DMA_MBURST_INC16 + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetMemoryBurstxfer(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Mburst) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_MBURST, Mburst); +} + +/** + * @brief Get Memory burst transfer configuration. + * @rmtoll CR MBURST LL_DMA_GetMemoryBurstxfer + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_MBURST_SINGLE + * @arg @ref LL_DMA_MBURST_INC4 + * @arg @ref LL_DMA_MBURST_INC8 + * @arg @ref LL_DMA_MBURST_INC16 + */ +__STATIC_INLINE uint32_t LL_DMA_GetMemoryBurstxfer(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_MBURST)); +} + +/** + * @brief Set Peripheral burst transfer configuration. + * @rmtoll CR PBURST LL_DMA_SetPeriphBurstxfer + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Pburst This parameter can be one of the following values: + * @arg @ref LL_DMA_PBURST_SINGLE + * @arg @ref LL_DMA_PBURST_INC4 + * @arg @ref LL_DMA_PBURST_INC8 + * @arg @ref LL_DMA_PBURST_INC16 + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetPeriphBurstxfer(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Pburst) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PBURST, Pburst); +} + +/** + * @brief Get Peripheral burst transfer configuration. 
+ * @rmtoll CR PBURST LL_DMA_GetPeriphBurstxfer + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_PBURST_SINGLE + * @arg @ref LL_DMA_PBURST_INC4 + * @arg @ref LL_DMA_PBURST_INC8 + * @arg @ref LL_DMA_PBURST_INC16 + */ +__STATIC_INLINE uint32_t LL_DMA_GetPeriphBurstxfer(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_PBURST)); +} + +/** + * @brief Set Current target (only in double buffer mode) to Memory 1 or Memory 0. + * @rmtoll CR CT LL_DMA_SetCurrentTargetMem + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param CurrentMemory This parameter can be one of the following values: + * @arg @ref LL_DMA_CURRENTTARGETMEM0 + * @arg @ref LL_DMA_CURRENTTARGETMEM1 + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetCurrentTargetMem(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t CurrentMemory) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_CT, CurrentMemory); +} + +/** + * @brief Get Current target (only in double buffer mode). + * @rmtoll CR CT LL_DMA_GetCurrentTargetMem + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_CURRENTTARGETMEM0 + * @arg @ref LL_DMA_CURRENTTARGETMEM1 + */ +__STATIC_INLINE uint32_t LL_DMA_GetCurrentTargetMem(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_CT)); +} + +/** + * @brief Enable the double buffer mode. + * @rmtoll CR DBM LL_DMA_EnableDoubleBufferMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableDoubleBufferMode(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_DBM); +} + +/** + * @brief Disable the double buffer mode. 
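+ *        For context, a minimal double-buffer setup sketch (illustrative only:
+ *        DMA2, stream 0 and the buf0 / buf1 names are placeholders, not real
+ *        symbols) programs both memory targets before the stream is enabled:
+ * @code
+ * LL_DMA_SetMemoryAddress(DMA2, LL_DMA_STREAM_0, (uint32_t)buf0);   // memory 0 target
+ * LL_DMA_SetMemory1Address(DMA2, LL_DMA_STREAM_0, (uint32_t)buf1);  // memory 1 target
+ * LL_DMA_EnableDoubleBufferMode(DMA2, LL_DMA_STREAM_0);
+ * @endcode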
+ * @rmtoll CR DBM LL_DMA_DisableDoubleBufferMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableDoubleBufferMode(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_DBM); +} + +/** + * @brief Get FIFO status. + * @rmtoll FCR FS LL_DMA_GetFIFOStatus + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_FIFOSTATUS_0_25 + * @arg @ref LL_DMA_FIFOSTATUS_25_50 + * @arg @ref LL_DMA_FIFOSTATUS_50_75 + * @arg @ref LL_DMA_FIFOSTATUS_75_100 + * @arg @ref LL_DMA_FIFOSTATUS_EMPTY + * @arg @ref LL_DMA_FIFOSTATUS_FULL + */ +__STATIC_INLINE uint32_t LL_DMA_GetFIFOStatus(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_FS)); +} + +/** + * @brief Disable Fifo mode. + * @rmtoll FCR DMDIS LL_DMA_DisableFifoMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableFifoMode(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_DMDIS); +} + +/** + * @brief Enable Fifo mode. + * @rmtoll FCR DMDIS LL_DMA_EnableFifoMode + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableFifoMode(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_DMDIS); +} + +/** + * @brief Select FIFO threshold. 
+ * @rmtoll FCR FTH LL_DMA_SetFIFOThreshold + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Threshold This parameter can be one of the following values: + * @arg @ref LL_DMA_FIFOTHRESHOLD_1_4 + * @arg @ref LL_DMA_FIFOTHRESHOLD_1_2 + * @arg @ref LL_DMA_FIFOTHRESHOLD_3_4 + * @arg @ref LL_DMA_FIFOTHRESHOLD_FULL + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetFIFOThreshold(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Threshold) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_FTH, Threshold); +} + +/** + * @brief Get FIFO threshold. + * @rmtoll FCR FTH LL_DMA_GetFIFOThreshold + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_DMA_FIFOTHRESHOLD_1_4 + * @arg @ref LL_DMA_FIFOTHRESHOLD_1_2 + * @arg @ref LL_DMA_FIFOTHRESHOLD_3_4 + * @arg @ref LL_DMA_FIFOTHRESHOLD_FULL + */ +__STATIC_INLINE uint32_t LL_DMA_GetFIFOThreshold(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_FTH)); +} + +/** + * @brief Configure the FIFO . + * @rmtoll FCR FTH LL_DMA_ConfigFifo\n + * FCR DMDIS LL_DMA_ConfigFifo + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param FifoMode This parameter can be one of the following values: + * @arg @ref LL_DMA_FIFOMODE_ENABLE + * @arg @ref LL_DMA_FIFOMODE_DISABLE + * @param FifoThreshold This parameter can be one of the following values: + * @arg @ref LL_DMA_FIFOTHRESHOLD_1_4 + * @arg @ref LL_DMA_FIFOTHRESHOLD_1_2 + * @arg @ref LL_DMA_FIFOTHRESHOLD_3_4 + * @arg @ref LL_DMA_FIFOTHRESHOLD_FULL + * @retval None + */ +__STATIC_INLINE void LL_DMA_ConfigFifo(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t FifoMode, uint32_t FifoThreshold) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_FTH|DMA_SxFCR_DMDIS, FifoMode|FifoThreshold); +} + +/** + * @brief Configure the Source and Destination addresses. + * @note This API must not be called when the DMA stream is enabled. 
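+ * @note Illustrative usage sketch (not taken from the reference manual): a
+ *       typical memory-to-peripheral configuration built from the helpers in
+ *       this file. DMA2, stream 0, channel 4 and the tx_buffer, periph_reg_addr
+ *       and tx_length names are placeholders, not real symbols.
+ * @code
+ * LL_DMA_SetChannelSelection(DMA2, LL_DMA_STREAM_0, LL_DMA_CHANNEL_4);
+ * LL_DMA_SetDataTransferDirection(DMA2, LL_DMA_STREAM_0, LL_DMA_DIRECTION_MEMORY_TO_PERIPH);
+ * LL_DMA_ConfigAddresses(DMA2, LL_DMA_STREAM_0, (uint32_t)tx_buffer, periph_reg_addr,
+ *                        LL_DMA_DIRECTION_MEMORY_TO_PERIPH);
+ * LL_DMA_SetDataLength(DMA2, LL_DMA_STREAM_0, tx_length);
+ * @endcode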
+ * @rmtoll M0AR M0A LL_DMA_ConfigAddresses\n + * PAR PA LL_DMA_ConfigAddresses + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param SrcAddress Between 0 to 0xFFFFFFFF + * @param DstAddress Between 0 to 0xFFFFFFFF + * @param Direction This parameter can be one of the following values: + * @arg @ref LL_DMA_DIRECTION_PERIPH_TO_MEMORY + * @arg @ref LL_DMA_DIRECTION_MEMORY_TO_PERIPH + * @arg @ref LL_DMA_DIRECTION_MEMORY_TO_MEMORY + * @retval None + */ +__STATIC_INLINE void LL_DMA_ConfigAddresses(DMA_TypeDef* DMAx, uint32_t Stream, uint32_t SrcAddress, uint32_t DstAddress, uint32_t Direction) +{ + /* Direction Memory to Periph */ + if (Direction == LL_DMA_DIRECTION_MEMORY_TO_PERIPH) + { + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M0AR, SrcAddress); + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->PAR, DstAddress); + } + /* Direction Periph to Memory and Memory to Memory */ + else + { + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->PAR, SrcAddress); + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M0AR, DstAddress); + } +} + +/** + * @brief Set the Memory address. + * @rmtoll M0AR M0A LL_DMA_SetMemoryAddress + * @note Interface used for direction LL_DMA_DIRECTION_PERIPH_TO_MEMORY or LL_DMA_DIRECTION_MEMORY_TO_PERIPH only. + * @note This API must not be called when the DMA channel is enabled. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param MemoryAddress Between 0 to 0xFFFFFFFF + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetMemoryAddress(DMA_TypeDef* DMAx, uint32_t Stream, uint32_t MemoryAddress) +{ + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M0AR, MemoryAddress); +} + +/** + * @brief Set the Peripheral address. + * @rmtoll PAR PA LL_DMA_SetPeriphAddress + * @note Interface used for direction LL_DMA_DIRECTION_PERIPH_TO_MEMORY or LL_DMA_DIRECTION_MEMORY_TO_PERIPH only. + * @note This API must not be called when the DMA channel is enabled. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param PeriphAddress Between 0 to 0xFFFFFFFF + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetPeriphAddress(DMA_TypeDef* DMAx, uint32_t Stream, uint32_t PeriphAddress) +{ + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->PAR, PeriphAddress); +} + +/** + * @brief Get the Memory address. + * @rmtoll M0AR M0A LL_DMA_GetMemoryAddress + * @note Interface used for direction LL_DMA_DIRECTION_PERIPH_TO_MEMORY or LL_DMA_DIRECTION_MEMORY_TO_PERIPH only. 
+ * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Between 0 to 0xFFFFFFFF + */ +__STATIC_INLINE uint32_t LL_DMA_GetMemoryAddress(DMA_TypeDef* DMAx, uint32_t Stream) +{ + return (READ_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M0AR)); +} + +/** + * @brief Get the Peripheral address. + * @rmtoll PAR PA LL_DMA_GetPeriphAddress + * @note Interface used for direction LL_DMA_DIRECTION_PERIPH_TO_MEMORY or LL_DMA_DIRECTION_MEMORY_TO_PERIPH only. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Between 0 to 0xFFFFFFFF + */ +__STATIC_INLINE uint32_t LL_DMA_GetPeriphAddress(DMA_TypeDef* DMAx, uint32_t Stream) +{ + return (READ_REG(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->PAR)); +} + +/** + * @brief Set the Memory to Memory Source address. + * @rmtoll PAR PA LL_DMA_SetM2MSrcAddress + * @note Interface used for direction LL_DMA_DIRECTION_MEMORY_TO_MEMORY only. + * @note This API must not be called when the DMA channel is enabled. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param MemoryAddress Between 0 to 0xFFFFFFFF + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetM2MSrcAddress(DMA_TypeDef* DMAx, uint32_t Stream, uint32_t MemoryAddress) +{ + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->PAR, MemoryAddress); +} + +/** + * @brief Set the Memory to Memory Destination address. + * @rmtoll M0AR M0A LL_DMA_SetM2MDstAddress + * @note Interface used for direction LL_DMA_DIRECTION_MEMORY_TO_MEMORY only. + * @note This API must not be called when the DMA channel is enabled. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param MemoryAddress Between 0 to 0xFFFFFFFF + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetM2MDstAddress(DMA_TypeDef* DMAx, uint32_t Stream, uint32_t MemoryAddress) + { + WRITE_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M0AR, MemoryAddress); + } + +/** + * @brief Get the Memory to Memory Source address. + * @rmtoll PAR PA LL_DMA_GetM2MSrcAddress + * @note Interface used for direction LL_DMA_DIRECTION_MEMORY_TO_MEMORY only. 
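+ * @note In memory-to-memory mode the source address is programmed into PAR and
+ *       the destination into M0AR (see LL_DMA_SetM2MSrcAddress and
+ *       LL_DMA_SetM2MDstAddress above). Illustrative sketch; DMA2 (memory to
+ *       memory is only available on DMA2 on this family) and the src_buf /
+ *       dst_buf names are placeholders:
+ * @code
+ * LL_DMA_SetDataTransferDirection(DMA2, LL_DMA_STREAM_1, LL_DMA_DIRECTION_MEMORY_TO_MEMORY);
+ * LL_DMA_SetM2MSrcAddress(DMA2, LL_DMA_STREAM_1, (uint32_t)src_buf);
+ * LL_DMA_SetM2MDstAddress(DMA2, LL_DMA_STREAM_1, (uint32_t)dst_buf);
+ * @endcode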
+ * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Between 0 to 0xFFFFFFFF + */ +__STATIC_INLINE uint32_t LL_DMA_GetM2MSrcAddress(DMA_TypeDef* DMAx, uint32_t Stream) + { + return (READ_REG(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->PAR)); + } + +/** + * @brief Get the Memory to Memory Destination address. + * @rmtoll M0AR M0A LL_DMA_GetM2MDstAddress + * @note Interface used for direction LL_DMA_DIRECTION_MEMORY_TO_MEMORY only. + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Between 0 to 0xFFFFFFFF + */ +__STATIC_INLINE uint32_t LL_DMA_GetM2MDstAddress(DMA_TypeDef* DMAx, uint32_t Stream) +{ + return (READ_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M0AR)); +} + +/** + * @brief Set Memory 1 address (used in case of Double buffer mode). + * @rmtoll M1AR M1A LL_DMA_SetMemory1Address + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @param Address Between 0 to 0xFFFFFFFF + * @retval None + */ +__STATIC_INLINE void LL_DMA_SetMemory1Address(DMA_TypeDef *DMAx, uint32_t Stream, uint32_t Address) +{ + MODIFY_REG(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M1AR, DMA_SxM1AR_M1A, Address); +} + +/** + * @brief Get Memory 1 address (used in case of Double buffer mode). + * @rmtoll M1AR M1A LL_DMA_GetMemory1Address + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval Between 0 to 0xFFFFFFFF + */ +__STATIC_INLINE uint32_t LL_DMA_GetMemory1Address(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->M1AR); +} + +/** + * @} + */ + +/** @defgroup DMA_LL_EF_FLAG_Management FLAG_Management + * @{ + */ + +/** + * @brief Get Stream 0 half transfer flag. + * @rmtoll LISR HTIF0 LL_DMA_IsActiveFlag_HT0 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT0(DMA_TypeDef *DMAx) +{ + return (READ_BIT(DMAx->LISR ,DMA_LISR_HTIF0)==(DMA_LISR_HTIF0)); +} + +/** + * @brief Get Stream 1 half transfer flag. + * @rmtoll LISR HTIF1 LL_DMA_IsActiveFlag_HT1 + * @param DMAx DMAx Instance + * @retval State of bit (1 or 0). 
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT1(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_HTIF1)==(DMA_LISR_HTIF1));
+}
+
+/**
+ * @brief Get Stream 2 half transfer flag.
+ * @rmtoll LISR HTIF2 LL_DMA_IsActiveFlag_HT2
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT2(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_HTIF2)==(DMA_LISR_HTIF2));
+}
+
+/**
+ * @brief Get Stream 3 half transfer flag.
+ * @rmtoll LISR HTIF3 LL_DMA_IsActiveFlag_HT3
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT3(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_HTIF3)==(DMA_LISR_HTIF3));
+}
+
+/**
+ * @brief Get Stream 4 half transfer flag.
+ * @rmtoll HISR HTIF4 LL_DMA_IsActiveFlag_HT4
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT4(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_HTIF4)==(DMA_HISR_HTIF4));
+}
+
+/**
+ * @brief Get Stream 5 half transfer flag.
+ * @rmtoll HISR HTIF5 LL_DMA_IsActiveFlag_HT5
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT5(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_HTIF5)==(DMA_HISR_HTIF5));
+}
+
+/**
+ * @brief Get Stream 6 half transfer flag.
+ * @rmtoll HISR HTIF6 LL_DMA_IsActiveFlag_HT6
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT6(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_HTIF6)==(DMA_HISR_HTIF6));
+}
+
+/**
+ * @brief Get Stream 7 half transfer flag.
+ * @rmtoll HISR HTIF7 LL_DMA_IsActiveFlag_HT7
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_HT7(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_HTIF7)==(DMA_HISR_HTIF7));
+}
+
+/**
+ * @brief Get Stream 0 transfer complete flag.
+ * @rmtoll LISR TCIF0 LL_DMA_IsActiveFlag_TC0
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC0(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_TCIF0)==(DMA_LISR_TCIF0));
+}
+
+/**
+ * @brief Get Stream 1 transfer complete flag.
+ * @rmtoll LISR TCIF1 LL_DMA_IsActiveFlag_TC1
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC1(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_TCIF1)==(DMA_LISR_TCIF1));
+}
+
+/**
+ * @brief Get Stream 2 transfer complete flag.
+ * @rmtoll LISR TCIF2 LL_DMA_IsActiveFlag_TC2
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC2(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_TCIF2)==(DMA_LISR_TCIF2));
+}
+
+/**
+ * @brief Get Stream 3 transfer complete flag.
+ * @rmtoll LISR TCIF3 LL_DMA_IsActiveFlag_TC3
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC3(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_TCIF3)==(DMA_LISR_TCIF3));
+}
+
+/**
+ * @brief Get Stream 4 transfer complete flag.
+ * @rmtoll HISR TCIF4 LL_DMA_IsActiveFlag_TC4
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC4(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_TCIF4)==(DMA_HISR_TCIF4));
+}
+
+/**
+ * @brief Get Stream 5 transfer complete flag.
+ * @rmtoll HISR TCIF5 LL_DMA_IsActiveFlag_TC5
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC5(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_TCIF5)==(DMA_HISR_TCIF5));
+}
+
+/**
+ * @brief Get Stream 6 transfer complete flag.
+ * @rmtoll HISR TCIF6 LL_DMA_IsActiveFlag_TC6
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC6(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_TCIF6)==(DMA_HISR_TCIF6));
+}
+
+/**
+ * @brief Get Stream 7 transfer complete flag.
+ * @rmtoll HISR TCIF7 LL_DMA_IsActiveFlag_TC7
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TC7(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_TCIF7)==(DMA_HISR_TCIF7));
+}
+
+/**
+ * @brief Get Stream 0 transfer error flag.
+ * @rmtoll LISR TEIF0 LL_DMA_IsActiveFlag_TE0
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE0(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_TEIF0)==(DMA_LISR_TEIF0));
+}
+
+/**
+ * @brief Get Stream 1 transfer error flag.
+ * @rmtoll LISR TEIF1 LL_DMA_IsActiveFlag_TE1
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE1(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_TEIF1)==(DMA_LISR_TEIF1));
+}
+
+/**
+ * @brief Get Stream 2 transfer error flag.
+ * @rmtoll LISR TEIF2 LL_DMA_IsActiveFlag_TE2
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE2(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_TEIF2)==(DMA_LISR_TEIF2));
+}
+
+/**
+ * @brief Get Stream 3 transfer error flag.
+ * @rmtoll LISR TEIF3 LL_DMA_IsActiveFlag_TE3
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE3(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_TEIF3)==(DMA_LISR_TEIF3));
+}
+
+/**
+ * @brief Get Stream 4 transfer error flag.
+ * @rmtoll HISR TEIF4 LL_DMA_IsActiveFlag_TE4
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE4(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_TEIF4)==(DMA_HISR_TEIF4));
+}
+
+/**
+ * @brief Get Stream 5 transfer error flag.
+ * @rmtoll HISR TEIF5 LL_DMA_IsActiveFlag_TE5
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE5(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_TEIF5)==(DMA_HISR_TEIF5));
+}
+
+/**
+ * @brief Get Stream 6 transfer error flag.
+ * @rmtoll HISR TEIF6 LL_DMA_IsActiveFlag_TE6
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE6(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_TEIF6)==(DMA_HISR_TEIF6));
+}
+
+/**
+ * @brief Get Stream 7 transfer error flag.
+ * @rmtoll HISR TEIF7 LL_DMA_IsActiveFlag_TE7
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_TE7(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_TEIF7)==(DMA_HISR_TEIF7));
+}
+
+/**
+ * @brief Get Stream 0 direct mode error flag.
+ * @rmtoll LISR DMEIF0 LL_DMA_IsActiveFlag_DME0
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME0(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_DMEIF0)==(DMA_LISR_DMEIF0));
+}
+
+/**
+ * @brief Get Stream 1 direct mode error flag.
+ * @rmtoll LISR DMEIF1 LL_DMA_IsActiveFlag_DME1
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME1(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_DMEIF1)==(DMA_LISR_DMEIF1));
+}
+
+/**
+ * @brief Get Stream 2 direct mode error flag.
+ * @rmtoll LISR DMEIF2 LL_DMA_IsActiveFlag_DME2
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME2(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_DMEIF2)==(DMA_LISR_DMEIF2));
+}
+
+/**
+ * @brief Get Stream 3 direct mode error flag.
+ * @rmtoll LISR DMEIF3 LL_DMA_IsActiveFlag_DME3
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME3(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_DMEIF3)==(DMA_LISR_DMEIF3));
+}
+
+/**
+ * @brief Get Stream 4 direct mode error flag.
+ * @rmtoll HISR DMEIF4 LL_DMA_IsActiveFlag_DME4
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME4(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_DMEIF4)==(DMA_HISR_DMEIF4));
+}
+
+/**
+ * @brief Get Stream 5 direct mode error flag.
+ * @rmtoll HISR DMEIF5 LL_DMA_IsActiveFlag_DME5
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME5(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_DMEIF5)==(DMA_HISR_DMEIF5));
+}
+
+/**
+ * @brief Get Stream 6 direct mode error flag.
+ * @rmtoll HISR DMEIF6 LL_DMA_IsActiveFlag_DME6
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME6(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_DMEIF6)==(DMA_HISR_DMEIF6));
+}
+
+/**
+ * @brief Get Stream 7 direct mode error flag.
+ * @rmtoll HISR DMEIF7 LL_DMA_IsActiveFlag_DME7
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_DME7(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_DMEIF7)==(DMA_HISR_DMEIF7));
+}
+
+/**
+ * @brief Get Stream 0 FIFO error flag.
+ * @rmtoll LISR FEIF0 LL_DMA_IsActiveFlag_FE0
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE0(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_FEIF0)==(DMA_LISR_FEIF0));
+}
+
+/**
+ * @brief Get Stream 1 FIFO error flag.
+ * @rmtoll LISR FEIF1 LL_DMA_IsActiveFlag_FE1
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE1(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_FEIF1)==(DMA_LISR_FEIF1));
+}
+
+/**
+ * @brief Get Stream 2 FIFO error flag.
+ * @rmtoll LISR FEIF2 LL_DMA_IsActiveFlag_FE2
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE2(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_FEIF2)==(DMA_LISR_FEIF2));
+}
+
+/**
+ * @brief Get Stream 3 FIFO error flag.
+ * @rmtoll LISR FEIF3 LL_DMA_IsActiveFlag_FE3
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE3(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->LISR ,DMA_LISR_FEIF3)==(DMA_LISR_FEIF3));
+}
+
+/**
+ * @brief Get Stream 4 FIFO error flag.
+ * @rmtoll HISR FEIF4 LL_DMA_IsActiveFlag_FE4
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE4(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_FEIF4)==(DMA_HISR_FEIF4));
+}
+
+/**
+ * @brief Get Stream 5 FIFO error flag.
+ * @rmtoll HISR FEIF5 LL_DMA_IsActiveFlag_FE5
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE5(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_FEIF5)==(DMA_HISR_FEIF5));
+}
+
+/**
+ * @brief Get Stream 6 FIFO error flag.
+ * @rmtoll HISR FEIF6 LL_DMA_IsActiveFlag_FE6
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE6(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_FEIF6)==(DMA_HISR_FEIF6));
+}
+
+/**
+ * @brief Get Stream 7 FIFO error flag.
+ * @rmtoll HISR FEIF7 LL_DMA_IsActiveFlag_FE7
+ * @param DMAx DMAx Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_DMA_IsActiveFlag_FE7(DMA_TypeDef *DMAx)
+{
+ return (READ_BIT(DMAx->HISR ,DMA_HISR_FEIF7)==(DMA_HISR_FEIF7));
+}
+
+/**
+ * @brief Clear Stream 0 half transfer flag.
+ * @rmtoll LIFCR CHTIF0 LL_DMA_ClearFlag_HT0
+ * @param DMAx DMAx Instance
+ * @retval None
+ */
+__STATIC_INLINE void LL_DMA_ClearFlag_HT0(DMA_TypeDef *DMAx)
+{
+ WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CHTIF0);
+}
+
+/**
+ * @brief Clear Stream 1 half transfer flag.
+ * @rmtoll LIFCR CHTIF1 LL_DMA_ClearFlag_HT1
+ * @param DMAx DMAx Instance
+ * @retval None
+ */
+__STATIC_INLINE void LL_DMA_ClearFlag_HT1(DMA_TypeDef *DMAx)
+{
+ WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CHTIF1);
+}
+
+/**
+ * @brief Clear Stream 2 half transfer flag.
+ * @rmtoll LIFCR CHTIF2 LL_DMA_ClearFlag_HT2
+ * @param DMAx DMAx Instance
+ * @retval None
+ */
+__STATIC_INLINE void LL_DMA_ClearFlag_HT2(DMA_TypeDef *DMAx)
+{
+ WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CHTIF2);
+}
+
+/**
+ * @brief Clear Stream 3 half transfer flag.
+ * @rmtoll LIFCR CHTIF3 LL_DMA_ClearFlag_HT3
+ * @param DMAx DMAx Instance
+ * @retval None
+ */
+__STATIC_INLINE void LL_DMA_ClearFlag_HT3(DMA_TypeDef *DMAx)
+{
+ WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CHTIF3);
+}
+
+/**
+ * @brief Clear Stream 4 half transfer flag.
+ * @rmtoll HIFCR CHTIF4 LL_DMA_ClearFlag_HT4
+ * @param DMAx DMAx Instance
+ * @retval None
+ */
+__STATIC_INLINE void LL_DMA_ClearFlag_HT4(DMA_TypeDef *DMAx)
+{
+ WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CHTIF4);
+}
+
+/**
+ * @brief Clear Stream 5 half transfer flag.
+ * @rmtoll HIFCR CHTIF5 LL_DMA_ClearFlag_HT5
+ * @param DMAx DMAx Instance
+ * @retval None
+ */
+__STATIC_INLINE void LL_DMA_ClearFlag_HT5(DMA_TypeDef *DMAx)
+{
+ WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CHTIF5);
+}
+
+/**
+ * @brief Clear Stream 6 half transfer flag.
+ * @rmtoll HIFCR CHTIF6 LL_DMA_ClearFlag_HT6 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_HT6(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CHTIF6); +} + +/** + * @brief Clear Stream 7 half transfer flag. + * @rmtoll HIFCR CHTIF7 LL_DMA_ClearFlag_HT7 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_HT7(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CHTIF7); +} + +/** + * @brief Clear Stream 0 transfer complete flag. + * @rmtoll LIFCR CTCIF0 LL_DMA_ClearFlag_TC0 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC0(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTCIF0); +} + +/** + * @brief Clear Stream 1 transfer complete flag. + * @rmtoll LIFCR CTCIF1 LL_DMA_ClearFlag_TC1 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC1(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTCIF1); +} + +/** + * @brief Clear Stream 2 transfer complete flag. + * @rmtoll LIFCR CTCIF2 LL_DMA_ClearFlag_TC2 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC2(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTCIF2); +} + +/** + * @brief Clear Stream 3 transfer complete flag. + * @rmtoll LIFCR CTCIF3 LL_DMA_ClearFlag_TC3 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC3(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTCIF3); +} + +/** + * @brief Clear Stream 4 transfer complete flag. + * @rmtoll HIFCR CTCIF4 LL_DMA_ClearFlag_TC4 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC4(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTCIF4); +} + +/** + * @brief Clear Stream 5 transfer complete flag. + * @rmtoll HIFCR CTCIF5 LL_DMA_ClearFlag_TC5 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC5(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTCIF5); +} + +/** + * @brief Clear Stream 6 transfer complete flag. + * @rmtoll HIFCR CTCIF6 LL_DMA_ClearFlag_TC6 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC6(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTCIF6); +} + +/** + * @brief Clear Stream 7 transfer complete flag. + * @rmtoll HIFCR CTCIF7 LL_DMA_ClearFlag_TC7 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TC7(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTCIF7); +} + +/** + * @brief Clear Stream 0 transfer error flag. + * @rmtoll LIFCR CTEIF0 LL_DMA_ClearFlag_TE0 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE0(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTEIF0); +} + +/** + * @brief Clear Stream 1 transfer error flag. + * @rmtoll LIFCR CTEIF1 LL_DMA_ClearFlag_TE1 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE1(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTEIF1); +} + +/** + * @brief Clear Stream 2 transfer error flag. + * @rmtoll LIFCR CTEIF2 LL_DMA_ClearFlag_TE2 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE2(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTEIF2); +} + +/** + * @brief Clear Stream 3 transfer error flag. 
+ * @rmtoll LIFCR CTEIF3 LL_DMA_ClearFlag_TE3 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE3(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CTEIF3); +} + +/** + * @brief Clear Stream 4 transfer error flag. + * @rmtoll HIFCR CTEIF4 LL_DMA_ClearFlag_TE4 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE4(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTEIF4); +} + +/** + * @brief Clear Stream 5 transfer error flag. + * @rmtoll HIFCR CTEIF5 LL_DMA_ClearFlag_TE5 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE5(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTEIF5); +} + +/** + * @brief Clear Stream 6 transfer error flag. + * @rmtoll HIFCR CTEIF6 LL_DMA_ClearFlag_TE6 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE6(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTEIF6); +} + +/** + * @brief Clear Stream 7 transfer error flag. + * @rmtoll HIFCR CTEIF7 LL_DMA_ClearFlag_TE7 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_TE7(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CTEIF7); +} + +/** + * @brief Clear Stream 0 direct mode error flag. + * @rmtoll LIFCR CDMEIF0 LL_DMA_ClearFlag_DME0 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME0(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CDMEIF0); +} + +/** + * @brief Clear Stream 1 direct mode error flag. + * @rmtoll LIFCR CDMEIF1 LL_DMA_ClearFlag_DME1 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME1(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CDMEIF1); +} + +/** + * @brief Clear Stream 2 direct mode error flag. + * @rmtoll LIFCR CDMEIF2 LL_DMA_ClearFlag_DME2 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME2(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CDMEIF2); +} + +/** + * @brief Clear Stream 3 direct mode error flag. + * @rmtoll LIFCR CDMEIF3 LL_DMA_ClearFlag_DME3 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME3(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CDMEIF3); +} + +/** + * @brief Clear Stream 4 direct mode error flag. + * @rmtoll HIFCR CDMEIF4 LL_DMA_ClearFlag_DME4 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME4(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CDMEIF4); +} + +/** + * @brief Clear Stream 5 direct mode error flag. + * @rmtoll HIFCR CDMEIF5 LL_DMA_ClearFlag_DME5 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME5(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CDMEIF5); +} + +/** + * @brief Clear Stream 6 direct mode error flag. + * @rmtoll HIFCR CDMEIF6 LL_DMA_ClearFlag_DME6 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME6(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CDMEIF6); +} + +/** + * @brief Clear Stream 7 direct mode error flag. + * @rmtoll HIFCR CDMEIF7 LL_DMA_ClearFlag_DME7 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_DME7(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CDMEIF7); +} + +/** + * @brief Clear Stream 0 FIFO error flag. 
+ * @rmtoll LIFCR CFEIF0 LL_DMA_ClearFlag_FE0 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE0(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CFEIF0); +} + +/** + * @brief Clear Stream 1 FIFO error flag. + * @rmtoll LIFCR CFEIF1 LL_DMA_ClearFlag_FE1 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE1(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CFEIF1); +} + +/** + * @brief Clear Stream 2 FIFO error flag. + * @rmtoll LIFCR CFEIF2 LL_DMA_ClearFlag_FE2 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE2(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CFEIF2); +} + +/** + * @brief Clear Stream 3 FIFO error flag. + * @rmtoll LIFCR CFEIF3 LL_DMA_ClearFlag_FE3 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE3(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->LIFCR , DMA_LIFCR_CFEIF3); +} + +/** + * @brief Clear Stream 4 FIFO error flag. + * @rmtoll HIFCR CFEIF4 LL_DMA_ClearFlag_FE4 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE4(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CFEIF4); +} + +/** + * @brief Clear Stream 5 FIFO error flag. + * @rmtoll HIFCR CFEIF5 LL_DMA_ClearFlag_FE5 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE5(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CFEIF5); +} + +/** + * @brief Clear Stream 6 FIFO error flag. + * @rmtoll HIFCR CFEIF6 LL_DMA_ClearFlag_FE6 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE6(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CFEIF6); +} + +/** + * @brief Clear Stream 7 FIFO error flag. + * @rmtoll HIFCR CFEIF7 LL_DMA_ClearFlag_FE7 + * @param DMAx DMAx Instance + * @retval None + */ +__STATIC_INLINE void LL_DMA_ClearFlag_FE7(DMA_TypeDef *DMAx) +{ + WRITE_REG(DMAx->HIFCR , DMA_HIFCR_CFEIF7); +} + +/** + * @} + */ + +/** @defgroup DMA_LL_EF_IT_Management IT_Management + * @{ + */ + +/** + * @brief Enable Half transfer interrupt. + * @rmtoll CR HTIE LL_DMA_EnableIT_HT + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableIT_HT(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_HTIE); +} + +/** + * @brief Enable Transfer error interrupt. + * @rmtoll CR TEIE LL_DMA_EnableIT_TE + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableIT_TE(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_TEIE); +} + +/** + * @brief Enable Transfer complete interrupt. 
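+ *        A common pattern (illustrative sketch only; DMA2 and stream 0 are
+ *        placeholders) is to enable this interrupt before enabling the stream,
+ *        then to check and clear the matching flag from the stream IRQ handler:
+ * @code
+ * LL_DMA_EnableIT_TC(DMA2, LL_DMA_STREAM_0);
+ * LL_DMA_EnableStream(DMA2, LL_DMA_STREAM_0);
+ *
+ * // inside the DMA2 stream 0 IRQ handler
+ * if (LL_DMA_IsActiveFlag_TC0(DMA2))
+ * {
+ *   LL_DMA_ClearFlag_TC0(DMA2);
+ * }
+ * @endcode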
+ * @rmtoll CR TCIE LL_DMA_EnableIT_TC + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableIT_TC(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_TCIE); +} + +/** + * @brief Enable Direct mode error interrupt. + * @rmtoll CR DMEIE LL_DMA_EnableIT_DME + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableIT_DME(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_DMEIE); +} + +/** + * @brief Enable FIFO error interrupt. + * @rmtoll FCR FEIE LL_DMA_EnableIT_FE + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_EnableIT_FE(DMA_TypeDef *DMAx, uint32_t Stream) +{ + SET_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_FEIE); +} + +/** + * @brief Disable Half transfer interrupt. + * @rmtoll CR HTIE LL_DMA_DisableIT_HT + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableIT_HT(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_HTIE); +} + +/** + * @brief Disable Transfer error interrupt. + * @rmtoll CR TEIE LL_DMA_DisableIT_TE + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableIT_TE(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_TEIE); +} + +/** + * @brief Disable Transfer complete interrupt. 
+ * @rmtoll CR TCIE LL_DMA_DisableIT_TC + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableIT_TC(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_TCIE); +} + +/** + * @brief Disable Direct mode error interrupt. + * @rmtoll CR DMEIE LL_DMA_DisableIT_DME + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableIT_DME(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_DMEIE); +} + +/** + * @brief Disable FIFO error interrupt. + * @rmtoll FCR FEIE LL_DMA_DisableIT_FE + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval None + */ +__STATIC_INLINE void LL_DMA_DisableIT_FE(DMA_TypeDef *DMAx, uint32_t Stream) +{ + CLEAR_BIT(((DMA_Stream_TypeDef *)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_FEIE); +} + +/** + * @brief Check if Half transfer interrupt is enabled. + * @rmtoll CR HTIE LL_DMA_IsEnabledIT_HT + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsEnabledIT_HT(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_HTIE) == DMA_SxCR_HTIE); +} + +/** + * @brief Check if Transfer error nterrup is enabled. + * @rmtoll CR TEIE LL_DMA_IsEnabledIT_TE + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsEnabledIT_TE(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_TEIE) == DMA_SxCR_TEIE); +} + +/** + * @brief Check if Transfer complete interrupt is enabled. 
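 *
 * @note  (Editor's example, not part of the ST sources) Typical use is to gate flag
 *        handling in the stream IRQ handler on the corresponding enable bit. DMA1
 *        stream 5 is an arbitrary choice, and the per-stream flag helpers are assumed
 *        from the FLAG_Management group earlier in this file:
 * @code
 *   void DMA1_Stream5_IRQHandler(void)
 *   {
 *     if (LL_DMA_IsEnabledIT_TC(DMA1, LL_DMA_STREAM_5) && LL_DMA_IsActiveFlag_TC5(DMA1))
 *     {
 *       LL_DMA_ClearFlag_TC5(DMA1);   // acknowledge the transfer-complete event
 *       // ... hand the finished buffer to its owner ...
 *     }
 *   }
 * @endcode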
+ * @rmtoll CR TCIE LL_DMA_IsEnabledIT_TC + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsEnabledIT_TC(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_TCIE) == DMA_SxCR_TCIE); +} + +/** + * @brief Check if Direct mode error interrupt is enabled. + * @rmtoll CR DMEIE LL_DMA_IsEnabledIT_DME + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsEnabledIT_DME(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->CR, DMA_SxCR_DMEIE) == DMA_SxCR_DMEIE); +} + +/** + * @brief Check if FIFO error interrupt is enabled. + * @rmtoll FCR FEIE LL_DMA_IsEnabledIT_FE + * @param DMAx DMAx Instance + * @param Stream This parameter can be one of the following values: + * @arg @ref LL_DMA_STREAM_0 + * @arg @ref LL_DMA_STREAM_1 + * @arg @ref LL_DMA_STREAM_2 + * @arg @ref LL_DMA_STREAM_3 + * @arg @ref LL_DMA_STREAM_4 + * @arg @ref LL_DMA_STREAM_5 + * @arg @ref LL_DMA_STREAM_6 + * @arg @ref LL_DMA_STREAM_7 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_DMA_IsEnabledIT_FE(DMA_TypeDef *DMAx, uint32_t Stream) +{ + return (READ_BIT(((DMA_Stream_TypeDef*)((uint32_t)((uint32_t)DMAx + STREAM_OFFSET_TAB[Stream])))->FCR, DMA_SxFCR_FEIE) == DMA_SxFCR_FEIE); +} + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup DMA_LL_EF_Init Initialization and de-initialization functions + * @{ + */ + +uint32_t LL_DMA_Init(DMA_TypeDef *DMAx, uint32_t Stream, LL_DMA_InitTypeDef *DMA_InitStruct); +uint32_t LL_DMA_DeInit(DMA_TypeDef *DMAx, uint32_t Stream); +void LL_DMA_StructInit(LL_DMA_InitTypeDef *DMA_InitStruct); + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* DMA1 || DMA2 */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_DMA_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_exti.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_exti.h new file mode 100644 index 00000000..65ab6918 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_exti.h @@ -0,0 +1,954 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_exti.h + * @author MCD Application Team + * @brief Header file of EXTI LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. 
+ * If no LICENSE file comes with this software, it is provided AS-IS.Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_EXTI_H +#define __STM32F4xx_LL_EXTI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (EXTI) + +/** @defgroup EXTI_LL EXTI + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private Macros ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup EXTI_LL_Private_Macros EXTI Private Macros + * @{ + */ +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup EXTI_LL_ES_INIT EXTI Exported Init structure + * @{ + */ +typedef struct +{ + + uint32_t Line_0_31; /*!< Specifies the EXTI lines to be enabled or disabled for Lines in range 0 to 31 + This parameter can be any combination of @ref EXTI_LL_EC_LINE */ + + FunctionalState LineCommand; /*!< Specifies the new state of the selected EXTI lines. + This parameter can be set either to ENABLE or DISABLE */ + + uint8_t Mode; /*!< Specifies the mode for the EXTI lines. + This parameter can be a value of @ref EXTI_LL_EC_MODE. */ + + uint8_t Trigger; /*!< Specifies the trigger signal active edge for the EXTI lines. + This parameter can be a value of @ref EXTI_LL_EC_TRIGGER. 
*/ +} LL_EXTI_InitTypeDef; + +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup EXTI_LL_Exported_Constants EXTI Exported Constants + * @{ + */ + +/** @defgroup EXTI_LL_EC_LINE LINE + * @{ + */ +#define LL_EXTI_LINE_0 EXTI_IMR_IM0 /*!< Extended line 0 */ +#define LL_EXTI_LINE_1 EXTI_IMR_IM1 /*!< Extended line 1 */ +#define LL_EXTI_LINE_2 EXTI_IMR_IM2 /*!< Extended line 2 */ +#define LL_EXTI_LINE_3 EXTI_IMR_IM3 /*!< Extended line 3 */ +#define LL_EXTI_LINE_4 EXTI_IMR_IM4 /*!< Extended line 4 */ +#define LL_EXTI_LINE_5 EXTI_IMR_IM5 /*!< Extended line 5 */ +#define LL_EXTI_LINE_6 EXTI_IMR_IM6 /*!< Extended line 6 */ +#define LL_EXTI_LINE_7 EXTI_IMR_IM7 /*!< Extended line 7 */ +#define LL_EXTI_LINE_8 EXTI_IMR_IM8 /*!< Extended line 8 */ +#define LL_EXTI_LINE_9 EXTI_IMR_IM9 /*!< Extended line 9 */ +#define LL_EXTI_LINE_10 EXTI_IMR_IM10 /*!< Extended line 10 */ +#define LL_EXTI_LINE_11 EXTI_IMR_IM11 /*!< Extended line 11 */ +#define LL_EXTI_LINE_12 EXTI_IMR_IM12 /*!< Extended line 12 */ +#define LL_EXTI_LINE_13 EXTI_IMR_IM13 /*!< Extended line 13 */ +#define LL_EXTI_LINE_14 EXTI_IMR_IM14 /*!< Extended line 14 */ +#define LL_EXTI_LINE_15 EXTI_IMR_IM15 /*!< Extended line 15 */ +#if defined(EXTI_IMR_IM16) +#define LL_EXTI_LINE_16 EXTI_IMR_IM16 /*!< Extended line 16 */ +#endif +#define LL_EXTI_LINE_17 EXTI_IMR_IM17 /*!< Extended line 17 */ +#if defined(EXTI_IMR_IM18) +#define LL_EXTI_LINE_18 EXTI_IMR_IM18 /*!< Extended line 18 */ +#endif +#define LL_EXTI_LINE_19 EXTI_IMR_IM19 /*!< Extended line 19 */ +#if defined(EXTI_IMR_IM20) +#define LL_EXTI_LINE_20 EXTI_IMR_IM20 /*!< Extended line 20 */ +#endif +#if defined(EXTI_IMR_IM21) +#define LL_EXTI_LINE_21 EXTI_IMR_IM21 /*!< Extended line 21 */ +#endif +#if defined(EXTI_IMR_IM22) +#define LL_EXTI_LINE_22 EXTI_IMR_IM22 /*!< Extended line 22 */ +#endif +#if defined(EXTI_IMR_IM23) +#define LL_EXTI_LINE_23 EXTI_IMR_IM23 /*!< Extended line 23 */ +#endif +#if defined(EXTI_IMR_IM24) +#define LL_EXTI_LINE_24 EXTI_IMR_IM24 /*!< Extended line 24 */ +#endif +#if defined(EXTI_IMR_IM25) +#define LL_EXTI_LINE_25 EXTI_IMR_IM25 /*!< Extended line 25 */ +#endif +#if defined(EXTI_IMR_IM26) +#define LL_EXTI_LINE_26 EXTI_IMR_IM26 /*!< Extended line 26 */ +#endif +#if defined(EXTI_IMR_IM27) +#define LL_EXTI_LINE_27 EXTI_IMR_IM27 /*!< Extended line 27 */ +#endif +#if defined(EXTI_IMR_IM28) +#define LL_EXTI_LINE_28 EXTI_IMR_IM28 /*!< Extended line 28 */ +#endif +#if defined(EXTI_IMR_IM29) +#define LL_EXTI_LINE_29 EXTI_IMR_IM29 /*!< Extended line 29 */ +#endif +#if defined(EXTI_IMR_IM30) +#define LL_EXTI_LINE_30 EXTI_IMR_IM30 /*!< Extended line 30 */ +#endif +#if defined(EXTI_IMR_IM31) +#define LL_EXTI_LINE_31 EXTI_IMR_IM31 /*!< Extended line 31 */ +#endif +#define LL_EXTI_LINE_ALL_0_31 EXTI_IMR_IM /*!< All Extended line not reserved*/ + + +#define LL_EXTI_LINE_ALL ((uint32_t)0xFFFFFFFFU) /*!< All Extended line */ + +#if defined(USE_FULL_LL_DRIVER) +#define LL_EXTI_LINE_NONE ((uint32_t)0x00000000U) /*!< None Extended line */ +#endif /*USE_FULL_LL_DRIVER*/ + +/** + * @} + */ +#if defined(USE_FULL_LL_DRIVER) + +/** @defgroup EXTI_LL_EC_MODE Mode + * @{ + */ +#define LL_EXTI_MODE_IT ((uint8_t)0x00U) /*!< Interrupt Mode */ +#define LL_EXTI_MODE_EVENT ((uint8_t)0x01U) /*!< Event Mode */ +#define LL_EXTI_MODE_IT_EVENT ((uint8_t)0x02U) /*!< Interrupt & Event Mode */ +/** + * @} + */ + +/** @defgroup EXTI_LL_EC_TRIGGER Edge Trigger + * @{ + */ +#define LL_EXTI_TRIGGER_NONE ((uint8_t)0x00U) 
/*!< No Trigger Mode */ +#define LL_EXTI_TRIGGER_RISING ((uint8_t)0x01U) /*!< Trigger Rising Mode */ +#define LL_EXTI_TRIGGER_FALLING ((uint8_t)0x02U) /*!< Trigger Falling Mode */ +#define LL_EXTI_TRIGGER_RISING_FALLING ((uint8_t)0x03U) /*!< Trigger Rising & Falling Mode */ + +/** + * @} + */ + + +#endif /*USE_FULL_LL_DRIVER*/ + + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup EXTI_LL_Exported_Macros EXTI Exported Macros + * @{ + */ + +/** @defgroup EXTI_LL_EM_WRITE_READ Common Write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in EXTI register + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_EXTI_WriteReg(__REG__, __VALUE__) WRITE_REG(EXTI->__REG__, (__VALUE__)) + +/** + * @brief Read a value in EXTI register + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_EXTI_ReadReg(__REG__) READ_REG(EXTI->__REG__) +/** + * @} + */ + + +/** + * @} + */ + + + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup EXTI_LL_Exported_Functions EXTI Exported Functions + * @{ + */ +/** @defgroup EXTI_LL_EF_IT_Management IT_Management + * @{ + */ + +/** + * @brief Enable ExtiLine Interrupt request for Lines in range 0 to 31 + * @note The reset value for the direct or internal lines (see RM) + * is set to 1 in order to enable the interrupt by default. + * Bits are set automatically at Power on. + * @rmtoll IMR IMx LL_EXTI_EnableIT_0_31 + * @param ExtiLine This parameter can be one of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_17 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @arg @ref LL_EXTI_LINE_23(*) + * @arg @ref LL_EXTI_LINE_ALL_0_31 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_EnableIT_0_31(uint32_t ExtiLine) +{ + SET_BIT(EXTI->IMR, ExtiLine); +} + +/** + * @brief Disable ExtiLine Interrupt request for Lines in range 0 to 31 + * @note The reset value for the direct or internal lines (see RM) + * is set to 1 in order to enable the interrupt by default. + * Bits are set automatically at Power on. 
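 *
 * @note  (Editor's example, not part of the ST sources) A common pattern is to mask a
 *        line while its trigger is being reconfigured and unmask it afterwards. Line 13
 *        is an arbitrary choice; the trigger and flag helpers used here are defined
 *        later in this file.
 * @code
 *   LL_EXTI_DisableIT_0_31(LL_EXTI_LINE_13);          // mask the line
 *   LL_EXTI_EnableFallingTrig_0_31(LL_EXTI_LINE_13);  // change the active edge
 *   LL_EXTI_ClearFlag_0_31(LL_EXTI_LINE_13);          // drop any stale pending event
 *   LL_EXTI_EnableIT_0_31(LL_EXTI_LINE_13);           // unmask again
 * @endcode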
+ * @rmtoll IMR IMx LL_EXTI_DisableIT_0_31 + * @param ExtiLine This parameter can be one of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_17 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @arg @ref LL_EXTI_LINE_23(*) + * @arg @ref LL_EXTI_LINE_ALL_0_31 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_DisableIT_0_31(uint32_t ExtiLine) +{ + CLEAR_BIT(EXTI->IMR, ExtiLine); +} + + +/** + * @brief Indicate if ExtiLine Interrupt request is enabled for Lines in range 0 to 31 + * @note The reset value for the direct or internal lines (see RM) + * is set to 1 in order to enable the interrupt by default. + * Bits are set automatically at Power on. + * @rmtoll IMR IMx LL_EXTI_IsEnabledIT_0_31 + * @param ExtiLine This parameter can be one of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_17 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @arg @ref LL_EXTI_LINE_23(*) + * @arg @ref LL_EXTI_LINE_ALL_0_31 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval State of bit (1 or 0). 
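 *
 * @note  (Editor's example, not part of the ST sources) Handy as a guard before firing a
 *        software interrupt (LL_EXTI_GenerateSWI_0_31 is defined later in this file;
 *        line 0 is an arbitrary choice):
 * @code
 *   if (LL_EXTI_IsEnabledIT_0_31(LL_EXTI_LINE_0))
 *   {
 *     LL_EXTI_GenerateSWI_0_31(LL_EXTI_LINE_0);   // sets the pending bit for line 0
 *   }
 * @endcode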
+ */ +__STATIC_INLINE uint32_t LL_EXTI_IsEnabledIT_0_31(uint32_t ExtiLine) +{ + return (READ_BIT(EXTI->IMR, ExtiLine) == (ExtiLine)); +} + + +/** + * @} + */ + +/** @defgroup EXTI_LL_EF_Event_Management Event_Management + * @{ + */ + +/** + * @brief Enable ExtiLine Event request for Lines in range 0 to 31 + * @rmtoll EMR EMx LL_EXTI_EnableEvent_0_31 + * @param ExtiLine This parameter can be one of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_17 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @arg @ref LL_EXTI_LINE_23(*) + * @arg @ref LL_EXTI_LINE_ALL_0_31 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_EnableEvent_0_31(uint32_t ExtiLine) +{ + SET_BIT(EXTI->EMR, ExtiLine); + +} + + +/** + * @brief Disable ExtiLine Event request for Lines in range 0 to 31 + * @rmtoll EMR EMx LL_EXTI_DisableEvent_0_31 + * @param ExtiLine This parameter can be one of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_17 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @arg @ref LL_EXTI_LINE_23(*) + * @arg @ref LL_EXTI_LINE_ALL_0_31 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_DisableEvent_0_31(uint32_t ExtiLine) +{ + CLEAR_BIT(EXTI->EMR, ExtiLine); +} + + +/** + * @brief Indicate if ExtiLine Event request is enabled for Lines in range 0 to 31 + * @rmtoll EMR EMx LL_EXTI_IsEnabledEvent_0_31 + * @param ExtiLine This parameter can be one of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_17 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @arg @ref LL_EXTI_LINE_23(*) + * 
@arg @ref LL_EXTI_LINE_ALL_0_31 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_EXTI_IsEnabledEvent_0_31(uint32_t ExtiLine) +{ + return (READ_BIT(EXTI->EMR, ExtiLine) == (ExtiLine)); + +} + + +/** + * @} + */ + +/** @defgroup EXTI_LL_EF_Rising_Trigger_Management Rising_Trigger_Management + * @{ + */ + +/** + * @brief Enable ExtiLine Rising Edge Trigger for Lines in range 0 to 31 + * @note The configurable wakeup lines are edge-triggered. No glitch must be + * generated on these lines. If a rising edge on a configurable interrupt + * line occurs during a write operation in the EXTI_RTSR register, the + * pending bit is not set. + * Rising and falling edge triggers can be set for + * the same interrupt line. In this case, both generate a trigger + * condition. + * @rmtoll RTSR RTx LL_EXTI_EnableRisingTrig_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_EnableRisingTrig_0_31(uint32_t ExtiLine) +{ + SET_BIT(EXTI->RTSR, ExtiLine); + +} + + +/** + * @brief Disable ExtiLine Rising Edge Trigger for Lines in range 0 to 31 + * @note The configurable wakeup lines are edge-triggered. No glitch must be + * generated on these lines. If a rising edge on a configurable interrupt + * line occurs during a write operation in the EXTI_RTSR register, the + * pending bit is not set. + * Rising and falling edge triggers can be set for + * the same interrupt line. In this case, both generate a trigger + * condition. 
+ * @rmtoll RTSR RTx LL_EXTI_DisableRisingTrig_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_DisableRisingTrig_0_31(uint32_t ExtiLine) +{ + CLEAR_BIT(EXTI->RTSR, ExtiLine); + +} + + +/** + * @brief Check if rising edge trigger is enabled for Lines in range 0 to 31 + * @rmtoll RTSR RTx LL_EXTI_IsEnabledRisingTrig_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_EXTI_IsEnabledRisingTrig_0_31(uint32_t ExtiLine) +{ + return (READ_BIT(EXTI->RTSR, ExtiLine) == (ExtiLine)); +} + + +/** + * @} + */ + +/** @defgroup EXTI_LL_EF_Falling_Trigger_Management Falling_Trigger_Management + * @{ + */ + +/** + * @brief Enable ExtiLine Falling Edge Trigger for Lines in range 0 to 31 + * @note The configurable wakeup lines are edge-triggered. No glitch must be + * generated on these lines. If a falling edge on a configurable interrupt + * line occurs during a write operation in the EXTI_FTSR register, the + * pending bit is not set. + * Rising and falling edge triggers can be set for + * the same interrupt line. In this case, both generate a trigger + * condition. 
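 *
 * @note  (Editor's example, not part of the ST sources) Sketch of a falling-edge-only
 *        interrupt on line 13; the SYSCFG/GPIO routing of a pin onto the line is
 *        assumed to be configured elsewhere.
 * @code
 *   LL_EXTI_DisableRisingTrig_0_31(LL_EXTI_LINE_13);  // react to the falling edge only
 *   LL_EXTI_EnableFallingTrig_0_31(LL_EXTI_LINE_13);
 *   LL_EXTI_EnableIT_0_31(LL_EXTI_LINE_13);
 * @endcode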
+ * @rmtoll FTSR FTx LL_EXTI_EnableFallingTrig_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_EnableFallingTrig_0_31(uint32_t ExtiLine) +{ + SET_BIT(EXTI->FTSR, ExtiLine); +} + + +/** + * @brief Disable ExtiLine Falling Edge Trigger for Lines in range 0 to 31 + * @note The configurable wakeup lines are edge-triggered. No glitch must be + * generated on these lines. If a Falling edge on a configurable interrupt + * line occurs during a write operation in the EXTI_FTSR register, the + * pending bit is not set. + * Rising and falling edge triggers can be set for the same interrupt line. + * In this case, both generate a trigger condition. + * @rmtoll FTSR FTx LL_EXTI_DisableFallingTrig_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_DisableFallingTrig_0_31(uint32_t ExtiLine) +{ + CLEAR_BIT(EXTI->FTSR, ExtiLine); +} + + +/** + * @brief Check if falling edge trigger is enabled for Lines in range 0 to 31 + * @rmtoll FTSR FTx LL_EXTI_IsEnabledFallingTrig_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * 
@retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_EXTI_IsEnabledFallingTrig_0_31(uint32_t ExtiLine) +{ + return (READ_BIT(EXTI->FTSR, ExtiLine) == (ExtiLine)); +} + + +/** + * @} + */ + +/** @defgroup EXTI_LL_EF_Software_Interrupt_Management Software_Interrupt_Management + * @{ + */ + +/** + * @brief Generate a software Interrupt Event for Lines in range 0 to 31 + * @note If the interrupt is enabled on this line in the EXTI_IMR, writing a 1 to + * this bit when it is at '0' sets the corresponding pending bit in EXTI_PR + * resulting in an interrupt request generation. + * This bit is cleared by clearing the corresponding bit in the EXTI_PR + * register (by writing a 1 into the bit) + * @rmtoll SWIER SWIx LL_EXTI_GenerateSWI_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_GenerateSWI_0_31(uint32_t ExtiLine) +{ + SET_BIT(EXTI->SWIER, ExtiLine); +} + + +/** + * @} + */ + +/** @defgroup EXTI_LL_EF_Flag_Management Flag_Management + * @{ + */ + +/** + * @brief Check if the ExtLine Flag is set or not for Lines in range 0 to 31 + * @note This bit is set when the selected edge event arrives on the interrupt + * line. This bit is cleared by writing a 1 to the bit. + * @rmtoll PR PIFx LL_EXTI_IsActiveFlag_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_EXTI_IsActiveFlag_0_31(uint32_t ExtiLine) +{ + return (READ_BIT(EXTI->PR, ExtiLine) == (ExtiLine)); +} + + +/** + * @brief Read ExtLine Combination Flag for Lines in range 0 to 31 + * @note This bit is set when the selected edge event arrives on the interrupt + * line. This bit is cleared by writing a 1 to the bit. 
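 *
 * @note  (Editor's example, not part of the ST sources) Reading the combined flags is
 *        handy in handlers that serve several lines, e.g. EXTI lines 5..9. The handler
 *        name and dispatch are illustrative only; LL_EXTI_ClearFlag_0_31 is defined
 *        below in this file.
 * @code
 *   void EXTI9_5_IRQHandler(void)
 *   {
 *     uint32_t pending = LL_EXTI_ReadFlag_0_31(LL_EXTI_LINE_5 | LL_EXTI_LINE_6 |
 *                                              LL_EXTI_LINE_7 | LL_EXTI_LINE_8 |
 *                                              LL_EXTI_LINE_9);
 *     LL_EXTI_ClearFlag_0_31(pending);   // acknowledge everything that fired
 *     // ... dispatch on the bits set in 'pending' ...
 *   }
 * @endcode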
+ * @rmtoll PR PIFx LL_EXTI_ReadFlag_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval @note This bit is set when the selected edge event arrives on the interrupt + */ +__STATIC_INLINE uint32_t LL_EXTI_ReadFlag_0_31(uint32_t ExtiLine) +{ + return (uint32_t)(READ_BIT(EXTI->PR, ExtiLine)); +} + + +/** + * @brief Clear ExtLine Flags for Lines in range 0 to 31 + * @note This bit is set when the selected edge event arrives on the interrupt + * line. This bit is cleared by writing a 1 to the bit. + * @rmtoll PR PIFx LL_EXTI_ClearFlag_0_31 + * @param ExtiLine This parameter can be a combination of the following values: + * @arg @ref LL_EXTI_LINE_0 + * @arg @ref LL_EXTI_LINE_1 + * @arg @ref LL_EXTI_LINE_2 + * @arg @ref LL_EXTI_LINE_3 + * @arg @ref LL_EXTI_LINE_4 + * @arg @ref LL_EXTI_LINE_5 + * @arg @ref LL_EXTI_LINE_6 + * @arg @ref LL_EXTI_LINE_7 + * @arg @ref LL_EXTI_LINE_8 + * @arg @ref LL_EXTI_LINE_9 + * @arg @ref LL_EXTI_LINE_10 + * @arg @ref LL_EXTI_LINE_11 + * @arg @ref LL_EXTI_LINE_12 + * @arg @ref LL_EXTI_LINE_13 + * @arg @ref LL_EXTI_LINE_14 + * @arg @ref LL_EXTI_LINE_15 + * @arg @ref LL_EXTI_LINE_16 + * @arg @ref LL_EXTI_LINE_18 + * @arg @ref LL_EXTI_LINE_19(*) + * @arg @ref LL_EXTI_LINE_20(*) + * @arg @ref LL_EXTI_LINE_21 + * @arg @ref LL_EXTI_LINE_22 + * @note (*): Available in some devices + * @note Please check each device line mapping for EXTI Line availability + * @retval None + */ +__STATIC_INLINE void LL_EXTI_ClearFlag_0_31(uint32_t ExtiLine) +{ + WRITE_REG(EXTI->PR, ExtiLine); +} + + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup EXTI_LL_EF_Init Initialization and de-initialization functions + * @{ + */ + +uint32_t LL_EXTI_Init(LL_EXTI_InitTypeDef *EXTI_InitStruct); +uint32_t LL_EXTI_DeInit(void); +void LL_EXTI_StructInit(LL_EXTI_InitTypeDef *EXTI_InitStruct); + + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* EXTI */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_EXTI_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_gpio.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_gpio.h new file mode 100644 index 00000000..6bee7fd1 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_gpio.h @@ -0,0 +1,981 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_gpio.h + * @author MCD Application Team + * @brief Header file of GPIO LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. 
+ * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_GPIO_H +#define __STM32F4xx_LL_GPIO_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (GPIOA) || defined (GPIOB) || defined (GPIOC) || defined (GPIOD) || defined (GPIOE) || defined (GPIOF) || defined (GPIOG) || defined (GPIOH) || defined (GPIOI) || defined (GPIOJ) || defined (GPIOK) + +/** @defgroup GPIO_LL GPIO + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup GPIO_LL_Private_Macros GPIO Private Macros + * @{ + */ + +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ + +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup GPIO_LL_ES_INIT GPIO Exported Init structures + * @{ + */ + +/** + * @brief LL GPIO Init Structure definition + */ +typedef struct +{ + uint32_t Pin; /*!< Specifies the GPIO pins to be configured. + This parameter can be any value of @ref GPIO_LL_EC_PIN */ + + uint32_t Mode; /*!< Specifies the operating mode for the selected pins. + This parameter can be a value of @ref GPIO_LL_EC_MODE. + + GPIO HW configuration can be modified afterwards using unitary function @ref LL_GPIO_SetPinMode().*/ + + uint32_t Speed; /*!< Specifies the speed for the selected pins. + This parameter can be a value of @ref GPIO_LL_EC_SPEED. + + GPIO HW configuration can be modified afterwards using unitary function @ref LL_GPIO_SetPinSpeed().*/ + + uint32_t OutputType; /*!< Specifies the operating output type for the selected pins. + This parameter can be a value of @ref GPIO_LL_EC_OUTPUT. + + GPIO HW configuration can be modified afterwards using unitary function @ref LL_GPIO_SetPinOutputType().*/ + + uint32_t Pull; /*!< Specifies the operating Pull-up/Pull down for the selected pins. + This parameter can be a value of @ref GPIO_LL_EC_PULL. + + GPIO HW configuration can be modified afterwards using unitary function @ref LL_GPIO_SetPinPull().*/ + + uint32_t Alternate; /*!< Specifies the Peripheral to be connected to the selected pins. + This parameter can be a value of @ref GPIO_LL_EC_AF. 
+ + GPIO HW configuration can be modified afterwards using unitary function @ref LL_GPIO_SetAFPin_0_7() and LL_GPIO_SetAFPin_8_15().*/ +} LL_GPIO_InitTypeDef; + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup GPIO_LL_Exported_Constants GPIO Exported Constants + * @{ + */ + +/** @defgroup GPIO_LL_EC_PIN PIN + * @{ + */ +#define LL_GPIO_PIN_0 GPIO_BSRR_BS_0 /*!< Select pin 0 */ +#define LL_GPIO_PIN_1 GPIO_BSRR_BS_1 /*!< Select pin 1 */ +#define LL_GPIO_PIN_2 GPIO_BSRR_BS_2 /*!< Select pin 2 */ +#define LL_GPIO_PIN_3 GPIO_BSRR_BS_3 /*!< Select pin 3 */ +#define LL_GPIO_PIN_4 GPIO_BSRR_BS_4 /*!< Select pin 4 */ +#define LL_GPIO_PIN_5 GPIO_BSRR_BS_5 /*!< Select pin 5 */ +#define LL_GPIO_PIN_6 GPIO_BSRR_BS_6 /*!< Select pin 6 */ +#define LL_GPIO_PIN_7 GPIO_BSRR_BS_7 /*!< Select pin 7 */ +#define LL_GPIO_PIN_8 GPIO_BSRR_BS_8 /*!< Select pin 8 */ +#define LL_GPIO_PIN_9 GPIO_BSRR_BS_9 /*!< Select pin 9 */ +#define LL_GPIO_PIN_10 GPIO_BSRR_BS_10 /*!< Select pin 10 */ +#define LL_GPIO_PIN_11 GPIO_BSRR_BS_11 /*!< Select pin 11 */ +#define LL_GPIO_PIN_12 GPIO_BSRR_BS_12 /*!< Select pin 12 */ +#define LL_GPIO_PIN_13 GPIO_BSRR_BS_13 /*!< Select pin 13 */ +#define LL_GPIO_PIN_14 GPIO_BSRR_BS_14 /*!< Select pin 14 */ +#define LL_GPIO_PIN_15 GPIO_BSRR_BS_15 /*!< Select pin 15 */ +#define LL_GPIO_PIN_ALL (GPIO_BSRR_BS_0 | GPIO_BSRR_BS_1 | GPIO_BSRR_BS_2 | \ + GPIO_BSRR_BS_3 | GPIO_BSRR_BS_4 | GPIO_BSRR_BS_5 | \ + GPIO_BSRR_BS_6 | GPIO_BSRR_BS_7 | GPIO_BSRR_BS_8 | \ + GPIO_BSRR_BS_9 | GPIO_BSRR_BS_10 | GPIO_BSRR_BS_11 | \ + GPIO_BSRR_BS_12 | GPIO_BSRR_BS_13 | GPIO_BSRR_BS_14 | \ + GPIO_BSRR_BS_15) /*!< Select all pins */ +/** + * @} + */ + +/** @defgroup GPIO_LL_EC_MODE Mode + * @{ + */ +#define LL_GPIO_MODE_INPUT (0x00000000U) /*!< Select input mode */ +#define LL_GPIO_MODE_OUTPUT GPIO_MODER_MODER0_0 /*!< Select output mode */ +#define LL_GPIO_MODE_ALTERNATE GPIO_MODER_MODER0_1 /*!< Select alternate function mode */ +#define LL_GPIO_MODE_ANALOG GPIO_MODER_MODER0 /*!< Select analog mode */ +/** + * @} + */ + +/** @defgroup GPIO_LL_EC_OUTPUT Output Type + * @{ + */ +#define LL_GPIO_OUTPUT_PUSHPULL (0x00000000U) /*!< Select push-pull as output type */ +#define LL_GPIO_OUTPUT_OPENDRAIN GPIO_OTYPER_OT_0 /*!< Select open-drain as output type */ +/** + * @} + */ + +/** @defgroup GPIO_LL_EC_SPEED Output Speed + * @{ + */ +#define LL_GPIO_SPEED_FREQ_LOW (0x00000000U) /*!< Select I/O low output speed */ +#define LL_GPIO_SPEED_FREQ_MEDIUM GPIO_OSPEEDER_OSPEEDR0_0 /*!< Select I/O medium output speed */ +#define LL_GPIO_SPEED_FREQ_HIGH GPIO_OSPEEDER_OSPEEDR0_1 /*!< Select I/O fast output speed */ +#define LL_GPIO_SPEED_FREQ_VERY_HIGH GPIO_OSPEEDER_OSPEEDR0 /*!< Select I/O high output speed */ +/** + * @} + */ + +/** @defgroup GPIO_LL_EC_PULL Pull Up Pull Down + * @{ + */ +#define LL_GPIO_PULL_NO (0x00000000U) /*!< Select I/O no pull */ +#define LL_GPIO_PULL_UP GPIO_PUPDR_PUPDR0_0 /*!< Select I/O pull up */ +#define LL_GPIO_PULL_DOWN GPIO_PUPDR_PUPDR0_1 /*!< Select I/O pull down */ +/** + * @} + */ + +/** @defgroup GPIO_LL_EC_AF Alternate Function + * @{ + */ +#define LL_GPIO_AF_0 (0x0000000U) /*!< Select alternate function 0 */ +#define LL_GPIO_AF_1 (0x0000001U) /*!< Select alternate function 1 */ +#define LL_GPIO_AF_2 (0x0000002U) /*!< Select alternate function 2 */ +#define LL_GPIO_AF_3 (0x0000003U) /*!< Select alternate function 3 */ +#define LL_GPIO_AF_4 (0x0000004U) /*!< Select alternate function 4 */ +#define 
LL_GPIO_AF_5 (0x0000005U) /*!< Select alternate function 5 */ +#define LL_GPIO_AF_6 (0x0000006U) /*!< Select alternate function 6 */ +#define LL_GPIO_AF_7 (0x0000007U) /*!< Select alternate function 7 */ +#define LL_GPIO_AF_8 (0x0000008U) /*!< Select alternate function 8 */ +#define LL_GPIO_AF_9 (0x0000009U) /*!< Select alternate function 9 */ +#define LL_GPIO_AF_10 (0x000000AU) /*!< Select alternate function 10 */ +#define LL_GPIO_AF_11 (0x000000BU) /*!< Select alternate function 11 */ +#define LL_GPIO_AF_12 (0x000000CU) /*!< Select alternate function 12 */ +#define LL_GPIO_AF_13 (0x000000DU) /*!< Select alternate function 13 */ +#define LL_GPIO_AF_14 (0x000000EU) /*!< Select alternate function 14 */ +#define LL_GPIO_AF_15 (0x000000FU) /*!< Select alternate function 15 */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup GPIO_LL_Exported_Macros GPIO Exported Macros + * @{ + */ + +/** @defgroup GPIO_LL_EM_WRITE_READ Common Write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in GPIO register + * @param __INSTANCE__ GPIO Instance + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_GPIO_WriteReg(__INSTANCE__, __REG__, __VALUE__) WRITE_REG(__INSTANCE__->__REG__, (__VALUE__)) + +/** + * @brief Read a value in GPIO register + * @param __INSTANCE__ GPIO Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_GPIO_ReadReg(__INSTANCE__, __REG__) READ_REG(__INSTANCE__->__REG__) +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup GPIO_LL_Exported_Functions GPIO Exported Functions + * @{ + */ + +/** @defgroup GPIO_LL_EF_Port_Configuration Port Configuration + * @{ + */ + +/** + * @brief Configure gpio mode for a dedicated pin on dedicated port. + * @note I/O mode can be Input mode, General purpose output, Alternate function mode or Analog. + * @note Warning: only one pin can be passed as parameter. + * @rmtoll MODER MODEy LL_GPIO_SetPinMode + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @param Mode This parameter can be one of the following values: + * @arg @ref LL_GPIO_MODE_INPUT + * @arg @ref LL_GPIO_MODE_OUTPUT + * @arg @ref LL_GPIO_MODE_ALTERNATE + * @arg @ref LL_GPIO_MODE_ANALOG + * @retval None + */ +__STATIC_INLINE void LL_GPIO_SetPinMode(GPIO_TypeDef *GPIOx, uint32_t Pin, uint32_t Mode) +{ + MODIFY_REG(GPIOx->MODER, (GPIO_MODER_MODER0 << (POSITION_VAL(Pin) * 2U)), (Mode << (POSITION_VAL(Pin) * 2U))); +} + +/** + * @brief Return gpio mode for a dedicated pin on dedicated port. + * @note I/O mode can be Input mode, General purpose output, Alternate function mode or Analog. + * @note Warning: only one pin can be passed as parameter. 
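 *
 * @note  (Editor's example, not part of the ST sources) Set-then-read-back sketch for a
 *        single pin; GPIOA pin 5 is an arbitrary choice and the port clock is assumed
 *        to be enabled already.
 * @code
 *   LL_GPIO_SetPinMode(GPIOA, LL_GPIO_PIN_5, LL_GPIO_MODE_OUTPUT);
 *   if (LL_GPIO_GetPinMode(GPIOA, LL_GPIO_PIN_5) != LL_GPIO_MODE_OUTPUT)
 *   {
 *     // configuration did not stick (e.g. pin locked), handle the error
 *   }
 * @endcode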
+ * @rmtoll MODER MODEy LL_GPIO_GetPinMode + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @retval Returned value can be one of the following values: + * @arg @ref LL_GPIO_MODE_INPUT + * @arg @ref LL_GPIO_MODE_OUTPUT + * @arg @ref LL_GPIO_MODE_ALTERNATE + * @arg @ref LL_GPIO_MODE_ANALOG + */ +__STATIC_INLINE uint32_t LL_GPIO_GetPinMode(GPIO_TypeDef *GPIOx, uint32_t Pin) +{ + return (uint32_t)(READ_BIT(GPIOx->MODER, + (GPIO_MODER_MODER0 << (POSITION_VAL(Pin) * 2U))) >> (POSITION_VAL(Pin) * 2U)); +} + +/** + * @brief Configure gpio output type for several pins on dedicated port. + * @note Output type as to be set when gpio pin is in output or + * alternate modes. Possible type are Push-pull or Open-drain. + * @rmtoll OTYPER OTy LL_GPIO_SetPinOutputType + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @param OutputType This parameter can be one of the following values: + * @arg @ref LL_GPIO_OUTPUT_PUSHPULL + * @arg @ref LL_GPIO_OUTPUT_OPENDRAIN + * @retval None + */ +__STATIC_INLINE void LL_GPIO_SetPinOutputType(GPIO_TypeDef *GPIOx, uint32_t PinMask, uint32_t OutputType) +{ + MODIFY_REG(GPIOx->OTYPER, PinMask, (PinMask * OutputType)); +} + +/** + * @brief Return gpio output type for several pins on dedicated port. + * @note Output type as to be set when gpio pin is in output or + * alternate modes. Possible type are Push-pull or Open-drain. + * @note Warning: only one pin can be passed as parameter. + * @rmtoll OTYPER OTy LL_GPIO_GetPinOutputType + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval Returned value can be one of the following values: + * @arg @ref LL_GPIO_OUTPUT_PUSHPULL + * @arg @ref LL_GPIO_OUTPUT_OPENDRAIN + */ +__STATIC_INLINE uint32_t LL_GPIO_GetPinOutputType(GPIO_TypeDef *GPIOx, uint32_t Pin) +{ + return (uint32_t)(READ_BIT(GPIOx->OTYPER, Pin) >> POSITION_VAL(Pin)); +} + +/** + * @brief Configure gpio speed for a dedicated pin on dedicated port. + * @note I/O speed can be Low, Medium, Fast or High speed. 
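 *
 * @note  (Editor's example, not part of the ST sources) Output type and speed are set
 *        per pin with separate calls; an open-drain, very-high-speed output on GPIOB
 *        pin 8 (arbitrary choice) would be configured like this:
 * @code
 *   LL_GPIO_SetPinMode(GPIOB, LL_GPIO_PIN_8, LL_GPIO_MODE_OUTPUT);
 *   LL_GPIO_SetPinOutputType(GPIOB, LL_GPIO_PIN_8, LL_GPIO_OUTPUT_OPENDRAIN);
 *   LL_GPIO_SetPinSpeed(GPIOB, LL_GPIO_PIN_8, LL_GPIO_SPEED_FREQ_VERY_HIGH);
 * @endcode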
+ * @note Warning: only one pin can be passed as parameter. + * @note Refer to datasheet for frequency specifications and the power + * supply and load conditions for each speed. + * @rmtoll OSPEEDR OSPEEDy LL_GPIO_SetPinSpeed + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @param Speed This parameter can be one of the following values: + * @arg @ref LL_GPIO_SPEED_FREQ_LOW + * @arg @ref LL_GPIO_SPEED_FREQ_MEDIUM + * @arg @ref LL_GPIO_SPEED_FREQ_HIGH + * @arg @ref LL_GPIO_SPEED_FREQ_VERY_HIGH + * @retval None + */ +__STATIC_INLINE void LL_GPIO_SetPinSpeed(GPIO_TypeDef *GPIOx, uint32_t Pin, uint32_t Speed) +{ + MODIFY_REG(GPIOx->OSPEEDR, (GPIO_OSPEEDER_OSPEEDR0 << (POSITION_VAL(Pin) * 2U)), + (Speed << (POSITION_VAL(Pin) * 2U))); +} + +/** + * @brief Return gpio speed for a dedicated pin on dedicated port. + * @note I/O speed can be Low, Medium, Fast or High speed. + * @note Warning: only one pin can be passed as parameter. + * @note Refer to datasheet for frequency specifications and the power + * supply and load conditions for each speed. + * @rmtoll OSPEEDR OSPEEDy LL_GPIO_GetPinSpeed + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @retval Returned value can be one of the following values: + * @arg @ref LL_GPIO_SPEED_FREQ_LOW + * @arg @ref LL_GPIO_SPEED_FREQ_MEDIUM + * @arg @ref LL_GPIO_SPEED_FREQ_HIGH + * @arg @ref LL_GPIO_SPEED_FREQ_VERY_HIGH + */ +__STATIC_INLINE uint32_t LL_GPIO_GetPinSpeed(GPIO_TypeDef *GPIOx, uint32_t Pin) +{ + return (uint32_t)(READ_BIT(GPIOx->OSPEEDR, + (GPIO_OSPEEDER_OSPEEDR0 << (POSITION_VAL(Pin) * 2U))) >> (POSITION_VAL(Pin) * 2U)); +} + +/** + * @brief Configure gpio pull-up or pull-down for a dedicated pin on a dedicated port. + * @note Warning: only one pin can be passed as parameter. 
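 *
 * @note  (Editor's example, not part of the ST sources) Typical pairing with the mode
 *        setter for a pulled-up input, here GPIOC pin 13 (arbitrary choice):
 * @code
 *   LL_GPIO_SetPinMode(GPIOC, LL_GPIO_PIN_13, LL_GPIO_MODE_INPUT);
 *   LL_GPIO_SetPinPull(GPIOC, LL_GPIO_PIN_13, LL_GPIO_PULL_UP);
 * @endcode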
+ * @rmtoll PUPDR PUPDy LL_GPIO_SetPinPull + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @param Pull This parameter can be one of the following values: + * @arg @ref LL_GPIO_PULL_NO + * @arg @ref LL_GPIO_PULL_UP + * @arg @ref LL_GPIO_PULL_DOWN + * @retval None + */ +__STATIC_INLINE void LL_GPIO_SetPinPull(GPIO_TypeDef *GPIOx, uint32_t Pin, uint32_t Pull) +{ + MODIFY_REG(GPIOx->PUPDR, (GPIO_PUPDR_PUPDR0 << (POSITION_VAL(Pin) * 2U)), (Pull << (POSITION_VAL(Pin) * 2U))); +} + +/** + * @brief Return gpio pull-up or pull-down for a dedicated pin on a dedicated port + * @note Warning: only one pin can be passed as parameter. + * @rmtoll PUPDR PUPDy LL_GPIO_GetPinPull + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @retval Returned value can be one of the following values: + * @arg @ref LL_GPIO_PULL_NO + * @arg @ref LL_GPIO_PULL_UP + * @arg @ref LL_GPIO_PULL_DOWN + */ +__STATIC_INLINE uint32_t LL_GPIO_GetPinPull(GPIO_TypeDef *GPIOx, uint32_t Pin) +{ + return (uint32_t)(READ_BIT(GPIOx->PUPDR, + (GPIO_PUPDR_PUPDR0 << (POSITION_VAL(Pin) * 2U))) >> (POSITION_VAL(Pin) * 2U)); +} + +/** + * @brief Configure gpio alternate function of a dedicated pin from 0 to 7 for a dedicated port. + * @note Possible values are from AF0 to AF15 depending on target. + * @note Warning: only one pin can be passed as parameter. + * @rmtoll AFRL AFSELy LL_GPIO_SetAFPin_0_7 + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @param Alternate This parameter can be one of the following values: + * @arg @ref LL_GPIO_AF_0 + * @arg @ref LL_GPIO_AF_1 + * @arg @ref LL_GPIO_AF_2 + * @arg @ref LL_GPIO_AF_3 + * @arg @ref LL_GPIO_AF_4 + * @arg @ref LL_GPIO_AF_5 + * @arg @ref LL_GPIO_AF_6 + * @arg @ref LL_GPIO_AF_7 + * @arg @ref LL_GPIO_AF_8 + * @arg @ref LL_GPIO_AF_9 + * @arg @ref LL_GPIO_AF_10 + * @arg @ref LL_GPIO_AF_11 + * @arg @ref LL_GPIO_AF_12 + * @arg @ref LL_GPIO_AF_13 + * @arg @ref LL_GPIO_AF_14 + * @arg @ref LL_GPIO_AF_15 + * @retval None + */ +__STATIC_INLINE void LL_GPIO_SetAFPin_0_7(GPIO_TypeDef *GPIOx, uint32_t Pin, uint32_t Alternate) +{ + MODIFY_REG(GPIOx->AFR[0], (GPIO_AFRL_AFSEL0 << (POSITION_VAL(Pin) * 4U)), + (Alternate << (POSITION_VAL(Pin) * 4U))); +} + +/** + * @brief Return gpio alternate function of a dedicated pin from 0 to 7 for a dedicated port. 
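 *
 * @note  (Editor's example, not part of the ST sources) For pins 0..7 the alternate
 *        function lives in AFRL and is accessed with the _0_7 variants; both the AF
 *        number and the ALTERNATE mode must be programmed. GPIOA pin 6 / AF2 is an
 *        arbitrary mapping used only for illustration:
 * @code
 *   LL_GPIO_SetAFPin_0_7(GPIOA, LL_GPIO_PIN_6, LL_GPIO_AF_2);
 *   LL_GPIO_SetPinMode(GPIOA, LL_GPIO_PIN_6, LL_GPIO_MODE_ALTERNATE);
 * @endcode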
+ * @rmtoll AFRL AFSELy LL_GPIO_GetAFPin_0_7 + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @retval Returned value can be one of the following values: + * @arg @ref LL_GPIO_AF_0 + * @arg @ref LL_GPIO_AF_1 + * @arg @ref LL_GPIO_AF_2 + * @arg @ref LL_GPIO_AF_3 + * @arg @ref LL_GPIO_AF_4 + * @arg @ref LL_GPIO_AF_5 + * @arg @ref LL_GPIO_AF_6 + * @arg @ref LL_GPIO_AF_7 + * @arg @ref LL_GPIO_AF_8 + * @arg @ref LL_GPIO_AF_9 + * @arg @ref LL_GPIO_AF_10 + * @arg @ref LL_GPIO_AF_11 + * @arg @ref LL_GPIO_AF_12 + * @arg @ref LL_GPIO_AF_13 + * @arg @ref LL_GPIO_AF_14 + * @arg @ref LL_GPIO_AF_15 + */ +__STATIC_INLINE uint32_t LL_GPIO_GetAFPin_0_7(GPIO_TypeDef *GPIOx, uint32_t Pin) +{ + return (uint32_t)(READ_BIT(GPIOx->AFR[0], + (GPIO_AFRL_AFSEL0 << (POSITION_VAL(Pin) * 4U))) >> (POSITION_VAL(Pin) * 4U)); +} + +/** + * @brief Configure gpio alternate function of a dedicated pin from 8 to 15 for a dedicated port. + * @note Possible values are from AF0 to AF15 depending on target. + * @note Warning: only one pin can be passed as parameter. + * @rmtoll AFRH AFSELy LL_GPIO_SetAFPin_8_15 + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @param Alternate This parameter can be one of the following values: + * @arg @ref LL_GPIO_AF_0 + * @arg @ref LL_GPIO_AF_1 + * @arg @ref LL_GPIO_AF_2 + * @arg @ref LL_GPIO_AF_3 + * @arg @ref LL_GPIO_AF_4 + * @arg @ref LL_GPIO_AF_5 + * @arg @ref LL_GPIO_AF_6 + * @arg @ref LL_GPIO_AF_7 + * @arg @ref LL_GPIO_AF_8 + * @arg @ref LL_GPIO_AF_9 + * @arg @ref LL_GPIO_AF_10 + * @arg @ref LL_GPIO_AF_11 + * @arg @ref LL_GPIO_AF_12 + * @arg @ref LL_GPIO_AF_13 + * @arg @ref LL_GPIO_AF_14 + * @arg @ref LL_GPIO_AF_15 + * @retval None + */ +__STATIC_INLINE void LL_GPIO_SetAFPin_8_15(GPIO_TypeDef *GPIOx, uint32_t Pin, uint32_t Alternate) +{ + MODIFY_REG(GPIOx->AFR[1], (GPIO_AFRH_AFSEL8 << (POSITION_VAL(Pin >> 8U) * 4U)), + (Alternate << (POSITION_VAL(Pin >> 8U) * 4U))); +} + +/** + * @brief Return gpio alternate function of a dedicated pin from 8 to 15 for a dedicated port. + * @note Possible values are from AF0 to AF15 depending on target. 
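 *
 * @note  (Editor's example, not part of the ST sources) Pins 8..15 live in AFRH, so the
 *        _8_15 variants must be used for them; e.g. routing GPIOA pin 9 to AF7 (the
 *        USART1_TX mapping on many STM32F4 parts; check the datasheet for your device):
 * @code
 *   LL_GPIO_SetAFPin_8_15(GPIOA, LL_GPIO_PIN_9, LL_GPIO_AF_7);
 *   LL_GPIO_SetPinMode(GPIOA, LL_GPIO_PIN_9, LL_GPIO_MODE_ALTERNATE);
 * @endcode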
+ * @rmtoll AFRH AFSELy LL_GPIO_GetAFPin_8_15 + * @param GPIOx GPIO Port + * @param Pin This parameter can be one of the following values: + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @retval Returned value can be one of the following values: + * @arg @ref LL_GPIO_AF_0 + * @arg @ref LL_GPIO_AF_1 + * @arg @ref LL_GPIO_AF_2 + * @arg @ref LL_GPIO_AF_3 + * @arg @ref LL_GPIO_AF_4 + * @arg @ref LL_GPIO_AF_5 + * @arg @ref LL_GPIO_AF_6 + * @arg @ref LL_GPIO_AF_7 + * @arg @ref LL_GPIO_AF_8 + * @arg @ref LL_GPIO_AF_9 + * @arg @ref LL_GPIO_AF_10 + * @arg @ref LL_GPIO_AF_11 + * @arg @ref LL_GPIO_AF_12 + * @arg @ref LL_GPIO_AF_13 + * @arg @ref LL_GPIO_AF_14 + * @arg @ref LL_GPIO_AF_15 + */ +__STATIC_INLINE uint32_t LL_GPIO_GetAFPin_8_15(GPIO_TypeDef *GPIOx, uint32_t Pin) +{ + return (uint32_t)(READ_BIT(GPIOx->AFR[1], + (GPIO_AFRH_AFSEL8 << (POSITION_VAL(Pin >> 8U) * 4U))) >> (POSITION_VAL(Pin >> 8U) * 4U)); +} + + +/** + * @brief Lock configuration of several pins for a dedicated port. + * @note When the lock sequence has been applied on a port bit, the + * value of this port bit can no longer be modified until the + * next reset. + * @note Each lock bit freezes a specific configuration register + * (control and alternate function registers). + * @rmtoll LCKR LCKK LL_GPIO_LockPin + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval None + */ +__STATIC_INLINE void LL_GPIO_LockPin(GPIO_TypeDef *GPIOx, uint32_t PinMask) +{ + __IO uint32_t temp; + WRITE_REG(GPIOx->LCKR, GPIO_LCKR_LCKK | PinMask); + WRITE_REG(GPIOx->LCKR, PinMask); + WRITE_REG(GPIOx->LCKR, GPIO_LCKR_LCKK | PinMask); + temp = READ_REG(GPIOx->LCKR); + (void) temp; +} + +/** + * @brief Return 1 if all pins passed as parameter, of a dedicated port, are locked. else Return 0. + * @rmtoll LCKR LCKy LL_GPIO_IsPinLocked + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_GPIO_IsPinLocked(GPIO_TypeDef *GPIOx, uint32_t PinMask) +{ + return (READ_BIT(GPIOx->LCKR, PinMask) == (PinMask)); +} + +/** + * @brief Return 1 if one of the pin of a dedicated port is locked. else return 0. + * @rmtoll LCKR LCKK LL_GPIO_IsAnyPinLocked + * @param GPIOx GPIO Port + * @retval State of bit (1 or 0). 
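+ * @note Hedged locking sketch: apply the LCKR key sequence via
+ *       @ref LL_GPIO_LockPin once the pin configuration is final, then verify
+ *       (port and pins below are illustrative assumptions):
+ * @code
+ *   LL_GPIO_LockPin(GPIOC, LL_GPIO_PIN_13 | LL_GPIO_PIN_14);
+ *   if (LL_GPIO_IsPinLocked(GPIOC, LL_GPIO_PIN_13 | LL_GPIO_PIN_14))
+ *   {
+ *     // configuration of these pins is now frozen until the next reset
+ *   }
+ * @endcode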
+ */ +__STATIC_INLINE uint32_t LL_GPIO_IsAnyPinLocked(GPIO_TypeDef *GPIOx) +{ + return (READ_BIT(GPIOx->LCKR, GPIO_LCKR_LCKK) == (GPIO_LCKR_LCKK)); +} + +/** + * @} + */ + +/** @defgroup GPIO_LL_EF_Data_Access Data Access + * @{ + */ + +/** + * @brief Return full input data register value for a dedicated port. + * @rmtoll IDR IDy LL_GPIO_ReadInputPort + * @param GPIOx GPIO Port + * @retval Input data register value of port + */ +__STATIC_INLINE uint32_t LL_GPIO_ReadInputPort(GPIO_TypeDef *GPIOx) +{ + return (uint32_t)(READ_REG(GPIOx->IDR)); +} + +/** + * @brief Return if input data level for several pins of dedicated port is high or low. + * @rmtoll IDR IDy LL_GPIO_IsInputPinSet + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_GPIO_IsInputPinSet(GPIO_TypeDef *GPIOx, uint32_t PinMask) +{ + return (READ_BIT(GPIOx->IDR, PinMask) == (PinMask)); +} + +/** + * @brief Write output data register for the port. + * @rmtoll ODR ODy LL_GPIO_WriteOutputPort + * @param GPIOx GPIO Port + * @param PortValue Level value for each pin of the port + * @retval None + */ +__STATIC_INLINE void LL_GPIO_WriteOutputPort(GPIO_TypeDef *GPIOx, uint32_t PortValue) +{ + WRITE_REG(GPIOx->ODR, PortValue); +} + +/** + * @brief Return full output data register value for a dedicated port. + * @rmtoll ODR ODy LL_GPIO_ReadOutputPort + * @param GPIOx GPIO Port + * @retval Output data register value of port + */ +__STATIC_INLINE uint32_t LL_GPIO_ReadOutputPort(GPIO_TypeDef *GPIOx) +{ + return (uint32_t)(READ_REG(GPIOx->ODR)); +} + +/** + * @brief Return if input data level for several pins of dedicated port is high or low. + * @rmtoll ODR ODy LL_GPIO_IsOutputPinSet + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_GPIO_IsOutputPinSet(GPIO_TypeDef *GPIOx, uint32_t PinMask) +{ + return (READ_BIT(GPIOx->ODR, PinMask) == (PinMask)); +} + +/** + * @brief Set several pins to high level on dedicated gpio port. 
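+ * @note Writes through BSRR take effect atomically, so the set/reset/toggle
+ *       helpers in this group avoid read-modify-write hazards on ODR; a
+ *       hedged example (port and pin are illustrative assumptions):
+ * @code
+ *   LL_GPIO_SetOutputPin(GPIOD, LL_GPIO_PIN_12);     // drive high
+ *   LL_GPIO_ResetOutputPin(GPIOD, LL_GPIO_PIN_12);   // drive low
+ *   LL_GPIO_TogglePin(GPIOD, LL_GPIO_PIN_12);        // invert current level
+ * @endcode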
+ * @rmtoll BSRR BSy LL_GPIO_SetOutputPin + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval None + */ +__STATIC_INLINE void LL_GPIO_SetOutputPin(GPIO_TypeDef *GPIOx, uint32_t PinMask) +{ + WRITE_REG(GPIOx->BSRR, PinMask); +} + +/** + * @brief Set several pins to low level on dedicated gpio port. + * @rmtoll BSRR BRy LL_GPIO_ResetOutputPin + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval None + */ +__STATIC_INLINE void LL_GPIO_ResetOutputPin(GPIO_TypeDef *GPIOx, uint32_t PinMask) +{ + WRITE_REG(GPIOx->BSRR, (PinMask << 16)); +} + +/** + * @brief Toggle data value for several pin of dedicated port. + * @rmtoll ODR ODy LL_GPIO_TogglePin + * @param GPIOx GPIO Port + * @param PinMask This parameter can be a combination of the following values: + * @arg @ref LL_GPIO_PIN_0 + * @arg @ref LL_GPIO_PIN_1 + * @arg @ref LL_GPIO_PIN_2 + * @arg @ref LL_GPIO_PIN_3 + * @arg @ref LL_GPIO_PIN_4 + * @arg @ref LL_GPIO_PIN_5 + * @arg @ref LL_GPIO_PIN_6 + * @arg @ref LL_GPIO_PIN_7 + * @arg @ref LL_GPIO_PIN_8 + * @arg @ref LL_GPIO_PIN_9 + * @arg @ref LL_GPIO_PIN_10 + * @arg @ref LL_GPIO_PIN_11 + * @arg @ref LL_GPIO_PIN_12 + * @arg @ref LL_GPIO_PIN_13 + * @arg @ref LL_GPIO_PIN_14 + * @arg @ref LL_GPIO_PIN_15 + * @arg @ref LL_GPIO_PIN_ALL + * @retval None + */ +__STATIC_INLINE void LL_GPIO_TogglePin(GPIO_TypeDef *GPIOx, uint32_t PinMask) +{ + uint32_t odr = READ_REG(GPIOx->ODR); + WRITE_REG(GPIOx->BSRR, ((odr & PinMask) << 16u) | (~odr & PinMask)); +} + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup GPIO_LL_EF_Init Initialization and de-initialization functions + * @{ + */ + +ErrorStatus LL_GPIO_DeInit(GPIO_TypeDef *GPIOx); +ErrorStatus LL_GPIO_Init(GPIO_TypeDef *GPIOx, LL_GPIO_InitTypeDef *GPIO_InitStruct); +void LL_GPIO_StructInit(LL_GPIO_InitTypeDef *GPIO_InitStruct); + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* defined (GPIOA) || defined (GPIOB) || defined (GPIOC) || defined (GPIOD) || defined (GPIOE) || defined (GPIOF) || defined (GPIOG) || defined (GPIOH) || defined (GPIOI) || defined (GPIOJ) || defined (GPIOK) */ +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_GPIO_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_i2c.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_i2c.h new file mode 100644 index 00000000..92d4a74a --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_i2c.h @@ -0,0 
+1,1890 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_i2c.h + * @author MCD Application Team + * @brief Header file of I2C LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_I2C_H +#define __STM32F4xx_LL_I2C_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (I2C1) || defined (I2C2) || defined (I2C3) + +/** @defgroup I2C_LL I2C + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup I2C_LL_Private_Constants I2C Private Constants + * @{ + */ + +/* Defines used to perform compute and check in the macros */ +#define LL_I2C_MAX_SPEED_STANDARD 100000U +#define LL_I2C_MAX_SPEED_FAST 400000U +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup I2C_LL_Private_Macros I2C Private Macros + * @{ + */ +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ + +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup I2C_LL_ES_INIT I2C Exported Init structure + * @{ + */ +typedef struct +{ + uint32_t PeripheralMode; /*!< Specifies the peripheral mode. + This parameter can be a value of @ref I2C_LL_EC_PERIPHERAL_MODE + + This feature can be modified afterwards using unitary function @ref LL_I2C_SetMode(). */ + + uint32_t ClockSpeed; /*!< Specifies the clock frequency. + This parameter must be set to a value lower than 400kHz (in Hz) + + This feature can be modified afterwards using unitary function @ref LL_I2C_SetClockPeriod() + or @ref LL_I2C_SetDutyCycle() or @ref LL_I2C_SetClockSpeedMode() or @ref LL_I2C_ConfigSpeed(). */ + + uint32_t DutyCycle; /*!< Specifies the I2C fast mode duty cycle. + This parameter can be a value of @ref I2C_LL_EC_DUTYCYCLE + + This feature can be modified afterwards using unitary function @ref LL_I2C_SetDutyCycle(). */ + +#if defined(I2C_FLTR_ANOFF)&&defined(I2C_FLTR_DNF) + uint32_t AnalogFilter; /*!< Enables or disables analog noise filter. + This parameter can be a value of @ref I2C_LL_EC_ANALOGFILTER_SELECTION + + This feature can be modified afterwards using unitary functions @ref LL_I2C_EnableAnalogFilter() or LL_I2C_DisableAnalogFilter(). */ + + uint32_t DigitalFilter; /*!< Configures the digital noise filter. + This parameter can be a number between Min_Data = 0x00 and Max_Data = 0x0F + + This feature can be modified afterwards using unitary function @ref LL_I2C_SetDigitalFilter(). */ + +#endif + uint32_t OwnAddress1; /*!< Specifies the device own address 1. 
+ This parameter must be a value between Min_Data = 0x00 and Max_Data = 0x3FF + + This feature can be modified afterwards using unitary function @ref LL_I2C_SetOwnAddress1(). */ + + uint32_t TypeAcknowledge; /*!< Specifies the ACKnowledge or Non ACKnowledge condition after the address receive match code or next received byte. + This parameter can be a value of @ref I2C_LL_EC_I2C_ACKNOWLEDGE + + This feature can be modified afterwards using unitary function @ref LL_I2C_AcknowledgeNextData(). */ + + uint32_t OwnAddrSize; /*!< Specifies the device own address 1 size (7-bit or 10-bit). + This parameter can be a value of @ref I2C_LL_EC_OWNADDRESS1 + + This feature can be modified afterwards using unitary function @ref LL_I2C_SetOwnAddress1(). */ +} LL_I2C_InitTypeDef; +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup I2C_LL_Exported_Constants I2C Exported Constants + * @{ + */ + +/** @defgroup I2C_LL_EC_GET_FLAG Get Flags Defines + * @brief Flags defines which can be used with LL_I2C_ReadReg function + * @{ + */ +#define LL_I2C_SR1_SB I2C_SR1_SB /*!< Start Bit (master mode) */ +#define LL_I2C_SR1_ADDR I2C_SR1_ADDR /*!< Address sent (master mode) or + Address matched flag (slave mode) */ +#define LL_I2C_SR1_BTF I2C_SR1_BTF /*!< Byte Transfer Finished flag */ +#define LL_I2C_SR1_ADD10 I2C_SR1_ADD10 /*!< 10-bit header sent (master mode) */ +#define LL_I2C_SR1_STOPF I2C_SR1_STOPF /*!< Stop detection flag (slave mode) */ +#define LL_I2C_SR1_RXNE I2C_SR1_RXNE /*!< Data register not empty (receivers) */ +#define LL_I2C_SR1_TXE I2C_SR1_TXE /*!< Data register empty (transmitters) */ +#define LL_I2C_SR1_BERR I2C_SR1_BERR /*!< Bus error */ +#define LL_I2C_SR1_ARLO I2C_SR1_ARLO /*!< Arbitration lost */ +#define LL_I2C_SR1_AF I2C_SR1_AF /*!< Acknowledge failure flag */ +#define LL_I2C_SR1_OVR I2C_SR1_OVR /*!< Overrun/Underrun */ +#define LL_I2C_SR1_PECERR I2C_ISR_PECERR /*!< PEC Error in reception (SMBus mode) */ +#define LL_I2C_SR1_TIMEOUT I2C_ISR_TIMEOUT /*!< Timeout detection flag (SMBus mode) */ +#define LL_I2C_SR1_SMALERT I2C_ISR_SMALERT /*!< SMBus alert (SMBus mode) */ +#define LL_I2C_SR2_MSL I2C_SR2_MSL /*!< Master/Slave flag */ +#define LL_I2C_SR2_BUSY I2C_SR2_BUSY /*!< Bus busy flag */ +#define LL_I2C_SR2_TRA I2C_SR2_TRA /*!< Transmitter/receiver direction */ +#define LL_I2C_SR2_GENCALL I2C_SR2_GENCALL /*!< General call address (Slave mode) */ +#define LL_I2C_SR2_SMBDEFAULT I2C_SR2_SMBDEFAULT /*!< SMBus Device default address (Slave mode) */ +#define LL_I2C_SR2_SMBHOST I2C_SR2_SMBHOST /*!< SMBus Host address (Slave mode) */ +#define LL_I2C_SR2_DUALF I2C_SR2_DUALF /*!< Dual flag (Slave mode) */ +/** + * @} + */ + +/** @defgroup I2C_LL_EC_IT IT Defines + * @brief IT defines which can be used with LL_I2C_ReadReg and LL_I2C_WriteReg functions + * @{ + */ +#define LL_I2C_CR2_ITEVTEN I2C_CR2_ITEVTEN /*!< Events interrupts enable */ +#define LL_I2C_CR2_ITBUFEN I2C_CR2_ITBUFEN /*!< Buffer interrupts enable */ +#define LL_I2C_CR2_ITERREN I2C_CR2_ITERREN /*!< Error interrupts enable */ +/** + * @} + */ + +#if defined(I2C_FLTR_ANOFF) +/** @defgroup I2C_LL_EC_ANALOGFILTER_SELECTION Analog Filter Selection + * @{ + */ +#define LL_I2C_ANALOGFILTER_ENABLE 0x00000000U /*!< Analog filter is enabled. 
*/ +#define LL_I2C_ANALOGFILTER_DISABLE I2C_FLTR_ANOFF /*!< Analog filter is disabled.*/ +/** + * @} + */ + +#endif +/** @defgroup I2C_LL_EC_OWNADDRESS1 Own Address 1 Length + * @{ + */ +#define LL_I2C_OWNADDRESS1_7BIT 0x00004000U /*!< Own address 1 is a 7-bit address. */ +#define LL_I2C_OWNADDRESS1_10BIT (uint32_t)(I2C_OAR1_ADDMODE | 0x00004000U) /*!< Own address 1 is a 10-bit address. */ +/** + * @} + */ + +/** @defgroup I2C_LL_EC_DUTYCYCLE Fast Mode Duty Cycle + * @{ + */ +#define LL_I2C_DUTYCYCLE_2 0x00000000U /*!< I2C fast mode Tlow/Thigh = 2 */ +#define LL_I2C_DUTYCYCLE_16_9 I2C_CCR_DUTY /*!< I2C fast mode Tlow/Thigh = 16/9 */ +/** + * @} + */ + +/** @defgroup I2C_LL_EC_CLOCK_SPEED_MODE Master Clock Speed Mode + * @{ + */ +#define LL_I2C_CLOCK_SPEED_STANDARD_MODE 0x00000000U /*!< Master clock speed range is standard mode */ +#define LL_I2C_CLOCK_SPEED_FAST_MODE I2C_CCR_FS /*!< Master clock speed range is fast mode */ +/** + * @} + */ + +/** @defgroup I2C_LL_EC_PERIPHERAL_MODE Peripheral Mode + * @{ + */ +#define LL_I2C_MODE_I2C 0x00000000U /*!< I2C Master or Slave mode */ +#define LL_I2C_MODE_SMBUS_HOST (uint32_t)(I2C_CR1_SMBUS | I2C_CR1_SMBTYPE | I2C_CR1_ENARP) /*!< SMBus Host address acknowledge */ +#define LL_I2C_MODE_SMBUS_DEVICE I2C_CR1_SMBUS /*!< SMBus Device default mode (Default address not acknowledge) */ +#define LL_I2C_MODE_SMBUS_DEVICE_ARP (uint32_t)(I2C_CR1_SMBUS | I2C_CR1_ENARP) /*!< SMBus Device Default address acknowledge */ +/** + * @} + */ + +/** @defgroup I2C_LL_EC_I2C_ACKNOWLEDGE Acknowledge Generation + * @{ + */ +#define LL_I2C_ACK I2C_CR1_ACK /*!< ACK is sent after current received byte. */ +#define LL_I2C_NACK 0x00000000U /*!< NACK is sent after current received byte.*/ +/** + * @} + */ + +/** @defgroup I2C_LL_EC_DIRECTION Read Write Direction + * @{ + */ +#define LL_I2C_DIRECTION_WRITE I2C_SR2_TRA /*!< Bus is in write transfer */ +#define LL_I2C_DIRECTION_READ 0x00000000U /*!< Bus is in read transfer */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup I2C_LL_Exported_Macros I2C Exported Macros + * @{ + */ + +/** @defgroup I2C_LL_EM_WRITE_READ Common Write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in I2C register + * @param __INSTANCE__ I2C Instance + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_I2C_WriteReg(__INSTANCE__, __REG__, __VALUE__) WRITE_REG(__INSTANCE__->__REG__, (__VALUE__)) + +/** + * @brief Read a value in I2C register + * @param __INSTANCE__ I2C Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_I2C_ReadReg(__INSTANCE__, __REG__) READ_REG(__INSTANCE__->__REG__) +/** + * @} + */ + +/** @defgroup I2C_LL_EM_Exported_Macros_Helper Exported Macros Helper + * @{ + */ + +/** + * @brief Convert Peripheral Clock Frequency in Mhz. + * @param __PCLK__ This parameter must be a value of peripheral clock (in Hz). + * @retval Value of peripheral clock (in Mhz) + */ +#define __LL_I2C_FREQ_HZ_TO_MHZ(__PCLK__) (uint32_t)((__PCLK__)/1000000U) + +/** + * @brief Convert Peripheral Clock Frequency in Hz. + * @param __PCLK__ This parameter must be a value of peripheral clock (in Mhz). + * @retval Value of peripheral clock (in Hz) + */ +#define __LL_I2C_FREQ_MHZ_TO_HZ(__PCLK__) (uint32_t)((__PCLK__)*1000000U) + +/** + * @brief Compute I2C Clock rising time. 
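+ * @note Hedged example of the helper macros in this group (the 42 MHz APB
+ *       clock and 400 kHz bus speed are assumptions for illustration):
+ * @code
+ *   uint32_t freqrange = __LL_I2C_FREQ_HZ_TO_MHZ(42000000U);          // 42
+ *   uint32_t trise     = __LL_I2C_RISE_TIME(freqrange, 400000U);      // fast-mode rise time
+ *   uint32_t ccr       = __LL_I2C_SPEED_TO_CCR(42000000U, 400000U, LL_I2C_DUTYCYCLE_2);
+ * @endcode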
+ * @param __FREQRANGE__ This parameter must be a value of peripheral clock (in Mhz). + * @param __SPEED__ This parameter must be a value lower than 400kHz (in Hz). + * @retval Value between Min_Data=0x02 and Max_Data=0x3F + */ +#define __LL_I2C_RISE_TIME(__FREQRANGE__, __SPEED__) (uint32_t)(((__SPEED__) <= LL_I2C_MAX_SPEED_STANDARD) ? ((__FREQRANGE__) + 1U) : ((((__FREQRANGE__) * 300U) / 1000U) + 1U)) + +/** + * @brief Compute Speed clock range to a Clock Control Register (I2C_CCR_CCR) value. + * @param __PCLK__ This parameter must be a value of peripheral clock (in Hz). + * @param __SPEED__ This parameter must be a value lower than 400kHz (in Hz). + * @param __DUTYCYCLE__ This parameter can be one of the following values: + * @arg @ref LL_I2C_DUTYCYCLE_2 + * @arg @ref LL_I2C_DUTYCYCLE_16_9 + * @retval Value between Min_Data=0x004 and Max_Data=0xFFF, except in FAST DUTY mode where Min_Data=0x001. + */ +#define __LL_I2C_SPEED_TO_CCR(__PCLK__, __SPEED__, __DUTYCYCLE__) (uint32_t)(((__SPEED__) <= LL_I2C_MAX_SPEED_STANDARD)? \ + (__LL_I2C_SPEED_STANDARD_TO_CCR((__PCLK__), (__SPEED__))) : \ + (__LL_I2C_SPEED_FAST_TO_CCR((__PCLK__), (__SPEED__), (__DUTYCYCLE__)))) + +/** + * @brief Compute Speed Standard clock range to a Clock Control Register (I2C_CCR_CCR) value. + * @param __PCLK__ This parameter must be a value of peripheral clock (in Hz). + * @param __SPEED__ This parameter must be a value lower than 100kHz (in Hz). + * @retval Value between Min_Data=0x004 and Max_Data=0xFFF. + */ +#define __LL_I2C_SPEED_STANDARD_TO_CCR(__PCLK__, __SPEED__) (uint32_t)(((((__PCLK__)/((__SPEED__) << 1U)) & I2C_CCR_CCR) < 4U)? 4U:((__PCLK__) / ((__SPEED__) << 1U))) + +/** + * @brief Compute Speed Fast clock range to a Clock Control Register (I2C_CCR_CCR) value. + * @param __PCLK__ This parameter must be a value of peripheral clock (in Hz). + * @param __SPEED__ This parameter must be a value between Min_Data=100Khz and Max_Data=400Khz (in Hz). + * @param __DUTYCYCLE__ This parameter can be one of the following values: + * @arg @ref LL_I2C_DUTYCYCLE_2 + * @arg @ref LL_I2C_DUTYCYCLE_16_9 + * @retval Value between Min_Data=0x001 and Max_Data=0xFFF + */ +#define __LL_I2C_SPEED_FAST_TO_CCR(__PCLK__, __SPEED__, __DUTYCYCLE__) (uint32_t)(((__DUTYCYCLE__) == LL_I2C_DUTYCYCLE_2)? \ + (((((__PCLK__) / ((__SPEED__) * 3U)) & I2C_CCR_CCR) == 0U)? 1U:((__PCLK__) / ((__SPEED__) * 3U))) : \ + (((((__PCLK__) / ((__SPEED__) * 25U)) & I2C_CCR_CCR) == 0U)? 1U:((__PCLK__) / ((__SPEED__) * 25U)))) + +/** + * @brief Get the Least significant bits of a 10-Bits address. + * @param __ADDRESS__ This parameter must be a value of a 10-Bits slave address. + * @retval Value between Min_Data=0x00 and Max_Data=0xFF + */ +#define __LL_I2C_10BIT_ADDRESS(__ADDRESS__) ((uint8_t)((uint16_t)((__ADDRESS__) & (uint16_t)(0x00FF)))) + +/** + * @brief Convert a 10-Bits address to a 10-Bits header with Write direction. + * @param __ADDRESS__ This parameter must be a value of a 10-Bits slave address. + * @retval Value between Min_Data=0xF0 and Max_Data=0xF6 + */ +#define __LL_I2C_10BIT_HEADER_WRITE(__ADDRESS__) ((uint8_t)((uint16_t)((uint16_t)(((uint16_t)((__ADDRESS__) & (uint16_t)(0x0300))) >> 7) | (uint16_t)(0xF0)))) + +/** + * @brief Convert a 10-Bits address to a 10-Bits header with Read direction. + * @param __ADDRESS__ This parameter must be a value of a 10-Bits slave address. 
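+ * @note Hedged sketch of addressing a 10-bit slave with these helpers (the
+ *       address 0x2A5 is an illustrative assumption):
+ * @code
+ *   uint16_t slave  = 0x2A5U;
+ *   uint8_t  header = __LL_I2C_10BIT_HEADER_WRITE(slave);   // 0xF4 for this address
+ *   uint8_t  lsb    = __LL_I2C_10BIT_ADDRESS(slave);        // 0xA5
+ * @endcode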
+ * @retval Value between Min_Data=0xF1 and Max_Data=0xF7 + */ +#define __LL_I2C_10BIT_HEADER_READ(__ADDRESS__) ((uint8_t)((uint16_t)((uint16_t)(((uint16_t)((__ADDRESS__) & (uint16_t)(0x0300))) >> 7) | (uint16_t)(0xF1)))) + +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup I2C_LL_Exported_Functions I2C Exported Functions + * @{ + */ + +/** @defgroup I2C_LL_EF_Configuration Configuration + * @{ + */ + +/** + * @brief Enable I2C peripheral (PE = 1). + * @rmtoll CR1 PE LL_I2C_Enable + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_Enable(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR1, I2C_CR1_PE); +} + +/** + * @brief Disable I2C peripheral (PE = 0). + * @rmtoll CR1 PE LL_I2C_Disable + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_Disable(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR1, I2C_CR1_PE); +} + +/** + * @brief Check if the I2C peripheral is enabled or disabled. + * @rmtoll CR1 PE LL_I2C_IsEnabled + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabled(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR1, I2C_CR1_PE) == (I2C_CR1_PE)); +} + +#if defined(I2C_FLTR_ANOFF)&&defined(I2C_FLTR_DNF) +/** + * @brief Configure Noise Filters (Analog and Digital). + * @note If the analog filter is also enabled, the digital filter is added to analog filter. + * The filters can only be programmed when the I2C is disabled (PE = 0). + * @rmtoll FLTR ANOFF LL_I2C_ConfigFilters\n + * FLTR DNF LL_I2C_ConfigFilters + * @param I2Cx I2C Instance. + * @param AnalogFilter This parameter can be one of the following values: + * @arg @ref LL_I2C_ANALOGFILTER_ENABLE + * @arg @ref LL_I2C_ANALOGFILTER_DISABLE + * @param DigitalFilter This parameter must be a value between Min_Data=0x00 (Digital filter disabled) and Max_Data=0x0F (Digital filter enabled and filtering capability up to 15*TPCLK1) + * This parameter is used to configure the digital noise filter on SDA and SCL input. The digital filter will suppress the spikes with a length of up to DNF[3:0]*TPCLK1. + * @retval None + */ +__STATIC_INLINE void LL_I2C_ConfigFilters(I2C_TypeDef *I2Cx, uint32_t AnalogFilter, uint32_t DigitalFilter) +{ + MODIFY_REG(I2Cx->FLTR, I2C_FLTR_ANOFF | I2C_FLTR_DNF, AnalogFilter | DigitalFilter); +} +#endif +#if defined(I2C_FLTR_DNF) + +/** + * @brief Configure Digital Noise Filter. + * @note If the analog filter is also enabled, the digital filter is added to analog filter. + * This filter can only be programmed when the I2C is disabled (PE = 0). + * @rmtoll FLTR DNF LL_I2C_SetDigitalFilter + * @param I2Cx I2C Instance. + * @param DigitalFilter This parameter must be a value between Min_Data=0x00 (Digital filter disabled) and Max_Data=0x0F (Digital filter enabled and filtering capability up to 15*TPCLK1) + * This parameter is used to configure the digital noise filter on SDA and SCL input. The digital filter will suppress the spikes with a length of up to DNF[3:0]*TPCLK1. + * @retval None + */ +__STATIC_INLINE void LL_I2C_SetDigitalFilter(I2C_TypeDef *I2Cx, uint32_t DigitalFilter) +{ + MODIFY_REG(I2Cx->FLTR, I2C_FLTR_DNF, DigitalFilter); +} + +/** + * @brief Get the current Digital Noise Filter configuration. + * @rmtoll FLTR DNF LL_I2C_GetDigitalFilter + * @param I2Cx I2C Instance. 
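+ * @note The filters can only be changed while the peripheral is disabled
+ *       (PE = 0); a hedged sketch (the digital filter length of 4 is an
+ *       illustrative assumption):
+ * @code
+ *   LL_I2C_Disable(I2C1);
+ *   LL_I2C_ConfigFilters(I2C1, LL_I2C_ANALOGFILTER_ENABLE, 0x04U);
+ *   LL_I2C_Enable(I2C1);
+ * @endcode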
+ * @retval Value between Min_Data=0x0 and Max_Data=0xF + */ +__STATIC_INLINE uint32_t LL_I2C_GetDigitalFilter(I2C_TypeDef *I2Cx) +{ + return (uint32_t)(READ_BIT(I2Cx->FLTR, I2C_FLTR_DNF)); +} +#endif +#if defined(I2C_FLTR_ANOFF) + +/** + * @brief Enable Analog Noise Filter. + * @note This filter can only be programmed when the I2C is disabled (PE = 0). + * @rmtoll FLTR ANOFF LL_I2C_EnableAnalogFilter + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableAnalogFilter(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->FLTR, I2C_FLTR_ANOFF); +} + +/** + * @brief Disable Analog Noise Filter. + * @note This filter can only be programmed when the I2C is disabled (PE = 0). + * @rmtoll FLTR ANOFF LL_I2C_DisableAnalogFilter + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableAnalogFilter(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->FLTR, I2C_FLTR_ANOFF); +} + +/** + * @brief Check if Analog Noise Filter is enabled or disabled. + * @rmtoll FLTR ANOFF LL_I2C_IsEnabledAnalogFilter + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledAnalogFilter(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->FLTR, I2C_FLTR_ANOFF) == (I2C_FLTR_ANOFF)); +} +#endif + +/** + * @brief Enable DMA transmission requests. + * @rmtoll CR2 DMAEN LL_I2C_EnableDMAReq_TX + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableDMAReq_TX(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR2, I2C_CR2_DMAEN); +} + +/** + * @brief Disable DMA transmission requests. + * @rmtoll CR2 DMAEN LL_I2C_DisableDMAReq_TX + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableDMAReq_TX(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR2, I2C_CR2_DMAEN); +} + +/** + * @brief Check if DMA transmission requests are enabled or disabled. + * @rmtoll CR2 DMAEN LL_I2C_IsEnabledDMAReq_TX + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledDMAReq_TX(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR2, I2C_CR2_DMAEN) == (I2C_CR2_DMAEN)); +} + +/** + * @brief Enable DMA reception requests. + * @rmtoll CR2 DMAEN LL_I2C_EnableDMAReq_RX + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableDMAReq_RX(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR2, I2C_CR2_DMAEN); +} + +/** + * @brief Disable DMA reception requests. + * @rmtoll CR2 DMAEN LL_I2C_DisableDMAReq_RX + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableDMAReq_RX(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR2, I2C_CR2_DMAEN); +} + +/** + * @brief Check if DMA reception requests are enabled or disabled. + * @rmtoll CR2 DMAEN LL_I2C_IsEnabledDMAReq_RX + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledDMAReq_RX(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR2, I2C_CR2_DMAEN) == (I2C_CR2_DMAEN)); +} + +/** + * @brief Get the data register address used for DMA transfer. + * @rmtoll DR DR LL_I2C_DMA_GetRegAddr + * @param I2Cx I2C Instance. + * @retval Address of data register + */ +__STATIC_INLINE uint32_t LL_I2C_DMA_GetRegAddr(I2C_TypeDef *I2Cx) +{ + return (uint32_t) & (I2Cx->DR); +} + +/** + * @brief Enable Clock stretching. + * @note This bit can only be programmed when the I2C is disabled (PE = 0). + * @rmtoll CR1 NOSTRETCH LL_I2C_EnableClockStretching + * @param I2Cx I2C Instance. 
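+ * @note Hedged DMA hookup sketch using the request helpers earlier in this
+ *       group: fetch the data-register address for the DMA stream setup
+ *       (the stream configuration itself is done elsewhere, e.g. with the
+ *       LL DMA driver, and is not shown), then enable the request line:
+ * @code
+ *   uint32_t i2c_dr_addr = LL_I2C_DMA_GetRegAddr(I2C1);
+ *   // pass i2c_dr_addr to the DMA stream as the peripheral address
+ *   LL_I2C_EnableDMAReq_TX(I2C1);
+ * @endcode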
+ * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableClockStretching(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR1, I2C_CR1_NOSTRETCH); +} + +/** + * @brief Disable Clock stretching. + * @note This bit can only be programmed when the I2C is disabled (PE = 0). + * @rmtoll CR1 NOSTRETCH LL_I2C_DisableClockStretching + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableClockStretching(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR1, I2C_CR1_NOSTRETCH); +} + +/** + * @brief Check if Clock stretching is enabled or disabled. + * @rmtoll CR1 NOSTRETCH LL_I2C_IsEnabledClockStretching + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledClockStretching(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR1, I2C_CR1_NOSTRETCH) != (I2C_CR1_NOSTRETCH)); +} + +/** + * @brief Enable General Call. + * @note When enabled the Address 0x00 is ACKed. + * @rmtoll CR1 ENGC LL_I2C_EnableGeneralCall + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableGeneralCall(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR1, I2C_CR1_ENGC); +} + +/** + * @brief Disable General Call. + * @note When disabled the Address 0x00 is NACKed. + * @rmtoll CR1 ENGC LL_I2C_DisableGeneralCall + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableGeneralCall(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR1, I2C_CR1_ENGC); +} + +/** + * @brief Check if General Call is enabled or disabled. + * @rmtoll CR1 ENGC LL_I2C_IsEnabledGeneralCall + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledGeneralCall(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR1, I2C_CR1_ENGC) == (I2C_CR1_ENGC)); +} + +/** + * @brief Set the Own Address1. + * @rmtoll OAR1 ADD0 LL_I2C_SetOwnAddress1\n + * OAR1 ADD1_7 LL_I2C_SetOwnAddress1\n + * OAR1 ADD8_9 LL_I2C_SetOwnAddress1\n + * OAR1 ADDMODE LL_I2C_SetOwnAddress1 + * @param I2Cx I2C Instance. + * @param OwnAddress1 This parameter must be a value between Min_Data=0 and Max_Data=0x3FF. + * @param OwnAddrSize This parameter can be one of the following values: + * @arg @ref LL_I2C_OWNADDRESS1_7BIT + * @arg @ref LL_I2C_OWNADDRESS1_10BIT + * @retval None + */ +__STATIC_INLINE void LL_I2C_SetOwnAddress1(I2C_TypeDef *I2Cx, uint32_t OwnAddress1, uint32_t OwnAddrSize) +{ + MODIFY_REG(I2Cx->OAR1, I2C_OAR1_ADD0 | I2C_OAR1_ADD1_7 | I2C_OAR1_ADD8_9 | I2C_OAR1_ADDMODE, OwnAddress1 | OwnAddrSize); +} + +/** + * @brief Set the 7bits Own Address2. + * @note This action has no effect if own address2 is enabled. + * @rmtoll OAR2 ADD2 LL_I2C_SetOwnAddress2 + * @param I2Cx I2C Instance. + * @param OwnAddress2 This parameter must be a value between Min_Data=0 and Max_Data=0x7F. + * @retval None + */ +__STATIC_INLINE void LL_I2C_SetOwnAddress2(I2C_TypeDef *I2Cx, uint32_t OwnAddress2) +{ + MODIFY_REG(I2Cx->OAR2, I2C_OAR2_ADD2, OwnAddress2); +} + +/** + * @brief Enable acknowledge on Own Address2 match address. + * @rmtoll OAR2 ENDUAL LL_I2C_EnableOwnAddress2 + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableOwnAddress2(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->OAR2, I2C_OAR2_ENDUAL); +} + +/** + * @brief Disable acknowledge on Own Address2 match address. + * @rmtoll OAR2 ENDUAL LL_I2C_DisableOwnAddress2 + * @param I2Cx I2C Instance. 
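+ * @note Hedged slave-address sketch (the 7-bit addresses 0x3C and 0x29 are
+ *       illustrative assumptions; OAR1/OAR2 hold a 7-bit address left-aligned
+ *       by one bit):
+ * @code
+ *   LL_I2C_SetOwnAddress1(I2C1, (0x3CU << 1), LL_I2C_OWNADDRESS1_7BIT);
+ *   LL_I2C_SetOwnAddress2(I2C1, (0x29U << 1));
+ *   LL_I2C_EnableOwnAddress2(I2C1);
+ * @endcode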
+ * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableOwnAddress2(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->OAR2, I2C_OAR2_ENDUAL); +} + +/** + * @brief Check if Own Address1 acknowledge is enabled or disabled. + * @rmtoll OAR2 ENDUAL LL_I2C_IsEnabledOwnAddress2 + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledOwnAddress2(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->OAR2, I2C_OAR2_ENDUAL) == (I2C_OAR2_ENDUAL)); +} + +/** + * @brief Configure the Peripheral clock frequency. + * @rmtoll CR2 FREQ LL_I2C_SetPeriphClock + * @param I2Cx I2C Instance. + * @param PeriphClock Peripheral Clock (in Hz) + * @retval None + */ +__STATIC_INLINE void LL_I2C_SetPeriphClock(I2C_TypeDef *I2Cx, uint32_t PeriphClock) +{ + MODIFY_REG(I2Cx->CR2, I2C_CR2_FREQ, __LL_I2C_FREQ_HZ_TO_MHZ(PeriphClock)); +} + +/** + * @brief Get the Peripheral clock frequency. + * @rmtoll CR2 FREQ LL_I2C_GetPeriphClock + * @param I2Cx I2C Instance. + * @retval Value of Peripheral Clock (in Hz) + */ +__STATIC_INLINE uint32_t LL_I2C_GetPeriphClock(I2C_TypeDef *I2Cx) +{ + return (uint32_t)(__LL_I2C_FREQ_MHZ_TO_HZ(READ_BIT(I2Cx->CR2, I2C_CR2_FREQ))); +} + +/** + * @brief Configure the Duty cycle (Fast mode only). + * @rmtoll CCR DUTY LL_I2C_SetDutyCycle + * @param I2Cx I2C Instance. + * @param DutyCycle This parameter can be one of the following values: + * @arg @ref LL_I2C_DUTYCYCLE_2 + * @arg @ref LL_I2C_DUTYCYCLE_16_9 + * @retval None + */ +__STATIC_INLINE void LL_I2C_SetDutyCycle(I2C_TypeDef *I2Cx, uint32_t DutyCycle) +{ + MODIFY_REG(I2Cx->CCR, I2C_CCR_DUTY, DutyCycle); +} + +/** + * @brief Get the Duty cycle (Fast mode only). + * @rmtoll CCR DUTY LL_I2C_GetDutyCycle + * @param I2Cx I2C Instance. + * @retval Returned value can be one of the following values: + * @arg @ref LL_I2C_DUTYCYCLE_2 + * @arg @ref LL_I2C_DUTYCYCLE_16_9 + */ +__STATIC_INLINE uint32_t LL_I2C_GetDutyCycle(I2C_TypeDef *I2Cx) +{ + return (uint32_t)(READ_BIT(I2Cx->CCR, I2C_CCR_DUTY)); +} + +/** + * @brief Configure the I2C master clock speed mode. + * @rmtoll CCR FS LL_I2C_SetClockSpeedMode + * @param I2Cx I2C Instance. + * @param ClockSpeedMode This parameter can be one of the following values: + * @arg @ref LL_I2C_CLOCK_SPEED_STANDARD_MODE + * @arg @ref LL_I2C_CLOCK_SPEED_FAST_MODE + * @retval None + */ +__STATIC_INLINE void LL_I2C_SetClockSpeedMode(I2C_TypeDef *I2Cx, uint32_t ClockSpeedMode) +{ + MODIFY_REG(I2Cx->CCR, I2C_CCR_FS, ClockSpeedMode); +} + +/** + * @brief Get the the I2C master speed mode. + * @rmtoll CCR FS LL_I2C_GetClockSpeedMode + * @param I2Cx I2C Instance. + * @retval Returned value can be one of the following values: + * @arg @ref LL_I2C_CLOCK_SPEED_STANDARD_MODE + * @arg @ref LL_I2C_CLOCK_SPEED_FAST_MODE + */ +__STATIC_INLINE uint32_t LL_I2C_GetClockSpeedMode(I2C_TypeDef *I2Cx) +{ + return (uint32_t)(READ_BIT(I2Cx->CCR, I2C_CCR_FS)); +} + +/** + * @brief Configure the SCL, SDA rising time. + * @note This bit can only be programmed when the I2C is disabled (PE = 0). + * @rmtoll TRISE TRISE LL_I2C_SetRiseTime + * @param I2Cx I2C Instance. + * @param RiseTime This parameter must be a value between Min_Data=0x02 and Max_Data=0x3F. + * @retval None + */ +__STATIC_INLINE void LL_I2C_SetRiseTime(I2C_TypeDef *I2Cx, uint32_t RiseTime) +{ + MODIFY_REG(I2Cx->TRISE, I2C_TRISE_TRISE, RiseTime); +} + +/** + * @brief Get the SCL, SDA rising time. + * @rmtoll TRISE TRISE LL_I2C_GetRiseTime + * @param I2Cx I2C Instance. 
+ * @retval Value between Min_Data=0x02 and Max_Data=0x3F + */ +__STATIC_INLINE uint32_t LL_I2C_GetRiseTime(I2C_TypeDef *I2Cx) +{ + return (uint32_t)(READ_BIT(I2Cx->TRISE, I2C_TRISE_TRISE)); +} + +/** + * @brief Configure the SCL high and low period. + * @note This bit can only be programmed when the I2C is disabled (PE = 0). + * @rmtoll CCR CCR LL_I2C_SetClockPeriod + * @param I2Cx I2C Instance. + * @param ClockPeriod This parameter must be a value between Min_Data=0x004 and Max_Data=0xFFF, except in FAST DUTY mode where Min_Data=0x001. + * @retval None + */ +__STATIC_INLINE void LL_I2C_SetClockPeriod(I2C_TypeDef *I2Cx, uint32_t ClockPeriod) +{ + MODIFY_REG(I2Cx->CCR, I2C_CCR_CCR, ClockPeriod); +} + +/** + * @brief Get the SCL high and low period. + * @rmtoll CCR CCR LL_I2C_GetClockPeriod + * @param I2Cx I2C Instance. + * @retval Value between Min_Data=0x004 and Max_Data=0xFFF, except in FAST DUTY mode where Min_Data=0x001. + */ +__STATIC_INLINE uint32_t LL_I2C_GetClockPeriod(I2C_TypeDef *I2Cx) +{ + return (uint32_t)(READ_BIT(I2Cx->CCR, I2C_CCR_CCR)); +} + +/** + * @brief Configure the SCL speed. + * @note This bit can only be programmed when the I2C is disabled (PE = 0). + * @rmtoll CR2 FREQ LL_I2C_ConfigSpeed\n + * TRISE TRISE LL_I2C_ConfigSpeed\n + * CCR FS LL_I2C_ConfigSpeed\n + * CCR DUTY LL_I2C_ConfigSpeed\n + * CCR CCR LL_I2C_ConfigSpeed + * @param I2Cx I2C Instance. + * @param PeriphClock Peripheral Clock (in Hz) + * @param ClockSpeed This parameter must be a value lower than 400kHz (in Hz). + * @param DutyCycle This parameter can be one of the following values: + * @arg @ref LL_I2C_DUTYCYCLE_2 + * @arg @ref LL_I2C_DUTYCYCLE_16_9 + * @retval None + */ +__STATIC_INLINE void LL_I2C_ConfigSpeed(I2C_TypeDef *I2Cx, uint32_t PeriphClock, uint32_t ClockSpeed, + uint32_t DutyCycle) +{ + uint32_t freqrange = 0x0U; + uint32_t clockconfig = 0x0U; + + /* Compute frequency range */ + freqrange = __LL_I2C_FREQ_HZ_TO_MHZ(PeriphClock); + + /* Configure I2Cx: Frequency range register */ + MODIFY_REG(I2Cx->CR2, I2C_CR2_FREQ, freqrange); + + /* Configure I2Cx: Rise Time register */ + MODIFY_REG(I2Cx->TRISE, I2C_TRISE_TRISE, __LL_I2C_RISE_TIME(freqrange, ClockSpeed)); + + /* Configure Speed mode, Duty Cycle and Clock control register value */ + if (ClockSpeed > LL_I2C_MAX_SPEED_STANDARD) + { + /* Set Speed mode at fast and duty cycle for Clock Speed request in fast clock range */ + clockconfig = LL_I2C_CLOCK_SPEED_FAST_MODE | \ + __LL_I2C_SPEED_FAST_TO_CCR(PeriphClock, ClockSpeed, DutyCycle) | \ + DutyCycle; + } + else + { + /* Set Speed mode at standard for Clock Speed request in standard clock range */ + clockconfig = LL_I2C_CLOCK_SPEED_STANDARD_MODE | \ + __LL_I2C_SPEED_STANDARD_TO_CCR(PeriphClock, ClockSpeed); + } + + /* Configure I2Cx: Clock control register */ + MODIFY_REG(I2Cx->CCR, (I2C_CCR_FS | I2C_CCR_DUTY | I2C_CCR_CCR), clockconfig); +} + +/** + * @brief Configure peripheral mode. + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @rmtoll CR1 SMBUS LL_I2C_SetMode\n + * CR1 SMBTYPE LL_I2C_SetMode\n + * CR1 ENARP LL_I2C_SetMode + * @param I2Cx I2C Instance. 
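+ * @note Hedged bring-up sketch built on @ref LL_I2C_ConfigSpeed above (the
+ *       42 MHz APB1 clock and 100 kHz bus speed are assumptions, not values
+ *       mandated by this driver):
+ * @code
+ *   LL_I2C_Disable(I2C1);
+ *   LL_I2C_ConfigSpeed(I2C1, 42000000U, 100000U, LL_I2C_DUTYCYCLE_2);
+ *   LL_I2C_SetOwnAddress1(I2C1, 0x00U, LL_I2C_OWNADDRESS1_7BIT);
+ *   LL_I2C_Enable(I2C1);
+ * @endcode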
+ * @param PeripheralMode This parameter can be one of the following values: + * @arg @ref LL_I2C_MODE_I2C + * @arg @ref LL_I2C_MODE_SMBUS_HOST + * @arg @ref LL_I2C_MODE_SMBUS_DEVICE + * @arg @ref LL_I2C_MODE_SMBUS_DEVICE_ARP + * @retval None + */ +__STATIC_INLINE void LL_I2C_SetMode(I2C_TypeDef *I2Cx, uint32_t PeripheralMode) +{ + MODIFY_REG(I2Cx->CR1, I2C_CR1_SMBUS | I2C_CR1_SMBTYPE | I2C_CR1_ENARP, PeripheralMode); +} + +/** + * @brief Get peripheral mode. + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @rmtoll CR1 SMBUS LL_I2C_GetMode\n + * CR1 SMBTYPE LL_I2C_GetMode\n + * CR1 ENARP LL_I2C_GetMode + * @param I2Cx I2C Instance. + * @retval Returned value can be one of the following values: + * @arg @ref LL_I2C_MODE_I2C + * @arg @ref LL_I2C_MODE_SMBUS_HOST + * @arg @ref LL_I2C_MODE_SMBUS_DEVICE + * @arg @ref LL_I2C_MODE_SMBUS_DEVICE_ARP + */ +__STATIC_INLINE uint32_t LL_I2C_GetMode(I2C_TypeDef *I2Cx) +{ + return (uint32_t)(READ_BIT(I2Cx->CR1, I2C_CR1_SMBUS | I2C_CR1_SMBTYPE | I2C_CR1_ENARP)); +} + +/** + * @brief Enable SMBus alert (Host or Device mode) + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @note SMBus Device mode: + * - SMBus Alert pin is drived low and + * Alert Response Address Header acknowledge is enabled. + * SMBus Host mode: + * - SMBus Alert pin management is supported. + * @rmtoll CR1 ALERT LL_I2C_EnableSMBusAlert + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableSMBusAlert(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR1, I2C_CR1_ALERT); +} + +/** + * @brief Disable SMBus alert (Host or Device mode) + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @note SMBus Device mode: + * - SMBus Alert pin is not drived (can be used as a standard GPIO) and + * Alert Response Address Header acknowledge is disabled. + * SMBus Host mode: + * - SMBus Alert pin management is not supported. + * @rmtoll CR1 ALERT LL_I2C_DisableSMBusAlert + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableSMBusAlert(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR1, I2C_CR1_ALERT); +} + +/** + * @brief Check if SMBus alert (Host or Device mode) is enabled or disabled. + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @rmtoll CR1 ALERT LL_I2C_IsEnabledSMBusAlert + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledSMBusAlert(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR1, I2C_CR1_ALERT) == (I2C_CR1_ALERT)); +} + +/** + * @brief Enable SMBus Packet Error Calculation (PEC). + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @rmtoll CR1 ENPEC LL_I2C_EnableSMBusPEC + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableSMBusPEC(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR1, I2C_CR1_ENPEC); +} + +/** + * @brief Disable SMBus Packet Error Calculation (PEC). + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @rmtoll CR1 ENPEC LL_I2C_DisableSMBusPEC + * @param I2Cx I2C Instance. 
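+ * @note Hedged SMBus host sketch combining the mode, alert and PEC helpers in
+ *       this group (SMBus support of the chosen instance should be checked
+ *       with IS_SMBUS_ALL_INSTANCE() as the notes above suggest):
+ * @code
+ *   LL_I2C_SetMode(I2C1, LL_I2C_MODE_SMBUS_HOST);
+ *   LL_I2C_EnableSMBusAlert(I2C1);
+ *   LL_I2C_EnableSMBusPEC(I2C1);
+ * @endcode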
+ * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableSMBusPEC(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR1, I2C_CR1_ENPEC); +} + +/** + * @brief Check if SMBus Packet Error Calculation (PEC) is enabled or disabled. + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @rmtoll CR1 ENPEC LL_I2C_IsEnabledSMBusPEC + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledSMBusPEC(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR1, I2C_CR1_ENPEC) == (I2C_CR1_ENPEC)); +} + +/** + * @} + */ + +/** @defgroup I2C_LL_EF_IT_Management IT_Management + * @{ + */ + +/** + * @brief Enable TXE interrupt. + * @rmtoll CR2 ITEVTEN LL_I2C_EnableIT_TX\n + * CR2 ITBUFEN LL_I2C_EnableIT_TX + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableIT_TX(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR2, I2C_CR2_ITEVTEN | I2C_CR2_ITBUFEN); +} + +/** + * @brief Disable TXE interrupt. + * @rmtoll CR2 ITEVTEN LL_I2C_DisableIT_TX\n + * CR2 ITBUFEN LL_I2C_DisableIT_TX + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableIT_TX(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR2, I2C_CR2_ITEVTEN | I2C_CR2_ITBUFEN); +} + +/** + * @brief Check if the TXE Interrupt is enabled or disabled. + * @rmtoll CR2 ITEVTEN LL_I2C_IsEnabledIT_TX\n + * CR2 ITBUFEN LL_I2C_IsEnabledIT_TX + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledIT_TX(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR2, I2C_CR2_ITEVTEN | I2C_CR2_ITBUFEN) == (I2C_CR2_ITEVTEN | I2C_CR2_ITBUFEN)); +} + +/** + * @brief Enable RXNE interrupt. + * @rmtoll CR2 ITEVTEN LL_I2C_EnableIT_RX\n + * CR2 ITBUFEN LL_I2C_EnableIT_RX + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableIT_RX(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR2, I2C_CR2_ITEVTEN | I2C_CR2_ITBUFEN); +} + +/** + * @brief Disable RXNE interrupt. + * @rmtoll CR2 ITEVTEN LL_I2C_DisableIT_RX\n + * CR2 ITBUFEN LL_I2C_DisableIT_RX + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableIT_RX(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR2, I2C_CR2_ITEVTEN | I2C_CR2_ITBUFEN); +} + +/** + * @brief Check if the RXNE Interrupt is enabled or disabled. + * @rmtoll CR2 ITEVTEN LL_I2C_IsEnabledIT_RX\n + * CR2 ITBUFEN LL_I2C_IsEnabledIT_RX + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledIT_RX(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR2, I2C_CR2_ITEVTEN | I2C_CR2_ITBUFEN) == (I2C_CR2_ITEVTEN | I2C_CR2_ITBUFEN)); +} + +/** + * @brief Enable Events interrupts. + * @note Any of these events will generate interrupt : + * Start Bit (SB) + * Address sent, Address matched (ADDR) + * 10-bit header sent (ADD10) + * Stop detection (STOPF) + * Byte transfer finished (BTF) + * + * @note Any of these events will generate interrupt if Buffer interrupts are enabled too(using unitary function @ref LL_I2C_EnableIT_BUF()) : + * Receive buffer not empty (RXNE) + * Transmit buffer empty (TXE) + * @rmtoll CR2 ITEVTEN LL_I2C_EnableIT_EVT + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableIT_EVT(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR2, I2C_CR2_ITEVTEN); +} + +/** + * @brief Disable Events interrupts. 
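+ * @note Hedged interrupt-enable sketch (the NVIC calls come from CMSIS and
+ *       the IRQ numbers are the usual STM32F4 ones, assumed here for
+ *       illustration):
+ * @code
+ *   LL_I2C_EnableIT_EVT(I2C1);
+ *   LL_I2C_EnableIT_BUF(I2C1);
+ *   LL_I2C_EnableIT_ERR(I2C1);
+ *   NVIC_EnableIRQ(I2C1_EV_IRQn);
+ *   NVIC_EnableIRQ(I2C1_ER_IRQn);
+ * @endcode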
+ * @note Any of these events will generate interrupt : + * Start Bit (SB) + * Address sent, Address matched (ADDR) + * 10-bit header sent (ADD10) + * Stop detection (STOPF) + * Byte transfer finished (BTF) + * Receive buffer not empty (RXNE) + * Transmit buffer empty (TXE) + * @rmtoll CR2 ITEVTEN LL_I2C_DisableIT_EVT + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableIT_EVT(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR2, I2C_CR2_ITEVTEN); +} + +/** + * @brief Check if Events interrupts are enabled or disabled. + * @rmtoll CR2 ITEVTEN LL_I2C_IsEnabledIT_EVT + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledIT_EVT(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR2, I2C_CR2_ITEVTEN) == (I2C_CR2_ITEVTEN)); +} + +/** + * @brief Enable Buffer interrupts. + * @note Any of these Buffer events will generate interrupt if Events interrupts are enabled too(using unitary function @ref LL_I2C_EnableIT_EVT()) : + * Receive buffer not empty (RXNE) + * Transmit buffer empty (TXE) + * @rmtoll CR2 ITBUFEN LL_I2C_EnableIT_BUF + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableIT_BUF(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR2, I2C_CR2_ITBUFEN); +} + +/** + * @brief Disable Buffer interrupts. + * @note Any of these Buffer events will generate interrupt : + * Receive buffer not empty (RXNE) + * Transmit buffer empty (TXE) + * @rmtoll CR2 ITBUFEN LL_I2C_DisableIT_BUF + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableIT_BUF(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR2, I2C_CR2_ITBUFEN); +} + +/** + * @brief Check if Buffer interrupts are enabled or disabled. + * @rmtoll CR2 ITBUFEN LL_I2C_IsEnabledIT_BUF + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledIT_BUF(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR2, I2C_CR2_ITBUFEN) == (I2C_CR2_ITBUFEN)); +} + +/** + * @brief Enable Error interrupts. + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @note Any of these errors will generate interrupt : + * Bus Error detection (BERR) + * Arbitration Loss (ARLO) + * Acknowledge Failure(AF) + * Overrun/Underrun (OVR) + * SMBus Timeout detection (TIMEOUT) + * SMBus PEC error detection (PECERR) + * SMBus Alert pin event detection (SMBALERT) + * @rmtoll CR2 ITERREN LL_I2C_EnableIT_ERR + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableIT_ERR(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR2, I2C_CR2_ITERREN); +} + +/** + * @brief Disable Error interrupts. + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @note Any of these errors will generate interrupt : + * Bus Error detection (BERR) + * Arbitration Loss (ARLO) + * Acknowledge Failure(AF) + * Overrun/Underrun (OVR) + * SMBus Timeout detection (TIMEOUT) + * SMBus PEC error detection (PECERR) + * SMBus Alert pin event detection (SMBALERT) + * @rmtoll CR2 ITERREN LL_I2C_DisableIT_ERR + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableIT_ERR(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR2, I2C_CR2_ITERREN); +} + +/** + * @brief Check if Error interrupts are enabled or disabled. + * @rmtoll CR2 ITERREN LL_I2C_IsEnabledIT_ERR + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledIT_ERR(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR2, I2C_CR2_ITERREN) == (I2C_CR2_ITERREN)); +} + +/** + * @} + */ + +/** @defgroup I2C_LL_EF_FLAG_management FLAG_management + * @{ + */ + +/** + * @brief Indicate the status of Transmit data register empty flag. + * @note RESET: When next data is written in Transmit data register. + * SET: When Transmit data register is empty. + * @rmtoll SR1 TXE LL_I2C_IsActiveFlag_TXE + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_TXE(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR1, I2C_SR1_TXE) == (I2C_SR1_TXE)); +} + +/** + * @brief Indicate the status of Byte Transfer Finished flag. + * RESET: When Data byte transfer not done. + * SET: When Data byte transfer succeeded. + * @rmtoll SR1 BTF LL_I2C_IsActiveFlag_BTF + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_BTF(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR1, I2C_SR1_BTF) == (I2C_SR1_BTF)); +} + +/** + * @brief Indicate the status of Receive data register not empty flag. + * @note RESET: When Receive data register is read. + * SET: When the received data is copied in Receive data register. + * @rmtoll SR1 RXNE LL_I2C_IsActiveFlag_RXNE + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_RXNE(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR1, I2C_SR1_RXNE) == (I2C_SR1_RXNE)); +} + +/** + * @brief Indicate the status of Start Bit (master mode). + * @note RESET: When No Start condition. + * SET: When Start condition is generated. + * @rmtoll SR1 SB LL_I2C_IsActiveFlag_SB + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_SB(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR1, I2C_SR1_SB) == (I2C_SR1_SB)); +} + +/** + * @brief Indicate the status of Address sent (master mode) or Address matched flag (slave mode). + * @note RESET: Clear default value. + * SET: When the address is fully sent (master mode) or when the received slave address matched with one of the enabled slave address (slave mode). + * @rmtoll SR1 ADDR LL_I2C_IsActiveFlag_ADDR + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_ADDR(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR1, I2C_SR1_ADDR) == (I2C_SR1_ADDR)); +} + +/** + * @brief Indicate the status of 10-bit header sent (master mode). + * @note RESET: When no ADD10 event occurred. + * SET: When the master has sent the first address byte (header). + * @rmtoll SR1 ADD10 LL_I2C_IsActiveFlag_ADD10 + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_ADD10(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR1, I2C_SR1_ADD10) == (I2C_SR1_ADD10)); +} + +/** + * @brief Indicate the status of Acknowledge failure flag. + * @note RESET: No acknowledge failure. + * SET: When an acknowledge failure is received after a byte transmission. + * @rmtoll SR1 AF LL_I2C_IsActiveFlag_AF + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_AF(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR1, I2C_SR1_AF) == (I2C_SR1_AF)); +} + +/** + * @brief Indicate the status of Stop detection flag (slave mode). + * @note RESET: Clear default value. + * SET: When a Stop condition is detected. 
+ * @rmtoll SR1 STOPF LL_I2C_IsActiveFlag_STOP + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_STOP(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR1, I2C_SR1_STOPF) == (I2C_SR1_STOPF)); +} + +/** + * @brief Indicate the status of Bus error flag. + * @note RESET: Clear default value. + * SET: When a misplaced Start or Stop condition is detected. + * @rmtoll SR1 BERR LL_I2C_IsActiveFlag_BERR + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_BERR(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR1, I2C_SR1_BERR) == (I2C_SR1_BERR)); +} + +/** + * @brief Indicate the status of Arbitration lost flag. + * @note RESET: Clear default value. + * SET: When arbitration lost. + * @rmtoll SR1 ARLO LL_I2C_IsActiveFlag_ARLO + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_ARLO(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR1, I2C_SR1_ARLO) == (I2C_SR1_ARLO)); +} + +/** + * @brief Indicate the status of Overrun/Underrun flag. + * @note RESET: Clear default value. + * SET: When an overrun/underrun error occurs (Clock Stretching Disabled). + * @rmtoll SR1 OVR LL_I2C_IsActiveFlag_OVR + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_OVR(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR1, I2C_SR1_OVR) == (I2C_SR1_OVR)); +} + +/** + * @brief Indicate the status of SMBus PEC error flag in reception. + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @rmtoll SR1 PECERR LL_I2C_IsActiveSMBusFlag_PECERR + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveSMBusFlag_PECERR(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR1, I2C_SR1_PECERR) == (I2C_SR1_PECERR)); +} + +/** + * @brief Indicate the status of SMBus Timeout detection flag. + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @rmtoll SR1 TIMEOUT LL_I2C_IsActiveSMBusFlag_TIMEOUT + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveSMBusFlag_TIMEOUT(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR1, I2C_SR1_TIMEOUT) == (I2C_SR1_TIMEOUT)); +} + +/** + * @brief Indicate the status of SMBus alert flag. + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @rmtoll SR1 SMBALERT LL_I2C_IsActiveSMBusFlag_ALERT + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveSMBusFlag_ALERT(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR1, I2C_SR1_SMBALERT) == (I2C_SR1_SMBALERT)); +} + +/** + * @brief Indicate the status of Bus Busy flag. + * @note RESET: Clear default value. + * SET: When a Start condition is detected. + * @rmtoll SR2 BUSY LL_I2C_IsActiveFlag_BUSY + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_BUSY(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR2, I2C_SR2_BUSY) == (I2C_SR2_BUSY)); +} + +/** + * @brief Indicate the status of Dual flag. + * @note RESET: Received address matched with OAR1. + * SET: Received address matched with OAR2. 
+ * @rmtoll SR2 DUALF LL_I2C_IsActiveFlag_DUAL + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_DUAL(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR2, I2C_SR2_DUALF) == (I2C_SR2_DUALF)); +} + +/** + * @brief Indicate the status of SMBus Host address reception (Slave mode). + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @note RESET: No SMBus Host address + * SET: SMBus Host address received. + * @note This status is cleared by hardware after a STOP condition or repeated START condition. + * @rmtoll SR2 SMBHOST LL_I2C_IsActiveSMBusFlag_SMBHOST + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveSMBusFlag_SMBHOST(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR2, I2C_SR2_SMBHOST) == (I2C_SR2_SMBHOST)); +} + +/** + * @brief Indicate the status of SMBus Device default address reception (Slave mode). + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @note RESET: No SMBus Device default address + * SET: SMBus Device default address received. + * @note This status is cleared by hardware after a STOP condition or repeated START condition. + * @rmtoll SR2 SMBDEFAULT LL_I2C_IsActiveSMBusFlag_SMBDEFAULT + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveSMBusFlag_SMBDEFAULT(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR2, I2C_SR2_SMBDEFAULT) == (I2C_SR2_SMBDEFAULT)); +} + +/** + * @brief Indicate the status of General call address reception (Slave mode). + * @note RESET: No General call address + * SET: General call address received. + * @note This status is cleared by hardware after a STOP condition or repeated START condition. + * @rmtoll SR2 GENCALL LL_I2C_IsActiveFlag_GENCALL + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_GENCALL(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR2, I2C_SR2_GENCALL) == (I2C_SR2_GENCALL)); +} + +/** + * @brief Indicate the status of Master/Slave flag. + * @note RESET: Slave Mode. + * SET: Master Mode. + * @rmtoll SR2 MSL LL_I2C_IsActiveFlag_MSL + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsActiveFlag_MSL(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->SR2, I2C_SR2_MSL) == (I2C_SR2_MSL)); +} + +/** + * @brief Clear Address Matched flag. + * @note Clearing this flag is done by a read access to the I2Cx_SR1 + * register followed by a read access to the I2Cx_SR2 register. + * @rmtoll SR1 ADDR LL_I2C_ClearFlag_ADDR + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_ClearFlag_ADDR(I2C_TypeDef *I2Cx) +{ + __IO uint32_t tmpreg; + tmpreg = I2Cx->SR1; + (void) tmpreg; + tmpreg = I2Cx->SR2; + (void) tmpreg; +} + +/** + * @brief Clear Acknowledge failure flag. + * @rmtoll SR1 AF LL_I2C_ClearFlag_AF + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_ClearFlag_AF(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->SR1, I2C_SR1_AF); +} + +/** + * @brief Clear Stop detection flag. + * @note Clearing this flag is done by a read access to the I2Cx_SR1 + * register followed by a write access to I2Cx_CR1 register. + * @rmtoll SR1 STOPF LL_I2C_ClearFlag_STOP\n + * CR1 PE LL_I2C_ClearFlag_STOP + * @param I2Cx I2C Instance. 
+ * @retval None + */ +__STATIC_INLINE void LL_I2C_ClearFlag_STOP(I2C_TypeDef *I2Cx) +{ + __IO uint32_t tmpreg; + tmpreg = I2Cx->SR1; + (void) tmpreg; + SET_BIT(I2Cx->CR1, I2C_CR1_PE); +} + +/** + * @brief Clear Bus error flag. + * @rmtoll SR1 BERR LL_I2C_ClearFlag_BERR + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_ClearFlag_BERR(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->SR1, I2C_SR1_BERR); +} + +/** + * @brief Clear Arbitration lost flag. + * @rmtoll SR1 ARLO LL_I2C_ClearFlag_ARLO + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_ClearFlag_ARLO(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->SR1, I2C_SR1_ARLO); +} + +/** + * @brief Clear Overrun/Underrun flag. + * @rmtoll SR1 OVR LL_I2C_ClearFlag_OVR + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_ClearFlag_OVR(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->SR1, I2C_SR1_OVR); +} + +/** + * @brief Clear SMBus PEC error flag. + * @rmtoll SR1 PECERR LL_I2C_ClearSMBusFlag_PECERR + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_ClearSMBusFlag_PECERR(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->SR1, I2C_SR1_PECERR); +} + +/** + * @brief Clear SMBus Timeout detection flag. + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @rmtoll SR1 TIMEOUT LL_I2C_ClearSMBusFlag_TIMEOUT + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_ClearSMBusFlag_TIMEOUT(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->SR1, I2C_SR1_TIMEOUT); +} + +/** + * @brief Clear SMBus Alert flag. + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @rmtoll SR1 SMBALERT LL_I2C_ClearSMBusFlag_ALERT + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_ClearSMBusFlag_ALERT(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->SR1, I2C_SR1_SMBALERT); +} + +/** + * @} + */ + +/** @defgroup I2C_LL_EF_Data_Management Data_Management + * @{ + */ + +/** + * @brief Enable Reset of I2C peripheral. + * @rmtoll CR1 SWRST LL_I2C_EnableReset + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableReset(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR1, I2C_CR1_SWRST); +} + +/** + * @brief Disable Reset of I2C peripheral. + * @rmtoll CR1 SWRST LL_I2C_DisableReset + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableReset(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR1, I2C_CR1_SWRST); +} + +/** + * @brief Check if the I2C peripheral is under reset state or not. + * @rmtoll CR1 SWRST LL_I2C_IsResetEnabled + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsResetEnabled(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR1, I2C_CR1_SWRST) == (I2C_CR1_SWRST)); +} + +/** + * @brief Prepare the generation of a ACKnowledge or Non ACKnowledge condition after the address receive match code or next received byte. + * @note Usage in Slave or Master mode. + * @rmtoll CR1 ACK LL_I2C_AcknowledgeNextData + * @param I2Cx I2C Instance. 
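+ * @note  Minimal master-receive set-up sketch (assumes I2C1 and a free bus,
+ *        error handling and timeouts omitted); acknowledge is armed before the
+ *        transfer is started:
+ * @code
+ *   LL_I2C_AcknowledgeNextData(I2C1, LL_I2C_ACK);   // ACK each received byte
+ *   LL_I2C_GenerateStartCondition(I2C1);
+ *   while (!LL_I2C_IsActiveFlag_SB(I2C1)) { }       // wait for the Start bit
+ * @endcode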
+ * @param TypeAcknowledge This parameter can be one of the following values: + * @arg @ref LL_I2C_ACK + * @arg @ref LL_I2C_NACK + * @retval None + */ +__STATIC_INLINE void LL_I2C_AcknowledgeNextData(I2C_TypeDef *I2Cx, uint32_t TypeAcknowledge) +{ + MODIFY_REG(I2Cx->CR1, I2C_CR1_ACK, TypeAcknowledge); +} + +/** + * @brief Generate a START or RESTART condition + * @note The START bit can be set even if bus is BUSY or I2C is in slave mode. + * This action has no effect when RELOAD is set. + * @rmtoll CR1 START LL_I2C_GenerateStartCondition + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_GenerateStartCondition(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR1, I2C_CR1_START); +} + +/** + * @brief Generate a STOP condition after the current byte transfer (master mode). + * @rmtoll CR1 STOP LL_I2C_GenerateStopCondition + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_GenerateStopCondition(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR1, I2C_CR1_STOP); +} + +/** + * @brief Enable bit POS (master/host mode). + * @note In that case, the ACK bit controls the (N)ACK of the next byte received or the PEC bit indicates that the next byte in shift register is a PEC. + * @rmtoll CR1 POS LL_I2C_EnableBitPOS + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableBitPOS(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR1, I2C_CR1_POS); +} + +/** + * @brief Disable bit POS (master/host mode). + * @note In that case, the ACK bit controls the (N)ACK of the current byte received or the PEC bit indicates that the current byte in shift register is a PEC. + * @rmtoll CR1 POS LL_I2C_DisableBitPOS + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableBitPOS(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR1, I2C_CR1_POS); +} + +/** + * @brief Check if bit POS is enabled or disabled. + * @rmtoll CR1 POS LL_I2C_IsEnabledBitPOS + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledBitPOS(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR1, I2C_CR1_POS) == (I2C_CR1_POS)); +} + +/** + * @brief Indicate the value of transfer direction. + * @note RESET: Bus is in read transfer (peripheral point of view). + * SET: Bus is in write transfer (peripheral point of view). + * @rmtoll SR2 TRA LL_I2C_GetTransferDirection + * @param I2Cx I2C Instance. + * @retval Returned value can be one of the following values: + * @arg @ref LL_I2C_DIRECTION_WRITE + * @arg @ref LL_I2C_DIRECTION_READ + */ +__STATIC_INLINE uint32_t LL_I2C_GetTransferDirection(I2C_TypeDef *I2Cx) +{ + return (uint32_t)(READ_BIT(I2Cx->SR2, I2C_SR2_TRA)); +} + +/** + * @brief Enable DMA last transfer. + * @note This action mean that next DMA EOT is the last transfer. + * @rmtoll CR2 LAST LL_I2C_EnableLastDMA + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableLastDMA(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR2, I2C_CR2_LAST); +} + +/** + * @brief Disable DMA last transfer. + * @note This action mean that next DMA EOT is not the last transfer. + * @rmtoll CR2 LAST LL_I2C_DisableLastDMA + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableLastDMA(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR2, I2C_CR2_LAST); +} + +/** + * @brief Check if DMA last transfer is enabled or disabled. + * @rmtoll CR2 LAST LL_I2C_IsEnabledLastDMA + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). 
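+ * @note  Typical use, shown as a sketch (assumes I2C1 with RX DMA already set up):
+ *        enabling "last DMA" makes the peripheral NACK the final byte of the next
+ *        DMA reception, as required to end a master read.
+ * @code
+ *   LL_I2C_EnableLastDMA(I2C1);   // next DMA EOT is the last transfer: NACK final byte
+ * @endcode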
+ */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledLastDMA(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR2, I2C_CR2_LAST) == (I2C_CR2_LAST)); +} + +/** + * @brief Enable transfer or internal comparison of the SMBus Packet Error byte (transmission or reception mode). + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @note This feature is cleared by hardware when the PEC byte is transferred or compared, + * or by a START or STOP condition, it is also cleared by software. + * @rmtoll CR1 PEC LL_I2C_EnableSMBusPECCompare + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_EnableSMBusPECCompare(I2C_TypeDef *I2Cx) +{ + SET_BIT(I2Cx->CR1, I2C_CR1_PEC); +} + +/** + * @brief Disable transfer or internal comparison of the SMBus Packet Error byte (transmission or reception mode). + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @rmtoll CR1 PEC LL_I2C_DisableSMBusPECCompare + * @param I2Cx I2C Instance. + * @retval None + */ +__STATIC_INLINE void LL_I2C_DisableSMBusPECCompare(I2C_TypeDef *I2Cx) +{ + CLEAR_BIT(I2Cx->CR1, I2C_CR1_PEC); +} + +/** + * @brief Check if the SMBus Packet Error byte transfer or internal comparison is requested or not. + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @rmtoll CR1 PEC LL_I2C_IsEnabledSMBusPECCompare + * @param I2Cx I2C Instance. + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2C_IsEnabledSMBusPECCompare(I2C_TypeDef *I2Cx) +{ + return (READ_BIT(I2Cx->CR1, I2C_CR1_PEC) == (I2C_CR1_PEC)); +} + +/** + * @brief Get the SMBus Packet Error byte calculated. + * @note Macro IS_SMBUS_ALL_INSTANCE(I2Cx) can be used to check whether or not + * SMBus feature is supported by the I2Cx Instance. + * @rmtoll SR2 PEC LL_I2C_GetSMBusPEC + * @param I2Cx I2C Instance. + * @retval Value between Min_Data=0x00 and Max_Data=0xFF + */ +__STATIC_INLINE uint32_t LL_I2C_GetSMBusPEC(I2C_TypeDef *I2Cx) +{ + return (uint32_t)(READ_BIT(I2Cx->SR2, I2C_SR2_PEC) >> I2C_SR2_PEC_Pos); +} + +/** + * @brief Read Receive Data register. + * @rmtoll DR DR LL_I2C_ReceiveData8 + * @param I2Cx I2C Instance. + * @retval Value between Min_Data=0x0 and Max_Data=0xFF + */ +__STATIC_INLINE uint8_t LL_I2C_ReceiveData8(I2C_TypeDef *I2Cx) +{ + return (uint8_t)(READ_BIT(I2Cx->DR, I2C_DR_DR)); +} + +/** + * @brief Write in Transmit Data Register . + * @rmtoll DR DR LL_I2C_TransmitData8 + * @param I2Cx I2C Instance. 
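+ * @note  Minimal polled transmission sketch (assumes I2C1, addressing already done
+ *        and ADDR cleared; timeouts omitted):
+ * @code
+ *   while (!LL_I2C_IsActiveFlag_TXE(I2C1)) { }   // wait for the data register to empty
+ *   LL_I2C_TransmitData8(I2C1, 0x42U);           // 0x42 is an arbitrary example byte
+ *   while (!LL_I2C_IsActiveFlag_BTF(I2C1)) { }   // wait for byte transfer finished
+ *   LL_I2C_GenerateStopCondition(I2C1);
+ * @endcode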
+ * @param Data Value between Min_Data=0x0 and Max_Data=0xFF + * @retval None + */ +__STATIC_INLINE void LL_I2C_TransmitData8(I2C_TypeDef *I2Cx, uint8_t Data) +{ + MODIFY_REG(I2Cx->DR, I2C_DR_DR, Data); +} + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup I2C_LL_EF_Init Initialization and de-initialization functions + * @{ + */ + +uint32_t LL_I2C_Init(I2C_TypeDef *I2Cx, LL_I2C_InitTypeDef *I2C_InitStruct); +uint32_t LL_I2C_DeInit(I2C_TypeDef *I2Cx); +void LL_I2C_StructInit(LL_I2C_InitTypeDef *I2C_InitStruct); + + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* I2C1 || I2C2 || I2C3 */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_I2C_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_pwr.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_pwr.h new file mode 100644 index 00000000..ea23dc52 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_pwr.h @@ -0,0 +1,985 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_pwr.h + * @author MCD Application Team + * @brief Header file of PWR LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_PWR_H +#define __STM32F4xx_LL_PWR_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined(PWR) + +/** @defgroup PWR_LL PWR + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup PWR_LL_Exported_Constants PWR Exported Constants + * @{ + */ + +/** @defgroup PWR_LL_EC_CLEAR_FLAG Clear Flags Defines + * @brief Flags defines which can be used with LL_PWR_WriteReg function + * @{ + */ +#define LL_PWR_CR_CSBF PWR_CR_CSBF /*!< Clear standby flag */ +#define LL_PWR_CR_CWUF PWR_CR_CWUF /*!< Clear wakeup flag */ +/** + * @} + */ + +/** @defgroup PWR_LL_EC_GET_FLAG Get Flags Defines + * @brief Flags defines which can be used with LL_PWR_ReadReg function + * @{ + */ +#define LL_PWR_CSR_WUF PWR_CSR_WUF /*!< Wakeup flag */ +#define LL_PWR_CSR_SBF PWR_CSR_SBF /*!< Standby flag */ +#define LL_PWR_CSR_PVDO PWR_CSR_PVDO /*!< Power voltage detector output flag */ +#define LL_PWR_CSR_VOS PWR_CSR_VOSRDY /*!< Voltage scaling select flag */ +#if defined(PWR_CSR_EWUP) +#define LL_PWR_CSR_EWUP1 PWR_CSR_EWUP /*!< Enable WKUP pin */ +#elif defined(PWR_CSR_EWUP1) +#define LL_PWR_CSR_EWUP1 PWR_CSR_EWUP1 /*!< Enable WKUP pin 1 */ +#endif /* PWR_CSR_EWUP */ +#if 
defined(PWR_CSR_EWUP2) +#define LL_PWR_CSR_EWUP2 PWR_CSR_EWUP2 /*!< Enable WKUP pin 2 */ +#endif /* PWR_CSR_EWUP2 */ +#if defined(PWR_CSR_EWUP3) +#define LL_PWR_CSR_EWUP3 PWR_CSR_EWUP3 /*!< Enable WKUP pin 3 */ +#endif /* PWR_CSR_EWUP3 */ +/** + * @} + */ + +/** @defgroup PWR_LL_EC_REGU_VOLTAGE Regulator Voltage + * @{ + */ +#if defined(PWR_CR_VOS_0) +#define LL_PWR_REGU_VOLTAGE_SCALE3 (PWR_CR_VOS_0) +#define LL_PWR_REGU_VOLTAGE_SCALE2 (PWR_CR_VOS_1) +#define LL_PWR_REGU_VOLTAGE_SCALE1 (PWR_CR_VOS_0 | PWR_CR_VOS_1) /* The SCALE1 is not available for STM32F401xx devices */ +#else +#define LL_PWR_REGU_VOLTAGE_SCALE1 (PWR_CR_VOS) +#define LL_PWR_REGU_VOLTAGE_SCALE2 0x00000000U +#endif /* PWR_CR_VOS_0 */ +/** + * @} + */ + +/** @defgroup PWR_LL_EC_MODE_PWR Mode Power + * @{ + */ +#define LL_PWR_MODE_STOP_MAINREGU 0x00000000U /*!< Enter Stop mode when the CPU enters deepsleep */ +#define LL_PWR_MODE_STOP_LPREGU (PWR_CR_LPDS) /*!< Enter Stop mode (with low power Regulator ON) when the CPU enters deepsleep */ +#if defined(PWR_CR_MRUDS) && defined(PWR_CR_LPUDS) && defined(PWR_CR_FPDS) +#define LL_PWR_MODE_STOP_MAINREGU_UNDERDRIVE (PWR_CR_MRUDS | PWR_CR_FPDS) /*!< Enter Stop mode (with main Regulator in under-drive mode) when the CPU enters deepsleep */ +#define LL_PWR_MODE_STOP_LPREGU_UNDERDRIVE (PWR_CR_LPDS | PWR_CR_LPUDS | PWR_CR_FPDS) /*!< Enter Stop mode (with low power Regulator in under-drive mode) when the CPU enters deepsleep */ +#endif /* PWR_CR_MRUDS && PWR_CR_LPUDS && PWR_CR_FPDS */ +#if defined(PWR_CR_MRLVDS) && defined(PWR_CR_LPLVDS) && defined(PWR_CR_FPDS) +#define LL_PWR_MODE_STOP_MAINREGU_DEEPSLEEP (PWR_CR_MRLVDS | PWR_CR_FPDS) /*!< Enter Stop mode (with main Regulator in Deep Sleep mode) when the CPU enters deepsleep */ +#define LL_PWR_MODE_STOP_LPREGU_DEEPSLEEP (PWR_CR_LPDS | PWR_CR_LPLVDS | PWR_CR_FPDS) /*!< Enter Stop mode (with low power Regulator in Deep Sleep mode) when the CPU enters deepsleep */ +#endif /* PWR_CR_MRLVDS && PWR_CR_LPLVDS && PWR_CR_FPDS */ +#define LL_PWR_MODE_STANDBY (PWR_CR_PDDS) /*!< Enter Standby mode when the CPU enters deepsleep */ +/** + * @} + */ + +/** @defgroup PWR_LL_EC_REGU_MODE_DS_MODE Regulator Mode In Deep Sleep Mode + * @{ + */ +#define LL_PWR_REGU_DSMODE_MAIN 0x00000000U /*!< Voltage Regulator in main mode during deepsleep mode */ +#define LL_PWR_REGU_DSMODE_LOW_POWER (PWR_CR_LPDS) /*!< Voltage Regulator in low-power mode during deepsleep mode */ +/** + * @} + */ + +/** @defgroup PWR_LL_EC_PVDLEVEL Power Voltage Detector Level + * @{ + */ +#define LL_PWR_PVDLEVEL_0 (PWR_CR_PLS_LEV0) /*!< Voltage threshold detected by PVD 2.2 V */ +#define LL_PWR_PVDLEVEL_1 (PWR_CR_PLS_LEV1) /*!< Voltage threshold detected by PVD 2.3 V */ +#define LL_PWR_PVDLEVEL_2 (PWR_CR_PLS_LEV2) /*!< Voltage threshold detected by PVD 2.4 V */ +#define LL_PWR_PVDLEVEL_3 (PWR_CR_PLS_LEV3) /*!< Voltage threshold detected by PVD 2.5 V */ +#define LL_PWR_PVDLEVEL_4 (PWR_CR_PLS_LEV4) /*!< Voltage threshold detected by PVD 2.6 V */ +#define LL_PWR_PVDLEVEL_5 (PWR_CR_PLS_LEV5) /*!< Voltage threshold detected by PVD 2.7 V */ +#define LL_PWR_PVDLEVEL_6 (PWR_CR_PLS_LEV6) /*!< Voltage threshold detected by PVD 2.8 V */ +#define LL_PWR_PVDLEVEL_7 (PWR_CR_PLS_LEV7) /*!< Voltage threshold detected by PVD 2.9 V */ +/** + * @} + */ +/** @defgroup PWR_LL_EC_WAKEUP_PIN Wakeup Pins + * @{ + */ +#if defined(PWR_CSR_EWUP) +#define LL_PWR_WAKEUP_PIN1 (PWR_CSR_EWUP) /*!< WKUP pin : PA0 */ +#endif /* PWR_CSR_EWUP */ +#if defined(PWR_CSR_EWUP1) +#define LL_PWR_WAKEUP_PIN1 (PWR_CSR_EWUP1) /*!< WKUP 
pin 1 : PA0 */ +#endif /* PWR_CSR_EWUP1 */ +#if defined(PWR_CSR_EWUP2) +#define LL_PWR_WAKEUP_PIN2 (PWR_CSR_EWUP2) /*!< WKUP pin 2 : PC0 or PC13 according to device */ +#endif /* PWR_CSR_EWUP2 */ +#if defined(PWR_CSR_EWUP3) +#define LL_PWR_WAKEUP_PIN3 (PWR_CSR_EWUP3) /*!< WKUP pin 3 : PC1 */ +#endif /* PWR_CSR_EWUP3 */ +/** + * @} + */ + +/** + * @} + */ + + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup PWR_LL_Exported_Macros PWR Exported Macros + * @{ + */ + +/** @defgroup PWR_LL_EM_WRITE_READ Common write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in PWR register + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_PWR_WriteReg(__REG__, __VALUE__) WRITE_REG(PWR->__REG__, (__VALUE__)) + +/** + * @brief Read a value in PWR register + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_PWR_ReadReg(__REG__) READ_REG(PWR->__REG__) +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup PWR_LL_Exported_Functions PWR Exported Functions + * @{ + */ + +/** @defgroup PWR_LL_EF_Configuration Configuration + * @{ + */ +#if defined(PWR_CR_FISSR) +/** + * @brief Enable FLASH interface STOP while system Run is ON + * @rmtoll CR FISSR LL_PWR_EnableFLASHInterfaceSTOP + * @note This mode is enabled only with STOP low power mode. + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableFLASHInterfaceSTOP(void) +{ + SET_BIT(PWR->CR, PWR_CR_FISSR); +} + +/** + * @brief Disable FLASH Interface STOP while system Run is ON + * @rmtoll CR FISSR LL_PWR_DisableFLASHInterfaceSTOP + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableFLASHInterfaceSTOP(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_FISSR); +} + +/** + * @brief Check if FLASH Interface STOP while system Run feature is enabled + * @rmtoll CR FISSR LL_PWR_IsEnabledFLASHInterfaceSTOP + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledFLASHInterfaceSTOP(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_FISSR) == (PWR_CR_FISSR)); +} +#endif /* PWR_CR_FISSR */ + +#if defined(PWR_CR_FMSSR) +/** + * @brief Enable FLASH Memory STOP while system Run is ON + * @rmtoll CR FMSSR LL_PWR_EnableFLASHMemorySTOP + * @note This mode is enabled only with STOP low power mode. + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableFLASHMemorySTOP(void) +{ + SET_BIT(PWR->CR, PWR_CR_FMSSR); +} + +/** + * @brief Disable FLASH Memory STOP while system Run is ON + * @rmtoll CR FMSSR LL_PWR_DisableFLASHMemorySTOP + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableFLASHMemorySTOP(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_FMSSR); +} + +/** + * @brief Check if FLASH Memory STOP while system Run feature is enabled + * @rmtoll CR FMSSR LL_PWR_IsEnabledFLASHMemorySTOP + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledFLASHMemorySTOP(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_FMSSR) == (PWR_CR_FMSSR)); +} +#endif /* PWR_CR_FMSSR */ +#if defined(PWR_CR_UDEN) +/** + * @brief Enable Under Drive Mode + * @rmtoll CR UDEN LL_PWR_EnableUnderDriveMode + * @note This mode is enabled only with STOP low power mode. + * In this mode, the 1.2V domain is preserved in reduced leakage mode. This + * mode is only available when the main Regulator or the low power Regulator + * is in low voltage mode. 
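+ * @note  Sketch of entering Stop with under-drive (only on devices with
+ *        PWR_CR_UDEN/PWR_CR_MRUDS; wake-up and clock restore handled elsewhere):
+ * @code
+ *   LL_PWR_EnableUnderDriveMode();
+ *   LL_PWR_SetPowerMode(LL_PWR_MODE_STOP_MAINREGU_UNDERDRIVE);
+ *   SCB->SCR |= SCB_SCR_SLEEPDEEP_Msk;   // CMSIS: select deepsleep
+ *   __WFI();
+ * @endcode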
+ * @note If the Under-drive mode was enabled, it is automatically disabled after + * exiting Stop mode. + * When the voltage Regulator operates in Under-drive mode, an additional + * startup delay is induced when waking up from Stop mode. + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableUnderDriveMode(void) +{ + SET_BIT(PWR->CR, PWR_CR_UDEN); +} + +/** + * @brief Disable Under Drive Mode + * @rmtoll CR UDEN LL_PWR_DisableUnderDriveMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableUnderDriveMode(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_UDEN); +} + +/** + * @brief Check if Under Drive Mode is enabled + * @rmtoll CR UDEN LL_PWR_IsEnabledUnderDriveMode + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledUnderDriveMode(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_UDEN) == (PWR_CR_UDEN)); +} +#endif /* PWR_CR_UDEN */ + +#if defined(PWR_CR_ODSWEN) +/** + * @brief Enable Over drive switching + * @rmtoll CR ODSWEN LL_PWR_EnableOverDriveSwitching + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableOverDriveSwitching(void) +{ + SET_BIT(PWR->CR, PWR_CR_ODSWEN); +} + +/** + * @brief Disable Over drive switching + * @rmtoll CR ODSWEN LL_PWR_DisableOverDriveSwitching + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableOverDriveSwitching(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_ODSWEN); +} + +/** + * @brief Check if Over drive switching is enabled + * @rmtoll CR ODSWEN LL_PWR_IsEnabledOverDriveSwitching + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledOverDriveSwitching(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_ODSWEN) == (PWR_CR_ODSWEN)); +} +#endif /* PWR_CR_ODSWEN */ +#if defined(PWR_CR_ODEN) +/** + * @brief Enable Over drive Mode + * @rmtoll CR ODEN LL_PWR_EnableOverDriveMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableOverDriveMode(void) +{ + SET_BIT(PWR->CR, PWR_CR_ODEN); +} + +/** + * @brief Disable Over drive Mode + * @rmtoll CR ODEN LL_PWR_DisableOverDriveMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableOverDriveMode(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_ODEN); +} + +/** + * @brief Check if Over drive switching is enabled + * @rmtoll CR ODEN LL_PWR_IsEnabledOverDriveMode + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledOverDriveMode(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_ODEN) == (PWR_CR_ODEN)); +} +#endif /* PWR_CR_ODEN */ +#if defined(PWR_CR_MRUDS) +/** + * @brief Enable Main Regulator in deepsleep under-drive Mode + * @rmtoll CR MRUDS LL_PWR_EnableMainRegulatorDeepSleepUDMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableMainRegulatorDeepSleepUDMode(void) +{ + SET_BIT(PWR->CR, PWR_CR_MRUDS); +} + +/** + * @brief Disable Main Regulator in deepsleep under-drive Mode + * @rmtoll CR MRUDS LL_PWR_DisableMainRegulatorDeepSleepUDMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableMainRegulatorDeepSleepUDMode(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_MRUDS); +} + +/** + * @brief Check if Main Regulator in deepsleep under-drive Mode is enabled + * @rmtoll CR MRUDS LL_PWR_IsEnabledMainRegulatorDeepSleepUDMode + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledMainRegulatorDeepSleepUDMode(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_MRUDS) == (PWR_CR_MRUDS)); +} +#endif /* PWR_CR_MRUDS */ + +#if defined(PWR_CR_LPUDS) +/** + * @brief Enable Low Power Regulator in deepsleep under-drive Mode + * @rmtoll CR LPUDS LL_PWR_EnableLowPowerRegulatorDeepSleepUDMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableLowPowerRegulatorDeepSleepUDMode(void) +{ + SET_BIT(PWR->CR, PWR_CR_LPUDS); +} + +/** + * @brief Disable Low Power Regulator in deepsleep under-drive Mode + * @rmtoll CR LPUDS LL_PWR_DisableLowPowerRegulatorDeepSleepUDMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableLowPowerRegulatorDeepSleepUDMode(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_LPUDS); +} + +/** + * @brief Check if Low Power Regulator in deepsleep under-drive Mode is enabled + * @rmtoll CR LPUDS LL_PWR_IsEnabledLowPowerRegulatorDeepSleepUDMode + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledLowPowerRegulatorDeepSleepUDMode(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_LPUDS) == (PWR_CR_LPUDS)); +} +#endif /* PWR_CR_LPUDS */ + +#if defined(PWR_CR_MRLVDS) +/** + * @brief Enable Main Regulator low voltage Mode + * @rmtoll CR MRLVDS LL_PWR_EnableMainRegulatorLowVoltageMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableMainRegulatorLowVoltageMode(void) +{ + SET_BIT(PWR->CR, PWR_CR_MRLVDS); +} + +/** + * @brief Disable Main Regulator low voltage Mode + * @rmtoll CR MRLVDS LL_PWR_DisableMainRegulatorLowVoltageMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableMainRegulatorLowVoltageMode(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_MRLVDS); +} + +/** + * @brief Check if Main Regulator low voltage Mode is enabled + * @rmtoll CR MRLVDS LL_PWR_IsEnabledMainRegulatorLowVoltageMode + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledMainRegulatorLowVoltageMode(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_MRLVDS) == (PWR_CR_MRLVDS)); +} +#endif /* PWR_CR_MRLVDS */ + +#if defined(PWR_CR_LPLVDS) +/** + * @brief Enable Low Power Regulator low voltage Mode + * @rmtoll CR LPLVDS LL_PWR_EnableLowPowerRegulatorLowVoltageMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableLowPowerRegulatorLowVoltageMode(void) +{ + SET_BIT(PWR->CR, PWR_CR_LPLVDS); +} + +/** + * @brief Disable Low Power Regulator low voltage Mode + * @rmtoll CR LPLVDS LL_PWR_DisableLowPowerRegulatorLowVoltageMode + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableLowPowerRegulatorLowVoltageMode(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_LPLVDS); +} + +/** + * @brief Check if Low Power Regulator low voltage Mode is enabled + * @rmtoll CR LPLVDS LL_PWR_IsEnabledLowPowerRegulatorLowVoltageMode + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledLowPowerRegulatorLowVoltageMode(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_LPLVDS) == (PWR_CR_LPLVDS)); +} +#endif /* PWR_CR_LPLVDS */ +/** + * @brief Set the main internal Regulator output voltage + * @rmtoll CR VOS LL_PWR_SetRegulVoltageScaling + * @param VoltageScaling This parameter can be one of the following values: + * @arg @ref LL_PWR_REGU_VOLTAGE_SCALE1 (*) + * @arg @ref LL_PWR_REGU_VOLTAGE_SCALE2 + * @arg @ref LL_PWR_REGU_VOLTAGE_SCALE3 + * (*) LL_PWR_REGU_VOLTAGE_SCALE1 is not available for STM32F401xx devices + * @retval None + */ +__STATIC_INLINE void LL_PWR_SetRegulVoltageScaling(uint32_t VoltageScaling) +{ + MODIFY_REG(PWR->CR, PWR_CR_VOS, VoltageScaling); +} + +/** + * @brief Get the main internal Regulator output voltage + * @rmtoll CR VOS LL_PWR_GetRegulVoltageScaling + * @retval Returned value can be one of the following values: + * @arg @ref LL_PWR_REGU_VOLTAGE_SCALE1 (*) + * @arg @ref LL_PWR_REGU_VOLTAGE_SCALE2 + * @arg @ref LL_PWR_REGU_VOLTAGE_SCALE3 + * (*) LL_PWR_REGU_VOLTAGE_SCALE1 is not available for STM32F401xx devices + */ +__STATIC_INLINE uint32_t LL_PWR_GetRegulVoltageScaling(void) +{ + return (uint32_t)(READ_BIT(PWR->CR, PWR_CR_VOS)); +} +/** + * @brief Enable the Flash Power Down in Stop Mode + * @rmtoll CR FPDS LL_PWR_EnableFlashPowerDown + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableFlashPowerDown(void) +{ + SET_BIT(PWR->CR, PWR_CR_FPDS); +} + +/** + * @brief Disable the Flash Power Down in Stop Mode + * @rmtoll CR FPDS LL_PWR_DisableFlashPowerDown + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableFlashPowerDown(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_FPDS); +} + +/** + * @brief Check if the Flash Power Down in Stop Mode is enabled + * @rmtoll CR FPDS LL_PWR_IsEnabledFlashPowerDown + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledFlashPowerDown(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_FPDS) == (PWR_CR_FPDS)); +} + +/** + * @brief Enable access to the backup domain + * @rmtoll CR DBP LL_PWR_EnableBkUpAccess + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableBkUpAccess(void) +{ + SET_BIT(PWR->CR, PWR_CR_DBP); +} + +/** + * @brief Disable access to the backup domain + * @rmtoll CR DBP LL_PWR_DisableBkUpAccess + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableBkUpAccess(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_DBP); +} + +/** + * @brief Check if the backup domain is enabled + * @rmtoll CR DBP LL_PWR_IsEnabledBkUpAccess + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledBkUpAccess(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_DBP) == (PWR_CR_DBP)); +} +/** + * @brief Enable the backup Regulator + * @rmtoll CSR BRE LL_PWR_EnableBkUpRegulator + * @note The BRE bit of the PWR_CSR register is protected against parasitic write access. + * The LL_PWR_EnableBkUpAccess() must be called before using this API. + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableBkUpRegulator(void) +{ + SET_BIT(PWR->CSR, PWR_CSR_BRE); +} + +/** + * @brief Disable the backup Regulator + * @rmtoll CSR BRE LL_PWR_DisableBkUpRegulator + * @note The BRE bit of the PWR_CSR register is protected against parasitic write access. + * The LL_PWR_EnableBkUpAccess() must be called before using this API. 
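+ * @note  Typical enable sequence, as a sketch (assumes the PWR APB1 clock is
+ *        already on; mirrors the access-protection note above):
+ * @code
+ *   LL_PWR_EnableBkUpAccess();              // unlock the backup domain (DBP)
+ *   LL_PWR_EnableBkUpRegulator();           // request the backup regulator
+ *   while (!LL_PWR_IsActiveFlag_BRR()) { }  // wait until it is ready
+ * @endcode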
+ * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableBkUpRegulator(void) +{ + CLEAR_BIT(PWR->CSR, PWR_CSR_BRE); +} + +/** + * @brief Check if the backup Regulator is enabled + * @rmtoll CSR BRE LL_PWR_IsEnabledBkUpRegulator + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledBkUpRegulator(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_BRE) == (PWR_CSR_BRE)); +} + +/** + * @brief Set voltage Regulator mode during deep sleep mode + * @rmtoll CR LPDS LL_PWR_SetRegulModeDS + * @param RegulMode This parameter can be one of the following values: + * @arg @ref LL_PWR_REGU_DSMODE_MAIN + * @arg @ref LL_PWR_REGU_DSMODE_LOW_POWER + * @retval None + */ +__STATIC_INLINE void LL_PWR_SetRegulModeDS(uint32_t RegulMode) +{ + MODIFY_REG(PWR->CR, PWR_CR_LPDS, RegulMode); +} + +/** + * @brief Get voltage Regulator mode during deep sleep mode + * @rmtoll CR LPDS LL_PWR_GetRegulModeDS + * @retval Returned value can be one of the following values: + * @arg @ref LL_PWR_REGU_DSMODE_MAIN + * @arg @ref LL_PWR_REGU_DSMODE_LOW_POWER + */ +__STATIC_INLINE uint32_t LL_PWR_GetRegulModeDS(void) +{ + return (uint32_t)(READ_BIT(PWR->CR, PWR_CR_LPDS)); +} + +/** + * @brief Set Power Down mode when CPU enters deepsleep + * @rmtoll CR PDDS LL_PWR_SetPowerMode\n + * @rmtoll CR MRUDS LL_PWR_SetPowerMode\n + * @rmtoll CR LPUDS LL_PWR_SetPowerMode\n + * @rmtoll CR FPDS LL_PWR_SetPowerMode\n + * @rmtoll CR MRLVDS LL_PWR_SetPowerMode\n + * @rmtoll CR LPlVDS LL_PWR_SetPowerMode\n + * @rmtoll CR FPDS LL_PWR_SetPowerMode\n + * @rmtoll CR LPDS LL_PWR_SetPowerMode + * @param PDMode This parameter can be one of the following values: + * @arg @ref LL_PWR_MODE_STOP_MAINREGU + * @arg @ref LL_PWR_MODE_STOP_LPREGU + * @arg @ref LL_PWR_MODE_STOP_MAINREGU_UNDERDRIVE (*) + * @arg @ref LL_PWR_MODE_STOP_LPREGU_UNDERDRIVE (*) + * @arg @ref LL_PWR_MODE_STOP_MAINREGU_DEEPSLEEP (*) + * @arg @ref LL_PWR_MODE_STOP_LPREGU_DEEPSLEEP (*) + * + * (*) not available on all devices + * @arg @ref LL_PWR_MODE_STANDBY + * @retval None + */ +__STATIC_INLINE void LL_PWR_SetPowerMode(uint32_t PDMode) +{ +#if defined(PWR_CR_MRUDS) && defined(PWR_CR_LPUDS) && defined(PWR_CR_FPDS) + MODIFY_REG(PWR->CR, (PWR_CR_PDDS | PWR_CR_LPDS | PWR_CR_FPDS | PWR_CR_LPUDS | PWR_CR_MRUDS), PDMode); +#elif defined(PWR_CR_MRLVDS) && defined(PWR_CR_LPLVDS) && defined(PWR_CR_FPDS) + MODIFY_REG(PWR->CR, (PWR_CR_PDDS | PWR_CR_LPDS | PWR_CR_FPDS | PWR_CR_LPLVDS | PWR_CR_MRLVDS), PDMode); +#else + MODIFY_REG(PWR->CR, (PWR_CR_PDDS| PWR_CR_LPDS), PDMode); +#endif /* PWR_CR_MRUDS && PWR_CR_LPUDS && PWR_CR_FPDS */ +} + +/** + * @brief Get Power Down mode when CPU enters deepsleep + * @rmtoll CR PDDS LL_PWR_GetPowerMode\n + * @rmtoll CR MRUDS LL_PWR_GetPowerMode\n + * @rmtoll CR LPUDS LL_PWR_GetPowerMode\n + * @rmtoll CR FPDS LL_PWR_GetPowerMode\n + * @rmtoll CR MRLVDS LL_PWR_GetPowerMode\n + * @rmtoll CR LPLVDS LL_PWR_GetPowerMode\n + * @rmtoll CR FPDS LL_PWR_GetPowerMode\n + * @rmtoll CR LPDS LL_PWR_GetPowerMode + * @retval Returned value can be one of the following values: + * @arg @ref LL_PWR_MODE_STOP_MAINREGU + * @arg @ref LL_PWR_MODE_STOP_LPREGU + * @arg @ref LL_PWR_MODE_STOP_MAINREGU_UNDERDRIVE (*) + * @arg @ref LL_PWR_MODE_STOP_LPREGU_UNDERDRIVE (*) + * @arg @ref LL_PWR_MODE_STOP_MAINREGU_DEEPSLEEP (*) + * @arg @ref LL_PWR_MODE_STOP_LPREGU_DEEPSLEEP (*) + * + * (*) not available on all devices + * @arg @ref LL_PWR_MODE_STANDBY + */ +__STATIC_INLINE uint32_t LL_PWR_GetPowerMode(void) +{ +#if defined(PWR_CR_MRUDS) && defined(PWR_CR_LPUDS) && 
defined(PWR_CR_FPDS) + return (uint32_t)(READ_BIT(PWR->CR, (PWR_CR_PDDS | PWR_CR_LPDS | PWR_CR_FPDS | PWR_CR_LPUDS | PWR_CR_MRUDS))); +#elif defined(PWR_CR_MRLVDS) && defined(PWR_CR_LPLVDS) && defined(PWR_CR_FPDS) + return (uint32_t)(READ_BIT(PWR->CR, (PWR_CR_PDDS | PWR_CR_LPDS | PWR_CR_FPDS | PWR_CR_LPLVDS | PWR_CR_MRLVDS))); +#else + return (uint32_t)(READ_BIT(PWR->CR, (PWR_CR_PDDS| PWR_CR_LPDS))); +#endif /* PWR_CR_MRUDS && PWR_CR_LPUDS && PWR_CR_FPDS */ +} + +/** + * @brief Configure the voltage threshold detected by the Power Voltage Detector + * @rmtoll CR PLS LL_PWR_SetPVDLevel + * @param PVDLevel This parameter can be one of the following values: + * @arg @ref LL_PWR_PVDLEVEL_0 + * @arg @ref LL_PWR_PVDLEVEL_1 + * @arg @ref LL_PWR_PVDLEVEL_2 + * @arg @ref LL_PWR_PVDLEVEL_3 + * @arg @ref LL_PWR_PVDLEVEL_4 + * @arg @ref LL_PWR_PVDLEVEL_5 + * @arg @ref LL_PWR_PVDLEVEL_6 + * @arg @ref LL_PWR_PVDLEVEL_7 + * @retval None + */ +__STATIC_INLINE void LL_PWR_SetPVDLevel(uint32_t PVDLevel) +{ + MODIFY_REG(PWR->CR, PWR_CR_PLS, PVDLevel); +} + +/** + * @brief Get the voltage threshold detection + * @rmtoll CR PLS LL_PWR_GetPVDLevel + * @retval Returned value can be one of the following values: + * @arg @ref LL_PWR_PVDLEVEL_0 + * @arg @ref LL_PWR_PVDLEVEL_1 + * @arg @ref LL_PWR_PVDLEVEL_2 + * @arg @ref LL_PWR_PVDLEVEL_3 + * @arg @ref LL_PWR_PVDLEVEL_4 + * @arg @ref LL_PWR_PVDLEVEL_5 + * @arg @ref LL_PWR_PVDLEVEL_6 + * @arg @ref LL_PWR_PVDLEVEL_7 + */ +__STATIC_INLINE uint32_t LL_PWR_GetPVDLevel(void) +{ + return (uint32_t)(READ_BIT(PWR->CR, PWR_CR_PLS)); +} + +/** + * @brief Enable Power Voltage Detector + * @rmtoll CR PVDE LL_PWR_EnablePVD + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnablePVD(void) +{ + SET_BIT(PWR->CR, PWR_CR_PVDE); +} + +/** + * @brief Disable Power Voltage Detector + * @rmtoll CR PVDE LL_PWR_DisablePVD + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisablePVD(void) +{ + CLEAR_BIT(PWR->CR, PWR_CR_PVDE); +} + +/** + * @brief Check if Power Voltage Detector is enabled + * @rmtoll CR PVDE LL_PWR_IsEnabledPVD + * @retval State of bit (1 or 0). 
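+ * @note  Minimal PVD set-up sketch (the threshold value is only an example;
+ *        PVD EXTI line and interrupt wiring are not shown):
+ * @code
+ *   LL_PWR_SetPVDLevel(LL_PWR_PVDLEVEL_5);   // e.g. 2.7 V threshold
+ *   LL_PWR_EnablePVD();
+ *   if (LL_PWR_IsActiveFlag_PVDO()) { }      // VDD currently below the threshold
+ * @endcode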
+ */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledPVD(void) +{ + return (READ_BIT(PWR->CR, PWR_CR_PVDE) == (PWR_CR_PVDE)); +} + +/** + * @brief Enable the WakeUp PINx functionality + * @rmtoll CSR EWUP LL_PWR_EnableWakeUpPin\n + * @rmtoll CSR EWUP1 LL_PWR_EnableWakeUpPin\n + * @rmtoll CSR EWUP2 LL_PWR_EnableWakeUpPin\n + * @rmtoll CSR EWUP3 LL_PWR_EnableWakeUpPin + * @param WakeUpPin This parameter can be one of the following values: + * @arg @ref LL_PWR_WAKEUP_PIN1 + * @arg @ref LL_PWR_WAKEUP_PIN2 (*) + * @arg @ref LL_PWR_WAKEUP_PIN3 (*) + * + * (*) not available on all devices + * @retval None + */ +__STATIC_INLINE void LL_PWR_EnableWakeUpPin(uint32_t WakeUpPin) +{ + SET_BIT(PWR->CSR, WakeUpPin); +} + +/** + * @brief Disable the WakeUp PINx functionality + * @rmtoll CSR EWUP LL_PWR_DisableWakeUpPin\n + * @rmtoll CSR EWUP1 LL_PWR_DisableWakeUpPin\n + * @rmtoll CSR EWUP2 LL_PWR_DisableWakeUpPin\n + * @rmtoll CSR EWUP3 LL_PWR_DisableWakeUpPin + * @param WakeUpPin This parameter can be one of the following values: + * @arg @ref LL_PWR_WAKEUP_PIN1 + * @arg @ref LL_PWR_WAKEUP_PIN2 (*) + * @arg @ref LL_PWR_WAKEUP_PIN3 (*) + * + * (*) not available on all devices + * @retval None + */ +__STATIC_INLINE void LL_PWR_DisableWakeUpPin(uint32_t WakeUpPin) +{ + CLEAR_BIT(PWR->CSR, WakeUpPin); +} + +/** + * @brief Check if the WakeUp PINx functionality is enabled + * @rmtoll CSR EWUP LL_PWR_IsEnabledWakeUpPin\n + * @rmtoll CSR EWUP1 LL_PWR_IsEnabledWakeUpPin\n + * @rmtoll CSR EWUP2 LL_PWR_IsEnabledWakeUpPin\n + * @rmtoll CSR EWUP3 LL_PWR_IsEnabledWakeUpPin + * @param WakeUpPin This parameter can be one of the following values: + * @arg @ref LL_PWR_WAKEUP_PIN1 + * @arg @ref LL_PWR_WAKEUP_PIN2 (*) + * @arg @ref LL_PWR_WAKEUP_PIN3 (*) + * + * (*) not available on all devices + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsEnabledWakeUpPin(uint32_t WakeUpPin) +{ + return (READ_BIT(PWR->CSR, WakeUpPin) == (WakeUpPin)); +} + + +/** + * @} + */ + +/** @defgroup PWR_LL_EF_FLAG_Management FLAG_Management + * @{ + */ + +/** + * @brief Get Wake-up Flag + * @rmtoll CSR WUF LL_PWR_IsActiveFlag_WU + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_WU(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_WUF) == (PWR_CSR_WUF)); +} + +/** + * @brief Get Standby Flag + * @rmtoll CSR SBF LL_PWR_IsActiveFlag_SB + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_SB(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_SBF) == (PWR_CSR_SBF)); +} + +/** + * @brief Get Backup Regulator ready Flag + * @rmtoll CSR BRR LL_PWR_IsActiveFlag_BRR + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_BRR(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_BRR) == (PWR_CSR_BRR)); +} +/** + * @brief Indicate whether VDD voltage is below the selected PVD threshold + * @rmtoll CSR PVDO LL_PWR_IsActiveFlag_PVDO + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_PVDO(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_PVDO) == (PWR_CSR_PVDO)); +} + +/** + * @brief Indicate whether the Regulator is ready in the selected voltage range or if its output voltage is still changing to the required voltage level + * @rmtoll CSR VOS LL_PWR_IsActiveFlag_VOS + * @retval State of bit (1 or 0). 
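+ * @note  Typical voltage-scaling sketch (LL_PWR_REGU_VOLTAGE_SCALE1 is not available
+ *        on STM32F401xx; on several devices the flag only reports ready once the PLL is on):
+ * @code
+ *   LL_PWR_SetRegulVoltageScaling(LL_PWR_REGU_VOLTAGE_SCALE1);
+ *   while (LL_PWR_IsActiveFlag_VOS() == 0U) { }   // wait for the regulator output to settle
+ * @endcode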
+ */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_VOS(void) +{ + return (READ_BIT(PWR->CSR, LL_PWR_CSR_VOS) == (LL_PWR_CSR_VOS)); +} +#if defined(PWR_CR_ODEN) +/** + * @brief Indicate whether the Over-Drive mode is ready or not + * @rmtoll CSR ODRDY LL_PWR_IsActiveFlag_OD + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_OD(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_ODRDY) == (PWR_CSR_ODRDY)); +} +#endif /* PWR_CR_ODEN */ + +#if defined(PWR_CR_ODSWEN) +/** + * @brief Indicate whether the Over-Drive mode switching is ready or not + * @rmtoll CSR ODSWRDY LL_PWR_IsActiveFlag_ODSW + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_ODSW(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_ODSWRDY) == (PWR_CSR_ODSWRDY)); +} +#endif /* PWR_CR_ODSWEN */ + +#if defined(PWR_CR_UDEN) +/** + * @brief Indicate whether the Under-Drive mode is ready or not + * @rmtoll CSR UDRDY LL_PWR_IsActiveFlag_UD + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_PWR_IsActiveFlag_UD(void) +{ + return (READ_BIT(PWR->CSR, PWR_CSR_UDRDY) == (PWR_CSR_UDRDY)); +} +#endif /* PWR_CR_UDEN */ +/** + * @brief Clear Standby Flag + * @rmtoll CR CSBF LL_PWR_ClearFlag_SB + * @retval None + */ +__STATIC_INLINE void LL_PWR_ClearFlag_SB(void) +{ + SET_BIT(PWR->CR, PWR_CR_CSBF); +} + +/** + * @brief Clear Wake-up Flags + * @rmtoll CR CWUF LL_PWR_ClearFlag_WU + * @retval None + */ +__STATIC_INLINE void LL_PWR_ClearFlag_WU(void) +{ + SET_BIT(PWR->CR, PWR_CR_CWUF); +} +#if defined(PWR_CSR_UDRDY) +/** + * @brief Clear Under-Drive ready Flag + * @rmtoll CSR UDRDY LL_PWR_ClearFlag_UD + * @retval None + */ +__STATIC_INLINE void LL_PWR_ClearFlag_UD(void) +{ + WRITE_REG(PWR->CSR, PWR_CSR_UDRDY); +} +#endif /* PWR_CSR_UDRDY */ + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup PWR_LL_EF_Init De-initialization function + * @{ + */ +ErrorStatus LL_PWR_DeInit(void); +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* defined(PWR) */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_PWR_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_rcc.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_rcc.h new file mode 100644 index 00000000..796f06d8 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_rcc.h @@ -0,0 +1,7101 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_rcc.h + * @author MCD Application Team + * @brief Header file of RCC LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_RCC_H +#define __STM32F4xx_LL_RCC_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined(RCC) + +/** @defgroup RCC_LL RCC + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @defgroup RCC_LL_Private_Variables RCC Private Variables + * @{ + */ + +#if defined(RCC_DCKCFGR_PLLSAIDIVR) +static const uint8_t aRCC_PLLSAIDIVRPrescTable[4] = {2, 4, 8, 16}; +#endif /* RCC_DCKCFGR_PLLSAIDIVR */ + +/** + * @} + */ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup RCC_LL_Private_Macros RCC Private Macros + * @{ + */ +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup RCC_LL_Exported_Types RCC Exported Types + * @{ + */ + +/** @defgroup LL_ES_CLOCK_FREQ Clocks Frequency Structure + * @{ + */ + +/** + * @brief RCC Clocks Frequency Structure + */ +typedef struct +{ + uint32_t SYSCLK_Frequency; /*!< SYSCLK clock frequency */ + uint32_t HCLK_Frequency; /*!< HCLK clock frequency */ + uint32_t PCLK1_Frequency; /*!< PCLK1 clock frequency */ + uint32_t PCLK2_Frequency; /*!< PCLK2 clock frequency */ +} LL_RCC_ClocksTypeDef; + +/** + * @} + */ + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup RCC_LL_Exported_Constants RCC Exported Constants + * @{ + */ + +/** @defgroup RCC_LL_EC_OSC_VALUES Oscillator Values adaptation + * @brief Defines used to adapt values of different oscillators + * @note These values could be modified in the user environment according to + * HW set-up. 
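+ * Example: for a board fitted with an 8 MHz crystal, the default may be overridden
+ * before this header is included, e.g. as a project-level define (value shown is
+ * only an illustration):
+ * @code
+ *   #define HSE_VALUE 8000000U
+ * @endcode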
+ * @{ + */ +#if !defined (HSE_VALUE) +#define HSE_VALUE 25000000U /*!< Value of the HSE oscillator in Hz */ +#endif /* HSE_VALUE */ + +#if !defined (HSI_VALUE) +#define HSI_VALUE 16000000U /*!< Value of the HSI oscillator in Hz */ +#endif /* HSI_VALUE */ + +#if !defined (LSE_VALUE) +#define LSE_VALUE 32768U /*!< Value of the LSE oscillator in Hz */ +#endif /* LSE_VALUE */ + +#if !defined (LSI_VALUE) +#define LSI_VALUE 32000U /*!< Value of the LSI oscillator in Hz */ +#endif /* LSI_VALUE */ + +#if !defined (EXTERNAL_CLOCK_VALUE) +#define EXTERNAL_CLOCK_VALUE 12288000U /*!< Value of the I2S_CKIN external oscillator in Hz */ +#endif /* EXTERNAL_CLOCK_VALUE */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_CLEAR_FLAG Clear Flags Defines + * @brief Flags defines which can be used with LL_RCC_WriteReg function + * @{ + */ +#define LL_RCC_CIR_LSIRDYC RCC_CIR_LSIRDYC /*!< LSI Ready Interrupt Clear */ +#define LL_RCC_CIR_LSERDYC RCC_CIR_LSERDYC /*!< LSE Ready Interrupt Clear */ +#define LL_RCC_CIR_HSIRDYC RCC_CIR_HSIRDYC /*!< HSI Ready Interrupt Clear */ +#define LL_RCC_CIR_HSERDYC RCC_CIR_HSERDYC /*!< HSE Ready Interrupt Clear */ +#define LL_RCC_CIR_PLLRDYC RCC_CIR_PLLRDYC /*!< PLL Ready Interrupt Clear */ +#if defined(RCC_PLLI2S_SUPPORT) +#define LL_RCC_CIR_PLLI2SRDYC RCC_CIR_PLLI2SRDYC /*!< PLLI2S Ready Interrupt Clear */ +#endif /* RCC_PLLI2S_SUPPORT */ +#if defined(RCC_PLLSAI_SUPPORT) +#define LL_RCC_CIR_PLLSAIRDYC RCC_CIR_PLLSAIRDYC /*!< PLLSAI Ready Interrupt Clear */ +#endif /* RCC_PLLSAI_SUPPORT */ +#define LL_RCC_CIR_CSSC RCC_CIR_CSSC /*!< Clock Security System Interrupt Clear */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_GET_FLAG Get Flags Defines + * @brief Flags defines which can be used with LL_RCC_ReadReg function + * @{ + */ +#define LL_RCC_CIR_LSIRDYF RCC_CIR_LSIRDYF /*!< LSI Ready Interrupt flag */ +#define LL_RCC_CIR_LSERDYF RCC_CIR_LSERDYF /*!< LSE Ready Interrupt flag */ +#define LL_RCC_CIR_HSIRDYF RCC_CIR_HSIRDYF /*!< HSI Ready Interrupt flag */ +#define LL_RCC_CIR_HSERDYF RCC_CIR_HSERDYF /*!< HSE Ready Interrupt flag */ +#define LL_RCC_CIR_PLLRDYF RCC_CIR_PLLRDYF /*!< PLL Ready Interrupt flag */ +#if defined(RCC_PLLI2S_SUPPORT) +#define LL_RCC_CIR_PLLI2SRDYF RCC_CIR_PLLI2SRDYF /*!< PLLI2S Ready Interrupt flag */ +#endif /* RCC_PLLI2S_SUPPORT */ +#if defined(RCC_PLLSAI_SUPPORT) +#define LL_RCC_CIR_PLLSAIRDYF RCC_CIR_PLLSAIRDYF /*!< PLLSAI Ready Interrupt flag */ +#endif /* RCC_PLLSAI_SUPPORT */ +#define LL_RCC_CIR_CSSF RCC_CIR_CSSF /*!< Clock Security System Interrupt flag */ +#define LL_RCC_CSR_LPWRRSTF RCC_CSR_LPWRRSTF /*!< Low-Power reset flag */ +#define LL_RCC_CSR_PINRSTF RCC_CSR_PINRSTF /*!< PIN reset flag */ +#define LL_RCC_CSR_PORRSTF RCC_CSR_PORRSTF /*!< POR/PDR reset flag */ +#define LL_RCC_CSR_SFTRSTF RCC_CSR_SFTRSTF /*!< Software Reset flag */ +#define LL_RCC_CSR_IWDGRSTF RCC_CSR_IWDGRSTF /*!< Independent Watchdog reset flag */ +#define LL_RCC_CSR_WWDGRSTF RCC_CSR_WWDGRSTF /*!< Window watchdog reset flag */ +#if defined(RCC_CSR_BORRSTF) +#define LL_RCC_CSR_BORRSTF RCC_CSR_BORRSTF /*!< BOR reset flag */ +#endif /* RCC_CSR_BORRSTF */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_IT IT Defines + * @brief IT defines which can be used with LL_RCC_ReadReg and LL_RCC_WriteReg functions + * @{ + */ +#define LL_RCC_CIR_LSIRDYIE RCC_CIR_LSIRDYIE /*!< LSI Ready Interrupt Enable */ +#define LL_RCC_CIR_LSERDYIE RCC_CIR_LSERDYIE /*!< LSE Ready Interrupt Enable */ +#define LL_RCC_CIR_HSIRDYIE RCC_CIR_HSIRDYIE /*!< HSI Ready Interrupt Enable */ +#define LL_RCC_CIR_HSERDYIE 
RCC_CIR_HSERDYIE /*!< HSE Ready Interrupt Enable */ +#define LL_RCC_CIR_PLLRDYIE RCC_CIR_PLLRDYIE /*!< PLL Ready Interrupt Enable */ +#if defined(RCC_PLLI2S_SUPPORT) +#define LL_RCC_CIR_PLLI2SRDYIE RCC_CIR_PLLI2SRDYIE /*!< PLLI2S Ready Interrupt Enable */ +#endif /* RCC_PLLI2S_SUPPORT */ +#if defined(RCC_PLLSAI_SUPPORT) +#define LL_RCC_CIR_PLLSAIRDYIE RCC_CIR_PLLSAIRDYIE /*!< PLLSAI Ready Interrupt Enable */ +#endif /* RCC_PLLSAI_SUPPORT */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_SYS_CLKSOURCE System clock switch + * @{ + */ +#define LL_RCC_SYS_CLKSOURCE_HSI RCC_CFGR_SW_HSI /*!< HSI selection as system clock */ +#define LL_RCC_SYS_CLKSOURCE_HSE RCC_CFGR_SW_HSE /*!< HSE selection as system clock */ +#define LL_RCC_SYS_CLKSOURCE_PLL RCC_CFGR_SW_PLL /*!< PLL selection as system clock */ +#if defined(RCC_CFGR_SW_PLLR) +#define LL_RCC_SYS_CLKSOURCE_PLLR RCC_CFGR_SW_PLLR /*!< PLLR selection as system clock */ +#endif /* RCC_CFGR_SW_PLLR */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_SYS_CLKSOURCE_STATUS System clock switch status + * @{ + */ +#define LL_RCC_SYS_CLKSOURCE_STATUS_HSI RCC_CFGR_SWS_HSI /*!< HSI used as system clock */ +#define LL_RCC_SYS_CLKSOURCE_STATUS_HSE RCC_CFGR_SWS_HSE /*!< HSE used as system clock */ +#define LL_RCC_SYS_CLKSOURCE_STATUS_PLL RCC_CFGR_SWS_PLL /*!< PLL used as system clock */ +#if defined(RCC_PLLR_SYSCLK_SUPPORT) +#define LL_RCC_SYS_CLKSOURCE_STATUS_PLLR RCC_CFGR_SWS_PLLR /*!< PLLR used as system clock */ +#endif /* RCC_PLLR_SYSCLK_SUPPORT */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_SYSCLK_DIV AHB prescaler + * @{ + */ +#define LL_RCC_SYSCLK_DIV_1 RCC_CFGR_HPRE_DIV1 /*!< SYSCLK not divided */ +#define LL_RCC_SYSCLK_DIV_2 RCC_CFGR_HPRE_DIV2 /*!< SYSCLK divided by 2 */ +#define LL_RCC_SYSCLK_DIV_4 RCC_CFGR_HPRE_DIV4 /*!< SYSCLK divided by 4 */ +#define LL_RCC_SYSCLK_DIV_8 RCC_CFGR_HPRE_DIV8 /*!< SYSCLK divided by 8 */ +#define LL_RCC_SYSCLK_DIV_16 RCC_CFGR_HPRE_DIV16 /*!< SYSCLK divided by 16 */ +#define LL_RCC_SYSCLK_DIV_64 RCC_CFGR_HPRE_DIV64 /*!< SYSCLK divided by 64 */ +#define LL_RCC_SYSCLK_DIV_128 RCC_CFGR_HPRE_DIV128 /*!< SYSCLK divided by 128 */ +#define LL_RCC_SYSCLK_DIV_256 RCC_CFGR_HPRE_DIV256 /*!< SYSCLK divided by 256 */ +#define LL_RCC_SYSCLK_DIV_512 RCC_CFGR_HPRE_DIV512 /*!< SYSCLK divided by 512 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_APB1_DIV APB low-speed prescaler (APB1) + * @{ + */ +#define LL_RCC_APB1_DIV_1 RCC_CFGR_PPRE1_DIV1 /*!< HCLK not divided */ +#define LL_RCC_APB1_DIV_2 RCC_CFGR_PPRE1_DIV2 /*!< HCLK divided by 2 */ +#define LL_RCC_APB1_DIV_4 RCC_CFGR_PPRE1_DIV4 /*!< HCLK divided by 4 */ +#define LL_RCC_APB1_DIV_8 RCC_CFGR_PPRE1_DIV8 /*!< HCLK divided by 8 */ +#define LL_RCC_APB1_DIV_16 RCC_CFGR_PPRE1_DIV16 /*!< HCLK divided by 16 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_APB2_DIV APB high-speed prescaler (APB2) + * @{ + */ +#define LL_RCC_APB2_DIV_1 RCC_CFGR_PPRE2_DIV1 /*!< HCLK not divided */ +#define LL_RCC_APB2_DIV_2 RCC_CFGR_PPRE2_DIV2 /*!< HCLK divided by 2 */ +#define LL_RCC_APB2_DIV_4 RCC_CFGR_PPRE2_DIV4 /*!< HCLK divided by 4 */ +#define LL_RCC_APB2_DIV_8 RCC_CFGR_PPRE2_DIV8 /*!< HCLK divided by 8 */ +#define LL_RCC_APB2_DIV_16 RCC_CFGR_PPRE2_DIV16 /*!< HCLK divided by 16 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_MCOxSOURCE MCO source selection + * @{ + */ +#define LL_RCC_MCO1SOURCE_HSI (uint32_t)(RCC_CFGR_MCO1|0x00000000U) /*!< HSI selection as MCO1 source */ +#define LL_RCC_MCO1SOURCE_LSE (uint32_t)(RCC_CFGR_MCO1|(RCC_CFGR_MCO1_0 >> 16U)) /*!< LSE selection as MCO1 source */ +#define 
LL_RCC_MCO1SOURCE_HSE (uint32_t)(RCC_CFGR_MCO1|(RCC_CFGR_MCO1_1 >> 16U)) /*!< HSE selection as MCO1 source */ +#define LL_RCC_MCO1SOURCE_PLLCLK (uint32_t)(RCC_CFGR_MCO1|((RCC_CFGR_MCO1_1|RCC_CFGR_MCO1_0) >> 16U)) /*!< PLLCLK selection as MCO1 source */ +#if defined(RCC_CFGR_MCO2) +#define LL_RCC_MCO2SOURCE_SYSCLK (uint32_t)(RCC_CFGR_MCO2|0x00000000U) /*!< SYSCLK selection as MCO2 source */ +#define LL_RCC_MCO2SOURCE_PLLI2S (uint32_t)(RCC_CFGR_MCO2|(RCC_CFGR_MCO2_0 >> 16U)) /*!< PLLI2S selection as MCO2 source */ +#define LL_RCC_MCO2SOURCE_HSE (uint32_t)(RCC_CFGR_MCO2|(RCC_CFGR_MCO2_1 >> 16U)) /*!< HSE selection as MCO2 source */ +#define LL_RCC_MCO2SOURCE_PLLCLK (uint32_t)(RCC_CFGR_MCO2|((RCC_CFGR_MCO2_1|RCC_CFGR_MCO2_0) >> 16U)) /*!< PLLCLK selection as MCO2 source */ +#endif /* RCC_CFGR_MCO2 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_MCOx_DIV MCO prescaler + * @{ + */ +#define LL_RCC_MCO1_DIV_1 (uint32_t)(RCC_CFGR_MCO1PRE|0x00000000U) /*!< MCO1 not divided */ +#define LL_RCC_MCO1_DIV_2 (uint32_t)(RCC_CFGR_MCO1PRE|(RCC_CFGR_MCO1PRE_2 >> 16U)) /*!< MCO1 divided by 2 */ +#define LL_RCC_MCO1_DIV_3 (uint32_t)(RCC_CFGR_MCO1PRE|((RCC_CFGR_MCO1PRE_2|RCC_CFGR_MCO1PRE_0) >> 16U)) /*!< MCO1 divided by 3 */ +#define LL_RCC_MCO1_DIV_4 (uint32_t)(RCC_CFGR_MCO1PRE|((RCC_CFGR_MCO1PRE_2|RCC_CFGR_MCO1PRE_1) >> 16U)) /*!< MCO1 divided by 4 */ +#define LL_RCC_MCO1_DIV_5 (uint32_t)(RCC_CFGR_MCO1PRE|(RCC_CFGR_MCO1PRE >> 16U)) /*!< MCO1 divided by 5 */ +#if defined(RCC_CFGR_MCO2PRE) +#define LL_RCC_MCO2_DIV_1 (uint32_t)(RCC_CFGR_MCO2PRE|0x00000000U) /*!< MCO2 not divided */ +#define LL_RCC_MCO2_DIV_2 (uint32_t)(RCC_CFGR_MCO2PRE|(RCC_CFGR_MCO2PRE_2 >> 16U)) /*!< MCO2 divided by 2 */ +#define LL_RCC_MCO2_DIV_3 (uint32_t)(RCC_CFGR_MCO2PRE|((RCC_CFGR_MCO2PRE_2|RCC_CFGR_MCO2PRE_0) >> 16U)) /*!< MCO2 divided by 3 */ +#define LL_RCC_MCO2_DIV_4 (uint32_t)(RCC_CFGR_MCO2PRE|((RCC_CFGR_MCO2PRE_2|RCC_CFGR_MCO2PRE_1) >> 16U)) /*!< MCO2 divided by 4 */ +#define LL_RCC_MCO2_DIV_5 (uint32_t)(RCC_CFGR_MCO2PRE|(RCC_CFGR_MCO2PRE >> 16U)) /*!< MCO2 divided by 5 */ +#endif /* RCC_CFGR_MCO2PRE */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_RTC_HSEDIV HSE prescaler for RTC clock + * @{ + */ +#define LL_RCC_RTC_NOCLOCK 0x00000000U /*!< HSE not divided */ +#define LL_RCC_RTC_HSE_DIV_2 RCC_CFGR_RTCPRE_1 /*!< HSE clock divided by 2 */ +#define LL_RCC_RTC_HSE_DIV_3 (RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 3 */ +#define LL_RCC_RTC_HSE_DIV_4 RCC_CFGR_RTCPRE_2 /*!< HSE clock divided by 4 */ +#define LL_RCC_RTC_HSE_DIV_5 (RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 5 */ +#define LL_RCC_RTC_HSE_DIV_6 (RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1) /*!< HSE clock divided by 6 */ +#define LL_RCC_RTC_HSE_DIV_7 (RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 7 */ +#define LL_RCC_RTC_HSE_DIV_8 RCC_CFGR_RTCPRE_3 /*!< HSE clock divided by 8 */ +#define LL_RCC_RTC_HSE_DIV_9 (RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 9 */ +#define LL_RCC_RTC_HSE_DIV_10 (RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_1) /*!< HSE clock divided by 10 */ +#define LL_RCC_RTC_HSE_DIV_11 (RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 11 */ +#define LL_RCC_RTC_HSE_DIV_12 (RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2) /*!< HSE clock divided by 12 */ +#define LL_RCC_RTC_HSE_DIV_13 (RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 13 */ +#define LL_RCC_RTC_HSE_DIV_14 (RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1) /*!< HSE clock divided 
by 14 */
+#define LL_RCC_RTC_HSE_DIV_15 (RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 15 */
+#define LL_RCC_RTC_HSE_DIV_16 RCC_CFGR_RTCPRE_4 /*!< HSE clock divided by 16 */
+#define LL_RCC_RTC_HSE_DIV_17 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 17 */
+#define LL_RCC_RTC_HSE_DIV_18 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_1) /*!< HSE clock divided by 18 */
+#define LL_RCC_RTC_HSE_DIV_19 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 19 */
+#define LL_RCC_RTC_HSE_DIV_20 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_2) /*!< HSE clock divided by 20 */
+#define LL_RCC_RTC_HSE_DIV_21 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 21 */
+#define LL_RCC_RTC_HSE_DIV_22 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1) /*!< HSE clock divided by 22 */
+#define LL_RCC_RTC_HSE_DIV_23 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 23 */
+#define LL_RCC_RTC_HSE_DIV_24 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3) /*!< HSE clock divided by 24 */
+#define LL_RCC_RTC_HSE_DIV_25 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 25 */
+#define LL_RCC_RTC_HSE_DIV_26 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_1) /*!< HSE clock divided by 26 */
+#define LL_RCC_RTC_HSE_DIV_27 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 27 */
+#define LL_RCC_RTC_HSE_DIV_28 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2) /*!< HSE clock divided by 28 */
+#define LL_RCC_RTC_HSE_DIV_29 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 29 */
+#define LL_RCC_RTC_HSE_DIV_30 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1) /*!< HSE clock divided by 30 */
+#define LL_RCC_RTC_HSE_DIV_31 (RCC_CFGR_RTCPRE_4|RCC_CFGR_RTCPRE_3|RCC_CFGR_RTCPRE_2|RCC_CFGR_RTCPRE_1|RCC_CFGR_RTCPRE_0) /*!< HSE clock divided by 31 */
+/**
+ * @}
+ */
+
+#if defined(USE_FULL_LL_DRIVER)
+/** @defgroup RCC_LL_EC_PERIPH_FREQUENCY Peripheral clock frequency
+ * @{
+ */
+#define LL_RCC_PERIPH_FREQUENCY_NO 0x00000000U /*!< No clock enabled for the peripheral */
+#define LL_RCC_PERIPH_FREQUENCY_NA 0xFFFFFFFFU /*!< Frequency cannot be provided as external clock */
+/**
+ * @}
+ */
+#endif /* USE_FULL_LL_DRIVER */
+
+#if defined(FMPI2C1)
+/** @defgroup RCC_LL_EC_FMPI2C1_CLKSOURCE Peripheral FMPI2C clock source selection
+ * @{
+ */
+#define LL_RCC_FMPI2C1_CLKSOURCE_PCLK1 0x00000000U /*!< PCLK1 clock used as FMPI2C1 clock source */
+#define LL_RCC_FMPI2C1_CLKSOURCE_SYSCLK RCC_DCKCFGR2_FMPI2C1SEL_0 /*!< SYSCLK clock used as FMPI2C1 clock source */
+#define LL_RCC_FMPI2C1_CLKSOURCE_HSI RCC_DCKCFGR2_FMPI2C1SEL_1 /*!< HSI clock used as FMPI2C1 clock source */
+/**
+ * @}
+ */
+#endif /* FMPI2C1 */
+
+#if defined(LPTIM1)
+/** @defgroup RCC_LL_EC_LPTIM1_CLKSOURCE Peripheral LPTIM clock source selection
+ * @{
+ */
+#define LL_RCC_LPTIM1_CLKSOURCE_PCLK1 0x00000000U /*!< PCLK1 clock used as LPTIM1 clock */
+#define LL_RCC_LPTIM1_CLKSOURCE_HSI RCC_DCKCFGR2_LPTIM1SEL_0 /*!< HSI oscillator clock used as LPTIM1 clock */
+#define LL_RCC_LPTIM1_CLKSOURCE_LSI RCC_DCKCFGR2_LPTIM1SEL_1 /*!< LSI oscillator clock used as LPTIM1 clock */
+#define LL_RCC_LPTIM1_CLKSOURCE_LSE (uint32_t)(RCC_DCKCFGR2_LPTIM1SEL_1 | RCC_DCKCFGR2_LPTIM1SEL_0) /*!< LSE oscillator clock used as LPTIM1 clock */
+/**
+ * @}
+ */
+#endif /* LPTIM1 */
+
+#if
defined(SAI1) +/** @defgroup RCC_LL_EC_SAIx_CLKSOURCE Peripheral SAI clock source selection + * @{ + */ +#if defined(RCC_DCKCFGR_SAI1SRC) +#define LL_RCC_SAI1_CLKSOURCE_PLLSAI (uint32_t)(RCC_DCKCFGR_SAI1SRC | 0x00000000U) /*!< PLLSAI clock used as SAI1 clock source */ +#define LL_RCC_SAI1_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_SAI1SRC | (RCC_DCKCFGR_SAI1SRC_0 >> 16)) /*!< PLLI2S clock used as SAI1 clock source */ +#define LL_RCC_SAI1_CLKSOURCE_PLL (uint32_t)(RCC_DCKCFGR_SAI1SRC | (RCC_DCKCFGR_SAI1SRC_1 >> 16)) /*!< PLL clock used as SAI1 clock source */ +#define LL_RCC_SAI1_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_SAI1SRC | (RCC_DCKCFGR_SAI1SRC >> 16)) /*!< External pin clock used as SAI1 clock source */ +#endif /* RCC_DCKCFGR_SAI1SRC */ +#if defined(RCC_DCKCFGR_SAI2SRC) +#define LL_RCC_SAI2_CLKSOURCE_PLLSAI (uint32_t)(RCC_DCKCFGR_SAI2SRC | 0x00000000U) /*!< PLLSAI clock used as SAI2 clock source */ +#define LL_RCC_SAI2_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_SAI2SRC | (RCC_DCKCFGR_SAI2SRC_0 >> 16)) /*!< PLLI2S clock used as SAI2 clock source */ +#define LL_RCC_SAI2_CLKSOURCE_PLL (uint32_t)(RCC_DCKCFGR_SAI2SRC | (RCC_DCKCFGR_SAI2SRC_1 >> 16)) /*!< PLL clock used as SAI2 clock source */ +#define LL_RCC_SAI2_CLKSOURCE_PLLSRC (uint32_t)(RCC_DCKCFGR_SAI2SRC | (RCC_DCKCFGR_SAI2SRC >> 16)) /*!< PLL Main clock used as SAI2 clock source */ +#endif /* RCC_DCKCFGR_SAI2SRC */ +#if defined(RCC_DCKCFGR_SAI1ASRC) +#if defined(RCC_SAI1A_PLLSOURCE_SUPPORT) +#define LL_RCC_SAI1_A_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_SAI1ASRC | 0x00000000U) /*!< PLLI2S clock used as SAI1 block A clock source */ +#define LL_RCC_SAI1_A_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_SAI1ASRC | (RCC_DCKCFGR_SAI1ASRC_0 >> 16)) /*!< External pin used as SAI1 block A clock source */ +#define LL_RCC_SAI1_A_CLKSOURCE_PLL (uint32_t)(RCC_DCKCFGR_SAI1ASRC | (RCC_DCKCFGR_SAI1ASRC_1 >> 16)) /*!< PLL clock used as SAI1 block A clock source */ +#define LL_RCC_SAI1_A_CLKSOURCE_PLLSRC (uint32_t)(RCC_DCKCFGR_SAI1ASRC | (RCC_DCKCFGR_SAI1ASRC >> 16)) /*!< PLL Main clock used as SAI1 block A clock source */ +#else +#define LL_RCC_SAI1_A_CLKSOURCE_PLLSAI (uint32_t)(RCC_DCKCFGR_SAI1ASRC | 0x00000000U) /*!< PLLSAI clock used as SAI1 block A clock source */ +#define LL_RCC_SAI1_A_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_SAI1ASRC | (RCC_DCKCFGR_SAI1ASRC_0 >> 16)) /*!< PLLI2S clock used as SAI1 block A clock source */ +#define LL_RCC_SAI1_A_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_SAI1ASRC | (RCC_DCKCFGR_SAI1ASRC_1 >> 16)) /*!< External pin clock used as SAI1 block A clock source */ +#endif /* RCC_SAI1A_PLLSOURCE_SUPPORT */ +#endif /* RCC_DCKCFGR_SAI1ASRC */ +#if defined(RCC_DCKCFGR_SAI1BSRC) +#if defined(RCC_SAI1B_PLLSOURCE_SUPPORT) +#define LL_RCC_SAI1_B_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_SAI1BSRC | 0x00000000U) /*!< PLLI2S clock used as SAI1 block B clock source */ +#define LL_RCC_SAI1_B_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_SAI1BSRC | (RCC_DCKCFGR_SAI1BSRC_0 >> 16)) /*!< External pin used as SAI1 block B clock source */ +#define LL_RCC_SAI1_B_CLKSOURCE_PLL (uint32_t)(RCC_DCKCFGR_SAI1BSRC | (RCC_DCKCFGR_SAI1BSRC_1 >> 16)) /*!< PLL clock used as SAI1 block B clock source */ +#define LL_RCC_SAI1_B_CLKSOURCE_PLLSRC (uint32_t)(RCC_DCKCFGR_SAI1BSRC | (RCC_DCKCFGR_SAI1BSRC >> 16)) /*!< PLL Main clock used as SAI1 block B clock source */ +#else +#define LL_RCC_SAI1_B_CLKSOURCE_PLLSAI (uint32_t)(RCC_DCKCFGR_SAI1BSRC | 0x00000000U) /*!< PLLSAI clock used as SAI1 block B clock source */ +#define LL_RCC_SAI1_B_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_SAI1BSRC | 
(RCC_DCKCFGR_SAI1BSRC_0 >> 16)) /*!< PLLI2S clock used as SAI1 block B clock source */ +#define LL_RCC_SAI1_B_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_SAI1BSRC | (RCC_DCKCFGR_SAI1BSRC_1 >> 16)) /*!< External pin clock used as SAI1 block B clock source */ +#endif /* RCC_SAI1B_PLLSOURCE_SUPPORT */ +#endif /* RCC_DCKCFGR_SAI1BSRC */ +/** + * @} + */ +#endif /* SAI1 */ + +#if defined(RCC_DCKCFGR_SDIOSEL) || defined(RCC_DCKCFGR2_SDIOSEL) +/** @defgroup RCC_LL_EC_SDIOx_CLKSOURCE Peripheral SDIO clock source selection + * @{ + */ +#define LL_RCC_SDIO_CLKSOURCE_PLL48CLK 0x00000000U /*!< PLL 48M domain clock used as SDIO clock */ +#if defined(RCC_DCKCFGR_SDIOSEL) +#define LL_RCC_SDIO_CLKSOURCE_SYSCLK RCC_DCKCFGR_SDIOSEL /*!< System clock clock used as SDIO clock */ +#else +#define LL_RCC_SDIO_CLKSOURCE_SYSCLK RCC_DCKCFGR2_SDIOSEL /*!< System clock clock used as SDIO clock */ +#endif /* RCC_DCKCFGR_SDIOSEL */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_SDIOSEL || RCC_DCKCFGR2_SDIOSEL */ + +#if defined(DSI) +/** @defgroup RCC_LL_EC_DSI_CLKSOURCE Peripheral DSI clock source selection + * @{ + */ +#define LL_RCC_DSI_CLKSOURCE_PHY 0x00000000U /*!< DSI-PHY clock used as DSI byte lane clock source */ +#define LL_RCC_DSI_CLKSOURCE_PLL RCC_DCKCFGR_DSISEL /*!< PLL clock used as DSI byte lane clock source */ +/** + * @} + */ +#endif /* DSI */ + +#if defined(CEC) +/** @defgroup RCC_LL_EC_CEC_CLKSOURCE Peripheral CEC clock source selection + * @{ + */ +#define LL_RCC_CEC_CLKSOURCE_HSI_DIV488 0x00000000U /*!< HSI oscillator clock divided by 488 used as CEC clock */ +#define LL_RCC_CEC_CLKSOURCE_LSE RCC_DCKCFGR2_CECSEL /*!< LSE oscillator clock used as CEC clock */ +/** + * @} + */ +#endif /* CEC */ + +/** @defgroup RCC_LL_EC_I2S1_CLKSOURCE Peripheral I2S clock source selection + * @{ + */ +#if defined(RCC_CFGR_I2SSRC) +#define LL_RCC_I2S1_CLKSOURCE_PLLI2S 0x00000000U /*!< I2S oscillator clock used as I2S1 clock */ +#define LL_RCC_I2S1_CLKSOURCE_PIN RCC_CFGR_I2SSRC /*!< External pin clock used as I2S1 clock */ +#endif /* RCC_CFGR_I2SSRC */ +#if defined(RCC_DCKCFGR_I2SSRC) +#define LL_RCC_I2S1_CLKSOURCE_PLL (uint32_t)(RCC_DCKCFGR_I2SSRC | 0x00000000U) /*!< PLL clock used as I2S1 clock source */ +#define LL_RCC_I2S1_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_I2SSRC | (RCC_DCKCFGR_I2SSRC_0 >> 16)) /*!< External pin used as I2S1 clock source */ +#define LL_RCC_I2S1_CLKSOURCE_PLLSRC (uint32_t)(RCC_DCKCFGR_I2SSRC | (RCC_DCKCFGR_I2SSRC_1 >> 16)) /*!< PLL Main clock used as I2S1 clock source */ +#endif /* RCC_DCKCFGR_I2SSRC */ +#if defined(RCC_DCKCFGR_I2S1SRC) +#define LL_RCC_I2S1_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_I2S1SRC | 0x00000000U) /*!< PLLI2S clock used as I2S1 clock source */ +#define LL_RCC_I2S1_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_I2S1SRC | (RCC_DCKCFGR_I2S1SRC_0 >> 16)) /*!< External pin used as I2S1 clock source */ +#define LL_RCC_I2S1_CLKSOURCE_PLL (uint32_t)(RCC_DCKCFGR_I2S1SRC | (RCC_DCKCFGR_I2S1SRC_1 >> 16)) /*!< PLL clock used as I2S1 clock source */ +#define LL_RCC_I2S1_CLKSOURCE_PLLSRC (uint32_t)(RCC_DCKCFGR_I2S1SRC | (RCC_DCKCFGR_I2S1SRC >> 16)) /*!< PLL Main clock used as I2S1 clock source */ +#endif /* RCC_DCKCFGR_I2S1SRC */ +#if defined(RCC_DCKCFGR_I2S2SRC) +#define LL_RCC_I2S2_CLKSOURCE_PLLI2S (uint32_t)(RCC_DCKCFGR_I2S2SRC | 0x00000000U) /*!< PLLI2S clock used as I2S2 clock source */ +#define LL_RCC_I2S2_CLKSOURCE_PIN (uint32_t)(RCC_DCKCFGR_I2S2SRC | (RCC_DCKCFGR_I2S2SRC_0 >> 16)) /*!< External pin used as I2S2 clock source */ +#define LL_RCC_I2S2_CLKSOURCE_PLL (uint32_t)(RCC_DCKCFGR_I2S2SRC | 
(RCC_DCKCFGR_I2S2SRC_1 >> 16)) /*!< PLL clock used as I2S2 clock source */ +#define LL_RCC_I2S2_CLKSOURCE_PLLSRC (uint32_t)(RCC_DCKCFGR_I2S2SRC | (RCC_DCKCFGR_I2S2SRC >> 16)) /*!< PLL Main clock used as I2S2 clock source */ +#endif /* RCC_DCKCFGR_I2S2SRC */ +/** + * @} + */ + +#if defined(RCC_DCKCFGR_CK48MSEL) || defined(RCC_DCKCFGR2_CK48MSEL) +/** @defgroup RCC_LL_EC_CK48M_CLKSOURCE Peripheral 48Mhz domain clock source selection + * @{ + */ +#if defined(RCC_DCKCFGR_CK48MSEL) +#define LL_RCC_CK48M_CLKSOURCE_PLL 0x00000000U /*!< PLL oscillator clock used as 48Mhz domain clock */ +#define LL_RCC_CK48M_CLKSOURCE_PLLSAI RCC_DCKCFGR_CK48MSEL /*!< PLLSAI oscillator clock used as 48Mhz domain clock */ +#endif /* RCC_DCKCFGR_CK48MSEL */ +#if defined(RCC_DCKCFGR2_CK48MSEL) +#define LL_RCC_CK48M_CLKSOURCE_PLL 0x00000000U /*!< PLL oscillator clock used as 48Mhz domain clock */ +#if defined(RCC_PLLSAI_SUPPORT) +#define LL_RCC_CK48M_CLKSOURCE_PLLSAI RCC_DCKCFGR2_CK48MSEL /*!< PLLSAI oscillator clock used as 48Mhz domain clock */ +#endif /* RCC_PLLSAI_SUPPORT */ +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) && !defined(RCC_DCKCFGR_PLLI2SDIVQ) +#define LL_RCC_CK48M_CLKSOURCE_PLLI2S RCC_DCKCFGR2_CK48MSEL /*!< PLLI2S oscillator clock used as 48Mhz domain clock */ +#endif /* RCC_PLLI2SCFGR_PLLI2SQ && !RCC_DCKCFGR_PLLI2SDIVQ */ +#endif /* RCC_DCKCFGR2_CK48MSEL */ +/** + * @} + */ + +#if defined(RNG) +/** @defgroup RCC_LL_EC_RNG_CLKSOURCE Peripheral RNG clock source selection + * @{ + */ +#define LL_RCC_RNG_CLKSOURCE_PLL LL_RCC_CK48M_CLKSOURCE_PLL /*!< PLL clock used as RNG clock source */ +#if defined(RCC_PLLSAI_SUPPORT) +#define LL_RCC_RNG_CLKSOURCE_PLLSAI LL_RCC_CK48M_CLKSOURCE_PLLSAI /*!< PLLSAI clock used as RNG clock source */ +#endif /* RCC_PLLSAI_SUPPORT */ +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) && !defined(RCC_DCKCFGR_PLLI2SDIVQ) +#define LL_RCC_RNG_CLKSOURCE_PLLI2S LL_RCC_CK48M_CLKSOURCE_PLLI2S /*!< PLLI2S clock used as RNG clock source */ +#endif /* RCC_PLLI2SCFGR_PLLI2SQ && !RCC_DCKCFGR_PLLI2SDIVQ */ +/** + * @} + */ +#endif /* RNG */ + +#if defined(USB_OTG_FS) || defined(USB_OTG_HS) +/** @defgroup RCC_LL_EC_USB_CLKSOURCE Peripheral USB clock source selection + * @{ + */ +#define LL_RCC_USB_CLKSOURCE_PLL LL_RCC_CK48M_CLKSOURCE_PLL /*!< PLL clock used as USB clock source */ +#if defined(RCC_PLLSAI_SUPPORT) +#define LL_RCC_USB_CLKSOURCE_PLLSAI LL_RCC_CK48M_CLKSOURCE_PLLSAI /*!< PLLSAI clock used as USB clock source */ +#endif /* RCC_PLLSAI_SUPPORT */ +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) && !defined(RCC_DCKCFGR_PLLI2SDIVQ) +#define LL_RCC_USB_CLKSOURCE_PLLI2S LL_RCC_CK48M_CLKSOURCE_PLLI2S /*!< PLLI2S clock used as USB clock source */ +#endif /* RCC_PLLI2SCFGR_PLLI2SQ && !RCC_DCKCFGR_PLLI2SDIVQ */ +/** + * @} + */ +#endif /* USB_OTG_FS || USB_OTG_HS */ + +#endif /* RCC_DCKCFGR_CK48MSEL || RCC_DCKCFGR2_CK48MSEL */ + +#if defined(DFSDM1_Channel0) || defined(DFSDM2_Channel0) +/** @defgroup RCC_LL_EC_DFSDM1_AUDIO_CLKSOURCE Peripheral DFSDM Audio clock source selection + * @{ + */ +#define LL_RCC_DFSDM1_AUDIO_CLKSOURCE_I2S1 (uint32_t)(RCC_DCKCFGR_CKDFSDM1ASEL | 0x00000000U) /*!< I2S1 clock used as DFSDM1 Audio clock source */ +#define LL_RCC_DFSDM1_AUDIO_CLKSOURCE_I2S2 (uint32_t)(RCC_DCKCFGR_CKDFSDM1ASEL | (RCC_DCKCFGR_CKDFSDM1ASEL << 16)) /*!< I2S2 clock used as DFSDM1 Audio clock source */ +#if defined(DFSDM2_Channel0) +#define LL_RCC_DFSDM2_AUDIO_CLKSOURCE_I2S1 (uint32_t)(RCC_DCKCFGR_CKDFSDM2ASEL | 0x00000000U) /*!< I2S1 clock used as DFSDM2 Audio clock source */ +#define LL_RCC_DFSDM2_AUDIO_CLKSOURCE_I2S2 
(uint32_t)(RCC_DCKCFGR_CKDFSDM2ASEL | (RCC_DCKCFGR_CKDFSDM2ASEL << 16)) /*!< I2S2 clock used as DFSDM2 Audio clock source */ +#endif /* DFSDM2_Channel0 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_DFSDM1_CLKSOURCE Peripheral DFSDM clock source selection + * @{ + */ +#define LL_RCC_DFSDM1_CLKSOURCE_PCLK2 0x00000000U /*!< PCLK2 clock used as DFSDM1 clock */ +#define LL_RCC_DFSDM1_CLKSOURCE_SYSCLK RCC_DCKCFGR_CKDFSDM1SEL /*!< System clock used as DFSDM1 clock */ +#if defined(DFSDM2_Channel0) +#define LL_RCC_DFSDM2_CLKSOURCE_PCLK2 0x00000000U /*!< PCLK2 clock used as DFSDM2 clock */ +#define LL_RCC_DFSDM2_CLKSOURCE_SYSCLK RCC_DCKCFGR_CKDFSDM1SEL /*!< System clock used as DFSDM2 clock */ +#endif /* DFSDM2_Channel0 */ +/** + * @} + */ +#endif /* DFSDM1_Channel0 || DFSDM2_Channel0 */ + +#if defined(FMPI2C1) +/** @defgroup RCC_LL_EC_FMPI2C1 Peripheral FMPI2C get clock source + * @{ + */ +#define LL_RCC_FMPI2C1_CLKSOURCE RCC_DCKCFGR2_FMPI2C1SEL /*!< FMPI2C1 Clock source selection */ +/** + * @} + */ +#endif /* FMPI2C1 */ + +#if defined(SPDIFRX) +/** @defgroup RCC_LL_EC_SPDIFRX_CLKSOURCE Peripheral SPDIFRX clock source selection + * @{ + */ +#define LL_RCC_SPDIFRX1_CLKSOURCE_PLL 0x00000000U /*!< PLL clock used as SPDIFRX clock source */ +#define LL_RCC_SPDIFRX1_CLKSOURCE_PLLI2S RCC_DCKCFGR2_SPDIFRXSEL /*!< PLLI2S clock used as SPDIFRX clock source */ +/** + * @} + */ +#endif /* SPDIFRX */ + +#if defined(LPTIM1) +/** @defgroup RCC_LL_EC_LPTIM1 Peripheral LPTIM get clock source + * @{ + */ +#define LL_RCC_LPTIM1_CLKSOURCE RCC_DCKCFGR2_LPTIM1SEL /*!< LPTIM1 Clock source selection */ +/** + * @} + */ +#endif /* LPTIM1 */ + +#if defined(SAI1) +/** @defgroup RCC_LL_EC_SAIx Peripheral SAI get clock source + * @{ + */ +#if defined(RCC_DCKCFGR_SAI1ASRC) +#define LL_RCC_SAI1_A_CLKSOURCE RCC_DCKCFGR_SAI1ASRC /*!< SAI1 block A Clock source selection */ +#endif /* RCC_DCKCFGR_SAI1ASRC */ +#if defined(RCC_DCKCFGR_SAI1BSRC) +#define LL_RCC_SAI1_B_CLKSOURCE RCC_DCKCFGR_SAI1BSRC /*!< SAI1 block B Clock source selection */ +#endif /* RCC_DCKCFGR_SAI1BSRC */ +#if defined(RCC_DCKCFGR_SAI1SRC) +#define LL_RCC_SAI1_CLKSOURCE RCC_DCKCFGR_SAI1SRC /*!< SAI1 Clock source selection */ +#endif /* RCC_DCKCFGR_SAI1SRC */ +#if defined(RCC_DCKCFGR_SAI2SRC) +#define LL_RCC_SAI2_CLKSOURCE RCC_DCKCFGR_SAI2SRC /*!< SAI2 Clock source selection */ +#endif /* RCC_DCKCFGR_SAI2SRC */ +/** + * @} + */ +#endif /* SAI1 */ + +#if defined(SDIO) +/** @defgroup RCC_LL_EC_SDIOx Peripheral SDIO get clock source + * @{ + */ +#if defined(RCC_DCKCFGR_SDIOSEL) +#define LL_RCC_SDIO_CLKSOURCE RCC_DCKCFGR_SDIOSEL /*!< SDIO Clock source selection */ +#elif defined(RCC_DCKCFGR2_SDIOSEL) +#define LL_RCC_SDIO_CLKSOURCE RCC_DCKCFGR2_SDIOSEL /*!< SDIO Clock source selection */ +#else +#define LL_RCC_SDIO_CLKSOURCE RCC_PLLCFGR_PLLQ /*!< SDIO Clock source selection */ +#endif /* RCC_DCKCFGR_SDIOSEL */ +/** + * @} + */ +#endif /* SDIO */ + +#if defined(RCC_DCKCFGR_CK48MSEL) || defined(RCC_DCKCFGR2_CK48MSEL) +/** @defgroup RCC_LL_EC_CK48M Peripheral CK48M get clock source + * @{ + */ +#if defined(RCC_DCKCFGR_CK48MSEL) +#define LL_RCC_CK48M_CLKSOURCE RCC_DCKCFGR_CK48MSEL /*!< CK48M Domain clock source selection */ +#endif /* RCC_DCKCFGR_CK48MSEL */ +#if defined(RCC_DCKCFGR2_CK48MSEL) +#define LL_RCC_CK48M_CLKSOURCE RCC_DCKCFGR2_CK48MSEL /*!< CK48M Domain clock source selection */ +#endif /* RCC_DCKCFGR_CK48MSEL */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_CK48MSEL || RCC_DCKCFGR2_CK48MSEL */ + +#if defined(RNG) +/** @defgroup RCC_LL_EC_RNG Peripheral RNG get clock 
source + * @{ + */ +#if defined(RCC_DCKCFGR_CK48MSEL) || defined(RCC_DCKCFGR2_CK48MSEL) +#define LL_RCC_RNG_CLKSOURCE LL_RCC_CK48M_CLKSOURCE /*!< RNG Clock source selection */ +#else +#define LL_RCC_RNG_CLKSOURCE RCC_PLLCFGR_PLLQ /*!< RNG Clock source selection */ +#endif /* RCC_DCKCFGR_CK48MSEL || RCC_DCKCFGR2_CK48MSEL */ +/** + * @} + */ +#endif /* RNG */ + +#if defined(USB_OTG_FS) || defined(USB_OTG_HS) +/** @defgroup RCC_LL_EC_USB Peripheral USB get clock source + * @{ + */ +#if defined(RCC_DCKCFGR_CK48MSEL) || defined(RCC_DCKCFGR2_CK48MSEL) +#define LL_RCC_USB_CLKSOURCE LL_RCC_CK48M_CLKSOURCE /*!< USB Clock source selection */ +#else +#define LL_RCC_USB_CLKSOURCE RCC_PLLCFGR_PLLQ /*!< USB Clock source selection */ +#endif /* RCC_DCKCFGR_CK48MSEL || RCC_DCKCFGR2_CK48MSEL */ +/** + * @} + */ +#endif /* USB_OTG_FS || USB_OTG_HS */ + +#if defined(CEC) +/** @defgroup RCC_LL_EC_CEC Peripheral CEC get clock source + * @{ + */ +#define LL_RCC_CEC_CLKSOURCE RCC_DCKCFGR2_CECSEL /*!< CEC Clock source selection */ +/** + * @} + */ +#endif /* CEC */ + +/** @defgroup RCC_LL_EC_I2S1 Peripheral I2S get clock source + * @{ + */ +#if defined(RCC_CFGR_I2SSRC) +#define LL_RCC_I2S1_CLKSOURCE RCC_CFGR_I2SSRC /*!< I2S1 Clock source selection */ +#endif /* RCC_CFGR_I2SSRC */ +#if defined(RCC_DCKCFGR_I2SSRC) +#define LL_RCC_I2S1_CLKSOURCE RCC_DCKCFGR_I2SSRC /*!< I2S1 Clock source selection */ +#endif /* RCC_DCKCFGR_I2SSRC */ +#if defined(RCC_DCKCFGR_I2S1SRC) +#define LL_RCC_I2S1_CLKSOURCE RCC_DCKCFGR_I2S1SRC /*!< I2S1 Clock source selection */ +#endif /* RCC_DCKCFGR_I2S1SRC */ +#if defined(RCC_DCKCFGR_I2S2SRC) +#define LL_RCC_I2S2_CLKSOURCE RCC_DCKCFGR_I2S2SRC /*!< I2S2 Clock source selection */ +#endif /* RCC_DCKCFGR_I2S2SRC */ +/** + * @} + */ + +#if defined(DFSDM1_Channel0) || defined(DFSDM2_Channel0) +/** @defgroup RCC_LL_EC_DFSDM_AUDIO Peripheral DFSDM Audio get clock source + * @{ + */ +#define LL_RCC_DFSDM1_AUDIO_CLKSOURCE RCC_DCKCFGR_CKDFSDM1ASEL /*!< DFSDM1 Audio Clock source selection */ +#if defined(DFSDM2_Channel0) +#define LL_RCC_DFSDM2_AUDIO_CLKSOURCE RCC_DCKCFGR_CKDFSDM2ASEL /*!< DFSDM2 Audio Clock source selection */ +#endif /* DFSDM2_Channel0 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_DFSDM Peripheral DFSDM get clock source + * @{ + */ +#define LL_RCC_DFSDM1_CLKSOURCE RCC_DCKCFGR_CKDFSDM1SEL /*!< DFSDM1 Clock source selection */ +#if defined(DFSDM2_Channel0) +#define LL_RCC_DFSDM2_CLKSOURCE RCC_DCKCFGR_CKDFSDM1SEL /*!< DFSDM2 Clock source selection */ +#endif /* DFSDM2_Channel0 */ +/** + * @} + */ +#endif /* DFSDM1_Channel0 || DFSDM2_Channel0 */ + +#if defined(SPDIFRX) +/** @defgroup RCC_LL_EC_SPDIFRX Peripheral SPDIFRX get clock source + * @{ + */ +#define LL_RCC_SPDIFRX1_CLKSOURCE RCC_DCKCFGR2_SPDIFRXSEL /*!< SPDIFRX Clock source selection */ +/** + * @} + */ +#endif /* SPDIFRX */ + +#if defined(DSI) +/** @defgroup RCC_LL_EC_DSI Peripheral DSI get clock source + * @{ + */ +#define LL_RCC_DSI_CLKSOURCE RCC_DCKCFGR_DSISEL /*!< DSI Clock source selection */ +/** + * @} + */ +#endif /* DSI */ + +#if defined(LTDC) +/** @defgroup RCC_LL_EC_LTDC Peripheral LTDC get clock source + * @{ + */ +#define LL_RCC_LTDC_CLKSOURCE RCC_DCKCFGR_PLLSAIDIVR /*!< LTDC Clock source selection */ +/** + * @} + */ +#endif /* LTDC */ + + +/** @defgroup RCC_LL_EC_RTC_CLKSOURCE RTC clock source selection + * @{ + */ +#define LL_RCC_RTC_CLKSOURCE_NONE 0x00000000U /*!< No clock used as RTC clock */ +#define LL_RCC_RTC_CLKSOURCE_LSE RCC_BDCR_RTCSEL_0 /*!< LSE oscillator clock used as RTC clock */ +#define 
LL_RCC_RTC_CLKSOURCE_LSI RCC_BDCR_RTCSEL_1 /*!< LSI oscillator clock used as RTC clock */ +#define LL_RCC_RTC_CLKSOURCE_HSE RCC_BDCR_RTCSEL /*!< HSE oscillator clock divided by HSE prescaler used as RTC clock */ +/** + * @} + */ + +#if defined(RCC_DCKCFGR_TIMPRE) +/** @defgroup RCC_LL_EC_TIM_CLKPRESCALER Timers clocks prescalers selection + * @{ + */ +#define LL_RCC_TIM_PRESCALER_TWICE 0x00000000U /*!< Timers clock to twice PCLK */ +#define LL_RCC_TIM_PRESCALER_FOUR_TIMES RCC_DCKCFGR_TIMPRE /*!< Timers clock to four time PCLK */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_TIMPRE */ + +/** @defgroup RCC_LL_EC_PLLSOURCE PLL, PLLI2S and PLLSAI entry clock source + * @{ + */ +#define LL_RCC_PLLSOURCE_HSI RCC_PLLCFGR_PLLSRC_HSI /*!< HSI16 clock selected as PLL entry clock source */ +#define LL_RCC_PLLSOURCE_HSE RCC_PLLCFGR_PLLSRC_HSE /*!< HSE clock selected as PLL entry clock source */ +#if defined(RCC_PLLI2SCFGR_PLLI2SSRC) +#define LL_RCC_PLLI2SSOURCE_PIN (RCC_PLLI2SCFGR_PLLI2SSRC | 0x80U) /*!< I2S External pin input clock selected as PLLI2S entry clock source */ +#endif /* RCC_PLLI2SCFGR_PLLI2SSRC */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_PLLM_DIV PLL, PLLI2S and PLLSAI division factor + * @{ + */ +#define LL_RCC_PLLM_DIV_2 (RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 2 */ +#define LL_RCC_PLLM_DIV_3 (RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 3 */ +#define LL_RCC_PLLM_DIV_4 (RCC_PLLCFGR_PLLM_2) /*!< PLL, PLLI2S and PLLSAI division factor by 4 */ +#define LL_RCC_PLLM_DIV_5 (RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 5 */ +#define LL_RCC_PLLM_DIV_6 (RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 6 */ +#define LL_RCC_PLLM_DIV_7 (RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 7 */ +#define LL_RCC_PLLM_DIV_8 (RCC_PLLCFGR_PLLM_3) /*!< PLL, PLLI2S and PLLSAI division factor by 8 */ +#define LL_RCC_PLLM_DIV_9 (RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 9 */ +#define LL_RCC_PLLM_DIV_10 (RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 10 */ +#define LL_RCC_PLLM_DIV_11 (RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 11 */ +#define LL_RCC_PLLM_DIV_12 (RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2) /*!< PLL, PLLI2S and PLLSAI division factor by 12 */ +#define LL_RCC_PLLM_DIV_13 (RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 13 */ +#define LL_RCC_PLLM_DIV_14 (RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 14 */ +#define LL_RCC_PLLM_DIV_15 (RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 15 */ +#define LL_RCC_PLLM_DIV_16 (RCC_PLLCFGR_PLLM_4) /*!< PLL, PLLI2S and PLLSAI division factor by 16 */ +#define LL_RCC_PLLM_DIV_17 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 17 */ +#define LL_RCC_PLLM_DIV_18 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 18 */ +#define LL_RCC_PLLM_DIV_19 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 19 */ +#define LL_RCC_PLLM_DIV_20 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2) /*!< 
PLL, PLLI2S and PLLSAI division factor by 20 */ +#define LL_RCC_PLLM_DIV_21 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 21 */ +#define LL_RCC_PLLM_DIV_22 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 22 */ +#define LL_RCC_PLLM_DIV_23 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 23 */ +#define LL_RCC_PLLM_DIV_24 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3) /*!< PLL, PLLI2S and PLLSAI division factor by 24 */ +#define LL_RCC_PLLM_DIV_25 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 25 */ +#define LL_RCC_PLLM_DIV_26 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 26 */ +#define LL_RCC_PLLM_DIV_27 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 27 */ +#define LL_RCC_PLLM_DIV_28 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2) /*!< PLL, PLLI2S and PLLSAI division factor by 28 */ +#define LL_RCC_PLLM_DIV_29 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 29 */ +#define LL_RCC_PLLM_DIV_30 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 30 */ +#define LL_RCC_PLLM_DIV_31 (RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 31 */ +#define LL_RCC_PLLM_DIV_32 (RCC_PLLCFGR_PLLM_5) /*!< PLL, PLLI2S and PLLSAI division factor by 32 */ +#define LL_RCC_PLLM_DIV_33 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 33 */ +#define LL_RCC_PLLM_DIV_34 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 34 */ +#define LL_RCC_PLLM_DIV_35 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 35 */ +#define LL_RCC_PLLM_DIV_36 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_2) /*!< PLL, PLLI2S and PLLSAI division factor by 36 */ +#define LL_RCC_PLLM_DIV_37 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 37 */ +#define LL_RCC_PLLM_DIV_38 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 38 */ +#define LL_RCC_PLLM_DIV_39 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 39 */ +#define LL_RCC_PLLM_DIV_40 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3) /*!< PLL, PLLI2S and PLLSAI division factor by 40 */ +#define LL_RCC_PLLM_DIV_41 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 41 */ +#define LL_RCC_PLLM_DIV_42 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 42 */ +#define LL_RCC_PLLM_DIV_43 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 43 */ +#define LL_RCC_PLLM_DIV_44 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2) /*!< PLL, PLLI2S and PLLSAI division factor by 44 */ +#define LL_RCC_PLLM_DIV_45 
(RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 45 */ +#define LL_RCC_PLLM_DIV_46 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 46 */ +#define LL_RCC_PLLM_DIV_47 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 47 */ +#define LL_RCC_PLLM_DIV_48 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4) /*!< PLL, PLLI2S and PLLSAI division factor by 48 */ +#define LL_RCC_PLLM_DIV_49 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 49 */ +#define LL_RCC_PLLM_DIV_50 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 50 */ +#define LL_RCC_PLLM_DIV_51 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 51 */ +#define LL_RCC_PLLM_DIV_52 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2) /*!< PLL, PLLI2S and PLLSAI division factor by 52 */ +#define LL_RCC_PLLM_DIV_53 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 53 */ +#define LL_RCC_PLLM_DIV_54 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 54 */ +#define LL_RCC_PLLM_DIV_55 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 55 */ +#define LL_RCC_PLLM_DIV_56 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3) /*!< PLL, PLLI2S and PLLSAI division factor by 56 */ +#define LL_RCC_PLLM_DIV_57 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 57 */ +#define LL_RCC_PLLM_DIV_58 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 58 */ +#define LL_RCC_PLLM_DIV_59 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 59 */ +#define LL_RCC_PLLM_DIV_60 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2) /*!< PLL, PLLI2S and PLLSAI division factor by 60 */ +#define LL_RCC_PLLM_DIV_61 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 61 */ +#define LL_RCC_PLLM_DIV_62 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1) /*!< PLL, PLLI2S and PLLSAI division factor by 62 */ +#define LL_RCC_PLLM_DIV_63 (RCC_PLLCFGR_PLLM_5 | RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLM_3 | RCC_PLLCFGR_PLLM_2 | RCC_PLLCFGR_PLLM_1 | RCC_PLLCFGR_PLLM_0) /*!< PLL, PLLI2S and PLLSAI division factor by 63 */ +/** + * @} + */ + +#if defined(RCC_PLLCFGR_PLLR) +/** @defgroup RCC_LL_EC_PLLR_DIV PLL division factor (PLLR) + * @{ + */ +#define LL_RCC_PLLR_DIV_2 (RCC_PLLCFGR_PLLR_1) /*!< Main PLL division factor for PLLCLK (system clock) by 2 */ +#define LL_RCC_PLLR_DIV_3 (RCC_PLLCFGR_PLLR_1|RCC_PLLCFGR_PLLR_0) /*!< Main PLL division factor for PLLCLK (system clock) by 3 */ +#define LL_RCC_PLLR_DIV_4 (RCC_PLLCFGR_PLLR_2) /*!< Main 
PLL division factor for PLLCLK (system clock) by 4 */ +#define LL_RCC_PLLR_DIV_5 (RCC_PLLCFGR_PLLR_2|RCC_PLLCFGR_PLLR_0) /*!< Main PLL division factor for PLLCLK (system clock) by 5 */ +#define LL_RCC_PLLR_DIV_6 (RCC_PLLCFGR_PLLR_2|RCC_PLLCFGR_PLLR_1) /*!< Main PLL division factor for PLLCLK (system clock) by 6 */ +#define LL_RCC_PLLR_DIV_7 (RCC_PLLCFGR_PLLR) /*!< Main PLL division factor for PLLCLK (system clock) by 7 */ +/** + * @} + */ +#endif /* RCC_PLLCFGR_PLLR */ + +#if defined(RCC_DCKCFGR_PLLDIVR) +/** @defgroup RCC_LL_EC_PLLDIVR PLLDIVR division factor (PLLDIVR) + * @{ + */ +#define LL_RCC_PLLDIVR_DIV_1 (RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 1 */ +#define LL_RCC_PLLDIVR_DIV_2 (RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR output by 2 */ +#define LL_RCC_PLLDIVR_DIV_3 (RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 3 */ +#define LL_RCC_PLLDIVR_DIV_4 (RCC_DCKCFGR_PLLDIVR_2) /*!< PLL division factor for PLLDIVR output by 4 */ +#define LL_RCC_PLLDIVR_DIV_5 (RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 5 */ +#define LL_RCC_PLLDIVR_DIV_6 (RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR output by 6 */ +#define LL_RCC_PLLDIVR_DIV_7 (RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 7 */ +#define LL_RCC_PLLDIVR_DIV_8 (RCC_DCKCFGR_PLLDIVR_3) /*!< PLL division factor for PLLDIVR output by 8 */ +#define LL_RCC_PLLDIVR_DIV_9 (RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 9 */ +#define LL_RCC_PLLDIVR_DIV_10 (RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR output by 10 */ +#define LL_RCC_PLLDIVR_DIV_11 (RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 11 */ +#define LL_RCC_PLLDIVR_DIV_12 (RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2) /*!< PLL division factor for PLLDIVR output by 12 */ +#define LL_RCC_PLLDIVR_DIV_13 (RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 13 */ +#define LL_RCC_PLLDIVR_DIV_14 (RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR output by 14 */ +#define LL_RCC_PLLDIVR_DIV_15 (RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 15 */ +#define LL_RCC_PLLDIVR_DIV_16 (RCC_DCKCFGR_PLLDIVR_4) /*!< PLL division factor for PLLDIVR output by 16 */ +#define LL_RCC_PLLDIVR_DIV_17 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 17 */ +#define LL_RCC_PLLDIVR_DIV_18 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR output by 18 */ +#define LL_RCC_PLLDIVR_DIV_19 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 19 */ +#define LL_RCC_PLLDIVR_DIV_20 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_2) /*!< PLL division factor for PLLDIVR output by 20 */ +#define LL_RCC_PLLDIVR_DIV_21 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 21 */ +#define LL_RCC_PLLDIVR_DIV_22 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR 
output by 22 */ +#define LL_RCC_PLLDIVR_DIV_23 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 23 */ +#define LL_RCC_PLLDIVR_DIV_24 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3) /*!< PLL division factor for PLLDIVR output by 24 */ +#define LL_RCC_PLLDIVR_DIV_25 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 25 */ +#define LL_RCC_PLLDIVR_DIV_26 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR output by 26 */ +#define LL_RCC_PLLDIVR_DIV_27 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 27 */ +#define LL_RCC_PLLDIVR_DIV_28 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2) /*!< PLL division factor for PLLDIVR output by 28 */ +#define LL_RCC_PLLDIVR_DIV_29 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 29 */ +#define LL_RCC_PLLDIVR_DIV_30 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1) /*!< PLL division factor for PLLDIVR output by 30 */ +#define LL_RCC_PLLDIVR_DIV_31 (RCC_DCKCFGR_PLLDIVR_4 | RCC_DCKCFGR_PLLDIVR_3 | RCC_DCKCFGR_PLLDIVR_2 | RCC_DCKCFGR_PLLDIVR_1 | RCC_DCKCFGR_PLLDIVR_0) /*!< PLL division factor for PLLDIVR output by 31 */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_PLLDIVR */ + +/** @defgroup RCC_LL_EC_PLLP_DIV PLL division factor (PLLP) + * @{ + */ +#define LL_RCC_PLLP_DIV_2 0x00000000U /*!< Main PLL division factor for PLLP output by 2 */ +#define LL_RCC_PLLP_DIV_4 RCC_PLLCFGR_PLLP_0 /*!< Main PLL division factor for PLLP output by 4 */ +#define LL_RCC_PLLP_DIV_6 RCC_PLLCFGR_PLLP_1 /*!< Main PLL division factor for PLLP output by 6 */ +#define LL_RCC_PLLP_DIV_8 (RCC_PLLCFGR_PLLP_1 | RCC_PLLCFGR_PLLP_0) /*!< Main PLL division factor for PLLP output by 8 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_PLLQ_DIV PLL division factor (PLLQ) + * @{ + */ +#define LL_RCC_PLLQ_DIV_2 RCC_PLLCFGR_PLLQ_1 /*!< Main PLL division factor for PLLQ output by 2 */ +#define LL_RCC_PLLQ_DIV_3 (RCC_PLLCFGR_PLLQ_1|RCC_PLLCFGR_PLLQ_0) /*!< Main PLL division factor for PLLQ output by 3 */ +#define LL_RCC_PLLQ_DIV_4 RCC_PLLCFGR_PLLQ_2 /*!< Main PLL division factor for PLLQ output by 4 */ +#define LL_RCC_PLLQ_DIV_5 (RCC_PLLCFGR_PLLQ_2|RCC_PLLCFGR_PLLQ_0) /*!< Main PLL division factor for PLLQ output by 5 */ +#define LL_RCC_PLLQ_DIV_6 (RCC_PLLCFGR_PLLQ_2|RCC_PLLCFGR_PLLQ_1) /*!< Main PLL division factor for PLLQ output by 6 */ +#define LL_RCC_PLLQ_DIV_7 (RCC_PLLCFGR_PLLQ_2|RCC_PLLCFGR_PLLQ_1|RCC_PLLCFGR_PLLQ_0) /*!< Main PLL division factor for PLLQ output by 7 */ +#define LL_RCC_PLLQ_DIV_8 RCC_PLLCFGR_PLLQ_3 /*!< Main PLL division factor for PLLQ output by 8 */ +#define LL_RCC_PLLQ_DIV_9 (RCC_PLLCFGR_PLLQ_3|RCC_PLLCFGR_PLLQ_0) /*!< Main PLL division factor for PLLQ output by 9 */ +#define LL_RCC_PLLQ_DIV_10 (RCC_PLLCFGR_PLLQ_3|RCC_PLLCFGR_PLLQ_1) /*!< Main PLL division factor for PLLQ output by 10 */ +#define LL_RCC_PLLQ_DIV_11 (RCC_PLLCFGR_PLLQ_3|RCC_PLLCFGR_PLLQ_1|RCC_PLLCFGR_PLLQ_0) /*!< Main PLL division factor for PLLQ output by 11 */ +#define LL_RCC_PLLQ_DIV_12 (RCC_PLLCFGR_PLLQ_3|RCC_PLLCFGR_PLLQ_2) /*!< Main PLL division factor for PLLQ output by 12 */ +#define LL_RCC_PLLQ_DIV_13 
(RCC_PLLCFGR_PLLQ_3|RCC_PLLCFGR_PLLQ_2|RCC_PLLCFGR_PLLQ_0) /*!< Main PLL division factor for PLLQ output by 13 */ +#define LL_RCC_PLLQ_DIV_14 (RCC_PLLCFGR_PLLQ_3|RCC_PLLCFGR_PLLQ_2|RCC_PLLCFGR_PLLQ_1) /*!< Main PLL division factor for PLLQ output by 14 */ +#define LL_RCC_PLLQ_DIV_15 (RCC_PLLCFGR_PLLQ_3|RCC_PLLCFGR_PLLQ_2|RCC_PLLCFGR_PLLQ_1|RCC_PLLCFGR_PLLQ_0) /*!< Main PLL division factor for PLLQ output by 15 */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_PLL_SPRE_SEL PLL Spread Spectrum Selection + * @{ + */ +#define LL_RCC_SPREAD_SELECT_CENTER 0x00000000U /*!< PLL center spread spectrum selection */ +#define LL_RCC_SPREAD_SELECT_DOWN RCC_SSCGR_SPREADSEL /*!< PLL down spread spectrum selection */ +/** + * @} + */ + +#if defined(RCC_PLLI2S_SUPPORT) +/** @defgroup RCC_LL_EC_PLLI2SM PLLI2SM division factor (PLLI2SM) + * @{ + */ +#if defined(RCC_PLLI2SCFGR_PLLI2SM) +#define LL_RCC_PLLI2SM_DIV_2 (RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 2 */ +#define LL_RCC_PLLI2SM_DIV_3 (RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 3 */ +#define LL_RCC_PLLI2SM_DIV_4 (RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 4 */ +#define LL_RCC_PLLI2SM_DIV_5 (RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 5 */ +#define LL_RCC_PLLI2SM_DIV_6 (RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 6 */ +#define LL_RCC_PLLI2SM_DIV_7 (RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 7 */ +#define LL_RCC_PLLI2SM_DIV_8 (RCC_PLLI2SCFGR_PLLI2SM_3) /*!< PLLI2S division factor for PLLI2SM output by 8 */ +#define LL_RCC_PLLI2SM_DIV_9 (RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 9 */ +#define LL_RCC_PLLI2SM_DIV_10 (RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 10 */ +#define LL_RCC_PLLI2SM_DIV_11 (RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 11 */ +#define LL_RCC_PLLI2SM_DIV_12 (RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 12 */ +#define LL_RCC_PLLI2SM_DIV_13 (RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 13 */ +#define LL_RCC_PLLI2SM_DIV_14 (RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 14 */ +#define LL_RCC_PLLI2SM_DIV_15 (RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 15 */ +#define LL_RCC_PLLI2SM_DIV_16 (RCC_PLLI2SCFGR_PLLI2SM_4) /*!< PLLI2S division factor for PLLI2SM output by 16 */ +#define LL_RCC_PLLI2SM_DIV_17 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 17 */ +#define LL_RCC_PLLI2SM_DIV_18 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 18 */ +#define LL_RCC_PLLI2SM_DIV_19 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 19 */ +#define LL_RCC_PLLI2SM_DIV_20 (RCC_PLLI2SCFGR_PLLI2SM_4 | 
RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 20 */ +#define LL_RCC_PLLI2SM_DIV_21 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 21 */ +#define LL_RCC_PLLI2SM_DIV_22 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 22 */ +#define LL_RCC_PLLI2SM_DIV_23 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 23 */ +#define LL_RCC_PLLI2SM_DIV_24 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3) /*!< PLLI2S division factor for PLLI2SM output by 24 */ +#define LL_RCC_PLLI2SM_DIV_25 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 25 */ +#define LL_RCC_PLLI2SM_DIV_26 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 26 */ +#define LL_RCC_PLLI2SM_DIV_27 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 27 */ +#define LL_RCC_PLLI2SM_DIV_28 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 28 */ +#define LL_RCC_PLLI2SM_DIV_29 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 29 */ +#define LL_RCC_PLLI2SM_DIV_30 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 30 */ +#define LL_RCC_PLLI2SM_DIV_31 (RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 31 */ +#define LL_RCC_PLLI2SM_DIV_32 (RCC_PLLI2SCFGR_PLLI2SM_5) /*!< PLLI2S division factor for PLLI2SM output by 32 */ +#define LL_RCC_PLLI2SM_DIV_33 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 33 */ +#define LL_RCC_PLLI2SM_DIV_34 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 34 */ +#define LL_RCC_PLLI2SM_DIV_35 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 35 */ +#define LL_RCC_PLLI2SM_DIV_36 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 36 */ +#define LL_RCC_PLLI2SM_DIV_37 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 37 */ +#define LL_RCC_PLLI2SM_DIV_38 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 38 */ +#define LL_RCC_PLLI2SM_DIV_39 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 39 */ +#define LL_RCC_PLLI2SM_DIV_40 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3) /*!< PLLI2S division factor for PLLI2SM output by 40 */ +#define LL_RCC_PLLI2SM_DIV_41 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_0) 
/*!< PLLI2S division factor for PLLI2SM output by 41 */ +#define LL_RCC_PLLI2SM_DIV_42 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 42 */ +#define LL_RCC_PLLI2SM_DIV_43 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 43 */ +#define LL_RCC_PLLI2SM_DIV_44 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 44 */ +#define LL_RCC_PLLI2SM_DIV_45 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 45 */ +#define LL_RCC_PLLI2SM_DIV_46 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 46 */ +#define LL_RCC_PLLI2SM_DIV_47 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 47 */ +#define LL_RCC_PLLI2SM_DIV_48 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4) /*!< PLLI2S division factor for PLLI2SM output by 48 */ +#define LL_RCC_PLLI2SM_DIV_49 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 49 */ +#define LL_RCC_PLLI2SM_DIV_50 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 50 */ +#define LL_RCC_PLLI2SM_DIV_51 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 51 */ +#define LL_RCC_PLLI2SM_DIV_52 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 52 */ +#define LL_RCC_PLLI2SM_DIV_53 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 53 */ +#define LL_RCC_PLLI2SM_DIV_54 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 54 */ +#define LL_RCC_PLLI2SM_DIV_55 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 55 */ +#define LL_RCC_PLLI2SM_DIV_56 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3) /*!< PLLI2S division factor for PLLI2SM output by 56 */ +#define LL_RCC_PLLI2SM_DIV_57 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 57 */ +#define LL_RCC_PLLI2SM_DIV_58 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 58 */ +#define LL_RCC_PLLI2SM_DIV_59 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 59 */ +#define LL_RCC_PLLI2SM_DIV_60 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | 
RCC_PLLI2SCFGR_PLLI2SM_2) /*!< PLLI2S division factor for PLLI2SM output by 60 */ +#define LL_RCC_PLLI2SM_DIV_61 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 61 */ +#define LL_RCC_PLLI2SM_DIV_62 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1) /*!< PLLI2S division factor for PLLI2SM output by 62 */ +#define LL_RCC_PLLI2SM_DIV_63 (RCC_PLLI2SCFGR_PLLI2SM_5 | RCC_PLLI2SCFGR_PLLI2SM_4 | RCC_PLLI2SCFGR_PLLI2SM_3 | RCC_PLLI2SCFGR_PLLI2SM_2 | RCC_PLLI2SCFGR_PLLI2SM_1 | RCC_PLLI2SCFGR_PLLI2SM_0) /*!< PLLI2S division factor for PLLI2SM output by 63 */ +#else +#define LL_RCC_PLLI2SM_DIV_2 LL_RCC_PLLM_DIV_2 /*!< PLLI2S division factor for PLLI2SM output by 2 */ +#define LL_RCC_PLLI2SM_DIV_3 LL_RCC_PLLM_DIV_3 /*!< PLLI2S division factor for PLLI2SM output by 3 */ +#define LL_RCC_PLLI2SM_DIV_4 LL_RCC_PLLM_DIV_4 /*!< PLLI2S division factor for PLLI2SM output by 4 */ +#define LL_RCC_PLLI2SM_DIV_5 LL_RCC_PLLM_DIV_5 /*!< PLLI2S division factor for PLLI2SM output by 5 */ +#define LL_RCC_PLLI2SM_DIV_6 LL_RCC_PLLM_DIV_6 /*!< PLLI2S division factor for PLLI2SM output by 6 */ +#define LL_RCC_PLLI2SM_DIV_7 LL_RCC_PLLM_DIV_7 /*!< PLLI2S division factor for PLLI2SM output by 7 */ +#define LL_RCC_PLLI2SM_DIV_8 LL_RCC_PLLM_DIV_8 /*!< PLLI2S division factor for PLLI2SM output by 8 */ +#define LL_RCC_PLLI2SM_DIV_9 LL_RCC_PLLM_DIV_9 /*!< PLLI2S division factor for PLLI2SM output by 9 */ +#define LL_RCC_PLLI2SM_DIV_10 LL_RCC_PLLM_DIV_10 /*!< PLLI2S division factor for PLLI2SM output by 10 */ +#define LL_RCC_PLLI2SM_DIV_11 LL_RCC_PLLM_DIV_11 /*!< PLLI2S division factor for PLLI2SM output by 11 */ +#define LL_RCC_PLLI2SM_DIV_12 LL_RCC_PLLM_DIV_12 /*!< PLLI2S division factor for PLLI2SM output by 12 */ +#define LL_RCC_PLLI2SM_DIV_13 LL_RCC_PLLM_DIV_13 /*!< PLLI2S division factor for PLLI2SM output by 13 */ +#define LL_RCC_PLLI2SM_DIV_14 LL_RCC_PLLM_DIV_14 /*!< PLLI2S division factor for PLLI2SM output by 14 */ +#define LL_RCC_PLLI2SM_DIV_15 LL_RCC_PLLM_DIV_15 /*!< PLLI2S division factor for PLLI2SM output by 15 */ +#define LL_RCC_PLLI2SM_DIV_16 LL_RCC_PLLM_DIV_16 /*!< PLLI2S division factor for PLLI2SM output by 16 */ +#define LL_RCC_PLLI2SM_DIV_17 LL_RCC_PLLM_DIV_17 /*!< PLLI2S division factor for PLLI2SM output by 17 */ +#define LL_RCC_PLLI2SM_DIV_18 LL_RCC_PLLM_DIV_18 /*!< PLLI2S division factor for PLLI2SM output by 18 */ +#define LL_RCC_PLLI2SM_DIV_19 LL_RCC_PLLM_DIV_19 /*!< PLLI2S division factor for PLLI2SM output by 19 */ +#define LL_RCC_PLLI2SM_DIV_20 LL_RCC_PLLM_DIV_20 /*!< PLLI2S division factor for PLLI2SM output by 20 */ +#define LL_RCC_PLLI2SM_DIV_21 LL_RCC_PLLM_DIV_21 /*!< PLLI2S division factor for PLLI2SM output by 21 */ +#define LL_RCC_PLLI2SM_DIV_22 LL_RCC_PLLM_DIV_22 /*!< PLLI2S division factor for PLLI2SM output by 22 */ +#define LL_RCC_PLLI2SM_DIV_23 LL_RCC_PLLM_DIV_23 /*!< PLLI2S division factor for PLLI2SM output by 23 */ +#define LL_RCC_PLLI2SM_DIV_24 LL_RCC_PLLM_DIV_24 /*!< PLLI2S division factor for PLLI2SM output by 24 */ +#define LL_RCC_PLLI2SM_DIV_25 LL_RCC_PLLM_DIV_25 /*!< PLLI2S division factor for PLLI2SM output by 25 */ +#define LL_RCC_PLLI2SM_DIV_26 LL_RCC_PLLM_DIV_26 /*!< PLLI2S division factor for PLLI2SM output by 26 */ +#define LL_RCC_PLLI2SM_DIV_27 LL_RCC_PLLM_DIV_27 /*!< PLLI2S division factor for PLLI2SM output by 27 */ +#define LL_RCC_PLLI2SM_DIV_28 
LL_RCC_PLLM_DIV_28 /*!< PLLI2S division factor for PLLI2SM output by 28 */ +#define LL_RCC_PLLI2SM_DIV_29 LL_RCC_PLLM_DIV_29 /*!< PLLI2S division factor for PLLI2SM output by 29 */ +#define LL_RCC_PLLI2SM_DIV_30 LL_RCC_PLLM_DIV_30 /*!< PLLI2S division factor for PLLI2SM output by 30 */ +#define LL_RCC_PLLI2SM_DIV_31 LL_RCC_PLLM_DIV_31 /*!< PLLI2S division factor for PLLI2SM output by 31 */ +#define LL_RCC_PLLI2SM_DIV_32 LL_RCC_PLLM_DIV_32 /*!< PLLI2S division factor for PLLI2SM output by 32 */ +#define LL_RCC_PLLI2SM_DIV_33 LL_RCC_PLLM_DIV_33 /*!< PLLI2S division factor for PLLI2SM output by 33 */ +#define LL_RCC_PLLI2SM_DIV_34 LL_RCC_PLLM_DIV_34 /*!< PLLI2S division factor for PLLI2SM output by 34 */ +#define LL_RCC_PLLI2SM_DIV_35 LL_RCC_PLLM_DIV_35 /*!< PLLI2S division factor for PLLI2SM output by 35 */ +#define LL_RCC_PLLI2SM_DIV_36 LL_RCC_PLLM_DIV_36 /*!< PLLI2S division factor for PLLI2SM output by 36 */ +#define LL_RCC_PLLI2SM_DIV_37 LL_RCC_PLLM_DIV_37 /*!< PLLI2S division factor for PLLI2SM output by 37 */ +#define LL_RCC_PLLI2SM_DIV_38 LL_RCC_PLLM_DIV_38 /*!< PLLI2S division factor for PLLI2SM output by 38 */ +#define LL_RCC_PLLI2SM_DIV_39 LL_RCC_PLLM_DIV_39 /*!< PLLI2S division factor for PLLI2SM output by 39 */ +#define LL_RCC_PLLI2SM_DIV_40 LL_RCC_PLLM_DIV_40 /*!< PLLI2S division factor for PLLI2SM output by 40 */ +#define LL_RCC_PLLI2SM_DIV_41 LL_RCC_PLLM_DIV_41 /*!< PLLI2S division factor for PLLI2SM output by 41 */ +#define LL_RCC_PLLI2SM_DIV_42 LL_RCC_PLLM_DIV_42 /*!< PLLI2S division factor for PLLI2SM output by 42 */ +#define LL_RCC_PLLI2SM_DIV_43 LL_RCC_PLLM_DIV_43 /*!< PLLI2S division factor for PLLI2SM output by 43 */ +#define LL_RCC_PLLI2SM_DIV_44 LL_RCC_PLLM_DIV_44 /*!< PLLI2S division factor for PLLI2SM output by 44 */ +#define LL_RCC_PLLI2SM_DIV_45 LL_RCC_PLLM_DIV_45 /*!< PLLI2S division factor for PLLI2SM output by 45 */ +#define LL_RCC_PLLI2SM_DIV_46 LL_RCC_PLLM_DIV_46 /*!< PLLI2S division factor for PLLI2SM output by 46 */ +#define LL_RCC_PLLI2SM_DIV_47 LL_RCC_PLLM_DIV_47 /*!< PLLI2S division factor for PLLI2SM output by 47 */ +#define LL_RCC_PLLI2SM_DIV_48 LL_RCC_PLLM_DIV_48 /*!< PLLI2S division factor for PLLI2SM output by 48 */ +#define LL_RCC_PLLI2SM_DIV_49 LL_RCC_PLLM_DIV_49 /*!< PLLI2S division factor for PLLI2SM output by 49 */ +#define LL_RCC_PLLI2SM_DIV_50 LL_RCC_PLLM_DIV_50 /*!< PLLI2S division factor for PLLI2SM output by 50 */ +#define LL_RCC_PLLI2SM_DIV_51 LL_RCC_PLLM_DIV_51 /*!< PLLI2S division factor for PLLI2SM output by 51 */ +#define LL_RCC_PLLI2SM_DIV_52 LL_RCC_PLLM_DIV_52 /*!< PLLI2S division factor for PLLI2SM output by 52 */ +#define LL_RCC_PLLI2SM_DIV_53 LL_RCC_PLLM_DIV_53 /*!< PLLI2S division factor for PLLI2SM output by 53 */ +#define LL_RCC_PLLI2SM_DIV_54 LL_RCC_PLLM_DIV_54 /*!< PLLI2S division factor for PLLI2SM output by 54 */ +#define LL_RCC_PLLI2SM_DIV_55 LL_RCC_PLLM_DIV_55 /*!< PLLI2S division factor for PLLI2SM output by 55 */ +#define LL_RCC_PLLI2SM_DIV_56 LL_RCC_PLLM_DIV_56 /*!< PLLI2S division factor for PLLI2SM output by 56 */ +#define LL_RCC_PLLI2SM_DIV_57 LL_RCC_PLLM_DIV_57 /*!< PLLI2S division factor for PLLI2SM output by 57 */ +#define LL_RCC_PLLI2SM_DIV_58 LL_RCC_PLLM_DIV_58 /*!< PLLI2S division factor for PLLI2SM output by 58 */ +#define LL_RCC_PLLI2SM_DIV_59 LL_RCC_PLLM_DIV_59 /*!< PLLI2S division factor for PLLI2SM output by 59 */ +#define LL_RCC_PLLI2SM_DIV_60 LL_RCC_PLLM_DIV_60 /*!< PLLI2S division factor for PLLI2SM output by 60 */ +#define LL_RCC_PLLI2SM_DIV_61 LL_RCC_PLLM_DIV_61 /*!< PLLI2S division factor for 
PLLI2SM output by 61 */ +#define LL_RCC_PLLI2SM_DIV_62 LL_RCC_PLLM_DIV_62 /*!< PLLI2S division factor for PLLI2SM output by 62 */ +#define LL_RCC_PLLI2SM_DIV_63 LL_RCC_PLLM_DIV_63 /*!< PLLI2S division factor for PLLI2SM output by 63 */ +#endif /* RCC_PLLI2SCFGR_PLLI2SM */ +/** + * @} + */ + +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) +/** @defgroup RCC_LL_EC_PLLI2SQ PLLI2SQ division factor (PLLI2SQ) + * @{ + */ +#define LL_RCC_PLLI2SQ_DIV_2 RCC_PLLI2SCFGR_PLLI2SQ_1 /*!< PLLI2S division factor for PLLI2SQ output by 2 */ +#define LL_RCC_PLLI2SQ_DIV_3 (RCC_PLLI2SCFGR_PLLI2SQ_1 | RCC_PLLI2SCFGR_PLLI2SQ_0) /*!< PLLI2S division factor for PLLI2SQ output by 3 */ +#define LL_RCC_PLLI2SQ_DIV_4 RCC_PLLI2SCFGR_PLLI2SQ_2 /*!< PLLI2S division factor for PLLI2SQ output by 4 */ +#define LL_RCC_PLLI2SQ_DIV_5 (RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SQ_0) /*!< PLLI2S division factor for PLLI2SQ output by 5 */ +#define LL_RCC_PLLI2SQ_DIV_6 (RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SQ_1) /*!< PLLI2S division factor for PLLI2SQ output by 6 */ +#define LL_RCC_PLLI2SQ_DIV_7 (RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SQ_1 | RCC_PLLI2SCFGR_PLLI2SQ_0) /*!< PLLI2S division factor for PLLI2SQ output by 7 */ +#define LL_RCC_PLLI2SQ_DIV_8 RCC_PLLI2SCFGR_PLLI2SQ_3 /*!< PLLI2S division factor for PLLI2SQ output by 8 */ +#define LL_RCC_PLLI2SQ_DIV_9 (RCC_PLLI2SCFGR_PLLI2SQ_3 | RCC_PLLI2SCFGR_PLLI2SQ_0) /*!< PLLI2S division factor for PLLI2SQ output by 9 */ +#define LL_RCC_PLLI2SQ_DIV_10 (RCC_PLLI2SCFGR_PLLI2SQ_3 | RCC_PLLI2SCFGR_PLLI2SQ_1) /*!< PLLI2S division factor for PLLI2SQ output by 10 */ +#define LL_RCC_PLLI2SQ_DIV_11 (RCC_PLLI2SCFGR_PLLI2SQ_3 | RCC_PLLI2SCFGR_PLLI2SQ_1 | RCC_PLLI2SCFGR_PLLI2SQ_0) /*!< PLLI2S division factor for PLLI2SQ output by 11 */ +#define LL_RCC_PLLI2SQ_DIV_12 (RCC_PLLI2SCFGR_PLLI2SQ_3 | RCC_PLLI2SCFGR_PLLI2SQ_2) /*!< PLLI2S division factor for PLLI2SQ output by 12 */ +#define LL_RCC_PLLI2SQ_DIV_13 (RCC_PLLI2SCFGR_PLLI2SQ_3 | RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SQ_0) /*!< PLLI2S division factor for PLLI2SQ output by 13 */ +#define LL_RCC_PLLI2SQ_DIV_14 (RCC_PLLI2SCFGR_PLLI2SQ_3 | RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SQ_1) /*!< PLLI2S division factor for PLLI2SQ output by 14 */ +#define LL_RCC_PLLI2SQ_DIV_15 (RCC_PLLI2SCFGR_PLLI2SQ_3 | RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SQ_1 | RCC_PLLI2SCFGR_PLLI2SQ_0) /*!< PLLI2S division factor for PLLI2SQ output by 15 */ +/** + * @} + */ +#endif /* RCC_PLLI2SCFGR_PLLI2SQ */ + +#if defined(RCC_DCKCFGR_PLLI2SDIVQ) +/** @defgroup RCC_LL_EC_PLLI2SDIVQ PLLI2SDIVQ division factor (PLLI2SDIVQ) + * @{ + */ +#define LL_RCC_PLLI2SDIVQ_DIV_1 0x00000000U /*!< PLLI2S division factor for PLLI2SDIVQ output by 1 */ +#define LL_RCC_PLLI2SDIVQ_DIV_2 RCC_DCKCFGR_PLLI2SDIVQ_0 /*!< PLLI2S division factor for PLLI2SDIVQ output by 2 */ +#define LL_RCC_PLLI2SDIVQ_DIV_3 RCC_DCKCFGR_PLLI2SDIVQ_1 /*!< PLLI2S division factor for PLLI2SDIVQ output by 3 */ +#define LL_RCC_PLLI2SDIVQ_DIV_4 (RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 4 */ +#define LL_RCC_PLLI2SDIVQ_DIV_5 RCC_DCKCFGR_PLLI2SDIVQ_2 /*!< PLLI2S division factor for PLLI2SDIVQ output by 5 */ +#define LL_RCC_PLLI2SDIVQ_DIV_6 (RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 6 */ +#define LL_RCC_PLLI2SDIVQ_DIV_7 (RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1) /*!< PLLI2S division factor for PLLI2SDIVQ output by 7 */ +#define LL_RCC_PLLI2SDIVQ_DIV_8 
(RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 8 */ +#define LL_RCC_PLLI2SDIVQ_DIV_9 RCC_DCKCFGR_PLLI2SDIVQ_3 /*!< PLLI2S division factor for PLLI2SDIVQ output by 9 */ +#define LL_RCC_PLLI2SDIVQ_DIV_10 (RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 10 */ +#define LL_RCC_PLLI2SDIVQ_DIV_11 (RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_1) /*!< PLLI2S division factor for PLLI2SDIVQ output by 11 */ +#define LL_RCC_PLLI2SDIVQ_DIV_12 (RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 12 */ +#define LL_RCC_PLLI2SDIVQ_DIV_13 (RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2) /*!< PLLI2S division factor for PLLI2SDIVQ output by 13 */ +#define LL_RCC_PLLI2SDIVQ_DIV_14 (RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 14 */ +#define LL_RCC_PLLI2SDIVQ_DIV_15 (RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1) /*!< PLLI2S division factor for PLLI2SDIVQ output by 15 */ +#define LL_RCC_PLLI2SDIVQ_DIV_16 (RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 16 */ +#define LL_RCC_PLLI2SDIVQ_DIV_17 RCC_DCKCFGR_PLLI2SDIVQ_4 /*!< PLLI2S division factor for PLLI2SDIVQ output by 17 */ +#define LL_RCC_PLLI2SDIVQ_DIV_18 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 18 */ +#define LL_RCC_PLLI2SDIVQ_DIV_19 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_1) /*!< PLLI2S division factor for PLLI2SDIVQ output by 19 */ +#define LL_RCC_PLLI2SDIVQ_DIV_20 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 20 */ +#define LL_RCC_PLLI2SDIVQ_DIV_21 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_2) /*!< PLLI2S division factor for PLLI2SDIVQ output by 21 */ +#define LL_RCC_PLLI2SDIVQ_DIV_22 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 22 */ +#define LL_RCC_PLLI2SDIVQ_DIV_23 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1) /*!< PLLI2S division factor for PLLI2SDIVQ output by 23 */ +#define LL_RCC_PLLI2SDIVQ_DIV_24 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 24 */ +#define LL_RCC_PLLI2SDIVQ_DIV_25 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3) /*!< PLLI2S division factor for PLLI2SDIVQ output by 25 */ +#define LL_RCC_PLLI2SDIVQ_DIV_26 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 26 */ +#define LL_RCC_PLLI2SDIVQ_DIV_27 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_1) /*!< PLLI2S division factor for PLLI2SDIVQ output by 27 */ +#define LL_RCC_PLLI2SDIVQ_DIV_28 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 28 */ +#define LL_RCC_PLLI2SDIVQ_DIV_29 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2) /*!< PLLI2S division factor for PLLI2SDIVQ 
output by 29 */ +#define LL_RCC_PLLI2SDIVQ_DIV_30 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 30 */ +#define LL_RCC_PLLI2SDIVQ_DIV_31 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1) /*!< PLLI2S division factor for PLLI2SDIVQ output by 31 */ +#define LL_RCC_PLLI2SDIVQ_DIV_32 (RCC_DCKCFGR_PLLI2SDIVQ_4 | RCC_DCKCFGR_PLLI2SDIVQ_3 | RCC_DCKCFGR_PLLI2SDIVQ_2 | RCC_DCKCFGR_PLLI2SDIVQ_1 | RCC_DCKCFGR_PLLI2SDIVQ_0) /*!< PLLI2S division factor for PLLI2SDIVQ output by 32 */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_PLLI2SDIVQ */ + +#if defined(RCC_DCKCFGR_PLLI2SDIVR) +/** @defgroup RCC_LL_EC_PLLI2SDIVR PLLI2SDIVR division factor (PLLI2SDIVR) + * @{ + */ +#define LL_RCC_PLLI2SDIVR_DIV_1 (RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 1 */ +#define LL_RCC_PLLI2SDIVR_DIV_2 (RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 2 */ +#define LL_RCC_PLLI2SDIVR_DIV_3 (RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 3 */ +#define LL_RCC_PLLI2SDIVR_DIV_4 (RCC_DCKCFGR_PLLI2SDIVR_2) /*!< PLLI2S division factor for PLLI2SDIVR output by 4 */ +#define LL_RCC_PLLI2SDIVR_DIV_5 (RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 5 */ +#define LL_RCC_PLLI2SDIVR_DIV_6 (RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 6 */ +#define LL_RCC_PLLI2SDIVR_DIV_7 (RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 7 */ +#define LL_RCC_PLLI2SDIVR_DIV_8 (RCC_DCKCFGR_PLLI2SDIVR_3) /*!< PLLI2S division factor for PLLI2SDIVR output by 8 */ +#define LL_RCC_PLLI2SDIVR_DIV_9 (RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 9 */ +#define LL_RCC_PLLI2SDIVR_DIV_10 (RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 10 */ +#define LL_RCC_PLLI2SDIVR_DIV_11 (RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 11 */ +#define LL_RCC_PLLI2SDIVR_DIV_12 (RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2) /*!< PLLI2S division factor for PLLI2SDIVR output by 12 */ +#define LL_RCC_PLLI2SDIVR_DIV_13 (RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 13 */ +#define LL_RCC_PLLI2SDIVR_DIV_14 (RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 14 */ +#define LL_RCC_PLLI2SDIVR_DIV_15 (RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 15 */ +#define LL_RCC_PLLI2SDIVR_DIV_16 (RCC_DCKCFGR_PLLI2SDIVR_4) /*!< PLLI2S division factor for PLLI2SDIVR output by 16 */ +#define LL_RCC_PLLI2SDIVR_DIV_17 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 17 */ +#define LL_RCC_PLLI2SDIVR_DIV_18 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 18 */ +#define LL_RCC_PLLI2SDIVR_DIV_19 (RCC_DCKCFGR_PLLI2SDIVR_4 | 
RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 19 */ +#define LL_RCC_PLLI2SDIVR_DIV_20 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_2) /*!< PLLI2S division factor for PLLI2SDIVR output by 20 */ +#define LL_RCC_PLLI2SDIVR_DIV_21 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 21 */ +#define LL_RCC_PLLI2SDIVR_DIV_22 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 22 */ +#define LL_RCC_PLLI2SDIVR_DIV_23 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 23 */ +#define LL_RCC_PLLI2SDIVR_DIV_24 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3) /*!< PLLI2S division factor for PLLI2SDIVR output by 24 */ +#define LL_RCC_PLLI2SDIVR_DIV_25 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 25 */ +#define LL_RCC_PLLI2SDIVR_DIV_26 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 26 */ +#define LL_RCC_PLLI2SDIVR_DIV_27 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 27 */ +#define LL_RCC_PLLI2SDIVR_DIV_28 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2) /*!< PLLI2S division factor for PLLI2SDIVR output by 28 */ +#define LL_RCC_PLLI2SDIVR_DIV_29 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 29 */ +#define LL_RCC_PLLI2SDIVR_DIV_30 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1) /*!< PLLI2S division factor for PLLI2SDIVR output by 30 */ +#define LL_RCC_PLLI2SDIVR_DIV_31 (RCC_DCKCFGR_PLLI2SDIVR_4 | RCC_DCKCFGR_PLLI2SDIVR_3 | RCC_DCKCFGR_PLLI2SDIVR_2 | RCC_DCKCFGR_PLLI2SDIVR_1 | RCC_DCKCFGR_PLLI2SDIVR_0) /*!< PLLI2S division factor for PLLI2SDIVR output by 31 */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_PLLI2SDIVR */ + +/** @defgroup RCC_LL_EC_PLLI2SR PLLI2SR division factor (PLLI2SR) + * @{ + */ +#define LL_RCC_PLLI2SR_DIV_2 RCC_PLLI2SCFGR_PLLI2SR_1 /*!< PLLI2S division factor for PLLI2SR output by 2 */ +#define LL_RCC_PLLI2SR_DIV_3 (RCC_PLLI2SCFGR_PLLI2SR_1 | RCC_PLLI2SCFGR_PLLI2SR_0) /*!< PLLI2S division factor for PLLI2SR output by 3 */ +#define LL_RCC_PLLI2SR_DIV_4 RCC_PLLI2SCFGR_PLLI2SR_2 /*!< PLLI2S division factor for PLLI2SR output by 4 */ +#define LL_RCC_PLLI2SR_DIV_5 (RCC_PLLI2SCFGR_PLLI2SR_2 | RCC_PLLI2SCFGR_PLLI2SR_0) /*!< PLLI2S division factor for PLLI2SR output by 5 */ +#define LL_RCC_PLLI2SR_DIV_6 (RCC_PLLI2SCFGR_PLLI2SR_2 | RCC_PLLI2SCFGR_PLLI2SR_1) /*!< PLLI2S division factor for PLLI2SR output by 6 */ +#define LL_RCC_PLLI2SR_DIV_7 (RCC_PLLI2SCFGR_PLLI2SR_2 | RCC_PLLI2SCFGR_PLLI2SR_1 | RCC_PLLI2SCFGR_PLLI2SR_0) /*!< PLLI2S division factor for PLLI2SR output by 7 */ +/** + * @} + */ + +#if defined(RCC_PLLI2SCFGR_PLLI2SP) +/** @defgroup RCC_LL_EC_PLLI2SP PLLI2SP division factor (PLLI2SP) + * @{ + */ +#define LL_RCC_PLLI2SP_DIV_2 0x00000000U /*!< PLLI2S division factor for PLLI2SP output by 2 */ +#define LL_RCC_PLLI2SP_DIV_4 RCC_PLLI2SCFGR_PLLI2SP_0 /*!< PLLI2S division 
factor for PLLI2SP output by 4 */ +#define LL_RCC_PLLI2SP_DIV_6 RCC_PLLI2SCFGR_PLLI2SP_1 /*!< PLLI2S division factor for PLLI2SP output by 6 */ +#define LL_RCC_PLLI2SP_DIV_8 (RCC_PLLI2SCFGR_PLLI2SP_1 | RCC_PLLI2SCFGR_PLLI2SP_0) /*!< PLLI2S division factor for PLLI2SP output by 8 */ +/** + * @} + */ +#endif /* RCC_PLLI2SCFGR_PLLI2SP */ +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** @defgroup RCC_LL_EC_PLLSAIM PLLSAIM division factor (PLLSAIM or PLLM) + * @{ + */ +#if defined(RCC_PLLSAICFGR_PLLSAIM) +#define LL_RCC_PLLSAIM_DIV_2 (RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 2 */ +#define LL_RCC_PLLSAIM_DIV_3 (RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 3 */ +#define LL_RCC_PLLSAIM_DIV_4 (RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 4 */ +#define LL_RCC_PLLSAIM_DIV_5 (RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 5 */ +#define LL_RCC_PLLSAIM_DIV_6 (RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 6 */ +#define LL_RCC_PLLSAIM_DIV_7 (RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 7 */ +#define LL_RCC_PLLSAIM_DIV_8 (RCC_PLLSAICFGR_PLLSAIM_3) /*!< PLLSAI division factor for PLLSAIM output by 8 */ +#define LL_RCC_PLLSAIM_DIV_9 (RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 9 */ +#define LL_RCC_PLLSAIM_DIV_10 (RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 10 */ +#define LL_RCC_PLLSAIM_DIV_11 (RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 11 */ +#define LL_RCC_PLLSAIM_DIV_12 (RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 12 */ +#define LL_RCC_PLLSAIM_DIV_13 (RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 13 */ +#define LL_RCC_PLLSAIM_DIV_14 (RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 14 */ +#define LL_RCC_PLLSAIM_DIV_15 (RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 15 */ +#define LL_RCC_PLLSAIM_DIV_16 (RCC_PLLSAICFGR_PLLSAIM_4) /*!< PLLSAI division factor for PLLSAIM output by 16 */ +#define LL_RCC_PLLSAIM_DIV_17 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 17 */ +#define LL_RCC_PLLSAIM_DIV_18 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 18 */ +#define LL_RCC_PLLSAIM_DIV_19 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 19 */ +#define LL_RCC_PLLSAIM_DIV_20 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 20 */ +#define LL_RCC_PLLSAIM_DIV_21 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 21 */ +#define LL_RCC_PLLSAIM_DIV_22 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) 
/*!< PLLSAI division factor for PLLSAIM output by 22 */ +#define LL_RCC_PLLSAIM_DIV_23 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 23 */ +#define LL_RCC_PLLSAIM_DIV_24 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3) /*!< PLLSAI division factor for PLLSAIM output by 24 */ +#define LL_RCC_PLLSAIM_DIV_25 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 25 */ +#define LL_RCC_PLLSAIM_DIV_26 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 26 */ +#define LL_RCC_PLLSAIM_DIV_27 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 27 */ +#define LL_RCC_PLLSAIM_DIV_28 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 28 */ +#define LL_RCC_PLLSAIM_DIV_29 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 29 */ +#define LL_RCC_PLLSAIM_DIV_30 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 30 */ +#define LL_RCC_PLLSAIM_DIV_31 (RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 31 */ +#define LL_RCC_PLLSAIM_DIV_32 (RCC_PLLSAICFGR_PLLSAIM_5) /*!< PLLSAI division factor for PLLSAIM output by 32 */ +#define LL_RCC_PLLSAIM_DIV_33 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 33 */ +#define LL_RCC_PLLSAIM_DIV_34 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 34 */ +#define LL_RCC_PLLSAIM_DIV_35 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 35 */ +#define LL_RCC_PLLSAIM_DIV_36 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 36 */ +#define LL_RCC_PLLSAIM_DIV_37 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 37 */ +#define LL_RCC_PLLSAIM_DIV_38 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 38 */ +#define LL_RCC_PLLSAIM_DIV_39 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 39 */ +#define LL_RCC_PLLSAIM_DIV_40 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3) /*!< PLLSAI division factor for PLLSAIM output by 40 */ +#define LL_RCC_PLLSAIM_DIV_41 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 41 */ +#define LL_RCC_PLLSAIM_DIV_42 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 42 */ +#define LL_RCC_PLLSAIM_DIV_43 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) 
/*!< PLLSAI division factor for PLLSAIM output by 43 */ +#define LL_RCC_PLLSAIM_DIV_44 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 44 */ +#define LL_RCC_PLLSAIM_DIV_45 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 45 */ +#define LL_RCC_PLLSAIM_DIV_46 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 46 */ +#define LL_RCC_PLLSAIM_DIV_47 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 47 */ +#define LL_RCC_PLLSAIM_DIV_48 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4) /*!< PLLSAI division factor for PLLSAIM output by 48 */ +#define LL_RCC_PLLSAIM_DIV_49 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 49 */ +#define LL_RCC_PLLSAIM_DIV_50 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 50 */ +#define LL_RCC_PLLSAIM_DIV_51 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 51 */ +#define LL_RCC_PLLSAIM_DIV_52 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 52 */ +#define LL_RCC_PLLSAIM_DIV_53 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 53 */ +#define LL_RCC_PLLSAIM_DIV_54 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 54 */ +#define LL_RCC_PLLSAIM_DIV_55 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 55 */ +#define LL_RCC_PLLSAIM_DIV_56 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3) /*!< PLLSAI division factor for PLLSAIM output by 56 */ +#define LL_RCC_PLLSAIM_DIV_57 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 57 */ +#define LL_RCC_PLLSAIM_DIV_58 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 58 */ +#define LL_RCC_PLLSAIM_DIV_59 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 59 */ +#define LL_RCC_PLLSAIM_DIV_60 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2) /*!< PLLSAI division factor for PLLSAIM output by 60 */ +#define LL_RCC_PLLSAIM_DIV_61 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 61 */ +#define LL_RCC_PLLSAIM_DIV_62 (RCC_PLLSAICFGR_PLLSAIM_5 | 
RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1) /*!< PLLSAI division factor for PLLSAIM output by 62 */ +#define LL_RCC_PLLSAIM_DIV_63 (RCC_PLLSAICFGR_PLLSAIM_5 | RCC_PLLSAICFGR_PLLSAIM_4 | RCC_PLLSAICFGR_PLLSAIM_3 | RCC_PLLSAICFGR_PLLSAIM_2 | RCC_PLLSAICFGR_PLLSAIM_1 | RCC_PLLSAICFGR_PLLSAIM_0) /*!< PLLSAI division factor for PLLSAIM output by 63 */ +#else +#define LL_RCC_PLLSAIM_DIV_2 LL_RCC_PLLM_DIV_2 /*!< PLLSAI division factor for PLLSAIM output by 2 */ +#define LL_RCC_PLLSAIM_DIV_3 LL_RCC_PLLM_DIV_3 /*!< PLLSAI division factor for PLLSAIM output by 3 */ +#define LL_RCC_PLLSAIM_DIV_4 LL_RCC_PLLM_DIV_4 /*!< PLLSAI division factor for PLLSAIM output by 4 */ +#define LL_RCC_PLLSAIM_DIV_5 LL_RCC_PLLM_DIV_5 /*!< PLLSAI division factor for PLLSAIM output by 5 */ +#define LL_RCC_PLLSAIM_DIV_6 LL_RCC_PLLM_DIV_6 /*!< PLLSAI division factor for PLLSAIM output by 6 */ +#define LL_RCC_PLLSAIM_DIV_7 LL_RCC_PLLM_DIV_7 /*!< PLLSAI division factor for PLLSAIM output by 7 */ +#define LL_RCC_PLLSAIM_DIV_8 LL_RCC_PLLM_DIV_8 /*!< PLLSAI division factor for PLLSAIM output by 8 */ +#define LL_RCC_PLLSAIM_DIV_9 LL_RCC_PLLM_DIV_9 /*!< PLLSAI division factor for PLLSAIM output by 9 */ +#define LL_RCC_PLLSAIM_DIV_10 LL_RCC_PLLM_DIV_10 /*!< PLLSAI division factor for PLLSAIM output by 10 */ +#define LL_RCC_PLLSAIM_DIV_11 LL_RCC_PLLM_DIV_11 /*!< PLLSAI division factor for PLLSAIM output by 11 */ +#define LL_RCC_PLLSAIM_DIV_12 LL_RCC_PLLM_DIV_12 /*!< PLLSAI division factor for PLLSAIM output by 12 */ +#define LL_RCC_PLLSAIM_DIV_13 LL_RCC_PLLM_DIV_13 /*!< PLLSAI division factor for PLLSAIM output by 13 */ +#define LL_RCC_PLLSAIM_DIV_14 LL_RCC_PLLM_DIV_14 /*!< PLLSAI division factor for PLLSAIM output by 14 */ +#define LL_RCC_PLLSAIM_DIV_15 LL_RCC_PLLM_DIV_15 /*!< PLLSAI division factor for PLLSAIM output by 15 */ +#define LL_RCC_PLLSAIM_DIV_16 LL_RCC_PLLM_DIV_16 /*!< PLLSAI division factor for PLLSAIM output by 16 */ +#define LL_RCC_PLLSAIM_DIV_17 LL_RCC_PLLM_DIV_17 /*!< PLLSAI division factor for PLLSAIM output by 17 */ +#define LL_RCC_PLLSAIM_DIV_18 LL_RCC_PLLM_DIV_18 /*!< PLLSAI division factor for PLLSAIM output by 18 */ +#define LL_RCC_PLLSAIM_DIV_19 LL_RCC_PLLM_DIV_19 /*!< PLLSAI division factor for PLLSAIM output by 19 */ +#define LL_RCC_PLLSAIM_DIV_20 LL_RCC_PLLM_DIV_20 /*!< PLLSAI division factor for PLLSAIM output by 20 */ +#define LL_RCC_PLLSAIM_DIV_21 LL_RCC_PLLM_DIV_21 /*!< PLLSAI division factor for PLLSAIM output by 21 */ +#define LL_RCC_PLLSAIM_DIV_22 LL_RCC_PLLM_DIV_22 /*!< PLLSAI division factor for PLLSAIM output by 22 */ +#define LL_RCC_PLLSAIM_DIV_23 LL_RCC_PLLM_DIV_23 /*!< PLLSAI division factor for PLLSAIM output by 23 */ +#define LL_RCC_PLLSAIM_DIV_24 LL_RCC_PLLM_DIV_24 /*!< PLLSAI division factor for PLLSAIM output by 24 */ +#define LL_RCC_PLLSAIM_DIV_25 LL_RCC_PLLM_DIV_25 /*!< PLLSAI division factor for PLLSAIM output by 25 */ +#define LL_RCC_PLLSAIM_DIV_26 LL_RCC_PLLM_DIV_26 /*!< PLLSAI division factor for PLLSAIM output by 26 */ +#define LL_RCC_PLLSAIM_DIV_27 LL_RCC_PLLM_DIV_27 /*!< PLLSAI division factor for PLLSAIM output by 27 */ +#define LL_RCC_PLLSAIM_DIV_28 LL_RCC_PLLM_DIV_28 /*!< PLLSAI division factor for PLLSAIM output by 28 */ +#define LL_RCC_PLLSAIM_DIV_29 LL_RCC_PLLM_DIV_29 /*!< PLLSAI division factor for PLLSAIM output by 29 */ +#define LL_RCC_PLLSAIM_DIV_30 LL_RCC_PLLM_DIV_30 /*!< PLLSAI division factor for PLLSAIM output by 30 */ +#define LL_RCC_PLLSAIM_DIV_31 LL_RCC_PLLM_DIV_31 /*!< PLLSAI division factor for 
PLLSAIM output by 31 */ +#define LL_RCC_PLLSAIM_DIV_32 LL_RCC_PLLM_DIV_32 /*!< PLLSAI division factor for PLLSAIM output by 32 */ +#define LL_RCC_PLLSAIM_DIV_33 LL_RCC_PLLM_DIV_33 /*!< PLLSAI division factor for PLLSAIM output by 33 */ +#define LL_RCC_PLLSAIM_DIV_34 LL_RCC_PLLM_DIV_34 /*!< PLLSAI division factor for PLLSAIM output by 34 */ +#define LL_RCC_PLLSAIM_DIV_35 LL_RCC_PLLM_DIV_35 /*!< PLLSAI division factor for PLLSAIM output by 35 */ +#define LL_RCC_PLLSAIM_DIV_36 LL_RCC_PLLM_DIV_36 /*!< PLLSAI division factor for PLLSAIM output by 36 */ +#define LL_RCC_PLLSAIM_DIV_37 LL_RCC_PLLM_DIV_37 /*!< PLLSAI division factor for PLLSAIM output by 37 */ +#define LL_RCC_PLLSAIM_DIV_38 LL_RCC_PLLM_DIV_38 /*!< PLLSAI division factor for PLLSAIM output by 38 */ +#define LL_RCC_PLLSAIM_DIV_39 LL_RCC_PLLM_DIV_39 /*!< PLLSAI division factor for PLLSAIM output by 39 */ +#define LL_RCC_PLLSAIM_DIV_40 LL_RCC_PLLM_DIV_40 /*!< PLLSAI division factor for PLLSAIM output by 40 */ +#define LL_RCC_PLLSAIM_DIV_41 LL_RCC_PLLM_DIV_41 /*!< PLLSAI division factor for PLLSAIM output by 41 */ +#define LL_RCC_PLLSAIM_DIV_42 LL_RCC_PLLM_DIV_42 /*!< PLLSAI division factor for PLLSAIM output by 42 */ +#define LL_RCC_PLLSAIM_DIV_43 LL_RCC_PLLM_DIV_43 /*!< PLLSAI division factor for PLLSAIM output by 43 */ +#define LL_RCC_PLLSAIM_DIV_44 LL_RCC_PLLM_DIV_44 /*!< PLLSAI division factor for PLLSAIM output by 44 */ +#define LL_RCC_PLLSAIM_DIV_45 LL_RCC_PLLM_DIV_45 /*!< PLLSAI division factor for PLLSAIM output by 45 */ +#define LL_RCC_PLLSAIM_DIV_46 LL_RCC_PLLM_DIV_46 /*!< PLLSAI division factor for PLLSAIM output by 46 */ +#define LL_RCC_PLLSAIM_DIV_47 LL_RCC_PLLM_DIV_47 /*!< PLLSAI division factor for PLLSAIM output by 47 */ +#define LL_RCC_PLLSAIM_DIV_48 LL_RCC_PLLM_DIV_48 /*!< PLLSAI division factor for PLLSAIM output by 48 */ +#define LL_RCC_PLLSAIM_DIV_49 LL_RCC_PLLM_DIV_49 /*!< PLLSAI division factor for PLLSAIM output by 49 */ +#define LL_RCC_PLLSAIM_DIV_50 LL_RCC_PLLM_DIV_50 /*!< PLLSAI division factor for PLLSAIM output by 50 */ +#define LL_RCC_PLLSAIM_DIV_51 LL_RCC_PLLM_DIV_51 /*!< PLLSAI division factor for PLLSAIM output by 51 */ +#define LL_RCC_PLLSAIM_DIV_52 LL_RCC_PLLM_DIV_52 /*!< PLLSAI division factor for PLLSAIM output by 52 */ +#define LL_RCC_PLLSAIM_DIV_53 LL_RCC_PLLM_DIV_53 /*!< PLLSAI division factor for PLLSAIM output by 53 */ +#define LL_RCC_PLLSAIM_DIV_54 LL_RCC_PLLM_DIV_54 /*!< PLLSAI division factor for PLLSAIM output by 54 */ +#define LL_RCC_PLLSAIM_DIV_55 LL_RCC_PLLM_DIV_55 /*!< PLLSAI division factor for PLLSAIM output by 55 */ +#define LL_RCC_PLLSAIM_DIV_56 LL_RCC_PLLM_DIV_56 /*!< PLLSAI division factor for PLLSAIM output by 56 */ +#define LL_RCC_PLLSAIM_DIV_57 LL_RCC_PLLM_DIV_57 /*!< PLLSAI division factor for PLLSAIM output by 57 */ +#define LL_RCC_PLLSAIM_DIV_58 LL_RCC_PLLM_DIV_58 /*!< PLLSAI division factor for PLLSAIM output by 58 */ +#define LL_RCC_PLLSAIM_DIV_59 LL_RCC_PLLM_DIV_59 /*!< PLLSAI division factor for PLLSAIM output by 59 */ +#define LL_RCC_PLLSAIM_DIV_60 LL_RCC_PLLM_DIV_60 /*!< PLLSAI division factor for PLLSAIM output by 60 */ +#define LL_RCC_PLLSAIM_DIV_61 LL_RCC_PLLM_DIV_61 /*!< PLLSAI division factor for PLLSAIM output by 61 */ +#define LL_RCC_PLLSAIM_DIV_62 LL_RCC_PLLM_DIV_62 /*!< PLLSAI division factor for PLLSAIM output by 62 */ +#define LL_RCC_PLLSAIM_DIV_63 LL_RCC_PLLM_DIV_63 /*!< PLLSAI division factor for PLLSAIM output by 63 */ +#endif /* RCC_PLLSAICFGR_PLLSAIM */ +/** + * @} + */ + +/** @defgroup RCC_LL_EC_PLLSAIQ PLLSAIQ division factor (PLLSAIQ) + * @{ + */ 
+#define LL_RCC_PLLSAIQ_DIV_2 RCC_PLLSAICFGR_PLLSAIQ_1 /*!< PLLSAI division factor for PLLSAIQ output by 2 */ +#define LL_RCC_PLLSAIQ_DIV_3 (RCC_PLLSAICFGR_PLLSAIQ_1 | RCC_PLLSAICFGR_PLLSAIQ_0) /*!< PLLSAI division factor for PLLSAIQ output by 3 */ +#define LL_RCC_PLLSAIQ_DIV_4 RCC_PLLSAICFGR_PLLSAIQ_2 /*!< PLLSAI division factor for PLLSAIQ output by 4 */ +#define LL_RCC_PLLSAIQ_DIV_5 (RCC_PLLSAICFGR_PLLSAIQ_2 | RCC_PLLSAICFGR_PLLSAIQ_0) /*!< PLLSAI division factor for PLLSAIQ output by 5 */ +#define LL_RCC_PLLSAIQ_DIV_6 (RCC_PLLSAICFGR_PLLSAIQ_2 | RCC_PLLSAICFGR_PLLSAIQ_1) /*!< PLLSAI division factor for PLLSAIQ output by 6 */ +#define LL_RCC_PLLSAIQ_DIV_7 (RCC_PLLSAICFGR_PLLSAIQ_2 | RCC_PLLSAICFGR_PLLSAIQ_1 | RCC_PLLSAICFGR_PLLSAIQ_0) /*!< PLLSAI division factor for PLLSAIQ output by 7 */ +#define LL_RCC_PLLSAIQ_DIV_8 RCC_PLLSAICFGR_PLLSAIQ_3 /*!< PLLSAI division factor for PLLSAIQ output by 8 */ +#define LL_RCC_PLLSAIQ_DIV_9 (RCC_PLLSAICFGR_PLLSAIQ_3 | RCC_PLLSAICFGR_PLLSAIQ_0) /*!< PLLSAI division factor for PLLSAIQ output by 9 */ +#define LL_RCC_PLLSAIQ_DIV_10 (RCC_PLLSAICFGR_PLLSAIQ_3 | RCC_PLLSAICFGR_PLLSAIQ_1) /*!< PLLSAI division factor for PLLSAIQ output by 10 */ +#define LL_RCC_PLLSAIQ_DIV_11 (RCC_PLLSAICFGR_PLLSAIQ_3 | RCC_PLLSAICFGR_PLLSAIQ_1 | RCC_PLLSAICFGR_PLLSAIQ_0) /*!< PLLSAI division factor for PLLSAIQ output by 11 */ +#define LL_RCC_PLLSAIQ_DIV_12 (RCC_PLLSAICFGR_PLLSAIQ_3 | RCC_PLLSAICFGR_PLLSAIQ_2) /*!< PLLSAI division factor for PLLSAIQ output by 12 */ +#define LL_RCC_PLLSAIQ_DIV_13 (RCC_PLLSAICFGR_PLLSAIQ_3 | RCC_PLLSAICFGR_PLLSAIQ_2 | RCC_PLLSAICFGR_PLLSAIQ_0) /*!< PLLSAI division factor for PLLSAIQ output by 13 */ +#define LL_RCC_PLLSAIQ_DIV_14 (RCC_PLLSAICFGR_PLLSAIQ_3 | RCC_PLLSAICFGR_PLLSAIQ_2 | RCC_PLLSAICFGR_PLLSAIQ_1) /*!< PLLSAI division factor for PLLSAIQ output by 14 */ +#define LL_RCC_PLLSAIQ_DIV_15 (RCC_PLLSAICFGR_PLLSAIQ_3 | RCC_PLLSAICFGR_PLLSAIQ_2 | RCC_PLLSAICFGR_PLLSAIQ_1 | RCC_PLLSAICFGR_PLLSAIQ_0) /*!< PLLSAI division factor for PLLSAIQ output by 15 */ +/** + * @} + */ + +#if defined(RCC_DCKCFGR_PLLSAIDIVQ) +/** @defgroup RCC_LL_EC_PLLSAIDIVQ PLLSAIDIVQ division factor (PLLSAIDIVQ) + * @{ + */ +#define LL_RCC_PLLSAIDIVQ_DIV_1 0x00000000U /*!< PLLSAI division factor for PLLSAIDIVQ output by 1 */ +#define LL_RCC_PLLSAIDIVQ_DIV_2 RCC_DCKCFGR_PLLSAIDIVQ_0 /*!< PLLSAI division factor for PLLSAIDIVQ output by 2 */ +#define LL_RCC_PLLSAIDIVQ_DIV_3 RCC_DCKCFGR_PLLSAIDIVQ_1 /*!< PLLSAI division factor for PLLSAIDIVQ output by 3 */ +#define LL_RCC_PLLSAIDIVQ_DIV_4 (RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 4 */ +#define LL_RCC_PLLSAIDIVQ_DIV_5 RCC_DCKCFGR_PLLSAIDIVQ_2 /*!< PLLSAI division factor for PLLSAIDIVQ output by 5 */ +#define LL_RCC_PLLSAIDIVQ_DIV_6 (RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 6 */ +#define LL_RCC_PLLSAIDIVQ_DIV_7 (RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1) /*!< PLLSAI division factor for PLLSAIDIVQ output by 7 */ +#define LL_RCC_PLLSAIDIVQ_DIV_8 (RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 8 */ +#define LL_RCC_PLLSAIDIVQ_DIV_9 RCC_DCKCFGR_PLLSAIDIVQ_3 /*!< PLLSAI division factor for PLLSAIDIVQ output by 9 */ +#define LL_RCC_PLLSAIDIVQ_DIV_10 (RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 10 */ +#define LL_RCC_PLLSAIDIVQ_DIV_11 
(RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_1) /*!< PLLSAI division factor for PLLSAIDIVQ output by 11 */ +#define LL_RCC_PLLSAIDIVQ_DIV_12 (RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 12 */ +#define LL_RCC_PLLSAIDIVQ_DIV_13 (RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2) /*!< PLLSAI division factor for PLLSAIDIVQ output by 13 */ +#define LL_RCC_PLLSAIDIVQ_DIV_14 (RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 14 */ +#define LL_RCC_PLLSAIDIVQ_DIV_15 (RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1) /*!< PLLSAI division factor for PLLSAIDIVQ output by 15 */ +#define LL_RCC_PLLSAIDIVQ_DIV_16 (RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 16 */ +#define LL_RCC_PLLSAIDIVQ_DIV_17 RCC_DCKCFGR_PLLSAIDIVQ_4 /*!< PLLSAI division factor for PLLSAIDIVQ output by 17 */ +#define LL_RCC_PLLSAIDIVQ_DIV_18 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 18 */ +#define LL_RCC_PLLSAIDIVQ_DIV_19 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_1) /*!< PLLSAI division factor for PLLSAIDIVQ output by 19 */ +#define LL_RCC_PLLSAIDIVQ_DIV_20 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 20 */ +#define LL_RCC_PLLSAIDIVQ_DIV_21 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_2) /*!< PLLSAI division factor for PLLSAIDIVQ output by 21 */ +#define LL_RCC_PLLSAIDIVQ_DIV_22 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 22 */ +#define LL_RCC_PLLSAIDIVQ_DIV_23 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1) /*!< PLLSAI division factor for PLLSAIDIVQ output by 23 */ +#define LL_RCC_PLLSAIDIVQ_DIV_24 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 24 */ +#define LL_RCC_PLLSAIDIVQ_DIV_25 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3) /*!< PLLSAI division factor for PLLSAIDIVQ output by 25 */ +#define LL_RCC_PLLSAIDIVQ_DIV_26 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 26 */ +#define LL_RCC_PLLSAIDIVQ_DIV_27 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_1) /*!< PLLSAI division factor for PLLSAIDIVQ output by 27 */ +#define LL_RCC_PLLSAIDIVQ_DIV_28 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 28 */ +#define LL_RCC_PLLSAIDIVQ_DIV_29 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2) /*!< PLLSAI division factor for PLLSAIDIVQ output by 29 */ +#define LL_RCC_PLLSAIDIVQ_DIV_30 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 30 */ +#define LL_RCC_PLLSAIDIVQ_DIV_31 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1) /*!< PLLSAI division factor for PLLSAIDIVQ output by 31 */ +#define 
LL_RCC_PLLSAIDIVQ_DIV_32 (RCC_DCKCFGR_PLLSAIDIVQ_4 | RCC_DCKCFGR_PLLSAIDIVQ_3 | RCC_DCKCFGR_PLLSAIDIVQ_2 | RCC_DCKCFGR_PLLSAIDIVQ_1 | RCC_DCKCFGR_PLLSAIDIVQ_0) /*!< PLLSAI division factor for PLLSAIDIVQ output by 32 */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_PLLSAIDIVQ */ + +#if defined(RCC_PLLSAICFGR_PLLSAIR) +/** @defgroup RCC_LL_EC_PLLSAIR PLLSAIR division factor (PLLSAIR) + * @{ + */ +#define LL_RCC_PLLSAIR_DIV_2 RCC_PLLSAICFGR_PLLSAIR_1 /*!< PLLSAI division factor for PLLSAIR output by 2 */ +#define LL_RCC_PLLSAIR_DIV_3 (RCC_PLLSAICFGR_PLLSAIR_1 | RCC_PLLSAICFGR_PLLSAIR_0) /*!< PLLSAI division factor for PLLSAIR output by 3 */ +#define LL_RCC_PLLSAIR_DIV_4 RCC_PLLSAICFGR_PLLSAIR_2 /*!< PLLSAI division factor for PLLSAIR output by 4 */ +#define LL_RCC_PLLSAIR_DIV_5 (RCC_PLLSAICFGR_PLLSAIR_2 | RCC_PLLSAICFGR_PLLSAIR_0) /*!< PLLSAI division factor for PLLSAIR output by 5 */ +#define LL_RCC_PLLSAIR_DIV_6 (RCC_PLLSAICFGR_PLLSAIR_2 | RCC_PLLSAICFGR_PLLSAIR_1) /*!< PLLSAI division factor for PLLSAIR output by 6 */ +#define LL_RCC_PLLSAIR_DIV_7 (RCC_PLLSAICFGR_PLLSAIR_2 | RCC_PLLSAICFGR_PLLSAIR_1 | RCC_PLLSAICFGR_PLLSAIR_0) /*!< PLLSAI division factor for PLLSAIR output by 7 */ +/** + * @} + */ +#endif /* RCC_PLLSAICFGR_PLLSAIR */ + +#if defined(RCC_DCKCFGR_PLLSAIDIVR) +/** @defgroup RCC_LL_EC_PLLSAIDIVR PLLSAIDIVR division factor (PLLSAIDIVR) + * @{ + */ +#define LL_RCC_PLLSAIDIVR_DIV_2 0x00000000U /*!< PLLSAI division factor for PLLSAIDIVR output by 2 */ +#define LL_RCC_PLLSAIDIVR_DIV_4 RCC_DCKCFGR_PLLSAIDIVR_0 /*!< PLLSAI division factor for PLLSAIDIVR output by 4 */ +#define LL_RCC_PLLSAIDIVR_DIV_8 RCC_DCKCFGR_PLLSAIDIVR_1 /*!< PLLSAI division factor for PLLSAIDIVR output by 8 */ +#define LL_RCC_PLLSAIDIVR_DIV_16 (RCC_DCKCFGR_PLLSAIDIVR_1 | RCC_DCKCFGR_PLLSAIDIVR_0) /*!< PLLSAI division factor for PLLSAIDIVR output by 16 */ +/** + * @} + */ +#endif /* RCC_DCKCFGR_PLLSAIDIVR */ + +#if defined(RCC_PLLSAICFGR_PLLSAIP) +/** @defgroup RCC_LL_EC_PLLSAIP PLLSAIP division factor (PLLSAIP) + * @{ + */ +#define LL_RCC_PLLSAIP_DIV_2 0x00000000U /*!< PLLSAI division factor for PLLSAIP output by 2 */ +#define LL_RCC_PLLSAIP_DIV_4 RCC_PLLSAICFGR_PLLSAIP_0 /*!< PLLSAI division factor for PLLSAIP output by 4 */ +#define LL_RCC_PLLSAIP_DIV_6 RCC_PLLSAICFGR_PLLSAIP_1 /*!< PLLSAI division factor for PLLSAIP output by 6 */ +#define LL_RCC_PLLSAIP_DIV_8 (RCC_PLLSAICFGR_PLLSAIP_1 | RCC_PLLSAICFGR_PLLSAIP_0) /*!< PLLSAI division factor for PLLSAIP output by 8 */ +/** + * @} + */ +#endif /* RCC_PLLSAICFGR_PLLSAIP */ +#endif /* RCC_PLLSAI_SUPPORT */ +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup RCC_LL_Exported_Macros RCC Exported Macros + * @{ + */ + +/** @defgroup RCC_LL_EM_WRITE_READ Common Write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in RCC register + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_RCC_WriteReg(__REG__, __VALUE__) WRITE_REG(RCC->__REG__, (__VALUE__)) + +/** + * @brief Read a value in RCC register + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_RCC_ReadReg(__REG__) READ_REG(RCC->__REG__) +/** + * @} + */ + +/** @defgroup RCC_LL_EM_CALC_FREQ Calculate frequencies + * @{ + */ + +/** + * @brief Helper macro to calculate the PLLCLK frequency on system domain + * @note ex: @ref __LL_RCC_CALC_PLLCLK_FREQ (HSE_VALUE,@ref LL_RCC_PLL_GetDivider (), + * @ref LL_RCC_PLL_GetN (), @ref 
LL_RCC_PLL_GetP ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param __PLLN__ Between 50/192(*) and 432 + * + * (*) value not defined in all devices. 
+ * @param __PLLP__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLP_DIV_2 + * @arg @ref LL_RCC_PLLP_DIV_4 + * @arg @ref LL_RCC_PLLP_DIV_6 + * @arg @ref LL_RCC_PLLP_DIV_8 + * @retval PLL clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLCLK_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLP__) ((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((((__PLLP__) >> RCC_PLLCFGR_PLLP_Pos ) + 1U) * 2U)) + +#if defined(RCC_PLLR_SYSCLK_SUPPORT) +/** + * @brief Helper macro to calculate the PLLRCLK frequency on system domain + * @note ex: @ref __LL_RCC_CALC_PLLRCLK_FREQ (HSE_VALUE,@ref LL_RCC_PLL_GetDivider (), + * @ref LL_RCC_PLL_GetN (), @ref LL_RCC_PLL_GetR ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param __PLLN__ Between 50 and 432 + * @param __PLLR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @retval PLL clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLRCLK_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLR__) ((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((__PLLR__) >> RCC_PLLCFGR_PLLR_Pos )) + +#endif /* RCC_PLLR_SYSCLK_SUPPORT */ + +/** + * @brief Helper macro to calculate the PLLCLK frequency used on 48M domain + * @note ex: @ref __LL_RCC_CALC_PLLCLK_48M_FREQ 
(HSE_VALUE,@ref LL_RCC_PLL_GetDivider (), + * @ref LL_RCC_PLL_GetN (), @ref LL_RCC_PLL_GetQ ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param __PLLN__ Between 50/192(*) and 432 + * + * (*) value not defined in all devices. 
+ * @param __PLLQ__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLQ_DIV_2 + * @arg @ref LL_RCC_PLLQ_DIV_3 + * @arg @ref LL_RCC_PLLQ_DIV_4 + * @arg @ref LL_RCC_PLLQ_DIV_5 + * @arg @ref LL_RCC_PLLQ_DIV_6 + * @arg @ref LL_RCC_PLLQ_DIV_7 + * @arg @ref LL_RCC_PLLQ_DIV_8 + * @arg @ref LL_RCC_PLLQ_DIV_9 + * @arg @ref LL_RCC_PLLQ_DIV_10 + * @arg @ref LL_RCC_PLLQ_DIV_11 + * @arg @ref LL_RCC_PLLQ_DIV_12 + * @arg @ref LL_RCC_PLLQ_DIV_13 + * @arg @ref LL_RCC_PLLQ_DIV_14 + * @arg @ref LL_RCC_PLLQ_DIV_15 + * @retval PLL clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLCLK_48M_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLQ__) ((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((__PLLQ__) >> RCC_PLLCFGR_PLLQ_Pos )) + +#if defined(DSI) +/** + * @brief Helper macro to calculate the PLLCLK frequency used on DSI + * @note ex: @ref __LL_RCC_CALC_PLLCLK_DSI_FREQ (HSE_VALUE, @ref LL_RCC_PLL_GetDivider (), + * @ref LL_RCC_PLL_GetN (), @ref LL_RCC_PLL_GetR ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param __PLLN__ Between 50 and 432 + * @param __PLLR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @retval PLL clock frequency (in Hz) + */ +#define 
__LL_RCC_CALC_PLLCLK_DSI_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLR__) ((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((__PLLR__) >> RCC_PLLCFGR_PLLR_Pos )) +#endif /* DSI */ + +#if defined(RCC_PLLR_I2S_CLKSOURCE_SUPPORT) +/** + * @brief Helper macro to calculate the PLLCLK frequency used on I2S + * @note ex: @ref __LL_RCC_CALC_PLLCLK_I2S_FREQ (HSE_VALUE, @ref LL_RCC_PLL_GetDivider (), + * @ref LL_RCC_PLL_GetN (), @ref LL_RCC_PLL_GetR ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param __PLLN__ Between 50 and 432 + * @param __PLLR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @retval PLL clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLCLK_I2S_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLR__) ((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((__PLLR__) >> RCC_PLLCFGR_PLLR_Pos )) +#endif /* RCC_PLLR_I2S_CLKSOURCE_SUPPORT */ + +#if defined(SPDIFRX) +/** + * @brief Helper macro to calculate the PLLCLK frequency used on SPDIFRX + * @note ex: @ref __LL_RCC_CALC_PLLCLK_SPDIFRX_FREQ (HSE_VALUE, @ref LL_RCC_PLL_GetDivider (), + * @ref LL_RCC_PLL_GetN (), @ref LL_RCC_PLL_GetR ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of 
the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param __PLLN__ Between 50 and 432 + * @param __PLLR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @retval PLL clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLCLK_SPDIFRX_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLR__) ((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((__PLLR__) >> RCC_PLLCFGR_PLLR_Pos )) +#endif /* SPDIFRX */ + +#if defined(RCC_PLLCFGR_PLLR) +#if defined(SAI1) +/** + * @brief Helper macro to calculate the PLLCLK frequency used on SAI + * @note ex: @ref __LL_RCC_CALC_PLLCLK_SAI_FREQ (HSE_VALUE, @ref LL_RCC_PLL_GetDivider (), + * @ref LL_RCC_PLL_GetN (), @ref LL_RCC_PLL_GetR (), @ref LL_RCC_PLL_GetDIVR ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref 
LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param __PLLN__ Between 50 and 432 + * @param __PLLR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @param __PLLDIVR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLDIVR_DIV_1 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_2 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_3 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_4 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_5 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_6 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_7 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_8 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_9 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_10 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_11 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_12 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_13 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_14 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_15 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_16 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_17 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_18 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_19 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_20 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_21 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_22 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_23 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_24 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_25 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_26 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_27 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_28 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_29 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_30 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_31 (*) + * + * (*) value not defined in all devices. 
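+ * @note Illustrative numeric example (hypothetical settings, not a recommendation):
+ *       with an 8 MHz HSE, PLLM = 8, PLLN = 192 and PLLR = 2 the macro yields
+ *       8000000 / 8 * 192 / 2 = 96 MHz; on devices where RCC_DCKCFGR_PLLDIVR is
+ *       defined the result is further divided by the PLLDIVR factor.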
+ * @retval PLL clock frequency (in Hz) + */ +#if defined(RCC_DCKCFGR_PLLDIVR) +#define __LL_RCC_CALC_PLLCLK_SAI_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLR__, __PLLDIVR__) (((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((__PLLR__) >> RCC_PLLCFGR_PLLR_Pos )) / ((__PLLDIVR__) >> RCC_DCKCFGR_PLLDIVR_Pos )) +#else +#define __LL_RCC_CALC_PLLCLK_SAI_FREQ(__INPUTFREQ__, __PLLM__, __PLLN__, __PLLR__) ((__INPUTFREQ__) / (__PLLM__) * (__PLLN__) / \ + ((__PLLR__) >> RCC_PLLCFGR_PLLR_Pos )) +#endif /* RCC_DCKCFGR_PLLDIVR */ +#endif /* SAI1 */ +#endif /* RCC_PLLCFGR_PLLR */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** + * @brief Helper macro to calculate the PLLSAI frequency used for SAI domain + * @note ex: @ref __LL_RCC_CALC_PLLSAI_SAI_FREQ (HSE_VALUE,@ref LL_RCC_PLLSAI_GetDivider (), + * @ref LL_RCC_PLLSAI_GetN (), @ref LL_RCC_PLLSAI_GetQ (), @ref LL_RCC_PLLSAI_GetDIVQ ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIM_DIV_2 + * @arg @ref LL_RCC_PLLSAIM_DIV_3 + * @arg @ref LL_RCC_PLLSAIM_DIV_4 + * @arg @ref LL_RCC_PLLSAIM_DIV_5 + * @arg @ref LL_RCC_PLLSAIM_DIV_6 + * @arg @ref LL_RCC_PLLSAIM_DIV_7 + * @arg @ref LL_RCC_PLLSAIM_DIV_8 + * @arg @ref LL_RCC_PLLSAIM_DIV_9 + * @arg @ref LL_RCC_PLLSAIM_DIV_10 + * @arg @ref LL_RCC_PLLSAIM_DIV_11 + * @arg @ref LL_RCC_PLLSAIM_DIV_12 + * @arg @ref LL_RCC_PLLSAIM_DIV_13 + * @arg @ref LL_RCC_PLLSAIM_DIV_14 + * @arg @ref LL_RCC_PLLSAIM_DIV_15 + * @arg @ref LL_RCC_PLLSAIM_DIV_16 + * @arg @ref LL_RCC_PLLSAIM_DIV_17 + * @arg @ref LL_RCC_PLLSAIM_DIV_18 + * @arg @ref LL_RCC_PLLSAIM_DIV_19 + * @arg @ref LL_RCC_PLLSAIM_DIV_20 + * @arg @ref LL_RCC_PLLSAIM_DIV_21 + * @arg @ref LL_RCC_PLLSAIM_DIV_22 + * @arg @ref LL_RCC_PLLSAIM_DIV_23 + * @arg @ref LL_RCC_PLLSAIM_DIV_24 + * @arg @ref LL_RCC_PLLSAIM_DIV_25 + * @arg @ref LL_RCC_PLLSAIM_DIV_26 + * @arg @ref LL_RCC_PLLSAIM_DIV_27 + * @arg @ref LL_RCC_PLLSAIM_DIV_28 + * @arg @ref LL_RCC_PLLSAIM_DIV_29 + * @arg @ref LL_RCC_PLLSAIM_DIV_30 + * @arg @ref LL_RCC_PLLSAIM_DIV_31 + * @arg @ref LL_RCC_PLLSAIM_DIV_32 + * @arg @ref LL_RCC_PLLSAIM_DIV_33 + * @arg @ref LL_RCC_PLLSAIM_DIV_34 + * @arg @ref LL_RCC_PLLSAIM_DIV_35 + * @arg @ref LL_RCC_PLLSAIM_DIV_36 + * @arg @ref LL_RCC_PLLSAIM_DIV_37 + * @arg @ref LL_RCC_PLLSAIM_DIV_38 + * @arg @ref LL_RCC_PLLSAIM_DIV_39 + * @arg @ref LL_RCC_PLLSAIM_DIV_40 + * @arg @ref LL_RCC_PLLSAIM_DIV_41 + * @arg @ref LL_RCC_PLLSAIM_DIV_42 + * @arg @ref LL_RCC_PLLSAIM_DIV_43 + * @arg @ref LL_RCC_PLLSAIM_DIV_44 + * @arg @ref LL_RCC_PLLSAIM_DIV_45 + * @arg @ref LL_RCC_PLLSAIM_DIV_46 + * @arg @ref LL_RCC_PLLSAIM_DIV_47 + * @arg @ref LL_RCC_PLLSAIM_DIV_48 + * @arg @ref LL_RCC_PLLSAIM_DIV_49 + * @arg @ref LL_RCC_PLLSAIM_DIV_50 + * @arg @ref LL_RCC_PLLSAIM_DIV_51 + * @arg @ref LL_RCC_PLLSAIM_DIV_52 + * @arg @ref LL_RCC_PLLSAIM_DIV_53 + * @arg @ref LL_RCC_PLLSAIM_DIV_54 + * @arg @ref LL_RCC_PLLSAIM_DIV_55 + * @arg @ref LL_RCC_PLLSAIM_DIV_56 + * @arg @ref LL_RCC_PLLSAIM_DIV_57 + * @arg @ref LL_RCC_PLLSAIM_DIV_58 + * @arg @ref LL_RCC_PLLSAIM_DIV_59 + * @arg @ref LL_RCC_PLLSAIM_DIV_60 + * @arg @ref LL_RCC_PLLSAIM_DIV_61 + * @arg @ref LL_RCC_PLLSAIM_DIV_62 + * @arg @ref LL_RCC_PLLSAIM_DIV_63 + * @param __PLLSAIN__ Between 49/50(*) and 432 + * + * (*) value not defined in all devices. 
+ * @param __PLLSAIQ__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIQ_DIV_2 + * @arg @ref LL_RCC_PLLSAIQ_DIV_3 + * @arg @ref LL_RCC_PLLSAIQ_DIV_4 + * @arg @ref LL_RCC_PLLSAIQ_DIV_5 + * @arg @ref LL_RCC_PLLSAIQ_DIV_6 + * @arg @ref LL_RCC_PLLSAIQ_DIV_7 + * @arg @ref LL_RCC_PLLSAIQ_DIV_8 + * @arg @ref LL_RCC_PLLSAIQ_DIV_9 + * @arg @ref LL_RCC_PLLSAIQ_DIV_10 + * @arg @ref LL_RCC_PLLSAIQ_DIV_11 + * @arg @ref LL_RCC_PLLSAIQ_DIV_12 + * @arg @ref LL_RCC_PLLSAIQ_DIV_13 + * @arg @ref LL_RCC_PLLSAIQ_DIV_14 + * @arg @ref LL_RCC_PLLSAIQ_DIV_15 + * @param __PLLSAIDIVQ__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_1 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_2 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_3 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_4 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_5 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_6 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_7 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_8 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_9 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_10 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_11 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_12 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_13 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_14 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_15 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_16 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_17 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_18 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_19 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_20 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_21 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_22 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_23 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_24 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_25 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_26 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_27 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_28 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_29 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_30 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_31 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_32 + * @retval PLLSAI clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLSAI_SAI_FREQ(__INPUTFREQ__, __PLLM__, __PLLSAIN__, __PLLSAIQ__, __PLLSAIDIVQ__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLSAIN__) / \ + (((__PLLSAIQ__) >> RCC_PLLSAICFGR_PLLSAIQ_Pos) * (((__PLLSAIDIVQ__) >> RCC_DCKCFGR_PLLSAIDIVQ_Pos) + 1U))) + +#if defined(RCC_PLLSAICFGR_PLLSAIP) +/** + * @brief Helper macro to calculate the PLLSAI frequency used on 48Mhz domain + * @note ex: @ref __LL_RCC_CALC_PLLSAI_48M_FREQ (HSE_VALUE,@ref LL_RCC_PLLSAI_GetDivider (), + * @ref LL_RCC_PLLSAI_GetN (), @ref LL_RCC_PLLSAI_GetP ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIM_DIV_2 + * @arg @ref LL_RCC_PLLSAIM_DIV_3 + * @arg @ref LL_RCC_PLLSAIM_DIV_4 + * @arg @ref LL_RCC_PLLSAIM_DIV_5 + * @arg @ref LL_RCC_PLLSAIM_DIV_6 + * @arg @ref LL_RCC_PLLSAIM_DIV_7 + * @arg @ref LL_RCC_PLLSAIM_DIV_8 + * @arg @ref LL_RCC_PLLSAIM_DIV_9 + * @arg @ref LL_RCC_PLLSAIM_DIV_10 + * @arg @ref LL_RCC_PLLSAIM_DIV_11 + * @arg @ref LL_RCC_PLLSAIM_DIV_12 + * @arg @ref LL_RCC_PLLSAIM_DIV_13 + * @arg @ref LL_RCC_PLLSAIM_DIV_14 + * @arg @ref LL_RCC_PLLSAIM_DIV_15 + * @arg @ref LL_RCC_PLLSAIM_DIV_16 + * @arg @ref LL_RCC_PLLSAIM_DIV_17 + * @arg @ref LL_RCC_PLLSAIM_DIV_18 + * @arg @ref LL_RCC_PLLSAIM_DIV_19 + * @arg @ref LL_RCC_PLLSAIM_DIV_20 + * @arg @ref LL_RCC_PLLSAIM_DIV_21 + * @arg @ref LL_RCC_PLLSAIM_DIV_22 + * @arg @ref LL_RCC_PLLSAIM_DIV_23 + * @arg @ref LL_RCC_PLLSAIM_DIV_24 + * @arg @ref LL_RCC_PLLSAIM_DIV_25 + * @arg @ref LL_RCC_PLLSAIM_DIV_26 + * @arg @ref LL_RCC_PLLSAIM_DIV_27 + * @arg 
@ref LL_RCC_PLLSAIM_DIV_28 + * @arg @ref LL_RCC_PLLSAIM_DIV_29 + * @arg @ref LL_RCC_PLLSAIM_DIV_30 + * @arg @ref LL_RCC_PLLSAIM_DIV_31 + * @arg @ref LL_RCC_PLLSAIM_DIV_32 + * @arg @ref LL_RCC_PLLSAIM_DIV_33 + * @arg @ref LL_RCC_PLLSAIM_DIV_34 + * @arg @ref LL_RCC_PLLSAIM_DIV_35 + * @arg @ref LL_RCC_PLLSAIM_DIV_36 + * @arg @ref LL_RCC_PLLSAIM_DIV_37 + * @arg @ref LL_RCC_PLLSAIM_DIV_38 + * @arg @ref LL_RCC_PLLSAIM_DIV_39 + * @arg @ref LL_RCC_PLLSAIM_DIV_40 + * @arg @ref LL_RCC_PLLSAIM_DIV_41 + * @arg @ref LL_RCC_PLLSAIM_DIV_42 + * @arg @ref LL_RCC_PLLSAIM_DIV_43 + * @arg @ref LL_RCC_PLLSAIM_DIV_44 + * @arg @ref LL_RCC_PLLSAIM_DIV_45 + * @arg @ref LL_RCC_PLLSAIM_DIV_46 + * @arg @ref LL_RCC_PLLSAIM_DIV_47 + * @arg @ref LL_RCC_PLLSAIM_DIV_48 + * @arg @ref LL_RCC_PLLSAIM_DIV_49 + * @arg @ref LL_RCC_PLLSAIM_DIV_50 + * @arg @ref LL_RCC_PLLSAIM_DIV_51 + * @arg @ref LL_RCC_PLLSAIM_DIV_52 + * @arg @ref LL_RCC_PLLSAIM_DIV_53 + * @arg @ref LL_RCC_PLLSAIM_DIV_54 + * @arg @ref LL_RCC_PLLSAIM_DIV_55 + * @arg @ref LL_RCC_PLLSAIM_DIV_56 + * @arg @ref LL_RCC_PLLSAIM_DIV_57 + * @arg @ref LL_RCC_PLLSAIM_DIV_58 + * @arg @ref LL_RCC_PLLSAIM_DIV_59 + * @arg @ref LL_RCC_PLLSAIM_DIV_60 + * @arg @ref LL_RCC_PLLSAIM_DIV_61 + * @arg @ref LL_RCC_PLLSAIM_DIV_62 + * @arg @ref LL_RCC_PLLSAIM_DIV_63 + * @param __PLLSAIN__ Between 50 and 432 + * @param __PLLSAIP__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIP_DIV_2 + * @arg @ref LL_RCC_PLLSAIP_DIV_4 + * @arg @ref LL_RCC_PLLSAIP_DIV_6 + * @arg @ref LL_RCC_PLLSAIP_DIV_8 + * @retval PLLSAI clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLSAI_48M_FREQ(__INPUTFREQ__, __PLLM__, __PLLSAIN__, __PLLSAIP__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLSAIN__) / \ + ((((__PLLSAIP__) >> RCC_PLLSAICFGR_PLLSAIP_Pos) + 1U) * 2U)) +#endif /* RCC_PLLSAICFGR_PLLSAIP */ + +#if defined(LTDC) +/** + * @brief Helper macro to calculate the PLLSAI frequency used for LTDC domain + * @note ex: @ref __LL_RCC_CALC_PLLSAI_LTDC_FREQ (HSE_VALUE,@ref LL_RCC_PLLSAI_GetDivider (), + * @ref LL_RCC_PLLSAI_GetN (), @ref LL_RCC_PLLSAI_GetR (), @ref LL_RCC_PLLSAI_GetDIVR ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIM_DIV_2 + * @arg @ref LL_RCC_PLLSAIM_DIV_3 + * @arg @ref LL_RCC_PLLSAIM_DIV_4 + * @arg @ref LL_RCC_PLLSAIM_DIV_5 + * @arg @ref LL_RCC_PLLSAIM_DIV_6 + * @arg @ref LL_RCC_PLLSAIM_DIV_7 + * @arg @ref LL_RCC_PLLSAIM_DIV_8 + * @arg @ref LL_RCC_PLLSAIM_DIV_9 + * @arg @ref LL_RCC_PLLSAIM_DIV_10 + * @arg @ref LL_RCC_PLLSAIM_DIV_11 + * @arg @ref LL_RCC_PLLSAIM_DIV_12 + * @arg @ref LL_RCC_PLLSAIM_DIV_13 + * @arg @ref LL_RCC_PLLSAIM_DIV_14 + * @arg @ref LL_RCC_PLLSAIM_DIV_15 + * @arg @ref LL_RCC_PLLSAIM_DIV_16 + * @arg @ref LL_RCC_PLLSAIM_DIV_17 + * @arg @ref LL_RCC_PLLSAIM_DIV_18 + * @arg @ref LL_RCC_PLLSAIM_DIV_19 + * @arg @ref LL_RCC_PLLSAIM_DIV_20 + * @arg @ref LL_RCC_PLLSAIM_DIV_21 + * @arg @ref LL_RCC_PLLSAIM_DIV_22 + * @arg @ref LL_RCC_PLLSAIM_DIV_23 + * @arg @ref LL_RCC_PLLSAIM_DIV_24 + * @arg @ref LL_RCC_PLLSAIM_DIV_25 + * @arg @ref LL_RCC_PLLSAIM_DIV_26 + * @arg @ref LL_RCC_PLLSAIM_DIV_27 + * @arg @ref LL_RCC_PLLSAIM_DIV_28 + * @arg @ref LL_RCC_PLLSAIM_DIV_29 + * @arg @ref LL_RCC_PLLSAIM_DIV_30 + * @arg @ref LL_RCC_PLLSAIM_DIV_31 + * @arg @ref LL_RCC_PLLSAIM_DIV_32 + * @arg @ref LL_RCC_PLLSAIM_DIV_33 + * @arg @ref LL_RCC_PLLSAIM_DIV_34 + * @arg @ref LL_RCC_PLLSAIM_DIV_35 + * @arg @ref LL_RCC_PLLSAIM_DIV_36 + * @arg @ref LL_RCC_PLLSAIM_DIV_37 + * 
@arg @ref LL_RCC_PLLSAIM_DIV_38 + * @arg @ref LL_RCC_PLLSAIM_DIV_39 + * @arg @ref LL_RCC_PLLSAIM_DIV_40 + * @arg @ref LL_RCC_PLLSAIM_DIV_41 + * @arg @ref LL_RCC_PLLSAIM_DIV_42 + * @arg @ref LL_RCC_PLLSAIM_DIV_43 + * @arg @ref LL_RCC_PLLSAIM_DIV_44 + * @arg @ref LL_RCC_PLLSAIM_DIV_45 + * @arg @ref LL_RCC_PLLSAIM_DIV_46 + * @arg @ref LL_RCC_PLLSAIM_DIV_47 + * @arg @ref LL_RCC_PLLSAIM_DIV_48 + * @arg @ref LL_RCC_PLLSAIM_DIV_49 + * @arg @ref LL_RCC_PLLSAIM_DIV_50 + * @arg @ref LL_RCC_PLLSAIM_DIV_51 + * @arg @ref LL_RCC_PLLSAIM_DIV_52 + * @arg @ref LL_RCC_PLLSAIM_DIV_53 + * @arg @ref LL_RCC_PLLSAIM_DIV_54 + * @arg @ref LL_RCC_PLLSAIM_DIV_55 + * @arg @ref LL_RCC_PLLSAIM_DIV_56 + * @arg @ref LL_RCC_PLLSAIM_DIV_57 + * @arg @ref LL_RCC_PLLSAIM_DIV_58 + * @arg @ref LL_RCC_PLLSAIM_DIV_59 + * @arg @ref LL_RCC_PLLSAIM_DIV_60 + * @arg @ref LL_RCC_PLLSAIM_DIV_61 + * @arg @ref LL_RCC_PLLSAIM_DIV_62 + * @arg @ref LL_RCC_PLLSAIM_DIV_63 + * @param __PLLSAIN__ Between 49/50(*) and 432 + * + * (*) value not defined in all devices. + * @param __PLLSAIR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIR_DIV_2 + * @arg @ref LL_RCC_PLLSAIR_DIV_3 + * @arg @ref LL_RCC_PLLSAIR_DIV_4 + * @arg @ref LL_RCC_PLLSAIR_DIV_5 + * @arg @ref LL_RCC_PLLSAIR_DIV_6 + * @arg @ref LL_RCC_PLLSAIR_DIV_7 + * @param __PLLSAIDIVR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_2 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_4 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_8 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_16 + * @retval PLLSAI clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLSAI_LTDC_FREQ(__INPUTFREQ__, __PLLM__, __PLLSAIN__, __PLLSAIR__, __PLLSAIDIVR__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLSAIN__) / \ + (((__PLLSAIR__) >> RCC_PLLSAICFGR_PLLSAIR_Pos) * (aRCC_PLLSAIDIVRPrescTable[(__PLLSAIDIVR__) >> RCC_DCKCFGR_PLLSAIDIVR_Pos]))) +#endif /* LTDC */ +#endif /* RCC_PLLSAI_SUPPORT */ + +#if defined(RCC_PLLI2S_SUPPORT) +#if defined(RCC_DCKCFGR_PLLI2SDIVQ) || defined(RCC_DCKCFGR_PLLI2SDIVR) +/** + * @brief Helper macro to calculate the PLLI2S frequency used for SAI domain + * @note ex: @ref __LL_RCC_CALC_PLLI2S_SAI_FREQ (HSE_VALUE,@ref LL_RCC_PLLI2S_GetDivider (), + * @ref LL_RCC_PLLI2S_GetN (), @ref LL_RCC_PLLI2S_GetQ (), @ref LL_RCC_PLLI2S_GetDIVQ ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * 
@arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param __PLLI2SN__ Between 50/192(*) and 432 + * + * (*) value not defined in all devices. + * @param __PLLI2SQ_R__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SQ_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_7 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_8 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_9 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_10 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_11 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_12 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_13 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_14 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_15 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_7 (*) + * + * (*) value not defined in all devices. 
+ * @param __PLLI2SDIVQ_R__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_1 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_7 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_8 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_9 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_10 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_11 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_12 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_13 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_14 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_15 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_16 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_17 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_18 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_19 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_20 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_21 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_22 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_23 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_24 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_25 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_26 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_27 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_28 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_29 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_30 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_31 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_32 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_1 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_7 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_8 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_9 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_10 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_11 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_12 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_13 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_14 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_15 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_16 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_17 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_18 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_19 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_20 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_21 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_22 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_23 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_24 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_25 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_26 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_27 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_28 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_29 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_30 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_31 (*) + * + * (*) value not defined in all devices. 
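+ * @note Depending on the device, the last two arguments are the Q/DIVQ pair (when
+ *       RCC_DCKCFGR_PLLI2SDIVQ is defined) or the R/DIVR pair (when
+ *       RCC_DCKCFGR_PLLI2SDIVR is defined); in both cases the macro computes
+ *       (__INPUTFREQ__ / PLLM) * PLLI2SN divided by the two selected post-dividers.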
+ * @retval PLLI2S clock frequency (in Hz) + */ +#if defined(RCC_DCKCFGR_PLLI2SDIVQ) +#define __LL_RCC_CALC_PLLI2S_SAI_FREQ(__INPUTFREQ__, __PLLM__, __PLLI2SN__, __PLLI2SQ_R__, __PLLI2SDIVQ_R__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLI2SN__) / \ + (((__PLLI2SQ_R__) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos) * (((__PLLI2SDIVQ_R__) >> RCC_DCKCFGR_PLLI2SDIVQ_Pos) + 1U))) +#else +#define __LL_RCC_CALC_PLLI2S_SAI_FREQ(__INPUTFREQ__, __PLLM__, __PLLI2SN__, __PLLI2SQ_R__, __PLLI2SDIVQ_R__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLI2SN__) / \ + (((__PLLI2SQ_R__) >> RCC_PLLI2SCFGR_PLLI2SR_Pos) * ((__PLLI2SDIVQ_R__) >> RCC_DCKCFGR_PLLI2SDIVR_Pos))) + +#endif /* RCC_DCKCFGR_PLLI2SDIVQ */ +#endif /* RCC_DCKCFGR_PLLI2SDIVQ || RCC_DCKCFGR_PLLI2SDIVR */ + +#if defined(SPDIFRX) +/** + * @brief Helper macro to calculate the PLLI2S frequency used on SPDIFRX domain + * @note ex: @ref __LL_RCC_CALC_PLLI2S_SPDIFRX_FREQ (HSE_VALUE,@ref LL_RCC_PLLI2S_GetDivider (), + * @ref LL_RCC_PLLI2S_GetN (), @ref LL_RCC_PLLI2S_GetP ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param __PLLI2SN__ Between 50 and 432 + * @param __PLLI2SP__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SP_DIV_2 + * @arg @ref LL_RCC_PLLI2SP_DIV_4 + * 
@arg @ref LL_RCC_PLLI2SP_DIV_6 + * @arg @ref LL_RCC_PLLI2SP_DIV_8 + * @retval PLLI2S clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLI2S_SPDIFRX_FREQ(__INPUTFREQ__, __PLLM__, __PLLI2SN__, __PLLI2SP__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLI2SN__) / \ + ((((__PLLI2SP__) >> RCC_PLLI2SCFGR_PLLI2SP_Pos) + 1U) * 2U)) + +#endif /* SPDIFRX */ + +/** + * @brief Helper macro to calculate the PLLI2S frequency used for I2S domain + * @note ex: @ref __LL_RCC_CALC_PLLI2S_I2S_FREQ (HSE_VALUE,@ref LL_RCC_PLLI2S_GetDivider (), + * @ref LL_RCC_PLLI2S_GetN (), @ref LL_RCC_PLLI2S_GetR ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param __PLLI2SN__ Between 50/192(*) and 432 + * + * (*) value not defined in all devices. 
+ * @param __PLLI2SR__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SR_DIV_2 + * @arg @ref LL_RCC_PLLI2SR_DIV_3 + * @arg @ref LL_RCC_PLLI2SR_DIV_4 + * @arg @ref LL_RCC_PLLI2SR_DIV_5 + * @arg @ref LL_RCC_PLLI2SR_DIV_6 + * @arg @ref LL_RCC_PLLI2SR_DIV_7 + * @retval PLLI2S clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLI2S_I2S_FREQ(__INPUTFREQ__, __PLLM__, __PLLI2SN__, __PLLI2SR__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLI2SN__) / \ + ((__PLLI2SR__) >> RCC_PLLI2SCFGR_PLLI2SR_Pos)) + +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) && !defined(RCC_DCKCFGR_PLLI2SDIVQ) +/** + * @brief Helper macro to calculate the PLLI2S frequency used for 48Mhz domain + * @note ex: @ref __LL_RCC_CALC_PLLI2S_48M_FREQ (HSE_VALUE,@ref LL_RCC_PLLI2S_GetDivider (), + * @ref LL_RCC_PLLI2S_GetN (), @ref LL_RCC_PLLI2S_GetQ ()); + * @param __INPUTFREQ__ PLL Input frequency (based on HSE/HSI) + * @param __PLLM__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param __PLLI2SN__ Between 50 and 432 + * @param __PLLI2SQ__ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SQ_DIV_2 + * @arg @ref LL_RCC_PLLI2SQ_DIV_3 + * @arg @ref LL_RCC_PLLI2SQ_DIV_4 + * @arg @ref LL_RCC_PLLI2SQ_DIV_5 + * @arg @ref LL_RCC_PLLI2SQ_DIV_6 + * @arg @ref LL_RCC_PLLI2SQ_DIV_7 + * @arg @ref 
LL_RCC_PLLI2SQ_DIV_8 + * @arg @ref LL_RCC_PLLI2SQ_DIV_9 + * @arg @ref LL_RCC_PLLI2SQ_DIV_10 + * @arg @ref LL_RCC_PLLI2SQ_DIV_11 + * @arg @ref LL_RCC_PLLI2SQ_DIV_12 + * @arg @ref LL_RCC_PLLI2SQ_DIV_13 + * @arg @ref LL_RCC_PLLI2SQ_DIV_14 + * @arg @ref LL_RCC_PLLI2SQ_DIV_15 + * @retval PLLI2S clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PLLI2S_48M_FREQ(__INPUTFREQ__, __PLLM__, __PLLI2SN__, __PLLI2SQ__) (((__INPUTFREQ__) / (__PLLM__)) * (__PLLI2SN__) / \ + ((__PLLI2SQ__) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos)) + +#endif /* RCC_PLLI2SCFGR_PLLI2SQ && !RCC_DCKCFGR_PLLI2SDIVQ */ +#endif /* RCC_PLLI2S_SUPPORT */ + +/** + * @brief Helper macro to calculate the HCLK frequency + * @param __SYSCLKFREQ__ SYSCLK frequency (based on HSE/HSI/PLLCLK) + * @param __AHBPRESCALER__ This parameter can be one of the following values: + * @arg @ref LL_RCC_SYSCLK_DIV_1 + * @arg @ref LL_RCC_SYSCLK_DIV_2 + * @arg @ref LL_RCC_SYSCLK_DIV_4 + * @arg @ref LL_RCC_SYSCLK_DIV_8 + * @arg @ref LL_RCC_SYSCLK_DIV_16 + * @arg @ref LL_RCC_SYSCLK_DIV_64 + * @arg @ref LL_RCC_SYSCLK_DIV_128 + * @arg @ref LL_RCC_SYSCLK_DIV_256 + * @arg @ref LL_RCC_SYSCLK_DIV_512 + * @retval HCLK clock frequency (in Hz) + */ +#define __LL_RCC_CALC_HCLK_FREQ(__SYSCLKFREQ__, __AHBPRESCALER__) ((__SYSCLKFREQ__) >> AHBPrescTable[((__AHBPRESCALER__) &\ + RCC_CFGR_HPRE) >> RCC_CFGR_HPRE_Pos]) + +/** + * @brief Helper macro to calculate the PCLK1 frequency (ABP1) + * @param __HCLKFREQ__ HCLK frequency + * @param __APB1PRESCALER__ This parameter can be one of the following values: + * @arg @ref LL_RCC_APB1_DIV_1 + * @arg @ref LL_RCC_APB1_DIV_2 + * @arg @ref LL_RCC_APB1_DIV_4 + * @arg @ref LL_RCC_APB1_DIV_8 + * @arg @ref LL_RCC_APB1_DIV_16 + * @retval PCLK1 clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PCLK1_FREQ(__HCLKFREQ__, __APB1PRESCALER__) ((__HCLKFREQ__) >> APBPrescTable[(__APB1PRESCALER__) >> RCC_CFGR_PPRE1_Pos]) + +/** + * @brief Helper macro to calculate the PCLK2 frequency (ABP2) + * @param __HCLKFREQ__ HCLK frequency + * @param __APB2PRESCALER__ This parameter can be one of the following values: + * @arg @ref LL_RCC_APB2_DIV_1 + * @arg @ref LL_RCC_APB2_DIV_2 + * @arg @ref LL_RCC_APB2_DIV_4 + * @arg @ref LL_RCC_APB2_DIV_8 + * @arg @ref LL_RCC_APB2_DIV_16 + * @retval PCLK2 clock frequency (in Hz) + */ +#define __LL_RCC_CALC_PCLK2_FREQ(__HCLKFREQ__, __APB2PRESCALER__) ((__HCLKFREQ__) >> APBPrescTable[(__APB2PRESCALER__) >> RCC_CFGR_PPRE2_Pos]) + +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup RCC_LL_Exported_Functions RCC Exported Functions + * @{ + */ + +/** @defgroup RCC_LL_EF_HSE HSE + * @{ + */ + +/** + * @brief Enable the Clock Security System. 
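+ * @note Illustrative enable sequence (HSE should already be running before the
+ *       Clock Security System is armed):
+ *       LL_RCC_HSE_Enable();
+ *       while (LL_RCC_HSE_IsReady() == 0U) {}
+ *       LL_RCC_HSE_EnableCSS();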
+ * @rmtoll CR CSSON LL_RCC_HSE_EnableCSS + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSE_EnableCSS(void) +{ + SET_BIT(RCC->CR, RCC_CR_CSSON); +} + +/** + * @brief Enable HSE external oscillator (HSE Bypass) + * @rmtoll CR HSEBYP LL_RCC_HSE_EnableBypass + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSE_EnableBypass(void) +{ + SET_BIT(RCC->CR, RCC_CR_HSEBYP); +} + +/** + * @brief Disable HSE external oscillator (HSE Bypass) + * @rmtoll CR HSEBYP LL_RCC_HSE_DisableBypass + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSE_DisableBypass(void) +{ + CLEAR_BIT(RCC->CR, RCC_CR_HSEBYP); +} + +/** + * @brief Enable HSE crystal oscillator (HSE ON) + * @rmtoll CR HSEON LL_RCC_HSE_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSE_Enable(void) +{ + SET_BIT(RCC->CR, RCC_CR_HSEON); +} + +/** + * @brief Disable HSE crystal oscillator (HSE ON) + * @rmtoll CR HSEON LL_RCC_HSE_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSE_Disable(void) +{ + CLEAR_BIT(RCC->CR, RCC_CR_HSEON); +} + +/** + * @brief Check if HSE oscillator Ready + * @rmtoll CR HSERDY LL_RCC_HSE_IsReady + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_HSE_IsReady(void) +{ + return (READ_BIT(RCC->CR, RCC_CR_HSERDY) == (RCC_CR_HSERDY)); +} + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_HSI HSI + * @{ + */ + +/** + * @brief Enable HSI oscillator + * @rmtoll CR HSION LL_RCC_HSI_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSI_Enable(void) +{ + SET_BIT(RCC->CR, RCC_CR_HSION); +} + +/** + * @brief Disable HSI oscillator + * @rmtoll CR HSION LL_RCC_HSI_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSI_Disable(void) +{ + CLEAR_BIT(RCC->CR, RCC_CR_HSION); +} + +/** + * @brief Check if HSI clock is ready + * @rmtoll CR HSIRDY LL_RCC_HSI_IsReady + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_HSI_IsReady(void) +{ + return (READ_BIT(RCC->CR, RCC_CR_HSIRDY) == (RCC_CR_HSIRDY)); +} + +/** + * @brief Get HSI Calibration value + * @note When HSITRIM is written, HSICAL is updated with the sum of + * HSITRIM and the factory trim value + * @rmtoll CR HSICAL LL_RCC_HSI_GetCalibration + * @retval Between Min_Data = 0x00 and Max_Data = 0xFF + */ +__STATIC_INLINE uint32_t LL_RCC_HSI_GetCalibration(void) +{ + return (uint32_t)(READ_BIT(RCC->CR, RCC_CR_HSICAL) >> RCC_CR_HSICAL_Pos); +} + +/** + * @brief Set HSI Calibration trimming + * @note user-programmable trimming value that is added to the HSICAL + * @note Default value is 16, which, when added to the HSICAL value, + * should trim the HSI to 16 MHz +/- 1 % + * @rmtoll CR HSITRIM LL_RCC_HSI_SetCalibTrimming + * @param Value Between Min_Data = 0 and Max_Data = 31 + * @retval None + */ +__STATIC_INLINE void LL_RCC_HSI_SetCalibTrimming(uint32_t Value) +{ + MODIFY_REG(RCC->CR, RCC_CR_HSITRIM, Value << RCC_CR_HSITRIM_Pos); +} + +/** + * @brief Get HSI Calibration trimming + * @rmtoll CR HSITRIM LL_RCC_HSI_GetCalibTrimming + * @retval Between Min_Data = 0 and Max_Data = 31 + */ +__STATIC_INLINE uint32_t LL_RCC_HSI_GetCalibTrimming(void) +{ + return (uint32_t)(READ_BIT(RCC->CR, RCC_CR_HSITRIM) >> RCC_CR_HSITRIM_Pos); +} + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_LSE LSE + * @{ + */ + +/** + * @brief Enable Low Speed External (LSE) crystal. + * @rmtoll BDCR LSEON LL_RCC_LSE_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSE_Enable(void) +{ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEON); +} + +/** + * @brief Disable Low Speed External (LSE) crystal. 
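+ * @note The LSE control bits live in the backup domain (RCC BDCR register); on most
+ *       devices backup domain write access must be granted (PWR DBP bit) before this
+ *       call takes effect.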
+ * @rmtoll BDCR LSEON LL_RCC_LSE_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSE_Disable(void) +{ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEON); +} + +/** + * @brief Enable external clock source (LSE bypass). + * @rmtoll BDCR LSEBYP LL_RCC_LSE_EnableBypass + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSE_EnableBypass(void) +{ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); +} + +/** + * @brief Disable external clock source (LSE bypass). + * @rmtoll BDCR LSEBYP LL_RCC_LSE_DisableBypass + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSE_DisableBypass(void) +{ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); +} + +/** + * @brief Check if LSE oscillator Ready + * @rmtoll BDCR LSERDY LL_RCC_LSE_IsReady + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_LSE_IsReady(void) +{ + return (READ_BIT(RCC->BDCR, RCC_BDCR_LSERDY) == (RCC_BDCR_LSERDY)); +} + +#if defined(RCC_BDCR_LSEMOD) +/** + * @brief Enable LSE high drive mode. + * @note LSE high drive mode can be enabled only when the LSE clock is disabled + * @rmtoll BDCR LSEMOD LL_RCC_LSE_EnableHighDriveMode + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSE_EnableHighDriveMode(void) +{ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEMOD); +} + +/** + * @brief Disable LSE high drive mode. + * @note LSE high drive mode can be disabled only when the LSE clock is disabled + * @rmtoll BDCR LSEMOD LL_RCC_LSE_DisableHighDriveMode + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSE_DisableHighDriveMode(void) +{ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEMOD); +} +#endif /* RCC_BDCR_LSEMOD */ + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_LSI LSI + * @{ + */ + +/** + * @brief Enable LSI Oscillator + * @rmtoll CSR LSION LL_RCC_LSI_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSI_Enable(void) +{ + SET_BIT(RCC->CSR, RCC_CSR_LSION); +} + +/** + * @brief Disable LSI Oscillator + * @rmtoll CSR LSION LL_RCC_LSI_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_LSI_Disable(void) +{ + CLEAR_BIT(RCC->CSR, RCC_CSR_LSION); +} + +/** + * @brief Check if LSI is Ready + * @rmtoll CSR LSIRDY LL_RCC_LSI_IsReady + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_LSI_IsReady(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_LSIRDY) == (RCC_CSR_LSIRDY)); +} + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_System System + * @{ + */ + +/** + * @brief Configure the system clock source + * @rmtoll CFGR SW LL_RCC_SetSysClkSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_SYS_CLKSOURCE_HSI + * @arg @ref LL_RCC_SYS_CLKSOURCE_HSE + * @arg @ref LL_RCC_SYS_CLKSOURCE_PLL + * @arg @ref LL_RCC_SYS_CLKSOURCE_PLLR (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetSysClkSource(uint32_t Source) +{ + MODIFY_REG(RCC->CFGR, RCC_CFGR_SW, Source); +} + +/** + * @brief Get the system clock source + * @rmtoll CFGR SWS LL_RCC_GetSysClkSource + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_SYS_CLKSOURCE_STATUS_HSI + * @arg @ref LL_RCC_SYS_CLKSOURCE_STATUS_HSE + * @arg @ref LL_RCC_SYS_CLKSOURCE_STATUS_PLL + * @arg @ref LL_RCC_SYS_CLKSOURCE_STATUS_PLLR (*) + * + * (*) value not defined in all devices. 
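+ * @note Illustrative system clock switch using this read-back (constants assumed
+ *       available on the target device):
+ *       LL_RCC_SetSysClkSource(LL_RCC_SYS_CLKSOURCE_PLL);
+ *       while (LL_RCC_GetSysClkSource() != LL_RCC_SYS_CLKSOURCE_STATUS_PLL) {}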
+ */ +__STATIC_INLINE uint32_t LL_RCC_GetSysClkSource(void) +{ + return (uint32_t)(READ_BIT(RCC->CFGR, RCC_CFGR_SWS)); +} + +/** + * @brief Set AHB prescaler + * @rmtoll CFGR HPRE LL_RCC_SetAHBPrescaler + * @param Prescaler This parameter can be one of the following values: + * @arg @ref LL_RCC_SYSCLK_DIV_1 + * @arg @ref LL_RCC_SYSCLK_DIV_2 + * @arg @ref LL_RCC_SYSCLK_DIV_4 + * @arg @ref LL_RCC_SYSCLK_DIV_8 + * @arg @ref LL_RCC_SYSCLK_DIV_16 + * @arg @ref LL_RCC_SYSCLK_DIV_64 + * @arg @ref LL_RCC_SYSCLK_DIV_128 + * @arg @ref LL_RCC_SYSCLK_DIV_256 + * @arg @ref LL_RCC_SYSCLK_DIV_512 + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetAHBPrescaler(uint32_t Prescaler) +{ + MODIFY_REG(RCC->CFGR, RCC_CFGR_HPRE, Prescaler); +} + +/** + * @brief Set APB1 prescaler + * @rmtoll CFGR PPRE1 LL_RCC_SetAPB1Prescaler + * @param Prescaler This parameter can be one of the following values: + * @arg @ref LL_RCC_APB1_DIV_1 + * @arg @ref LL_RCC_APB1_DIV_2 + * @arg @ref LL_RCC_APB1_DIV_4 + * @arg @ref LL_RCC_APB1_DIV_8 + * @arg @ref LL_RCC_APB1_DIV_16 + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetAPB1Prescaler(uint32_t Prescaler) +{ + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE1, Prescaler); +} + +/** + * @brief Set APB2 prescaler + * @rmtoll CFGR PPRE2 LL_RCC_SetAPB2Prescaler + * @param Prescaler This parameter can be one of the following values: + * @arg @ref LL_RCC_APB2_DIV_1 + * @arg @ref LL_RCC_APB2_DIV_2 + * @arg @ref LL_RCC_APB2_DIV_4 + * @arg @ref LL_RCC_APB2_DIV_8 + * @arg @ref LL_RCC_APB2_DIV_16 + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetAPB2Prescaler(uint32_t Prescaler) +{ + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE2, Prescaler); +} + +/** + * @brief Get AHB prescaler + * @rmtoll CFGR HPRE LL_RCC_GetAHBPrescaler + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_SYSCLK_DIV_1 + * @arg @ref LL_RCC_SYSCLK_DIV_2 + * @arg @ref LL_RCC_SYSCLK_DIV_4 + * @arg @ref LL_RCC_SYSCLK_DIV_8 + * @arg @ref LL_RCC_SYSCLK_DIV_16 + * @arg @ref LL_RCC_SYSCLK_DIV_64 + * @arg @ref LL_RCC_SYSCLK_DIV_128 + * @arg @ref LL_RCC_SYSCLK_DIV_256 + * @arg @ref LL_RCC_SYSCLK_DIV_512 + */ +__STATIC_INLINE uint32_t LL_RCC_GetAHBPrescaler(void) +{ + return (uint32_t)(READ_BIT(RCC->CFGR, RCC_CFGR_HPRE)); +} + +/** + * @brief Get APB1 prescaler + * @rmtoll CFGR PPRE1 LL_RCC_GetAPB1Prescaler + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_APB1_DIV_1 + * @arg @ref LL_RCC_APB1_DIV_2 + * @arg @ref LL_RCC_APB1_DIV_4 + * @arg @ref LL_RCC_APB1_DIV_8 + * @arg @ref LL_RCC_APB1_DIV_16 + */ +__STATIC_INLINE uint32_t LL_RCC_GetAPB1Prescaler(void) +{ + return (uint32_t)(READ_BIT(RCC->CFGR, RCC_CFGR_PPRE1)); +} + +/** + * @brief Get APB2 prescaler + * @rmtoll CFGR PPRE2 LL_RCC_GetAPB2Prescaler + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_APB2_DIV_1 + * @arg @ref LL_RCC_APB2_DIV_2 + * @arg @ref LL_RCC_APB2_DIV_4 + * @arg @ref LL_RCC_APB2_DIV_8 + * @arg @ref LL_RCC_APB2_DIV_16 + */ +__STATIC_INLINE uint32_t LL_RCC_GetAPB2Prescaler(void) +{ + return (uint32_t)(READ_BIT(RCC->CFGR, RCC_CFGR_PPRE2)); +} + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_MCO MCO + * @{ + */ + +#if defined(RCC_CFGR_MCO1EN) +/** + * @brief Enable MCO1 output + * @rmtoll CFGR RCC_CFGR_MCO1EN LL_RCC_MCO1_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_MCO1_Enable(void) +{ + SET_BIT(RCC->CFGR, RCC_CFGR_MCO1EN); +} + +/** + * @brief Disable MCO1 output + * @rmtoll CFGR RCC_CFGR_MCO1EN LL_RCC_MCO1_Disable + * @retval None + */ +__STATIC_INLINE void 
LL_RCC_MCO1_Disable(void) +{ + CLEAR_BIT(RCC->CFGR, RCC_CFGR_MCO1EN); +} +#endif /* RCC_CFGR_MCO1EN */ + +#if defined(RCC_CFGR_MCO2EN) +/** + * @brief Enable MCO2 output + * @rmtoll CFGR RCC_CFGR_MCO2EN LL_RCC_MCO2_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_MCO2_Enable(void) +{ + SET_BIT(RCC->CFGR, RCC_CFGR_MCO2EN); +} + +/** + * @brief Disable MCO2 output + * @rmtoll CFGR RCC_CFGR_MCO2EN LL_RCC_MCO2_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_MCO2_Disable(void) +{ + CLEAR_BIT(RCC->CFGR, RCC_CFGR_MCO2EN); +} +#endif /* RCC_CFGR_MCO2EN */ + +/** + * @brief Configure MCOx + * @rmtoll CFGR MCO1 LL_RCC_ConfigMCO\n + * CFGR MCO1PRE LL_RCC_ConfigMCO\n + * CFGR MCO2 LL_RCC_ConfigMCO\n + * CFGR MCO2PRE LL_RCC_ConfigMCO + * @param MCOxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_MCO1SOURCE_HSI + * @arg @ref LL_RCC_MCO1SOURCE_LSE + * @arg @ref LL_RCC_MCO1SOURCE_HSE + * @arg @ref LL_RCC_MCO1SOURCE_PLLCLK + * @arg @ref LL_RCC_MCO2SOURCE_SYSCLK + * @arg @ref LL_RCC_MCO2SOURCE_PLLI2S + * @arg @ref LL_RCC_MCO2SOURCE_HSE + * @arg @ref LL_RCC_MCO2SOURCE_PLLCLK + * @param MCOxPrescaler This parameter can be one of the following values: + * @arg @ref LL_RCC_MCO1_DIV_1 + * @arg @ref LL_RCC_MCO1_DIV_2 + * @arg @ref LL_RCC_MCO1_DIV_3 + * @arg @ref LL_RCC_MCO1_DIV_4 + * @arg @ref LL_RCC_MCO1_DIV_5 + * @arg @ref LL_RCC_MCO2_DIV_1 + * @arg @ref LL_RCC_MCO2_DIV_2 + * @arg @ref LL_RCC_MCO2_DIV_3 + * @arg @ref LL_RCC_MCO2_DIV_4 + * @arg @ref LL_RCC_MCO2_DIV_5 + * @retval None + */ +__STATIC_INLINE void LL_RCC_ConfigMCO(uint32_t MCOxSource, uint32_t MCOxPrescaler) +{ + MODIFY_REG(RCC->CFGR, (MCOxSource & 0xFFFF0000U) | (MCOxPrescaler & 0xFFFF0000U), (MCOxSource << 16U) | (MCOxPrescaler << 16U)); +} + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_Peripheral_Clock_Source Peripheral Clock Source + * @{ + */ +#if defined(FMPI2C1) +/** + * @brief Configure FMPI2C clock source + * @rmtoll DCKCFGR2 FMPI2C1SEL LL_RCC_SetFMPI2CClockSource + * @param FMPI2CxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_FMPI2C1_CLKSOURCE_PCLK1 + * @arg @ref LL_RCC_FMPI2C1_CLKSOURCE_SYSCLK + * @arg @ref LL_RCC_FMPI2C1_CLKSOURCE_HSI + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetFMPI2CClockSource(uint32_t FMPI2CxSource) +{ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_FMPI2C1SEL, FMPI2CxSource); +} +#endif /* FMPI2C1 */ + +#if defined(LPTIM1) +/** + * @brief Configure LPTIMx clock source + * @rmtoll DCKCFGR2 LPTIM1SEL LL_RCC_SetLPTIMClockSource + * @param LPTIMxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_PCLK1 + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_HSI + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_LSI + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_LSE + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetLPTIMClockSource(uint32_t LPTIMxSource) +{ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL, LPTIMxSource); +} +#endif /* LPTIM1 */ + +#if defined(SAI1) +/** + * @brief Configure SAIx clock source + * @rmtoll DCKCFGR SAI1SRC LL_RCC_SetSAIClockSource\n + * DCKCFGR SAI2SRC LL_RCC_SetSAIClockSource\n + * DCKCFGR SAI1ASRC LL_RCC_SetSAIClockSource\n + * DCKCFGR SAI1BSRC LL_RCC_SetSAIClockSource + * @param SAIxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE_PLLSAI (*) + * @arg @ref 
LL_RCC_SAI2_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE_PLLSRC (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLLSRC (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLLSRC (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetSAIClockSource(uint32_t SAIxSource) +{ + MODIFY_REG(RCC->DCKCFGR, (SAIxSource & 0xFFFF0000U), (SAIxSource << 16U)); +} +#endif /* SAI1 */ + +#if defined(RCC_DCKCFGR_SDIOSEL) || defined(RCC_DCKCFGR2_SDIOSEL) +/** + * @brief Configure SDIO clock source + * @rmtoll DCKCFGR SDIOSEL LL_RCC_SetSDIOClockSource\n + * DCKCFGR2 SDIOSEL LL_RCC_SetSDIOClockSource + * @param SDIOxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_SDIO_CLKSOURCE_PLL48CLK + * @arg @ref LL_RCC_SDIO_CLKSOURCE_SYSCLK + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetSDIOClockSource(uint32_t SDIOxSource) +{ +#if defined(RCC_DCKCFGR_SDIOSEL) + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_SDIOSEL, SDIOxSource); +#else + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_SDIOSEL, SDIOxSource); +#endif /* RCC_DCKCFGR_SDIOSEL */ +} +#endif /* RCC_DCKCFGR_SDIOSEL || RCC_DCKCFGR2_SDIOSEL */ + +#if defined(RCC_DCKCFGR_CK48MSEL) || defined(RCC_DCKCFGR2_CK48MSEL) +/** + * @brief Configure 48Mhz domain clock source + * @rmtoll DCKCFGR CK48MSEL LL_RCC_SetCK48MClockSource\n + * DCKCFGR2 CK48MSEL LL_RCC_SetCK48MClockSource + * @param CK48MxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_CK48M_CLKSOURCE_PLL + * @arg @ref LL_RCC_CK48M_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_CK48M_CLKSOURCE_PLLI2S (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetCK48MClockSource(uint32_t CK48MxSource) +{ +#if defined(RCC_DCKCFGR_CK48MSEL) + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CK48MSEL, CK48MxSource); +#else + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL, CK48MxSource); +#endif /* RCC_DCKCFGR_CK48MSEL */ +} + +#if defined(RNG) +/** + * @brief Configure RNG clock source + * @rmtoll DCKCFGR CK48MSEL LL_RCC_SetRNGClockSource\n + * DCKCFGR2 CK48MSEL LL_RCC_SetRNGClockSource + * @param RNGxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_RNG_CLKSOURCE_PLL + * @arg @ref LL_RCC_RNG_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_RNG_CLKSOURCE_PLLI2S (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetRNGClockSource(uint32_t RNGxSource) +{ +#if defined(RCC_DCKCFGR_CK48MSEL) + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CK48MSEL, RNGxSource); +#else + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL, RNGxSource); +#endif /* RCC_DCKCFGR_CK48MSEL */ +} +#endif /* RNG */ + +#if defined(USB_OTG_FS) || defined(USB_OTG_HS) +/** + * @brief Configure USB clock source + * @rmtoll DCKCFGR CK48MSEL LL_RCC_SetUSBClockSource\n + * DCKCFGR2 CK48MSEL LL_RCC_SetUSBClockSource + * @param USBxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_USB_CLKSOURCE_PLL + * @arg @ref LL_RCC_USB_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_USB_CLKSOURCE_PLLI2S (*) + * + * (*) value not defined in all devices. 
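+ * @note USB shares the CK48MSEL selection with the other 48 MHz domain consumers
+ *       (RNG, and SDIO when clocked from PLL48CLK), so changing the USB source also
+ *       changes theirs. Illustrative call:
+ *       LL_RCC_SetUSBClockSource(LL_RCC_USB_CLKSOURCE_PLL);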
+ * @retval None + */ +__STATIC_INLINE void LL_RCC_SetUSBClockSource(uint32_t USBxSource) +{ +#if defined(RCC_DCKCFGR_CK48MSEL) + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CK48MSEL, USBxSource); +#else + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL, USBxSource); +#endif /* RCC_DCKCFGR_CK48MSEL */ +} +#endif /* USB_OTG_FS || USB_OTG_HS */ +#endif /* RCC_DCKCFGR_CK48MSEL || RCC_DCKCFGR2_CK48MSEL */ + +#if defined(CEC) +/** + * @brief Configure CEC clock source + * @rmtoll DCKCFGR2 CECSEL LL_RCC_SetCECClockSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_CEC_CLKSOURCE_HSI_DIV488 + * @arg @ref LL_RCC_CEC_CLKSOURCE_LSE + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetCECClockSource(uint32_t Source) +{ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CECSEL, Source); +} +#endif /* CEC */ + +/** + * @brief Configure I2S clock source + * @rmtoll CFGR I2SSRC LL_RCC_SetI2SClockSource\n + * DCKCFGR I2SSRC LL_RCC_SetI2SClockSource\n + * DCKCFGR I2S1SRC LL_RCC_SetI2SClockSource\n + * DCKCFGR I2S2SRC LL_RCC_SetI2SClockSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PIN + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PLLSRC (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PLLSRC (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetI2SClockSource(uint32_t Source) +{ +#if defined(RCC_CFGR_I2SSRC) + MODIFY_REG(RCC->CFGR, RCC_CFGR_I2SSRC, Source); +#else + MODIFY_REG(RCC->DCKCFGR, (Source & 0xFFFF0000U), (Source << 16U)); +#endif /* RCC_CFGR_I2SSRC */ +} + +#if defined(DSI) +/** + * @brief Configure DSI clock source + * @rmtoll DCKCFGR DSISEL LL_RCC_SetDSIClockSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_DSI_CLKSOURCE_PHY + * @arg @ref LL_RCC_DSI_CLKSOURCE_PLL + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetDSIClockSource(uint32_t Source) +{ + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_DSISEL, Source); +} +#endif /* DSI */ + +#if defined(DFSDM1_Channel0) +/** + * @brief Configure DFSDM Audio clock source + * @rmtoll DCKCFGR CKDFSDM1ASEL LL_RCC_SetDFSDMAudioClockSource\n + * DCKCFGR CKDFSDM2ASEL LL_RCC_SetDFSDMAudioClockSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_DFSDM1_AUDIO_CLKSOURCE_I2S1 + * @arg @ref LL_RCC_DFSDM1_AUDIO_CLKSOURCE_I2S2 + * @arg @ref LL_RCC_DFSDM2_AUDIO_CLKSOURCE_I2S1 (*) + * @arg @ref LL_RCC_DFSDM2_AUDIO_CLKSOURCE_I2S2 (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetDFSDMAudioClockSource(uint32_t Source) +{ + MODIFY_REG(RCC->DCKCFGR, (Source & 0x0000FFFFU), (Source >> 16U)); +} + +/** + * @brief Configure DFSDM Kernel clock source + * @rmtoll DCKCFGR CKDFSDM1SEL LL_RCC_SetDFSDMClockSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_DFSDM1_CLKSOURCE_PCLK2 + * @arg @ref LL_RCC_DFSDM1_CLKSOURCE_SYSCLK + * @arg @ref LL_RCC_DFSDM2_CLKSOURCE_PCLK2 (*) + * @arg @ref LL_RCC_DFSDM2_CLKSOURCE_SYSCLK (*) + * + * (*) value not defined in all devices. 
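+ * @note Illustrative call (assuming the DFSDM1 kernel clock should follow SYSCLK):
+ *       LL_RCC_SetDFSDMClockSource(LL_RCC_DFSDM1_CLKSOURCE_SYSCLK);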
+ * @retval None + */ +__STATIC_INLINE void LL_RCC_SetDFSDMClockSource(uint32_t Source) +{ + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_CKDFSDM1SEL, Source); +} +#endif /* DFSDM1_Channel0 */ + +#if defined(SPDIFRX) +/** + * @brief Configure SPDIFRX clock source + * @rmtoll DCKCFGR2 SPDIFRXSEL LL_RCC_SetSPDIFRXClockSource + * @param SPDIFRXxSource This parameter can be one of the following values: + * @arg @ref LL_RCC_SPDIFRX1_CLKSOURCE_PLL + * @arg @ref LL_RCC_SPDIFRX1_CLKSOURCE_PLLI2S + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetSPDIFRXClockSource(uint32_t SPDIFRXxSource) +{ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_SPDIFRXSEL, SPDIFRXxSource); +} +#endif /* SPDIFRX */ + +#if defined(FMPI2C1) +/** + * @brief Get FMPI2C clock source + * @rmtoll DCKCFGR2 FMPI2C1SEL LL_RCC_GetFMPI2CClockSource + * @param FMPI2Cx This parameter can be one of the following values: + * @arg @ref LL_RCC_FMPI2C1_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_FMPI2C1_CLKSOURCE_PCLK1 + * @arg @ref LL_RCC_FMPI2C1_CLKSOURCE_SYSCLK + * @arg @ref LL_RCC_FMPI2C1_CLKSOURCE_HSI + */ +__STATIC_INLINE uint32_t LL_RCC_GetFMPI2CClockSource(uint32_t FMPI2Cx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, FMPI2Cx)); +} +#endif /* FMPI2C1 */ + +#if defined(LPTIM1) +/** + * @brief Get LPTIMx clock source + * @rmtoll DCKCFGR2 LPTIM1SEL LL_RCC_GetLPTIMClockSource + * @param LPTIMx This parameter can be one of the following values: + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_PCLK1 + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_HSI + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_LSI + * @arg @ref LL_RCC_LPTIM1_CLKSOURCE_LSE + */ +__STATIC_INLINE uint32_t LL_RCC_GetLPTIMClockSource(uint32_t LPTIMx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL)); +} +#endif /* LPTIM1 */ + +#if defined(SAI1) +/** + * @brief Get SAIx clock source + * @rmtoll DCKCFGR SAI1SEL LL_RCC_GetSAIClockSource\n + * DCKCFGR SAI2SEL LL_RCC_GetSAIClockSource\n + * DCKCFGR SAI1ASRC LL_RCC_GetSAIClockSource\n + * DCKCFGR SAI1BSRC LL_RCC_GetSAIClockSource + * @param SAIx This parameter can be one of the following values: + * @arg @ref LL_RCC_SAI1_CLKSOURCE (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE (*) + * + * (*) value not defined in all devices. + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI1_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI2_CLKSOURCE_PLLSRC (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI1_A_CLKSOURCE_PLLSRC (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_SAI1_B_CLKSOURCE_PLLSRC (*) + * + * (*) value not defined in all devices. 
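+ * @note Illustrative read-back (assumes a device exposing LL_RCC_SAI1_CLKSOURCE):
+ *       uint32_t sai1_src = LL_RCC_GetSAIClockSource(LL_RCC_SAI1_CLKSOURCE);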
+ */ +__STATIC_INLINE uint32_t LL_RCC_GetSAIClockSource(uint32_t SAIx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, SAIx) >> 16U | SAIx); +} +#endif /* SAI1 */ + +#if defined(RCC_DCKCFGR_SDIOSEL) || defined(RCC_DCKCFGR2_SDIOSEL) +/** + * @brief Get SDIOx clock source + * @rmtoll DCKCFGR SDIOSEL LL_RCC_GetSDIOClockSource\n + * DCKCFGR2 SDIOSEL LL_RCC_GetSDIOClockSource + * @param SDIOx This parameter can be one of the following values: + * @arg @ref LL_RCC_SDIO_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_SDIO_CLKSOURCE_PLL48CLK + * @arg @ref LL_RCC_SDIO_CLKSOURCE_SYSCLK + */ +__STATIC_INLINE uint32_t LL_RCC_GetSDIOClockSource(uint32_t SDIOx) +{ +#if defined(RCC_DCKCFGR_SDIOSEL) + return (uint32_t)(READ_BIT(RCC->DCKCFGR, SDIOx)); +#else + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, SDIOx)); +#endif /* RCC_DCKCFGR_SDIOSEL */ +} +#endif /* RCC_DCKCFGR_SDIOSEL || RCC_DCKCFGR2_SDIOSEL */ + +#if defined(RCC_DCKCFGR_CK48MSEL) || defined(RCC_DCKCFGR2_CK48MSEL) +/** + * @brief Get 48Mhz domain clock source + * @rmtoll DCKCFGR CK48MSEL LL_RCC_GetCK48MClockSource\n + * DCKCFGR2 CK48MSEL LL_RCC_GetCK48MClockSource + * @param CK48Mx This parameter can be one of the following values: + * @arg @ref LL_RCC_CK48M_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_CK48M_CLKSOURCE_PLL + * @arg @ref LL_RCC_CK48M_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_CK48M_CLKSOURCE_PLLI2S (*) + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_GetCK48MClockSource(uint32_t CK48Mx) +{ +#if defined(RCC_DCKCFGR_CK48MSEL) + return (uint32_t)(READ_BIT(RCC->DCKCFGR, CK48Mx)); +#else + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, CK48Mx)); +#endif /* RCC_DCKCFGR_CK48MSEL */ +} + +#if defined(RNG) +/** + * @brief Get RNGx clock source + * @rmtoll DCKCFGR CK48MSEL LL_RCC_GetRNGClockSource\n + * DCKCFGR2 CK48MSEL LL_RCC_GetRNGClockSource + * @param RNGx This parameter can be one of the following values: + * @arg @ref LL_RCC_RNG_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_RNG_CLKSOURCE_PLL + * @arg @ref LL_RCC_RNG_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_RNG_CLKSOURCE_PLLI2S (*) + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_GetRNGClockSource(uint32_t RNGx) +{ +#if defined(RCC_DCKCFGR_CK48MSEL) + return (uint32_t)(READ_BIT(RCC->DCKCFGR, RNGx)); +#else + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, RNGx)); +#endif /* RCC_DCKCFGR_CK48MSEL */ +} +#endif /* RNG */ + +#if defined(USB_OTG_FS) || defined(USB_OTG_HS) +/** + * @brief Get USBx clock source + * @rmtoll DCKCFGR CK48MSEL LL_RCC_GetUSBClockSource\n + * DCKCFGR2 CK48MSEL LL_RCC_GetUSBClockSource + * @param USBx This parameter can be one of the following values: + * @arg @ref LL_RCC_USB_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_USB_CLKSOURCE_PLL + * @arg @ref LL_RCC_USB_CLKSOURCE_PLLSAI (*) + * @arg @ref LL_RCC_USB_CLKSOURCE_PLLI2S (*) + * + * (*) value not defined in all devices. 
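A typical use of the 48 MHz-domain getters is a sanity check before bringing up USB or SDIO. A minimal sketch, assuming the constants listed in the documentation above exist on the target device:

```c
/* Sketch: verify the 48 MHz domain routing before starting USB/SDIO. */
#include "stm32f4xx_ll_rcc.h"

static int clk48_domain_uses_main_pll(void)
{
  if (LL_RCC_GetCK48MClockSource(LL_RCC_CK48M_CLKSOURCE)
      != LL_RCC_CK48M_CLKSOURCE_PLL)
  {
    return 0; /* 48 MHz domain fed by PLLSAI/PLLI2S instead of the main PLL */
  }

  /* SDIO may be clocked either from the 48 MHz domain or from SYSCLK */
  return (LL_RCC_GetSDIOClockSource(LL_RCC_SDIO_CLKSOURCE)
          == LL_RCC_SDIO_CLKSOURCE_PLL48CLK);
}
```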
+ */ +__STATIC_INLINE uint32_t LL_RCC_GetUSBClockSource(uint32_t USBx) +{ +#if defined(RCC_DCKCFGR_CK48MSEL) + return (uint32_t)(READ_BIT(RCC->DCKCFGR, USBx)); +#else + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, USBx)); +#endif /* RCC_DCKCFGR_CK48MSEL */ +} +#endif /* USB_OTG_FS || USB_OTG_HS */ +#endif /* RCC_DCKCFGR_CK48MSEL || RCC_DCKCFGR2_CK48MSEL */ + +#if defined(CEC) +/** + * @brief Get CEC Clock Source + * @rmtoll DCKCFGR2 CECSEL LL_RCC_GetCECClockSource + * @param CECx This parameter can be one of the following values: + * @arg @ref LL_RCC_CEC_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_CEC_CLKSOURCE_HSI_DIV488 + * @arg @ref LL_RCC_CEC_CLKSOURCE_LSE + */ +__STATIC_INLINE uint32_t LL_RCC_GetCECClockSource(uint32_t CECx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, CECx)); +} +#endif /* CEC */ + +/** + * @brief Get I2S Clock Source + * @rmtoll CFGR I2SSRC LL_RCC_GetI2SClockSource\n + * DCKCFGR I2SSRC LL_RCC_GetI2SClockSource\n + * DCKCFGR I2S1SRC LL_RCC_GetI2SClockSource\n + * DCKCFGR I2S2SRC LL_RCC_GetI2SClockSource + * @param I2Sx This parameter can be one of the following values: + * @arg @ref LL_RCC_I2S1_CLKSOURCE + * @arg @ref LL_RCC_I2S2_CLKSOURCE (*) + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PIN + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_I2S1_CLKSOURCE_PLLSRC (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PLLI2S (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PIN (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PLL (*) + * @arg @ref LL_RCC_I2S2_CLKSOURCE_PLLSRC (*) + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_GetI2SClockSource(uint32_t I2Sx) +{ +#if defined(RCC_CFGR_I2SSRC) + return (uint32_t)(READ_BIT(RCC->CFGR, I2Sx)); +#else + return (uint32_t)(READ_BIT(RCC->DCKCFGR, I2Sx) >> 16U | I2Sx); +#endif /* RCC_CFGR_I2SSRC */ +} + +#if defined(DFSDM1_Channel0) +/** + * @brief Get DFSDM Audio Clock Source + * @rmtoll DCKCFGR CKDFSDM1ASEL LL_RCC_GetDFSDMAudioClockSource\n + * DCKCFGR CKDFSDM2ASEL LL_RCC_GetDFSDMAudioClockSource + * @param DFSDMx This parameter can be one of the following values: + * @arg @ref LL_RCC_DFSDM1_AUDIO_CLKSOURCE + * @arg @ref LL_RCC_DFSDM2_AUDIO_CLKSOURCE (*) + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_DFSDM1_AUDIO_CLKSOURCE_I2S1 + * @arg @ref LL_RCC_DFSDM1_AUDIO_CLKSOURCE_I2S2 + * @arg @ref LL_RCC_DFSDM2_AUDIO_CLKSOURCE_I2S1 (*) + * @arg @ref LL_RCC_DFSDM2_AUDIO_CLKSOURCE_I2S2 (*) + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_GetDFSDMAudioClockSource(uint32_t DFSDMx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, DFSDMx) << 16U | DFSDMx); +} + +/** + * @brief Get DFSDM Audio Clock Source + * @rmtoll DCKCFGR CKDFSDM1SEL LL_RCC_GetDFSDMClockSource + * @param DFSDMx This parameter can be one of the following values: + * @arg @ref LL_RCC_DFSDM1_CLKSOURCE + * @arg @ref LL_RCC_DFSDM2_CLKSOURCE (*) + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_DFSDM1_CLKSOURCE_PCLK2 + * @arg @ref LL_RCC_DFSDM1_CLKSOURCE_SYSCLK + * @arg @ref LL_RCC_DFSDM2_CLKSOURCE_PCLK2 (*) + * @arg @ref LL_RCC_DFSDM2_CLKSOURCE_SYSCLK (*) + * + * (*) value not defined in all devices. 
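Because the I2S and DFSDM audio selections interact (DFSDM can take its audio clock from an I2S instance), a set-then-verify sequence is a common pattern. A sketch under the assumption that I2S1 and DFSDM1 exist on the device:

```c
/* Sketch: select PLLI2S as the I2S1 kernel clock, verify the selection,
   then route the DFSDM1 audio clock from I2S1. */
#include "stm32f4xx_ll_rcc.h"

static void audio_clock_tree_setup(void)
{
  LL_RCC_SetI2SClockSource(LL_RCC_I2S1_CLKSOURCE_PLLI2S);

  if (LL_RCC_GetI2SClockSource(LL_RCC_I2S1_CLKSOURCE)
      == LL_RCC_I2S1_CLKSOURCE_PLLI2S)
  {
    LL_RCC_SetDFSDMAudioClockSource(LL_RCC_DFSDM1_AUDIO_CLKSOURCE_I2S1);
  }
}
```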
+ */ +__STATIC_INLINE uint32_t LL_RCC_GetDFSDMClockSource(uint32_t DFSDMx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, DFSDMx)); +} +#endif /* DFSDM1_Channel0 */ + +#if defined(SPDIFRX) +/** + * @brief Get SPDIFRX clock source + * @rmtoll DCKCFGR2 SPDIFRXSEL LL_RCC_GetSPDIFRXClockSource + * @param SPDIFRXx This parameter can be one of the following values: + * @arg @ref LL_RCC_SPDIFRX1_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_SPDIFRX1_CLKSOURCE_PLL + * @arg @ref LL_RCC_SPDIFRX1_CLKSOURCE_PLLI2S + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_GetSPDIFRXClockSource(uint32_t SPDIFRXx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR2, SPDIFRXx)); +} +#endif /* SPDIFRX */ + +#if defined(DSI) +/** + * @brief Get DSI Clock Source + * @rmtoll DCKCFGR DSISEL LL_RCC_GetDSIClockSource + * @param DSIx This parameter can be one of the following values: + * @arg @ref LL_RCC_DSI_CLKSOURCE + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_DSI_CLKSOURCE_PHY + * @arg @ref LL_RCC_DSI_CLKSOURCE_PLL + */ +__STATIC_INLINE uint32_t LL_RCC_GetDSIClockSource(uint32_t DSIx) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, DSIx)); +} +#endif /* DSI */ + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_RTC RTC + * @{ + */ + +/** + * @brief Set RTC Clock Source + * @note Once the RTC clock source has been selected, it cannot be changed anymore unless + * the Backup domain is reset, or unless a failure is detected on LSE (LSECSSD is + * set). The BDRST bit can be used to reset them. + * @rmtoll BDCR RTCSEL LL_RCC_SetRTCClockSource + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_RTC_CLKSOURCE_NONE + * @arg @ref LL_RCC_RTC_CLKSOURCE_LSE + * @arg @ref LL_RCC_RTC_CLKSOURCE_LSI + * @arg @ref LL_RCC_RTC_CLKSOURCE_HSE + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetRTCClockSource(uint32_t Source) +{ + MODIFY_REG(RCC->BDCR, RCC_BDCR_RTCSEL, Source); +} + +/** + * @brief Get RTC Clock Source + * @rmtoll BDCR RTCSEL LL_RCC_GetRTCClockSource + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_RTC_CLKSOURCE_NONE + * @arg @ref LL_RCC_RTC_CLKSOURCE_LSE + * @arg @ref LL_RCC_RTC_CLKSOURCE_LSI + * @arg @ref LL_RCC_RTC_CLKSOURCE_HSE + */ +__STATIC_INLINE uint32_t LL_RCC_GetRTCClockSource(void) +{ + return (uint32_t)(READ_BIT(RCC->BDCR, RCC_BDCR_RTCSEL)); +} + +/** + * @brief Enable RTC + * @rmtoll BDCR RTCEN LL_RCC_EnableRTC + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableRTC(void) +{ + SET_BIT(RCC->BDCR, RCC_BDCR_RTCEN); +} + +/** + * @brief Disable RTC + * @rmtoll BDCR RTCEN LL_RCC_DisableRTC + * @retval None + */ +__STATIC_INLINE void LL_RCC_DisableRTC(void) +{ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_RTCEN); +} + +/** + * @brief Check if RTC has been enabled or not + * @rmtoll BDCR RTCEN LL_RCC_IsEnabledRTC + * @retval State of bit (1 or 0). 
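Since RTCSEL can only be changed after a backup-domain reset (see the note on LL_RCC_SetRTCClockSource above), the usual sequence combines the RTC helpers with the backup-domain reset helpers defined a little further below. A sketch, assuming LL_PWR_EnableBkUpAccess() from the LL PWR driver and the standard LL_RCC_LSE_Enable()/LL_RCC_LSE_IsReady() helpers are available, and that the board actually has a 32.768 kHz crystal:

```c
/* Sketch of a typical RTC clocking sequence on LSE. */
#include "stm32f4xx_ll_pwr.h"
#include "stm32f4xx_ll_rcc.h"

static void rtc_clock_on_lse(void)
{
  LL_PWR_EnableBkUpAccess();              /* RTCSEL/RTCEN live in the backup domain */

  LL_RCC_ForceBackupDomainReset();        /* required before changing RTCSEL */
  LL_RCC_ReleaseBackupDomainReset();

  LL_RCC_LSE_Enable();
  while (LL_RCC_LSE_IsReady() != 1U) { }  /* wait for the 32.768 kHz crystal */

  LL_RCC_SetRTCClockSource(LL_RCC_RTC_CLKSOURCE_LSE);
  LL_RCC_EnableRTC();
}
```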
+ */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledRTC(void) +{ + return (READ_BIT(RCC->BDCR, RCC_BDCR_RTCEN) == (RCC_BDCR_RTCEN)); +} + +/** + * @brief Force the Backup domain reset + * @rmtoll BDCR BDRST LL_RCC_ForceBackupDomainReset + * @retval None + */ +__STATIC_INLINE void LL_RCC_ForceBackupDomainReset(void) +{ + SET_BIT(RCC->BDCR, RCC_BDCR_BDRST); +} + +/** + * @brief Release the Backup domain reset + * @rmtoll BDCR BDRST LL_RCC_ReleaseBackupDomainReset + * @retval None + */ +__STATIC_INLINE void LL_RCC_ReleaseBackupDomainReset(void) +{ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_BDRST); +} + +/** + * @brief Set HSE Prescalers for RTC Clock + * @rmtoll CFGR RTCPRE LL_RCC_SetRTC_HSEPrescaler + * @param Prescaler This parameter can be one of the following values: + * @arg @ref LL_RCC_RTC_NOCLOCK + * @arg @ref LL_RCC_RTC_HSE_DIV_2 + * @arg @ref LL_RCC_RTC_HSE_DIV_3 + * @arg @ref LL_RCC_RTC_HSE_DIV_4 + * @arg @ref LL_RCC_RTC_HSE_DIV_5 + * @arg @ref LL_RCC_RTC_HSE_DIV_6 + * @arg @ref LL_RCC_RTC_HSE_DIV_7 + * @arg @ref LL_RCC_RTC_HSE_DIV_8 + * @arg @ref LL_RCC_RTC_HSE_DIV_9 + * @arg @ref LL_RCC_RTC_HSE_DIV_10 + * @arg @ref LL_RCC_RTC_HSE_DIV_11 + * @arg @ref LL_RCC_RTC_HSE_DIV_12 + * @arg @ref LL_RCC_RTC_HSE_DIV_13 + * @arg @ref LL_RCC_RTC_HSE_DIV_14 + * @arg @ref LL_RCC_RTC_HSE_DIV_15 + * @arg @ref LL_RCC_RTC_HSE_DIV_16 + * @arg @ref LL_RCC_RTC_HSE_DIV_17 + * @arg @ref LL_RCC_RTC_HSE_DIV_18 + * @arg @ref LL_RCC_RTC_HSE_DIV_19 + * @arg @ref LL_RCC_RTC_HSE_DIV_20 + * @arg @ref LL_RCC_RTC_HSE_DIV_21 + * @arg @ref LL_RCC_RTC_HSE_DIV_22 + * @arg @ref LL_RCC_RTC_HSE_DIV_23 + * @arg @ref LL_RCC_RTC_HSE_DIV_24 + * @arg @ref LL_RCC_RTC_HSE_DIV_25 + * @arg @ref LL_RCC_RTC_HSE_DIV_26 + * @arg @ref LL_RCC_RTC_HSE_DIV_27 + * @arg @ref LL_RCC_RTC_HSE_DIV_28 + * @arg @ref LL_RCC_RTC_HSE_DIV_29 + * @arg @ref LL_RCC_RTC_HSE_DIV_30 + * @arg @ref LL_RCC_RTC_HSE_DIV_31 + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetRTC_HSEPrescaler(uint32_t Prescaler) +{ + MODIFY_REG(RCC->CFGR, RCC_CFGR_RTCPRE, Prescaler); +} + +/** + * @brief Get HSE Prescalers for RTC Clock + * @rmtoll CFGR RTCPRE LL_RCC_GetRTC_HSEPrescaler + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_RTC_NOCLOCK + * @arg @ref LL_RCC_RTC_HSE_DIV_2 + * @arg @ref LL_RCC_RTC_HSE_DIV_3 + * @arg @ref LL_RCC_RTC_HSE_DIV_4 + * @arg @ref LL_RCC_RTC_HSE_DIV_5 + * @arg @ref LL_RCC_RTC_HSE_DIV_6 + * @arg @ref LL_RCC_RTC_HSE_DIV_7 + * @arg @ref LL_RCC_RTC_HSE_DIV_8 + * @arg @ref LL_RCC_RTC_HSE_DIV_9 + * @arg @ref LL_RCC_RTC_HSE_DIV_10 + * @arg @ref LL_RCC_RTC_HSE_DIV_11 + * @arg @ref LL_RCC_RTC_HSE_DIV_12 + * @arg @ref LL_RCC_RTC_HSE_DIV_13 + * @arg @ref LL_RCC_RTC_HSE_DIV_14 + * @arg @ref LL_RCC_RTC_HSE_DIV_15 + * @arg @ref LL_RCC_RTC_HSE_DIV_16 + * @arg @ref LL_RCC_RTC_HSE_DIV_17 + * @arg @ref LL_RCC_RTC_HSE_DIV_18 + * @arg @ref LL_RCC_RTC_HSE_DIV_19 + * @arg @ref LL_RCC_RTC_HSE_DIV_20 + * @arg @ref LL_RCC_RTC_HSE_DIV_21 + * @arg @ref LL_RCC_RTC_HSE_DIV_22 + * @arg @ref LL_RCC_RTC_HSE_DIV_23 + * @arg @ref LL_RCC_RTC_HSE_DIV_24 + * @arg @ref LL_RCC_RTC_HSE_DIV_25 + * @arg @ref LL_RCC_RTC_HSE_DIV_26 + * @arg @ref LL_RCC_RTC_HSE_DIV_27 + * @arg @ref LL_RCC_RTC_HSE_DIV_28 + * @arg @ref LL_RCC_RTC_HSE_DIV_29 + * @arg @ref LL_RCC_RTC_HSE_DIV_30 + * @arg @ref LL_RCC_RTC_HSE_DIV_31 + */ +__STATIC_INLINE uint32_t LL_RCC_GetRTC_HSEPrescaler(void) +{ + return (uint32_t)(READ_BIT(RCC->CFGR, RCC_CFGR_RTCPRE)); +} + +/** + * @} + */ + +#if defined(RCC_DCKCFGR_TIMPRE) +/** @defgroup RCC_LL_EF_TIM_CLOCK_PRESCALER TIM + * @{ + */ + +/** + * @brief Set 
Timers Clock Prescalers + * @rmtoll DCKCFGR TIMPRE LL_RCC_SetTIMPrescaler + * @param Prescaler This parameter can be one of the following values: + * @arg @ref LL_RCC_TIM_PRESCALER_TWICE + * @arg @ref LL_RCC_TIM_PRESCALER_FOUR_TIMES + * @retval None + */ +__STATIC_INLINE void LL_RCC_SetTIMPrescaler(uint32_t Prescaler) +{ + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_TIMPRE, Prescaler); +} + +/** + * @brief Get Timers Clock Prescalers + * @rmtoll DCKCFGR TIMPRE LL_RCC_GetTIMPrescaler + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_TIM_PRESCALER_TWICE + * @arg @ref LL_RCC_TIM_PRESCALER_FOUR_TIMES + */ +__STATIC_INLINE uint32_t LL_RCC_GetTIMPrescaler(void) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_TIMPRE)); +} + +/** + * @} + */ +#endif /* RCC_DCKCFGR_TIMPRE */ + +/** @defgroup RCC_LL_EF_PLL PLL + * @{ + */ + +/** + * @brief Enable PLL + * @rmtoll CR PLLON LL_RCC_PLL_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_Enable(void) +{ + SET_BIT(RCC->CR, RCC_CR_PLLON); +} + +/** + * @brief Disable PLL + * @note Cannot be disabled if the PLL clock is used as the system clock + * @rmtoll CR PLLON LL_RCC_PLL_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_Disable(void) +{ + CLEAR_BIT(RCC->CR, RCC_CR_PLLON); +} + +/** + * @brief Check if PLL Ready + * @rmtoll CR PLLRDY LL_RCC_PLL_IsReady + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_IsReady(void) +{ + return (READ_BIT(RCC->CR, RCC_CR_PLLRDY) == (RCC_CR_PLLRDY)); +} + +/** + * @brief Configure PLL used for SYSCLK Domain + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLP can be written only when PLL is disabled + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_ConfigDomain_SYS\n + * PLLCFGR PLLM LL_RCC_PLL_ConfigDomain_SYS\n + * PLLCFGR PLLN LL_RCC_PLL_ConfigDomain_SYS\n + * PLLCFGR PLLR LL_RCC_PLL_ConfigDomain_SYS\n + * PLLCFGR PLLP LL_RCC_PLL_ConfigDomain_SYS + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * 
@arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param PLLN Between 50/192(*) and 432 + * + * (*) value not defined in all devices. + * @param PLLP_R This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLP_DIV_2 + * @arg @ref LL_RCC_PLLP_DIV_4 + * @arg @ref LL_RCC_PLLP_DIV_6 + * @arg @ref LL_RCC_PLLP_DIV_8 + * @arg @ref LL_RCC_PLLR_DIV_2 (*) + * @arg @ref LL_RCC_PLLR_DIV_3 (*) + * @arg @ref LL_RCC_PLLR_DIV_4 (*) + * @arg @ref LL_RCC_PLLR_DIV_5 (*) + * @arg @ref LL_RCC_PLLR_DIV_6 (*) + * @arg @ref LL_RCC_PLLR_DIV_7 (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_ConfigDomain_SYS(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLP_R) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC | RCC_PLLCFGR_PLLM | RCC_PLLCFGR_PLLN, + Source | PLLM | PLLN << RCC_PLLCFGR_PLLN_Pos); + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLP, PLLP_R); +#if defined(RCC_PLLR_SYSCLK_SUPPORT) + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLR, PLLP_R); +#endif /* RCC_PLLR_SYSCLK_SUPPORT */ +} + +/** + * @brief Configure PLL used for 48Mhz domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLQ can be written only when PLL is disabled + * @note This can be selected for USB, RNG, SDIO + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_ConfigDomain_48M\n + * PLLCFGR PLLM LL_RCC_PLL_ConfigDomain_48M\n + * PLLCFGR PLLN LL_RCC_PLL_ConfigDomain_48M\n + * PLLCFGR PLLQ LL_RCC_PLL_ConfigDomain_48M + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 
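To make the parameter meanings concrete, here is a sketch of the classic 168 MHz configuration built with LL_RCC_PLL_ConfigDomain_SYS(). The numbers are board/device assumptions (8 MHz HSE crystal, VCO input = 8 MHz / 8 = 1 MHz, VCO = 1 MHz * 336, SYSCLK = 336 MHz / 2); switching SYSCLK over to the PLL is done with the system-clock functions elsewhere in this driver.

```c
/* Sketch: 168 MHz SYSCLK candidate from an assumed 8 MHz HSE crystal. */
#include "stm32f4xx_ll_rcc.h"

static void pll_sysclk_168mhz(void)
{
  LL_RCC_HSE_Enable();                      /* assumed 8 MHz external crystal */
  while (LL_RCC_HSE_IsReady() != 1U) { }

  LL_RCC_PLL_ConfigDomain_SYS(LL_RCC_PLLSOURCE_HSE,
                              LL_RCC_PLLM_DIV_8, 336U, LL_RCC_PLLP_DIV_2);

  LL_RCC_PLL_Enable();
  while (LL_RCC_PLL_IsReady() != 1U) { }
}
```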
+ * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param PLLN Between 50/192(*) and 432 + * + * (*) value not defined in all devices. + * @param PLLQ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLQ_DIV_2 + * @arg @ref LL_RCC_PLLQ_DIV_3 + * @arg @ref LL_RCC_PLLQ_DIV_4 + * @arg @ref LL_RCC_PLLQ_DIV_5 + * @arg @ref LL_RCC_PLLQ_DIV_6 + * @arg @ref LL_RCC_PLLQ_DIV_7 + * @arg @ref LL_RCC_PLLQ_DIV_8 + * @arg @ref LL_RCC_PLLQ_DIV_9 + * @arg @ref LL_RCC_PLLQ_DIV_10 + * @arg @ref LL_RCC_PLLQ_DIV_11 + * @arg @ref LL_RCC_PLLQ_DIV_12 + * @arg @ref LL_RCC_PLLQ_DIV_13 + * @arg @ref LL_RCC_PLLQ_DIV_14 + * @arg @ref LL_RCC_PLLQ_DIV_15 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_ConfigDomain_48M(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLQ) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC | RCC_PLLCFGR_PLLM | RCC_PLLCFGR_PLLN | RCC_PLLCFGR_PLLQ, + Source | PLLM | PLLN << RCC_PLLCFGR_PLLN_Pos | PLLQ); +} + +#if defined(DSI) +/** + * @brief Configure PLL used for DSI clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI are disabled + * @note PLLN/PLLR can be written only when PLL is disabled + * @note This can be selected for DSI + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_ConfigDomain_DSI\n + * PLLCFGR PLLM LL_RCC_PLL_ConfigDomain_DSI\n + * PLLCFGR PLLN LL_RCC_PLL_ConfigDomain_DSI\n + * PLLCFGR PLLR LL_RCC_PLL_ConfigDomain_DSI + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 
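The 48 MHz domain is derived from the same VCO, so PLLM/PLLN must match whatever was programmed for the SYSCLK domain. Continuing the 8 MHz HSE / N = 336 assumption from the previous sketch, Q = 7 gives 336 MHz / 7 = 48 MHz:

```c
/* Sketch: 48 MHz clock for USB/RNG/SDIO, sharing PLLM/PLLN with SYSCLK.
   Must be called while the main PLL is still disabled. */
#include "stm32f4xx_ll_rcc.h"

static void pll_48mhz_domain(void)
{
  LL_RCC_PLL_ConfigDomain_48M(LL_RCC_PLLSOURCE_HSE,
                              LL_RCC_PLLM_DIV_8, 336U, LL_RCC_PLLQ_DIV_7);

  /* Route the 48 MHz domain from the main PLL (on devices with CK48MSEL) */
  LL_RCC_SetUSBClockSource(LL_RCC_USB_CLKSOURCE_PLL);
}
```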
+ * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param PLLN Between 50 and 432 + * @param PLLR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_ConfigDomain_DSI(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLR) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC | RCC_PLLCFGR_PLLM | RCC_PLLCFGR_PLLN | RCC_PLLCFGR_PLLR, + Source | PLLM | PLLN << RCC_PLLCFGR_PLLN_Pos | PLLR); +} +#endif /* DSI */ + +#if defined(RCC_PLLR_I2S_CLKSOURCE_SUPPORT) +/** + * @brief Configure PLL used for I2S clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI are disabled + * @note PLLN/PLLR can be written only when PLL is disabled + * @note This can be selected for I2S + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_ConfigDomain_I2S\n + * PLLCFGR PLLM LL_RCC_PLL_ConfigDomain_I2S\n + * PLLCFGR PLLN LL_RCC_PLL_ConfigDomain_I2S\n + * PLLCFGR PLLR LL_RCC_PLL_ConfigDomain_I2S + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * 
@arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param PLLN Between 50 and 432 + * @param PLLR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_ConfigDomain_I2S(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLR) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC | RCC_PLLCFGR_PLLM | RCC_PLLCFGR_PLLN | RCC_PLLCFGR_PLLR, + Source | PLLM | PLLN << RCC_PLLCFGR_PLLN_Pos | PLLR); +} +#endif /* RCC_PLLR_I2S_CLKSOURCE_SUPPORT */ + +#if defined(SPDIFRX) +/** + * @brief Configure PLL used for SPDIFRX clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI are disabled + * @note PLLN/PLLR can be written only when PLL is disabled + * @note This can be selected for SPDIFRX + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_ConfigDomain_SPDIFRX\n + * PLLCFGR PLLM LL_RCC_PLL_ConfigDomain_SPDIFRX\n + * PLLCFGR PLLN LL_RCC_PLL_ConfigDomain_SPDIFRX\n + * PLLCFGR PLLR LL_RCC_PLL_ConfigDomain_SPDIFRX + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref 
LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param PLLN Between 50 and 432 + * @param PLLR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_ConfigDomain_SPDIFRX(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLR) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC | RCC_PLLCFGR_PLLM | RCC_PLLCFGR_PLLN | RCC_PLLCFGR_PLLR, + Source | PLLM | PLLN << RCC_PLLCFGR_PLLN_Pos | PLLR); +} +#endif /* SPDIFRX */ + +#if defined(RCC_PLLCFGR_PLLR) +#if defined(SAI1) +/** + * @brief Configure PLL used for SAI clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI are disabled + * @note PLLN/PLLR can be written only when PLL is disabled + * @note This can be selected for SAI + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_ConfigDomain_SAI\n + * PLLCFGR PLLM LL_RCC_PLL_ConfigDomain_SAI\n + * PLLCFGR PLLN LL_RCC_PLL_ConfigDomain_SAI\n + * PLLCFGR PLLR LL_RCC_PLL_ConfigDomain_SAI\n + * DCKCFGR PLLDIVR LL_RCC_PLL_ConfigDomain_SAI + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref 
LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + * @param PLLN Between 50 and 432 + * @param PLLR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + * @param PLLDIVR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLDIVR_DIV_1 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_2 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_3 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_4 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_5 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_6 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_7 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_8 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_9 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_10 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_11 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_12 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_13 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_14 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_15 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_16 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_17 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_18 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_19 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_20 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_21 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_22 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_23 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_24 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_25 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_26 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_27 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_28 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_29 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_30 (*) + * @arg @ref LL_RCC_PLLDIVR_DIV_31 (*) + * + * (*) value not defined in all devices. + * @retval None + */ +#if defined(RCC_DCKCFGR_PLLDIVR) +__STATIC_INLINE void LL_RCC_PLL_ConfigDomain_SAI(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLR, + uint32_t PLLDIVR) +#else +__STATIC_INLINE void LL_RCC_PLL_ConfigDomain_SAI(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLR) +#endif /* RCC_DCKCFGR_PLLDIVR */ +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC | RCC_PLLCFGR_PLLM | RCC_PLLCFGR_PLLN | RCC_PLLCFGR_PLLR, + Source | PLLM | PLLN << RCC_PLLCFGR_PLLN_Pos | PLLR); +#if defined(RCC_DCKCFGR_PLLDIVR) + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLDIVR, PLLDIVR); +#endif /* RCC_DCKCFGR_PLLDIVR */ +} +#endif /* SAI1 */ +#endif /* RCC_PLLCFGR_PLLR */ + +/** + * @brief Configure PLL clock source + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_SetMainSource + * @param PLLSource This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_SetMainSource(uint32_t PLLSource) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC, PLLSource); +} + +/** + * @brief Get the oscillator used as PLL clock source. 
+ * @rmtoll PLLCFGR PLLSRC LL_RCC_PLL_GetMainSource + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetMainSource(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC)); +} + +/** + * @brief Get Main PLL multiplication factor for VCO + * @rmtoll PLLCFGR PLLN LL_RCC_PLL_GetN + * @retval Between 50/192(*) and 432 + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetN(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos); +} + +/** + * @brief Get Main PLL division factor for PLLP + * @rmtoll PLLCFGR PLLP LL_RCC_PLL_GetP + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLP_DIV_2 + * @arg @ref LL_RCC_PLLP_DIV_4 + * @arg @ref LL_RCC_PLLP_DIV_6 + * @arg @ref LL_RCC_PLLP_DIV_8 + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetP(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLP)); +} + +/** + * @brief Get Main PLL division factor for PLLQ + * @note used for PLL48MCLK selected for USB, RNG, SDIO (48 MHz clock) + * @rmtoll PLLCFGR PLLQ LL_RCC_PLL_GetQ + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLQ_DIV_2 + * @arg @ref LL_RCC_PLLQ_DIV_3 + * @arg @ref LL_RCC_PLLQ_DIV_4 + * @arg @ref LL_RCC_PLLQ_DIV_5 + * @arg @ref LL_RCC_PLLQ_DIV_6 + * @arg @ref LL_RCC_PLLQ_DIV_7 + * @arg @ref LL_RCC_PLLQ_DIV_8 + * @arg @ref LL_RCC_PLLQ_DIV_9 + * @arg @ref LL_RCC_PLLQ_DIV_10 + * @arg @ref LL_RCC_PLLQ_DIV_11 + * @arg @ref LL_RCC_PLLQ_DIV_12 + * @arg @ref LL_RCC_PLLQ_DIV_13 + * @arg @ref LL_RCC_PLLQ_DIV_14 + * @arg @ref LL_RCC_PLLQ_DIV_15 + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetQ(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLQ)); +} + +#if defined(RCC_PLLCFGR_PLLR) +/** + * @brief Get Main PLL division factor for PLLR + * @note used for PLLCLK (system clock) + * @rmtoll PLLCFGR PLLR LL_RCC_PLL_GetR + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLR_DIV_2 + * @arg @ref LL_RCC_PLLR_DIV_3 + * @arg @ref LL_RCC_PLLR_DIV_4 + * @arg @ref LL_RCC_PLLR_DIV_5 + * @arg @ref LL_RCC_PLLR_DIV_6 + * @arg @ref LL_RCC_PLLR_DIV_7 + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetR(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLR)); +} +#endif /* RCC_PLLCFGR_PLLR */ + +#if defined(RCC_DCKCFGR_PLLDIVR) +/** + * @brief Get Main PLL division factor for PLLDIVR + * @note used for PLLSAICLK (SAI1 and SAI2 clock) + * @rmtoll DCKCFGR PLLDIVR LL_RCC_PLL_GetDIVR + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLDIVR_DIV_1 + * @arg @ref LL_RCC_PLLDIVR_DIV_2 + * @arg @ref LL_RCC_PLLDIVR_DIV_3 + * @arg @ref LL_RCC_PLLDIVR_DIV_4 + * @arg @ref LL_RCC_PLLDIVR_DIV_5 + * @arg @ref LL_RCC_PLLDIVR_DIV_6 + * @arg @ref LL_RCC_PLLDIVR_DIV_7 + * @arg @ref LL_RCC_PLLDIVR_DIV_8 + * @arg @ref LL_RCC_PLLDIVR_DIV_9 + * @arg @ref LL_RCC_PLLDIVR_DIV_10 + * @arg @ref LL_RCC_PLLDIVR_DIV_11 + * @arg @ref LL_RCC_PLLDIVR_DIV_12 + * @arg @ref LL_RCC_PLLDIVR_DIV_13 + * @arg @ref LL_RCC_PLLDIVR_DIV_14 + * @arg @ref LL_RCC_PLLDIVR_DIV_15 + * @arg @ref LL_RCC_PLLDIVR_DIV_16 + * @arg @ref LL_RCC_PLLDIVR_DIV_17 + * @arg @ref LL_RCC_PLLDIVR_DIV_18 + * @arg @ref LL_RCC_PLLDIVR_DIV_19 + * @arg @ref LL_RCC_PLLDIVR_DIV_20 + * @arg @ref LL_RCC_PLLDIVR_DIV_21 + * @arg @ref LL_RCC_PLLDIVR_DIV_22 + * @arg @ref LL_RCC_PLLDIVR_DIV_23 + * @arg @ref LL_RCC_PLLDIVR_DIV_24 
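The read-back accessors are handy for asserting that the PLL really carries the configuration the rest of the firmware assumes. A small sketch, checking against the example values used earlier (HSE source, N = 336, P = /2):

```c
/* Sketch: verify the main PLL settings before trusting a hard-coded
   SystemCoreClock value elsewhere in the application. */
#include "stm32f4xx_ll_rcc.h"

static int pll_settings_match_expectation(void)
{
  return (LL_RCC_PLL_GetMainSource() == LL_RCC_PLLSOURCE_HSE)
      && (LL_RCC_PLL_GetN() == 336U)
      && (LL_RCC_PLL_GetP() == LL_RCC_PLLP_DIV_2);
}
```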
+ * @arg @ref LL_RCC_PLLDIVR_DIV_25 + * @arg @ref LL_RCC_PLLDIVR_DIV_26 + * @arg @ref LL_RCC_PLLDIVR_DIV_27 + * @arg @ref LL_RCC_PLLDIVR_DIV_28 + * @arg @ref LL_RCC_PLLDIVR_DIV_29 + * @arg @ref LL_RCC_PLLDIVR_DIV_30 + * @arg @ref LL_RCC_PLLDIVR_DIV_31 + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetDIVR(void) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_PLLDIVR)); +} +#endif /* RCC_DCKCFGR_PLLDIVR */ + +/** + * @brief Get Division factor for the main PLL and other PLL + * @rmtoll PLLCFGR PLLM LL_RCC_PLL_GetDivider + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLM_DIV_2 + * @arg @ref LL_RCC_PLLM_DIV_3 + * @arg @ref LL_RCC_PLLM_DIV_4 + * @arg @ref LL_RCC_PLLM_DIV_5 + * @arg @ref LL_RCC_PLLM_DIV_6 + * @arg @ref LL_RCC_PLLM_DIV_7 + * @arg @ref LL_RCC_PLLM_DIV_8 + * @arg @ref LL_RCC_PLLM_DIV_9 + * @arg @ref LL_RCC_PLLM_DIV_10 + * @arg @ref LL_RCC_PLLM_DIV_11 + * @arg @ref LL_RCC_PLLM_DIV_12 + * @arg @ref LL_RCC_PLLM_DIV_13 + * @arg @ref LL_RCC_PLLM_DIV_14 + * @arg @ref LL_RCC_PLLM_DIV_15 + * @arg @ref LL_RCC_PLLM_DIV_16 + * @arg @ref LL_RCC_PLLM_DIV_17 + * @arg @ref LL_RCC_PLLM_DIV_18 + * @arg @ref LL_RCC_PLLM_DIV_19 + * @arg @ref LL_RCC_PLLM_DIV_20 + * @arg @ref LL_RCC_PLLM_DIV_21 + * @arg @ref LL_RCC_PLLM_DIV_22 + * @arg @ref LL_RCC_PLLM_DIV_23 + * @arg @ref LL_RCC_PLLM_DIV_24 + * @arg @ref LL_RCC_PLLM_DIV_25 + * @arg @ref LL_RCC_PLLM_DIV_26 + * @arg @ref LL_RCC_PLLM_DIV_27 + * @arg @ref LL_RCC_PLLM_DIV_28 + * @arg @ref LL_RCC_PLLM_DIV_29 + * @arg @ref LL_RCC_PLLM_DIV_30 + * @arg @ref LL_RCC_PLLM_DIV_31 + * @arg @ref LL_RCC_PLLM_DIV_32 + * @arg @ref LL_RCC_PLLM_DIV_33 + * @arg @ref LL_RCC_PLLM_DIV_34 + * @arg @ref LL_RCC_PLLM_DIV_35 + * @arg @ref LL_RCC_PLLM_DIV_36 + * @arg @ref LL_RCC_PLLM_DIV_37 + * @arg @ref LL_RCC_PLLM_DIV_38 + * @arg @ref LL_RCC_PLLM_DIV_39 + * @arg @ref LL_RCC_PLLM_DIV_40 + * @arg @ref LL_RCC_PLLM_DIV_41 + * @arg @ref LL_RCC_PLLM_DIV_42 + * @arg @ref LL_RCC_PLLM_DIV_43 + * @arg @ref LL_RCC_PLLM_DIV_44 + * @arg @ref LL_RCC_PLLM_DIV_45 + * @arg @ref LL_RCC_PLLM_DIV_46 + * @arg @ref LL_RCC_PLLM_DIV_47 + * @arg @ref LL_RCC_PLLM_DIV_48 + * @arg @ref LL_RCC_PLLM_DIV_49 + * @arg @ref LL_RCC_PLLM_DIV_50 + * @arg @ref LL_RCC_PLLM_DIV_51 + * @arg @ref LL_RCC_PLLM_DIV_52 + * @arg @ref LL_RCC_PLLM_DIV_53 + * @arg @ref LL_RCC_PLLM_DIV_54 + * @arg @ref LL_RCC_PLLM_DIV_55 + * @arg @ref LL_RCC_PLLM_DIV_56 + * @arg @ref LL_RCC_PLLM_DIV_57 + * @arg @ref LL_RCC_PLLM_DIV_58 + * @arg @ref LL_RCC_PLLM_DIV_59 + * @arg @ref LL_RCC_PLLM_DIV_60 + * @arg @ref LL_RCC_PLLM_DIV_61 + * @arg @ref LL_RCC_PLLM_DIV_62 + * @arg @ref LL_RCC_PLLM_DIV_63 + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetDivider(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLM)); +} + +/** + * @brief Configure Spread Spectrum used for PLL + * @note These bits must be written before enabling PLL + * @rmtoll SSCGR MODPER LL_RCC_PLL_ConfigSpreadSpectrum\n + * SSCGR INCSTEP LL_RCC_PLL_ConfigSpreadSpectrum\n + * SSCGR SPREADSEL LL_RCC_PLL_ConfigSpreadSpectrum + * @param Mod Between Min_Data=0 and Max_Data=8191 + * @param Inc Between Min_Data=0 and Max_Data=32767 + * @param Sel This parameter can be one of the following values: + * @arg @ref LL_RCC_SPREAD_SELECT_CENTER + * @arg @ref LL_RCC_SPREAD_SELECT_DOWN + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_ConfigSpreadSpectrum(uint32_t Mod, uint32_t Inc, uint32_t Sel) +{ + MODIFY_REG(RCC->SSCGR, RCC_SSCGR_MODPER | RCC_SSCGR_INCSTEP | RCC_SSCGR_SPREADSEL, Mod | (Inc << RCC_SSCGR_INCSTEP_Pos) | Sel); +} + +/** + * 
@brief Get Spread Spectrum Modulation Period for PLL + * @rmtoll SSCGR MODPER LL_RCC_PLL_GetPeriodModulation + * @retval Between Min_Data=0 and Max_Data=8191 + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetPeriodModulation(void) +{ + return (uint32_t)(READ_BIT(RCC->SSCGR, RCC_SSCGR_MODPER)); +} + +/** + * @brief Get Spread Spectrum Incrementation Step for PLL + * @note Must be written before enabling PLL + * @rmtoll SSCGR INCSTEP LL_RCC_PLL_GetStepIncrementation + * @retval Between Min_Data=0 and Max_Data=32767 + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetStepIncrementation(void) +{ + return (uint32_t)(READ_BIT(RCC->SSCGR, RCC_SSCGR_INCSTEP) >> RCC_SSCGR_INCSTEP_Pos); +} + +/** + * @brief Get Spread Spectrum Selection for PLL + * @note Must be written before enabling PLL + * @rmtoll SSCGR SPREADSEL LL_RCC_PLL_GetSpreadSelection + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_SPREAD_SELECT_CENTER + * @arg @ref LL_RCC_SPREAD_SELECT_DOWN + */ +__STATIC_INLINE uint32_t LL_RCC_PLL_GetSpreadSelection(void) +{ + return (uint32_t)(READ_BIT(RCC->SSCGR, RCC_SSCGR_SPREADSEL)); +} + +/** + * @brief Enable Spread Spectrum for PLL. + * @rmtoll SSCGR SSCGEN LL_RCC_PLL_SpreadSpectrum_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_SpreadSpectrum_Enable(void) +{ + SET_BIT(RCC->SSCGR, RCC_SSCGR_SSCGEN); +} + +/** + * @brief Disable Spread Spectrum for PLL. + * @rmtoll SSCGR SSCGEN LL_RCC_PLL_SpreadSpectrum_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLL_SpreadSpectrum_Disable(void) +{ + CLEAR_BIT(RCC->SSCGR, RCC_SSCGR_SSCGEN); +} + +/** + * @} + */ + +#if defined(RCC_PLLI2S_SUPPORT) +/** @defgroup RCC_LL_EF_PLLI2S PLLI2S + * @{ + */ + +/** + * @brief Enable PLLI2S + * @rmtoll CR PLLI2SON LL_RCC_PLLI2S_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLI2S_Enable(void) +{ + SET_BIT(RCC->CR, RCC_CR_PLLI2SON); +} + +/** + * @brief Disable PLLI2S + * @rmtoll CR PLLI2SON LL_RCC_PLLI2S_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLI2S_Disable(void) +{ + CLEAR_BIT(RCC->CR, RCC_CR_PLLI2SON); +} + +/** + * @brief Check if PLLI2S Ready + * @rmtoll CR PLLI2SRDY LL_RCC_PLLI2S_IsReady + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_IsReady(void) +{ + return (READ_BIT(RCC->CR, RCC_CR_PLLI2SRDY) == (RCC_CR_PLLI2SRDY)); +} + +#if (defined(RCC_DCKCFGR_PLLI2SDIVQ) || defined(RCC_DCKCFGR_PLLI2SDIVR)) +/** + * @brief Configure PLLI2S used for SAI domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLQ/PLLR can be written only when PLLI2S is disabled + * @note This can be selected for SAI + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLI2S_ConfigDomain_SAI\n + * PLLI2SCFGR PLLI2SSRC LL_RCC_PLLI2S_ConfigDomain_SAI\n + * PLLCFGR PLLM LL_RCC_PLLI2S_ConfigDomain_SAI\n + * PLLI2SCFGR PLLI2SM LL_RCC_PLLI2S_ConfigDomain_SAI\n + * PLLI2SCFGR PLLI2SN LL_RCC_PLLI2S_ConfigDomain_SAI\n + * PLLI2SCFGR PLLI2SQ LL_RCC_PLLI2S_ConfigDomain_SAI\n + * PLLI2SCFGR PLLI2SR LL_RCC_PLLI2S_ConfigDomain_SAI\n + * DCKCFGR PLLI2SDIVQ LL_RCC_PLLI2S_ConfigDomain_SAI\n + * DCKCFGR PLLI2SDIVR LL_RCC_PLLI2S_ConfigDomain_SAI + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @arg @ref LL_RCC_PLLI2SSOURCE_PIN (*) + * + * (*) value not defined in all devices. 
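For completeness, a sketch of enabling spread-spectrum modulation on the main PLL. The MODPER/INCSTEP values below are placeholders only; they have to be computed from the actual PLL input clock and the desired modulation depth per the reference manual, and the whole configuration must be written before the PLL is enabled.

```c
/* Sketch: center-spread modulation on the main PLL (placeholder values). */
#include "stm32f4xx_ll_rcc.h"

static void pll_spread_spectrum_on(void)
{
  LL_RCC_PLL_ConfigSpreadSpectrum(250U, 25U, LL_RCC_SPREAD_SELECT_CENTER);
  LL_RCC_PLL_SpreadSpectrum_Enable();
  /* LL_RCC_PLL_Enable() follows, as in the earlier SYSCLK sketch */
}
```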
+ * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param PLLN Between 50/192(*) and 432 + * + * (*) value not defined in all devices. + * @param PLLQ_R This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SQ_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_7 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_8 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_9 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_10 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_11 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_12 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_13 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_14 (*) + * @arg @ref LL_RCC_PLLI2SQ_DIV_15 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SR_DIV_7 (*) + * + * (*) value not defined in all devices. 
+ * @param PLLDIVQ_R This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_1 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_7 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_8 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_9 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_10 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_11 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_12 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_13 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_14 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_15 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_16 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_17 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_18 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_19 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_20 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_21 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_22 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_23 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_24 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_25 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_26 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_27 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_28 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_29 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_30 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_31 (*) + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_32 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_1 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_2 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_3 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_4 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_5 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_6 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_7 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_8 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_9 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_10 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_11 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_12 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_13 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_14 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_15 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_16 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_17 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_18 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_19 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_20 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_21 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_22 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_23 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_24 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_25 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_26 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_27 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_28 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_29 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_30 (*) + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_31 (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLI2S_ConfigDomain_SAI(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLQ_R, + uint32_t PLLDIVQ_R) +{ + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&RCC->PLLCFGR) + (Source & 0x80U))); + MODIFY_REG(*pReg, RCC_PLLCFGR_PLLSRC, (Source & (~0x80U))); +#if defined(RCC_PLLI2SCFGR_PLLI2SM) + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SM, PLLM); +#else + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, PLLM); +#endif /* RCC_PLLI2SCFGR_PLLI2SM */ + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SN, PLLN << RCC_PLLI2SCFGR_PLLI2SN_Pos); +#if defined(RCC_DCKCFGR_PLLI2SDIVQ) + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SQ, PLLQ_R); + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLI2SDIVQ, PLLDIVQ_R); +#else + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SR, PLLQ_R); + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLI2SDIVR, PLLDIVQ_R); +#endif /* RCC_DCKCFGR_PLLI2SDIVQ */ +} +#endif /* RCC_DCKCFGR_PLLI2SDIVQ && RCC_DCKCFGR_PLLI2SDIVR */ + +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) && !defined(RCC_DCKCFGR_PLLI2SDIVQ) +/** + * @brief Configure PLLI2S used for 48Mhz domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLQ can be written only when PLLI2S is disabled + * @note This can be selected for RNG, USB, SDIO + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLI2S_ConfigDomain_48M\n + * PLLI2SCFGR PLLI2SSRC LL_RCC_PLLI2S_ConfigDomain_48M\n + * PLLCFGR PLLM LL_RCC_PLLI2S_ConfigDomain_48M\n + * PLLI2SCFGR PLLI2SM LL_RCC_PLLI2S_ConfigDomain_48M\n + * PLLI2SCFGR PLLI2SN LL_RCC_PLLI2S_ConfigDomain_48M\n + * PLLI2SCFGR PLLI2SQ LL_RCC_PLLI2S_ConfigDomain_48M + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @arg @ref LL_RCC_PLLI2SSOURCE_PIN (*) + * + * (*) value not defined in all devices. 
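A sketch of driving a SAI kernel clock from PLLI2S with the function above. The divider values are placeholders to be recomputed for the required audio sample rate, and whether the Q/DIVQ or R/DIVR pair applies depends on the device (see the #if branches in the implementation):

```c
/* Sketch: program PLLI2S for a SAI kernel clock and start it. */
#include "stm32f4xx_ll_rcc.h"

static void plli2s_for_sai(void)
{
  LL_RCC_PLLI2S_ConfigDomain_SAI(LL_RCC_PLLSOURCE_HSE,
                                 LL_RCC_PLLI2SM_DIV_8, 192U,
                                 LL_RCC_PLLI2SQ_DIV_4, LL_RCC_PLLI2SDIVQ_DIV_2);

  LL_RCC_PLLI2S_Enable();
  while (LL_RCC_PLLI2S_IsReady() != 1U) { }
}
```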
+ * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param PLLN Between 50 and 432 + * @param PLLQ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SQ_DIV_2 + * @arg @ref LL_RCC_PLLI2SQ_DIV_3 + * @arg @ref LL_RCC_PLLI2SQ_DIV_4 + * @arg @ref LL_RCC_PLLI2SQ_DIV_5 + * @arg @ref LL_RCC_PLLI2SQ_DIV_6 + * @arg @ref LL_RCC_PLLI2SQ_DIV_7 + * @arg @ref LL_RCC_PLLI2SQ_DIV_8 + * @arg @ref LL_RCC_PLLI2SQ_DIV_9 + * @arg @ref LL_RCC_PLLI2SQ_DIV_10 + * @arg @ref LL_RCC_PLLI2SQ_DIV_11 + * @arg @ref LL_RCC_PLLI2SQ_DIV_12 + * @arg @ref LL_RCC_PLLI2SQ_DIV_13 + * @arg @ref LL_RCC_PLLI2SQ_DIV_14 + * @arg @ref LL_RCC_PLLI2SQ_DIV_15 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLI2S_ConfigDomain_48M(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLQ) +{ + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&RCC->PLLCFGR) + (Source & 0x80U))); + MODIFY_REG(*pReg, RCC_PLLCFGR_PLLSRC, (Source & (~0x80U))); +#if defined(RCC_PLLI2SCFGR_PLLI2SM) + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SM, PLLM); +#else + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, PLLM); +#endif /* RCC_PLLI2SCFGR_PLLI2SM */ + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SN | RCC_PLLI2SCFGR_PLLI2SQ, PLLN << RCC_PLLI2SCFGR_PLLI2SN_Pos | PLLQ); +} +#endif /* RCC_PLLI2SCFGR_PLLI2SQ && 
!RCC_DCKCFGR_PLLI2SDIVQ */ + +#if defined(SPDIFRX) +/** + * @brief Configure PLLI2S used for SPDIFRX domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLP can be written only when PLLI2S is disabled + * @note This can be selected for SPDIFRX + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLI2S_ConfigDomain_SPDIFRX\n + * PLLCFGR PLLM LL_RCC_PLLI2S_ConfigDomain_SPDIFRX\n + * PLLI2SCFGR PLLI2SM LL_RCC_PLLI2S_ConfigDomain_SPDIFRX\n + * PLLI2SCFGR PLLI2SN LL_RCC_PLLI2S_ConfigDomain_SPDIFRX\n + * PLLI2SCFGR PLLI2SP LL_RCC_PLLI2S_ConfigDomain_SPDIFRX + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param PLLN Between 50 and 432 + * @param PLLP This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SP_DIV_2 + * @arg @ref LL_RCC_PLLI2SP_DIV_4 + * @arg @ref LL_RCC_PLLI2SP_DIV_6 + * @arg @ref LL_RCC_PLLI2SP_DIV_8 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLI2S_ConfigDomain_SPDIFRX(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLP) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC, Source); +#if defined(RCC_PLLI2SCFGR_PLLI2SM) + 
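/* Editorial note: devices with a dedicated PLLI2SM field take the I2S-PLL input divider
+     from PLLI2SCFGR here; other devices reuse the shared PLLM field in PLLCFGR below. */
+ 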
MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SM, PLLM); +#else + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, PLLM); +#endif /* RCC_PLLI2SCFGR_PLLI2SM */ + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SN | RCC_PLLI2SCFGR_PLLI2SP, PLLN << RCC_PLLI2SCFGR_PLLI2SN_Pos | PLLP); +} +#endif /* SPDIFRX */ + +/** + * @brief Configure PLLI2S used for I2S1 domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLR can be written only when PLLI2S is disabled + * @note This can be selected for I2S + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLI2S_ConfigDomain_I2S\n + * PLLCFGR PLLM LL_RCC_PLLI2S_ConfigDomain_I2S\n + * PLLI2SCFGR PLLI2SSRC LL_RCC_PLLI2S_ConfigDomain_I2S\n + * PLLI2SCFGR PLLI2SM LL_RCC_PLLI2S_ConfigDomain_I2S\n + * PLLI2SCFGR PLLI2SN LL_RCC_PLLI2S_ConfigDomain_I2S\n + * PLLI2SCFGR PLLI2SR LL_RCC_PLLI2S_ConfigDomain_I2S + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @arg @ref LL_RCC_PLLI2SSOURCE_PIN (*) + * + * (*) value not defined in all devices. + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + * @param PLLN Between 50/192(*) and 432 + * + * (*) value not defined in all devices. 
+ * @param PLLR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLI2SR_DIV_2 + * @arg @ref LL_RCC_PLLI2SR_DIV_3 + * @arg @ref LL_RCC_PLLI2SR_DIV_4 + * @arg @ref LL_RCC_PLLI2SR_DIV_5 + * @arg @ref LL_RCC_PLLI2SR_DIV_6 + * @arg @ref LL_RCC_PLLI2SR_DIV_7 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLI2S_ConfigDomain_I2S(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLR) +{ + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&RCC->PLLCFGR) + (Source & 0x80U))); + MODIFY_REG(*pReg, RCC_PLLCFGR_PLLSRC, (Source & (~0x80U))); +#if defined(RCC_PLLI2SCFGR_PLLI2SM) + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SM, PLLM); +#else + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, PLLM); +#endif /* RCC_PLLI2SCFGR_PLLI2SM */ + MODIFY_REG(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SN | RCC_PLLI2SCFGR_PLLI2SR, PLLN << RCC_PLLI2SCFGR_PLLI2SN_Pos | PLLR); +} + +/** + * @brief Get I2SPLL multiplication factor for VCO + * @rmtoll PLLI2SCFGR PLLI2SN LL_RCC_PLLI2S_GetN + * @retval Between 50/192(*) and 432 + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetN(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SN) >> RCC_PLLI2SCFGR_PLLI2SN_Pos); +} + +#if defined(RCC_PLLI2SCFGR_PLLI2SQ) +/** + * @brief Get I2SPLL division factor for PLLI2SQ + * @rmtoll PLLI2SCFGR PLLI2SQ LL_RCC_PLLI2S_GetQ + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLI2SQ_DIV_2 + * @arg @ref LL_RCC_PLLI2SQ_DIV_3 + * @arg @ref LL_RCC_PLLI2SQ_DIV_4 + * @arg @ref LL_RCC_PLLI2SQ_DIV_5 + * @arg @ref LL_RCC_PLLI2SQ_DIV_6 + * @arg @ref LL_RCC_PLLI2SQ_DIV_7 + * @arg @ref LL_RCC_PLLI2SQ_DIV_8 + * @arg @ref LL_RCC_PLLI2SQ_DIV_9 + * @arg @ref LL_RCC_PLLI2SQ_DIV_10 + * @arg @ref LL_RCC_PLLI2SQ_DIV_11 + * @arg @ref LL_RCC_PLLI2SQ_DIV_12 + * @arg @ref LL_RCC_PLLI2SQ_DIV_13 + * @arg @ref LL_RCC_PLLI2SQ_DIV_14 + * @arg @ref LL_RCC_PLLI2SQ_DIV_15 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetQ(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SQ)); +} +#endif /* RCC_PLLI2SCFGR_PLLI2SQ */ + +/** + * @brief Get I2SPLL division factor for PLLI2SR + * @note used for PLLI2SCLK (I2S clock) + * @rmtoll PLLI2SCFGR PLLI2SR LL_RCC_PLLI2S_GetR + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLI2SR_DIV_2 + * @arg @ref LL_RCC_PLLI2SR_DIV_3 + * @arg @ref LL_RCC_PLLI2SR_DIV_4 + * @arg @ref LL_RCC_PLLI2SR_DIV_5 + * @arg @ref LL_RCC_PLLI2SR_DIV_6 + * @arg @ref LL_RCC_PLLI2SR_DIV_7 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetR(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SR)); +} + +#if defined(RCC_PLLI2SCFGR_PLLI2SP) +/** + * @brief Get I2SPLL division factor for PLLI2SP + * @note used for PLLSPDIFRXCLK (SPDIFRX clock) + * @rmtoll PLLI2SCFGR PLLI2SP LL_RCC_PLLI2S_GetP + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLI2SP_DIV_2 + * @arg @ref LL_RCC_PLLI2SP_DIV_4 + * @arg @ref LL_RCC_PLLI2SP_DIV_6 + * @arg @ref LL_RCC_PLLI2SP_DIV_8 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetP(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SP)); +} +#endif /* RCC_PLLI2SCFGR_PLLI2SP */ + +#if defined(RCC_DCKCFGR_PLLI2SDIVQ) +/** + * @brief Get I2SPLL division factor for PLLI2SDIVQ + * @note used PLLSAICLK selected (SAI clock) + * @rmtoll DCKCFGR PLLI2SDIVQ LL_RCC_PLLI2S_GetDIVQ + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_1 + * 
@arg @ref LL_RCC_PLLI2SDIVQ_DIV_2 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_3 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_4 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_5 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_6 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_7 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_8 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_9 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_10 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_11 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_12 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_13 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_14 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_15 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_16 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_17 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_18 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_19 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_20 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_21 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_22 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_23 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_24 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_25 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_26 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_27 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_28 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_29 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_30 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_31 + * @arg @ref LL_RCC_PLLI2SDIVQ_DIV_32 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetDIVQ(void) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_PLLI2SDIVQ)); +} +#endif /* RCC_DCKCFGR_PLLI2SDIVQ */ + +#if defined(RCC_DCKCFGR_PLLI2SDIVR) +/** + * @brief Get I2SPLL division factor for PLLI2SDIVR + * @note used PLLSAICLK selected (SAI clock) + * @rmtoll DCKCFGR PLLI2SDIVR LL_RCC_PLLI2S_GetDIVR + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_1 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_2 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_3 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_4 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_5 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_6 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_7 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_8 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_9 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_10 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_11 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_12 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_13 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_14 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_15 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_16 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_17 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_18 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_19 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_20 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_21 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_22 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_23 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_24 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_25 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_26 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_27 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_28 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_29 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_30 + * @arg @ref LL_RCC_PLLI2SDIVR_DIV_31 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetDIVR(void) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_PLLI2SDIVR)); +} +#endif /* RCC_DCKCFGR_PLLI2SDIVR */ + +/** + * @brief Get division factor for PLLI2S input clock + * @rmtoll PLLCFGR PLLM LL_RCC_PLLI2S_GetDivider\n + * PLLI2SCFGR PLLI2SM LL_RCC_PLLI2S_GetDivider + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLI2SM_DIV_2 + * @arg @ref LL_RCC_PLLI2SM_DIV_3 + * @arg @ref LL_RCC_PLLI2SM_DIV_4 + * @arg @ref LL_RCC_PLLI2SM_DIV_5 + * @arg @ref LL_RCC_PLLI2SM_DIV_6 + * @arg @ref LL_RCC_PLLI2SM_DIV_7 + * @arg @ref LL_RCC_PLLI2SM_DIV_8 + * @arg @ref LL_RCC_PLLI2SM_DIV_9 + * @arg @ref LL_RCC_PLLI2SM_DIV_10 + * @arg @ref 
LL_RCC_PLLI2SM_DIV_11 + * @arg @ref LL_RCC_PLLI2SM_DIV_12 + * @arg @ref LL_RCC_PLLI2SM_DIV_13 + * @arg @ref LL_RCC_PLLI2SM_DIV_14 + * @arg @ref LL_RCC_PLLI2SM_DIV_15 + * @arg @ref LL_RCC_PLLI2SM_DIV_16 + * @arg @ref LL_RCC_PLLI2SM_DIV_17 + * @arg @ref LL_RCC_PLLI2SM_DIV_18 + * @arg @ref LL_RCC_PLLI2SM_DIV_19 + * @arg @ref LL_RCC_PLLI2SM_DIV_20 + * @arg @ref LL_RCC_PLLI2SM_DIV_21 + * @arg @ref LL_RCC_PLLI2SM_DIV_22 + * @arg @ref LL_RCC_PLLI2SM_DIV_23 + * @arg @ref LL_RCC_PLLI2SM_DIV_24 + * @arg @ref LL_RCC_PLLI2SM_DIV_25 + * @arg @ref LL_RCC_PLLI2SM_DIV_26 + * @arg @ref LL_RCC_PLLI2SM_DIV_27 + * @arg @ref LL_RCC_PLLI2SM_DIV_28 + * @arg @ref LL_RCC_PLLI2SM_DIV_29 + * @arg @ref LL_RCC_PLLI2SM_DIV_30 + * @arg @ref LL_RCC_PLLI2SM_DIV_31 + * @arg @ref LL_RCC_PLLI2SM_DIV_32 + * @arg @ref LL_RCC_PLLI2SM_DIV_33 + * @arg @ref LL_RCC_PLLI2SM_DIV_34 + * @arg @ref LL_RCC_PLLI2SM_DIV_35 + * @arg @ref LL_RCC_PLLI2SM_DIV_36 + * @arg @ref LL_RCC_PLLI2SM_DIV_37 + * @arg @ref LL_RCC_PLLI2SM_DIV_38 + * @arg @ref LL_RCC_PLLI2SM_DIV_39 + * @arg @ref LL_RCC_PLLI2SM_DIV_40 + * @arg @ref LL_RCC_PLLI2SM_DIV_41 + * @arg @ref LL_RCC_PLLI2SM_DIV_42 + * @arg @ref LL_RCC_PLLI2SM_DIV_43 + * @arg @ref LL_RCC_PLLI2SM_DIV_44 + * @arg @ref LL_RCC_PLLI2SM_DIV_45 + * @arg @ref LL_RCC_PLLI2SM_DIV_46 + * @arg @ref LL_RCC_PLLI2SM_DIV_47 + * @arg @ref LL_RCC_PLLI2SM_DIV_48 + * @arg @ref LL_RCC_PLLI2SM_DIV_49 + * @arg @ref LL_RCC_PLLI2SM_DIV_50 + * @arg @ref LL_RCC_PLLI2SM_DIV_51 + * @arg @ref LL_RCC_PLLI2SM_DIV_52 + * @arg @ref LL_RCC_PLLI2SM_DIV_53 + * @arg @ref LL_RCC_PLLI2SM_DIV_54 + * @arg @ref LL_RCC_PLLI2SM_DIV_55 + * @arg @ref LL_RCC_PLLI2SM_DIV_56 + * @arg @ref LL_RCC_PLLI2SM_DIV_57 + * @arg @ref LL_RCC_PLLI2SM_DIV_58 + * @arg @ref LL_RCC_PLLI2SM_DIV_59 + * @arg @ref LL_RCC_PLLI2SM_DIV_60 + * @arg @ref LL_RCC_PLLI2SM_DIV_61 + * @arg @ref LL_RCC_PLLI2SM_DIV_62 + * @arg @ref LL_RCC_PLLI2SM_DIV_63 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetDivider(void) +{ +#if defined(RCC_PLLI2SCFGR_PLLI2SM) + return (uint32_t)(READ_BIT(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SM)); +#else + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLM)); +#endif /* RCC_PLLI2SCFGR_PLLI2SM */ +} + +/** + * @brief Get the oscillator used as PLL clock source. + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLI2S_GetMainSource\n + * PLLI2SCFGR PLLI2SSRC LL_RCC_PLLI2S_GetMainSource + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @arg @ref LL_RCC_PLLI2SSOURCE_PIN (*) + * + * (*) value not defined in all devices. 
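+ * @note Editorial illustration (not part of the original header): a typical sequence
+ *       for clocking I2S from PLLI2S with the helpers above, assuming the PLLI2S
+ *       enable/ready functions defined earlier in this file and arbitrary example
+ *       divider values:
+ * @code
+ * LL_RCC_PLLI2S_Disable();
+ * while (LL_RCC_PLLI2S_IsReady() != 0U) {}
+ * LL_RCC_PLLI2S_ConfigDomain_I2S(LL_RCC_PLLSOURCE_HSE, LL_RCC_PLLI2SM_DIV_8, 192U, LL_RCC_PLLI2SR_DIV_2);
+ * LL_RCC_PLLI2S_Enable();
+ * while (LL_RCC_PLLI2S_IsReady() != 1U) {}
+ * @endcode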
+ */ +__STATIC_INLINE uint32_t LL_RCC_PLLI2S_GetMainSource(void) +{ +#if defined(RCC_PLLI2SCFGR_PLLI2SSRC) + uint32_t pllsrc = READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC); + uint32_t plli2sssrc0 = READ_BIT(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SSRC); + uint32_t plli2sssrc1 = READ_BIT(RCC->PLLI2SCFGR, RCC_PLLI2SCFGR_PLLI2SSRC) >> 15U; + return (uint32_t)(pllsrc | plli2sssrc0 | plli2sssrc1); +#else + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC)); +#endif /* RCC_PLLI2SCFGR_PLLI2SSRC */ +} + +/** + * @} + */ +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** @defgroup RCC_LL_EF_PLLSAI PLLSAI + * @{ + */ + +/** + * @brief Enable PLLSAI + * @rmtoll CR PLLSAION LL_RCC_PLLSAI_Enable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLSAI_Enable(void) +{ + SET_BIT(RCC->CR, RCC_CR_PLLSAION); +} + +/** + * @brief Disable PLLSAI + * @rmtoll CR PLLSAION LL_RCC_PLLSAI_Disable + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLSAI_Disable(void) +{ + CLEAR_BIT(RCC->CR, RCC_CR_PLLSAION); +} + +/** + * @brief Check if PLLSAI Ready + * @rmtoll CR PLLSAIRDY LL_RCC_PLLSAI_IsReady + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_IsReady(void) +{ + return (READ_BIT(RCC->CR, RCC_CR_PLLSAIRDY) == (RCC_CR_PLLSAIRDY)); +} + +/** + * @brief Configure PLLSAI used for SAI domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLQ can be written only when PLLSAI is disabled + * @note This can be selected for SAI + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLSAI_ConfigDomain_SAI\n + * PLLCFGR PLLM LL_RCC_PLLSAI_ConfigDomain_SAI\n + * PLLSAICFGR PLLSAIM LL_RCC_PLLSAI_ConfigDomain_SAI\n + * PLLSAICFGR PLLSAIN LL_RCC_PLLSAI_ConfigDomain_SAI\n + * PLLSAICFGR PLLSAIQ LL_RCC_PLLSAI_ConfigDomain_SAI\n + * DCKCFGR PLLSAIDIVQ LL_RCC_PLLSAI_ConfigDomain_SAI + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIM_DIV_2 + * @arg @ref LL_RCC_PLLSAIM_DIV_3 + * @arg @ref LL_RCC_PLLSAIM_DIV_4 + * @arg @ref LL_RCC_PLLSAIM_DIV_5 + * @arg @ref LL_RCC_PLLSAIM_DIV_6 + * @arg @ref LL_RCC_PLLSAIM_DIV_7 + * @arg @ref LL_RCC_PLLSAIM_DIV_8 + * @arg @ref LL_RCC_PLLSAIM_DIV_9 + * @arg @ref LL_RCC_PLLSAIM_DIV_10 + * @arg @ref LL_RCC_PLLSAIM_DIV_11 + * @arg @ref LL_RCC_PLLSAIM_DIV_12 + * @arg @ref LL_RCC_PLLSAIM_DIV_13 + * @arg @ref LL_RCC_PLLSAIM_DIV_14 + * @arg @ref LL_RCC_PLLSAIM_DIV_15 + * @arg @ref LL_RCC_PLLSAIM_DIV_16 + * @arg @ref LL_RCC_PLLSAIM_DIV_17 + * @arg @ref LL_RCC_PLLSAIM_DIV_18 + * @arg @ref LL_RCC_PLLSAIM_DIV_19 + * @arg @ref LL_RCC_PLLSAIM_DIV_20 + * @arg @ref LL_RCC_PLLSAIM_DIV_21 + * @arg @ref LL_RCC_PLLSAIM_DIV_22 + * @arg @ref LL_RCC_PLLSAIM_DIV_23 + * @arg @ref LL_RCC_PLLSAIM_DIV_24 + * @arg @ref LL_RCC_PLLSAIM_DIV_25 + * @arg @ref LL_RCC_PLLSAIM_DIV_26 + * @arg @ref LL_RCC_PLLSAIM_DIV_27 + * @arg @ref LL_RCC_PLLSAIM_DIV_28 + * @arg @ref LL_RCC_PLLSAIM_DIV_29 + * @arg @ref LL_RCC_PLLSAIM_DIV_30 + * @arg @ref LL_RCC_PLLSAIM_DIV_31 + * @arg @ref LL_RCC_PLLSAIM_DIV_32 + * @arg @ref LL_RCC_PLLSAIM_DIV_33 + * @arg @ref LL_RCC_PLLSAIM_DIV_34 + * @arg @ref LL_RCC_PLLSAIM_DIV_35 + * @arg @ref LL_RCC_PLLSAIM_DIV_36 + * @arg @ref LL_RCC_PLLSAIM_DIV_37 + * @arg @ref LL_RCC_PLLSAIM_DIV_38 + * @arg @ref LL_RCC_PLLSAIM_DIV_39 + * @arg @ref LL_RCC_PLLSAIM_DIV_40 + * @arg @ref LL_RCC_PLLSAIM_DIV_41 + * @arg @ref 
LL_RCC_PLLSAIM_DIV_42 + * @arg @ref LL_RCC_PLLSAIM_DIV_43 + * @arg @ref LL_RCC_PLLSAIM_DIV_44 + * @arg @ref LL_RCC_PLLSAIM_DIV_45 + * @arg @ref LL_RCC_PLLSAIM_DIV_46 + * @arg @ref LL_RCC_PLLSAIM_DIV_47 + * @arg @ref LL_RCC_PLLSAIM_DIV_48 + * @arg @ref LL_RCC_PLLSAIM_DIV_49 + * @arg @ref LL_RCC_PLLSAIM_DIV_50 + * @arg @ref LL_RCC_PLLSAIM_DIV_51 + * @arg @ref LL_RCC_PLLSAIM_DIV_52 + * @arg @ref LL_RCC_PLLSAIM_DIV_53 + * @arg @ref LL_RCC_PLLSAIM_DIV_54 + * @arg @ref LL_RCC_PLLSAIM_DIV_55 + * @arg @ref LL_RCC_PLLSAIM_DIV_56 + * @arg @ref LL_RCC_PLLSAIM_DIV_57 + * @arg @ref LL_RCC_PLLSAIM_DIV_58 + * @arg @ref LL_RCC_PLLSAIM_DIV_59 + * @arg @ref LL_RCC_PLLSAIM_DIV_60 + * @arg @ref LL_RCC_PLLSAIM_DIV_61 + * @arg @ref LL_RCC_PLLSAIM_DIV_62 + * @arg @ref LL_RCC_PLLSAIM_DIV_63 + * @param PLLN Between 49/50(*) and 432 + * + * (*) value not defined in all devices. + * @param PLLQ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIQ_DIV_2 + * @arg @ref LL_RCC_PLLSAIQ_DIV_3 + * @arg @ref LL_RCC_PLLSAIQ_DIV_4 + * @arg @ref LL_RCC_PLLSAIQ_DIV_5 + * @arg @ref LL_RCC_PLLSAIQ_DIV_6 + * @arg @ref LL_RCC_PLLSAIQ_DIV_7 + * @arg @ref LL_RCC_PLLSAIQ_DIV_8 + * @arg @ref LL_RCC_PLLSAIQ_DIV_9 + * @arg @ref LL_RCC_PLLSAIQ_DIV_10 + * @arg @ref LL_RCC_PLLSAIQ_DIV_11 + * @arg @ref LL_RCC_PLLSAIQ_DIV_12 + * @arg @ref LL_RCC_PLLSAIQ_DIV_13 + * @arg @ref LL_RCC_PLLSAIQ_DIV_14 + * @arg @ref LL_RCC_PLLSAIQ_DIV_15 + * @param PLLDIVQ This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_1 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_2 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_3 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_4 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_5 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_6 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_7 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_8 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_9 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_10 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_11 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_12 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_13 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_14 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_15 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_16 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_17 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_18 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_19 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_20 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_21 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_22 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_23 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_24 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_25 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_26 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_27 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_28 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_29 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_30 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_31 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_32 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLSAI_ConfigDomain_SAI(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLQ, + uint32_t PLLDIVQ) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC, Source); +#if defined(RCC_PLLSAICFGR_PLLSAIM) + MODIFY_REG(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIM, PLLM); +#else + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, PLLM); +#endif /* RCC_PLLSAICFGR_PLLSAIM */ + MODIFY_REG(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIN | RCC_PLLSAICFGR_PLLSAIQ, PLLN << RCC_PLLSAICFGR_PLLSAIN_Pos | PLLQ); + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLSAIDIVQ, PLLDIVQ); +} + +#if defined(RCC_PLLSAICFGR_PLLSAIP) +/** + * @brief Configure PLLSAI used for 48Mhz domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note 
PLLN/PLLP can be written only when PLLSAI is disabled + * @note This can be selected for USB, RNG, SDIO + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLSAI_ConfigDomain_48M\n + * PLLCFGR PLLM LL_RCC_PLLSAI_ConfigDomain_48M\n + * PLLSAICFGR PLLSAIM LL_RCC_PLLSAI_ConfigDomain_48M\n + * PLLSAICFGR PLLSAIN LL_RCC_PLLSAI_ConfigDomain_48M\n + * PLLSAICFGR PLLSAIP LL_RCC_PLLSAI_ConfigDomain_48M + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIM_DIV_2 + * @arg @ref LL_RCC_PLLSAIM_DIV_3 + * @arg @ref LL_RCC_PLLSAIM_DIV_4 + * @arg @ref LL_RCC_PLLSAIM_DIV_5 + * @arg @ref LL_RCC_PLLSAIM_DIV_6 + * @arg @ref LL_RCC_PLLSAIM_DIV_7 + * @arg @ref LL_RCC_PLLSAIM_DIV_8 + * @arg @ref LL_RCC_PLLSAIM_DIV_9 + * @arg @ref LL_RCC_PLLSAIM_DIV_10 + * @arg @ref LL_RCC_PLLSAIM_DIV_11 + * @arg @ref LL_RCC_PLLSAIM_DIV_12 + * @arg @ref LL_RCC_PLLSAIM_DIV_13 + * @arg @ref LL_RCC_PLLSAIM_DIV_14 + * @arg @ref LL_RCC_PLLSAIM_DIV_15 + * @arg @ref LL_RCC_PLLSAIM_DIV_16 + * @arg @ref LL_RCC_PLLSAIM_DIV_17 + * @arg @ref LL_RCC_PLLSAIM_DIV_18 + * @arg @ref LL_RCC_PLLSAIM_DIV_19 + * @arg @ref LL_RCC_PLLSAIM_DIV_20 + * @arg @ref LL_RCC_PLLSAIM_DIV_21 + * @arg @ref LL_RCC_PLLSAIM_DIV_22 + * @arg @ref LL_RCC_PLLSAIM_DIV_23 + * @arg @ref LL_RCC_PLLSAIM_DIV_24 + * @arg @ref LL_RCC_PLLSAIM_DIV_25 + * @arg @ref LL_RCC_PLLSAIM_DIV_26 + * @arg @ref LL_RCC_PLLSAIM_DIV_27 + * @arg @ref LL_RCC_PLLSAIM_DIV_28 + * @arg @ref LL_RCC_PLLSAIM_DIV_29 + * @arg @ref LL_RCC_PLLSAIM_DIV_30 + * @arg @ref LL_RCC_PLLSAIM_DIV_31 + * @arg @ref LL_RCC_PLLSAIM_DIV_32 + * @arg @ref LL_RCC_PLLSAIM_DIV_33 + * @arg @ref LL_RCC_PLLSAIM_DIV_34 + * @arg @ref LL_RCC_PLLSAIM_DIV_35 + * @arg @ref LL_RCC_PLLSAIM_DIV_36 + * @arg @ref LL_RCC_PLLSAIM_DIV_37 + * @arg @ref LL_RCC_PLLSAIM_DIV_38 + * @arg @ref LL_RCC_PLLSAIM_DIV_39 + * @arg @ref LL_RCC_PLLSAIM_DIV_40 + * @arg @ref LL_RCC_PLLSAIM_DIV_41 + * @arg @ref LL_RCC_PLLSAIM_DIV_42 + * @arg @ref LL_RCC_PLLSAIM_DIV_43 + * @arg @ref LL_RCC_PLLSAIM_DIV_44 + * @arg @ref LL_RCC_PLLSAIM_DIV_45 + * @arg @ref LL_RCC_PLLSAIM_DIV_46 + * @arg @ref LL_RCC_PLLSAIM_DIV_47 + * @arg @ref LL_RCC_PLLSAIM_DIV_48 + * @arg @ref LL_RCC_PLLSAIM_DIV_49 + * @arg @ref LL_RCC_PLLSAIM_DIV_50 + * @arg @ref LL_RCC_PLLSAIM_DIV_51 + * @arg @ref LL_RCC_PLLSAIM_DIV_52 + * @arg @ref LL_RCC_PLLSAIM_DIV_53 + * @arg @ref LL_RCC_PLLSAIM_DIV_54 + * @arg @ref LL_RCC_PLLSAIM_DIV_55 + * @arg @ref LL_RCC_PLLSAIM_DIV_56 + * @arg @ref LL_RCC_PLLSAIM_DIV_57 + * @arg @ref LL_RCC_PLLSAIM_DIV_58 + * @arg @ref LL_RCC_PLLSAIM_DIV_59 + * @arg @ref LL_RCC_PLLSAIM_DIV_60 + * @arg @ref LL_RCC_PLLSAIM_DIV_61 + * @arg @ref LL_RCC_PLLSAIM_DIV_62 + * @arg @ref LL_RCC_PLLSAIM_DIV_63 + * @param PLLN Between 50 and 432 + * @param PLLP This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIP_DIV_2 + * @arg @ref LL_RCC_PLLSAIP_DIV_4 + * @arg @ref LL_RCC_PLLSAIP_DIV_6 + * @arg @ref LL_RCC_PLLSAIP_DIV_8 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLSAI_ConfigDomain_48M(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLP) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC, Source); +#if defined(RCC_PLLSAICFGR_PLLSAIM) + MODIFY_REG(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIM, PLLM); +#else + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, PLLM); +#endif /* RCC_PLLSAICFGR_PLLSAIM */ + MODIFY_REG(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIN | RCC_PLLSAICFGR_PLLSAIP, PLLN << 
RCC_PLLSAICFGR_PLLSAIN_Pos | PLLP); +} +#endif /* RCC_PLLSAICFGR_PLLSAIP */ + +#if defined(LTDC) +/** + * @brief Configure PLLSAI used for LTDC domain clock + * @note PLL Source and PLLM Divider can be written only when PLL, + * PLLI2S and PLLSAI(*) are disabled + * @note PLLN/PLLR can be written only when PLLSAI is disabled + * @note This can be selected for LTDC + * @rmtoll PLLCFGR PLLSRC LL_RCC_PLLSAI_ConfigDomain_LTDC\n + * PLLCFGR PLLM LL_RCC_PLLSAI_ConfigDomain_LTDC\n + * PLLSAICFGR PLLSAIN LL_RCC_PLLSAI_ConfigDomain_LTDC\n + * PLLSAICFGR PLLSAIR LL_RCC_PLLSAI_ConfigDomain_LTDC\n + * DCKCFGR PLLSAIDIVR LL_RCC_PLLSAI_ConfigDomain_LTDC + * @param Source This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSOURCE_HSI + * @arg @ref LL_RCC_PLLSOURCE_HSE + * @param PLLM This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIM_DIV_2 + * @arg @ref LL_RCC_PLLSAIM_DIV_3 + * @arg @ref LL_RCC_PLLSAIM_DIV_4 + * @arg @ref LL_RCC_PLLSAIM_DIV_5 + * @arg @ref LL_RCC_PLLSAIM_DIV_6 + * @arg @ref LL_RCC_PLLSAIM_DIV_7 + * @arg @ref LL_RCC_PLLSAIM_DIV_8 + * @arg @ref LL_RCC_PLLSAIM_DIV_9 + * @arg @ref LL_RCC_PLLSAIM_DIV_10 + * @arg @ref LL_RCC_PLLSAIM_DIV_11 + * @arg @ref LL_RCC_PLLSAIM_DIV_12 + * @arg @ref LL_RCC_PLLSAIM_DIV_13 + * @arg @ref LL_RCC_PLLSAIM_DIV_14 + * @arg @ref LL_RCC_PLLSAIM_DIV_15 + * @arg @ref LL_RCC_PLLSAIM_DIV_16 + * @arg @ref LL_RCC_PLLSAIM_DIV_17 + * @arg @ref LL_RCC_PLLSAIM_DIV_18 + * @arg @ref LL_RCC_PLLSAIM_DIV_19 + * @arg @ref LL_RCC_PLLSAIM_DIV_20 + * @arg @ref LL_RCC_PLLSAIM_DIV_21 + * @arg @ref LL_RCC_PLLSAIM_DIV_22 + * @arg @ref LL_RCC_PLLSAIM_DIV_23 + * @arg @ref LL_RCC_PLLSAIM_DIV_24 + * @arg @ref LL_RCC_PLLSAIM_DIV_25 + * @arg @ref LL_RCC_PLLSAIM_DIV_26 + * @arg @ref LL_RCC_PLLSAIM_DIV_27 + * @arg @ref LL_RCC_PLLSAIM_DIV_28 + * @arg @ref LL_RCC_PLLSAIM_DIV_29 + * @arg @ref LL_RCC_PLLSAIM_DIV_30 + * @arg @ref LL_RCC_PLLSAIM_DIV_31 + * @arg @ref LL_RCC_PLLSAIM_DIV_32 + * @arg @ref LL_RCC_PLLSAIM_DIV_33 + * @arg @ref LL_RCC_PLLSAIM_DIV_34 + * @arg @ref LL_RCC_PLLSAIM_DIV_35 + * @arg @ref LL_RCC_PLLSAIM_DIV_36 + * @arg @ref LL_RCC_PLLSAIM_DIV_37 + * @arg @ref LL_RCC_PLLSAIM_DIV_38 + * @arg @ref LL_RCC_PLLSAIM_DIV_39 + * @arg @ref LL_RCC_PLLSAIM_DIV_40 + * @arg @ref LL_RCC_PLLSAIM_DIV_41 + * @arg @ref LL_RCC_PLLSAIM_DIV_42 + * @arg @ref LL_RCC_PLLSAIM_DIV_43 + * @arg @ref LL_RCC_PLLSAIM_DIV_44 + * @arg @ref LL_RCC_PLLSAIM_DIV_45 + * @arg @ref LL_RCC_PLLSAIM_DIV_46 + * @arg @ref LL_RCC_PLLSAIM_DIV_47 + * @arg @ref LL_RCC_PLLSAIM_DIV_48 + * @arg @ref LL_RCC_PLLSAIM_DIV_49 + * @arg @ref LL_RCC_PLLSAIM_DIV_50 + * @arg @ref LL_RCC_PLLSAIM_DIV_51 + * @arg @ref LL_RCC_PLLSAIM_DIV_52 + * @arg @ref LL_RCC_PLLSAIM_DIV_53 + * @arg @ref LL_RCC_PLLSAIM_DIV_54 + * @arg @ref LL_RCC_PLLSAIM_DIV_55 + * @arg @ref LL_RCC_PLLSAIM_DIV_56 + * @arg @ref LL_RCC_PLLSAIM_DIV_57 + * @arg @ref LL_RCC_PLLSAIM_DIV_58 + * @arg @ref LL_RCC_PLLSAIM_DIV_59 + * @arg @ref LL_RCC_PLLSAIM_DIV_60 + * @arg @ref LL_RCC_PLLSAIM_DIV_61 + * @arg @ref LL_RCC_PLLSAIM_DIV_62 + * @arg @ref LL_RCC_PLLSAIM_DIV_63 + * @param PLLN Between 49/50(*) and 432 + * + * (*) value not defined in all devices. 
+ * @param PLLR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIR_DIV_2 + * @arg @ref LL_RCC_PLLSAIR_DIV_3 + * @arg @ref LL_RCC_PLLSAIR_DIV_4 + * @arg @ref LL_RCC_PLLSAIR_DIV_5 + * @arg @ref LL_RCC_PLLSAIR_DIV_6 + * @arg @ref LL_RCC_PLLSAIR_DIV_7 + * @param PLLDIVR This parameter can be one of the following values: + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_2 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_4 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_8 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_16 + * @retval None + */ +__STATIC_INLINE void LL_RCC_PLLSAI_ConfigDomain_LTDC(uint32_t Source, uint32_t PLLM, uint32_t PLLN, uint32_t PLLR, + uint32_t PLLDIVR) +{ + MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC | RCC_PLLCFGR_PLLM, Source | PLLM); + MODIFY_REG(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIN | RCC_PLLSAICFGR_PLLSAIR, PLLN << RCC_PLLSAICFGR_PLLSAIN_Pos | PLLR); + MODIFY_REG(RCC->DCKCFGR, RCC_DCKCFGR_PLLSAIDIVR, PLLDIVR); +} +#endif /* LTDC */ + +/** + * @brief Get division factor for PLLSAI input clock + * @rmtoll PLLCFGR PLLM LL_RCC_PLLSAI_GetDivider\n + * PLLSAICFGR PLLSAIM LL_RCC_PLLSAI_GetDivider + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSAIM_DIV_2 + * @arg @ref LL_RCC_PLLSAIM_DIV_3 + * @arg @ref LL_RCC_PLLSAIM_DIV_4 + * @arg @ref LL_RCC_PLLSAIM_DIV_5 + * @arg @ref LL_RCC_PLLSAIM_DIV_6 + * @arg @ref LL_RCC_PLLSAIM_DIV_7 + * @arg @ref LL_RCC_PLLSAIM_DIV_8 + * @arg @ref LL_RCC_PLLSAIM_DIV_9 + * @arg @ref LL_RCC_PLLSAIM_DIV_10 + * @arg @ref LL_RCC_PLLSAIM_DIV_11 + * @arg @ref LL_RCC_PLLSAIM_DIV_12 + * @arg @ref LL_RCC_PLLSAIM_DIV_13 + * @arg @ref LL_RCC_PLLSAIM_DIV_14 + * @arg @ref LL_RCC_PLLSAIM_DIV_15 + * @arg @ref LL_RCC_PLLSAIM_DIV_16 + * @arg @ref LL_RCC_PLLSAIM_DIV_17 + * @arg @ref LL_RCC_PLLSAIM_DIV_18 + * @arg @ref LL_RCC_PLLSAIM_DIV_19 + * @arg @ref LL_RCC_PLLSAIM_DIV_20 + * @arg @ref LL_RCC_PLLSAIM_DIV_21 + * @arg @ref LL_RCC_PLLSAIM_DIV_22 + * @arg @ref LL_RCC_PLLSAIM_DIV_23 + * @arg @ref LL_RCC_PLLSAIM_DIV_24 + * @arg @ref LL_RCC_PLLSAIM_DIV_25 + * @arg @ref LL_RCC_PLLSAIM_DIV_26 + * @arg @ref LL_RCC_PLLSAIM_DIV_27 + * @arg @ref LL_RCC_PLLSAIM_DIV_28 + * @arg @ref LL_RCC_PLLSAIM_DIV_29 + * @arg @ref LL_RCC_PLLSAIM_DIV_30 + * @arg @ref LL_RCC_PLLSAIM_DIV_31 + * @arg @ref LL_RCC_PLLSAIM_DIV_32 + * @arg @ref LL_RCC_PLLSAIM_DIV_33 + * @arg @ref LL_RCC_PLLSAIM_DIV_34 + * @arg @ref LL_RCC_PLLSAIM_DIV_35 + * @arg @ref LL_RCC_PLLSAIM_DIV_36 + * @arg @ref LL_RCC_PLLSAIM_DIV_37 + * @arg @ref LL_RCC_PLLSAIM_DIV_38 + * @arg @ref LL_RCC_PLLSAIM_DIV_39 + * @arg @ref LL_RCC_PLLSAIM_DIV_40 + * @arg @ref LL_RCC_PLLSAIM_DIV_41 + * @arg @ref LL_RCC_PLLSAIM_DIV_42 + * @arg @ref LL_RCC_PLLSAIM_DIV_43 + * @arg @ref LL_RCC_PLLSAIM_DIV_44 + * @arg @ref LL_RCC_PLLSAIM_DIV_45 + * @arg @ref LL_RCC_PLLSAIM_DIV_46 + * @arg @ref LL_RCC_PLLSAIM_DIV_47 + * @arg @ref LL_RCC_PLLSAIM_DIV_48 + * @arg @ref LL_RCC_PLLSAIM_DIV_49 + * @arg @ref LL_RCC_PLLSAIM_DIV_50 + * @arg @ref LL_RCC_PLLSAIM_DIV_51 + * @arg @ref LL_RCC_PLLSAIM_DIV_52 + * @arg @ref LL_RCC_PLLSAIM_DIV_53 + * @arg @ref LL_RCC_PLLSAIM_DIV_54 + * @arg @ref LL_RCC_PLLSAIM_DIV_55 + * @arg @ref LL_RCC_PLLSAIM_DIV_56 + * @arg @ref LL_RCC_PLLSAIM_DIV_57 + * @arg @ref LL_RCC_PLLSAIM_DIV_58 + * @arg @ref LL_RCC_PLLSAIM_DIV_59 + * @arg @ref LL_RCC_PLLSAIM_DIV_60 + * @arg @ref LL_RCC_PLLSAIM_DIV_61 + * @arg @ref LL_RCC_PLLSAIM_DIV_62 + * @arg @ref LL_RCC_PLLSAIM_DIV_63 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_GetDivider(void) +{ +#if defined(RCC_PLLSAICFGR_PLLSAIM) + return (uint32_t)(READ_BIT(RCC->PLLSAICFGR, 
RCC_PLLSAICFGR_PLLSAIM)); +#else + return (uint32_t)(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLM)); +#endif /* RCC_PLLSAICFGR_PLLSAIM */ +} + +/** + * @brief Get SAIPLL multiplication factor for VCO + * @rmtoll PLLSAICFGR PLLSAIN LL_RCC_PLLSAI_GetN + * @retval Between 49/50(*) and 432 + * + * (*) value not defined in all devices. + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_GetN(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIN) >> RCC_PLLSAICFGR_PLLSAIN_Pos); +} + +/** + * @brief Get SAIPLL division factor for PLLSAIQ + * @rmtoll PLLSAICFGR PLLSAIQ LL_RCC_PLLSAI_GetQ + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSAIQ_DIV_2 + * @arg @ref LL_RCC_PLLSAIQ_DIV_3 + * @arg @ref LL_RCC_PLLSAIQ_DIV_4 + * @arg @ref LL_RCC_PLLSAIQ_DIV_5 + * @arg @ref LL_RCC_PLLSAIQ_DIV_6 + * @arg @ref LL_RCC_PLLSAIQ_DIV_7 + * @arg @ref LL_RCC_PLLSAIQ_DIV_8 + * @arg @ref LL_RCC_PLLSAIQ_DIV_9 + * @arg @ref LL_RCC_PLLSAIQ_DIV_10 + * @arg @ref LL_RCC_PLLSAIQ_DIV_11 + * @arg @ref LL_RCC_PLLSAIQ_DIV_12 + * @arg @ref LL_RCC_PLLSAIQ_DIV_13 + * @arg @ref LL_RCC_PLLSAIQ_DIV_14 + * @arg @ref LL_RCC_PLLSAIQ_DIV_15 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_GetQ(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIQ)); +} + +#if defined(RCC_PLLSAICFGR_PLLSAIR) +/** + * @brief Get SAIPLL division factor for PLLSAIR + * @note used for PLLSAICLK (SAI clock) + * @rmtoll PLLSAICFGR PLLSAIR LL_RCC_PLLSAI_GetR + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSAIR_DIV_2 + * @arg @ref LL_RCC_PLLSAIR_DIV_3 + * @arg @ref LL_RCC_PLLSAIR_DIV_4 + * @arg @ref LL_RCC_PLLSAIR_DIV_5 + * @arg @ref LL_RCC_PLLSAIR_DIV_6 + * @arg @ref LL_RCC_PLLSAIR_DIV_7 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_GetR(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIR)); +} +#endif /* RCC_PLLSAICFGR_PLLSAIR */ + +#if defined(RCC_PLLSAICFGR_PLLSAIP) +/** + * @brief Get SAIPLL division factor for PLLSAIP + * @note used for PLL48MCLK (48M domain clock) + * @rmtoll PLLSAICFGR PLLSAIP LL_RCC_PLLSAI_GetP + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSAIP_DIV_2 + * @arg @ref LL_RCC_PLLSAIP_DIV_4 + * @arg @ref LL_RCC_PLLSAIP_DIV_6 + * @arg @ref LL_RCC_PLLSAIP_DIV_8 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_GetP(void) +{ + return (uint32_t)(READ_BIT(RCC->PLLSAICFGR, RCC_PLLSAICFGR_PLLSAIP)); +} +#endif /* RCC_PLLSAICFGR_PLLSAIP */ + +/** + * @brief Get SAIPLL division factor for PLLSAIDIVQ + * @note used PLLSAICLK selected (SAI clock) + * @rmtoll DCKCFGR PLLSAIDIVQ LL_RCC_PLLSAI_GetDIVQ + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_1 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_2 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_3 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_4 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_5 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_6 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_7 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_8 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_9 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_10 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_11 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_12 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_13 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_14 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_15 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_16 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_17 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_18 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_19 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_20 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_21 + * @arg @ref 
LL_RCC_PLLSAIDIVQ_DIV_22 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_23 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_24 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_25 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_26 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_27 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_28 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_29 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_30 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_31 + * @arg @ref LL_RCC_PLLSAIDIVQ_DIV_32 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_GetDIVQ(void) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_PLLSAIDIVQ)); +} + +#if defined(RCC_DCKCFGR_PLLSAIDIVR) +/** + * @brief Get SAIPLL division factor for PLLSAIDIVR + * @note used for LTDC domain clock + * @rmtoll DCKCFGR PLLSAIDIVR LL_RCC_PLLSAI_GetDIVR + * @retval Returned value can be one of the following values: + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_2 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_4 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_8 + * @arg @ref LL_RCC_PLLSAIDIVR_DIV_16 + */ +__STATIC_INLINE uint32_t LL_RCC_PLLSAI_GetDIVR(void) +{ + return (uint32_t)(READ_BIT(RCC->DCKCFGR, RCC_DCKCFGR_PLLSAIDIVR)); +} +#endif /* RCC_DCKCFGR_PLLSAIDIVR */ + +/** + * @} + */ +#endif /* RCC_PLLSAI_SUPPORT */ + +/** @defgroup RCC_LL_EF_FLAG_Management FLAG Management + * @{ + */ + +/** + * @brief Clear LSI ready interrupt flag + * @rmtoll CIR LSIRDYC LL_RCC_ClearFlag_LSIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_LSIRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_LSIRDYC); +} + +/** + * @brief Clear LSE ready interrupt flag + * @rmtoll CIR LSERDYC LL_RCC_ClearFlag_LSERDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_LSERDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_LSERDYC); +} + +/** + * @brief Clear HSI ready interrupt flag + * @rmtoll CIR HSIRDYC LL_RCC_ClearFlag_HSIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_HSIRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_HSIRDYC); +} + +/** + * @brief Clear HSE ready interrupt flag + * @rmtoll CIR HSERDYC LL_RCC_ClearFlag_HSERDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_HSERDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_HSERDYC); +} + +/** + * @brief Clear PLL ready interrupt flag + * @rmtoll CIR PLLRDYC LL_RCC_ClearFlag_PLLRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_PLLRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_PLLRDYC); +} + +#if defined(RCC_PLLI2S_SUPPORT) +/** + * @brief Clear PLLI2S ready interrupt flag + * @rmtoll CIR PLLI2SRDYC LL_RCC_ClearFlag_PLLI2SRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_PLLI2SRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_PLLI2SRDYC); +} + +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** + * @brief Clear PLLSAI ready interrupt flag + * @rmtoll CIR PLLSAIRDYC LL_RCC_ClearFlag_PLLSAIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_PLLSAIRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_PLLSAIRDYC); +} + +#endif /* RCC_PLLSAI_SUPPORT */ + +/** + * @brief Clear Clock security system interrupt flag + * @rmtoll CIR CSSC LL_RCC_ClearFlag_HSECSS + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearFlag_HSECSS(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_CSSC); +} + +/** + * @brief Check if LSI ready interrupt occurred or not + * @rmtoll CIR LSIRDYF LL_RCC_IsActiveFlag_LSIRDY + * @retval State of bit (1 or 0). 
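+ * @note Editorial illustration: the flag remains set until cleared by software with the
+ *       corresponding clear function, e.g.
+ * @code
+ * if (LL_RCC_IsActiveFlag_LSIRDY() != 0U)
+ * {
+ *   LL_RCC_ClearFlag_LSIRDY();
+ * }
+ * @endcode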
+ */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_LSIRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_LSIRDYF) == (RCC_CIR_LSIRDYF)); +} + +/** + * @brief Check if LSE ready interrupt occurred or not + * @rmtoll CIR LSERDYF LL_RCC_IsActiveFlag_LSERDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_LSERDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_LSERDYF) == (RCC_CIR_LSERDYF)); +} + +/** + * @brief Check if HSI ready interrupt occurred or not + * @rmtoll CIR HSIRDYF LL_RCC_IsActiveFlag_HSIRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_HSIRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_HSIRDYF) == (RCC_CIR_HSIRDYF)); +} + +/** + * @brief Check if HSE ready interrupt occurred or not + * @rmtoll CIR HSERDYF LL_RCC_IsActiveFlag_HSERDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_HSERDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_HSERDYF) == (RCC_CIR_HSERDYF)); +} + +/** + * @brief Check if PLL ready interrupt occurred or not + * @rmtoll CIR PLLRDYF LL_RCC_IsActiveFlag_PLLRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_PLLRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_PLLRDYF) == (RCC_CIR_PLLRDYF)); +} + +#if defined(RCC_PLLI2S_SUPPORT) +/** + * @brief Check if PLLI2S ready interrupt occurred or not + * @rmtoll CIR PLLI2SRDYF LL_RCC_IsActiveFlag_PLLI2SRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_PLLI2SRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_PLLI2SRDYF) == (RCC_CIR_PLLI2SRDYF)); +} +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** + * @brief Check if PLLSAI ready interrupt occurred or not + * @rmtoll CIR PLLSAIRDYF LL_RCC_IsActiveFlag_PLLSAIRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_PLLSAIRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_PLLSAIRDYF) == (RCC_CIR_PLLSAIRDYF)); +} +#endif /* RCC_PLLSAI_SUPPORT */ + +/** + * @brief Check if Clock security system interrupt occurred or not + * @rmtoll CIR CSSF LL_RCC_IsActiveFlag_HSECSS + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_HSECSS(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_CSSF) == (RCC_CIR_CSSF)); +} + +/** + * @brief Check if RCC flag Independent Watchdog reset is set or not. + * @rmtoll CSR IWDGRSTF LL_RCC_IsActiveFlag_IWDGRST + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_IWDGRST(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_IWDGRSTF) == (RCC_CSR_IWDGRSTF)); +} + +/** + * @brief Check if RCC flag Low Power reset is set or not. + * @rmtoll CSR LPWRRSTF LL_RCC_IsActiveFlag_LPWRRST + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_LPWRRST(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_LPWRRSTF) == (RCC_CSR_LPWRRSTF)); +} + +/** + * @brief Check if RCC flag Pin reset is set or not. + * @rmtoll CSR PINRSTF LL_RCC_IsActiveFlag_PINRST + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_PINRST(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_PINRSTF) == (RCC_CSR_PINRSTF)); +} + +/** + * @brief Check if RCC flag POR/PDR reset is set or not. + * @rmtoll CSR PORRSTF LL_RCC_IsActiveFlag_PORRST + * @retval State of bit (1 or 0). 
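+ * @note Editorial illustration: the CSR reset flags keep their value until RMVF is set,
+ *       so read the flags of interest first and then clear them all at once:
+ * @code
+ * uint32_t was_por_reset = LL_RCC_IsActiveFlag_PORRST();
+ * LL_RCC_ClearResetFlags();
+ * @endcode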
+ */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_PORRST(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_PORRSTF) == (RCC_CSR_PORRSTF)); +} + +/** + * @brief Check if RCC flag Software reset is set or not. + * @rmtoll CSR SFTRSTF LL_RCC_IsActiveFlag_SFTRST + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_SFTRST(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_SFTRSTF) == (RCC_CSR_SFTRSTF)); +} + +/** + * @brief Check if RCC flag Window Watchdog reset is set or not. + * @rmtoll CSR WWDGRSTF LL_RCC_IsActiveFlag_WWDGRST + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_WWDGRST(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_WWDGRSTF) == (RCC_CSR_WWDGRSTF)); +} + +#if defined(RCC_CSR_BORRSTF) +/** + * @brief Check if RCC flag BOR reset is set or not. + * @rmtoll CSR BORRSTF LL_RCC_IsActiveFlag_BORRST + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsActiveFlag_BORRST(void) +{ + return (READ_BIT(RCC->CSR, RCC_CSR_BORRSTF) == (RCC_CSR_BORRSTF)); +} +#endif /* RCC_CSR_BORRSTF */ + +/** + * @brief Set RMVF bit to clear the reset flags. + * @rmtoll CSR RMVF LL_RCC_ClearResetFlags + * @retval None + */ +__STATIC_INLINE void LL_RCC_ClearResetFlags(void) +{ + SET_BIT(RCC->CSR, RCC_CSR_RMVF); +} + +/** + * @} + */ + +/** @defgroup RCC_LL_EF_IT_Management IT Management + * @{ + */ + +/** + * @brief Enable LSI ready interrupt + * @rmtoll CIR LSIRDYIE LL_RCC_EnableIT_LSIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableIT_LSIRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_LSIRDYIE); +} + +/** + * @brief Enable LSE ready interrupt + * @rmtoll CIR LSERDYIE LL_RCC_EnableIT_LSERDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableIT_LSERDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_LSERDYIE); +} + +/** + * @brief Enable HSI ready interrupt + * @rmtoll CIR HSIRDYIE LL_RCC_EnableIT_HSIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableIT_HSIRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_HSIRDYIE); +} + +/** + * @brief Enable HSE ready interrupt + * @rmtoll CIR HSERDYIE LL_RCC_EnableIT_HSERDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableIT_HSERDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_HSERDYIE); +} + +/** + * @brief Enable PLL ready interrupt + * @rmtoll CIR PLLRDYIE LL_RCC_EnableIT_PLLRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableIT_PLLRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_PLLRDYIE); +} + +#if defined(RCC_PLLI2S_SUPPORT) +/** + * @brief Enable PLLI2S ready interrupt + * @rmtoll CIR PLLI2SRDYIE LL_RCC_EnableIT_PLLI2SRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableIT_PLLI2SRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_PLLI2SRDYIE); +} +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** + * @brief Enable PLLSAI ready interrupt + * @rmtoll CIR PLLSAIRDYIE LL_RCC_EnableIT_PLLSAIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_EnableIT_PLLSAIRDY(void) +{ + SET_BIT(RCC->CIR, RCC_CIR_PLLSAIRDYIE); +} +#endif /* RCC_PLLSAI_SUPPORT */ + +/** + * @brief Disable LSI ready interrupt + * @rmtoll CIR LSIRDYIE LL_RCC_DisableIT_LSIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_DisableIT_LSIRDY(void) +{ + CLEAR_BIT(RCC->CIR, RCC_CIR_LSIRDYIE); +} + +/** + * @brief Disable LSE ready interrupt + * @rmtoll CIR LSERDYIE LL_RCC_DisableIT_LSERDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_DisableIT_LSERDY(void) +{ + CLEAR_BIT(RCC->CIR, RCC_CIR_LSERDYIE); +} + +/** + * @brief Disable HSI ready interrupt + * @rmtoll CIR HSIRDYIE LL_RCC_DisableIT_HSIRDY + * 
@retval None + */ +__STATIC_INLINE void LL_RCC_DisableIT_HSIRDY(void) +{ + CLEAR_BIT(RCC->CIR, RCC_CIR_HSIRDYIE); +} + +/** + * @brief Disable HSE ready interrupt + * @rmtoll CIR HSERDYIE LL_RCC_DisableIT_HSERDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_DisableIT_HSERDY(void) +{ + CLEAR_BIT(RCC->CIR, RCC_CIR_HSERDYIE); +} + +/** + * @brief Disable PLL ready interrupt + * @rmtoll CIR PLLRDYIE LL_RCC_DisableIT_PLLRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_DisableIT_PLLRDY(void) +{ + CLEAR_BIT(RCC->CIR, RCC_CIR_PLLRDYIE); +} + +#if defined(RCC_PLLI2S_SUPPORT) +/** + * @brief Disable PLLI2S ready interrupt + * @rmtoll CIR PLLI2SRDYIE LL_RCC_DisableIT_PLLI2SRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_DisableIT_PLLI2SRDY(void) +{ + CLEAR_BIT(RCC->CIR, RCC_CIR_PLLI2SRDYIE); +} + +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** + * @brief Disable PLLSAI ready interrupt + * @rmtoll CIR PLLSAIRDYIE LL_RCC_DisableIT_PLLSAIRDY + * @retval None + */ +__STATIC_INLINE void LL_RCC_DisableIT_PLLSAIRDY(void) +{ + CLEAR_BIT(RCC->CIR, RCC_CIR_PLLSAIRDYIE); +} +#endif /* RCC_PLLSAI_SUPPORT */ + +/** + * @brief Checks if LSI ready interrupt source is enabled or disabled. + * @rmtoll CIR LSIRDYIE LL_RCC_IsEnabledIT_LSIRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledIT_LSIRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_LSIRDYIE) == (RCC_CIR_LSIRDYIE)); +} + +/** + * @brief Checks if LSE ready interrupt source is enabled or disabled. + * @rmtoll CIR LSERDYIE LL_RCC_IsEnabledIT_LSERDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledIT_LSERDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_LSERDYIE) == (RCC_CIR_LSERDYIE)); +} + +/** + * @brief Checks if HSI ready interrupt source is enabled or disabled. + * @rmtoll CIR HSIRDYIE LL_RCC_IsEnabledIT_HSIRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledIT_HSIRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_HSIRDYIE) == (RCC_CIR_HSIRDYIE)); +} + +/** + * @brief Checks if HSE ready interrupt source is enabled or disabled. + * @rmtoll CIR HSERDYIE LL_RCC_IsEnabledIT_HSERDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledIT_HSERDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_HSERDYIE) == (RCC_CIR_HSERDYIE)); +} + +/** + * @brief Checks if PLL ready interrupt source is enabled or disabled. + * @rmtoll CIR PLLRDYIE LL_RCC_IsEnabledIT_PLLRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledIT_PLLRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_PLLRDYIE) == (RCC_CIR_PLLRDYIE)); +} + +#if defined(RCC_PLLI2S_SUPPORT) +/** + * @brief Checks if PLLI2S ready interrupt source is enabled or disabled. + * @rmtoll CIR PLLI2SRDYIE LL_RCC_IsEnabledIT_PLLI2SRDY + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledIT_PLLI2SRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_PLLI2SRDYIE) == (RCC_CIR_PLLI2SRDYIE)); +} + +#endif /* RCC_PLLI2S_SUPPORT */ + +#if defined(RCC_PLLSAI_SUPPORT) +/** + * @brief Checks if PLLSAI ready interrupt source is enabled or disabled. + * @rmtoll CIR PLLSAIRDYIE LL_RCC_IsEnabledIT_PLLSAIRDY + * @retval State of bit (1 or 0). 
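+ * @note Editorial illustration of the enable-then-start pattern used with these ready
+ *       interrupts (shown for PLLSAI; the RCC interrupt handler would then test
+ *       LL_RCC_IsActiveFlag_PLLSAIRDY() and clear it):
+ * @code
+ * LL_RCC_EnableIT_PLLSAIRDY();
+ * LL_RCC_PLLSAI_Enable();
+ * @endcode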
+ */ +__STATIC_INLINE uint32_t LL_RCC_IsEnabledIT_PLLSAIRDY(void) +{ + return (READ_BIT(RCC->CIR, RCC_CIR_PLLSAIRDYIE) == (RCC_CIR_PLLSAIRDYIE)); +} +#endif /* RCC_PLLSAI_SUPPORT */ + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup RCC_LL_EF_Init De-initialization function + * @{ + */ +ErrorStatus LL_RCC_DeInit(void); +/** + * @} + */ + +/** @defgroup RCC_LL_EF_Get_Freq Get system and peripherals clocks frequency functions + * @{ + */ +void LL_RCC_GetSystemClocksFreq(LL_RCC_ClocksTypeDef *RCC_Clocks); +#if defined(FMPI2C1) +uint32_t LL_RCC_GetFMPI2CClockFreq(uint32_t FMPI2CxSource); +#endif /* FMPI2C1 */ +#if defined(LPTIM1) +uint32_t LL_RCC_GetLPTIMClockFreq(uint32_t LPTIMxSource); +#endif /* LPTIM1 */ +#if defined(SAI1) +uint32_t LL_RCC_GetSAIClockFreq(uint32_t SAIxSource); +#endif /* SAI1 */ +#if defined(SDIO) +uint32_t LL_RCC_GetSDIOClockFreq(uint32_t SDIOxSource); +#endif /* SDIO */ +#if defined(RNG) +uint32_t LL_RCC_GetRNGClockFreq(uint32_t RNGxSource); +#endif /* RNG */ +#if defined(USB_OTG_FS) || defined(USB_OTG_HS) +uint32_t LL_RCC_GetUSBClockFreq(uint32_t USBxSource); +#endif /* USB_OTG_FS || USB_OTG_HS */ +#if defined(DFSDM1_Channel0) +uint32_t LL_RCC_GetDFSDMClockFreq(uint32_t DFSDMxSource); +uint32_t LL_RCC_GetDFSDMAudioClockFreq(uint32_t DFSDMxSource); +#endif /* DFSDM1_Channel0 */ +uint32_t LL_RCC_GetI2SClockFreq(uint32_t I2SxSource); +#if defined(CEC) +uint32_t LL_RCC_GetCECClockFreq(uint32_t CECxSource); +#endif /* CEC */ +#if defined(LTDC) +uint32_t LL_RCC_GetLTDCClockFreq(uint32_t LTDCxSource); +#endif /* LTDC */ +#if defined(SPDIFRX) +uint32_t LL_RCC_GetSPDIFRXClockFreq(uint32_t SPDIFRXxSource); +#endif /* SPDIFRX */ +#if defined(DSI) +uint32_t LL_RCC_GetDSIClockFreq(uint32_t DSIxSource); +#endif /* DSI */ +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* defined(RCC) */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_RCC_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_rtc.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_rtc.h new file mode 100644 index 00000000..74a0aee0 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_rtc.h @@ -0,0 +1,3663 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_rtc.h + * @author MCD Application Team + * @brief Header file of RTC LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F4xx_LL_RTC_H +#define STM32F4xx_LL_RTC_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined(RTC) + +/** @defgroup RTC_LL RTC + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup RTC_LL_Private_Constants RTC Private Constants + * @{ + */ +/* Masks Definition */ +#define RTC_INIT_MASK 0xFFFFFFFFU +#define RTC_RSF_MASK ((uint32_t)~(RTC_ISR_INIT | RTC_ISR_RSF)) + +/* Write protection defines */ +#define RTC_WRITE_PROTECTION_DISABLE ((uint8_t)0xFFU) +#define RTC_WRITE_PROTECTION_ENABLE_1 ((uint8_t)0xCAU) +#define RTC_WRITE_PROTECTION_ENABLE_2 ((uint8_t)0x53U) + +/* Defines used to combine date & time */ +#define RTC_OFFSET_WEEKDAY 24U +#define RTC_OFFSET_DAY 16U +#define RTC_OFFSET_MONTH 8U +#define RTC_OFFSET_HOUR 16U +#define RTC_OFFSET_MINUTE 8U + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup RTC_LL_Private_Macros RTC Private Macros + * @{ + */ +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ + +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup RTC_LL_ES_INIT RTC Exported Init structure + * @{ + */ + +/** + * @brief RTC Init structures definition + */ +typedef struct +{ + uint32_t HourFormat; /*!< Specifies the RTC Hours Format. + This parameter can be a value of @ref RTC_LL_EC_HOURFORMAT + + This feature can be modified afterwards using unitary function + @ref LL_RTC_SetHourFormat(). */ + + uint32_t AsynchPrescaler; /*!< Specifies the RTC Asynchronous Predivider value. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0x7F + + This feature can be modified afterwards using unitary function + @ref LL_RTC_SetAsynchPrescaler(). */ + + uint32_t SynchPrescaler; /*!< Specifies the RTC Synchronous Predivider value. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0x7FFF + + This feature can be modified afterwards using unitary function + @ref LL_RTC_SetSynchPrescaler(). */ +} LL_RTC_InitTypeDef; + +/** + * @brief RTC Time structure definition + */ +typedef struct +{ + uint32_t TimeFormat; /*!< Specifies the RTC AM/PM Time. + This parameter can be a value of @ref RTC_LL_EC_TIME_FORMAT + + This feature can be modified afterwards using unitary function @ref LL_RTC_TIME_SetFormat(). */ + + uint8_t Hours; /*!< Specifies the RTC Time Hours. + This parameter must be a number between Min_Data = 0 and Max_Data = 12 if the @ref LL_RTC_TIME_FORMAT_PM is selected. + This parameter must be a number between Min_Data = 0 and Max_Data = 23 if the @ref LL_RTC_TIME_FORMAT_AM_OR_24 is selected. + + This feature can be modified afterwards using unitary function @ref LL_RTC_TIME_SetHour(). */ + + uint8_t Minutes; /*!< Specifies the RTC Time Minutes. + This parameter must be a number between Min_Data = 0 and Max_Data = 59 + + This feature can be modified afterwards using unitary function @ref LL_RTC_TIME_SetMinute(). 
*/ + + uint8_t Seconds; /*!< Specifies the RTC Time Seconds. + This parameter must be a number between Min_Data = 0 and Max_Data = 59 + + This feature can be modified afterwards using unitary function @ref LL_RTC_TIME_SetSecond(). */ +} LL_RTC_TimeTypeDef; + +/** + * @brief RTC Date structure definition + */ +typedef struct +{ + uint8_t WeekDay; /*!< Specifies the RTC Date WeekDay. + This parameter can be a value of @ref RTC_LL_EC_WEEKDAY + + This feature can be modified afterwards using unitary function @ref LL_RTC_DATE_SetWeekDay(). */ + + uint8_t Month; /*!< Specifies the RTC Date Month. + This parameter can be a value of @ref RTC_LL_EC_MONTH + + This feature can be modified afterwards using unitary function @ref LL_RTC_DATE_SetMonth(). */ + + uint8_t Day; /*!< Specifies the RTC Date Day. + This parameter must be a number between Min_Data = 1 and Max_Data = 31 + + This feature can be modified afterwards using unitary function @ref LL_RTC_DATE_SetDay(). */ + + uint8_t Year; /*!< Specifies the RTC Date Year. + This parameter must be a number between Min_Data = 0 and Max_Data = 99 + + This feature can be modified afterwards using unitary function @ref LL_RTC_DATE_SetYear(). */ +} LL_RTC_DateTypeDef; + +/** + * @brief RTC Alarm structure definition + */ +typedef struct +{ + LL_RTC_TimeTypeDef AlarmTime; /*!< Specifies the RTC Alarm Time members. */ + + uint32_t AlarmMask; /*!< Specifies the RTC Alarm Masks. + This parameter can be a value of @ref RTC_LL_EC_ALMA_MASK for ALARM A or @ref RTC_LL_EC_ALMB_MASK for ALARM B. + + This feature can be modified afterwards using unitary function @ref LL_RTC_ALMA_SetMask() for ALARM A + or @ref LL_RTC_ALMB_SetMask() for ALARM B. + */ + + uint32_t AlarmDateWeekDaySel; /*!< Specifies the RTC Alarm is on day or WeekDay. + This parameter can be a value of @ref RTC_LL_EC_ALMA_WEEKDAY_SELECTION for ALARM A or @ref RTC_LL_EC_ALMB_WEEKDAY_SELECTION for ALARM B + + This feature can be modified afterwards using unitary function @ref LL_RTC_ALMA_EnableWeekday() or @ref LL_RTC_ALMA_DisableWeekday() + for ALARM A or @ref LL_RTC_ALMB_EnableWeekday() or @ref LL_RTC_ALMB_DisableWeekday() for ALARM B + */ + + uint8_t AlarmDateWeekDay; /*!< Specifies the RTC Alarm Day/WeekDay. + If AlarmDateWeekDaySel set to day, this parameter must be a number between Min_Data = 1 and Max_Data = 31. + + This feature can be modified afterwards using unitary function @ref LL_RTC_ALMA_SetDay() + for ALARM A or @ref LL_RTC_ALMB_SetDay() for ALARM B. + + If AlarmDateWeekDaySel set to Weekday, this parameter can be a value of @ref RTC_LL_EC_WEEKDAY. + + This feature can be modified afterwards using unitary function @ref LL_RTC_ALMA_SetWeekDay() + for ALARM A or @ref LL_RTC_ALMB_SetWeekDay() for ALARM B. 
+ */ +} LL_RTC_AlarmTypeDef; + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup RTC_LL_Exported_Constants RTC Exported Constants + * @{ + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup RTC_LL_EC_FORMAT FORMAT + * @{ + */ +#define LL_RTC_FORMAT_BIN 0x00000000U /*!< Binary data format */ +#define LL_RTC_FORMAT_BCD 0x00000001U /*!< BCD data format */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_ALMA_WEEKDAY_SELECTION RTC Alarm A Date WeekDay + * @{ + */ +#define LL_RTC_ALMA_DATEWEEKDAYSEL_DATE 0x00000000U /*!< Alarm A Date is selected */ +#define LL_RTC_ALMA_DATEWEEKDAYSEL_WEEKDAY RTC_ALRMAR_WDSEL /*!< Alarm A WeekDay is selected */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_ALMB_WEEKDAY_SELECTION RTC Alarm B Date WeekDay + * @{ + */ +#define LL_RTC_ALMB_DATEWEEKDAYSEL_DATE 0x00000000U /*!< Alarm B Date is selected */ +#define LL_RTC_ALMB_DATEWEEKDAYSEL_WEEKDAY RTC_ALRMBR_WDSEL /*!< Alarm B WeekDay is selected */ +/** + * @} + */ + +#endif /* USE_FULL_LL_DRIVER */ + +/** @defgroup RTC_LL_EC_GET_FLAG Get Flags Defines + * @brief Flags defines which can be used with LL_RTC_ReadReg function + * @{ + */ +#define LL_RTC_ISR_RECALPF RTC_ISR_RECALPF +#if defined(RTC_TAMPER2_SUPPORT) +#define LL_RTC_ISR_TAMP2F RTC_ISR_TAMP2F +#endif /* RTC_TAMPER2_SUPPORT */ +#define LL_RTC_ISR_TAMP1F RTC_ISR_TAMP1F +#define LL_RTC_ISR_TSOVF RTC_ISR_TSOVF +#define LL_RTC_ISR_TSF RTC_ISR_TSF +#define LL_RTC_ISR_WUTF RTC_ISR_WUTF +#define LL_RTC_ISR_ALRBF RTC_ISR_ALRBF +#define LL_RTC_ISR_ALRAF RTC_ISR_ALRAF +#define LL_RTC_ISR_INITF RTC_ISR_INITF +#define LL_RTC_ISR_RSF RTC_ISR_RSF +#define LL_RTC_ISR_INITS RTC_ISR_INITS +#define LL_RTC_ISR_SHPF RTC_ISR_SHPF +#define LL_RTC_ISR_WUTWF RTC_ISR_WUTWF +#define LL_RTC_ISR_ALRBWF RTC_ISR_ALRBWF +#define LL_RTC_ISR_ALRAWF RTC_ISR_ALRAWF +/** + * @} + */ + +/** @defgroup RTC_LL_EC_IT IT Defines + * @brief IT defines which can be used with LL_RTC_ReadReg and LL_RTC_WriteReg functions + * @{ + */ +#define LL_RTC_CR_TSIE RTC_CR_TSIE +#define LL_RTC_CR_WUTIE RTC_CR_WUTIE +#define LL_RTC_CR_ALRBIE RTC_CR_ALRBIE +#define LL_RTC_CR_ALRAIE RTC_CR_ALRAIE +#define LL_RTC_TAFCR_TAMPIE RTC_TAFCR_TAMPIE +/** + * @} + */ + +/** @defgroup RTC_LL_EC_WEEKDAY WEEK DAY + * @{ + */ +#define LL_RTC_WEEKDAY_MONDAY ((uint8_t)0x01U) /*!< Monday */ +#define LL_RTC_WEEKDAY_TUESDAY ((uint8_t)0x02U) /*!< Tuesday */ +#define LL_RTC_WEEKDAY_WEDNESDAY ((uint8_t)0x03U) /*!< Wednesday */ +#define LL_RTC_WEEKDAY_THURSDAY ((uint8_t)0x04U) /*!< Thrusday */ +#define LL_RTC_WEEKDAY_FRIDAY ((uint8_t)0x05U) /*!< Friday */ +#define LL_RTC_WEEKDAY_SATURDAY ((uint8_t)0x06U) /*!< Saturday */ +#define LL_RTC_WEEKDAY_SUNDAY ((uint8_t)0x07U) /*!< Sunday */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_MONTH MONTH + * @{ + */ +#define LL_RTC_MONTH_JANUARY ((uint8_t)0x01U) /*!< January */ +#define LL_RTC_MONTH_FEBRUARY ((uint8_t)0x02U) /*!< February */ +#define LL_RTC_MONTH_MARCH ((uint8_t)0x03U) /*!< March */ +#define LL_RTC_MONTH_APRIL ((uint8_t)0x04U) /*!< April */ +#define LL_RTC_MONTH_MAY ((uint8_t)0x05U) /*!< May */ +#define LL_RTC_MONTH_JUNE ((uint8_t)0x06U) /*!< June */ +#define LL_RTC_MONTH_JULY ((uint8_t)0x07U) /*!< July */ +#define LL_RTC_MONTH_AUGUST ((uint8_t)0x08U) /*!< August */ +#define LL_RTC_MONTH_SEPTEMBER ((uint8_t)0x09U) /*!< September */ +#define LL_RTC_MONTH_OCTOBER ((uint8_t)0x10U) /*!< October */ +#define LL_RTC_MONTH_NOVEMBER ((uint8_t)0x11U) /*!< November */ +#define LL_RTC_MONTH_DECEMBER 
((uint8_t)0x12U) /*!< December */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_HOURFORMAT HOUR FORMAT + * @{ + */ +#define LL_RTC_HOURFORMAT_24HOUR 0x00000000U /*!< 24 hour/day format */ +#define LL_RTC_HOURFORMAT_AMPM RTC_CR_FMT /*!< AM/PM hour format */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_ALARMOUT ALARM OUTPUT + * @{ + */ +#define LL_RTC_ALARMOUT_DISABLE 0x00000000U /*!< Output disabled */ +#define LL_RTC_ALARMOUT_ALMA RTC_CR_OSEL_0 /*!< Alarm A output enabled */ +#define LL_RTC_ALARMOUT_ALMB RTC_CR_OSEL_1 /*!< Alarm B output enabled */ +#define LL_RTC_ALARMOUT_WAKEUP RTC_CR_OSEL /*!< Wakeup output enabled */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_ALARM_OUTPUTTYPE ALARM OUTPUT TYPE + * @{ + */ +#define LL_RTC_ALARM_OUTPUTTYPE_OPENDRAIN 0x00000000U /*!< RTC_ALARM, when mapped on PC13, is open-drain output */ +#define LL_RTC_ALARM_OUTPUTTYPE_PUSHPULL RTC_TAFCR_ALARMOUTTYPE /*!< RTC_ALARM, when mapped on PC13, is push-pull output */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_OUTPUTPOLARITY_PIN OUTPUT POLARITY PIN + * @{ + */ +#define LL_RTC_OUTPUTPOLARITY_PIN_HIGH 0x00000000U /*!< Pin is high when ALRAF/ALRBF/WUTF is asserted (depending on OSEL)*/ +#define LL_RTC_OUTPUTPOLARITY_PIN_LOW RTC_CR_POL /*!< Pin is low when ALRAF/ALRBF/WUTF is asserted (depending on OSEL) */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_TIME_FORMAT TIME FORMAT + * @{ + */ +#define LL_RTC_TIME_FORMAT_AM_OR_24 0x00000000U /*!< AM or 24-hour format */ +#define LL_RTC_TIME_FORMAT_PM RTC_TR_PM /*!< PM */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_SHIFT_SECOND SHIFT SECOND + * @{ + */ +#define LL_RTC_SHIFT_SECOND_DELAY 0x00000000U /* Delay (seconds) = SUBFS / (PREDIV_S + 1) */ +#define LL_RTC_SHIFT_SECOND_ADVANCE RTC_SHIFTR_ADD1S /* Advance (seconds) = (1 - (SUBFS / (PREDIV_S + 1))) */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_ALMA_MASK ALARMA MASK + * @{ + */ +#define LL_RTC_ALMA_MASK_NONE 0x00000000U /*!< No masks applied on Alarm A*/ +#define LL_RTC_ALMA_MASK_DATEWEEKDAY RTC_ALRMAR_MSK4 /*!< Date/day do not care in Alarm A comparison */ +#define LL_RTC_ALMA_MASK_HOURS RTC_ALRMAR_MSK3 /*!< Hours do not care in Alarm A comparison */ +#define LL_RTC_ALMA_MASK_MINUTES RTC_ALRMAR_MSK2 /*!< Minutes do not care in Alarm A comparison */ +#define LL_RTC_ALMA_MASK_SECONDS RTC_ALRMAR_MSK1 /*!< Seconds do not care in Alarm A comparison */ +#define LL_RTC_ALMA_MASK_ALL (RTC_ALRMAR_MSK4 | RTC_ALRMAR_MSK3 | RTC_ALRMAR_MSK2 | RTC_ALRMAR_MSK1) /*!< Masks all */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_ALMA_TIME_FORMAT ALARMA TIME FORMAT + * @{ + */ +#define LL_RTC_ALMA_TIME_FORMAT_AM 0x00000000U /*!< AM or 24-hour format */ +#define LL_RTC_ALMA_TIME_FORMAT_PM RTC_ALRMAR_PM /*!< PM */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_ALMB_MASK ALARMB MASK + * @{ + */ +#define LL_RTC_ALMB_MASK_NONE 0x00000000U /*!< No masks applied on Alarm B */ +#define LL_RTC_ALMB_MASK_DATEWEEKDAY RTC_ALRMBR_MSK4 /*!< Date/day do not care in Alarm B comparison */ +#define LL_RTC_ALMB_MASK_HOURS RTC_ALRMBR_MSK3 /*!< Hours do not care in Alarm B comparison */ +#define LL_RTC_ALMB_MASK_MINUTES RTC_ALRMBR_MSK2 /*!< Minutes do not care in Alarm B comparison */ +#define LL_RTC_ALMB_MASK_SECONDS RTC_ALRMBR_MSK1 /*!< Seconds do not care in Alarm B comparison */ +#define LL_RTC_ALMB_MASK_ALL (RTC_ALRMBR_MSK4 | RTC_ALRMBR_MSK3 | RTC_ALRMBR_MSK2 | RTC_ALRMBR_MSK1) /*!< Masks all */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_ALMB_TIME_FORMAT ALARMB TIME FORMAT + * @{ + */ +#define LL_RTC_ALMB_TIME_FORMAT_AM 0x00000000U /*!< AM or 24-hour format */ 
+#define LL_RTC_ALMB_TIME_FORMAT_PM         RTC_ALRMBR_PM /*!< PM */
+/**
+  * @}
+  */
+
+/** @defgroup RTC_LL_EC_TIMESTAMP_EDGE  TIMESTAMP EDGE
+  * @{
+  */
+#define LL_RTC_TIMESTAMP_EDGE_RISING       0x00000000U /*!< RTC_TS input rising edge generates a time-stamp event */
+#define LL_RTC_TIMESTAMP_EDGE_FALLING      RTC_CR_TSEDGE /*!< RTC_TS input falling edge generates a time-stamp event */
+/**
+  * @}
+  */
+
+/** @defgroup RTC_LL_EC_TS_TIME_FORMAT TIMESTAMP TIME FORMAT
+  * @{
+  */
+#define LL_RTC_TS_TIME_FORMAT_AM           0x00000000U /*!< AM or 24-hour format */
+#define LL_RTC_TS_TIME_FORMAT_PM           RTC_TSTR_PM /*!< PM */
+/**
+  * @}
+  */
+
+/** @defgroup RTC_LL_EC_TAMPER  TAMPER
+  * @{
+  */
+#define LL_RTC_TAMPER_1                    RTC_TAFCR_TAMP1E /*!< RTC_TAMP1 input detection */
+#if defined(RTC_TAMPER2_SUPPORT)
+#define LL_RTC_TAMPER_2                    RTC_TAFCR_TAMP2E /*!< RTC_TAMP2 input detection */
+#endif /* RTC_TAMPER2_SUPPORT */
+/**
+  * @}
+  */
+
+/** @defgroup RTC_LL_EC_TAMPER_DURATION  TAMPER DURATION
+  * @{
+  */
+#define LL_RTC_TAMPER_DURATION_1RTCCLK     0x00000000U          /*!< Tamper pins are pre-charged before sampling during 1 RTCCLK cycle  */
+#define LL_RTC_TAMPER_DURATION_2RTCCLK     RTC_TAFCR_TAMPPRCH_0 /*!< Tamper pins are pre-charged before sampling during 2 RTCCLK cycles */
+#define LL_RTC_TAMPER_DURATION_4RTCCLK     RTC_TAFCR_TAMPPRCH_1 /*!< Tamper pins are pre-charged before sampling during 4 RTCCLK cycles */
+#define LL_RTC_TAMPER_DURATION_8RTCCLK     RTC_TAFCR_TAMPPRCH   /*!< Tamper pins are pre-charged before sampling during 8 RTCCLK cycles */
+/**
+  * @}
+  */
+
+/** @defgroup RTC_LL_EC_TAMPER_FILTER  TAMPER FILTER
+  * @{
+  */
+#define LL_RTC_TAMPER_FILTER_DISABLE       0x00000000U          /*!< Tamper filter is disabled */
+#define LL_RTC_TAMPER_FILTER_2SAMPLE       RTC_TAFCR_TAMPFLT_0  /*!< Tamper is activated after 2 consecutive samples at the active level */
+#define LL_RTC_TAMPER_FILTER_4SAMPLE       RTC_TAFCR_TAMPFLT_1  /*!< Tamper is activated after 4 consecutive samples at the active level */
+#define LL_RTC_TAMPER_FILTER_8SAMPLE       RTC_TAFCR_TAMPFLT    /*!< Tamper is activated after 8 consecutive samples at the active level.
*/ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_TAMPER_SAMPLFREQDIV TAMPER SAMPLING FREQUENCY DIVIDER + * @{ + */ +#define LL_RTC_TAMPER_SAMPLFREQDIV_32768 0x00000000U /*!< Each of the tamper inputs are sampled with a frequency = RTCCLK / 32768 */ +#define LL_RTC_TAMPER_SAMPLFREQDIV_16384 RTC_TAFCR_TAMPFREQ_0 /*!< Each of the tamper inputs are sampled with a frequency = RTCCLK / 16384 */ +#define LL_RTC_TAMPER_SAMPLFREQDIV_8192 RTC_TAFCR_TAMPFREQ_1 /*!< Each of the tamper inputs are sampled with a frequency = RTCCLK / 8192 */ +#define LL_RTC_TAMPER_SAMPLFREQDIV_4096 (RTC_TAFCR_TAMPFREQ_1 | RTC_TAFCR_TAMPFREQ_0) /*!< Each of the tamper inputs are sampled with a frequency = RTCCLK / 4096 */ +#define LL_RTC_TAMPER_SAMPLFREQDIV_2048 RTC_TAFCR_TAMPFREQ_2 /*!< Each of the tamper inputs are sampled with a frequency = RTCCLK / 2048 */ +#define LL_RTC_TAMPER_SAMPLFREQDIV_1024 (RTC_TAFCR_TAMPFREQ_2 | RTC_TAFCR_TAMPFREQ_0) /*!< Each of the tamper inputs are sampled with a frequency = RTCCLK / 1024 */ +#define LL_RTC_TAMPER_SAMPLFREQDIV_512 (RTC_TAFCR_TAMPFREQ_2 | RTC_TAFCR_TAMPFREQ_1) /*!< Each of the tamper inputs are sampled with a frequency = RTCCLK / 512 */ +#define LL_RTC_TAMPER_SAMPLFREQDIV_256 RTC_TAFCR_TAMPFREQ /*!< Each of the tamper inputs are sampled with a frequency = RTCCLK / 256 */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_TAMPER_ACTIVELEVEL TAMPER ACTIVE LEVEL + * @{ + */ +#define LL_RTC_TAMPER_ACTIVELEVEL_TAMP1 RTC_TAFCR_TAMP1TRG /*!< RTC_TAMP1 input falling edge (if TAMPFLT = 00) or staying high (if TAMPFLT != 00) triggers a tamper detection event */ +#if defined(RTC_TAMPER2_SUPPORT) +#define LL_RTC_TAMPER_ACTIVELEVEL_TAMP2 RTC_TAFCR_TAMP2TRG /*!< RTC_TAMP2 input falling edge (if TAMPFLT = 00) or staying high (if TAMPFLT != 00) triggers a tamper detection event */ +#endif /* RTC_TAMPER2_SUPPORT */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_WAKEUPCLOCK_DIV WAKEUP CLOCK DIV + * @{ + */ +#define LL_RTC_WAKEUPCLOCK_DIV_16 0x00000000U /*!< RTC/16 clock is selected */ +#define LL_RTC_WAKEUPCLOCK_DIV_8 (RTC_CR_WUCKSEL_0) /*!< RTC/8 clock is selected */ +#define LL_RTC_WAKEUPCLOCK_DIV_4 (RTC_CR_WUCKSEL_1) /*!< RTC/4 clock is selected */ +#define LL_RTC_WAKEUPCLOCK_DIV_2 (RTC_CR_WUCKSEL_1 | RTC_CR_WUCKSEL_0) /*!< RTC/2 clock is selected */ +#define LL_RTC_WAKEUPCLOCK_CKSPRE (RTC_CR_WUCKSEL_2) /*!< ck_spre (usually 1 Hz) clock is selected */ +#define LL_RTC_WAKEUPCLOCK_CKSPRE_WUT (RTC_CR_WUCKSEL_2 | RTC_CR_WUCKSEL_1) /*!< ck_spre (usually 1 Hz) clock is selected and 2exp16 is added to the WUT counter value*/ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_BKP BACKUP + * @{ + */ +#define LL_RTC_BKP_DR0 0x00000000U +#define LL_RTC_BKP_DR1 0x00000001U +#define LL_RTC_BKP_DR2 0x00000002U +#define LL_RTC_BKP_DR3 0x00000003U +#define LL_RTC_BKP_DR4 0x00000004U +#define LL_RTC_BKP_DR5 0x00000005U +#define LL_RTC_BKP_DR6 0x00000006U +#define LL_RTC_BKP_DR7 0x00000007U +#define LL_RTC_BKP_DR8 0x00000008U +#define LL_RTC_BKP_DR9 0x00000009U +#define LL_RTC_BKP_DR10 0x0000000AU +#define LL_RTC_BKP_DR11 0x0000000BU +#define LL_RTC_BKP_DR12 0x0000000CU +#define LL_RTC_BKP_DR13 0x0000000DU +#define LL_RTC_BKP_DR14 0x0000000EU +#define LL_RTC_BKP_DR15 0x0000000FU +#define LL_RTC_BKP_DR16 0x00000010U +#define LL_RTC_BKP_DR17 0x00000011U +#define LL_RTC_BKP_DR18 0x00000012U +#define LL_RTC_BKP_DR19 0x00000013U +/** + * @} + */ + +/** @defgroup RTC_LL_EC_CALIB_OUTPUT Calibration output + * @{ + */ +#define LL_RTC_CALIB_OUTPUT_NONE 0x00000000U /*!< Calibration output disabled */ +#define LL_RTC_CALIB_OUTPUT_1HZ 
(RTC_CR_COE | RTC_CR_COSEL) /*!< Calibration output is 1 Hz */ +#define LL_RTC_CALIB_OUTPUT_512HZ (RTC_CR_COE) /*!< Calibration output is 512 Hz */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_CALIB_SIGN Coarse digital calibration sign + * @{ + */ +#define LL_RTC_CALIB_SIGN_POSITIVE 0x00000000U /*!< Positive calibration: calendar update frequency is increased */ +#define LL_RTC_CALIB_SIGN_NEGATIVE RTC_CALIBR_DCS /*!< Negative calibration: calendar update frequency is decreased */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_CALIB_INSERTPULSE Calibration pulse insertion + * @{ + */ +#define LL_RTC_CALIB_INSERTPULSE_NONE 0x00000000U /*!< No RTCCLK pulses are added */ +#define LL_RTC_CALIB_INSERTPULSE_SET RTC_CALR_CALP /*!< One RTCCLK pulse is effectively inserted every 2exp11 pulses (frequency increased by 488.5 ppm) */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_CALIB_PERIOD Calibration period + * @{ + */ +#define LL_RTC_CALIB_PERIOD_32SEC 0x00000000U /*!< Use a 32-second calibration cycle period */ +#define LL_RTC_CALIB_PERIOD_16SEC RTC_CALR_CALW16 /*!< Use a 16-second calibration cycle period */ +#define LL_RTC_CALIB_PERIOD_8SEC RTC_CALR_CALW8 /*!< Use a 8-second calibration cycle period */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_TSINSEL TIMESTAMP mapping + * @{ + */ +#define LL_RTC_TimeStampPin_Default 0x00000000U /*!< Use RTC_AF1 as TIMESTAMP */ +#if defined(RTC_AF2_SUPPORT) +#define LL_RTC_TimeStampPin_Pos1 RTC_TAFCR_TSINSEL /*!< Use RTC_AF2 as TIMESTAMP */ +#endif /* RTC_AF2_SUPPORT */ +/** + * @} + */ + +/** @defgroup RTC_LL_EC_TAMP1INSEL TAMPER1 mapping + * @{ + */ +#define LL_RTC_TamperPin_Default 0x00000000U /*!< Use RTC_AF1 as TAMPER1 */ +#if defined(RTC_AF2_SUPPORT) +#define LL_RTC_TamperPin_Pos1 RTC_TAFCR_TAMP1INSEL /*!< Use RTC_AF2 as TAMPER1 */ +#endif /* RTC_AF2_SUPPORT */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup RTC_LL_Exported_Macros RTC Exported Macros + * @{ + */ + +/** @defgroup RTC_LL_EM_WRITE_READ Common Write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in RTC register + * @param __INSTANCE__ RTC Instance + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_RTC_WriteReg(__INSTANCE__, __REG__, __VALUE__) WRITE_REG(__INSTANCE__->__REG__, (__VALUE__)) + +/** + * @brief Read a value in RTC register + * @param __INSTANCE__ RTC Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_RTC_ReadReg(__INSTANCE__, __REG__) READ_REG(__INSTANCE__->__REG__) +/** + * @} + */ + +/** @defgroup RTC_LL_EM_Convert Convert helper Macros + * @{ + */ + +/** + * @brief Helper macro to convert a value from 2 digit decimal format to BCD format + * @param __VALUE__ Byte to be converted + * @retval Converted byte + */ +#define __LL_RTC_CONVERT_BIN2BCD(__VALUE__) (uint8_t)((((__VALUE__) / 10U) << 4U) | ((__VALUE__) % 10U)) + +/** + * @brief Helper macro to convert a value from BCD format to 2 digit decimal format + * @param __VALUE__ BCD value to be converted + * @retval Converted byte + */ +#define __LL_RTC_CONVERT_BCD2BIN(__VALUE__) (uint8_t)(((uint8_t)((__VALUE__) & (uint8_t)0xF0U) >> (uint8_t)0x4U) * 10U + ((__VALUE__) & (uint8_t)0x0FU)) + +/** + * @} + */ + +/** @defgroup RTC_LL_EM_Date Date helper Macros + * @{ + */ + +/** + * @brief Helper macro to retrieve weekday. + * @param __RTC_DATE__ Date returned by @ref LL_RTC_DATE_Get function. 
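+  * @note   Illustrative usage sketch (not part of the original header): unpacking a
+  *         date word returned by @ref LL_RTC_DATE_Get with these helper macros and
+  *         @ref __LL_RTC_CONVERT_BCD2BIN. Variable names are placeholders only.
+  * @code
+  *         uint32_t date    = LL_RTC_DATE_Get(RTC);
+  *         uint32_t weekday = __LL_RTC_GET_WEEKDAY(date);                         // LL_RTC_WEEKDAY_x
+  *         uint32_t day     = __LL_RTC_CONVERT_BCD2BIN(__LL_RTC_GET_DAY(date));   // 1..31 in binary
+  *         uint32_t month   = __LL_RTC_GET_MONTH(date);                           // LL_RTC_MONTH_x (BCD)
+  *         uint32_t year    = __LL_RTC_CONVERT_BCD2BIN(__LL_RTC_GET_YEAR(date));  // 0..99 in binary
+  * @endcode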
+ * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_WEEKDAY_MONDAY + * @arg @ref LL_RTC_WEEKDAY_TUESDAY + * @arg @ref LL_RTC_WEEKDAY_WEDNESDAY + * @arg @ref LL_RTC_WEEKDAY_THURSDAY + * @arg @ref LL_RTC_WEEKDAY_FRIDAY + * @arg @ref LL_RTC_WEEKDAY_SATURDAY + * @arg @ref LL_RTC_WEEKDAY_SUNDAY + */ +#define __LL_RTC_GET_WEEKDAY(__RTC_DATE__) (((__RTC_DATE__) >> RTC_OFFSET_WEEKDAY) & 0x000000FFU) + +/** + * @brief Helper macro to retrieve Year in BCD format + * @param __RTC_DATE__ Value returned by @ref LL_RTC_DATE_Get + * @retval Year in BCD format (0x00 . . . 0x99) + */ +#define __LL_RTC_GET_YEAR(__RTC_DATE__) ((__RTC_DATE__) & 0x000000FFU) + +/** + * @brief Helper macro to retrieve Month in BCD format + * @param __RTC_DATE__ Value returned by @ref LL_RTC_DATE_Get + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_MONTH_JANUARY + * @arg @ref LL_RTC_MONTH_FEBRUARY + * @arg @ref LL_RTC_MONTH_MARCH + * @arg @ref LL_RTC_MONTH_APRIL + * @arg @ref LL_RTC_MONTH_MAY + * @arg @ref LL_RTC_MONTH_JUNE + * @arg @ref LL_RTC_MONTH_JULY + * @arg @ref LL_RTC_MONTH_AUGUST + * @arg @ref LL_RTC_MONTH_SEPTEMBER + * @arg @ref LL_RTC_MONTH_OCTOBER + * @arg @ref LL_RTC_MONTH_NOVEMBER + * @arg @ref LL_RTC_MONTH_DECEMBER + */ +#define __LL_RTC_GET_MONTH(__RTC_DATE__) (((__RTC_DATE__) >>RTC_OFFSET_MONTH) & 0x000000FFU) + +/** + * @brief Helper macro to retrieve Day in BCD format + * @param __RTC_DATE__ Value returned by @ref LL_RTC_DATE_Get + * @retval Day in BCD format (0x01 . . . 0x31) + */ +#define __LL_RTC_GET_DAY(__RTC_DATE__) (((__RTC_DATE__) >>RTC_OFFSET_DAY) & 0x000000FFU) + +/** + * @} + */ + +/** @defgroup RTC_LL_EM_Time Time helper Macros + * @{ + */ + +/** + * @brief Helper macro to retrieve hour in BCD format + * @param __RTC_TIME__ RTC time returned by @ref LL_RTC_TIME_Get function + * @retval Hours in BCD format (0x01. . .0x12 or between Min_Data=0x00 and Max_Data=0x23) + */ +#define __LL_RTC_GET_HOUR(__RTC_TIME__) (((__RTC_TIME__) >> RTC_OFFSET_HOUR) & 0x000000FFU) + +/** + * @brief Helper macro to retrieve minute in BCD format + * @param __RTC_TIME__ RTC time returned by @ref LL_RTC_TIME_Get function + * @retval Minutes in BCD format (0x00. . .0x59) + */ +#define __LL_RTC_GET_MINUTE(__RTC_TIME__) (((__RTC_TIME__) >> RTC_OFFSET_MINUTE) & 0x000000FFU) + +/** + * @brief Helper macro to retrieve second in BCD format + * @param __RTC_TIME__ RTC time returned by @ref LL_RTC_TIME_Get function + * @retval Seconds in format (0x00. . .0x59) + */ +#define __LL_RTC_GET_SECOND(__RTC_TIME__) ((__RTC_TIME__) & 0x000000FFU) + +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup RTC_LL_Exported_Functions RTC Exported Functions + * @{ + */ + +/** @defgroup RTC_LL_EF_Configuration Configuration + * @{ + */ + +/** + * @brief Set Hours format (24 hour/day or AM/PM hour format) + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. 
+ * @note It can be written in initialization mode only (@ref LL_RTC_EnableInitMode function) + * @rmtoll CR FMT LL_RTC_SetHourFormat + * @param RTCx RTC Instance + * @param HourFormat This parameter can be one of the following values: + * @arg @ref LL_RTC_HOURFORMAT_24HOUR + * @arg @ref LL_RTC_HOURFORMAT_AMPM + * @retval None + */ +__STATIC_INLINE void LL_RTC_SetHourFormat(RTC_TypeDef *RTCx, uint32_t HourFormat) +{ + MODIFY_REG(RTCx->CR, RTC_CR_FMT, HourFormat); +} + +/** + * @brief Get Hours format (24 hour/day or AM/PM hour format) + * @rmtoll CR FMT LL_RTC_GetHourFormat + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_HOURFORMAT_24HOUR + * @arg @ref LL_RTC_HOURFORMAT_AMPM + */ +__STATIC_INLINE uint32_t LL_RTC_GetHourFormat(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->CR, RTC_CR_FMT)); +} + +/** + * @brief Select the flag to be routed to RTC_ALARM output + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR OSEL LL_RTC_SetAlarmOutEvent + * @param RTCx RTC Instance + * @param AlarmOutput This parameter can be one of the following values: + * @arg @ref LL_RTC_ALARMOUT_DISABLE + * @arg @ref LL_RTC_ALARMOUT_ALMA + * @arg @ref LL_RTC_ALARMOUT_ALMB + * @arg @ref LL_RTC_ALARMOUT_WAKEUP + * @retval None + */ +__STATIC_INLINE void LL_RTC_SetAlarmOutEvent(RTC_TypeDef *RTCx, uint32_t AlarmOutput) +{ + MODIFY_REG(RTCx->CR, RTC_CR_OSEL, AlarmOutput); +} + +/** + * @brief Get the flag to be routed to RTC_ALARM output + * @rmtoll CR OSEL LL_RTC_GetAlarmOutEvent + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_ALARMOUT_DISABLE + * @arg @ref LL_RTC_ALARMOUT_ALMA + * @arg @ref LL_RTC_ALARMOUT_ALMB + * @arg @ref LL_RTC_ALARMOUT_WAKEUP + */ +__STATIC_INLINE uint32_t LL_RTC_GetAlarmOutEvent(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->CR, RTC_CR_OSEL)); +} + +/** + * @brief Set RTC_ALARM output type (ALARM in push-pull or open-drain output) + * @note Used only when RTC_ALARM is mapped on PC13 + * @rmtoll TAFCR ALARMOUTTYPE LL_RTC_SetAlarmOutputType + * @param RTCx RTC Instance + * @param Output This parameter can be one of the following values: + * @arg @ref LL_RTC_ALARM_OUTPUTTYPE_OPENDRAIN + * @arg @ref LL_RTC_ALARM_OUTPUTTYPE_PUSHPULL + * @retval None + */ +__STATIC_INLINE void LL_RTC_SetAlarmOutputType(RTC_TypeDef *RTCx, uint32_t Output) +{ + MODIFY_REG(RTCx->TAFCR, RTC_TAFCR_ALARMOUTTYPE, Output); +} + +/** + * @brief Get RTC_ALARM output type (ALARM in push-pull or open-drain output) + * @note used only when RTC_ALARM is mapped on PC13 + * @rmtoll TAFCR ALARMOUTTYPE LL_RTC_GetAlarmOutputType + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_ALARM_OUTPUTTYPE_OPENDRAIN + * @arg @ref LL_RTC_ALARM_OUTPUTTYPE_PUSHPULL + */ +__STATIC_INLINE uint32_t LL_RTC_GetAlarmOutputType(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TAFCR, RTC_TAFCR_ALARMOUTTYPE)); +} + +/** + * @brief Enable initialization mode + * @note Initialization mode is used to program time and date register (RTC_TR and RTC_DR) + * and prescaler register (RTC_PRER). + * Counters are stopped and start counting from the new value when INIT is reset. 
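+  * @note   Illustrative calendar initialization sketch (not part of the original header),
+  *         assuming a 32.768 kHz LSE clock source and the INITF flag helper
+  *         LL_RTC_IsActiveFlag_INIT() defined further down in this file:
+  * @code
+  *         LL_RTC_DisableWriteProtection(RTC);
+  *         LL_RTC_EnableInitMode(RTC);
+  *         while (LL_RTC_IsActiveFlag_INIT(RTC) == 0U) { }       // wait until INITF is set
+  *         LL_RTC_SetAsynchPrescaler(RTC, 0x7FU);                // 32768 / (127+1) / (255+1)
+  *         LL_RTC_SetSynchPrescaler(RTC, 0xFFU);                 //   = 1 Hz calendar clock
+  *         LL_RTC_SetHourFormat(RTC, LL_RTC_HOURFORMAT_24HOUR);
+  *         LL_RTC_TIME_Config(RTC, LL_RTC_TIME_FORMAT_AM_OR_24, 0x12U, 0x00U, 0x00U);
+  *         LL_RTC_DATE_Config(RTC, LL_RTC_WEEKDAY_MONDAY, 0x01U, LL_RTC_MONTH_JANUARY, 0x24U);
+  *         LL_RTC_DisableInitMode(RTC);
+  *         LL_RTC_EnableWriteProtection(RTC);
+  * @endcode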
+ * @rmtoll ISR INIT LL_RTC_EnableInitMode + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_EnableInitMode(RTC_TypeDef *RTCx) +{ + /* Set the Initialization mode */ + WRITE_REG(RTCx->ISR, RTC_INIT_MASK); +} + +/** + * @brief Disable initialization mode (Free running mode) + * @rmtoll ISR INIT LL_RTC_DisableInitMode + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_DisableInitMode(RTC_TypeDef *RTCx) +{ + /* Exit Initialization mode */ + WRITE_REG(RTCx->ISR, (uint32_t)~RTC_ISR_INIT); +} + +/** + * @brief Set Output polarity (pin is low when ALRAF/ALRBF/WUTF is asserted) + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR POL LL_RTC_SetOutputPolarity + * @param RTCx RTC Instance + * @param Polarity This parameter can be one of the following values: + * @arg @ref LL_RTC_OUTPUTPOLARITY_PIN_HIGH + * @arg @ref LL_RTC_OUTPUTPOLARITY_PIN_LOW + * @retval None + */ +__STATIC_INLINE void LL_RTC_SetOutputPolarity(RTC_TypeDef *RTCx, uint32_t Polarity) +{ + MODIFY_REG(RTCx->CR, RTC_CR_POL, Polarity); +} + +/** + * @brief Get Output polarity + * @rmtoll CR POL LL_RTC_GetOutputPolarity + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_OUTPUTPOLARITY_PIN_HIGH + * @arg @ref LL_RTC_OUTPUTPOLARITY_PIN_LOW + */ +__STATIC_INLINE uint32_t LL_RTC_GetOutputPolarity(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->CR, RTC_CR_POL)); +} + +/** + * @brief Enable Bypass the shadow registers + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR BYPSHAD LL_RTC_EnableShadowRegBypass + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_EnableShadowRegBypass(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->CR, RTC_CR_BYPSHAD); +} + +/** + * @brief Disable Bypass the shadow registers + * @rmtoll CR BYPSHAD LL_RTC_DisableShadowRegBypass + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_DisableShadowRegBypass(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->CR, RTC_CR_BYPSHAD); +} + +/** + * @brief Check if Shadow registers bypass is enabled or not. + * @rmtoll CR BYPSHAD LL_RTC_IsShadowRegBypassEnabled + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsShadowRegBypassEnabled(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->CR, RTC_CR_BYPSHAD) == (RTC_CR_BYPSHAD)) ? 1UL : 0UL); +} + +/** + * @brief Enable RTC_REFIN reference clock detection (50 or 60 Hz) + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @note It can be written in initialization mode only (@ref LL_RTC_EnableInitMode function) + * @rmtoll CR REFCKON LL_RTC_EnableRefClock + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_EnableRefClock(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->CR, RTC_CR_REFCKON); +} + +/** + * @brief Disable RTC_REFIN reference clock detection (50 or 60 Hz) + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. 
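+  * @note   Minimal usage sketch (not part of the original header), combining the
+  *         write-protection and initialization-mode requirements noted for this bit:
+  * @code
+  *         LL_RTC_DisableWriteProtection(RTC);
+  *         LL_RTC_EnableInitMode(RTC);
+  *         LL_RTC_DisableRefClock(RTC);
+  *         LL_RTC_DisableInitMode(RTC);
+  *         LL_RTC_EnableWriteProtection(RTC);
+  * @endcode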
+ * @note It can be written in initialization mode only (@ref LL_RTC_EnableInitMode function) + * @rmtoll CR REFCKON LL_RTC_DisableRefClock + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_DisableRefClock(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->CR, RTC_CR_REFCKON); +} + +/** + * @brief Set Asynchronous prescaler factor + * @rmtoll PRER PREDIV_A LL_RTC_SetAsynchPrescaler + * @param RTCx RTC Instance + * @param AsynchPrescaler Value between Min_Data = 0 and Max_Data = 0x7F + * @retval None + */ +__STATIC_INLINE void LL_RTC_SetAsynchPrescaler(RTC_TypeDef *RTCx, uint32_t AsynchPrescaler) +{ + MODIFY_REG(RTCx->PRER, RTC_PRER_PREDIV_A, AsynchPrescaler << RTC_PRER_PREDIV_A_Pos); +} + +/** + * @brief Set Synchronous prescaler factor + * @rmtoll PRER PREDIV_S LL_RTC_SetSynchPrescaler + * @param RTCx RTC Instance + * @param SynchPrescaler Value between Min_Data = 0 and Max_Data = 0x7FFF + * @retval None + */ +__STATIC_INLINE void LL_RTC_SetSynchPrescaler(RTC_TypeDef *RTCx, uint32_t SynchPrescaler) +{ + MODIFY_REG(RTCx->PRER, RTC_PRER_PREDIV_S, SynchPrescaler); +} + +/** + * @brief Get Asynchronous prescaler factor + * @rmtoll PRER PREDIV_A LL_RTC_GetAsynchPrescaler + * @param RTCx RTC Instance + * @retval Value between Min_Data = 0 and Max_Data = 0x7F + */ +__STATIC_INLINE uint32_t LL_RTC_GetAsynchPrescaler(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->PRER, RTC_PRER_PREDIV_A) >> RTC_PRER_PREDIV_A_Pos); +} + +/** + * @brief Get Synchronous prescaler factor + * @rmtoll PRER PREDIV_S LL_RTC_GetSynchPrescaler + * @param RTCx RTC Instance + * @retval Value between Min_Data = 0 and Max_Data = 0x7FFF + */ +__STATIC_INLINE uint32_t LL_RTC_GetSynchPrescaler(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->PRER, RTC_PRER_PREDIV_S)); +} + +/** + * @brief Enable the write protection for RTC registers. + * @rmtoll WPR KEY LL_RTC_EnableWriteProtection + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_EnableWriteProtection(RTC_TypeDef *RTCx) +{ + WRITE_REG(RTCx->WPR, RTC_WRITE_PROTECTION_DISABLE); +} + +/** + * @brief Disable the write protection for RTC registers. + * @rmtoll WPR KEY LL_RTC_DisableWriteProtection + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_DisableWriteProtection(RTC_TypeDef *RTCx) +{ + WRITE_REG(RTCx->WPR, RTC_WRITE_PROTECTION_ENABLE_1); + WRITE_REG(RTCx->WPR, RTC_WRITE_PROTECTION_ENABLE_2); +} + +/** + * @} + */ + +/** @defgroup RTC_LL_EF_Time Time + * @{ + */ + +/** + * @brief Set time format (AM/24-hour or PM notation) + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @note It can be written in initialization mode only (@ref LL_RTC_EnableInitMode function) + * @rmtoll TR PM LL_RTC_TIME_SetFormat + * @param RTCx RTC Instance + * @param TimeFormat This parameter can be one of the following values: + * @arg @ref LL_RTC_TIME_FORMAT_AM_OR_24 + * @arg @ref LL_RTC_TIME_FORMAT_PM + * @retval None + */ +__STATIC_INLINE void LL_RTC_TIME_SetFormat(RTC_TypeDef *RTCx, uint32_t TimeFormat) +{ + MODIFY_REG(RTCx->TR, RTC_TR_PM, TimeFormat); +} + +/** + * @brief Get time format (AM or PM notation) + * @note if RTC shadow registers are not bypassed (BYPSHAD=0), need to check if RSF flag is set + * before reading this bit + * @note Read either RTC_SSR or RTC_TR locks the values in the higher-order calendar + * shadow registers until RTC_DR is read (LL_RTC_ReadReg(RTC, DR)). 
+ * @rmtoll TR PM LL_RTC_TIME_GetFormat + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_TIME_FORMAT_AM_OR_24 + * @arg @ref LL_RTC_TIME_FORMAT_PM + */ +__STATIC_INLINE uint32_t LL_RTC_TIME_GetFormat(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TR, RTC_TR_PM)); +} + +/** + * @brief Set Hours in BCD format + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @note It can be written in initialization mode only (@ref LL_RTC_EnableInitMode function) + * @note helper macro __LL_RTC_CONVERT_BIN2BCD is available to convert hour from binary to BCD format + * @rmtoll TR HT LL_RTC_TIME_SetHour\n + * TR HU LL_RTC_TIME_SetHour + * @param RTCx RTC Instance + * @param Hours Value between Min_Data=0x01 and Max_Data=0x12 or between Min_Data=0x00 and Max_Data=0x23 + * @retval None + */ +__STATIC_INLINE void LL_RTC_TIME_SetHour(RTC_TypeDef *RTCx, uint32_t Hours) +{ + MODIFY_REG(RTCx->TR, (RTC_TR_HT | RTC_TR_HU), + (((Hours & 0xF0U) << (RTC_TR_HT_Pos - 4U)) | ((Hours & 0x0FU) << RTC_TR_HU_Pos))); +} + +/** + * @brief Get Hours in BCD format + * @note if RTC shadow registers are not bypassed (BYPSHAD=0), need to check if RSF flag is set + * before reading this bit + * @note Read either RTC_SSR or RTC_TR locks the values in the higher-order calendar + * shadow registers until RTC_DR is read (LL_RTC_ReadReg(RTC, DR)). + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert hour from BCD to + * Binary format + * @rmtoll TR HT LL_RTC_TIME_GetHour\n + * TR HU LL_RTC_TIME_GetHour + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x01 and Max_Data=0x12 or between Min_Data=0x00 and Max_Data=0x23 + */ +__STATIC_INLINE uint32_t LL_RTC_TIME_GetHour(RTC_TypeDef *RTCx) +{ + return (uint32_t)((READ_BIT(RTCx->TR, (RTC_TR_HT | RTC_TR_HU))) >> RTC_TR_HU_Pos); +} + +/** + * @brief Set Minutes in BCD format + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @note It can be written in initialization mode only (@ref LL_RTC_EnableInitMode function) + * @note helper macro __LL_RTC_CONVERT_BIN2BCD is available to convert Minutes from binary to BCD format + * @rmtoll TR MNT LL_RTC_TIME_SetMinute\n + * TR MNU LL_RTC_TIME_SetMinute + * @param RTCx RTC Instance + * @param Minutes Value between Min_Data=0x00 and Max_Data=0x59 + * @retval None + */ +__STATIC_INLINE void LL_RTC_TIME_SetMinute(RTC_TypeDef *RTCx, uint32_t Minutes) +{ + MODIFY_REG(RTCx->TR, (RTC_TR_MNT | RTC_TR_MNU), + (((Minutes & 0xF0U) << (RTC_TR_MNT_Pos - 4U)) | ((Minutes & 0x0FU) << RTC_TR_MNU_Pos))); +} + +/** + * @brief Get Minutes in BCD format + * @note if RTC shadow registers are not bypassed (BYPSHAD=0), need to check if RSF flag is set + * before reading this bit + * @note Read either RTC_SSR or RTC_TR locks the values in the higher-order calendar + * shadow registers until RTC_DR is read (LL_RTC_ReadReg(RTC, DR)). + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert minute from BCD + * to Binary format + * @rmtoll TR MNT LL_RTC_TIME_GetMinute\n + * TR MNU LL_RTC_TIME_GetMinute + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0x59 + */ +__STATIC_INLINE uint32_t LL_RTC_TIME_GetMinute(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TR, (RTC_TR_MNT | RTC_TR_MNU)) >> RTC_TR_MNU_Pos); +} + +/** + * @brief Set Seconds in BCD format + * @note Bit is write-protected. 
@ref LL_RTC_DisableWriteProtection function should be called before. + * @note It can be written in initialization mode only (@ref LL_RTC_EnableInitMode function) + * @note helper macro __LL_RTC_CONVERT_BIN2BCD is available to convert Seconds from binary to BCD format + * @rmtoll TR ST LL_RTC_TIME_SetSecond\n + * TR SU LL_RTC_TIME_SetSecond + * @param RTCx RTC Instance + * @param Seconds Value between Min_Data=0x00 and Max_Data=0x59 + * @retval None + */ +__STATIC_INLINE void LL_RTC_TIME_SetSecond(RTC_TypeDef *RTCx, uint32_t Seconds) +{ + MODIFY_REG(RTCx->TR, (RTC_TR_ST | RTC_TR_SU), + (((Seconds & 0xF0U) << (RTC_TR_ST_Pos - 4U)) | ((Seconds & 0x0FU) << RTC_TR_SU_Pos))); +} + +/** + * @brief Get Seconds in BCD format + * @note if RTC shadow registers are not bypassed (BYPSHAD=0), need to check if RSF flag is set + * before reading this bit + * @note Read either RTC_SSR or RTC_TR locks the values in the higher-order calendar + * shadow registers until RTC_DR is read (LL_RTC_ReadReg(RTC, DR)). + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Seconds from BCD + * to Binary format + * @rmtoll TR ST LL_RTC_TIME_GetSecond\n + * TR SU LL_RTC_TIME_GetSecond + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0x59 + */ +__STATIC_INLINE uint32_t LL_RTC_TIME_GetSecond(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TR, (RTC_TR_ST | RTC_TR_SU)) >> RTC_TR_SU_Pos); +} + +/** + * @brief Set time (hour, minute and second) in BCD format + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @note It can be written in initialization mode only (@ref LL_RTC_EnableInitMode function) + * @note TimeFormat and Hours should follow the same format + * @rmtoll TR PM LL_RTC_TIME_Config\n + * TR HT LL_RTC_TIME_Config\n + * TR HU LL_RTC_TIME_Config\n + * TR MNT LL_RTC_TIME_Config\n + * TR MNU LL_RTC_TIME_Config\n + * TR ST LL_RTC_TIME_Config\n + * TR SU LL_RTC_TIME_Config + * @param RTCx RTC Instance + * @param Format12_24 This parameter can be one of the following values: + * @arg @ref LL_RTC_TIME_FORMAT_AM_OR_24 + * @arg @ref LL_RTC_TIME_FORMAT_PM + * @param Hours Value between Min_Data=0x01 and Max_Data=0x12 or between Min_Data=0x00 and Max_Data=0x23 + * @param Minutes Value between Min_Data=0x00 and Max_Data=0x59 + * @param Seconds Value between Min_Data=0x00 and Max_Data=0x59 + * @retval None + */ +__STATIC_INLINE void LL_RTC_TIME_Config(RTC_TypeDef *RTCx, uint32_t Format12_24, uint32_t Hours, uint32_t Minutes, uint32_t Seconds) +{ + uint32_t temp; + + temp = Format12_24 | \ + (((Hours & 0xF0U) << (RTC_TR_HT_Pos - 4U)) | ((Hours & 0x0FU) << RTC_TR_HU_Pos)) | \ + (((Minutes & 0xF0U) << (RTC_TR_MNT_Pos - 4U)) | ((Minutes & 0x0FU) << RTC_TR_MNU_Pos)) | \ + (((Seconds & 0xF0U) << (RTC_TR_ST_Pos - 4U)) | ((Seconds & 0x0FU) << RTC_TR_SU_Pos)); + MODIFY_REG(RTCx->TR, (RTC_TR_PM | RTC_TR_HT | RTC_TR_HU | RTC_TR_MNT | RTC_TR_MNU | RTC_TR_ST | RTC_TR_SU), temp); +} + +/** + * @brief Get time (hour, minute and second) in BCD format + * @note if RTC shadow registers are not bypassed (BYPSHAD=0), need to check if RSF flag is set + * before reading this bit + * @note Read either RTC_SSR or RTC_TR locks the values in the higher-order calendar + * shadow registers until RTC_DR is read (LL_RTC_ReadReg(RTC, DR)). + * @note helper macros __LL_RTC_GET_HOUR, __LL_RTC_GET_MINUTE and __LL_RTC_GET_SECOND + * are available to get independently each parameter. 
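+  * @note   Illustrative read sketch (not part of the original header): RTC_DR is read
+  *         after RTC_TR so that the calendar shadow registers are unlocked again, and the
+  *         packed value is decoded with the helper macros. Variable names are placeholders.
+  * @code
+  *         uint32_t time    = LL_RTC_TIME_Get(RTC);              // locks the shadow registers
+  *         uint32_t date    = LL_RTC_DATE_Get(RTC);              // reads DR and unlocks them
+  *         uint32_t hours   = __LL_RTC_CONVERT_BCD2BIN(__LL_RTC_GET_HOUR(time));
+  *         uint32_t minutes = __LL_RTC_CONVERT_BCD2BIN(__LL_RTC_GET_MINUTE(time));
+  *         uint32_t seconds = __LL_RTC_CONVERT_BCD2BIN(__LL_RTC_GET_SECOND(time));
+  *         (void)date;                                           // decode with __LL_RTC_GET_DAY/... as needed
+  * @endcode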
+ * @rmtoll TR HT LL_RTC_TIME_Get\n + * TR HU LL_RTC_TIME_Get\n + * TR MNT LL_RTC_TIME_Get\n + * TR MNU LL_RTC_TIME_Get\n + * TR ST LL_RTC_TIME_Get\n + * TR SU LL_RTC_TIME_Get + * @param RTCx RTC Instance + * @retval Combination of hours, minutes and seconds (Format: 0x00HHMMSS). + */ +__STATIC_INLINE uint32_t LL_RTC_TIME_Get(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TR, (RTC_TR_HT | RTC_TR_HU | RTC_TR_MNT | RTC_TR_MNU | RTC_TR_ST | RTC_TR_SU))); +} + +/** + * @brief Memorize whether the daylight saving time change has been performed + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR BKP LL_RTC_TIME_EnableDayLightStore + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_TIME_EnableDayLightStore(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->CR, RTC_CR_BKP); +} + +/** + * @brief Disable memorization whether the daylight saving time change has been performed. + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR BKP LL_RTC_TIME_DisableDayLightStore + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_TIME_DisableDayLightStore(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->CR, RTC_CR_BKP); +} + +/** + * @brief Check if RTC Day Light Saving stored operation has been enabled or not + * @rmtoll CR BKP LL_RTC_TIME_IsDayLightStoreEnabled + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_TIME_IsDayLightStoreEnabled(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->CR, RTC_CR_BKP) == (RTC_CR_BKP)) ? 1UL : 0UL); +} + +/** + * @brief Subtract 1 hour (winter time change) + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR SUB1H LL_RTC_TIME_DecHour + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_TIME_DecHour(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->CR, RTC_CR_SUB1H); +} + +/** + * @brief Add 1 hour (summer time change) + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR ADD1H LL_RTC_TIME_IncHour + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_TIME_IncHour(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->CR, RTC_CR_ADD1H); +} + +/** + * @brief Get subseconds value in the synchronous prescaler counter. + * @note You can use both SubSeconds value and SecondFraction (PREDIV_S through + * LL_RTC_GetSynchPrescaler function) terms returned to convert Calendar + * SubSeconds value in second fraction ratio with time unit following + * generic formula: + * ==> Seconds fraction ratio * time_unit = + * [(SecondFraction-SubSeconds)/(SecondFraction+1)] * time_unit + * This conversion can be performed only if no shift operation is pending + * (ie. SHFP=0) when PREDIV_S >= SS. + * @rmtoll SSR SS LL_RTC_TIME_GetSubSecond + * @param RTCx RTC Instance + * @retval Subseconds value (number between 0 and 65535) + */ +__STATIC_INLINE uint32_t LL_RTC_TIME_GetSubSecond(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->SSR, RTC_SSR_SS)); +} + +/** + * @brief Synchronize to a remote clock with a high degree of precision. + * @note This operation effectively subtracts from (delays) or advance the clock of a fraction of a second. + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @note When REFCKON is set, firmware must not write to Shift control register. 
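+  * @note   Illustrative shift sketch (not part of the original header): with PREDIV_S = 255,
+  *         writing SUBFS = 128 delays the calendar by 128 / (255 + 1) = 0.5 second. Any
+  *         pending shift operation (SHPF flag, managed elsewhere in this file) is assumed
+  *         to have completed before the write.
+  * @code
+  *         LL_RTC_DisableWriteProtection(RTC);
+  *         LL_RTC_TIME_Synchronize(RTC, LL_RTC_SHIFT_SECOND_DELAY, 128U);
+  *         LL_RTC_EnableWriteProtection(RTC);
+  * @endcode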
+ * @rmtoll SHIFTR ADD1S LL_RTC_TIME_Synchronize\n + * SHIFTR SUBFS LL_RTC_TIME_Synchronize + * @param RTCx RTC Instance + * @param ShiftSecond This parameter can be one of the following values: + * @arg @ref LL_RTC_SHIFT_SECOND_DELAY + * @arg @ref LL_RTC_SHIFT_SECOND_ADVANCE + * @param Fraction Number of Seconds Fractions (any value from 0 to 0x7FFF) + * @retval None + */ +__STATIC_INLINE void LL_RTC_TIME_Synchronize(RTC_TypeDef *RTCx, uint32_t ShiftSecond, uint32_t Fraction) +{ + WRITE_REG(RTCx->SHIFTR, ShiftSecond | Fraction); +} + +/** + * @} + */ + +/** @defgroup RTC_LL_EF_Date Date + * @{ + */ + +/** + * @brief Set Year in BCD format + * @note helper macro __LL_RTC_CONVERT_BIN2BCD is available to convert Year from binary to BCD format + * @rmtoll DR YT LL_RTC_DATE_SetYear\n + * DR YU LL_RTC_DATE_SetYear + * @param RTCx RTC Instance + * @param Year Value between Min_Data=0x00 and Max_Data=0x99 + * @retval None + */ +__STATIC_INLINE void LL_RTC_DATE_SetYear(RTC_TypeDef *RTCx, uint32_t Year) +{ + MODIFY_REG(RTCx->DR, (RTC_DR_YT | RTC_DR_YU), + (((Year & 0xF0U) << (RTC_DR_YT_Pos - 4U)) | ((Year & 0x0FU) << RTC_DR_YU_Pos))); +} + +/** + * @brief Get Year in BCD format + * @note if RTC shadow registers are not bypassed (BYPSHAD=0), need to check if RSF flag is set + * before reading this bit + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Year from BCD to Binary format + * @rmtoll DR YT LL_RTC_DATE_GetYear\n + * DR YU LL_RTC_DATE_GetYear + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0x99 + */ +__STATIC_INLINE uint32_t LL_RTC_DATE_GetYear(RTC_TypeDef *RTCx) +{ + return (uint32_t)((READ_BIT(RTCx->DR, (RTC_DR_YT | RTC_DR_YU))) >> RTC_DR_YU_Pos); +} + +/** + * @brief Set Week day + * @rmtoll DR WDU LL_RTC_DATE_SetWeekDay + * @param RTCx RTC Instance + * @param WeekDay This parameter can be one of the following values: + * @arg @ref LL_RTC_WEEKDAY_MONDAY + * @arg @ref LL_RTC_WEEKDAY_TUESDAY + * @arg @ref LL_RTC_WEEKDAY_WEDNESDAY + * @arg @ref LL_RTC_WEEKDAY_THURSDAY + * @arg @ref LL_RTC_WEEKDAY_FRIDAY + * @arg @ref LL_RTC_WEEKDAY_SATURDAY + * @arg @ref LL_RTC_WEEKDAY_SUNDAY + * @retval None + */ +__STATIC_INLINE void LL_RTC_DATE_SetWeekDay(RTC_TypeDef *RTCx, uint32_t WeekDay) +{ + MODIFY_REG(RTCx->DR, RTC_DR_WDU, WeekDay << RTC_DR_WDU_Pos); +} + +/** + * @brief Get Week day + * @note if RTC shadow registers are not bypassed (BYPSHAD=0), need to check if RSF flag is set + * before reading this bit + * @rmtoll DR WDU LL_RTC_DATE_GetWeekDay + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_WEEKDAY_MONDAY + * @arg @ref LL_RTC_WEEKDAY_TUESDAY + * @arg @ref LL_RTC_WEEKDAY_WEDNESDAY + * @arg @ref LL_RTC_WEEKDAY_THURSDAY + * @arg @ref LL_RTC_WEEKDAY_FRIDAY + * @arg @ref LL_RTC_WEEKDAY_SATURDAY + * @arg @ref LL_RTC_WEEKDAY_SUNDAY + */ +__STATIC_INLINE uint32_t LL_RTC_DATE_GetWeekDay(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->DR, RTC_DR_WDU) >> RTC_DR_WDU_Pos); +} + +/** + * @brief Set Month in BCD format + * @note helper macro __LL_RTC_CONVERT_BIN2BCD is available to convert Month from binary to BCD format + * @rmtoll DR MT LL_RTC_DATE_SetMonth\n + * DR MU LL_RTC_DATE_SetMonth + * @param RTCx RTC Instance + * @param Month This parameter can be one of the following values: + * @arg @ref LL_RTC_MONTH_JANUARY + * @arg @ref LL_RTC_MONTH_FEBRUARY + * @arg @ref LL_RTC_MONTH_MARCH + * @arg @ref LL_RTC_MONTH_APRIL + * @arg @ref LL_RTC_MONTH_MAY + * @arg @ref LL_RTC_MONTH_JUNE 
+ * @arg @ref LL_RTC_MONTH_JULY + * @arg @ref LL_RTC_MONTH_AUGUST + * @arg @ref LL_RTC_MONTH_SEPTEMBER + * @arg @ref LL_RTC_MONTH_OCTOBER + * @arg @ref LL_RTC_MONTH_NOVEMBER + * @arg @ref LL_RTC_MONTH_DECEMBER + * @retval None + */ +__STATIC_INLINE void LL_RTC_DATE_SetMonth(RTC_TypeDef *RTCx, uint32_t Month) +{ + MODIFY_REG(RTCx->DR, (RTC_DR_MT | RTC_DR_MU), + (((Month & 0xF0U) << (RTC_DR_MT_Pos - 4U)) | ((Month & 0x0FU) << RTC_DR_MU_Pos))); +} + +/** + * @brief Get Month in BCD format + * @note if RTC shadow registers are not bypassed (BYPSHAD=0), need to check if RSF flag is set + * before reading this bit + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Month from BCD to Binary format + * @rmtoll DR MT LL_RTC_DATE_GetMonth\n + * DR MU LL_RTC_DATE_GetMonth + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_MONTH_JANUARY + * @arg @ref LL_RTC_MONTH_FEBRUARY + * @arg @ref LL_RTC_MONTH_MARCH + * @arg @ref LL_RTC_MONTH_APRIL + * @arg @ref LL_RTC_MONTH_MAY + * @arg @ref LL_RTC_MONTH_JUNE + * @arg @ref LL_RTC_MONTH_JULY + * @arg @ref LL_RTC_MONTH_AUGUST + * @arg @ref LL_RTC_MONTH_SEPTEMBER + * @arg @ref LL_RTC_MONTH_OCTOBER + * @arg @ref LL_RTC_MONTH_NOVEMBER + * @arg @ref LL_RTC_MONTH_DECEMBER + */ +__STATIC_INLINE uint32_t LL_RTC_DATE_GetMonth(RTC_TypeDef *RTCx) +{ + return (uint32_t)((READ_BIT(RTCx->DR, (RTC_DR_MT | RTC_DR_MU))) >> RTC_DR_MU_Pos); +} + +/** + * @brief Set Day in BCD format + * @note helper macro __LL_RTC_CONVERT_BIN2BCD is available to convert Day from binary to BCD format + * @rmtoll DR DT LL_RTC_DATE_SetDay\n + * DR DU LL_RTC_DATE_SetDay + * @param RTCx RTC Instance + * @param Day Value between Min_Data=0x01 and Max_Data=0x31 + * @retval None + */ +__STATIC_INLINE void LL_RTC_DATE_SetDay(RTC_TypeDef *RTCx, uint32_t Day) +{ + MODIFY_REG(RTCx->DR, (RTC_DR_DT | RTC_DR_DU), + (((Day & 0xF0U) << (RTC_DR_DT_Pos - 4U)) | ((Day & 0x0FU) << RTC_DR_DU_Pos))); +} + +/** + * @brief Get Day in BCD format + * @note if RTC shadow registers are not bypassed (BYPSHAD=0), need to check if RSF flag is set + * before reading this bit + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Day from BCD to Binary format + * @rmtoll DR DT LL_RTC_DATE_GetDay\n + * DR DU LL_RTC_DATE_GetDay + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x01 and Max_Data=0x31 + */ +__STATIC_INLINE uint32_t LL_RTC_DATE_GetDay(RTC_TypeDef *RTCx) +{ + return (uint32_t)((READ_BIT(RTCx->DR, (RTC_DR_DT | RTC_DR_DU))) >> RTC_DR_DU_Pos); +} + +/** + * @brief Set date (WeekDay, Day, Month and Year) in BCD format + * @rmtoll DR WDU LL_RTC_DATE_Config\n + * DR MT LL_RTC_DATE_Config\n + * DR MU LL_RTC_DATE_Config\n + * DR DT LL_RTC_DATE_Config\n + * DR DU LL_RTC_DATE_Config\n + * DR YT LL_RTC_DATE_Config\n + * DR YU LL_RTC_DATE_Config + * @param RTCx RTC Instance + * @param WeekDay This parameter can be one of the following values: + * @arg @ref LL_RTC_WEEKDAY_MONDAY + * @arg @ref LL_RTC_WEEKDAY_TUESDAY + * @arg @ref LL_RTC_WEEKDAY_WEDNESDAY + * @arg @ref LL_RTC_WEEKDAY_THURSDAY + * @arg @ref LL_RTC_WEEKDAY_FRIDAY + * @arg @ref LL_RTC_WEEKDAY_SATURDAY + * @arg @ref LL_RTC_WEEKDAY_SUNDAY + * @param Day Value between Min_Data=0x01 and Max_Data=0x31 + * @param Month This parameter can be one of the following values: + * @arg @ref LL_RTC_MONTH_JANUARY + * @arg @ref LL_RTC_MONTH_FEBRUARY + * @arg @ref LL_RTC_MONTH_MARCH + * @arg @ref LL_RTC_MONTH_APRIL + * @arg @ref LL_RTC_MONTH_MAY + * @arg @ref LL_RTC_MONTH_JUNE + 
* @arg @ref LL_RTC_MONTH_JULY + * @arg @ref LL_RTC_MONTH_AUGUST + * @arg @ref LL_RTC_MONTH_SEPTEMBER + * @arg @ref LL_RTC_MONTH_OCTOBER + * @arg @ref LL_RTC_MONTH_NOVEMBER + * @arg @ref LL_RTC_MONTH_DECEMBER + * @param Year Value between Min_Data=0x00 and Max_Data=0x99 + * @retval None + */ +__STATIC_INLINE void LL_RTC_DATE_Config(RTC_TypeDef *RTCx, uint32_t WeekDay, uint32_t Day, uint32_t Month, uint32_t Year) +{ + uint32_t temp; + + temp = ( WeekDay << RTC_DR_WDU_Pos) | \ + (((Year & 0xF0U) << (RTC_DR_YT_Pos - 4U)) | ((Year & 0x0FU) << RTC_DR_YU_Pos)) | \ + (((Month & 0xF0U) << (RTC_DR_MT_Pos - 4U)) | ((Month & 0x0FU) << RTC_DR_MU_Pos)) | \ + (((Day & 0xF0U) << (RTC_DR_DT_Pos - 4U)) | ((Day & 0x0FU) << RTC_DR_DU_Pos)); + + MODIFY_REG(RTCx->DR, (RTC_DR_WDU | RTC_DR_MT | RTC_DR_MU | RTC_DR_DT | RTC_DR_DU | RTC_DR_YT | RTC_DR_YU), temp); +} + +/** + * @brief Get date (WeekDay, Day, Month and Year) in BCD format + * @note if RTC shadow registers are not bypassed (BYPSHAD=0), need to check if RSF flag is set + * before reading this bit + * @note helper macros __LL_RTC_GET_WEEKDAY, __LL_RTC_GET_YEAR, __LL_RTC_GET_MONTH, + * and __LL_RTC_GET_DAY are available to get independently each parameter. + * @rmtoll DR WDU LL_RTC_DATE_Get\n + * DR MT LL_RTC_DATE_Get\n + * DR MU LL_RTC_DATE_Get\n + * DR DT LL_RTC_DATE_Get\n + * DR DU LL_RTC_DATE_Get\n + * DR YT LL_RTC_DATE_Get\n + * DR YU LL_RTC_DATE_Get + * @param RTCx RTC Instance + * @retval Combination of WeekDay, Day, Month and Year (Format: 0xWWDDMMYY). + */ +__STATIC_INLINE uint32_t LL_RTC_DATE_Get(RTC_TypeDef *RTCx) +{ + uint32_t temp; + + temp = READ_BIT(RTCx->DR, (RTC_DR_WDU | RTC_DR_MT | RTC_DR_MU | RTC_DR_DT | RTC_DR_DU | RTC_DR_YT | RTC_DR_YU)); + + return (uint32_t)((((temp & RTC_DR_WDU) >> RTC_DR_WDU_Pos) << RTC_OFFSET_WEEKDAY) | \ + (((temp & (RTC_DR_DT | RTC_DR_DU)) >> RTC_DR_DU_Pos) << RTC_OFFSET_DAY) | \ + (((temp & (RTC_DR_MT | RTC_DR_MU)) >> RTC_DR_MU_Pos) << RTC_OFFSET_MONTH) | \ + ((temp & (RTC_DR_YT | RTC_DR_YU)) >> RTC_DR_YU_Pos)); +} + +/** + * @} + */ + +/** @defgroup RTC_LL_EF_ALARMA ALARMA + * @{ + */ + +/** + * @brief Enable Alarm A + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR ALRAE LL_RTC_ALMA_Enable + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMA_Enable(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->CR, RTC_CR_ALRAE); +} + +/** + * @brief Disable Alarm A + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR ALRAE LL_RTC_ALMA_Disable + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMA_Disable(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->CR, RTC_CR_ALRAE); +} + +/** + * @brief Specify the Alarm A masks. + * @rmtoll ALRMAR MSK4 LL_RTC_ALMA_SetMask\n + * ALRMAR MSK3 LL_RTC_ALMA_SetMask\n + * ALRMAR MSK2 LL_RTC_ALMA_SetMask\n + * ALRMAR MSK1 LL_RTC_ALMA_SetMask + * @param RTCx RTC Instance + * @param Mask This parameter can be a combination of the following values: + * @arg @ref LL_RTC_ALMA_MASK_NONE + * @arg @ref LL_RTC_ALMA_MASK_DATEWEEKDAY + * @arg @ref LL_RTC_ALMA_MASK_HOURS + * @arg @ref LL_RTC_ALMA_MASK_MINUTES + * @arg @ref LL_RTC_ALMA_MASK_SECONDS + * @arg @ref LL_RTC_ALMA_MASK_ALL + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMA_SetMask(RTC_TypeDef *RTCx, uint32_t Mask) +{ + MODIFY_REG(RTCx->ALRMAR, RTC_ALRMAR_MSK4 | RTC_ALRMAR_MSK3 | RTC_ALRMAR_MSK2 | RTC_ALRMAR_MSK1, Mask); +} + +/** + * @brief Get the Alarm A masks. 
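+  * @note   Illustrative sketch (not part of the original header): an alarm that fires every
+  *         day at the programmed time is obtained by masking only the date/weekday field,
+  *         assuming write protection is removed and Alarm A is disabled beforehand:
+  * @code
+  *         LL_RTC_ALMA_SetMask(RTC, LL_RTC_ALMA_MASK_DATEWEEKDAY);
+  *         uint32_t mask = LL_RTC_ALMA_GetMask(RTC);             // == LL_RTC_ALMA_MASK_DATEWEEKDAY
+  * @endcode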
+ * @rmtoll ALRMAR MSK4 LL_RTC_ALMA_GetMask\n + * ALRMAR MSK3 LL_RTC_ALMA_GetMask\n + * ALRMAR MSK2 LL_RTC_ALMA_GetMask\n + * ALRMAR MSK1 LL_RTC_ALMA_GetMask + * @param RTCx RTC Instance + * @retval Returned value can be can be a combination of the following values: + * @arg @ref LL_RTC_ALMA_MASK_NONE + * @arg @ref LL_RTC_ALMA_MASK_DATEWEEKDAY + * @arg @ref LL_RTC_ALMA_MASK_HOURS + * @arg @ref LL_RTC_ALMA_MASK_MINUTES + * @arg @ref LL_RTC_ALMA_MASK_SECONDS + * @arg @ref LL_RTC_ALMA_MASK_ALL + */ +__STATIC_INLINE uint32_t LL_RTC_ALMA_GetMask(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->ALRMAR, RTC_ALRMAR_MSK4 | RTC_ALRMAR_MSK3 | RTC_ALRMAR_MSK2 | RTC_ALRMAR_MSK1)); +} + +/** + * @brief Enable AlarmA Week day selection (DU[3:0] represents the week day. DT[1:0] is do not care) + * @rmtoll ALRMAR WDSEL LL_RTC_ALMA_EnableWeekday + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMA_EnableWeekday(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->ALRMAR, RTC_ALRMAR_WDSEL); +} + +/** + * @brief Disable AlarmA Week day selection (DU[3:0] represents the date ) + * @rmtoll ALRMAR WDSEL LL_RTC_ALMA_DisableWeekday + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMA_DisableWeekday(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->ALRMAR, RTC_ALRMAR_WDSEL); +} + +/** + * @brief Set ALARM A Day in BCD format + * @note helper macro __LL_RTC_CONVERT_BIN2BCD is available to convert Day from binary to BCD format + * @rmtoll ALRMAR DT LL_RTC_ALMA_SetDay\n + * ALRMAR DU LL_RTC_ALMA_SetDay + * @param RTCx RTC Instance + * @param Day Value between Min_Data=0x01 and Max_Data=0x31 + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMA_SetDay(RTC_TypeDef *RTCx, uint32_t Day) +{ + MODIFY_REG(RTCx->ALRMAR, (RTC_ALRMAR_DT | RTC_ALRMAR_DU), + (((Day & 0xF0U) << (RTC_ALRMAR_DT_Pos - 4U)) | ((Day & 0x0FU) << RTC_ALRMAR_DU_Pos))); +} + +/** + * @brief Get ALARM A Day in BCD format + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Day from BCD to Binary format + * @rmtoll ALRMAR DT LL_RTC_ALMA_GetDay\n + * ALRMAR DU LL_RTC_ALMA_GetDay + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x01 and Max_Data=0x31 + */ +__STATIC_INLINE uint32_t LL_RTC_ALMA_GetDay(RTC_TypeDef *RTCx) +{ + return (uint32_t)((READ_BIT(RTCx->ALRMAR, (RTC_ALRMAR_DT | RTC_ALRMAR_DU))) >> RTC_ALRMAR_DU_Pos); +} + +/** + * @brief Set ALARM A Weekday + * @rmtoll ALRMAR DU LL_RTC_ALMA_SetWeekDay + * @param RTCx RTC Instance + * @param WeekDay This parameter can be one of the following values: + * @arg @ref LL_RTC_WEEKDAY_MONDAY + * @arg @ref LL_RTC_WEEKDAY_TUESDAY + * @arg @ref LL_RTC_WEEKDAY_WEDNESDAY + * @arg @ref LL_RTC_WEEKDAY_THURSDAY + * @arg @ref LL_RTC_WEEKDAY_FRIDAY + * @arg @ref LL_RTC_WEEKDAY_SATURDAY + * @arg @ref LL_RTC_WEEKDAY_SUNDAY + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMA_SetWeekDay(RTC_TypeDef *RTCx, uint32_t WeekDay) +{ + MODIFY_REG(RTCx->ALRMAR, RTC_ALRMAR_DU, WeekDay << RTC_ALRMAR_DU_Pos); +} + +/** + * @brief Get ALARM A Weekday + * @rmtoll ALRMAR DU LL_RTC_ALMA_GetWeekDay + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_WEEKDAY_MONDAY + * @arg @ref LL_RTC_WEEKDAY_TUESDAY + * @arg @ref LL_RTC_WEEKDAY_WEDNESDAY + * @arg @ref LL_RTC_WEEKDAY_THURSDAY + * @arg @ref LL_RTC_WEEKDAY_FRIDAY + * @arg @ref LL_RTC_WEEKDAY_SATURDAY + * @arg @ref LL_RTC_WEEKDAY_SUNDAY + */ +__STATIC_INLINE uint32_t LL_RTC_ALMA_GetWeekDay(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->ALRMAR, 
RTC_ALRMAR_DU) >> RTC_ALRMAR_DU_Pos); +} + +/** + * @brief Set Alarm A time format (AM/24-hour or PM notation) + * @rmtoll ALRMAR PM LL_RTC_ALMA_SetTimeFormat + * @param RTCx RTC Instance + * @param TimeFormat This parameter can be one of the following values: + * @arg @ref LL_RTC_ALMA_TIME_FORMAT_AM + * @arg @ref LL_RTC_ALMA_TIME_FORMAT_PM + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMA_SetTimeFormat(RTC_TypeDef *RTCx, uint32_t TimeFormat) +{ + MODIFY_REG(RTCx->ALRMAR, RTC_ALRMAR_PM, TimeFormat); +} + +/** + * @brief Get Alarm A time format (AM or PM notation) + * @rmtoll ALRMAR PM LL_RTC_ALMA_GetTimeFormat + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_ALMA_TIME_FORMAT_AM + * @arg @ref LL_RTC_ALMA_TIME_FORMAT_PM + */ +__STATIC_INLINE uint32_t LL_RTC_ALMA_GetTimeFormat(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->ALRMAR, RTC_ALRMAR_PM)); +} + +/** + * @brief Set ALARM A Hours in BCD format + * @note helper macro __LL_RTC_CONVERT_BIN2BCD is available to convert Hours from binary to BCD format + * @rmtoll ALRMAR HT LL_RTC_ALMA_SetHour\n + * ALRMAR HU LL_RTC_ALMA_SetHour + * @param RTCx RTC Instance + * @param Hours Value between Min_Data=0x01 and Max_Data=0x12 or between Min_Data=0x00 and Max_Data=0x23 + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMA_SetHour(RTC_TypeDef *RTCx, uint32_t Hours) +{ + MODIFY_REG(RTCx->ALRMAR, (RTC_ALRMAR_HT | RTC_ALRMAR_HU), + (((Hours & 0xF0U) << (RTC_ALRMAR_HT_Pos - 4U)) | ((Hours & 0x0FU) << RTC_ALRMAR_HU_Pos))); +} + +/** + * @brief Get ALARM A Hours in BCD format + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Hours from BCD to Binary format + * @rmtoll ALRMAR HT LL_RTC_ALMA_GetHour\n + * ALRMAR HU LL_RTC_ALMA_GetHour + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x01 and Max_Data=0x12 or between Min_Data=0x00 and Max_Data=0x23 + */ +__STATIC_INLINE uint32_t LL_RTC_ALMA_GetHour(RTC_TypeDef *RTCx) +{ + return (uint32_t)((READ_BIT(RTCx->ALRMAR, (RTC_ALRMAR_HT | RTC_ALRMAR_HU))) >> RTC_ALRMAR_HU_Pos); +} + +/** + * @brief Set ALARM A Minutes in BCD format + * @note helper macro __LL_RTC_CONVERT_BIN2BCD is available to convert Minutes from binary to BCD format + * @rmtoll ALRMAR MNT LL_RTC_ALMA_SetMinute\n + * ALRMAR MNU LL_RTC_ALMA_SetMinute + * @param RTCx RTC Instance + * @param Minutes Value between Min_Data=0x00 and Max_Data=0x59 + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMA_SetMinute(RTC_TypeDef *RTCx, uint32_t Minutes) +{ + MODIFY_REG(RTCx->ALRMAR, (RTC_ALRMAR_MNT | RTC_ALRMAR_MNU), + (((Minutes & 0xF0U) << (RTC_ALRMAR_MNT_Pos - 4U)) | ((Minutes & 0x0FU) << RTC_ALRMAR_MNU_Pos))); +} + +/** + * @brief Get ALARM A Minutes in BCD format + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Minutes from BCD to Binary format + * @rmtoll ALRMAR MNT LL_RTC_ALMA_GetMinute\n + * ALRMAR MNU LL_RTC_ALMA_GetMinute + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0x59 + */ +__STATIC_INLINE uint32_t LL_RTC_ALMA_GetMinute(RTC_TypeDef *RTCx) +{ + return (uint32_t)((READ_BIT(RTCx->ALRMAR, (RTC_ALRMAR_MNT | RTC_ALRMAR_MNU))) >> RTC_ALRMAR_MNU_Pos); +} + +/** + * @brief Set ALARM A Seconds in BCD format + * @note helper macro __LL_RTC_CONVERT_BIN2BCD is available to convert Seconds from binary to BCD format + * @rmtoll ALRMAR ST LL_RTC_ALMA_SetSecond\n + * ALRMAR SU LL_RTC_ALMA_SetSecond + * @param RTCx RTC Instance + * @param Seconds Value between Min_Data=0x00 and Max_Data=0x59 
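+  * @note   Illustrative Alarm A configuration sketch (not part of the original header),
+  *         firing every day at 06:30:00; write protection is assumed removed, Alarm A
+  *         disabled and its write-access flag (ALRAWF, managed elsewhere in this file)
+  *         set before ALRMAR is written:
+  * @code
+  *         LL_RTC_DisableWriteProtection(RTC);
+  *         LL_RTC_ALMA_Disable(RTC);
+  *         LL_RTC_ALMA_ConfigTime(RTC, LL_RTC_ALMA_TIME_FORMAT_AM, 0x06U, 0x30U, 0x00U);
+  *         LL_RTC_ALMA_SetMask(RTC, LL_RTC_ALMA_MASK_DATEWEEKDAY);
+  *         LL_RTC_ALMA_Enable(RTC);
+  *         LL_RTC_EnableWriteProtection(RTC);
+  * @endcode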
+ * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMA_SetSecond(RTC_TypeDef *RTCx, uint32_t Seconds) +{ + MODIFY_REG(RTCx->ALRMAR, (RTC_ALRMAR_ST | RTC_ALRMAR_SU), + (((Seconds & 0xF0U) << (RTC_ALRMAR_ST_Pos - 4U)) | ((Seconds & 0x0FU) << RTC_ALRMAR_SU_Pos))); +} + +/** + * @brief Get ALARM A Seconds in BCD format + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Seconds from BCD to Binary format + * @rmtoll ALRMAR ST LL_RTC_ALMA_GetSecond\n + * ALRMAR SU LL_RTC_ALMA_GetSecond + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0x59 + */ +__STATIC_INLINE uint32_t LL_RTC_ALMA_GetSecond(RTC_TypeDef *RTCx) +{ + return (uint32_t)((READ_BIT(RTCx->ALRMAR, (RTC_ALRMAR_ST | RTC_ALRMAR_SU))) >> RTC_ALRMAR_SU_Pos); +} + +/** + * @brief Set Alarm A Time (hour, minute and second) in BCD format + * @rmtoll ALRMAR PM LL_RTC_ALMA_ConfigTime\n + * ALRMAR HT LL_RTC_ALMA_ConfigTime\n + * ALRMAR HU LL_RTC_ALMA_ConfigTime\n + * ALRMAR MNT LL_RTC_ALMA_ConfigTime\n + * ALRMAR MNU LL_RTC_ALMA_ConfigTime\n + * ALRMAR ST LL_RTC_ALMA_ConfigTime\n + * ALRMAR SU LL_RTC_ALMA_ConfigTime + * @param RTCx RTC Instance + * @param Format12_24 This parameter can be one of the following values: + * @arg @ref LL_RTC_ALMA_TIME_FORMAT_AM + * @arg @ref LL_RTC_ALMA_TIME_FORMAT_PM + * @param Hours Value between Min_Data=0x01 and Max_Data=0x12 or between Min_Data=0x00 and Max_Data=0x23 + * @param Minutes Value between Min_Data=0x00 and Max_Data=0x59 + * @param Seconds Value between Min_Data=0x00 and Max_Data=0x59 + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMA_ConfigTime(RTC_TypeDef *RTCx, uint32_t Format12_24, uint32_t Hours, uint32_t Minutes, uint32_t Seconds) +{ + uint32_t temp; + + temp = Format12_24 | \ + (((Hours & 0xF0U) << (RTC_ALRMAR_HT_Pos - 4U)) | ((Hours & 0x0FU) << RTC_ALRMAR_HU_Pos)) | \ + (((Minutes & 0xF0U) << (RTC_ALRMAR_MNT_Pos - 4U)) | ((Minutes & 0x0FU) << RTC_ALRMAR_MNU_Pos)) | \ + (((Seconds & 0xF0U) << (RTC_ALRMAR_ST_Pos - 4U)) | ((Seconds & 0x0FU) << RTC_ALRMAR_SU_Pos)); + + MODIFY_REG(RTCx->ALRMAR, RTC_ALRMAR_PM | RTC_ALRMAR_HT | RTC_ALRMAR_HU | RTC_ALRMAR_MNT | RTC_ALRMAR_MNU | RTC_ALRMAR_ST | RTC_ALRMAR_SU, temp); +} + +/** + * @brief Get Alarm B Time (hour, minute and second) in BCD format + * @note helper macros __LL_RTC_GET_HOUR, __LL_RTC_GET_MINUTE and __LL_RTC_GET_SECOND + * are available to get independently each parameter. + * @rmtoll ALRMAR HT LL_RTC_ALMA_GetTime\n + * ALRMAR HU LL_RTC_ALMA_GetTime\n + * ALRMAR MNT LL_RTC_ALMA_GetTime\n + * ALRMAR MNU LL_RTC_ALMA_GetTime\n + * ALRMAR ST LL_RTC_ALMA_GetTime\n + * ALRMAR SU LL_RTC_ALMA_GetTime + * @param RTCx RTC Instance + * @retval Combination of hours, minutes and seconds. + */ +__STATIC_INLINE uint32_t LL_RTC_ALMA_GetTime(RTC_TypeDef *RTCx) +{ + return (uint32_t)((LL_RTC_ALMA_GetHour(RTCx) << RTC_OFFSET_HOUR) | (LL_RTC_ALMA_GetMinute(RTCx) << RTC_OFFSET_MINUTE) | LL_RTC_ALMA_GetSecond(RTCx)); +} + +/** + * @brief Mask the most-significant bits of the subseconds field starting from + * the bit specified in parameter Mask + * @note This register can be written only when ALRAE is reset in RTC_CR register, + * or in initialization mode. 
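+ * @note  Illustrative sketch (added example): a mask of 0 makes every subsecond
+ *        value match, while 0xF compares all 15 bits against the value set with
+ *        @ref LL_RTC_ALMA_SetSubSecond.
+ * @code
+ *   // precondition: ALRAE cleared (or init mode) and write protection removed
+ *   LL_RTC_ALMA_SetSubSecondMask(RTC, 0xFU);
+ *   LL_RTC_ALMA_SetSubSecond(RTC, 0x100U);
+ * @endcode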
+ * @rmtoll ALRMASSR MASKSS LL_RTC_ALMA_SetSubSecondMask + * @param RTCx RTC Instance + * @param Mask Value between Min_Data=0x00 and Max_Data=0xF + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMA_SetSubSecondMask(RTC_TypeDef *RTCx, uint32_t Mask) +{ + MODIFY_REG(RTCx->ALRMASSR, RTC_ALRMASSR_MASKSS, Mask << RTC_ALRMASSR_MASKSS_Pos); +} + +/** + * @brief Get Alarm A subseconds mask + * @rmtoll ALRMASSR MASKSS LL_RTC_ALMA_GetSubSecondMask + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0xF + */ +__STATIC_INLINE uint32_t LL_RTC_ALMA_GetSubSecondMask(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->ALRMASSR, RTC_ALRMASSR_MASKSS) >> RTC_ALRMASSR_MASKSS_Pos); +} + +/** + * @brief Set Alarm A subseconds value + * @rmtoll ALRMASSR SS LL_RTC_ALMA_SetSubSecond + * @param RTCx RTC Instance + * @param Subsecond Value between Min_Data=0x00 and Max_Data=0x7FFF + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMA_SetSubSecond(RTC_TypeDef *RTCx, uint32_t Subsecond) +{ + MODIFY_REG(RTCx->ALRMASSR, RTC_ALRMASSR_SS, Subsecond); +} + +/** + * @brief Get Alarm A subseconds value + * @rmtoll ALRMASSR SS LL_RTC_ALMA_GetSubSecond + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0x7FFF + */ +__STATIC_INLINE uint32_t LL_RTC_ALMA_GetSubSecond(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->ALRMASSR, RTC_ALRMASSR_SS)); +} + +/** + * @} + */ + +/** @defgroup RTC_LL_EF_ALARMB ALARMB + * @{ + */ + +/** + * @brief Enable Alarm B + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR ALRBE LL_RTC_ALMB_Enable + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMB_Enable(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->CR, RTC_CR_ALRBE); +} + +/** + * @brief Disable Alarm B + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR ALRBE LL_RTC_ALMB_Disable + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMB_Disable(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->CR, RTC_CR_ALRBE); +} + +/** + * @brief Specify the Alarm B masks. + * @rmtoll ALRMBR MSK4 LL_RTC_ALMB_SetMask\n + * ALRMBR MSK3 LL_RTC_ALMB_SetMask\n + * ALRMBR MSK2 LL_RTC_ALMB_SetMask\n + * ALRMBR MSK1 LL_RTC_ALMB_SetMask + * @param RTCx RTC Instance + * @param Mask This parameter can be a combination of the following values: + * @arg @ref LL_RTC_ALMB_MASK_NONE + * @arg @ref LL_RTC_ALMB_MASK_DATEWEEKDAY + * @arg @ref LL_RTC_ALMB_MASK_HOURS + * @arg @ref LL_RTC_ALMB_MASK_MINUTES + * @arg @ref LL_RTC_ALMB_MASK_SECONDS + * @arg @ref LL_RTC_ALMB_MASK_ALL + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMB_SetMask(RTC_TypeDef *RTCx, uint32_t Mask) +{ + MODIFY_REG(RTCx->ALRMBR, RTC_ALRMBR_MSK4 | RTC_ALRMBR_MSK3 | RTC_ALRMBR_MSK2 | RTC_ALRMBR_MSK1, Mask); +} + +/** + * @brief Get the Alarm B masks. 
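+ * @note  Added illustrative sketch: mask bits are normally combined when writing
+ *        with @ref LL_RTC_ALMB_SetMask, then read back here as a single word.
+ * @code
+ *   // fire every day at the programmed time: ignore the date/weekday field
+ *   LL_RTC_ALMB_SetMask(RTC, LL_RTC_ALMB_MASK_DATEWEEKDAY);
+ *   uint32_t mask = LL_RTC_ALMB_GetMask(RTC);
+ * @endcode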
+ * @rmtoll ALRMBR MSK4 LL_RTC_ALMB_GetMask\n + * ALRMBR MSK3 LL_RTC_ALMB_GetMask\n + * ALRMBR MSK2 LL_RTC_ALMB_GetMask\n + * ALRMBR MSK1 LL_RTC_ALMB_GetMask + * @param RTCx RTC Instance + * @retval Returned value can be can be a combination of the following values: + * @arg @ref LL_RTC_ALMB_MASK_NONE + * @arg @ref LL_RTC_ALMB_MASK_DATEWEEKDAY + * @arg @ref LL_RTC_ALMB_MASK_HOURS + * @arg @ref LL_RTC_ALMB_MASK_MINUTES + * @arg @ref LL_RTC_ALMB_MASK_SECONDS + * @arg @ref LL_RTC_ALMB_MASK_ALL + */ +__STATIC_INLINE uint32_t LL_RTC_ALMB_GetMask(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->ALRMBR, RTC_ALRMBR_MSK4 | RTC_ALRMBR_MSK3 | RTC_ALRMBR_MSK2 | RTC_ALRMBR_MSK1)); +} + +/** + * @brief Enable AlarmB Week day selection (DU[3:0] represents the week day. DT[1:0] is do not care) + * @rmtoll ALRMBR WDSEL LL_RTC_ALMB_EnableWeekday + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMB_EnableWeekday(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->ALRMBR, RTC_ALRMBR_WDSEL); +} + +/** + * @brief Disable AlarmB Week day selection (DU[3:0] represents the date ) + * @rmtoll ALRMBR WDSEL LL_RTC_ALMB_DisableWeekday + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMB_DisableWeekday(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->ALRMBR, RTC_ALRMBR_WDSEL); +} + +/** + * @brief Set ALARM B Day in BCD format + * @note helper macro __LL_RTC_CONVERT_BIN2BCD is available to convert Day from binary to BCD format + * @rmtoll ALRMBR DT LL_RTC_ALMB_SetDay\n + * ALRMBR DU LL_RTC_ALMB_SetDay + * @param RTCx RTC Instance + * @param Day Value between Min_Data=0x01 and Max_Data=0x31 + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMB_SetDay(RTC_TypeDef *RTCx, uint32_t Day) +{ + MODIFY_REG(RTCx->ALRMBR, (RTC_ALRMBR_DT | RTC_ALRMBR_DU), + (((Day & 0xF0U) << (RTC_ALRMBR_DT_Pos - 4U)) | ((Day & 0x0FU) << RTC_ALRMBR_DU_Pos))); +} + +/** + * @brief Get ALARM B Day in BCD format + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Day from BCD to Binary format + * @rmtoll ALRMBR DT LL_RTC_ALMB_GetDay\n + * ALRMBR DU LL_RTC_ALMB_GetDay + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x01 and Max_Data=0x31 + */ +__STATIC_INLINE uint32_t LL_RTC_ALMB_GetDay(RTC_TypeDef *RTCx) +{ + return (uint32_t)((READ_BIT(RTCx->ALRMBR, (RTC_ALRMBR_DT | RTC_ALRMBR_DU))) >> RTC_ALRMBR_DU_Pos); +} + +/** + * @brief Set ALARM B Weekday + * @rmtoll ALRMBR DU LL_RTC_ALMB_SetWeekDay + * @param RTCx RTC Instance + * @param WeekDay This parameter can be one of the following values: + * @arg @ref LL_RTC_WEEKDAY_MONDAY + * @arg @ref LL_RTC_WEEKDAY_TUESDAY + * @arg @ref LL_RTC_WEEKDAY_WEDNESDAY + * @arg @ref LL_RTC_WEEKDAY_THURSDAY + * @arg @ref LL_RTC_WEEKDAY_FRIDAY + * @arg @ref LL_RTC_WEEKDAY_SATURDAY + * @arg @ref LL_RTC_WEEKDAY_SUNDAY + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMB_SetWeekDay(RTC_TypeDef *RTCx, uint32_t WeekDay) +{ + MODIFY_REG(RTCx->ALRMBR, RTC_ALRMBR_DU, WeekDay << RTC_ALRMBR_DU_Pos); +} + +/** + * @brief Get ALARM B Weekday + * @rmtoll ALRMBR DU LL_RTC_ALMB_GetWeekDay + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_WEEKDAY_MONDAY + * @arg @ref LL_RTC_WEEKDAY_TUESDAY + * @arg @ref LL_RTC_WEEKDAY_WEDNESDAY + * @arg @ref LL_RTC_WEEKDAY_THURSDAY + * @arg @ref LL_RTC_WEEKDAY_FRIDAY + * @arg @ref LL_RTC_WEEKDAY_SATURDAY + * @arg @ref LL_RTC_WEEKDAY_SUNDAY + */ +__STATIC_INLINE uint32_t LL_RTC_ALMB_GetWeekDay(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->ALRMBR, 
RTC_ALRMBR_DU) >> RTC_ALRMBR_DU_Pos); +} + +/** + * @brief Set ALARM B time format (AM/24-hour or PM notation) + * @rmtoll ALRMBR PM LL_RTC_ALMB_SetTimeFormat + * @param RTCx RTC Instance + * @param TimeFormat This parameter can be one of the following values: + * @arg @ref LL_RTC_ALMB_TIME_FORMAT_AM + * @arg @ref LL_RTC_ALMB_TIME_FORMAT_PM + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMB_SetTimeFormat(RTC_TypeDef *RTCx, uint32_t TimeFormat) +{ + MODIFY_REG(RTCx->ALRMBR, RTC_ALRMBR_PM, TimeFormat); +} + +/** + * @brief Get ALARM B time format (AM or PM notation) + * @rmtoll ALRMBR PM LL_RTC_ALMB_GetTimeFormat + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_ALMB_TIME_FORMAT_AM + * @arg @ref LL_RTC_ALMB_TIME_FORMAT_PM + */ +__STATIC_INLINE uint32_t LL_RTC_ALMB_GetTimeFormat(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->ALRMBR, RTC_ALRMBR_PM)); +} + +/** + * @brief Set ALARM B Hours in BCD format + * @note helper macro __LL_RTC_CONVERT_BIN2BCD is available to convert Hours from binary to BCD format + * @rmtoll ALRMBR HT LL_RTC_ALMB_SetHour\n + * ALRMBR HU LL_RTC_ALMB_SetHour + * @param RTCx RTC Instance + * @param Hours Value between Min_Data=0x01 and Max_Data=0x12 or between Min_Data=0x00 and Max_Data=0x23 + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMB_SetHour(RTC_TypeDef *RTCx, uint32_t Hours) +{ + MODIFY_REG(RTCx->ALRMBR, (RTC_ALRMBR_HT | RTC_ALRMBR_HU), + (((Hours & 0xF0U) << (RTC_ALRMBR_HT_Pos - 4U)) | ((Hours & 0x0FU) << RTC_ALRMBR_HU_Pos))); +} + +/** + * @brief Get ALARM B Hours in BCD format + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Hours from BCD to Binary format + * @rmtoll ALRMBR HT LL_RTC_ALMB_GetHour\n + * ALRMBR HU LL_RTC_ALMB_GetHour + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x01 and Max_Data=0x12 or between Min_Data=0x00 and Max_Data=0x23 + */ +__STATIC_INLINE uint32_t LL_RTC_ALMB_GetHour(RTC_TypeDef *RTCx) +{ + return (uint32_t)((READ_BIT(RTCx->ALRMBR, (RTC_ALRMBR_HT | RTC_ALRMBR_HU))) >> RTC_ALRMBR_HU_Pos); +} + +/** + * @brief Set ALARM B Minutes in BCD format + * @note helper macro __LL_RTC_CONVERT_BIN2BCD is available to convert Minutes from binary to BCD format + * @rmtoll ALRMBR MNT LL_RTC_ALMB_SetMinute\n + * ALRMBR MNU LL_RTC_ALMB_SetMinute + * @param RTCx RTC Instance + * @param Minutes between Min_Data=0x00 and Max_Data=0x59 + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMB_SetMinute(RTC_TypeDef *RTCx, uint32_t Minutes) +{ + MODIFY_REG(RTCx->ALRMBR, (RTC_ALRMBR_MNT | RTC_ALRMBR_MNU), + (((Minutes & 0xF0U) << (RTC_ALRMBR_MNT_Pos - 4U)) | ((Minutes & 0x0FU) << RTC_ALRMBR_MNU_Pos))); +} + +/** + * @brief Get ALARM B Minutes in BCD format + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Minutes from BCD to Binary format + * @rmtoll ALRMBR MNT LL_RTC_ALMB_GetMinute\n + * ALRMBR MNU LL_RTC_ALMB_GetMinute + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0x59 + */ +__STATIC_INLINE uint32_t LL_RTC_ALMB_GetMinute(RTC_TypeDef *RTCx) +{ + return (uint32_t)((READ_BIT(RTCx->ALRMBR, (RTC_ALRMBR_MNT | RTC_ALRMBR_MNU))) >> RTC_ALRMBR_MNU_Pos); +} + +/** + * @brief Set ALARM B Seconds in BCD format + * @note helper macro __LL_RTC_CONVERT_BIN2BCD is available to convert Seconds from binary to BCD format + * @rmtoll ALRMBR ST LL_RTC_ALMB_SetSecond\n + * ALRMBR SU LL_RTC_ALMB_SetSecond + * @param RTCx RTC Instance + * @param Seconds Value between Min_Data=0x00 and Max_Data=0x59 + * 
@retval None + */ +__STATIC_INLINE void LL_RTC_ALMB_SetSecond(RTC_TypeDef *RTCx, uint32_t Seconds) +{ + MODIFY_REG(RTCx->ALRMBR, (RTC_ALRMBR_ST | RTC_ALRMBR_SU), + (((Seconds & 0xF0U) << (RTC_ALRMBR_ST_Pos - 4U)) | ((Seconds & 0x0FU) << RTC_ALRMBR_SU_Pos))); +} + +/** + * @brief Get ALARM B Seconds in BCD format + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Seconds from BCD to Binary format + * @rmtoll ALRMBR ST LL_RTC_ALMB_GetSecond\n + * ALRMBR SU LL_RTC_ALMB_GetSecond + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0x59 + */ +__STATIC_INLINE uint32_t LL_RTC_ALMB_GetSecond(RTC_TypeDef *RTCx) +{ + return (uint32_t)((READ_BIT(RTCx->ALRMBR, (RTC_ALRMBR_ST | RTC_ALRMBR_SU))) >> RTC_ALRMBR_SU_Pos); +} + +/** + * @brief Set Alarm B Time (hour, minute and second) in BCD format + * @rmtoll ALRMBR PM LL_RTC_ALMB_ConfigTime\n + * ALRMBR HT LL_RTC_ALMB_ConfigTime\n + * ALRMBR HU LL_RTC_ALMB_ConfigTime\n + * ALRMBR MNT LL_RTC_ALMB_ConfigTime\n + * ALRMBR MNU LL_RTC_ALMB_ConfigTime\n + * ALRMBR ST LL_RTC_ALMB_ConfigTime\n + * ALRMBR SU LL_RTC_ALMB_ConfigTime + * @param RTCx RTC Instance + * @param Format12_24 This parameter can be one of the following values: + * @arg @ref LL_RTC_ALMB_TIME_FORMAT_AM + * @arg @ref LL_RTC_ALMB_TIME_FORMAT_PM + * @param Hours Value between Min_Data=0x01 and Max_Data=0x12 or between Min_Data=0x00 and Max_Data=0x23 + * @param Minutes Value between Min_Data=0x00 and Max_Data=0x59 + * @param Seconds Value between Min_Data=0x00 and Max_Data=0x59 + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMB_ConfigTime(RTC_TypeDef *RTCx, uint32_t Format12_24, uint32_t Hours, uint32_t Minutes, uint32_t Seconds) +{ + uint32_t temp; + + temp = Format12_24 | \ + (((Hours & 0xF0U) << (RTC_ALRMBR_HT_Pos - 4U)) | ((Hours & 0x0FU) << RTC_ALRMBR_HU_Pos)) | \ + (((Minutes & 0xF0U) << (RTC_ALRMBR_MNT_Pos - 4U)) | ((Minutes & 0x0FU) << RTC_ALRMBR_MNU_Pos)) | \ + (((Seconds & 0xF0U) << (RTC_ALRMBR_ST_Pos - 4U)) | ((Seconds & 0x0FU) << RTC_ALRMBR_SU_Pos)); + + MODIFY_REG(RTCx->ALRMBR, RTC_ALRMBR_PM | RTC_ALRMBR_HT | RTC_ALRMBR_HU | RTC_ALRMBR_MNT | RTC_ALRMBR_MNU | RTC_ALRMBR_ST | RTC_ALRMBR_SU, temp); +} + +/** + * @brief Get Alarm B Time (hour, minute and second) in BCD format + * @note helper macros __LL_RTC_GET_HOUR, __LL_RTC_GET_MINUTE and __LL_RTC_GET_SECOND + * are available to get independently each parameter. + * @rmtoll ALRMBR HT LL_RTC_ALMB_GetTime\n + * ALRMBR HU LL_RTC_ALMB_GetTime\n + * ALRMBR MNT LL_RTC_ALMB_GetTime\n + * ALRMBR MNU LL_RTC_ALMB_GetTime\n + * ALRMBR ST LL_RTC_ALMB_GetTime\n + * ALRMBR SU LL_RTC_ALMB_GetTime + * @param RTCx RTC Instance + * @retval Combination of hours, minutes and seconds. + */ +__STATIC_INLINE uint32_t LL_RTC_ALMB_GetTime(RTC_TypeDef *RTCx) +{ + return (uint32_t)((LL_RTC_ALMB_GetHour(RTCx) << RTC_OFFSET_HOUR) | (LL_RTC_ALMB_GetMinute(RTCx) << RTC_OFFSET_MINUTE) | LL_RTC_ALMB_GetSecond(RTCx)); +} + +/** + * @brief Mask the most-significant bits of the subseconds field starting from + * the bit specified in parameter Mask + * @note This register can be written only when ALRBE is reset in RTC_CR register, + * or in initialization mode. 
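+ * @note  Added sketch of a complete Alarm B setup (illustrative; assumes write
+ *        protection removed and ALRBE cleared beforehand):
+ * @code
+ *   LL_RTC_ALMB_ConfigTime(RTC, LL_RTC_ALMB_TIME_FORMAT_AM, 0x08U, 0x30U, 0x00U); // 08:30:00 BCD
+ *   LL_RTC_ALMB_SetSubSecondMask(RTC, 0U);   // subseconds are not compared
+ * @endcode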
+ * @rmtoll ALRMBSSR MASKSS LL_RTC_ALMB_SetSubSecondMask + * @param RTCx RTC Instance + * @param Mask Value between Min_Data=0x00 and Max_Data=0xF + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMB_SetSubSecondMask(RTC_TypeDef *RTCx, uint32_t Mask) +{ + MODIFY_REG(RTCx->ALRMBSSR, RTC_ALRMBSSR_MASKSS, Mask << RTC_ALRMBSSR_MASKSS_Pos); +} + +/** + * @brief Get Alarm B subseconds mask + * @rmtoll ALRMBSSR MASKSS LL_RTC_ALMB_GetSubSecondMask + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0xF + */ +__STATIC_INLINE uint32_t LL_RTC_ALMB_GetSubSecondMask(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->ALRMBSSR, RTC_ALRMBSSR_MASKSS) >> RTC_ALRMBSSR_MASKSS_Pos); +} + +/** + * @brief Set Alarm B subseconds value + * @rmtoll ALRMBSSR SS LL_RTC_ALMB_SetSubSecond + * @param RTCx RTC Instance + * @param Subsecond Value between Min_Data=0x00 and Max_Data=0x7FFF + * @retval None + */ +__STATIC_INLINE void LL_RTC_ALMB_SetSubSecond(RTC_TypeDef *RTCx, uint32_t Subsecond) +{ + MODIFY_REG(RTCx->ALRMBSSR, RTC_ALRMBSSR_SS, Subsecond); +} + +/** + * @brief Get Alarm B subseconds value + * @rmtoll ALRMBSSR SS LL_RTC_ALMB_GetSubSecond + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0x7FFF + */ +__STATIC_INLINE uint32_t LL_RTC_ALMB_GetSubSecond(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->ALRMBSSR, RTC_ALRMBSSR_SS)); +} + +/** + * @} + */ + +/** @defgroup RTC_LL_EF_Timestamp Timestamp + * @{ + */ + +/** + * @brief Enable Timestamp + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR TSE LL_RTC_TS_Enable + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_TS_Enable(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->CR, RTC_CR_TSE); +} + +/** + * @brief Disable Timestamp + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR TSE LL_RTC_TS_Disable + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_TS_Disable(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->CR, RTC_CR_TSE); +} + +/** + * @brief Set Time-stamp event active edge + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @note TSE must be reset when TSEDGE is changed to avoid unwanted TSF setting + * @rmtoll CR TSEDGE LL_RTC_TS_SetActiveEdge + * @param RTCx RTC Instance + * @param Edge This parameter can be one of the following values: + * @arg @ref LL_RTC_TIMESTAMP_EDGE_RISING + * @arg @ref LL_RTC_TIMESTAMP_EDGE_FALLING + * @retval None + */ +__STATIC_INLINE void LL_RTC_TS_SetActiveEdge(RTC_TypeDef *RTCx, uint32_t Edge) +{ + MODIFY_REG(RTCx->CR, RTC_CR_TSEDGE, Edge); +} + +/** + * @brief Get Time-stamp event active edge + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. 
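+ * @note  Added illustrative sketch of the usual configuration order (TSE must be
+ *        cleared before TSEDGE is changed, as noted for @ref LL_RTC_TS_SetActiveEdge):
+ * @code
+ *   LL_RTC_TS_Disable(RTC);
+ *   LL_RTC_TS_SetActiveEdge(RTC, LL_RTC_TIMESTAMP_EDGE_RISING);
+ *   LL_RTC_TS_Enable(RTC);
+ * @endcode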
+ * @rmtoll CR TSEDGE LL_RTC_TS_GetActiveEdge + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_TIMESTAMP_EDGE_RISING + * @arg @ref LL_RTC_TIMESTAMP_EDGE_FALLING + */ +__STATIC_INLINE uint32_t LL_RTC_TS_GetActiveEdge(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->CR, RTC_CR_TSEDGE)); +} + +/** + * @brief Get Timestamp AM/PM notation (AM or 24-hour format) + * @rmtoll TSTR PM LL_RTC_TS_GetTimeFormat + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_TS_TIME_FORMAT_AM + * @arg @ref LL_RTC_TS_TIME_FORMAT_PM + */ +__STATIC_INLINE uint32_t LL_RTC_TS_GetTimeFormat(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TSTR, RTC_TSTR_PM)); +} + +/** + * @brief Get Timestamp Hours in BCD format + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Hours from BCD to Binary format + * @rmtoll TSTR HT LL_RTC_TS_GetHour\n + * TSTR HU LL_RTC_TS_GetHour + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x01 and Max_Data=0x12 or between Min_Data=0x00 and Max_Data=0x23 + */ +__STATIC_INLINE uint32_t LL_RTC_TS_GetHour(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TSTR, RTC_TSTR_HT | RTC_TSTR_HU) >> RTC_TSTR_HU_Pos); +} + +/** + * @brief Get Timestamp Minutes in BCD format + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Minutes from BCD to Binary format + * @rmtoll TSTR MNT LL_RTC_TS_GetMinute\n + * TSTR MNU LL_RTC_TS_GetMinute + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0x59 + */ +__STATIC_INLINE uint32_t LL_RTC_TS_GetMinute(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TSTR, RTC_TSTR_MNT | RTC_TSTR_MNU) >> RTC_TSTR_MNU_Pos); +} + +/** + * @brief Get Timestamp Seconds in BCD format + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Seconds from BCD to Binary format + * @rmtoll TSTR ST LL_RTC_TS_GetSecond\n + * TSTR SU LL_RTC_TS_GetSecond + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0x59 + */ +__STATIC_INLINE uint32_t LL_RTC_TS_GetSecond(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TSTR, RTC_TSTR_ST | RTC_TSTR_SU)); +} + +/** + * @brief Get Timestamp time (hour, minute and second) in BCD format + * @note helper macros __LL_RTC_GET_HOUR, __LL_RTC_GET_MINUTE and __LL_RTC_GET_SECOND + * are available to get independently each parameter. + * @rmtoll TSTR HT LL_RTC_TS_GetTime\n + * TSTR HU LL_RTC_TS_GetTime\n + * TSTR MNT LL_RTC_TS_GetTime\n + * TSTR MNU LL_RTC_TS_GetTime\n + * TSTR ST LL_RTC_TS_GetTime\n + * TSTR SU LL_RTC_TS_GetTime + * @param RTCx RTC Instance + * @retval Combination of hours, minutes and seconds. 
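+ * @note  Added sketch (illustrative): the packed value is usually split with the
+ *        helper macros listed above, e.g.
+ * @code
+ *   uint32_t time = LL_RTC_TS_GetTime(RTC);
+ *   uint32_t sec  = __LL_RTC_CONVERT_BCD2BIN(__LL_RTC_GET_SECOND(time));
+ * @endcode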
+ */ +__STATIC_INLINE uint32_t LL_RTC_TS_GetTime(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TSTR, + RTC_TSTR_HT | RTC_TSTR_HU | RTC_TSTR_MNT | RTC_TSTR_MNU | RTC_TSTR_ST | RTC_TSTR_SU)); +} + +/** + * @brief Get Timestamp Week day + * @rmtoll TSDR WDU LL_RTC_TS_GetWeekDay + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_WEEKDAY_MONDAY + * @arg @ref LL_RTC_WEEKDAY_TUESDAY + * @arg @ref LL_RTC_WEEKDAY_WEDNESDAY + * @arg @ref LL_RTC_WEEKDAY_THURSDAY + * @arg @ref LL_RTC_WEEKDAY_FRIDAY + * @arg @ref LL_RTC_WEEKDAY_SATURDAY + * @arg @ref LL_RTC_WEEKDAY_SUNDAY + */ +__STATIC_INLINE uint32_t LL_RTC_TS_GetWeekDay(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TSDR, RTC_TSDR_WDU) >> RTC_TSDR_WDU_Pos); +} + +/** + * @brief Get Timestamp Month in BCD format + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Month from BCD to Binary format + * @rmtoll TSDR MT LL_RTC_TS_GetMonth\n + * TSDR MU LL_RTC_TS_GetMonth + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_MONTH_JANUARY + * @arg @ref LL_RTC_MONTH_FEBRUARY + * @arg @ref LL_RTC_MONTH_MARCH + * @arg @ref LL_RTC_MONTH_APRIL + * @arg @ref LL_RTC_MONTH_MAY + * @arg @ref LL_RTC_MONTH_JUNE + * @arg @ref LL_RTC_MONTH_JULY + * @arg @ref LL_RTC_MONTH_AUGUST + * @arg @ref LL_RTC_MONTH_SEPTEMBER + * @arg @ref LL_RTC_MONTH_OCTOBER + * @arg @ref LL_RTC_MONTH_NOVEMBER + * @arg @ref LL_RTC_MONTH_DECEMBER + */ +__STATIC_INLINE uint32_t LL_RTC_TS_GetMonth(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TSDR, RTC_TSDR_MT | RTC_TSDR_MU) >> RTC_TSDR_MU_Pos); +} + +/** + * @brief Get Timestamp Day in BCD format + * @note helper macro __LL_RTC_CONVERT_BCD2BIN is available to convert Day from BCD to Binary format + * @rmtoll TSDR DT LL_RTC_TS_GetDay\n + * TSDR DU LL_RTC_TS_GetDay + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x01 and Max_Data=0x31 + */ +__STATIC_INLINE uint32_t LL_RTC_TS_GetDay(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TSDR, RTC_TSDR_DT | RTC_TSDR_DU)); +} + +/** + * @brief Get Timestamp date (WeekDay, Day and Month) in BCD format + * @note helper macros __LL_RTC_GET_WEEKDAY, __LL_RTC_GET_MONTH, + * and __LL_RTC_GET_DAY are available to get independently each parameter. 
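+ * @note  Added illustrative sketch using those helper macros (assumes a timestamp
+ *        event has been captured, i.e. the TSF flag is set):
+ * @code
+ *   if (LL_RTC_IsActiveFlag_TS(RTC))
+ *   {
+ *     uint32_t date = LL_RTC_TS_GetDate(RTC);
+ *     uint32_t day  = __LL_RTC_CONVERT_BCD2BIN(__LL_RTC_GET_DAY(date));
+ *     LL_RTC_ClearFlag_TS(RTC);
+ *   }
+ * @endcode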
+ * @rmtoll TSDR WDU LL_RTC_TS_GetDate\n + * TSDR MT LL_RTC_TS_GetDate\n + * TSDR MU LL_RTC_TS_GetDate\n + * TSDR DT LL_RTC_TS_GetDate\n + * TSDR DU LL_RTC_TS_GetDate + * @param RTCx RTC Instance + * @retval Combination of Weekday, Day and Month + */ +__STATIC_INLINE uint32_t LL_RTC_TS_GetDate(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TSDR, RTC_TSDR_WDU | RTC_TSDR_MT | RTC_TSDR_MU | RTC_TSDR_DT | RTC_TSDR_DU)); +} + +/** + * @brief Get time-stamp subseconds value + * @rmtoll TSSSR SS LL_RTC_TS_GetSubSecond + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0xFFFF + */ +__STATIC_INLINE uint32_t LL_RTC_TS_GetSubSecond(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TSSSR, RTC_TSSSR_SS)); +} + +#if defined(RTC_TAFCR_TAMPTS) +/** + * @brief Activate timestamp on tamper detection event + * @rmtoll TAFCR TAMPTS LL_RTC_TS_EnableOnTamper + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_TS_EnableOnTamper(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->TAFCR, RTC_TAFCR_TAMPTS); +} + +/** + * @brief Disable timestamp on tamper detection event + * @rmtoll TAFCR TAMPTS LL_RTC_TS_DisableOnTamper + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_TS_DisableOnTamper(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->TAFCR, RTC_TAFCR_TAMPTS); +} +#endif /* RTC_TAFCR_TAMPTS */ + +/** + * @brief Set timestamp Pin + * @rmtoll TAFCR TSINSEL LL_RTC_TS_SetPin + * @param RTCx RTC Instance + * @param TSPin specifies the RTC Timestamp Pin. + * This parameter can be one of the following values: + * @arg LL_RTC_TimeStampPin_Default: RTC_AF1 is used as RTC Timestamp Pin. + * @arg LL_RTC_TimeStampPin_Pos1: RTC_AF2 is used as RTC Timestamp Pin. (*) + * + * (*) value not applicable to all devices. + * @retval None + */ +__STATIC_INLINE void LL_RTC_TS_SetPin(RTC_TypeDef *RTCx, uint32_t TSPin) +{ + MODIFY_REG(RTCx->TAFCR, RTC_TAFCR_TSINSEL, TSPin); +} + +/** + * @brief Get timestamp Pin + * @rmtoll TAFCR TSINSEL LL_RTC_TS_GetPin + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg LL_RTC_TimeStampPin_Default: RTC_AF1 is used as RTC Timestamp Pin. + * @arg LL_RTC_TimeStampPin_Pos1: RTC_AF2 is used as RTC Timestamp Pin. (*) + * + * (*) value not applicable to all devices. + * @retval None + */ +__STATIC_INLINE uint32_t LL_RTC_TS_GetPin(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TAFCR, RTC_TAFCR_TSINSEL)); +} + +/** + * @} + */ + +/** @defgroup RTC_LL_EF_Tamper Tamper + * @{ + */ + +/** + * @brief Enable RTC_TAMPx input detection + * @rmtoll TAFCR TAMP1E LL_RTC_TAMPER_Enable\n + * TAFCR TAMP2E LL_RTC_TAMPER_Enable\n + * @param RTCx RTC Instance + * @param Tamper This parameter can be a combination of the following values: + * @arg @ref LL_RTC_TAMPER_1 + * @arg @ref LL_RTC_TAMPER_2 (*) + * + * (*) value not applicable to all devices. + * @retval None + */ +__STATIC_INLINE void LL_RTC_TAMPER_Enable(RTC_TypeDef *RTCx, uint32_t Tamper) +{ + SET_BIT(RTCx->TAFCR, Tamper); +} + +/** + * @brief Clear RTC_TAMPx input detection + * @rmtoll TAFCR TAMP1E LL_RTC_TAMPER_Disable\n + * TAFCR TAMP2E LL_RTC_TAMPER_Disable\n + * @param RTCx RTC Instance + * @param Tamper This parameter can be a combination of the following values: + * @arg @ref LL_RTC_TAMPER_1 + * @arg @ref LL_RTC_TAMPER_2 (*) + * + * (*) value not applicable to all devices. 
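+ * @note  Added illustrative sketch of a minimal tamper enable/disable sequence
+ *        (filter, precharge and sampling options are configured with the other
+ *        functions of this group):
+ * @code
+ *   LL_RTC_TAMPER_Enable(RTC, LL_RTC_TAMPER_1);
+ *   // ... tamper input monitored here ...
+ *   LL_RTC_TAMPER_Disable(RTC, LL_RTC_TAMPER_1);
+ * @endcode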
+ * @retval None + */ +__STATIC_INLINE void LL_RTC_TAMPER_Disable(RTC_TypeDef *RTCx, uint32_t Tamper) +{ + CLEAR_BIT(RTCx->TAFCR, Tamper); +} + +/** + * @brief Disable RTC_TAMPx pull-up disable (Disable precharge of RTC_TAMPx pins) + * @rmtoll TAFCR TAMPPUDIS LL_RTC_TAMPER_DisablePullUp + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_TAMPER_DisablePullUp(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->TAFCR, RTC_TAFCR_TAMPPUDIS); +} + +/** + * @brief Enable RTC_TAMPx pull-up disable ( Precharge RTC_TAMPx pins before sampling) + * @rmtoll TAFCR TAMPPUDIS LL_RTC_TAMPER_EnablePullUp + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_TAMPER_EnablePullUp(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->TAFCR, RTC_TAFCR_TAMPPUDIS); +} + +/** + * @brief Set RTC_TAMPx precharge duration + * @rmtoll TAFCR TAMPPRCH LL_RTC_TAMPER_SetPrecharge + * @param RTCx RTC Instance + * @param Duration This parameter can be one of the following values: + * @arg @ref LL_RTC_TAMPER_DURATION_1RTCCLK + * @arg @ref LL_RTC_TAMPER_DURATION_2RTCCLK + * @arg @ref LL_RTC_TAMPER_DURATION_4RTCCLK + * @arg @ref LL_RTC_TAMPER_DURATION_8RTCCLK + * @retval None + */ +__STATIC_INLINE void LL_RTC_TAMPER_SetPrecharge(RTC_TypeDef *RTCx, uint32_t Duration) +{ + MODIFY_REG(RTCx->TAFCR, RTC_TAFCR_TAMPPRCH, Duration); +} + +/** + * @brief Get RTC_TAMPx precharge duration + * @rmtoll TAFCR TAMPPRCH LL_RTC_TAMPER_GetPrecharge + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_TAMPER_DURATION_1RTCCLK + * @arg @ref LL_RTC_TAMPER_DURATION_2RTCCLK + * @arg @ref LL_RTC_TAMPER_DURATION_4RTCCLK + * @arg @ref LL_RTC_TAMPER_DURATION_8RTCCLK + */ +__STATIC_INLINE uint32_t LL_RTC_TAMPER_GetPrecharge(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TAFCR, RTC_TAFCR_TAMPPRCH)); +} + +/** + * @brief Set RTC_TAMPx filter count + * @rmtoll TAFCR TAMPFLT LL_RTC_TAMPER_SetFilterCount + * @param RTCx RTC Instance + * @param FilterCount This parameter can be one of the following values: + * @arg @ref LL_RTC_TAMPER_FILTER_DISABLE + * @arg @ref LL_RTC_TAMPER_FILTER_2SAMPLE + * @arg @ref LL_RTC_TAMPER_FILTER_4SAMPLE + * @arg @ref LL_RTC_TAMPER_FILTER_8SAMPLE + * @retval None + */ +__STATIC_INLINE void LL_RTC_TAMPER_SetFilterCount(RTC_TypeDef *RTCx, uint32_t FilterCount) +{ + MODIFY_REG(RTCx->TAFCR, RTC_TAFCR_TAMPFLT, FilterCount); +} + +/** + * @brief Get RTC_TAMPx filter count + * @rmtoll TAFCR TAMPFLT LL_RTC_TAMPER_GetFilterCount + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_TAMPER_FILTER_DISABLE + * @arg @ref LL_RTC_TAMPER_FILTER_2SAMPLE + * @arg @ref LL_RTC_TAMPER_FILTER_4SAMPLE + * @arg @ref LL_RTC_TAMPER_FILTER_8SAMPLE + */ +__STATIC_INLINE uint32_t LL_RTC_TAMPER_GetFilterCount(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TAFCR, RTC_TAFCR_TAMPFLT)); +} + +/** + * @brief Set Tamper sampling frequency + * @rmtoll TAFCR TAMPFREQ LL_RTC_TAMPER_SetSamplingFreq + * @param RTCx RTC Instance + * @param SamplingFreq This parameter can be one of the following values: + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_32768 + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_16384 + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_8192 + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_4096 + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_2048 + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_1024 + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_512 + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_256 + * @retval None + */ +__STATIC_INLINE void 
LL_RTC_TAMPER_SetSamplingFreq(RTC_TypeDef *RTCx, uint32_t SamplingFreq) +{ + MODIFY_REG(RTCx->TAFCR, RTC_TAFCR_TAMPFREQ, SamplingFreq); +} + +/** + * @brief Get Tamper sampling frequency + * @rmtoll TAFCR TAMPFREQ LL_RTC_TAMPER_GetSamplingFreq + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_32768 + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_16384 + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_8192 + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_4096 + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_2048 + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_1024 + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_512 + * @arg @ref LL_RTC_TAMPER_SAMPLFREQDIV_256 + */ +__STATIC_INLINE uint32_t LL_RTC_TAMPER_GetSamplingFreq(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TAFCR, RTC_TAFCR_TAMPFREQ)); +} + +/** + * @brief Enable Active level for Tamper input + * @rmtoll TAFCR TAMP1TRG LL_RTC_TAMPER_EnableActiveLevel\n + * TAFCR TAMP2TRG LL_RTC_TAMPER_EnableActiveLevel\n + * @param RTCx RTC Instance + * @param Tamper This parameter can be a combination of the following values: + * @arg @ref LL_RTC_TAMPER_ACTIVELEVEL_TAMP1 + * @arg @ref LL_RTC_TAMPER_ACTIVELEVEL_TAMP2 (*) + * + * (*) value not applicable to all devices. + * @retval None + */ +__STATIC_INLINE void LL_RTC_TAMPER_EnableActiveLevel(RTC_TypeDef *RTCx, uint32_t Tamper) +{ + SET_BIT(RTCx->TAFCR, Tamper); +} + +/** + * @brief Disable Active level for Tamper input + * @rmtoll TAFCR TAMP1TRG LL_RTC_TAMPER_DisableActiveLevel\n + * TAFCR TAMP2TRG LL_RTC_TAMPER_DisableActiveLevel\n + * @param RTCx RTC Instance + * @param Tamper This parameter can be a combination of the following values: + * @arg @ref LL_RTC_TAMPER_ACTIVELEVEL_TAMP1 + * @arg @ref LL_RTC_TAMPER_ACTIVELEVEL_TAMP2 (*) + * + * (*) value not applicable to all devices. + * @retval None + */ +__STATIC_INLINE void LL_RTC_TAMPER_DisableActiveLevel(RTC_TypeDef *RTCx, uint32_t Tamper) +{ + CLEAR_BIT(RTCx->TAFCR, Tamper); +} + +/** + * @brief Set Tamper Pin + * @rmtoll TAFCR TAMP1INSEL LL_RTC_TAMPER_SetPin + * @param RTCx RTC Instance + * @param TamperPin specifies the RTC Tamper Pin. + * This parameter can be one of the following values: + * @arg LL_RTC_TamperPin_Default: RTC_AF1 is used as RTC Tamper Pin. + * @arg LL_RTC_TamperPin_Pos1: RTC_AF2 is used as RTC Tamper Pin. (*) + * + * (*) value not applicable to all devices. + * @retval None + */ +__STATIC_INLINE void LL_RTC_TAMPER_SetPin(RTC_TypeDef *RTCx, uint32_t TamperPin) +{ + MODIFY_REG(RTCx->TAFCR, RTC_TAFCR_TAMP1INSEL, TamperPin); +} + +/** + * @brief Get Tamper Pin + * @rmtoll TAFCR TAMP1INSEL LL_RTC_TAMPER_GetPin + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg LL_RTC_TamperPin_Default: RTC_AF1 is used as RTC Tamper Pin. + * @arg LL_RTC_TamperPin_Pos1: RTC_AF2 is selected as RTC Tamper Pin. (*) + * + * (*) value not applicable to all devices. + * @retval None + */ + +__STATIC_INLINE uint32_t LL_RTC_TAMPER_GetPin(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->TAFCR, RTC_TAFCR_TAMP1INSEL)); +} + +/** + * @} + */ + +/** @defgroup RTC_LL_EF_Wakeup Wakeup + * @{ + */ + +/** + * @brief Enable Wakeup timer + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. 
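+ * @note  Added sketch of the usual reconfiguration order (illustrative; a real
+ *        driver would bound the WUTWF polling loop with a timeout):
+ * @code
+ *   LL_RTC_WAKEUP_Disable(RTC);
+ *   while (LL_RTC_IsActiveFlag_WUTW(RTC) == 0U) { }         // wait for WUTWF
+ *   LL_RTC_WAKEUP_SetClock(RTC, LL_RTC_WAKEUPCLOCK_CKSPRE);  // ck_spre, typically 1 Hz
+ *   LL_RTC_WAKEUP_SetAutoReload(RTC, 9U);                    // (9 + 1) x 1 s = 10 s
+ *   LL_RTC_WAKEUP_Enable(RTC);
+ * @endcode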
+ * @rmtoll CR WUTE LL_RTC_WAKEUP_Enable + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_WAKEUP_Enable(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->CR, RTC_CR_WUTE); +} + +/** + * @brief Disable Wakeup timer + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR WUTE LL_RTC_WAKEUP_Disable + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_WAKEUP_Disable(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->CR, RTC_CR_WUTE); +} + +/** + * @brief Check if Wakeup timer is enabled or not + * @rmtoll CR WUTE LL_RTC_WAKEUP_IsEnabled + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_WAKEUP_IsEnabled(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->CR, RTC_CR_WUTE) == (RTC_CR_WUTE)) ? 1UL : 0UL); +} + +/** + * @brief Select Wakeup clock + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @note Bit can be written only when RTC_CR WUTE bit = 0 and RTC_ISR WUTWF bit = 1 + * @rmtoll CR WUCKSEL LL_RTC_WAKEUP_SetClock + * @param RTCx RTC Instance + * @param WakeupClock This parameter can be one of the following values: + * @arg @ref LL_RTC_WAKEUPCLOCK_DIV_16 + * @arg @ref LL_RTC_WAKEUPCLOCK_DIV_8 + * @arg @ref LL_RTC_WAKEUPCLOCK_DIV_4 + * @arg @ref LL_RTC_WAKEUPCLOCK_DIV_2 + * @arg @ref LL_RTC_WAKEUPCLOCK_CKSPRE + * @arg @ref LL_RTC_WAKEUPCLOCK_CKSPRE_WUT + * @retval None + */ +__STATIC_INLINE void LL_RTC_WAKEUP_SetClock(RTC_TypeDef *RTCx, uint32_t WakeupClock) +{ + MODIFY_REG(RTCx->CR, RTC_CR_WUCKSEL, WakeupClock); +} + +/** + * @brief Get Wakeup clock + * @rmtoll CR WUCKSEL LL_RTC_WAKEUP_GetClock + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_WAKEUPCLOCK_DIV_16 + * @arg @ref LL_RTC_WAKEUPCLOCK_DIV_8 + * @arg @ref LL_RTC_WAKEUPCLOCK_DIV_4 + * @arg @ref LL_RTC_WAKEUPCLOCK_DIV_2 + * @arg @ref LL_RTC_WAKEUPCLOCK_CKSPRE + * @arg @ref LL_RTC_WAKEUPCLOCK_CKSPRE_WUT + */ +__STATIC_INLINE uint32_t LL_RTC_WAKEUP_GetClock(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->CR, RTC_CR_WUCKSEL)); +} + +/** + * @brief Set Wakeup auto-reload value + * @note Bit can be written only when WUTWF is set to 1 in RTC_ISR + * @rmtoll WUTR WUT LL_RTC_WAKEUP_SetAutoReload + * @param RTCx RTC Instance + * @param Value Value between Min_Data=0x00 and Max_Data=0xFFFF + * @retval None + */ +__STATIC_INLINE void LL_RTC_WAKEUP_SetAutoReload(RTC_TypeDef *RTCx, uint32_t Value) +{ + MODIFY_REG(RTCx->WUTR, RTC_WUTR_WUT, Value); +} + +/** + * @brief Get Wakeup auto-reload value + * @rmtoll WUTR WUT LL_RTC_WAKEUP_GetAutoReload + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data=0xFFFF + */ +__STATIC_INLINE uint32_t LL_RTC_WAKEUP_GetAutoReload(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->WUTR, RTC_WUTR_WUT)); +} + +/** + * @} + */ + +/** @defgroup RTC_LL_EF_Backup_Registers Backup_Registers + * @{ + */ + +/** + * @brief Writes a data in a specified RTC Backup data register. 
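+ * @note  Added illustrative sketch: the registers are retained as long as the
+ *        backup domain is powered, so one is often used as a "calendar already
+ *        configured" marker.
+ * @code
+ *   LL_RTC_BAK_SetRegister(RTC, LL_RTC_BKP_DR0, 0x32F2U);        // arbitrary tag
+ *   uint32_t tag = LL_RTC_BAK_GetRegister(RTC, LL_RTC_BKP_DR0);
+ * @endcode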
+ * @rmtoll BKPxR BKP LL_RTC_BAK_SetRegister + * @param RTCx RTC Instance + * @param BackupRegister This parameter can be one of the following values: + * @arg @ref LL_RTC_BKP_DR0 + * @arg @ref LL_RTC_BKP_DR1 + * @arg @ref LL_RTC_BKP_DR2 + * @arg @ref LL_RTC_BKP_DR3 + * @arg @ref LL_RTC_BKP_DR4 + * @arg @ref LL_RTC_BKP_DR5 + * @arg @ref LL_RTC_BKP_DR6 + * @arg @ref LL_RTC_BKP_DR7 + * @arg @ref LL_RTC_BKP_DR8 + * @arg @ref LL_RTC_BKP_DR9 + * @arg @ref LL_RTC_BKP_DR10 + * @arg @ref LL_RTC_BKP_DR11 + * @arg @ref LL_RTC_BKP_DR12 + * @arg @ref LL_RTC_BKP_DR13 + * @arg @ref LL_RTC_BKP_DR14 + * @arg @ref LL_RTC_BKP_DR15 + * @arg @ref LL_RTC_BKP_DR16 + * @arg @ref LL_RTC_BKP_DR17 + * @arg @ref LL_RTC_BKP_DR18 + * @arg @ref LL_RTC_BKP_DR19 + * @param Data Value between Min_Data=0x00 and Max_Data=0xFFFFFFFF + * @retval None + */ +__STATIC_INLINE void LL_RTC_BAK_SetRegister(RTC_TypeDef *RTCx, uint32_t BackupRegister, uint32_t Data) +{ + uint32_t temp; + + temp = (uint32_t)(&(RTCx->BKP0R)); + temp += (BackupRegister * 4U); + + /* Write the specified register */ + *(__IO uint32_t *)temp = (uint32_t)Data; +} + +/** + * @brief Reads data from the specified RTC Backup data Register. + * @rmtoll BKPxR BKP LL_RTC_BAK_GetRegister + * @param RTCx RTC Instance + * @param BackupRegister This parameter can be one of the following values: + * @arg @ref LL_RTC_BKP_DR0 + * @arg @ref LL_RTC_BKP_DR1 + * @arg @ref LL_RTC_BKP_DR2 + * @arg @ref LL_RTC_BKP_DR3 + * @arg @ref LL_RTC_BKP_DR4 + * @arg @ref LL_RTC_BKP_DR5 + * @arg @ref LL_RTC_BKP_DR6 + * @arg @ref LL_RTC_BKP_DR7 + * @arg @ref LL_RTC_BKP_DR8 + * @arg @ref LL_RTC_BKP_DR9 + * @arg @ref LL_RTC_BKP_DR10 + * @arg @ref LL_RTC_BKP_DR11 + * @arg @ref LL_RTC_BKP_DR12 + * @arg @ref LL_RTC_BKP_DR13 + * @arg @ref LL_RTC_BKP_DR14 + * @arg @ref LL_RTC_BKP_DR15 + * @arg @ref LL_RTC_BKP_DR16 + * @arg @ref LL_RTC_BKP_DR17 + * @arg @ref LL_RTC_BKP_DR18 + * @arg @ref LL_RTC_BKP_DR19 + * @retval Value between Min_Data=0x00 and Max_Data=0xFFFFFFFF + */ +__STATIC_INLINE uint32_t LL_RTC_BAK_GetRegister(RTC_TypeDef *RTCx, uint32_t BackupRegister) +{ + uint32_t temp; + + temp = (uint32_t)(&(RTCx->BKP0R)); + temp += (BackupRegister * 4U); + + /* Read the specified register */ + return (*(__IO uint32_t *)temp); +} + +/** + * @} + */ + +/** @defgroup RTC_LL_EF_Calibration Calibration + * @{ + */ + +/** + * @brief Set Calibration output frequency (1 Hz or 512 Hz) + * @note Bits are write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. 
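+ * @note  Added sketch (illustrative): routing the 512 Hz calibration signal to the
+ *        calibration output for measurement, then switching it off again.
+ * @code
+ *   LL_RTC_CAL_SetOutputFreq(RTC, LL_RTC_CALIB_OUTPUT_512HZ);
+ *   // ... measure the output frequency externally ...
+ *   LL_RTC_CAL_SetOutputFreq(RTC, LL_RTC_CALIB_OUTPUT_NONE);
+ * @endcode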
+ * @rmtoll CR COE LL_RTC_CAL_SetOutputFreq\n + * CR COSEL LL_RTC_CAL_SetOutputFreq + * @param RTCx RTC Instance + * @param Frequency This parameter can be one of the following values: + * @arg @ref LL_RTC_CALIB_OUTPUT_NONE + * @arg @ref LL_RTC_CALIB_OUTPUT_1HZ + * @arg @ref LL_RTC_CALIB_OUTPUT_512HZ + * + * @retval None + */ +__STATIC_INLINE void LL_RTC_CAL_SetOutputFreq(RTC_TypeDef *RTCx, uint32_t Frequency) +{ + MODIFY_REG(RTCx->CR, RTC_CR_COE | RTC_CR_COSEL, Frequency); +} + +/** + * @brief Get Calibration output frequency (1 Hz or 512 Hz) + * @rmtoll CR COE LL_RTC_CAL_GetOutputFreq\n + * CR COSEL LL_RTC_CAL_GetOutputFreq + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_CALIB_OUTPUT_NONE + * @arg @ref LL_RTC_CALIB_OUTPUT_1HZ + * @arg @ref LL_RTC_CALIB_OUTPUT_512HZ + * + */ +__STATIC_INLINE uint32_t LL_RTC_CAL_GetOutputFreq(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->CR, RTC_CR_COE | RTC_CR_COSEL)); +} + +/** + * @brief Enable Coarse digital calibration + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @note It can be written in initialization mode only (@ref LL_RTC_EnableInitMode function) + * @rmtoll CR DCE LL_RTC_CAL_EnableCoarseDigital + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_CAL_EnableCoarseDigital(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->CR, RTC_CR_DCE); +} + +/** + * @brief Disable Coarse digital calibration + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @note It can be written in initialization mode only (@ref LL_RTC_EnableInitMode function) + * @rmtoll CR DCE LL_RTC_CAL_DisableCoarseDigital + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_CAL_DisableCoarseDigital(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->CR, RTC_CR_DCE); +} + +/** + * @brief Set the coarse digital calibration + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @note It can be written in initialization mode only (@ref LL_RTC_EnableInitMode function) + * @rmtoll CALIBR DCS LL_RTC_CAL_ConfigCoarseDigital\n + * CALIBR DC LL_RTC_CAL_ConfigCoarseDigital + * @param RTCx RTC Instance + * @param Sign This parameter can be one of the following values: + * @arg @ref LL_RTC_CALIB_SIGN_POSITIVE + * @arg @ref LL_RTC_CALIB_SIGN_NEGATIVE + * @param Value value of coarse calibration expressed in ppm (coded on 5 bits) + * @note This Calibration value should be between 0 and 63 when using negative sign with a 2-ppm step. + * @note This Calibration value should be between 0 and 126 when using positive sign with a 4-ppm step. 
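+ * @note  Added illustrative sketch (initialization mode and unlocked registers
+ *        assumed): applying roughly -10 ppm of coarse calibration.
+ * @code
+ *   LL_RTC_CAL_EnableCoarseDigital(RTC);
+ *   LL_RTC_CAL_ConfigCoarseDigital(RTC, LL_RTC_CALIB_SIGN_NEGATIVE, 5U); // 5 x 2 ppm
+ * @endcode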
+ * @retval None + */ +__STATIC_INLINE void LL_RTC_CAL_ConfigCoarseDigital(RTC_TypeDef *RTCx, uint32_t Sign, uint32_t Value) +{ + MODIFY_REG(RTCx->CALIBR, RTC_CALIBR_DCS | RTC_CALIBR_DC, Sign | Value); +} + +/** + * @brief Get the coarse digital calibration value + * @rmtoll CALIBR DC LL_RTC_CAL_GetCoarseDigitalValue + * @param RTCx RTC Instance + * @retval value of coarse calibration expressed in ppm (coded on 5 bits) + */ +__STATIC_INLINE uint32_t LL_RTC_CAL_GetCoarseDigitalValue(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->CALIBR, RTC_CALIBR_DC)); +} + +/** + * @brief Get the coarse digital calibration sign + * @rmtoll CALIBR DCS LL_RTC_CAL_GetCoarseDigitalSign + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_CALIB_SIGN_POSITIVE + * @arg @ref LL_RTC_CALIB_SIGN_NEGATIVE + */ +__STATIC_INLINE uint32_t LL_RTC_CAL_GetCoarseDigitalSign(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->CALIBR, RTC_CALIBR_DCS)); +} + +/** + * @brief Insert or not One RTCCLK pulse every 2exp11 pulses (frequency increased by 488.5 ppm) + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @note Bit can be written only when RECALPF is set to 0 in RTC_ISR + * @rmtoll CALR CALP LL_RTC_CAL_SetPulse + * @param RTCx RTC Instance + * @param Pulse This parameter can be one of the following values: + * @arg @ref LL_RTC_CALIB_INSERTPULSE_NONE + * @arg @ref LL_RTC_CALIB_INSERTPULSE_SET + * @retval None + */ +__STATIC_INLINE void LL_RTC_CAL_SetPulse(RTC_TypeDef *RTCx, uint32_t Pulse) +{ + MODIFY_REG(RTCx->CALR, RTC_CALR_CALP, Pulse); +} + +/** + * @brief Check if one RTCCLK has been inserted or not every 2exp11 pulses (frequency increased by 488.5 ppm) + * @rmtoll CALR CALP LL_RTC_CAL_IsPulseInserted + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_CAL_IsPulseInserted(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->CALR, RTC_CALR_CALP) == (RTC_CALR_CALP)) ? 1UL : 0UL); +} + +/** + * @brief Set smooth calibration cycle period + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @note Bit can be written only when RECALPF is set to 0 in RTC_ISR + * @rmtoll CALR CALW8 LL_RTC_CAL_SetPeriod\n + * CALR CALW16 LL_RTC_CAL_SetPeriod + * @param RTCx RTC Instance + * @param Period This parameter can be one of the following values: + * @arg @ref LL_RTC_CALIB_PERIOD_32SEC + * @arg @ref LL_RTC_CALIB_PERIOD_16SEC + * @arg @ref LL_RTC_CALIB_PERIOD_8SEC + * @retval None + */ +__STATIC_INLINE void LL_RTC_CAL_SetPeriod(RTC_TypeDef *RTCx, uint32_t Period) +{ + MODIFY_REG(RTCx->CALR, RTC_CALR_CALW8 | RTC_CALR_CALW16, Period); +} + +/** + * @brief Get smooth calibration cycle period + * @rmtoll CALR CALW8 LL_RTC_CAL_GetPeriod\n + * CALR CALW16 LL_RTC_CAL_GetPeriod + * @param RTCx RTC Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_RTC_CALIB_PERIOD_32SEC + * @arg @ref LL_RTC_CALIB_PERIOD_16SEC + * @arg @ref LL_RTC_CALIB_PERIOD_8SEC + */ +__STATIC_INLINE uint32_t LL_RTC_CAL_GetPeriod(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->CALR, RTC_CALR_CALW8 | RTC_CALR_CALW16)); +} + +/** + * @brief Set smooth Calibration minus + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. 
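+ * @note  Added sketch of a smooth-calibration setting (illustrative): CALM masks
+ *        out pulses over the selected window while CALP can add them back.
+ * @code
+ *   LL_RTC_CAL_SetPeriod(RTC, LL_RTC_CALIB_PERIOD_32SEC);
+ *   LL_RTC_CAL_SetPulse(RTC, LL_RTC_CALIB_INSERTPULSE_NONE);
+ *   LL_RTC_CAL_SetMinus(RTC, 0x64U);   // mask 100 pulses per 32 s window
+ * @endcode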
+ * @note Bit can be written only when RECALPF is set to 0 in RTC_ISR + * @rmtoll CALR CALM LL_RTC_CAL_SetMinus + * @param RTCx RTC Instance + * @param CalibMinus Value between Min_Data=0x00 and Max_Data=0x1FF + * @retval None + */ +__STATIC_INLINE void LL_RTC_CAL_SetMinus(RTC_TypeDef *RTCx, uint32_t CalibMinus) +{ + MODIFY_REG(RTCx->CALR, RTC_CALR_CALM, CalibMinus); +} + +/** + * @brief Get smooth Calibration minus + * @rmtoll CALR CALM LL_RTC_CAL_GetMinus + * @param RTCx RTC Instance + * @retval Value between Min_Data=0x00 and Max_Data= 0x1FF + */ +__STATIC_INLINE uint32_t LL_RTC_CAL_GetMinus(RTC_TypeDef *RTCx) +{ + return (uint32_t)(READ_BIT(RTCx->CALR, RTC_CALR_CALM)); +} + +/** + * @} + */ + +/** @defgroup RTC_LL_EF_FLAG_Management FLAG_Management + * @{ + */ + +/** + * @brief Get Recalibration pending Flag + * @rmtoll ISR RECALPF LL_RTC_IsActiveFlag_RECALP + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_RECALP(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_RECALPF) == (RTC_ISR_RECALPF)) ? 1UL : 0UL); +} + +#if defined(RTC_TAMPER2_SUPPORT) +/** + * @brief Get RTC_TAMP2 detection flag + * @rmtoll ISR TAMP2F LL_RTC_IsActiveFlag_TAMP2 + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_TAMP2(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_TAMP2F) == (RTC_ISR_TAMP2F)) ? 1UL : 0UL); +} +#endif /* RTC_TAMPER2_SUPPORT */ + +/** + * @brief Get RTC_TAMP1 detection flag + * @rmtoll ISR TAMP1F LL_RTC_IsActiveFlag_TAMP1 + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_TAMP1(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_TAMP1F) == (RTC_ISR_TAMP1F)) ? 1UL : 0UL); +} + +/** + * @brief Get Time-stamp overflow flag + * @rmtoll ISR TSOVF LL_RTC_IsActiveFlag_TSOV + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_TSOV(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_TSOVF) == (RTC_ISR_TSOVF)) ? 1UL : 0UL); +} + +/** + * @brief Get Time-stamp flag + * @rmtoll ISR TSF LL_RTC_IsActiveFlag_TS + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_TS(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_TSF) == (RTC_ISR_TSF)) ? 1UL : 0UL); +} + +/** + * @brief Get Wakeup timer flag + * @rmtoll ISR WUTF LL_RTC_IsActiveFlag_WUT + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_WUT(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_WUTF) == (RTC_ISR_WUTF)) ? 1UL : 0UL); +} + +/** + * @brief Get Alarm B flag + * @rmtoll ISR ALRBF LL_RTC_IsActiveFlag_ALRB + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_ALRB(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_ALRBF) == (RTC_ISR_ALRBF)) ? 1UL : 0UL); +} + +/** + * @brief Get Alarm A flag + * @rmtoll ISR ALRAF LL_RTC_IsActiveFlag_ALRA + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_ALRA(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_ALRAF) == (RTC_ISR_ALRAF)) ? 
1UL : 0UL); +} + +#if defined(RTC_TAMPER2_SUPPORT) +/** + * @brief Clear RTC_TAMP2 detection flag + * @rmtoll ISR TAMP2F LL_RTC_ClearFlag_TAMP2 + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ClearFlag_TAMP2(RTC_TypeDef *RTCx) +{ + WRITE_REG(RTCx->ISR, (~((RTC_ISR_TAMP2F | RTC_ISR_INIT) & 0x0000FFFFU) | (RTCx->ISR & RTC_ISR_INIT))); +} +#endif /* RTC_TAMPER2_SUPPORT */ + +/** + * @brief Clear RTC_TAMP1 detection flag + * @rmtoll ISR TAMP1F LL_RTC_ClearFlag_TAMP1 + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ClearFlag_TAMP1(RTC_TypeDef *RTCx) +{ + WRITE_REG(RTCx->ISR, (~((RTC_ISR_TAMP1F | RTC_ISR_INIT) & 0x0000FFFFU) | (RTCx->ISR & RTC_ISR_INIT))); +} + +/** + * @brief Clear Time-stamp overflow flag + * @rmtoll ISR TSOVF LL_RTC_ClearFlag_TSOV + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ClearFlag_TSOV(RTC_TypeDef *RTCx) +{ + WRITE_REG(RTCx->ISR, (~((RTC_ISR_TSOVF | RTC_ISR_INIT) & 0x0000FFFFU) | (RTCx->ISR & RTC_ISR_INIT))); +} + +/** + * @brief Clear Time-stamp flag + * @rmtoll ISR TSF LL_RTC_ClearFlag_TS + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ClearFlag_TS(RTC_TypeDef *RTCx) +{ + WRITE_REG(RTCx->ISR, (~((RTC_ISR_TSF | RTC_ISR_INIT) & 0x0000FFFFU) | (RTCx->ISR & RTC_ISR_INIT))); +} + +/** + * @brief Clear Wakeup timer flag + * @rmtoll ISR WUTF LL_RTC_ClearFlag_WUT + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ClearFlag_WUT(RTC_TypeDef *RTCx) +{ + WRITE_REG(RTCx->ISR, (~((RTC_ISR_WUTF | RTC_ISR_INIT) & 0x0000FFFFU) | (RTCx->ISR & RTC_ISR_INIT))); +} + +/** + * @brief Clear Alarm B flag + * @rmtoll ISR ALRBF LL_RTC_ClearFlag_ALRB + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ClearFlag_ALRB(RTC_TypeDef *RTCx) +{ + WRITE_REG(RTCx->ISR, (~((RTC_ISR_ALRBF | RTC_ISR_INIT) & 0x0000FFFFU) | (RTCx->ISR & RTC_ISR_INIT))); +} + +/** + * @brief Clear Alarm A flag + * @rmtoll ISR ALRAF LL_RTC_ClearFlag_ALRA + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ClearFlag_ALRA(RTC_TypeDef *RTCx) +{ + WRITE_REG(RTCx->ISR, (~((RTC_ISR_ALRAF | RTC_ISR_INIT) & 0x0000FFFFU) | (RTCx->ISR & RTC_ISR_INIT))); +} + +/** + * @brief Get Initialization flag + * @rmtoll ISR INITF LL_RTC_IsActiveFlag_INIT + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_INIT(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_INITF) == (RTC_ISR_INITF)) ? 1UL : 0UL); +} + +/** + * @brief Get Registers synchronization flag + * @rmtoll ISR RSF LL_RTC_IsActiveFlag_RS + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_RS(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_RSF) == (RTC_ISR_RSF)) ? 1UL : 0UL); +} + +/** + * @brief Clear Registers synchronization flag + * @rmtoll ISR RSF LL_RTC_ClearFlag_RS + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_ClearFlag_RS(RTC_TypeDef *RTCx) +{ + WRITE_REG(RTCx->ISR, (~((RTC_ISR_RSF | RTC_ISR_INIT) & 0x0000FFFFU) | (RTCx->ISR & RTC_ISR_INIT))); +} + +/** + * @brief Get Initialization status flag + * @rmtoll ISR INITS LL_RTC_IsActiveFlag_INITS + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_INITS(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_INITS) == (RTC_ISR_INITS)) ? 
1UL : 0UL); +} + +/** + * @brief Get Shift operation pending flag + * @rmtoll ISR SHPF LL_RTC_IsActiveFlag_SHP + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_SHP(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_SHPF) == (RTC_ISR_SHPF)) ? 1UL : 0UL); +} + +/** + * @brief Get Wakeup timer write flag + * @rmtoll ISR WUTWF LL_RTC_IsActiveFlag_WUTW + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_WUTW(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_WUTWF) == (RTC_ISR_WUTWF)) ? 1UL : 0UL); +} + +/** + * @brief Get Alarm B write flag + * @rmtoll ISR ALRBWF LL_RTC_IsActiveFlag_ALRBW + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_ALRBW(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_ALRBWF) == (RTC_ISR_ALRBWF)) ? 1UL : 0UL); +} + +/** + * @brief Get Alarm A write flag + * @rmtoll ISR ALRAWF LL_RTC_IsActiveFlag_ALRAW + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsActiveFlag_ALRAW(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->ISR, RTC_ISR_ALRAWF) == (RTC_ISR_ALRAWF)) ? 1UL : 0UL); +} + +/** + * @} + */ + +/** @defgroup RTC_LL_EF_IT_Management IT_Management + * @{ + */ + +/** + * @brief Enable Time-stamp interrupt + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR TSIE LL_RTC_EnableIT_TS + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_EnableIT_TS(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->CR, RTC_CR_TSIE); +} + +/** + * @brief Disable Time-stamp interrupt + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR TSIE LL_RTC_DisableIT_TS + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_DisableIT_TS(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->CR, RTC_CR_TSIE); +} + +/** + * @brief Enable Wakeup timer interrupt + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR WUTIE LL_RTC_EnableIT_WUT + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_EnableIT_WUT(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->CR, RTC_CR_WUTIE); +} + +/** + * @brief Disable Wakeup timer interrupt + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR WUTIE LL_RTC_DisableIT_WUT + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_DisableIT_WUT(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->CR, RTC_CR_WUTIE); +} + +/** + * @brief Enable Alarm B interrupt + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR ALRBIE LL_RTC_EnableIT_ALRB + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_EnableIT_ALRB(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->CR, RTC_CR_ALRBIE); +} + +/** + * @brief Disable Alarm B interrupt + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR ALRBIE LL_RTC_DisableIT_ALRB + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_DisableIT_ALRB(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->CR, RTC_CR_ALRBIE); +} + +/** + * @brief Enable Alarm A interrupt + * @note Bit is write-protected. 
@ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR ALRAIE LL_RTC_EnableIT_ALRA + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_EnableIT_ALRA(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->CR, RTC_CR_ALRAIE); +} + +/** + * @brief Disable Alarm A interrupt + * @note Bit is write-protected. @ref LL_RTC_DisableWriteProtection function should be called before. + * @rmtoll CR ALRAIE LL_RTC_DisableIT_ALRA + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_DisableIT_ALRA(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->CR, RTC_CR_ALRAIE); +} + +/** + * @brief Enable all Tamper Interrupt + * @rmtoll TAFCR TAMPIE LL_RTC_EnableIT_TAMP + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_EnableIT_TAMP(RTC_TypeDef *RTCx) +{ + SET_BIT(RTCx->TAFCR, RTC_TAFCR_TAMPIE); +} + +/** + * @brief Disable all Tamper Interrupt + * @rmtoll TAFCR TAMPIE LL_RTC_DisableIT_TAMP + * @param RTCx RTC Instance + * @retval None + */ +__STATIC_INLINE void LL_RTC_DisableIT_TAMP(RTC_TypeDef *RTCx) +{ + CLEAR_BIT(RTCx->TAFCR, RTC_TAFCR_TAMPIE); +} + +/** + * @brief Check if Time-stamp interrupt is enabled or not + * @rmtoll CR TSIE LL_RTC_IsEnabledIT_TS + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsEnabledIT_TS(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->CR, RTC_CR_TSIE) == (RTC_CR_TSIE)) ? 1UL : 0UL); +} + +/** + * @brief Check if Wakeup timer interrupt is enabled or not + * @rmtoll CR WUTIE LL_RTC_IsEnabledIT_WUT + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsEnabledIT_WUT(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->CR, RTC_CR_WUTIE) == (RTC_CR_WUTIE)) ? 1UL : 0UL); +} + +/** + * @brief Check if Alarm B interrupt is enabled or not + * @rmtoll CR ALRBIE LL_RTC_IsEnabledIT_ALRB + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsEnabledIT_ALRB(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->CR, RTC_CR_ALRBIE) == (RTC_CR_ALRBIE)) ? 1UL : 0UL); +} + +/** + * @brief Check if Alarm A interrupt is enabled or not + * @rmtoll CR ALRAIE LL_RTC_IsEnabledIT_ALRA + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsEnabledIT_ALRA(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->CR, RTC_CR_ALRAIE) == (RTC_CR_ALRAIE)) ? 1UL : 0UL); +} + +/** + * @brief Check if all the TAMPER interrupts are enabled or not + * @rmtoll TAFCR TAMPIE LL_RTC_IsEnabledIT_TAMP + * @param RTCx RTC Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_RTC_IsEnabledIT_TAMP(RTC_TypeDef *RTCx) +{ + return ((READ_BIT(RTCx->TAFCR, + RTC_TAFCR_TAMPIE) == (RTC_TAFCR_TAMPIE)) ? 
1UL : 0UL); +} + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup RTC_LL_EF_Init Initialization and de-initialization functions + * @{ + */ + +ErrorStatus LL_RTC_DeInit(RTC_TypeDef *RTCx); +ErrorStatus LL_RTC_Init(RTC_TypeDef *RTCx, LL_RTC_InitTypeDef *RTC_InitStruct); +void LL_RTC_StructInit(LL_RTC_InitTypeDef *RTC_InitStruct); +ErrorStatus LL_RTC_TIME_Init(RTC_TypeDef *RTCx, uint32_t RTC_Format, LL_RTC_TimeTypeDef *RTC_TimeStruct); +void LL_RTC_TIME_StructInit(LL_RTC_TimeTypeDef *RTC_TimeStruct); +ErrorStatus LL_RTC_DATE_Init(RTC_TypeDef *RTCx, uint32_t RTC_Format, LL_RTC_DateTypeDef *RTC_DateStruct); +void LL_RTC_DATE_StructInit(LL_RTC_DateTypeDef *RTC_DateStruct); +ErrorStatus LL_RTC_ALMA_Init(RTC_TypeDef *RTCx, uint32_t RTC_Format, LL_RTC_AlarmTypeDef *RTC_AlarmStruct); +ErrorStatus LL_RTC_ALMB_Init(RTC_TypeDef *RTCx, uint32_t RTC_Format, LL_RTC_AlarmTypeDef *RTC_AlarmStruct); +void LL_RTC_ALMA_StructInit(LL_RTC_AlarmTypeDef *RTC_AlarmStruct); +void LL_RTC_ALMB_StructInit(LL_RTC_AlarmTypeDef *RTC_AlarmStruct); +ErrorStatus LL_RTC_EnterInitMode(RTC_TypeDef *RTCx); +ErrorStatus LL_RTC_ExitInitMode(RTC_TypeDef *RTCx); +ErrorStatus LL_RTC_WaitForSynchro(RTC_TypeDef *RTCx); + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* defined(RTC) */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F4xx_LL_RTC_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_sdmmc.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_sdmmc.h index a4092d04..1d3c6529 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_sdmmc.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_sdmmc.h @@ -300,10 +300,14 @@ typedef struct #define SDMMC_SINGLE_BUS_SUPPORT 0x00010000U #define SDMMC_CARD_LOCKED 0x02000000U -#ifndef SDMMC_DATATIMEOUT -#define SDMMC_DATATIMEOUT 0xFFFFFFFFU +#ifndef SDMMC_DATATIMEOUT /*Hardware Data Timeout (ms) */ +#define SDMMC_DATATIMEOUT ((uint32_t)0xFFFFFFFFU) #endif /* SDMMC_DATATIMEOUT */ +#ifndef SDMMC_SWDATATIMEOUT /*Software Data Timeout (ms) */ +#define SDMMC_SWDATATIMEOUT SDMMC_DATATIMEOUT +#endif /* SDMMC_SWDATATIMEOUT */ + #define SDMMC_0TO7BITS 0x000000FFU #define SDMMC_8TO15BITS 0x0000FF00U #define SDMMC_16TO23BITS 0x00FF0000U diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_spi.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_spi.h new file mode 100644 index 00000000..371383e0 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_spi.h @@ -0,0 +1,2027 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_spi.h + * @author MCD Application Team + * @brief Header file of SPI LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F4xx_LL_SPI_H +#define STM32F4xx_LL_SPI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (SPI1) || defined (SPI2) || defined (SPI3) || defined (SPI4) || defined (SPI5) || defined(SPI6) + +/** @defgroup SPI_LL SPI + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ + +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup SPI_LL_ES_INIT SPI Exported Init structure + * @{ + */ + +/** + * @brief SPI Init structures definition + */ +typedef struct +{ + uint32_t TransferDirection; /*!< Specifies the SPI unidirectional or bidirectional data mode. + This parameter can be a value of @ref SPI_LL_EC_TRANSFER_MODE. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetTransferDirection().*/ + + uint32_t Mode; /*!< Specifies the SPI mode (Master/Slave). + This parameter can be a value of @ref SPI_LL_EC_MODE. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetMode().*/ + + uint32_t DataWidth; /*!< Specifies the SPI data width. + This parameter can be a value of @ref SPI_LL_EC_DATAWIDTH. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetDataWidth().*/ + + uint32_t ClockPolarity; /*!< Specifies the serial clock steady state. + This parameter can be a value of @ref SPI_LL_EC_POLARITY. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetClockPolarity().*/ + + uint32_t ClockPhase; /*!< Specifies the clock active edge for the bit capture. + This parameter can be a value of @ref SPI_LL_EC_PHASE. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetClockPhase().*/ + + uint32_t NSS; /*!< Specifies whether the NSS signal is managed by hardware (NSS pin) or by software using the SSI bit. + This parameter can be a value of @ref SPI_LL_EC_NSS_MODE. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetNSSMode().*/ + + uint32_t BaudRate; /*!< Specifies the BaudRate prescaler value which will be used to configure the transmit and receive SCK clock. + This parameter can be a value of @ref SPI_LL_EC_BAUDRATEPRESCALER. + @note The communication clock is derived from the master clock. The slave clock does not need to be set. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetBaudRatePrescaler().*/ + + uint32_t BitOrder; /*!< Specifies whether data transfers start from MSB or LSB bit. + This parameter can be a value of @ref SPI_LL_EC_BIT_ORDER. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetTransferBitOrder().*/ + + uint32_t CRCCalculation; /*!< Specifies if the CRC calculation is enabled or not. + This parameter can be a value of @ref SPI_LL_EC_CRC_CALCULATION. 
+ + This feature can be modified afterwards using unitary functions @ref LL_SPI_EnableCRC() and @ref LL_SPI_DisableCRC().*/ + + uint32_t CRCPoly; /*!< Specifies the polynomial used for the CRC calculation. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFFFF. + + This feature can be modified afterwards using unitary function @ref LL_SPI_SetCRCPolynomial().*/ + +} LL_SPI_InitTypeDef; + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup SPI_LL_Exported_Constants SPI Exported Constants + * @{ + */ + +/** @defgroup SPI_LL_EC_GET_FLAG Get Flags Defines + * @brief Flags defines which can be used with LL_SPI_ReadReg function + * @{ + */ +#define LL_SPI_SR_RXNE SPI_SR_RXNE /*!< Rx buffer not empty flag */ +#define LL_SPI_SR_TXE SPI_SR_TXE /*!< Tx buffer empty flag */ +#define LL_SPI_SR_BSY SPI_SR_BSY /*!< Busy flag */ +#define LL_SPI_SR_CRCERR SPI_SR_CRCERR /*!< CRC error flag */ +#define LL_SPI_SR_MODF SPI_SR_MODF /*!< Mode fault flag */ +#define LL_SPI_SR_OVR SPI_SR_OVR /*!< Overrun flag */ +#define LL_SPI_SR_FRE SPI_SR_FRE /*!< TI mode frame format error flag */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_IT IT Defines + * @brief IT defines which can be used with LL_SPI_ReadReg and LL_SPI_WriteReg functions + * @{ + */ +#define LL_SPI_CR2_RXNEIE SPI_CR2_RXNEIE /*!< Rx buffer not empty interrupt enable */ +#define LL_SPI_CR2_TXEIE SPI_CR2_TXEIE /*!< Tx buffer empty interrupt enable */ +#define LL_SPI_CR2_ERRIE SPI_CR2_ERRIE /*!< Error interrupt enable */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_MODE Operation Mode + * @{ + */ +#define LL_SPI_MODE_MASTER (SPI_CR1_MSTR | SPI_CR1_SSI) /*!< Master configuration */ +#define LL_SPI_MODE_SLAVE 0x00000000U /*!< Slave configuration */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_PROTOCOL Serial Protocol + * @{ + */ +#define LL_SPI_PROTOCOL_MOTOROLA 0x00000000U /*!< Motorola mode. 
Used as default value */ +#define LL_SPI_PROTOCOL_TI (SPI_CR2_FRF) /*!< TI mode */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_PHASE Clock Phase + * @{ + */ +#define LL_SPI_PHASE_1EDGE 0x00000000U /*!< First clock transition is the first data capture edge */ +#define LL_SPI_PHASE_2EDGE (SPI_CR1_CPHA) /*!< Second clock transition is the first data capture edge */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_POLARITY Clock Polarity + * @{ + */ +#define LL_SPI_POLARITY_LOW 0x00000000U /*!< Clock to 0 when idle */ +#define LL_SPI_POLARITY_HIGH (SPI_CR1_CPOL) /*!< Clock to 1 when idle */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_BAUDRATEPRESCALER Baud Rate Prescaler + * @{ + */ +#define LL_SPI_BAUDRATEPRESCALER_DIV2 0x00000000U /*!< BaudRate control equal to fPCLK/2 */ +#define LL_SPI_BAUDRATEPRESCALER_DIV4 (SPI_CR1_BR_0) /*!< BaudRate control equal to fPCLK/4 */ +#define LL_SPI_BAUDRATEPRESCALER_DIV8 (SPI_CR1_BR_1) /*!< BaudRate control equal to fPCLK/8 */ +#define LL_SPI_BAUDRATEPRESCALER_DIV16 (SPI_CR1_BR_1 | SPI_CR1_BR_0) /*!< BaudRate control equal to fPCLK/16 */ +#define LL_SPI_BAUDRATEPRESCALER_DIV32 (SPI_CR1_BR_2) /*!< BaudRate control equal to fPCLK/32 */ +#define LL_SPI_BAUDRATEPRESCALER_DIV64 (SPI_CR1_BR_2 | SPI_CR1_BR_0) /*!< BaudRate control equal to fPCLK/64 */ +#define LL_SPI_BAUDRATEPRESCALER_DIV128 (SPI_CR1_BR_2 | SPI_CR1_BR_1) /*!< BaudRate control equal to fPCLK/128 */ +#define LL_SPI_BAUDRATEPRESCALER_DIV256 (SPI_CR1_BR_2 | SPI_CR1_BR_1 | SPI_CR1_BR_0) /*!< BaudRate control equal to fPCLK/256 */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_BIT_ORDER Transmission Bit Order + * @{ + */ +#define LL_SPI_LSB_FIRST (SPI_CR1_LSBFIRST) /*!< Data is transmitted/received with the LSB first */ +#define LL_SPI_MSB_FIRST 0x00000000U /*!< Data is transmitted/received with the MSB first */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_TRANSFER_MODE Transfer Mode + * @{ + */ +#define LL_SPI_FULL_DUPLEX 0x00000000U /*!< Full-Duplex mode. Rx and Tx transfer on 2 lines */ +#define LL_SPI_SIMPLEX_RX (SPI_CR1_RXONLY) /*!< Simplex Rx mode. Rx transfer only on 1 line */ +#define LL_SPI_HALF_DUPLEX_RX (SPI_CR1_BIDIMODE) /*!< Half-Duplex Rx mode. Rx transfer on 1 line */ +#define LL_SPI_HALF_DUPLEX_TX (SPI_CR1_BIDIMODE | SPI_CR1_BIDIOE) /*!< Half-Duplex Tx mode. Tx transfer on 1 line */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_NSS_MODE Slave Select Pin Mode + * @{ + */ +#define LL_SPI_NSS_SOFT (SPI_CR1_SSM) /*!< NSS managed internally. NSS pin not used and free */ +#define LL_SPI_NSS_HARD_INPUT 0x00000000U /*!< NSS pin used in Input. Only used in Master mode */ +#define LL_SPI_NSS_HARD_OUTPUT (((uint32_t)SPI_CR2_SSOE << 16U)) /*!< NSS pin used in Output. 
Only used in Slave mode as chip select */ +/** + * @} + */ + +/** @defgroup SPI_LL_EC_DATAWIDTH Datawidth + * @{ + */ +#define LL_SPI_DATAWIDTH_8BIT 0x00000000U /*!< Data length for SPI transfer: 8 bits */ +#define LL_SPI_DATAWIDTH_16BIT (SPI_CR1_DFF) /*!< Data length for SPI transfer: 16 bits */ +/** + * @} + */ +#if defined(USE_FULL_LL_DRIVER) + +/** @defgroup SPI_LL_EC_CRC_CALCULATION CRC Calculation + * @{ + */ +#define LL_SPI_CRCCALCULATION_DISABLE 0x00000000U /*!< CRC calculation disabled */ +#define LL_SPI_CRCCALCULATION_ENABLE (SPI_CR1_CRCEN) /*!< CRC calculation enabled */ +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup SPI_LL_Exported_Macros SPI Exported Macros + * @{ + */ + +/** @defgroup SPI_LL_EM_WRITE_READ Common Write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in SPI register + * @param __INSTANCE__ SPI Instance + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_SPI_WriteReg(__INSTANCE__, __REG__, __VALUE__) WRITE_REG(__INSTANCE__->__REG__, (__VALUE__)) + +/** + * @brief Read a value in SPI register + * @param __INSTANCE__ SPI Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_SPI_ReadReg(__INSTANCE__, __REG__) READ_REG(__INSTANCE__->__REG__) +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup SPI_LL_Exported_Functions SPI Exported Functions + * @{ + */ + +/** @defgroup SPI_LL_EF_Configuration Configuration + * @{ + */ + +/** + * @brief Enable SPI peripheral + * @rmtoll CR1 SPE LL_SPI_Enable + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_Enable(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR1, SPI_CR1_SPE); +} + +/** + * @brief Disable SPI peripheral + * @note When disabling the SPI, follow the procedure described in the Reference Manual. + * @rmtoll CR1 SPE LL_SPI_Disable + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_Disable(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->CR1, SPI_CR1_SPE); +} + +/** + * @brief Check if SPI peripheral is enabled + * @rmtoll CR1 SPE LL_SPI_IsEnabled + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsEnabled(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->CR1, SPI_CR1_SPE) == (SPI_CR1_SPE)) ? 1UL : 0UL); +} + +/** + * @brief Set SPI operation mode to Master or Slave + * @note This bit should not be changed when communication is ongoing. 
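+ * @note  Minimal usage sketch (illustrative only; assumes SPI1 is the target
+ *        instance and that its bus clock is already enabled):
+ * @code
+ *   LL_SPI_Disable(SPI1);                        // safest to change the mode while the SPI is disabled
+ *   LL_SPI_SetMode(SPI1, LL_SPI_MODE_MASTER);
+ *   LL_SPI_Enable(SPI1);
+ * @endcode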
+ * @rmtoll CR1 MSTR LL_SPI_SetMode\n + * CR1 SSI LL_SPI_SetMode + * @param SPIx SPI Instance + * @param Mode This parameter can be one of the following values: + * @arg @ref LL_SPI_MODE_MASTER + * @arg @ref LL_SPI_MODE_SLAVE + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetMode(SPI_TypeDef *SPIx, uint32_t Mode) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_MSTR | SPI_CR1_SSI, Mode); +} + +/** + * @brief Get SPI operation mode (Master or Slave) + * @rmtoll CR1 MSTR LL_SPI_GetMode\n + * CR1 SSI LL_SPI_GetMode + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_MODE_MASTER + * @arg @ref LL_SPI_MODE_SLAVE + */ +__STATIC_INLINE uint32_t LL_SPI_GetMode(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR1, SPI_CR1_MSTR | SPI_CR1_SSI)); +} + +/** + * @brief Set serial protocol used + * @note This bit should be written only when SPI is disabled (SPE = 0) for correct operation. + * @rmtoll CR2 FRF LL_SPI_SetStandard + * @param SPIx SPI Instance + * @param Standard This parameter can be one of the following values: + * @arg @ref LL_SPI_PROTOCOL_MOTOROLA + * @arg @ref LL_SPI_PROTOCOL_TI + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetStandard(SPI_TypeDef *SPIx, uint32_t Standard) +{ + MODIFY_REG(SPIx->CR2, SPI_CR2_FRF, Standard); +} + +/** + * @brief Get serial protocol used + * @rmtoll CR2 FRF LL_SPI_GetStandard + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_PROTOCOL_MOTOROLA + * @arg @ref LL_SPI_PROTOCOL_TI + */ +__STATIC_INLINE uint32_t LL_SPI_GetStandard(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR2, SPI_CR2_FRF)); +} + +/** + * @brief Set clock phase + * @note This bit should not be changed when communication is ongoing. + * This bit is not used in SPI TI mode. + * @rmtoll CR1 CPHA LL_SPI_SetClockPhase + * @param SPIx SPI Instance + * @param ClockPhase This parameter can be one of the following values: + * @arg @ref LL_SPI_PHASE_1EDGE + * @arg @ref LL_SPI_PHASE_2EDGE + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetClockPhase(SPI_TypeDef *SPIx, uint32_t ClockPhase) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_CPHA, ClockPhase); +} + +/** + * @brief Get clock phase + * @rmtoll CR1 CPHA LL_SPI_GetClockPhase + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_PHASE_1EDGE + * @arg @ref LL_SPI_PHASE_2EDGE + */ +__STATIC_INLINE uint32_t LL_SPI_GetClockPhase(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR1, SPI_CR1_CPHA)); +} + +/** + * @brief Set clock polarity + * @note This bit should not be changed when communication is ongoing. + * This bit is not used in SPI TI mode. 
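+ * @note  Hedged sketch of a classic SPI mode-0 setup (CPOL low, capture on the
+ *        first edge), assuming SPI2 is the instance and it is still disabled:
+ * @code
+ *   LL_SPI_SetClockPolarity(SPI2, LL_SPI_POLARITY_LOW);
+ *   LL_SPI_SetClockPhase(SPI2, LL_SPI_PHASE_1EDGE);
+ * @endcode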
+ * @rmtoll CR1 CPOL LL_SPI_SetClockPolarity + * @param SPIx SPI Instance + * @param ClockPolarity This parameter can be one of the following values: + * @arg @ref LL_SPI_POLARITY_LOW + * @arg @ref LL_SPI_POLARITY_HIGH + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetClockPolarity(SPI_TypeDef *SPIx, uint32_t ClockPolarity) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_CPOL, ClockPolarity); +} + +/** + * @brief Get clock polarity + * @rmtoll CR1 CPOL LL_SPI_GetClockPolarity + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_POLARITY_LOW + * @arg @ref LL_SPI_POLARITY_HIGH + */ +__STATIC_INLINE uint32_t LL_SPI_GetClockPolarity(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR1, SPI_CR1_CPOL)); +} + +/** + * @brief Set baud rate prescaler + * @note These bits should not be changed when communication is ongoing. SPI BaudRate = fPCLK/Prescaler. + * @rmtoll CR1 BR LL_SPI_SetBaudRatePrescaler + * @param SPIx SPI Instance + * @param BaudRate This parameter can be one of the following values: + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV2 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV4 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV8 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV16 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV32 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV64 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV128 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV256 + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetBaudRatePrescaler(SPI_TypeDef *SPIx, uint32_t BaudRate) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_BR, BaudRate); +} + +/** + * @brief Get baud rate prescaler + * @rmtoll CR1 BR LL_SPI_GetBaudRatePrescaler + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV2 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV4 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV8 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV16 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV32 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV64 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV128 + * @arg @ref LL_SPI_BAUDRATEPRESCALER_DIV256 + */ +__STATIC_INLINE uint32_t LL_SPI_GetBaudRatePrescaler(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR1, SPI_CR1_BR)); +} + +/** + * @brief Set transfer bit order + * @note This bit should not be changed when communication is ongoing. This bit is not used in SPI TI mode. + * @rmtoll CR1 LSBFIRST LL_SPI_SetTransferBitOrder + * @param SPIx SPI Instance + * @param BitOrder This parameter can be one of the following values: + * @arg @ref LL_SPI_LSB_FIRST + * @arg @ref LL_SPI_MSB_FIRST + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetTransferBitOrder(SPI_TypeDef *SPIx, uint32_t BitOrder) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_LSBFIRST, BitOrder); +} + +/** + * @brief Get transfer bit order + * @rmtoll CR1 LSBFIRST LL_SPI_GetTransferBitOrder + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_LSB_FIRST + * @arg @ref LL_SPI_MSB_FIRST + */ +__STATIC_INLINE uint32_t LL_SPI_GetTransferBitOrder(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR1, SPI_CR1_LSBFIRST)); +} + +/** + * @brief Set transfer direction mode + * @note For Half-Duplex mode, Rx Direction is set by default. + * In master mode, the MOSI pin is used and in slave mode, the MISO pin is used for Half-Duplex. 
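+ * @note  Illustrative half-duplex sketch on a single data line (assumes SPI1 is
+ *        configured as master; error handling is omitted):
+ * @code
+ *   LL_SPI_SetTransferDirection(SPI1, LL_SPI_HALF_DUPLEX_TX);   // drive the line
+ *   LL_SPI_TransmitData8(SPI1, 0xA5U);
+ *   while (LL_SPI_IsActiveFlag_BSY(SPI1) == 1UL) { }            // wait for the frame to finish
+ *   LL_SPI_SetTransferDirection(SPI1, LL_SPI_HALF_DUPLEX_RX);   // release the line and listen
+ * @endcode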
+ * @rmtoll CR1 RXONLY LL_SPI_SetTransferDirection\n + * CR1 BIDIMODE LL_SPI_SetTransferDirection\n + * CR1 BIDIOE LL_SPI_SetTransferDirection + * @param SPIx SPI Instance + * @param TransferDirection This parameter can be one of the following values: + * @arg @ref LL_SPI_FULL_DUPLEX + * @arg @ref LL_SPI_SIMPLEX_RX + * @arg @ref LL_SPI_HALF_DUPLEX_RX + * @arg @ref LL_SPI_HALF_DUPLEX_TX + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetTransferDirection(SPI_TypeDef *SPIx, uint32_t TransferDirection) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_RXONLY | SPI_CR1_BIDIMODE | SPI_CR1_BIDIOE, TransferDirection); +} + +/** + * @brief Get transfer direction mode + * @rmtoll CR1 RXONLY LL_SPI_GetTransferDirection\n + * CR1 BIDIMODE LL_SPI_GetTransferDirection\n + * CR1 BIDIOE LL_SPI_GetTransferDirection + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_FULL_DUPLEX + * @arg @ref LL_SPI_SIMPLEX_RX + * @arg @ref LL_SPI_HALF_DUPLEX_RX + * @arg @ref LL_SPI_HALF_DUPLEX_TX + */ +__STATIC_INLINE uint32_t LL_SPI_GetTransferDirection(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR1, SPI_CR1_RXONLY | SPI_CR1_BIDIMODE | SPI_CR1_BIDIOE)); +} + +/** + * @brief Set frame data width + * @rmtoll CR1 DFF LL_SPI_SetDataWidth + * @param SPIx SPI Instance + * @param DataWidth This parameter can be one of the following values: + * @arg @ref LL_SPI_DATAWIDTH_8BIT + * @arg @ref LL_SPI_DATAWIDTH_16BIT + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetDataWidth(SPI_TypeDef *SPIx, uint32_t DataWidth) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_DFF, DataWidth); +} + +/** + * @brief Get frame data width + * @rmtoll CR1 DFF LL_SPI_GetDataWidth + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_DATAWIDTH_8BIT + * @arg @ref LL_SPI_DATAWIDTH_16BIT + */ +__STATIC_INLINE uint32_t LL_SPI_GetDataWidth(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->CR1, SPI_CR1_DFF)); +} + +/** + * @} + */ + +/** @defgroup SPI_LL_EF_CRC_Management CRC Management + * @{ + */ + +/** + * @brief Enable CRC + * @note This bit should be written only when SPI is disabled (SPE = 0) for correct operation. + * @rmtoll CR1 CRCEN LL_SPI_EnableCRC + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_EnableCRC(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR1, SPI_CR1_CRCEN); +} + +/** + * @brief Disable CRC + * @note This bit should be written only when SPI is disabled (SPE = 0) for correct operation. + * @rmtoll CR1 CRCEN LL_SPI_DisableCRC + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_DisableCRC(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->CR1, SPI_CR1_CRCEN); +} + +/** + * @brief Check if CRC is enabled + * @note This bit should be written only when SPI is disabled (SPE = 0) for correct operation. + * @rmtoll CR1 CRCEN LL_SPI_IsEnabledCRC + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsEnabledCRC(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->CR1, SPI_CR1_CRCEN) == (SPI_CR1_CRCEN)) ? 1UL : 0UL); +} + +/** + * @brief Set CRCNext to transfer CRC on the line + * @note This bit has to be written as soon as the last data is written in the SPIx_DR register. 
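+ * @note  End-of-frame sketch (illustrative; assumes CRC was enabled beforehand with
+ *        LL_SPI_EnableCRC() and that last_byte is the caller's final data word):
+ * @code
+ *   LL_SPI_TransmitData8(SPI1, last_byte);   // write the last data word
+ *   LL_SPI_SetCRCNext(SPI1);                 // request CRC transmission right after it
+ * @endcode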
+ * @rmtoll CR1 CRCNEXT LL_SPI_SetCRCNext + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetCRCNext(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR1, SPI_CR1_CRCNEXT); +} + +/** + * @brief Set polynomial for CRC calculation + * @rmtoll CRCPR CRCPOLY LL_SPI_SetCRCPolynomial + * @param SPIx SPI Instance + * @param CRCPoly This parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFFFF + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetCRCPolynomial(SPI_TypeDef *SPIx, uint32_t CRCPoly) +{ + WRITE_REG(SPIx->CRCPR, (uint16_t)CRCPoly); +} + +/** + * @brief Get polynomial for CRC calculation + * @rmtoll CRCPR CRCPOLY LL_SPI_GetCRCPolynomial + * @param SPIx SPI Instance + * @retval Returned value is a number between Min_Data = 0x00 and Max_Data = 0xFFFF + */ +__STATIC_INLINE uint32_t LL_SPI_GetCRCPolynomial(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_REG(SPIx->CRCPR)); +} + +/** + * @brief Get Rx CRC + * @rmtoll RXCRCR RXCRC LL_SPI_GetRxCRC + * @param SPIx SPI Instance + * @retval Returned value is a number between Min_Data = 0x00 and Max_Data = 0xFFFF + */ +__STATIC_INLINE uint32_t LL_SPI_GetRxCRC(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_REG(SPIx->RXCRCR)); +} + +/** + * @brief Get Tx CRC + * @rmtoll TXCRCR TXCRC LL_SPI_GetTxCRC + * @param SPIx SPI Instance + * @retval Returned value is a number between Min_Data = 0x00 and Max_Data = 0xFFFF + */ +__STATIC_INLINE uint32_t LL_SPI_GetTxCRC(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_REG(SPIx->TXCRCR)); +} + +/** + * @} + */ + +/** @defgroup SPI_LL_EF_NSS_Management Slave Select Pin Management + * @{ + */ + +/** + * @brief Set NSS mode + * @note LL_SPI_NSS_SOFT Mode is not used in SPI TI mode. + * @rmtoll CR1 SSM LL_SPI_SetNSSMode\n + * @rmtoll CR2 SSOE LL_SPI_SetNSSMode + * @param SPIx SPI Instance + * @param NSS This parameter can be one of the following values: + * @arg @ref LL_SPI_NSS_SOFT + * @arg @ref LL_SPI_NSS_HARD_INPUT + * @arg @ref LL_SPI_NSS_HARD_OUTPUT + * @retval None + */ +__STATIC_INLINE void LL_SPI_SetNSSMode(SPI_TypeDef *SPIx, uint32_t NSS) +{ + MODIFY_REG(SPIx->CR1, SPI_CR1_SSM, NSS); + MODIFY_REG(SPIx->CR2, SPI_CR2_SSOE, ((uint32_t)(NSS >> 16U))); +} + +/** + * @brief Get NSS mode + * @rmtoll CR1 SSM LL_SPI_GetNSSMode\n + * @rmtoll CR2 SSOE LL_SPI_GetNSSMode + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_SPI_NSS_SOFT + * @arg @ref LL_SPI_NSS_HARD_INPUT + * @arg @ref LL_SPI_NSS_HARD_OUTPUT + */ +__STATIC_INLINE uint32_t LL_SPI_GetNSSMode(const SPI_TypeDef *SPIx) +{ + uint32_t Ssm = (READ_BIT(SPIx->CR1, SPI_CR1_SSM)); + uint32_t Ssoe = (READ_BIT(SPIx->CR2, SPI_CR2_SSOE) << 16U); + return (Ssm | Ssoe); +} + +/** + * @} + */ + +/** @defgroup SPI_LL_EF_FLAG_Management FLAG Management + * @{ + */ + +/** + * @brief Check if Rx buffer is not empty + * @rmtoll SR RXNE LL_SPI_IsActiveFlag_RXNE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsActiveFlag_RXNE(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_RXNE) == (SPI_SR_RXNE)) ? 1UL : 0UL); +} + +/** + * @brief Check if Tx buffer is empty + * @rmtoll SR TXE LL_SPI_IsActiveFlag_TXE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsActiveFlag_TXE(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_TXE) == (SPI_SR_TXE)) ? 
1UL : 0UL); +} + +/** + * @brief Get CRC error flag + * @rmtoll SR CRCERR LL_SPI_IsActiveFlag_CRCERR + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsActiveFlag_CRCERR(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_CRCERR) == (SPI_SR_CRCERR)) ? 1UL : 0UL); +} + +/** + * @brief Get mode fault error flag + * @rmtoll SR MODF LL_SPI_IsActiveFlag_MODF + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsActiveFlag_MODF(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_MODF) == (SPI_SR_MODF)) ? 1UL : 0UL); +} + +/** + * @brief Get overrun error flag + * @rmtoll SR OVR LL_SPI_IsActiveFlag_OVR + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsActiveFlag_OVR(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_OVR) == (SPI_SR_OVR)) ? 1UL : 0UL); +} + +/** + * @brief Get busy flag + * @note The BSY flag is cleared under any one of the following conditions: + * -When the SPI is correctly disabled + * -When a fault is detected in Master mode (MODF bit set to 1) + * -In Master mode, when it finishes a data transmission and no new data is ready to be + * sent + * -In Slave mode, when the BSY flag is set to '0' for at least one SPI clock cycle between + * each data transfer. + * @rmtoll SR BSY LL_SPI_IsActiveFlag_BSY + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsActiveFlag_BSY(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_BSY) == (SPI_SR_BSY)) ? 1UL : 0UL); +} + +/** + * @brief Get frame format error flag + * @rmtoll SR FRE LL_SPI_IsActiveFlag_FRE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsActiveFlag_FRE(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_FRE) == (SPI_SR_FRE)) ? 
1UL : 0UL); +} + +/** + * @brief Clear CRC error flag + * @rmtoll SR CRCERR LL_SPI_ClearFlag_CRCERR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_ClearFlag_CRCERR(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->SR, SPI_SR_CRCERR); +} + +/** + * @brief Clear mode fault error flag + * @note Clearing this flag is done by a read access to the SPIx_SR + * register followed by a write access to the SPIx_CR1 register + * @rmtoll SR MODF LL_SPI_ClearFlag_MODF + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_ClearFlag_MODF(SPI_TypeDef *SPIx) +{ + __IO uint32_t tmpreg_sr; + tmpreg_sr = SPIx->SR; + (void) tmpreg_sr; + CLEAR_BIT(SPIx->CR1, SPI_CR1_SPE); +} + +/** + * @brief Clear overrun error flag + * @note Clearing this flag is done by a read access to the SPIx_DR + * register followed by a read access to the SPIx_SR register + * @rmtoll SR OVR LL_SPI_ClearFlag_OVR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_ClearFlag_OVR(SPI_TypeDef *SPIx) +{ + __IO uint32_t tmpreg; + tmpreg = SPIx->DR; + (void) tmpreg; + tmpreg = SPIx->SR; + (void) tmpreg; +} + +/** + * @brief Clear frame format error flag + * @note Clearing this flag is done by reading SPIx_SR register + * @rmtoll SR FRE LL_SPI_ClearFlag_FRE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_ClearFlag_FRE(SPI_TypeDef *SPIx) +{ + __IO uint32_t tmpreg; + tmpreg = SPIx->SR; + (void) tmpreg; +} + +/** + * @} + */ + +/** @defgroup SPI_LL_EF_IT_Management Interrupt Management + * @{ + */ + +/** + * @brief Enable error interrupt + * @note This bit controls the generation of an interrupt when an error condition occurs (CRCERR, OVR, MODF in SPI mode, FRE at TI mode). + * @rmtoll CR2 ERRIE LL_SPI_EnableIT_ERR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_EnableIT_ERR(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR2, SPI_CR2_ERRIE); +} + +/** + * @brief Enable Rx buffer not empty interrupt + * @rmtoll CR2 RXNEIE LL_SPI_EnableIT_RXNE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_EnableIT_RXNE(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR2, SPI_CR2_RXNEIE); +} + +/** + * @brief Enable Tx buffer empty interrupt + * @rmtoll CR2 TXEIE LL_SPI_EnableIT_TXE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_EnableIT_TXE(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR2, SPI_CR2_TXEIE); +} + +/** + * @brief Disable error interrupt + * @note This bit controls the generation of an interrupt when an error condition occurs (CRCERR, OVR, MODF in SPI mode, FRE at TI mode). + * @rmtoll CR2 ERRIE LL_SPI_DisableIT_ERR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_DisableIT_ERR(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->CR2, SPI_CR2_ERRIE); +} + +/** + * @brief Disable Rx buffer not empty interrupt + * @rmtoll CR2 RXNEIE LL_SPI_DisableIT_RXNE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_DisableIT_RXNE(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->CR2, SPI_CR2_RXNEIE); +} + +/** + * @brief Disable Tx buffer empty interrupt + * @rmtoll CR2 TXEIE LL_SPI_DisableIT_TXE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_DisableIT_TXE(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->CR2, SPI_CR2_TXEIE); +} + +/** + * @brief Check if error interrupt is enabled + * @rmtoll CR2 ERRIE LL_SPI_IsEnabledIT_ERR + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). 
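+ * @note  Illustrative guard sketch (assumes SPI1 and that the corresponding NVIC
+ *        line is configured elsewhere):
+ * @code
+ *   if (LL_SPI_IsEnabledIT_ERR(SPI1) == 0UL)
+ *   {
+ *     LL_SPI_EnableIT_ERR(SPI1);   // let CRCERR/OVR/MODF conditions raise an interrupt
+ *   }
+ * @endcode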
+ */ +__STATIC_INLINE uint32_t LL_SPI_IsEnabledIT_ERR(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->CR2, SPI_CR2_ERRIE) == (SPI_CR2_ERRIE)) ? 1UL : 0UL); +} + +/** + * @brief Check if Rx buffer not empty interrupt is enabled + * @rmtoll CR2 RXNEIE LL_SPI_IsEnabledIT_RXNE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsEnabledIT_RXNE(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->CR2, SPI_CR2_RXNEIE) == (SPI_CR2_RXNEIE)) ? 1UL : 0UL); +} + +/** + * @brief Check if Tx buffer empty interrupt + * @rmtoll CR2 TXEIE LL_SPI_IsEnabledIT_TXE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsEnabledIT_TXE(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->CR2, SPI_CR2_TXEIE) == (SPI_CR2_TXEIE)) ? 1UL : 0UL); +} + +/** + * @} + */ + +/** @defgroup SPI_LL_EF_DMA_Management DMA Management + * @{ + */ + +/** + * @brief Enable DMA Rx + * @rmtoll CR2 RXDMAEN LL_SPI_EnableDMAReq_RX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_EnableDMAReq_RX(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR2, SPI_CR2_RXDMAEN); +} + +/** + * @brief Disable DMA Rx + * @rmtoll CR2 RXDMAEN LL_SPI_DisableDMAReq_RX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_DisableDMAReq_RX(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->CR2, SPI_CR2_RXDMAEN); +} + +/** + * @brief Check if DMA Rx is enabled + * @rmtoll CR2 RXDMAEN LL_SPI_IsEnabledDMAReq_RX + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsEnabledDMAReq_RX(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->CR2, SPI_CR2_RXDMAEN) == (SPI_CR2_RXDMAEN)) ? 1UL : 0UL); +} + +/** + * @brief Enable DMA Tx + * @rmtoll CR2 TXDMAEN LL_SPI_EnableDMAReq_TX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_EnableDMAReq_TX(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->CR2, SPI_CR2_TXDMAEN); +} + +/** + * @brief Disable DMA Tx + * @rmtoll CR2 TXDMAEN LL_SPI_DisableDMAReq_TX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_SPI_DisableDMAReq_TX(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->CR2, SPI_CR2_TXDMAEN); +} + +/** + * @brief Check if DMA Tx is enabled + * @rmtoll CR2 TXDMAEN LL_SPI_IsEnabledDMAReq_TX + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_SPI_IsEnabledDMAReq_TX(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->CR2, SPI_CR2_TXDMAEN) == (SPI_CR2_TXDMAEN)) ? 
1UL : 0UL); +} + +/** + * @brief Get the data register address used for DMA transfer + * @rmtoll DR DR LL_SPI_DMA_GetRegAddr + * @param SPIx SPI Instance + * @retval Address of data register + */ +__STATIC_INLINE uint32_t LL_SPI_DMA_GetRegAddr(const SPI_TypeDef *SPIx) +{ + return (uint32_t) &(SPIx->DR); +} + +/** + * @} + */ + +/** @defgroup SPI_LL_EF_DATA_Management DATA Management + * @{ + */ + +/** + * @brief Read 8-Bits in the data register + * @rmtoll DR DR LL_SPI_ReceiveData8 + * @param SPIx SPI Instance + * @retval RxData Value between Min_Data=0x00 and Max_Data=0xFF + */ +__STATIC_INLINE uint8_t LL_SPI_ReceiveData8(SPI_TypeDef *SPIx) +{ + return (*((__IO uint8_t *)&SPIx->DR)); +} + +/** + * @brief Read 16-Bits in the data register + * @rmtoll DR DR LL_SPI_ReceiveData16 + * @param SPIx SPI Instance + * @retval RxData Value between Min_Data=0x00 and Max_Data=0xFFFF + */ +__STATIC_INLINE uint16_t LL_SPI_ReceiveData16(SPI_TypeDef *SPIx) +{ + return (uint16_t)(READ_REG(SPIx->DR)); +} + +/** + * @brief Write 8-Bits in the data register + * @rmtoll DR DR LL_SPI_TransmitData8 + * @param SPIx SPI Instance + * @param TxData Value between Min_Data=0x00 and Max_Data=0xFF + * @retval None + */ +__STATIC_INLINE void LL_SPI_TransmitData8(SPI_TypeDef *SPIx, uint8_t TxData) +{ +#if defined (__GNUC__) + __IO uint8_t *spidr = ((__IO uint8_t *)&SPIx->DR); + *spidr = TxData; +#else + *((__IO uint8_t *)&SPIx->DR) = TxData; +#endif /* __GNUC__ */ +} + +/** + * @brief Write 16-Bits in the data register + * @rmtoll DR DR LL_SPI_TransmitData16 + * @param SPIx SPI Instance + * @param TxData Value between Min_Data=0x00 and Max_Data=0xFFFF + * @retval None + */ +__STATIC_INLINE void LL_SPI_TransmitData16(SPI_TypeDef *SPIx, uint16_t TxData) +{ +#if defined (__GNUC__) + __IO uint16_t *spidr = ((__IO uint16_t *)&SPIx->DR); + *spidr = TxData; +#else + SPIx->DR = TxData; +#endif /* __GNUC__ */ +} + +/** + * @} + */ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup SPI_LL_EF_Init Initialization and de-initialization functions + * @{ + */ + +ErrorStatus LL_SPI_DeInit(const SPI_TypeDef *SPIx); +ErrorStatus LL_SPI_Init(SPI_TypeDef *SPIx, LL_SPI_InitTypeDef *SPI_InitStruct); +void LL_SPI_StructInit(LL_SPI_InitTypeDef *SPI_InitStruct); + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ +/** + * @} + */ + +/** + * @} + */ + +/** @defgroup I2S_LL I2S + * @{ + */ + +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ + +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup I2S_LL_ES_INIT I2S Exported Init structure + * @{ + */ + +/** + * @brief I2S Init structure definition + */ + +typedef struct +{ + uint32_t Mode; /*!< Specifies the I2S operating mode. + This parameter can be a value of @ref I2S_LL_EC_MODE + + This feature can be modified afterwards using unitary function @ref LL_I2S_SetTransferMode().*/ + + uint32_t Standard; /*!< Specifies the standard used for the I2S communication. + This parameter can be a value of @ref I2S_LL_EC_STANDARD + + This feature can be modified afterwards using unitary function @ref LL_I2S_SetStandard().*/ + + + uint32_t DataFormat; /*!< Specifies the data format for the I2S communication. 
+ This parameter can be a value of @ref I2S_LL_EC_DATA_FORMAT
+
+ This feature can be modified afterwards using unitary function @ref LL_I2S_SetDataFormat().*/
+
+
+ uint32_t MCLKOutput; /*!< Specifies whether the I2S MCLK output is enabled or not.
+ This parameter can be a value of @ref I2S_LL_EC_MCLK_OUTPUT
+
+ This feature can be modified afterwards using unitary functions @ref LL_I2S_EnableMasterClock() or @ref LL_I2S_DisableMasterClock.*/
+
+
+ uint32_t AudioFreq; /*!< Specifies the frequency selected for the I2S communication.
+ This parameter can be a value of @ref I2S_LL_EC_AUDIO_FREQ
+
+ Audio Frequency can be modified afterwards using Reference manual formulas to calculate Prescaler Linear, Parity
+ and unitary functions @ref LL_I2S_SetPrescalerLinear() and @ref LL_I2S_SetPrescalerParity() to set it.*/
+
+
+ uint32_t ClockPolarity; /*!< Specifies the idle state of the I2S clock.
+ This parameter can be a value of @ref I2S_LL_EC_POLARITY
+
+ This feature can be modified afterwards using unitary function @ref LL_I2S_SetClockPolarity().*/
+
+} LL_I2S_InitTypeDef;
+
+/**
+ * @}
+ */
+#endif /*USE_FULL_LL_DRIVER*/
+
+/* Exported constants --------------------------------------------------------*/
+/** @defgroup I2S_LL_Exported_Constants I2S Exported Constants
+ * @{
+ */
+
+/** @defgroup I2S_LL_EC_GET_FLAG Get Flags Defines
+ * @brief Flags defines which can be used with LL_I2S_ReadReg function
+ * @{
+ */
+#define LL_I2S_SR_RXNE LL_SPI_SR_RXNE /*!< Rx buffer not empty flag */
+#define LL_I2S_SR_TXE LL_SPI_SR_TXE /*!< Tx buffer empty flag */
+#define LL_I2S_SR_BSY LL_SPI_SR_BSY /*!< Busy flag */
+#define LL_I2S_SR_UDR SPI_SR_UDR /*!< Underrun flag */
+#define LL_I2S_SR_OVR LL_SPI_SR_OVR /*!< Overrun flag */
+#define LL_I2S_SR_FRE LL_SPI_SR_FRE /*!< TI mode frame format error flag */
+/**
+ * @}
+ */
+
+/** @defgroup SPI_LL_EC_IT IT Defines
+ * @brief IT defines which can be used with LL_SPI_ReadReg and LL_SPI_WriteReg functions
+ * @{
+ */
+#define LL_I2S_CR2_RXNEIE LL_SPI_CR2_RXNEIE /*!< Rx buffer not empty interrupt enable */
+#define LL_I2S_CR2_TXEIE LL_SPI_CR2_TXEIE /*!< Tx buffer empty interrupt enable */
+#define LL_I2S_CR2_ERRIE LL_SPI_CR2_ERRIE /*!< Error interrupt enable */
+/**
+ * @}
+ */
+
+/** @defgroup I2S_LL_EC_DATA_FORMAT Data format
+ * @{
+ */
+#define LL_I2S_DATAFORMAT_16B 0x00000000U /*!< Data length 16 bits, Channel length 16bit */
+#define LL_I2S_DATAFORMAT_16B_EXTENDED (SPI_I2SCFGR_CHLEN) /*!< Data length 16 bits, Channel length 32bit */
+#define LL_I2S_DATAFORMAT_24B (SPI_I2SCFGR_CHLEN | SPI_I2SCFGR_DATLEN_0) /*!< Data length 24 bits, Channel length 32bit */
+#define LL_I2S_DATAFORMAT_32B (SPI_I2SCFGR_CHLEN | SPI_I2SCFGR_DATLEN_1) /*!< Data length 32 bits, Channel length 32bit */
+/**
+ * @}
+ */
+
+/** @defgroup I2S_LL_EC_POLARITY Clock Polarity
+ * @{
+ */
+#define LL_I2S_POLARITY_LOW 0x00000000U /*!< Clock steady state is low level */
+#define LL_I2S_POLARITY_HIGH (SPI_I2SCFGR_CKPOL) /*!< Clock steady state is high level */
+/**
+ * @}
+ */
+
+/** @defgroup I2S_LL_EC_STANDARD I2s Standard
+ * @{
+ */
+#define LL_I2S_STANDARD_PHILIPS 0x00000000U /*!< I2S standard philips */
+#define LL_I2S_STANDARD_MSB (SPI_I2SCFGR_I2SSTD_0) /*!< MSB justified standard (left justified) */
+#define LL_I2S_STANDARD_LSB (SPI_I2SCFGR_I2SSTD_1) /*!< LSB justified standard (right justified) */
+#define LL_I2S_STANDARD_PCM_SHORT (SPI_I2SCFGR_I2SSTD_0 | SPI_I2SCFGR_I2SSTD_1) /*!< PCM standard, short frame synchronization */
+#define LL_I2S_STANDARD_PCM_LONG (SPI_I2SCFGR_I2SSTD_0 |
SPI_I2SCFGR_I2SSTD_1 | SPI_I2SCFGR_PCMSYNC) /*!< PCM standard, long frame synchronization */
+/**
+ * @}
+ */
+
+/** @defgroup I2S_LL_EC_MODE Operation Mode
+ * @{
+ */
+#define LL_I2S_MODE_SLAVE_TX 0x00000000U /*!< Slave Tx configuration */
+#define LL_I2S_MODE_SLAVE_RX (SPI_I2SCFGR_I2SCFG_0) /*!< Slave Rx configuration */
+#define LL_I2S_MODE_MASTER_TX (SPI_I2SCFGR_I2SCFG_1) /*!< Master Tx configuration */
+#define LL_I2S_MODE_MASTER_RX (SPI_I2SCFGR_I2SCFG_0 | SPI_I2SCFGR_I2SCFG_1) /*!< Master Rx configuration */
+/**
+ * @}
+ */
+
+/** @defgroup I2S_LL_EC_PRESCALER_FACTOR Prescaler Factor
+ * @{
+ */
+#define LL_I2S_PRESCALER_PARITY_EVEN 0x00000000U /*!< Even factor: Real divider value is = I2SDIV * 2 */
+#define LL_I2S_PRESCALER_PARITY_ODD (SPI_I2SPR_ODD >> 8U) /*!< Odd factor: Real divider value is = (I2SDIV * 2)+1 */
+/**
+ * @}
+ */
+
+#if defined(USE_FULL_LL_DRIVER)
+
+/** @defgroup I2S_LL_EC_MCLK_OUTPUT MCLK Output
+ * @{
+ */
+#define LL_I2S_MCLK_OUTPUT_DISABLE 0x00000000U /*!< Master clock output is disabled */
+#define LL_I2S_MCLK_OUTPUT_ENABLE (SPI_I2SPR_MCKOE) /*!< Master clock output is enabled */
+/**
+ * @}
+ */
+
+/** @defgroup I2S_LL_EC_AUDIO_FREQ Audio Frequency
+ * @{
+ */
+
+#define LL_I2S_AUDIOFREQ_192K 192000U /*!< Audio Frequency configuration 192000 Hz */
+#define LL_I2S_AUDIOFREQ_96K 96000U /*!< Audio Frequency configuration 96000 Hz */
+#define LL_I2S_AUDIOFREQ_48K 48000U /*!< Audio Frequency configuration 48000 Hz */
+#define LL_I2S_AUDIOFREQ_44K 44100U /*!< Audio Frequency configuration 44100 Hz */
+#define LL_I2S_AUDIOFREQ_32K 32000U /*!< Audio Frequency configuration 32000 Hz */
+#define LL_I2S_AUDIOFREQ_22K 22050U /*!< Audio Frequency configuration 22050 Hz */
+#define LL_I2S_AUDIOFREQ_16K 16000U /*!< Audio Frequency configuration 16000 Hz */
+#define LL_I2S_AUDIOFREQ_11K 11025U /*!< Audio Frequency configuration 11025 Hz */
+#define LL_I2S_AUDIOFREQ_8K 8000U /*!< Audio Frequency configuration 8000 Hz */
+#define LL_I2S_AUDIOFREQ_DEFAULT 2U /*!< Audio Freq not specified.
Register I2SDIV = 2 */ +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup I2S_LL_Exported_Macros I2S Exported Macros + * @{ + */ + +/** @defgroup I2S_LL_EM_WRITE_READ Common Write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in I2S register + * @param __INSTANCE__ I2S Instance + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_I2S_WriteReg(__INSTANCE__, __REG__, __VALUE__) WRITE_REG(__INSTANCE__->__REG__, (__VALUE__)) + +/** + * @brief Read a value in I2S register + * @param __INSTANCE__ I2S Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_I2S_ReadReg(__INSTANCE__, __REG__) READ_REG(__INSTANCE__->__REG__) +/** + * @} + */ + +/** + * @} + */ + + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup I2S_LL_Exported_Functions I2S Exported Functions + * @{ + */ + +/** @defgroup I2S_LL_EF_Configuration Configuration + * @{ + */ + +/** + * @brief Select I2S mode and Enable I2S peripheral + * @rmtoll I2SCFGR I2SMOD LL_I2S_Enable\n + * I2SCFGR I2SE LL_I2S_Enable + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_Enable(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_I2SMOD | SPI_I2SCFGR_I2SE); +} + +/** + * @brief Disable I2S peripheral + * @rmtoll I2SCFGR I2SE LL_I2S_Disable + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_Disable(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_I2SMOD | SPI_I2SCFGR_I2SE); +} + +/** + * @brief Check if I2S peripheral is enabled + * @rmtoll I2SCFGR I2SE LL_I2S_IsEnabled + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabled(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_I2SE) == (SPI_I2SCFGR_I2SE)) ? 
1UL : 0UL); +} + +/** + * @brief Set I2S data frame length + * @rmtoll I2SCFGR DATLEN LL_I2S_SetDataFormat\n + * I2SCFGR CHLEN LL_I2S_SetDataFormat + * @param SPIx SPI Instance + * @param DataFormat This parameter can be one of the following values: + * @arg @ref LL_I2S_DATAFORMAT_16B + * @arg @ref LL_I2S_DATAFORMAT_16B_EXTENDED + * @arg @ref LL_I2S_DATAFORMAT_24B + * @arg @ref LL_I2S_DATAFORMAT_32B + * @retval None + */ +__STATIC_INLINE void LL_I2S_SetDataFormat(SPI_TypeDef *SPIx, uint32_t DataFormat) +{ + MODIFY_REG(SPIx->I2SCFGR, SPI_I2SCFGR_DATLEN | SPI_I2SCFGR_CHLEN, DataFormat); +} + +/** + * @brief Get I2S data frame length + * @rmtoll I2SCFGR DATLEN LL_I2S_GetDataFormat\n + * I2SCFGR CHLEN LL_I2S_GetDataFormat + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_I2S_DATAFORMAT_16B + * @arg @ref LL_I2S_DATAFORMAT_16B_EXTENDED + * @arg @ref LL_I2S_DATAFORMAT_24B + * @arg @ref LL_I2S_DATAFORMAT_32B + */ +__STATIC_INLINE uint32_t LL_I2S_GetDataFormat(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_DATLEN | SPI_I2SCFGR_CHLEN)); +} + +/** + * @brief Set I2S clock polarity + * @rmtoll I2SCFGR CKPOL LL_I2S_SetClockPolarity + * @param SPIx SPI Instance + * @param ClockPolarity This parameter can be one of the following values: + * @arg @ref LL_I2S_POLARITY_LOW + * @arg @ref LL_I2S_POLARITY_HIGH + * @retval None + */ +__STATIC_INLINE void LL_I2S_SetClockPolarity(SPI_TypeDef *SPIx, uint32_t ClockPolarity) +{ + SET_BIT(SPIx->I2SCFGR, ClockPolarity); +} + +/** + * @brief Get I2S clock polarity + * @rmtoll I2SCFGR CKPOL LL_I2S_GetClockPolarity + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_I2S_POLARITY_LOW + * @arg @ref LL_I2S_POLARITY_HIGH + */ +__STATIC_INLINE uint32_t LL_I2S_GetClockPolarity(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_CKPOL)); +} + +/** + * @brief Set I2S standard protocol + * @rmtoll I2SCFGR I2SSTD LL_I2S_SetStandard\n + * I2SCFGR PCMSYNC LL_I2S_SetStandard + * @param SPIx SPI Instance + * @param Standard This parameter can be one of the following values: + * @arg @ref LL_I2S_STANDARD_PHILIPS + * @arg @ref LL_I2S_STANDARD_MSB + * @arg @ref LL_I2S_STANDARD_LSB + * @arg @ref LL_I2S_STANDARD_PCM_SHORT + * @arg @ref LL_I2S_STANDARD_PCM_LONG + * @retval None + */ +__STATIC_INLINE void LL_I2S_SetStandard(SPI_TypeDef *SPIx, uint32_t Standard) +{ + MODIFY_REG(SPIx->I2SCFGR, SPI_I2SCFGR_I2SSTD | SPI_I2SCFGR_PCMSYNC, Standard); +} + +/** + * @brief Get I2S standard protocol + * @rmtoll I2SCFGR I2SSTD LL_I2S_GetStandard\n + * I2SCFGR PCMSYNC LL_I2S_GetStandard + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_I2S_STANDARD_PHILIPS + * @arg @ref LL_I2S_STANDARD_MSB + * @arg @ref LL_I2S_STANDARD_LSB + * @arg @ref LL_I2S_STANDARD_PCM_SHORT + * @arg @ref LL_I2S_STANDARD_PCM_LONG + */ +__STATIC_INLINE uint32_t LL_I2S_GetStandard(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_I2SSTD | SPI_I2SCFGR_PCMSYNC)); +} + +/** + * @brief Set I2S transfer mode + * @rmtoll I2SCFGR I2SCFG LL_I2S_SetTransferMode + * @param SPIx SPI Instance + * @param Mode This parameter can be one of the following values: + * @arg @ref LL_I2S_MODE_SLAVE_TX + * @arg @ref LL_I2S_MODE_SLAVE_RX + * @arg @ref LL_I2S_MODE_MASTER_TX + * @arg @ref LL_I2S_MODE_MASTER_RX + * @retval None + */ +__STATIC_INLINE void 
LL_I2S_SetTransferMode(SPI_TypeDef *SPIx, uint32_t Mode) +{ + MODIFY_REG(SPIx->I2SCFGR, SPI_I2SCFGR_I2SCFG, Mode); +} + +/** + * @brief Get I2S transfer mode + * @rmtoll I2SCFGR I2SCFG LL_I2S_GetTransferMode + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_I2S_MODE_SLAVE_TX + * @arg @ref LL_I2S_MODE_SLAVE_RX + * @arg @ref LL_I2S_MODE_MASTER_TX + * @arg @ref LL_I2S_MODE_MASTER_RX + */ +__STATIC_INLINE uint32_t LL_I2S_GetTransferMode(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_I2SCFG)); +} + +/** + * @brief Set I2S linear prescaler + * @rmtoll I2SPR I2SDIV LL_I2S_SetPrescalerLinear + * @param SPIx SPI Instance + * @param PrescalerLinear Value between Min_Data=0x02 and Max_Data=0xFF + * @retval None + */ +__STATIC_INLINE void LL_I2S_SetPrescalerLinear(SPI_TypeDef *SPIx, uint8_t PrescalerLinear) +{ + MODIFY_REG(SPIx->I2SPR, SPI_I2SPR_I2SDIV, PrescalerLinear); +} + +/** + * @brief Get I2S linear prescaler + * @rmtoll I2SPR I2SDIV LL_I2S_GetPrescalerLinear + * @param SPIx SPI Instance + * @retval PrescalerLinear Value between Min_Data=0x02 and Max_Data=0xFF + */ +__STATIC_INLINE uint32_t LL_I2S_GetPrescalerLinear(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->I2SPR, SPI_I2SPR_I2SDIV)); +} + +/** + * @brief Set I2S parity prescaler + * @rmtoll I2SPR ODD LL_I2S_SetPrescalerParity + * @param SPIx SPI Instance + * @param PrescalerParity This parameter can be one of the following values: + * @arg @ref LL_I2S_PRESCALER_PARITY_EVEN + * @arg @ref LL_I2S_PRESCALER_PARITY_ODD + * @retval None + */ +__STATIC_INLINE void LL_I2S_SetPrescalerParity(SPI_TypeDef *SPIx, uint32_t PrescalerParity) +{ + MODIFY_REG(SPIx->I2SPR, SPI_I2SPR_ODD, PrescalerParity << 8U); +} + +/** + * @brief Get I2S parity prescaler + * @rmtoll I2SPR ODD LL_I2S_GetPrescalerParity + * @param SPIx SPI Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_I2S_PRESCALER_PARITY_EVEN + * @arg @ref LL_I2S_PRESCALER_PARITY_ODD + */ +__STATIC_INLINE uint32_t LL_I2S_GetPrescalerParity(const SPI_TypeDef *SPIx) +{ + return (uint32_t)(READ_BIT(SPIx->I2SPR, SPI_I2SPR_ODD) >> 8U); +} + +/** + * @brief Enable the master clock output (Pin MCK) + * @rmtoll I2SPR MCKOE LL_I2S_EnableMasterClock + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_EnableMasterClock(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->I2SPR, SPI_I2SPR_MCKOE); +} + +/** + * @brief Disable the master clock output (Pin MCK) + * @rmtoll I2SPR MCKOE LL_I2S_DisableMasterClock + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_DisableMasterClock(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->I2SPR, SPI_I2SPR_MCKOE); +} + +/** + * @brief Check if the master clock output (Pin MCK) is enabled + * @rmtoll I2SPR MCKOE LL_I2S_IsEnabledMasterClock + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabledMasterClock(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->I2SPR, SPI_I2SPR_MCKOE) == (SPI_I2SPR_MCKOE)) ? 
1UL : 0UL); +} + +#if defined(SPI_I2SCFGR_ASTRTEN) +/** + * @brief Enable asynchronous start + * @rmtoll I2SCFGR ASTRTEN LL_I2S_EnableAsyncStart + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_EnableAsyncStart(SPI_TypeDef *SPIx) +{ + SET_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_ASTRTEN); +} + +/** + * @brief Disable asynchronous start + * @rmtoll I2SCFGR ASTRTEN LL_I2S_DisableAsyncStart + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_DisableAsyncStart(SPI_TypeDef *SPIx) +{ + CLEAR_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_ASTRTEN); +} + +/** + * @brief Check if asynchronous start is enabled + * @rmtoll I2SCFGR ASTRTEN LL_I2S_IsEnabledAsyncStart + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabledAsyncStart(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->I2SCFGR, SPI_I2SCFGR_ASTRTEN) == (SPI_I2SCFGR_ASTRTEN)) ? 1UL : 0UL); +} +#endif /* SPI_I2SCFGR_ASTRTEN */ + +/** + * @} + */ + +/** @defgroup I2S_LL_EF_FLAG FLAG Management + * @{ + */ + +/** + * @brief Check if Rx buffer is not empty + * @rmtoll SR RXNE LL_I2S_IsActiveFlag_RXNE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsActiveFlag_RXNE(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsActiveFlag_RXNE(SPIx); +} + +/** + * @brief Check if Tx buffer is empty + * @rmtoll SR TXE LL_I2S_IsActiveFlag_TXE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsActiveFlag_TXE(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsActiveFlag_TXE(SPIx); +} + +/** + * @brief Get busy flag + * @rmtoll SR BSY LL_I2S_IsActiveFlag_BSY + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsActiveFlag_BSY(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsActiveFlag_BSY(SPIx); +} + +/** + * @brief Get overrun error flag + * @rmtoll SR OVR LL_I2S_IsActiveFlag_OVR + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsActiveFlag_OVR(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsActiveFlag_OVR(SPIx); +} + +/** + * @brief Get underrun error flag + * @rmtoll SR UDR LL_I2S_IsActiveFlag_UDR + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsActiveFlag_UDR(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_UDR) == (SPI_SR_UDR)) ? 1UL : 0UL); +} + +/** + * @brief Get frame format error flag + * @rmtoll SR FRE LL_I2S_IsActiveFlag_FRE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsActiveFlag_FRE(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsActiveFlag_FRE(SPIx); +} + +/** + * @brief Get channel side flag. + * @note 0: Channel Left has to be transmitted or has been received\n + * 1: Channel Right has to be transmitted or has been received\n + * It has no significance in PCM mode. + * @rmtoll SR CHSIDE LL_I2S_IsActiveFlag_CHSIDE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsActiveFlag_CHSIDE(const SPI_TypeDef *SPIx) +{ + return ((READ_BIT(SPIx->SR, SPI_SR_CHSIDE) == (SPI_SR_CHSIDE)) ? 
1UL : 0UL); +} + +/** + * @brief Clear overrun error flag + * @rmtoll SR OVR LL_I2S_ClearFlag_OVR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_ClearFlag_OVR(SPI_TypeDef *SPIx) +{ + LL_SPI_ClearFlag_OVR(SPIx); +} + +/** + * @brief Clear underrun error flag + * @rmtoll SR UDR LL_I2S_ClearFlag_UDR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_ClearFlag_UDR(SPI_TypeDef *SPIx) +{ + __IO uint32_t tmpreg; + tmpreg = SPIx->SR; + (void)tmpreg; +} + +/** + * @brief Clear frame format error flag + * @rmtoll SR FRE LL_I2S_ClearFlag_FRE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_ClearFlag_FRE(SPI_TypeDef *SPIx) +{ + LL_SPI_ClearFlag_FRE(SPIx); +} + +/** + * @} + */ + +/** @defgroup I2S_LL_EF_IT Interrupt Management + * @{ + */ + +/** + * @brief Enable error IT + * @note This bit controls the generation of an interrupt when an error condition occurs (OVR, UDR and FRE in I2S mode). + * @rmtoll CR2 ERRIE LL_I2S_EnableIT_ERR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_EnableIT_ERR(SPI_TypeDef *SPIx) +{ + LL_SPI_EnableIT_ERR(SPIx); +} + +/** + * @brief Enable Rx buffer not empty IT + * @rmtoll CR2 RXNEIE LL_I2S_EnableIT_RXNE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_EnableIT_RXNE(SPI_TypeDef *SPIx) +{ + LL_SPI_EnableIT_RXNE(SPIx); +} + +/** + * @brief Enable Tx buffer empty IT + * @rmtoll CR2 TXEIE LL_I2S_EnableIT_TXE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_EnableIT_TXE(SPI_TypeDef *SPIx) +{ + LL_SPI_EnableIT_TXE(SPIx); +} + +/** + * @brief Disable error IT + * @note This bit controls the generation of an interrupt when an error condition occurs (OVR, UDR and FRE in I2S mode). + * @rmtoll CR2 ERRIE LL_I2S_DisableIT_ERR + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_DisableIT_ERR(SPI_TypeDef *SPIx) +{ + LL_SPI_DisableIT_ERR(SPIx); +} + +/** + * @brief Disable Rx buffer not empty IT + * @rmtoll CR2 RXNEIE LL_I2S_DisableIT_RXNE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_DisableIT_RXNE(SPI_TypeDef *SPIx) +{ + LL_SPI_DisableIT_RXNE(SPIx); +} + +/** + * @brief Disable Tx buffer empty IT + * @rmtoll CR2 TXEIE LL_I2S_DisableIT_TXE + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_DisableIT_TXE(SPI_TypeDef *SPIx) +{ + LL_SPI_DisableIT_TXE(SPIx); +} + +/** + * @brief Check if ERR IT is enabled + * @rmtoll CR2 ERRIE LL_I2S_IsEnabledIT_ERR + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabledIT_ERR(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsEnabledIT_ERR(SPIx); +} + +/** + * @brief Check if RXNE IT is enabled + * @rmtoll CR2 RXNEIE LL_I2S_IsEnabledIT_RXNE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabledIT_RXNE(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsEnabledIT_RXNE(SPIx); +} + +/** + * @brief Check if TXE IT is enabled + * @rmtoll CR2 TXEIE LL_I2S_IsEnabledIT_TXE + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). 
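+ * @note  Feeder-loop sketch (illustrative; assumes SPI2 runs as I2S master transmitter
+ *        and that tx_buffer/tx_index are maintained by the caller):
+ * @code
+ *   if ((LL_I2S_IsEnabledIT_TXE(SPI2) == 1UL) && (LL_I2S_IsActiveFlag_TXE(SPI2) == 1UL))
+ *   {
+ *     LL_I2S_TransmitData16(SPI2, tx_buffer[tx_index++]);   // refill the Tx data register
+ *   }
+ * @endcode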
+ */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabledIT_TXE(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsEnabledIT_TXE(SPIx); +} + +/** + * @} + */ + +/** @defgroup I2S_LL_EF_DMA DMA Management + * @{ + */ + +/** + * @brief Enable DMA Rx + * @rmtoll CR2 RXDMAEN LL_I2S_EnableDMAReq_RX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_EnableDMAReq_RX(SPI_TypeDef *SPIx) +{ + LL_SPI_EnableDMAReq_RX(SPIx); +} + +/** + * @brief Disable DMA Rx + * @rmtoll CR2 RXDMAEN LL_I2S_DisableDMAReq_RX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_DisableDMAReq_RX(SPI_TypeDef *SPIx) +{ + LL_SPI_DisableDMAReq_RX(SPIx); +} + +/** + * @brief Check if DMA Rx is enabled + * @rmtoll CR2 RXDMAEN LL_I2S_IsEnabledDMAReq_RX + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabledDMAReq_RX(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsEnabledDMAReq_RX(SPIx); +} + +/** + * @brief Enable DMA Tx + * @rmtoll CR2 TXDMAEN LL_I2S_EnableDMAReq_TX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_EnableDMAReq_TX(SPI_TypeDef *SPIx) +{ + LL_SPI_EnableDMAReq_TX(SPIx); +} + +/** + * @brief Disable DMA Tx + * @rmtoll CR2 TXDMAEN LL_I2S_DisableDMAReq_TX + * @param SPIx SPI Instance + * @retval None + */ +__STATIC_INLINE void LL_I2S_DisableDMAReq_TX(SPI_TypeDef *SPIx) +{ + LL_SPI_DisableDMAReq_TX(SPIx); +} + +/** + * @brief Check if DMA Tx is enabled + * @rmtoll CR2 TXDMAEN LL_I2S_IsEnabledDMAReq_TX + * @param SPIx SPI Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_I2S_IsEnabledDMAReq_TX(const SPI_TypeDef *SPIx) +{ + return LL_SPI_IsEnabledDMAReq_TX(SPIx); +} + +/** + * @} + */ + +/** @defgroup I2S_LL_EF_DATA DATA Management + * @{ + */ + +/** + * @brief Read 16-Bits in data register + * @rmtoll DR DR LL_I2S_ReceiveData16 + * @param SPIx SPI Instance + * @retval RxData Value between Min_Data=0x0000 and Max_Data=0xFFFF + */ +__STATIC_INLINE uint16_t LL_I2S_ReceiveData16(SPI_TypeDef *SPIx) +{ + return LL_SPI_ReceiveData16(SPIx); +} + +/** + * @brief Write 16-Bits in data register + * @rmtoll DR DR LL_I2S_TransmitData16 + * @param SPIx SPI Instance + * @param TxData Value between Min_Data=0x0000 and Max_Data=0xFFFF + * @retval None + */ +__STATIC_INLINE void LL_I2S_TransmitData16(SPI_TypeDef *SPIx, uint16_t TxData) +{ + LL_SPI_TransmitData16(SPIx, TxData); +} + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup I2S_LL_EF_Init Initialization and de-initialization functions + * @{ + */ + +ErrorStatus LL_I2S_DeInit(const SPI_TypeDef *SPIx); +ErrorStatus LL_I2S_Init(SPI_TypeDef *SPIx, LL_I2S_InitTypeDef *I2S_InitStruct); +void LL_I2S_StructInit(LL_I2S_InitTypeDef *I2S_InitStruct); +void LL_I2S_ConfigPrescaler(SPI_TypeDef *SPIx, uint32_t PrescalerLinear, uint32_t PrescalerParity); +#if defined (SPI_I2S_FULLDUPLEX_SUPPORT) +ErrorStatus LL_I2S_InitFullDuplex(SPI_TypeDef *I2Sxext, LL_I2S_InitTypeDef *I2S_InitStruct); +#endif /* SPI_I2S_FULLDUPLEX_SUPPORT */ + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* defined (SPI1) || defined (SPI2) || defined (SPI3) || defined (SPI4) || defined (SPI5) || defined(SPI6) */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F4xx_LL_SPI_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_system.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_system.h new file mode 100644 index 00000000..84ea5c4d --- /dev/null +++ 
b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_system.h @@ -0,0 +1,1711 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_system.h + * @author MCD Application Team + * @brief Header file of SYSTEM LL module. + * + ****************************************************************************** + * @attention + * + *Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The LL SYSTEM driver contains a set of generic APIs that can be + used by user: + (+) Some of the FLASH features need to be handled in the SYSTEM file. + (+) Access to DBGCMU registers + (+) Access to SYSCFG registers + + @endverbatim + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_SYSTEM_H +#define __STM32F4xx_LL_SYSTEM_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (FLASH) || defined (SYSCFG) || defined (DBGMCU) + +/** @defgroup SYSTEM_LL SYSTEM + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup SYSTEM_LL_Private_Constants SYSTEM Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup SYSTEM_LL_Exported_Constants SYSTEM Exported Constants + * @{ + */ + +/** @defgroup SYSTEM_LL_EC_REMAP SYSCFG REMAP +* @{ +*/ +#define LL_SYSCFG_REMAP_FLASH (uint32_t)0x00000000 /*!< Main Flash memory mapped at 0x00000000 */ +#define LL_SYSCFG_REMAP_SYSTEMFLASH SYSCFG_MEMRMP_MEM_MODE_0 /*!< System Flash memory mapped at 0x00000000 */ +#if defined(FSMC_Bank1) +#define LL_SYSCFG_REMAP_FSMC SYSCFG_MEMRMP_MEM_MODE_1 /*!< FSMC(NOR/PSRAM 1 and 2) mapped at 0x00000000 */ +#endif /* FSMC_Bank1 */ +#if defined(FMC_Bank1) +#define LL_SYSCFG_REMAP_FMC SYSCFG_MEMRMP_MEM_MODE_1 /*!< FMC(NOR/PSRAM 1 and 2) mapped at 0x00000000 */ +#define LL_SYSCFG_REMAP_SDRAM SYSCFG_MEMRMP_MEM_MODE_2 /*!< FMC/SDRAM mapped at 0x00000000 */ +#endif /* FMC_Bank1 */ +#define LL_SYSCFG_REMAP_SRAM (SYSCFG_MEMRMP_MEM_MODE_1 | SYSCFG_MEMRMP_MEM_MODE_0) /*!< SRAM1 mapped at 0x00000000 */ + +/** + * @} + */ + +#if defined(SYSCFG_PMC_MII_RMII_SEL) + /** @defgroup SYSTEM_LL_EC_PMC SYSCFG PMC +* @{ +*/ +#define LL_SYSCFG_PMC_ETHMII (uint32_t)0x00000000 /*!< ETH Media MII interface */ +#define LL_SYSCFG_PMC_ETHRMII (uint32_t)SYSCFG_PMC_MII_RMII_SEL /*!< ETH Media RMII interface */ + +/** + * @} + */ +#endif /* 
SYSCFG_PMC_MII_RMII_SEL */ + + + +#if defined(SYSCFG_MEMRMP_UFB_MODE) +/** @defgroup SYSTEM_LL_EC_BANKMODE SYSCFG BANK MODE + * @{ + */ +#define LL_SYSCFG_BANKMODE_BANK1 (uint32_t)0x00000000 /*!< Flash Bank 1 base address mapped at 0x0800 0000 (AXI) and 0x0020 0000 (TCM) + and Flash Bank 2 base address mapped at 0x0810 0000 (AXI) and 0x0030 0000 (TCM)*/ +#define LL_SYSCFG_BANKMODE_BANK2 SYSCFG_MEMRMP_UFB_MODE /*!< Flash Bank 2 base address mapped at 0x0800 0000 (AXI) and 0x0020 0000(TCM) + and Flash Bank 1 base address mapped at 0x0810 0000 (AXI) and 0x0030 0000(TCM) */ +/** + * @} + */ +#endif /* SYSCFG_MEMRMP_UFB_MODE */ +/** @defgroup SYSTEM_LL_EC_I2C_FASTMODEPLUS SYSCFG I2C FASTMODEPLUS + * @{ + */ +#if defined(SYSCFG_CFGR_FMPI2C1_SCL) +#define LL_SYSCFG_I2C_FASTMODEPLUS_SCL SYSCFG_CFGR_FMPI2C1_SCL /*!< Enable Fast Mode Plus on FMPI2C_SCL pin */ +#define LL_SYSCFG_I2C_FASTMODEPLUS_SDA SYSCFG_CFGR_FMPI2C1_SDA /*!< Enable Fast Mode Plus on FMPI2C_SDA pin*/ +#endif /* SYSCFG_CFGR_FMPI2C1_SCL */ +/** + * @} + */ + +/** @defgroup SYSTEM_LL_EC_EXTI_PORT SYSCFG EXTI PORT + * @{ + */ +#define LL_SYSCFG_EXTI_PORTA (uint32_t)0 /*!< EXTI PORT A */ +#define LL_SYSCFG_EXTI_PORTB (uint32_t)1 /*!< EXTI PORT B */ +#define LL_SYSCFG_EXTI_PORTC (uint32_t)2 /*!< EXTI PORT C */ +#define LL_SYSCFG_EXTI_PORTD (uint32_t)3 /*!< EXTI PORT D */ +#define LL_SYSCFG_EXTI_PORTE (uint32_t)4 /*!< EXTI PORT E */ +#if defined(GPIOF) +#define LL_SYSCFG_EXTI_PORTF (uint32_t)5 /*!< EXTI PORT F */ +#endif /* GPIOF */ +#if defined(GPIOG) +#define LL_SYSCFG_EXTI_PORTG (uint32_t)6 /*!< EXTI PORT G */ +#endif /* GPIOG */ +#define LL_SYSCFG_EXTI_PORTH (uint32_t)7 /*!< EXTI PORT H */ +#if defined(GPIOI) +#define LL_SYSCFG_EXTI_PORTI (uint32_t)8 /*!< EXTI PORT I */ +#endif /* GPIOI */ +#if defined(GPIOJ) +#define LL_SYSCFG_EXTI_PORTJ (uint32_t)9 /*!< EXTI PORT J */ +#endif /* GPIOJ */ +#if defined(GPIOK) +#define LL_SYSCFG_EXTI_PORTK (uint32_t)10 /*!< EXTI PORT k */ +#endif /* GPIOK */ +/** + * @} + */ + +/** @defgroup SYSTEM_LL_EC_EXTI_LINE SYSCFG EXTI LINE + * @{ + */ +#define LL_SYSCFG_EXTI_LINE0 (uint32_t)(0x000FU << 16 | 0) /*!< EXTI_POSITION_0 | EXTICR[0] */ +#define LL_SYSCFG_EXTI_LINE1 (uint32_t)(0x00F0U << 16 | 0) /*!< EXTI_POSITION_4 | EXTICR[0] */ +#define LL_SYSCFG_EXTI_LINE2 (uint32_t)(0x0F00U << 16 | 0) /*!< EXTI_POSITION_8 | EXTICR[0] */ +#define LL_SYSCFG_EXTI_LINE3 (uint32_t)(0xF000U << 16 | 0) /*!< EXTI_POSITION_12 | EXTICR[0] */ +#define LL_SYSCFG_EXTI_LINE4 (uint32_t)(0x000FU << 16 | 1) /*!< EXTI_POSITION_0 | EXTICR[1] */ +#define LL_SYSCFG_EXTI_LINE5 (uint32_t)(0x00F0U << 16 | 1) /*!< EXTI_POSITION_4 | EXTICR[1] */ +#define LL_SYSCFG_EXTI_LINE6 (uint32_t)(0x0F00U << 16 | 1) /*!< EXTI_POSITION_8 | EXTICR[1] */ +#define LL_SYSCFG_EXTI_LINE7 (uint32_t)(0xF000U << 16 | 1) /*!< EXTI_POSITION_12 | EXTICR[1] */ +#define LL_SYSCFG_EXTI_LINE8 (uint32_t)(0x000FU << 16 | 2) /*!< EXTI_POSITION_0 | EXTICR[2] */ +#define LL_SYSCFG_EXTI_LINE9 (uint32_t)(0x00F0U << 16 | 2) /*!< EXTI_POSITION_4 | EXTICR[2] */ +#define LL_SYSCFG_EXTI_LINE10 (uint32_t)(0x0F00U << 16 | 2) /*!< EXTI_POSITION_8 | EXTICR[2] */ +#define LL_SYSCFG_EXTI_LINE11 (uint32_t)(0xF000U << 16 | 2) /*!< EXTI_POSITION_12 | EXTICR[2] */ +#define LL_SYSCFG_EXTI_LINE12 (uint32_t)(0x000FU << 16 | 3) /*!< EXTI_POSITION_0 | EXTICR[3] */ +#define LL_SYSCFG_EXTI_LINE13 (uint32_t)(0x00F0U << 16 | 3) /*!< EXTI_POSITION_4 | EXTICR[3] */ +#define LL_SYSCFG_EXTI_LINE14 (uint32_t)(0x0F00U << 16 | 3) /*!< EXTI_POSITION_8 | EXTICR[3] */ +#define LL_SYSCFG_EXTI_LINE15 
(uint32_t)(0xF000U << 16 | 3) /*!< EXTI_POSITION_12 | EXTICR[3] */ +/** + * @} + */ + +/** @defgroup SYSTEM_LL_EC_TIMBREAK SYSCFG TIMER BREAK + * @{ + */ +#if defined(SYSCFG_CFGR2_LOCKUP_LOCK) +#define LL_SYSCFG_TIMBREAK_LOCKUP SYSCFG_CFGR2_LOCKUP_LOCK /*!< Enables and locks the LOCKUP output of CortexM4 + with Break Input of TIM1/8 */ +#define LL_SYSCFG_TIMBREAK_PVD SYSCFG_CFGR2_PVD_LOCK /*!< Enables and locks the PVD connection with TIM1/8 Break Input + and also the PVDE and PLS bits of the Power Control Interface */ +#endif /* SYSCFG_CFGR2_CLL */ +/** + * @} + */ + +#if defined(SYSCFG_MCHDLYCR_BSCKSEL) +/** @defgroup SYSTEM_LL_DFSDM_BitStream_ClockSource SYSCFG MCHDLY BCKKSEL + * @{ + */ +#define LL_SYSCFG_BITSTREAM_CLOCK_TIM2OC1 (uint32_t)0x00000000 +#define LL_SYSCFG_BITSTREAM_CLOCK_DFSDM2 SYSCFG_MCHDLYCR_BSCKSEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM_MCHDLYEN SYSCFG MCHDLY MCHDLYEN + * @{ + */ +#define LL_SYSCFG_DFSDM1_MCHDLYEN SYSCFG_MCHDLYCR_MCHDLY1EN +#define LL_SYSCFG_DFSDM2_MCHDLYEN SYSCFG_MCHDLYCR_MCHDLY2EN +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM_DataIn0_Source SYSCFG MCHDLY DFSDMD0SEL + * @{ + */ +#define LL_SYSCFG_DFSDM1_DataIn0 SYSCFG_MCHDLYCR_DFSDM1D0SEL +#define LL_SYSCFG_DFSDM2_DataIn0 SYSCFG_MCHDLYCR_DFSDM2D0SEL + +#define LL_SYSCFG_DFSDM1_DataIn0_PAD (uint32_t)((SYSCFG_MCHDLYCR_DFSDM1D0SEL << 16) | 0x00000000) +#define LL_SYSCFG_DFSDM1_DataIn0_DM (uint32_t)((SYSCFG_MCHDLYCR_DFSDM1D0SEL << 16) | SYSCFG_MCHDLYCR_DFSDM1D0SEL) +#define LL_SYSCFG_DFSDM2_DataIn0_PAD (uint32_t)((SYSCFG_MCHDLYCR_DFSDM2D0SEL << 16) | 0x00000000) +#define LL_SYSCFG_DFSDM2_DataIn0_DM (uint32_t)((SYSCFG_MCHDLYCR_DFSDM2D0SEL << 16) | SYSCFG_MCHDLYCR_DFSDM2D0SEL) +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM_DataIn2_Source SYSCFG MCHDLY DFSDMD2SEL + * @{ + */ +#define LL_SYSCFG_DFSDM1_DataIn2 SYSCFG_MCHDLYCR_DFSDM1D2SEL +#define LL_SYSCFG_DFSDM2_DataIn2 SYSCFG_MCHDLYCR_DFSDM2D2SEL + +#define LL_SYSCFG_DFSDM1_DataIn2_PAD (uint32_t)((SYSCFG_MCHDLYCR_DFSDM1D2SEL << 16) | 0x00000000) +#define LL_SYSCFG_DFSDM1_DataIn2_DM (uint32_t)((SYSCFG_MCHDLYCR_DFSDM1D2SEL << 16) | SYSCFG_MCHDLYCR_DFSDM1D2SEL) +#define LL_SYSCFG_DFSDM2_DataIn2_PAD (uint32_t)((SYSCFG_MCHDLYCR_DFSDM2D2SEL << 16) | 0x00000000) +#define LL_SYSCFG_DFSDM2_DataIn2_DM (uint32_t)((SYSCFG_MCHDLYCR_DFSDM2D2SEL << 16) | SYSCFG_MCHDLYCR_DFSDM2D2SEL) +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM1_TIM4OC2_BitstreamDistribution SYSCFG MCHDLY DFSDM1CK02SEL + * @{ + */ +#define LL_SYSCFG_DFSDM1_TIM4OC2_CLKIN0 (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM1_TIM4OC2_CLKIN2 SYSCFG_MCHDLYCR_DFSDM1CK02SEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM1_TIM4OC1_BitstreamDistribution SYSCFG MCHDLY DFSDM1CK13SEL + * @{ + */ +#define LL_SYSCFG_DFSDM1_TIM4OC1_CLKIN1 (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM1_TIM4OC1_CLKIN3 SYSCFG_MCHDLYCR_DFSDM1CK13SEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM1_CLKIN_SourceSelection SYSCFG MCHDLY DFSDMCFG + * @{ + */ +#define LL_SYSCFG_DFSDM1_CKIN_PAD (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM1_CKIN_DM SYSCFG_MCHDLYCR_DFSDM1CFG +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM1_CLKOUT_SourceSelection SYSCFG MCHDLY DFSDM1CKOSEL + * @{ + */ +#define LL_SYSCFG_DFSDM1_CKOUT (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM1_CKOUT_M27 SYSCFG_MCHDLYCR_DFSDM1CKOSEL +/** + * @} + */ + +/** @defgroup SYSTEM_LL_DFSDM2_DataIn4_SourceSelection SYSCFG MCHDLY DFSDM2D4SEL + * @{ + */ +#define LL_SYSCFG_DFSDM2_DataIn4_PAD (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_DataIn4_DM SYSCFG_MCHDLYCR_DFSDM2D4SEL +/** + * 
@} + */ +/** @defgroup SYSTEM_LL_DFSDM2_DataIn6_SourceSelection SYSCFG MCHDLY DFSDM2D6SEL + * @{ + */ +#define LL_SYSCFG_DFSDM2_DataIn6_PAD (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_DataIn6_DM SYSCFG_MCHDLYCR_DFSDM2D6SEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM2_TIM3OC4_BitstreamDistribution SYSCFG MCHDLY DFSDM2CK04SEL + * @{ + */ +#define LL_SYSCFG_DFSDM2_TIM3OC4_CLKIN0 (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_TIM3OC4_CLKIN4 SYSCFG_MCHDLYCR_DFSDM2CK04SEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM2_TIM3OC3_BitstreamDistribution SYSCFG MCHDLY DFSDM2CK15SEL + * @{ + */ +#define LL_SYSCFG_DFSDM2_TIM3OC3_CLKIN1 (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_TIM3OC3_CLKIN5 SYSCFG_MCHDLYCR_DFSDM2CK15SEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM2_TIM3OC2_BitstreamDistribution SYSCFG MCHDLY DFSDM2CK26SEL + * @{ + */ +#define LL_SYSCFG_DFSDM2_TIM3OC2_CLKIN2 (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_TIM3OC2_CLKIN6 SYSCFG_MCHDLYCR_DFSDM2CK26SEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM2_TIM3OC1_BitstreamDistribution SYSCFG MCHDLY DFSDM2CK37SEL + * @{ + */ +#define LL_SYSCFG_DFSDM2_TIM3OC1_CLKIN3 (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_TIM3OC1_CLKIN7 SYSCFG_MCHDLYCR_DFSDM2CK37SEL +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM2_CLKIN_SourceSelection SYSCFG MCHDLY DFSDM2CFG + * @{ + */ +#define LL_SYSCFG_DFSDM2_CKIN_PAD (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_CKIN_DM SYSCFG_MCHDLYCR_DFSDM2CFG +/** + * @} + */ +/** @defgroup SYSTEM_LL_DFSDM2_CLKOUT_SourceSelection SYSCFG MCHDLY DFSDM2CKOSEL + * @{ + */ +#define LL_SYSCFG_DFSDM2_CKOUT (uint32_t)0x00000000 +#define LL_SYSCFG_DFSDM2_CKOUT_M27 SYSCFG_MCHDLYCR_DFSDM2CKOSEL +/** + * @} + */ +#endif /* SYSCFG_MCHDLYCR_BSCKSEL */ + +/** @defgroup SYSTEM_LL_EC_TRACE DBGMCU TRACE Pin Assignment + * @{ + */ +#define LL_DBGMCU_TRACE_NONE 0x00000000U /*!< TRACE pins not assigned (default state) */ +#define LL_DBGMCU_TRACE_ASYNCH DBGMCU_CR_TRACE_IOEN /*!< TRACE pin assignment for Asynchronous Mode */ +#define LL_DBGMCU_TRACE_SYNCH_SIZE1 (DBGMCU_CR_TRACE_IOEN | DBGMCU_CR_TRACE_MODE_0) /*!< TRACE pin assignment for Synchronous Mode with a TRACEDATA size of 1 */ +#define LL_DBGMCU_TRACE_SYNCH_SIZE2 (DBGMCU_CR_TRACE_IOEN | DBGMCU_CR_TRACE_MODE_1) /*!< TRACE pin assignment for Synchronous Mode with a TRACEDATA size of 2 */ +#define LL_DBGMCU_TRACE_SYNCH_SIZE4 (DBGMCU_CR_TRACE_IOEN | DBGMCU_CR_TRACE_MODE) /*!< TRACE pin assignment for Synchronous Mode with a TRACEDATA size of 4 */ +/** + * @} + */ + +/** @defgroup SYSTEM_LL_EC_APB1_GRP1_STOP_IP DBGMCU APB1 GRP1 STOP IP + * @{ + */ +#if defined(DBGMCU_APB1_FZ_DBG_TIM2_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM2_STOP DBGMCU_APB1_FZ_DBG_TIM2_STOP /*!< TIM2 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM2_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_TIM3_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM3_STOP DBGMCU_APB1_FZ_DBG_TIM3_STOP /*!< TIM3 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM3_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_TIM4_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM4_STOP DBGMCU_APB1_FZ_DBG_TIM4_STOP /*!< TIM4 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM4_STOP */ +#define LL_DBGMCU_APB1_GRP1_TIM5_STOP DBGMCU_APB1_FZ_DBG_TIM5_STOP /*!< TIM5 counter stopped when core is halted */ +#if defined(DBGMCU_APB1_FZ_DBG_TIM6_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM6_STOP DBGMCU_APB1_FZ_DBG_TIM6_STOP /*!< TIM6 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM6_STOP */ +#if 
defined(DBGMCU_APB1_FZ_DBG_TIM7_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM7_STOP DBGMCU_APB1_FZ_DBG_TIM7_STOP /*!< TIM7 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM7_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_TIM12_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM12_STOP DBGMCU_APB1_FZ_DBG_TIM12_STOP /*!< TIM12 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM12_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_TIM13_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM13_STOP DBGMCU_APB1_FZ_DBG_TIM13_STOP /*!< TIM13 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM13_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_TIM14_STOP) +#define LL_DBGMCU_APB1_GRP1_TIM14_STOP DBGMCU_APB1_FZ_DBG_TIM14_STOP /*!< TIM14 counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_TIM14_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_LPTIM_STOP) +#define LL_DBGMCU_APB1_GRP1_LPTIM_STOP DBGMCU_APB1_FZ_DBG_LPTIM_STOP /*!< LPTIM counter stopped when core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_LPTIM_STOP */ +#define LL_DBGMCU_APB1_GRP1_RTC_STOP DBGMCU_APB1_FZ_DBG_RTC_STOP /*!< RTC counter stopped when core is halted */ +#define LL_DBGMCU_APB1_GRP1_WWDG_STOP DBGMCU_APB1_FZ_DBG_WWDG_STOP /*!< Debug Window Watchdog stopped when Core is halted */ +#define LL_DBGMCU_APB1_GRP1_IWDG_STOP DBGMCU_APB1_FZ_DBG_IWDG_STOP /*!< Debug Independent Watchdog stopped when Core is halted */ +#define LL_DBGMCU_APB1_GRP1_I2C1_STOP DBGMCU_APB1_FZ_DBG_I2C1_SMBUS_TIMEOUT /*!< I2C1 SMBUS timeout mode stopped when Core is halted */ +#define LL_DBGMCU_APB1_GRP1_I2C2_STOP DBGMCU_APB1_FZ_DBG_I2C2_SMBUS_TIMEOUT /*!< I2C2 SMBUS timeout mode stopped when Core is halted */ +#if defined(DBGMCU_APB1_FZ_DBG_I2C3_SMBUS_TIMEOUT) +#define LL_DBGMCU_APB1_GRP1_I2C3_STOP DBGMCU_APB1_FZ_DBG_I2C3_SMBUS_TIMEOUT /*!< I2C3 SMBUS timeout mode stopped when Core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_I2C3_SMBUS_TIMEOUT */ +#if defined(DBGMCU_APB1_FZ_DBG_I2C4_SMBUS_TIMEOUT) +#define LL_DBGMCU_APB1_GRP1_I2C4_STOP DBGMCU_APB1_FZ_DBG_I2C4_SMBUS_TIMEOUT /*!< I2C4 SMBUS timeout mode stopped when Core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_I2C4_SMBUS_TIMEOUT */ +#if defined(DBGMCU_APB1_FZ_DBG_CAN1_STOP) +#define LL_DBGMCU_APB1_GRP1_CAN1_STOP DBGMCU_APB1_FZ_DBG_CAN1_STOP /*!< CAN1 debug stopped when Core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_CAN1_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_CAN2_STOP) +#define LL_DBGMCU_APB1_GRP1_CAN2_STOP DBGMCU_APB1_FZ_DBG_CAN2_STOP /*!< CAN2 debug stopped when Core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_CAN2_STOP */ +#if defined(DBGMCU_APB1_FZ_DBG_CAN3_STOP) +#define LL_DBGMCU_APB1_GRP1_CAN3_STOP DBGMCU_APB1_FZ_DBG_CAN3_STOP /*!< CAN3 debug stopped when Core is halted */ +#endif /* DBGMCU_APB1_FZ_DBG_CAN3_STOP */ +/** + * @} + */ + +/** @defgroup SYSTEM_LL_EC_APB2_GRP1_STOP_IP DBGMCU APB2 GRP1 STOP IP + * @{ + */ +#define LL_DBGMCU_APB2_GRP1_TIM1_STOP DBGMCU_APB2_FZ_DBG_TIM1_STOP /*!< TIM1 counter stopped when core is halted */ +#if defined(DBGMCU_APB2_FZ_DBG_TIM8_STOP) +#define LL_DBGMCU_APB2_GRP1_TIM8_STOP DBGMCU_APB2_FZ_DBG_TIM8_STOP /*!< TIM8 counter stopped when core is halted */ +#endif /* DBGMCU_APB2_FZ_DBG_TIM8_STOP */ +#define LL_DBGMCU_APB2_GRP1_TIM9_STOP DBGMCU_APB2_FZ_DBG_TIM9_STOP /*!< TIM9 counter stopped when core is halted */ +#if defined(DBGMCU_APB2_FZ_DBG_TIM10_STOP) +#define LL_DBGMCU_APB2_GRP1_TIM10_STOP DBGMCU_APB2_FZ_DBG_TIM10_STOP /*!< TIM10 counter stopped when core is halted */ +#endif /* DBGMCU_APB2_FZ_DBG_TIM10_STOP */ +#define LL_DBGMCU_APB2_GRP1_TIM11_STOP 
DBGMCU_APB2_FZ_DBG_TIM11_STOP /*!< TIM11 counter stopped when core is halted */ +/** + * @} + */ + +/** @defgroup SYSTEM_LL_EC_LATENCY FLASH LATENCY + * @{ + */ +#define LL_FLASH_LATENCY_0 FLASH_ACR_LATENCY_0WS /*!< FLASH Zero wait state */ +#define LL_FLASH_LATENCY_1 FLASH_ACR_LATENCY_1WS /*!< FLASH One wait state */ +#define LL_FLASH_LATENCY_2 FLASH_ACR_LATENCY_2WS /*!< FLASH Two wait states */ +#define LL_FLASH_LATENCY_3 FLASH_ACR_LATENCY_3WS /*!< FLASH Three wait states */ +#define LL_FLASH_LATENCY_4 FLASH_ACR_LATENCY_4WS /*!< FLASH Four wait states */ +#define LL_FLASH_LATENCY_5 FLASH_ACR_LATENCY_5WS /*!< FLASH five wait state */ +#define LL_FLASH_LATENCY_6 FLASH_ACR_LATENCY_6WS /*!< FLASH six wait state */ +#define LL_FLASH_LATENCY_7 FLASH_ACR_LATENCY_7WS /*!< FLASH seven wait states */ +#define LL_FLASH_LATENCY_8 FLASH_ACR_LATENCY_8WS /*!< FLASH eight wait states */ +#define LL_FLASH_LATENCY_9 FLASH_ACR_LATENCY_9WS /*!< FLASH nine wait states */ +#define LL_FLASH_LATENCY_10 FLASH_ACR_LATENCY_10WS /*!< FLASH ten wait states */ +#define LL_FLASH_LATENCY_11 FLASH_ACR_LATENCY_11WS /*!< FLASH eleven wait states */ +#define LL_FLASH_LATENCY_12 FLASH_ACR_LATENCY_12WS /*!< FLASH twelve wait states */ +#define LL_FLASH_LATENCY_13 FLASH_ACR_LATENCY_13WS /*!< FLASH thirteen wait states */ +#define LL_FLASH_LATENCY_14 FLASH_ACR_LATENCY_14WS /*!< FLASH fourteen wait states */ +#define LL_FLASH_LATENCY_15 FLASH_ACR_LATENCY_15WS /*!< FLASH fifteen wait states */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup SYSTEM_LL_Exported_Functions SYSTEM Exported Functions + * @{ + */ + +/** @defgroup SYSTEM_LL_EF_SYSCFG SYSCFG + * @{ + */ +/** + * @brief Set memory mapping at address 0x00000000 + * @rmtoll SYSCFG_MEMRMP MEM_MODE LL_SYSCFG_SetRemapMemory + * @param Memory This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_REMAP_FLASH + * @arg @ref LL_SYSCFG_REMAP_SYSTEMFLASH + * @arg @ref LL_SYSCFG_REMAP_SRAM + * @arg @ref LL_SYSCFG_REMAP_FSMC (*) + * @arg @ref LL_SYSCFG_REMAP_FMC (*) + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_SetRemapMemory(uint32_t Memory) +{ + MODIFY_REG(SYSCFG->MEMRMP, SYSCFG_MEMRMP_MEM_MODE, Memory); +} + +/** + * @brief Get memory mapping at address 0x00000000 + * @rmtoll SYSCFG_MEMRMP MEM_MODE LL_SYSCFG_GetRemapMemory + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_REMAP_FLASH + * @arg @ref LL_SYSCFG_REMAP_SYSTEMFLASH + * @arg @ref LL_SYSCFG_REMAP_SRAM + * @arg @ref LL_SYSCFG_REMAP_FSMC (*) + * @arg @ref LL_SYSCFG_REMAP_FMC (*) + */ +__STATIC_INLINE uint32_t LL_SYSCFG_GetRemapMemory(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MEMRMP, SYSCFG_MEMRMP_MEM_MODE)); +} + +#if defined(SYSCFG_MEMRMP_SWP_FMC) +/** + * @brief Enables the FMC Memory Mapping Swapping + * @rmtoll SYSCFG_MEMRMP SWP_FMC LL_SYSCFG_EnableFMCMemorySwapping + * @note SDRAM is accessible at 0x60000000 and NOR/RAM + * is accessible at 0xC0000000 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_EnableFMCMemorySwapping(void) +{ + SET_BIT(SYSCFG->MEMRMP, SYSCFG_MEMRMP_SWP_FMC_0); +} + +/** + * @brief Disables the FMC Memory Mapping Swapping + * @rmtoll SYSCFG_MEMRMP SWP_FMC LL_SYSCFG_DisableFMCMemorySwapping + * @note SDRAM is accessible at 0xC0000000 (default mapping) + * and NOR/RAM is accessible at 0x60000000 (default mapping) + * @retval None + */ +__STATIC_INLINE void 
LL_SYSCFG_DisableFMCMemorySwapping(void)
+{
+ CLEAR_BIT(SYSCFG->MEMRMP, SYSCFG_MEMRMP_SWP_FMC);
+}
+
+#endif /* SYSCFG_MEMRMP_SWP_FMC */
+/**
+ * @brief Enable the I/O Compensation Cell
+ * @rmtoll SYSCFG_CMPCR CMP_PD LL_SYSCFG_EnableCompensationCell
+ * @note The I/O compensation cell can be used only when the device supply
+ * voltage ranges from 2.4 to 3.6 V
+ * @retval None
+ */
+__STATIC_INLINE void LL_SYSCFG_EnableCompensationCell(void)
+{
+ SET_BIT(SYSCFG->CMPCR, SYSCFG_CMPCR_CMP_PD);
+}
+
+/**
+ * @brief Disable the I/O Compensation Cell (power-down mode)
+ * @rmtoll SYSCFG_CMPCR CMP_PD LL_SYSCFG_DisableCompensationCell
+ * @note The I/O compensation cell can be used only when the device supply
+ * voltage ranges from 2.4 to 3.6 V
+ * @retval None
+ */
+__STATIC_INLINE void LL_SYSCFG_DisableCompensationCell(void)
+{
+ CLEAR_BIT(SYSCFG->CMPCR, SYSCFG_CMPCR_CMP_PD);
+}
+
+/**
+ * @brief Get Compensation Cell ready Flag
+ * @rmtoll SYSCFG_CMPCR READY LL_SYSCFG_IsActiveFlag_CMPCR
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_SYSCFG_IsActiveFlag_CMPCR(void)
+{
+ return (READ_BIT(SYSCFG->CMPCR, SYSCFG_CMPCR_READY) == (SYSCFG_CMPCR_READY));
+}
+
+#if defined(SYSCFG_PMC_MII_RMII_SEL)
+/**
+ * @brief Select Ethernet PHY interface
+ * @rmtoll SYSCFG_PMC MII_RMII_SEL LL_SYSCFG_SetPHYInterface
+ * @param Interface This parameter can be one of the following values:
+ * @arg @ref LL_SYSCFG_PMC_ETHMII
+ * @arg @ref LL_SYSCFG_PMC_ETHRMII
+ * @retval None
+ */
+__STATIC_INLINE void LL_SYSCFG_SetPHYInterface(uint32_t Interface)
+{
+ MODIFY_REG(SYSCFG->PMC, SYSCFG_PMC_MII_RMII_SEL, Interface);
+}
+
+/**
+ * @brief Get Ethernet PHY interface
+ * @rmtoll SYSCFG_PMC MII_RMII_SEL LL_SYSCFG_GetPHYInterface
+ * @retval Returned value can be one of the following values:
+ * @arg @ref LL_SYSCFG_PMC_ETHMII
+ * @arg @ref LL_SYSCFG_PMC_ETHRMII
+ */
+__STATIC_INLINE uint32_t LL_SYSCFG_GetPHYInterface(void)
+{
+ return (uint32_t)(READ_BIT(SYSCFG->PMC, SYSCFG_PMC_MII_RMII_SEL));
+}
+#endif /* SYSCFG_PMC_MII_RMII_SEL */
+
+
+
+#if defined(SYSCFG_MEMRMP_UFB_MODE)
+/**
+ * @brief Select Flash bank mode (Bank flashed at 0x08000000)
+ * @rmtoll SYSCFG_MEMRMP UFB_MODE LL_SYSCFG_SetFlashBankMode
+ * @param Bank This parameter can be one of the following values:
+ * @arg @ref LL_SYSCFG_BANKMODE_BANK1
+ * @arg @ref LL_SYSCFG_BANKMODE_BANK2
+ * @retval None
+ */
+__STATIC_INLINE void LL_SYSCFG_SetFlashBankMode(uint32_t Bank)
+{
+ MODIFY_REG(SYSCFG->MEMRMP, SYSCFG_MEMRMP_UFB_MODE, Bank);
+}
+
+/**
+ * @brief Get Flash bank mode (Bank flashed at 0x08000000)
+ * @rmtoll SYSCFG_MEMRMP UFB_MODE LL_SYSCFG_GetFlashBankMode
+ * @retval Returned value can be one of the following values:
+ * @arg @ref LL_SYSCFG_BANKMODE_BANK1
+ * @arg @ref LL_SYSCFG_BANKMODE_BANK2
+ */
+__STATIC_INLINE uint32_t LL_SYSCFG_GetFlashBankMode(void)
+{
+ return (uint32_t)(READ_BIT(SYSCFG->MEMRMP, SYSCFG_MEMRMP_UFB_MODE));
+}
+#endif /* SYSCFG_MEMRMP_UFB_MODE */
+
+#if defined(SYSCFG_CFGR_FMPI2C1_SCL)
+/**
+ * @brief Enable the I2C fast mode plus driving capability.
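+ * @note A minimal usage sketch (assuming the FMPI2C1 pins are already configured
+ *       as alternate-function open-drain): raise the drive capability on both
+ *       pins before initialising the bus at Fast-mode Plus speed.
+ * @code
+ * LL_SYSCFG_EnableFastModePlus(LL_SYSCFG_I2C_FASTMODEPLUS_SCL |
+ *                              LL_SYSCFG_I2C_FASTMODEPLUS_SDA);
+ * @endcode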
+ * @rmtoll SYSCFG_CFGR FMPI2C1_SCL LL_SYSCFG_EnableFastModePlus\n + * SYSCFG_CFGR FMPI2C1_SDA LL_SYSCFG_EnableFastModePlus + * @param ConfigFastModePlus This parameter can be a combination of the following values: + * @arg @ref LL_SYSCFG_I2C_FASTMODEPLUS_SCL + * @arg @ref LL_SYSCFG_I2C_FASTMODEPLUS_SDA + * (*) value not defined in all devices + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_EnableFastModePlus(uint32_t ConfigFastModePlus) +{ + SET_BIT(SYSCFG->CFGR, ConfigFastModePlus); +} + +/** + * @brief Disable the I2C fast mode plus driving capability. + * @rmtoll SYSCFG_CFGR FMPI2C1_SCL LL_SYSCFG_DisableFastModePlus\n + * SYSCFG_CFGR FMPI2C1_SDA LL_SYSCFG_DisableFastModePlus\n + * @param ConfigFastModePlus This parameter can be a combination of the following values: + * @arg @ref LL_SYSCFG_I2C_FASTMODEPLUS_SCL + * @arg @ref LL_SYSCFG_I2C_FASTMODEPLUS_SDA + * (*) value not defined in all devices + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DisableFastModePlus(uint32_t ConfigFastModePlus) +{ + CLEAR_BIT(SYSCFG->CFGR, ConfigFastModePlus); +} +#endif /* SYSCFG_CFGR_FMPI2C1_SCL */ + +/** + * @brief Configure source input for the EXTI external interrupt. + * @rmtoll SYSCFG_EXTICR1 EXTIx LL_SYSCFG_SetEXTISource\n + * SYSCFG_EXTICR2 EXTIx LL_SYSCFG_SetEXTISource\n + * SYSCFG_EXTICR3 EXTIx LL_SYSCFG_SetEXTISource\n + * SYSCFG_EXTICR4 EXTIx LL_SYSCFG_SetEXTISource + * @param Port This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_EXTI_PORTA + * @arg @ref LL_SYSCFG_EXTI_PORTB + * @arg @ref LL_SYSCFG_EXTI_PORTC + * @arg @ref LL_SYSCFG_EXTI_PORTD + * @arg @ref LL_SYSCFG_EXTI_PORTE + * @arg @ref LL_SYSCFG_EXTI_PORTF (*) + * @arg @ref LL_SYSCFG_EXTI_PORTG (*) + * @arg @ref LL_SYSCFG_EXTI_PORTH + * + * (*) value not defined in all devices + * @param Line This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_EXTI_LINE0 + * @arg @ref LL_SYSCFG_EXTI_LINE1 + * @arg @ref LL_SYSCFG_EXTI_LINE2 + * @arg @ref LL_SYSCFG_EXTI_LINE3 + * @arg @ref LL_SYSCFG_EXTI_LINE4 + * @arg @ref LL_SYSCFG_EXTI_LINE5 + * @arg @ref LL_SYSCFG_EXTI_LINE6 + * @arg @ref LL_SYSCFG_EXTI_LINE7 + * @arg @ref LL_SYSCFG_EXTI_LINE8 + * @arg @ref LL_SYSCFG_EXTI_LINE9 + * @arg @ref LL_SYSCFG_EXTI_LINE10 + * @arg @ref LL_SYSCFG_EXTI_LINE11 + * @arg @ref LL_SYSCFG_EXTI_LINE12 + * @arg @ref LL_SYSCFG_EXTI_LINE13 + * @arg @ref LL_SYSCFG_EXTI_LINE14 + * @arg @ref LL_SYSCFG_EXTI_LINE15 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_SetEXTISource(uint32_t Port, uint32_t Line) +{ + MODIFY_REG(SYSCFG->EXTICR[Line & 0xFF], (Line >> 16), Port << POSITION_VAL((Line >> 16))); +} + +/** + * @brief Get the configured defined for specific EXTI Line + * @rmtoll SYSCFG_EXTICR1 EXTIx LL_SYSCFG_GetEXTISource\n + * SYSCFG_EXTICR2 EXTIx LL_SYSCFG_GetEXTISource\n + * SYSCFG_EXTICR3 EXTIx LL_SYSCFG_GetEXTISource\n + * SYSCFG_EXTICR4 EXTIx LL_SYSCFG_GetEXTISource + * @param Line This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_EXTI_LINE0 + * @arg @ref LL_SYSCFG_EXTI_LINE1 + * @arg @ref LL_SYSCFG_EXTI_LINE2 + * @arg @ref LL_SYSCFG_EXTI_LINE3 + * @arg @ref LL_SYSCFG_EXTI_LINE4 + * @arg @ref LL_SYSCFG_EXTI_LINE5 + * @arg @ref LL_SYSCFG_EXTI_LINE6 + * @arg @ref LL_SYSCFG_EXTI_LINE7 + * @arg @ref LL_SYSCFG_EXTI_LINE8 + * @arg @ref LL_SYSCFG_EXTI_LINE9 + * @arg @ref LL_SYSCFG_EXTI_LINE10 + * @arg @ref LL_SYSCFG_EXTI_LINE11 + * @arg @ref LL_SYSCFG_EXTI_LINE12 + * @arg @ref LL_SYSCFG_EXTI_LINE13 + * @arg @ref LL_SYSCFG_EXTI_LINE14 + * @arg @ref LL_SYSCFG_EXTI_LINE15 + * @retval 
Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_EXTI_PORTA + * @arg @ref LL_SYSCFG_EXTI_PORTB + * @arg @ref LL_SYSCFG_EXTI_PORTC + * @arg @ref LL_SYSCFG_EXTI_PORTD + * @arg @ref LL_SYSCFG_EXTI_PORTE + * @arg @ref LL_SYSCFG_EXTI_PORTF (*) + * @arg @ref LL_SYSCFG_EXTI_PORTG (*) + * @arg @ref LL_SYSCFG_EXTI_PORTH + * (*) value not defined in all devices + */ +__STATIC_INLINE uint32_t LL_SYSCFG_GetEXTISource(uint32_t Line) +{ + return (uint32_t)(READ_BIT(SYSCFG->EXTICR[Line & 0xFF], (Line >> 16)) >> POSITION_VAL(Line >> 16)); +} + +#if defined(SYSCFG_CFGR2_LOCKUP_LOCK) +/** + * @brief Set connections to TIM1/8 break inputs + * @rmtoll SYSCFG_CFGR2 LockUp Lock LL_SYSCFG_SetTIMBreakInputs \n + * SYSCFG_CFGR2 PVD Lock LL_SYSCFG_SetTIMBreakInputs + * @param Break This parameter can be a combination of the following values: + * @arg @ref LL_SYSCFG_TIMBREAK_LOCKUP + * @arg @ref LL_SYSCFG_TIMBREAK_PVD + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_SetTIMBreakInputs(uint32_t Break) +{ + MODIFY_REG(SYSCFG->CFGR2, SYSCFG_CFGR2_LOCKUP_LOCK | SYSCFG_CFGR2_PVD_LOCK, Break); +} + +/** + * @brief Get connections to TIM1/8 Break inputs + * @rmtoll SYSCFG_CFGR2 LockUp Lock LL_SYSCFG_SetTIMBreakInputs \n + * SYSCFG_CFGR2 PVD Lock LL_SYSCFG_SetTIMBreakInputs + * @retval Returned value can be can be a combination of the following values: + * @arg @ref LL_SYSCFG_TIMBREAK_LOCKUP + * @arg @ref LL_SYSCFG_TIMBREAK_PVD + */ +__STATIC_INLINE uint32_t LL_SYSCFG_GetTIMBreakInputs(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->CFGR2, SYSCFG_CFGR2_LOCKUP_LOCK | SYSCFG_CFGR2_PVD_LOCK)); +} +#endif /* SYSCFG_CFGR2_LOCKUP_LOCK */ +#if defined(SYSCFG_MCHDLYCR_BSCKSEL) +/** + * @brief Select the DFSDM2 or TIM2_OC1 as clock source for the bitstream clock. + * @rmtoll SYSCFG_MCHDLYCR BSCKSEL LL_SYSCFG_DFSDM_SetBitstreamClockSourceSelection + * @param ClockSource This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_BITSTREAM_CLOCK_DFSDM2 + * @arg @ref LL_SYSCFG_BITSTREAM_CLOCK_TIM2OC1 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM_SetBitstreamClockSourceSelection(uint32_t ClockSource) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_BSCKSEL, ClockSource); +} +/** + * @brief Get the DFSDM2 or TIM2_OC1 as clock source for the bitstream clock. 
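+ * @note A minimal usage sketch (illustrative only): route TIM2 OC1 to the DFSDM
+ *       bitstream clock and read the selection back.
+ * @code
+ * LL_SYSCFG_DFSDM_SetBitstreamClockSourceSelection(LL_SYSCFG_BITSTREAM_CLOCK_TIM2OC1);
+ * if (LL_SYSCFG_DFSDM_GetBitstreamClockSourceSelection() == LL_SYSCFG_BITSTREAM_CLOCK_TIM2OC1)
+ * {
+ *   // the bitstream clock is now gated by TIM2 OC1
+ * }
+ * @endcode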
+ * @rmtoll SYSCFG_MCHDLYCR BSCKSEL LL_SYSCFG_DFSDM_GetBitstreamClockSourceSelection + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_BITSTREAM_CLOCK_DFSDM2 + * @arg @ref LL_SYSCFG_BITSTREAM_CLOCK_TIM2OC1 + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM_GetBitstreamClockSourceSelection(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_BSCKSEL)); +} +/** + * @brief Enables the DFSDM1 or DFSDM2 Delay clock + * @rmtoll SYSCFG_MCHDLYCR MCHDLYEN LL_SYSCFG_DFSDM_EnableDelayClock + * @param MCHDLY This parameter can be one of the following values + * @arg @ref LL_SYSCFG_DFSDM1_MCHDLYEN + * @arg @ref LL_SYSCFG_DFSDM2_MCHDLYEN + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM_EnableDelayClock(uint32_t MCHDLY) +{ + SET_BIT(SYSCFG->MCHDLYCR, MCHDLY); +} + +/** + * @brief Disables the DFSDM1 or the DFSDM2 Delay clock + * @rmtoll SYSCFG_MCHDLYCR MCHDLY1EN LL_SYSCFG_DFSDM1_DisableDelayClock + * @param MCHDLY This parameter can be one of the following values + * @arg @ref LL_SYSCFG_DFSDM1_MCHDLYEN + * @arg @ref LL_SYSCFG_DFSDM2_MCHDLYEN + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM_DisableDelayClock(uint32_t MCHDLY) +{ + CLEAR_BIT(SYSCFG->MCHDLYCR, MCHDLY); +} + +/** + * @brief Select the source for DFSDM1 or DFSDM2 DatIn0 + * @rmtoll SYSCFG_MCHDLYCR DFSDMD0SEL LL_SYSCFG_DFSDM_SetDataIn0Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_DataIn0_PAD + * @arg @ref LL_SYSCFG_DFSDM1_DataIn0_DM + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_DM + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM_SetDataIn0Source(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, (Source >> 16), (Source & 0x0000FFFF)); +} +/** + * @brief Get the source for DFSDM1 or DFSDM2 DatIn0. + * @rmtoll SYSCFG_MCHDLYCR DFSDMD0SEL LL_SYSCFG_DFSDM_GetDataIn0Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_DataIn0 + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0 + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_DataIn0_PAD + * @arg @ref LL_SYSCFG_DFSDM1_DataIn0_DM + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_DM + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM_GetDataIn0Source(uint32_t Source) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, Source)); +} +/** + * @brief Select the source for DFSDM1 or DFSDM2 DatIn2 + * @rmtoll SYSCFG_MCHDLYCR DFSDMD2SEL LL_SYSCFG_DFSDM_SetDataIn2Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_DataIn2_PAD + * @arg @ref LL_SYSCFG_DFSDM1_DataIn2_DM + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_DM + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM_SetDataIn2Source(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, (Source >> 16), (Source & 0x0000FFFF)); +} +/** + * @brief Get the source for DFSDM1 or DFSDM2 DatIn2. 
+ * @rmtoll SYSCFG_MCHDLYCR DFSDMD2SEL LL_SYSCFG_DFSDM_GetDataIn2Source
+ * @param Source This parameter can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM1_DataIn2
+ * @arg @ref LL_SYSCFG_DFSDM2_DataIn2
+ * @retval Returned value can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM1_DataIn2_PAD
+ * @arg @ref LL_SYSCFG_DFSDM1_DataIn2_DM
+ * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_PAD
+ * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_DM
+ */
+__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM_GetDataIn2Source(uint32_t Source)
+{
+ return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, Source));
+}
+
+/**
+ * @brief Select the distribution of the bitstream clock gated by TIM4 OC2
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM1CK02SEL LL_SYSCFG_DFSDM1_SetTIM4OC2BitStreamDistribution
+ * @param Source This parameter can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC2_CLKIN0
+ * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC2_CLKIN2
+ * @retval None
+ */
+__STATIC_INLINE void LL_SYSCFG_DFSDM1_SetTIM4OC2BitStreamDistribution(uint32_t Source)
+{
+ MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM1CK02SEL, Source);
+}
+/**
+ * @brief Get the distribution of the bitstream clock gated by TIM4 OC2
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM1CK02SEL LL_SYSCFG_DFSDM1_GetTIM4OC2BitStreamDistribution
+ * @retval Returned value can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC2_CLKIN0
+ * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC2_CLKIN2
+ */
+__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM1_GetTIM4OC2BitStreamDistribution(void)
+{
+ return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM1CK02SEL));
+}
+
+/**
+ * @brief Select the distribution of the bitstream clock gated by TIM4 OC1
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM1CK13SEL LL_SYSCFG_DFSDM1_SetTIM4OC1BitStreamDistribution
+ * @param Source This parameter can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC1_CLKIN1
+ * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC1_CLKIN3
+ * @retval None
+ */
+__STATIC_INLINE void LL_SYSCFG_DFSDM1_SetTIM4OC1BitStreamDistribution(uint32_t Source)
+{
+ MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM1CK13SEL, Source);
+}
+/**
+ * @brief Get the distribution of the bitstream clock gated by TIM4 OC1
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM1CK13SEL LL_SYSCFG_DFSDM1_GetTIM4OC1BitStreamDistribution
+ * @retval Returned value can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC1_CLKIN1
+ * @arg @ref LL_SYSCFG_DFSDM1_TIM4OC1_CLKIN3
+ */
+__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM1_GetTIM4OC1BitStreamDistribution(void)
+{
+ return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM1CK13SEL));
+}
+
+/**
+ * @brief Select the DFSDM1 Clock In
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM1CFG LL_SYSCFG_DFSDM1_SetClockInSourceSelection
+ * @param ClockSource This parameter can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM1_CKIN_PAD
+ * @arg @ref LL_SYSCFG_DFSDM1_CKIN_DM
+ * @retval None
+ */
+__STATIC_INLINE void LL_SYSCFG_DFSDM1_SetClockInSourceSelection(uint32_t ClockSource)
+{
+ MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM1CFG, ClockSource);
+}
+/**
+ * @brief Get the DFSDM1 Clock In
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM1CFG LL_SYSCFG_DFSDM1_GetClockInSourceSelection
+ * @retval Returned value can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM1_CKIN_PAD
+ * @arg @ref LL_SYSCFG_DFSDM1_CKIN_DM
+ */
+__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM1_GetClockInSourceSelection(void)
+{
+ return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, 
SYSCFG_MCHDLYCR_DFSDM1CFG)); +} + +/** + * @brief Select the DFSDM1 Clock Out + * @rmtoll SYSCFG_MCHDLYCR DFSDM1CKOSEL LL_SYSCFG_DFSDM1_SetClockOutSourceSelection + * @param ClockSource This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_CKOUT + * @arg @ref LL_SYSCFG_DFSDM1_CKOUT_M27 + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM1_SetClockOutSourceSelection(uint32_t ClockSource) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM1CKOSEL, ClockSource); +} +/** + * @brief GET the DFSDM1 Clock Out + * @rmtoll SYSCFG_MCHDLYCR DFSDM1CKOSEL LL_SYSCFG_DFSDM1_GetClockOutSourceSelection + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM1_CKOUT + * @arg @ref LL_SYSCFG_DFSDM1_CKOUT_M27 + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM1_GetClockOutSourceSelection(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM1CKOSEL)); +} + +/** + * @brief Enables the DFSDM2 Delay clock + * @rmtoll SYSCFG_MCHDLYCR MCHDLY2EN LL_SYSCFG_DFSDM2_EnableDelayClock + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_EnableDelayClock(void) +{ + SET_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_MCHDLY2EN); +} + +/** + * @brief Disables the DFSDM2 Delay clock + * @rmtoll SYSCFG_MCHDLYCR MCHDLY2EN LL_SYSCFG_DFSDM2_DisableDelayClock + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_DisableDelayClock(void) +{ + CLEAR_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_MCHDLY2EN); +} +/** + * @brief Select the source for DFSDM2 DatIn0 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2D0SEL LL_SYSCFG_DFSDM2_SetDataIn0Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_DM + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetDataIn0Source(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D0SEL, Source); +} +/** + * @brief Get the source for DFSDM2 DatIn0. + * @rmtoll SYSCFG_MCHDLYCR DFSDM2D0SEL LL_SYSCFG_DFSDM2_GetDataIn0Source + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn0_DM + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetDataIn0Source(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D0SEL)); +} + +/** + * @brief Select the source for DFSDM2 DatIn2 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2D2SEL LL_SYSCFG_DFSDM2_SetDataIn2Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_DM + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetDataIn2Source(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D2SEL, Source); +} +/** + * @brief Get the source for DFSDM2 DatIn2. 
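+ * @note A minimal usage sketch (illustrative only): reroute DFSDM2 DatIn2 to the
+ *       internal source and read the raw selection bit back; the getter returns
+ *       a non-zero value once the internal source is selected.
+ * @code
+ * LL_SYSCFG_DFSDM2_SetDataIn2Source(LL_SYSCFG_DFSDM2_DataIn2_DM);
+ * uint32_t datin2_internal = LL_SYSCFG_DFSDM2_GetDataIn2Source();  // non-zero when rerouted
+ * @endcode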
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2D2SEL LL_SYSCFG_DFSDM2_GetDataIn2Source + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn2_DM + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetDataIn2Source(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D2SEL)); +} + +/** + * @brief Select the source for DFSDM2 DatIn4 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2D4SEL LL_SYSCFG_DFSDM2_SetDataIn4Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn4_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn4_DM + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetDataIn4Source(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D4SEL, Source); +} +/** + * @brief Get the source for DFSDM2 DatIn4. + * @rmtoll SYSCFG_MCHDLYCR DFSDM2D4SEL LL_SYSCFG_DFSDM2_GetDataIn4Source + * @retval Returned value can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn4_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn4_DM + * @retval None + */ +__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetDataIn4Source(void) +{ + return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D4SEL)); +} + +/** + * @brief Select the source for DFSDM2 DatIn6 + * @rmtoll SYSCFG_MCHDLYCR DFSDM2D6SEL LL_SYSCFG_DFSDM2_SetDataIn6Source + * @param Source This parameter can be one of the following values: + * @arg @ref LL_SYSCFG_DFSDM2_DataIn6_PAD + * @arg @ref LL_SYSCFG_DFSDM2_DataIn6_DM + * @retval None + */ +__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetDataIn6Source(uint32_t Source) +{ + MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D6SEL, Source); +} +/** + * @brief Get the source for DFSDM2 DatIn6. 
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2D6SEL LL_SYSCFG_DFSDM2_GetDataIn6Source
+ * @retval Returned value can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM2_DataIn6_PAD
+ * @arg @ref LL_SYSCFG_DFSDM2_DataIn6_DM
+ */
+__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetDataIn6Source(void)
+{
+ return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2D6SEL));
+}
+
+/**
+ * @brief Select the distribution of the bitstream clock gated by TIM3 OC4
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK04SEL LL_SYSCFG_DFSDM2_SetTIM3OC4BitStreamDistribution
+ * @param Source This parameter can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC4_CLKIN0
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC4_CLKIN4
+ * @retval None
+ */
+__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetTIM3OC4BitStreamDistribution(uint32_t Source)
+{
+ MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK04SEL, Source);
+}
+/**
+ * @brief Get the distribution of the bitstream clock gated by TIM3 OC4
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK04SEL LL_SYSCFG_DFSDM2_GetTIM3OC4BitStreamDistribution
+ * @retval Returned value can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC4_CLKIN0
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC4_CLKIN4
+ */
+__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetTIM3OC4BitStreamDistribution(void)
+{
+ return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK04SEL));
+}
+
+/**
+ * @brief Select the distribution of the bitstream clock gated by TIM3 OC3
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK15SEL LL_SYSCFG_DFSDM2_SetTIM3OC3BitStreamDistribution
+ * @param Source This parameter can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC3_CLKIN1
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC3_CLKIN5
+ * @retval None
+ */
+__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetTIM3OC3BitStreamDistribution(uint32_t Source)
+{
+ MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK15SEL, Source);
+}
+/**
+ * @brief Get the distribution of the bitstream clock gated by TIM3 OC3
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK15SEL LL_SYSCFG_DFSDM2_GetTIM3OC3BitStreamDistribution
+ * @retval Returned value can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC3_CLKIN1
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC3_CLKIN5
+ */
+__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetTIM3OC3BitStreamDistribution(void)
+{
+ return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK15SEL));
+}
+
+/**
+ * @brief Select the distribution of the bitstream clock gated by TIM3 OC2
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK26SEL LL_SYSCFG_DFSDM2_SetTIM3OC2BitStreamDistribution
+ * @param Source This parameter can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC2_CLKIN2
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC2_CLKIN6
+ * @retval None
+ */
+__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetTIM3OC2BitStreamDistribution(uint32_t Source)
+{
+ MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK26SEL, Source);
+}
+/**
+ * @brief Get the distribution of the bitstream clock gated by TIM3 OC2
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK26SEL LL_SYSCFG_DFSDM2_GetTIM3OC2BitStreamDistribution
+ * @retval Returned value can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC2_CLKIN2
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC2_CLKIN6
+ */
+__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetTIM3OC2BitStreamDistribution(void)
+{
+ return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK26SEL));
+}
+
+/**
+ * @brief Select the distribution of the bitstream clock gated by TIM3 OC1
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK37SEL LL_SYSCFG_DFSDM2_SetTIM3OC1BitStreamDistribution
+ * @param Source This parameter can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC1_CLKIN3
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC1_CLKIN7
+ * @retval None
+ */
+__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetTIM3OC1BitStreamDistribution(uint32_t Source)
+{
+ MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK37SEL, Source);
+}
+/**
+ * @brief Get the distribution of the bitstream clock gated by TIM3 OC1
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2CK37SEL LL_SYSCFG_DFSDM2_GetTIM3OC1BitStreamDistribution
+ * @retval Returned value can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC1_CLKIN3
+ * @arg @ref LL_SYSCFG_DFSDM2_TIM3OC1_CLKIN7
+ */
+__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetTIM3OC1BitStreamDistribution(void)
+{
+ return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CK37SEL));
+}
+
+/**
+ * @brief Select the DFSDM2 Clock In
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2CFG LL_SYSCFG_DFSDM2_SetClockInSourceSelection
+ * @param ClockSource This parameter can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM2_CKIN_PAD
+ * @arg @ref LL_SYSCFG_DFSDM2_CKIN_DM
+ * @retval None
+ */
+__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetClockInSourceSelection(uint32_t ClockSource)
+{
+ MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CFG, ClockSource);
+}
+/**
+ * @brief Get the DFSDM2 Clock In
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2CFG LL_SYSCFG_DFSDM2_GetClockInSourceSelection
+ * @retval Returned value can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM2_CKIN_PAD
+ * @arg @ref LL_SYSCFG_DFSDM2_CKIN_DM
+ */
+__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetClockInSourceSelection(void)
+{
+ return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CFG));
+}
+
+/**
+ * @brief Select the DFSDM2 Clock Out
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2CKOSEL LL_SYSCFG_DFSDM2_SetClockOutSourceSelection
+ * @param ClockSource This parameter can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM2_CKOUT
+ * @arg @ref LL_SYSCFG_DFSDM2_CKOUT_M27
+ * @retval None
+ */
+__STATIC_INLINE void LL_SYSCFG_DFSDM2_SetClockOutSourceSelection(uint32_t ClockSource)
+{
+ MODIFY_REG(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CKOSEL, ClockSource);
+}
+/**
+ * @brief Get the DFSDM2 Clock Out
+ * @rmtoll SYSCFG_MCHDLYCR DFSDM2CKOSEL LL_SYSCFG_DFSDM2_GetClockOutSourceSelection
+ * @retval Returned value can be one of the following values:
+ * @arg @ref LL_SYSCFG_DFSDM2_CKOUT
+ * @arg @ref LL_SYSCFG_DFSDM2_CKOUT_M27
+ */
+__STATIC_INLINE uint32_t LL_SYSCFG_DFSDM2_GetClockOutSourceSelection(void)
+{
+ return (uint32_t)(READ_BIT(SYSCFG->MCHDLYCR, SYSCFG_MCHDLYCR_DFSDM2CKOSEL));
+}
+
+#endif /* SYSCFG_MCHDLYCR_BSCKSEL */
+/**
+ * @}
+ */
+
+
+/** @defgroup SYSTEM_LL_EF_DBGMCU DBGMCU
+ * @{
+ */
+
+/**
+ * @brief Return the device identifier
+ * @note For STM32F405/407xx and STM32F415/417xx devices, the device ID is 0x413
+ * @note For STM32F42xxx and STM32F43xxx devices, the device ID is 0x419
+ * @note For STM32F401xB/C devices, the device ID is 0x423
+ * @note For STM32F401xD/E devices, the device ID is 0x433
+ * @note For STM32F411xx devices, the device ID is 0x431
+ * @note For STM32F410xx devices, the device ID is 0x458
+ * @note For STM32F412xx devices, the device ID is 0x441
+ * @note For STM32F413xx and STM32F423xx devices, the device ID is 0x463
+ * @note For STM32F446xx devices, the device ID is 0x421
+ * @note For 
STM32F469xx and STM32F479xx devices, the device ID is 0x434 + * @rmtoll DBGMCU_IDCODE DEV_ID LL_DBGMCU_GetDeviceID + * @retval Values between Min_Data=0x00 and Max_Data=0xFFF + */ +__STATIC_INLINE uint32_t LL_DBGMCU_GetDeviceID(void) +{ + return (uint32_t)(READ_BIT(DBGMCU->IDCODE, DBGMCU_IDCODE_DEV_ID)); +} + +/** + * @brief Return the device revision identifier + * @note This field indicates the revision of the device. + For example, it is read as RevA -> 0x1000, Cat 2 revZ -> 0x1001, rev1 -> 0x1003, rev2 ->0x1007, revY -> 0x100F for STM32F405/407xx and STM32F415/417xx devices + For example, it is read as RevA -> 0x1000, Cat 2 revY -> 0x1003, rev1 -> 0x1007, rev3 ->0x2001 for STM32F42xxx and STM32F43xxx devices + For example, it is read as RevZ -> 0x1000, Cat 2 revA -> 0x1001 for STM32F401xB/C devices + For example, it is read as RevA -> 0x1000, Cat 2 revZ -> 0x1001 for STM32F401xD/E devices + For example, it is read as RevA -> 0x1000 for STM32F411xx,STM32F413/423xx,STM32F469/423xx, STM32F446xx and STM32F410xx devices + For example, it is read as RevZ -> 0x1001, Cat 2 revB -> 0x2000, revC -> 0x3000 for STM32F412xx devices + * @rmtoll DBGMCU_IDCODE REV_ID LL_DBGMCU_GetRevisionID + * @retval Values between Min_Data=0x00 and Max_Data=0xFFFF + */ +__STATIC_INLINE uint32_t LL_DBGMCU_GetRevisionID(void) +{ + return (uint32_t)(READ_BIT(DBGMCU->IDCODE, DBGMCU_IDCODE_REV_ID) >> DBGMCU_IDCODE_REV_ID_Pos); +} + +/** + * @brief Enable the Debug Module during SLEEP mode + * @rmtoll DBGMCU_CR DBG_SLEEP LL_DBGMCU_EnableDBGSleepMode + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_EnableDBGSleepMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_SLEEP); +} + +/** + * @brief Disable the Debug Module during SLEEP mode + * @rmtoll DBGMCU_CR DBG_SLEEP LL_DBGMCU_DisableDBGSleepMode + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_DisableDBGSleepMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_SLEEP); +} + +/** + * @brief Enable the Debug Module during STOP mode + * @rmtoll DBGMCU_CR DBG_STOP LL_DBGMCU_EnableDBGStopMode + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_EnableDBGStopMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STOP); +} + +/** + * @brief Disable the Debug Module during STOP mode + * @rmtoll DBGMCU_CR DBG_STOP LL_DBGMCU_DisableDBGStopMode + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_DisableDBGStopMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STOP); +} + +/** + * @brief Enable the Debug Module during STANDBY mode + * @rmtoll DBGMCU_CR DBG_STANDBY LL_DBGMCU_EnableDBGStandbyMode + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_EnableDBGStandbyMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STANDBY); +} + +/** + * @brief Disable the Debug Module during STANDBY mode + * @rmtoll DBGMCU_CR DBG_STANDBY LL_DBGMCU_DisableDBGStandbyMode + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_DisableDBGStandbyMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STANDBY); +} + +/** + * @brief Set Trace pin assignment control + * @rmtoll DBGMCU_CR TRACE_IOEN LL_DBGMCU_SetTracePinAssignment\n + * DBGMCU_CR TRACE_MODE LL_DBGMCU_SetTracePinAssignment + * @param PinAssignment This parameter can be one of the following values: + * @arg @ref LL_DBGMCU_TRACE_NONE + * @arg @ref LL_DBGMCU_TRACE_ASYNCH + * @arg @ref LL_DBGMCU_TRACE_SYNCH_SIZE1 + * @arg @ref LL_DBGMCU_TRACE_SYNCH_SIZE2 + * @arg @ref LL_DBGMCU_TRACE_SYNCH_SIZE4 + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_SetTracePinAssignment(uint32_t PinAssignment) +{ + MODIFY_REG(DBGMCU->CR, DBGMCU_CR_TRACE_IOEN | 
DBGMCU_CR_TRACE_MODE, PinAssignment); +} + +/** + * @brief Get Trace pin assignment control + * @rmtoll DBGMCU_CR TRACE_IOEN LL_DBGMCU_GetTracePinAssignment\n + * DBGMCU_CR TRACE_MODE LL_DBGMCU_GetTracePinAssignment + * @retval Returned value can be one of the following values: + * @arg @ref LL_DBGMCU_TRACE_NONE + * @arg @ref LL_DBGMCU_TRACE_ASYNCH + * @arg @ref LL_DBGMCU_TRACE_SYNCH_SIZE1 + * @arg @ref LL_DBGMCU_TRACE_SYNCH_SIZE2 + * @arg @ref LL_DBGMCU_TRACE_SYNCH_SIZE4 + */ +__STATIC_INLINE uint32_t LL_DBGMCU_GetTracePinAssignment(void) +{ + return (uint32_t)(READ_BIT(DBGMCU->CR, DBGMCU_CR_TRACE_IOEN | DBGMCU_CR_TRACE_MODE)); +} + +/** + * @brief Freeze APB1 peripherals (group1 peripherals) + * @rmtoll DBGMCU_APB1_FZ DBG_TIM2_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM3_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM4_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM5_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM6_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM7_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM12_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM13_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM14_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_LPTIM_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_RTC_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_WWDG_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_IWDG_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C1_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C2_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C3_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C4_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_CAN1_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_CAN2_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph\n + * DBGMCU_APB1_FZ DBG_CAN3_STOP LL_DBGMCU_APB1_GRP1_FreezePeriph + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM2_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM3_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM4_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM5_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM6_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM7_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM12_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM13_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM14_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_LPTIM_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_RTC_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_WWDG_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_IWDG_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C1_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C2_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C3_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C4_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_CAN1_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_CAN2_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_CAN3_STOP (*) + * + * (*) value not defined in all devices. 
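+ * @note A minimal usage sketch (illustrative only): keep the watchdogs and TIM5
+ *       frozen while the core is halted, so breakpoints do not cause a watchdog
+ *       reset during a debug session.
+ * @code
+ * LL_DBGMCU_APB1_GRP1_FreezePeriph(LL_DBGMCU_APB1_GRP1_IWDG_STOP |
+ *                                  LL_DBGMCU_APB1_GRP1_WWDG_STOP |
+ *                                  LL_DBGMCU_APB1_GRP1_TIM5_STOP);
+ * @endcode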
+ * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_APB1_GRP1_FreezePeriph(uint32_t Periphs) +{ + SET_BIT(DBGMCU->APB1FZ, Periphs); +} + +/** + * @brief Unfreeze APB1 peripherals (group1 peripherals) + * @rmtoll DBGMCU_APB1_FZ DBG_TIM2_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM3_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM4_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM5_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM6_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM7_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM12_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM13_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_TIM14_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_LPTIM_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_RTC_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_WWDG_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_IWDG_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C1_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C2_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C3_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_I2C4_SMBUS_TIMEOUT LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_CAN1_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_CAN2_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph\n + * DBGMCU_APB1_FZ DBG_CAN3_STOP LL_DBGMCU_APB1_GRP1_UnFreezePeriph + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM2_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM3_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM4_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM5_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM6_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM7_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM12_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM13_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_TIM14_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_LPTIM_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_RTC_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_WWDG_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_IWDG_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C1_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C2_STOP + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C3_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_I2C4_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_CAN1_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_CAN2_STOP (*) + * @arg @ref LL_DBGMCU_APB1_GRP1_CAN3_STOP (*) + * + * (*) value not defined in all devices. 
+ * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_APB1_GRP1_UnFreezePeriph(uint32_t Periphs) +{ + CLEAR_BIT(DBGMCU->APB1FZ, Periphs); +} + +/** + * @brief Freeze APB2 peripherals + * @rmtoll DBGMCU_APB2_FZ DBG_TIM1_STOP LL_DBGMCU_APB2_GRP1_FreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM8_STOP LL_DBGMCU_APB2_GRP1_FreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM9_STOP LL_DBGMCU_APB2_GRP1_FreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM10_STOP LL_DBGMCU_APB2_GRP1_FreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM11_STOP LL_DBGMCU_APB2_GRP1_FreezePeriph + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM1_STOP + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM8_STOP (*) + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM9_STOP (*) + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM10_STOP (*) + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM11_STOP (*) + * + * (*) value not defined in all devices. + * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_APB2_GRP1_FreezePeriph(uint32_t Periphs) +{ + SET_BIT(DBGMCU->APB2FZ, Periphs); +} + +/** + * @brief Unfreeze APB2 peripherals + * @rmtoll DBGMCU_APB2_FZ DBG_TIM1_STOP LL_DBGMCU_APB2_GRP1_UnFreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM8_STOP LL_DBGMCU_APB2_GRP1_UnFreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM9_STOP LL_DBGMCU_APB2_GRP1_UnFreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM10_STOP LL_DBGMCU_APB2_GRP1_UnFreezePeriph\n + * DBGMCU_APB2_FZ DBG_TIM11_STOP LL_DBGMCU_APB2_GRP1_UnFreezePeriph + * @param Periphs This parameter can be a combination of the following values: + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM1_STOP + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM8_STOP (*) + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM9_STOP (*) + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM10_STOP (*) + * @arg @ref LL_DBGMCU_APB2_GRP1_TIM11_STOP (*) + * + * (*) value not defined in all devices. 
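Freezing peripherals is usually done once at debug start-up; a sketch using the freeze helpers above, assuming TIM2 and TIM1 exist on the target (the (*)-marked constants are not defined for every STM32F4 part):

/* Stop the watchdogs and the timers of interest whenever the core is halted,
   so single-stepping neither resets the device nor skews timer counts. */
LL_DBGMCU_APB1_GRP1_FreezePeriph(LL_DBGMCU_APB1_GRP1_IWDG_STOP |
                                 LL_DBGMCU_APB1_GRP1_WWDG_STOP |
                                 LL_DBGMCU_APB1_GRP1_TIM2_STOP);
LL_DBGMCU_APB2_GRP1_FreezePeriph(LL_DBGMCU_APB2_GRP1_TIM1_STOP);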
+ * @retval None + */ +__STATIC_INLINE void LL_DBGMCU_APB2_GRP1_UnFreezePeriph(uint32_t Periphs) +{ + CLEAR_BIT(DBGMCU->APB2FZ, Periphs); +} +/** + * @} + */ + +/** @defgroup SYSTEM_LL_EF_FLASH FLASH + * @{ + */ + +/** + * @brief Set FLASH Latency + * @rmtoll FLASH_ACR LATENCY LL_FLASH_SetLatency + * @param Latency This parameter can be one of the following values: + * @arg @ref LL_FLASH_LATENCY_0 + * @arg @ref LL_FLASH_LATENCY_1 + * @arg @ref LL_FLASH_LATENCY_2 + * @arg @ref LL_FLASH_LATENCY_3 + * @arg @ref LL_FLASH_LATENCY_4 + * @arg @ref LL_FLASH_LATENCY_5 + * @arg @ref LL_FLASH_LATENCY_6 + * @arg @ref LL_FLASH_LATENCY_7 + * @arg @ref LL_FLASH_LATENCY_8 + * @arg @ref LL_FLASH_LATENCY_9 + * @arg @ref LL_FLASH_LATENCY_10 + * @arg @ref LL_FLASH_LATENCY_11 + * @arg @ref LL_FLASH_LATENCY_12 + * @arg @ref LL_FLASH_LATENCY_13 + * @arg @ref LL_FLASH_LATENCY_14 + * @arg @ref LL_FLASH_LATENCY_15 + * @retval None + */ +__STATIC_INLINE void LL_FLASH_SetLatency(uint32_t Latency) +{ + MODIFY_REG(FLASH->ACR, FLASH_ACR_LATENCY, Latency); +} + +/** + * @brief Get FLASH Latency + * @rmtoll FLASH_ACR LATENCY LL_FLASH_GetLatency + * @retval Returned value can be one of the following values: + * @arg @ref LL_FLASH_LATENCY_0 + * @arg @ref LL_FLASH_LATENCY_1 + * @arg @ref LL_FLASH_LATENCY_2 + * @arg @ref LL_FLASH_LATENCY_3 + * @arg @ref LL_FLASH_LATENCY_4 + * @arg @ref LL_FLASH_LATENCY_5 + * @arg @ref LL_FLASH_LATENCY_6 + * @arg @ref LL_FLASH_LATENCY_7 + * @arg @ref LL_FLASH_LATENCY_8 + * @arg @ref LL_FLASH_LATENCY_9 + * @arg @ref LL_FLASH_LATENCY_10 + * @arg @ref LL_FLASH_LATENCY_11 + * @arg @ref LL_FLASH_LATENCY_12 + * @arg @ref LL_FLASH_LATENCY_13 + * @arg @ref LL_FLASH_LATENCY_14 + * @arg @ref LL_FLASH_LATENCY_15 + */ +__STATIC_INLINE uint32_t LL_FLASH_GetLatency(void) +{ + return (uint32_t)(READ_BIT(FLASH->ACR, FLASH_ACR_LATENCY)); +} + +/** + * @brief Enable Prefetch + * @rmtoll FLASH_ACR PRFTEN LL_FLASH_EnablePrefetch + * @retval None + */ +__STATIC_INLINE void LL_FLASH_EnablePrefetch(void) +{ + SET_BIT(FLASH->ACR, FLASH_ACR_PRFTEN); +} + +/** + * @brief Disable Prefetch + * @rmtoll FLASH_ACR PRFTEN LL_FLASH_DisablePrefetch + * @retval None + */ +__STATIC_INLINE void LL_FLASH_DisablePrefetch(void) +{ + CLEAR_BIT(FLASH->ACR, FLASH_ACR_PRFTEN); +} + +/** + * @brief Check if Prefetch buffer is enabled + * @rmtoll FLASH_ACR PRFTEN LL_FLASH_IsPrefetchEnabled + * @retval State of bit (1 or 0). 
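The Flash latency must match the HCLK frequency and supply voltage before the clock is raised; a sketch assuming 168 MHz at 2.7-3.6 V, for which the reference manual typically requires 5 wait states (check the wait-state table for the actual device and voltage range):

LL_FLASH_SetLatency(LL_FLASH_LATENCY_5);
while (LL_FLASH_GetLatency() != LL_FLASH_LATENCY_5)
{
  /* Wait until the Flash interface has taken the new latency into account. */
}
LL_FLASH_EnablePrefetch();   /* hide wait states on sequential code fetches */

The instruction and data cache helpers defined just below are typically enabled at the same point in the start-up code.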
+ */ +__STATIC_INLINE uint32_t LL_FLASH_IsPrefetchEnabled(void) +{ + return (READ_BIT(FLASH->ACR, FLASH_ACR_PRFTEN) == (FLASH_ACR_PRFTEN)); +} + +/** + * @brief Enable Instruction cache + * @rmtoll FLASH_ACR ICEN LL_FLASH_EnableInstCache + * @retval None + */ +__STATIC_INLINE void LL_FLASH_EnableInstCache(void) +{ + SET_BIT(FLASH->ACR, FLASH_ACR_ICEN); +} + +/** + * @brief Disable Instruction cache + * @rmtoll FLASH_ACR ICEN LL_FLASH_DisableInstCache + * @retval None + */ +__STATIC_INLINE void LL_FLASH_DisableInstCache(void) +{ + CLEAR_BIT(FLASH->ACR, FLASH_ACR_ICEN); +} + +/** + * @brief Enable Data cache + * @rmtoll FLASH_ACR DCEN LL_FLASH_EnableDataCache + * @retval None + */ +__STATIC_INLINE void LL_FLASH_EnableDataCache(void) +{ + SET_BIT(FLASH->ACR, FLASH_ACR_DCEN); +} + +/** + * @brief Disable Data cache + * @rmtoll FLASH_ACR DCEN LL_FLASH_DisableDataCache + * @retval None + */ +__STATIC_INLINE void LL_FLASH_DisableDataCache(void) +{ + CLEAR_BIT(FLASH->ACR, FLASH_ACR_DCEN); +} + +/** + * @brief Enable Instruction cache reset + * @note bit can be written only when the instruction cache is disabled + * @rmtoll FLASH_ACR ICRST LL_FLASH_EnableInstCacheReset + * @retval None + */ +__STATIC_INLINE void LL_FLASH_EnableInstCacheReset(void) +{ + SET_BIT(FLASH->ACR, FLASH_ACR_ICRST); +} + +/** + * @brief Disable Instruction cache reset + * @rmtoll FLASH_ACR ICRST LL_FLASH_DisableInstCacheReset + * @retval None + */ +__STATIC_INLINE void LL_FLASH_DisableInstCacheReset(void) +{ + CLEAR_BIT(FLASH->ACR, FLASH_ACR_ICRST); +} + +/** + * @brief Enable Data cache reset + * @note bit can be written only when the data cache is disabled + * @rmtoll FLASH_ACR DCRST LL_FLASH_EnableDataCacheReset + * @retval None + */ +__STATIC_INLINE void LL_FLASH_EnableDataCacheReset(void) +{ + SET_BIT(FLASH->ACR, FLASH_ACR_DCRST); +} + +/** + * @brief Disable Data cache reset + * @rmtoll FLASH_ACR DCRST LL_FLASH_DisableDataCacheReset + * @retval None + */ +__STATIC_INLINE void LL_FLASH_DisableDataCacheReset(void) +{ + CLEAR_BIT(FLASH->ACR, FLASH_ACR_DCRST); +} + + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* defined (FLASH) || defined (SYSCFG) || defined (DBGMCU) */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_SYSTEM_H */ + + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_tim.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_tim.h new file mode 100644 index 00000000..a11f5619 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_tim.h @@ -0,0 +1,4096 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_tim.h + * @author MCD Application Team + * @brief Header file of TIM LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_TIM_H +#define __STM32F4xx_LL_TIM_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (TIM1) || defined (TIM2) || defined (TIM3) || defined (TIM4) || defined (TIM5) || defined (TIM6) || defined (TIM7) || defined (TIM8) || defined (TIM9) || defined (TIM10) || defined (TIM11) || defined (TIM12) || defined (TIM13) || defined (TIM14) + +/** @defgroup TIM_LL TIM + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @defgroup TIM_LL_Private_Variables TIM Private Variables + * @{ + */ +static const uint8_t OFFSET_TAB_CCMRx[] = +{ + 0x00U, /* 0: TIMx_CH1 */ + 0x00U, /* 1: TIMx_CH1N */ + 0x00U, /* 2: TIMx_CH2 */ + 0x00U, /* 3: TIMx_CH2N */ + 0x04U, /* 4: TIMx_CH3 */ + 0x04U, /* 5: TIMx_CH3N */ + 0x04U /* 6: TIMx_CH4 */ +}; + +static const uint8_t SHIFT_TAB_OCxx[] = +{ + 0U, /* 0: OC1M, OC1FE, OC1PE */ + 0U, /* 1: - NA */ + 8U, /* 2: OC2M, OC2FE, OC2PE */ + 0U, /* 3: - NA */ + 0U, /* 4: OC3M, OC3FE, OC3PE */ + 0U, /* 5: - NA */ + 8U /* 6: OC4M, OC4FE, OC4PE */ +}; + +static const uint8_t SHIFT_TAB_ICxx[] = +{ + 0U, /* 0: CC1S, IC1PSC, IC1F */ + 0U, /* 1: - NA */ + 8U, /* 2: CC2S, IC2PSC, IC2F */ + 0U, /* 3: - NA */ + 0U, /* 4: CC3S, IC3PSC, IC3F */ + 0U, /* 5: - NA */ + 8U /* 6: CC4S, IC4PSC, IC4F */ +}; + +static const uint8_t SHIFT_TAB_CCxP[] = +{ + 0U, /* 0: CC1P */ + 2U, /* 1: CC1NP */ + 4U, /* 2: CC2P */ + 6U, /* 3: CC2NP */ + 8U, /* 4: CC3P */ + 10U, /* 5: CC3NP */ + 12U /* 6: CC4P */ +}; + +static const uint8_t SHIFT_TAB_OISx[] = +{ + 0U, /* 0: OIS1 */ + 1U, /* 1: OIS1N */ + 2U, /* 2: OIS2 */ + 3U, /* 3: OIS2N */ + 4U, /* 4: OIS3 */ + 5U, /* 5: OIS3N */ + 6U /* 6: OIS4 */ +}; +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup TIM_LL_Private_Constants TIM Private Constants + * @{ + */ + + +/* Remap mask definitions */ +#define TIMx_OR_RMP_SHIFT 16U +#define TIMx_OR_RMP_MASK 0x0000FFFFU +#define TIM2_OR_RMP_MASK (TIM_OR_ITR1_RMP << TIMx_OR_RMP_SHIFT) +#define TIM5_OR_RMP_MASK (TIM_OR_TI4_RMP << TIMx_OR_RMP_SHIFT) +#define TIM11_OR_RMP_MASK (TIM_OR_TI1_RMP << TIMx_OR_RMP_SHIFT) + +/* Mask used to set the TDG[x:0] of the DTG bits of the TIMx_BDTR register */ +#define DT_DELAY_1 ((uint8_t)0x7F) +#define DT_DELAY_2 ((uint8_t)0x3F) +#define DT_DELAY_3 ((uint8_t)0x1F) +#define DT_DELAY_4 ((uint8_t)0x1F) + +/* Mask used to set the DTG[7:5] bits of the DTG bits of the TIMx_BDTR register */ +#define DT_RANGE_1 ((uint8_t)0x00) +#define DT_RANGE_2 ((uint8_t)0x80) +#define DT_RANGE_3 ((uint8_t)0xC0) +#define DT_RANGE_4 ((uint8_t)0xE0) + + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup TIM_LL_Private_Macros TIM Private Macros + * @{ + */ +/** @brief Convert channel id into channel index. 
+ * @param __CHANNEL__ This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval none + */ +#define TIM_GET_CHANNEL_INDEX( __CHANNEL__) \ + (((__CHANNEL__) == LL_TIM_CHANNEL_CH1) ? 0U :\ + ((__CHANNEL__) == LL_TIM_CHANNEL_CH1N) ? 1U :\ + ((__CHANNEL__) == LL_TIM_CHANNEL_CH2) ? 2U :\ + ((__CHANNEL__) == LL_TIM_CHANNEL_CH2N) ? 3U :\ + ((__CHANNEL__) == LL_TIM_CHANNEL_CH3) ? 4U :\ + ((__CHANNEL__) == LL_TIM_CHANNEL_CH3N) ? 5U : 6U) + +/** @brief Calculate the deadtime sampling period(in ps). + * @param __TIMCLK__ timer input clock frequency (in Hz). + * @param __CKD__ This parameter can be one of the following values: + * @arg @ref LL_TIM_CLOCKDIVISION_DIV1 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV2 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV4 + * @retval none + */ +#define TIM_CALC_DTS(__TIMCLK__, __CKD__) \ + (((__CKD__) == LL_TIM_CLOCKDIVISION_DIV1) ? ((uint64_t)1000000000000U/(__TIMCLK__)) : \ + ((__CKD__) == LL_TIM_CLOCKDIVISION_DIV2) ? ((uint64_t)1000000000000U/((__TIMCLK__) >> 1U)) : \ + ((uint64_t)1000000000000U/((__TIMCLK__) >> 2U))) +/** + * @} + */ + + +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup TIM_LL_ES_INIT TIM Exported Init structure + * @{ + */ + +/** + * @brief TIM Time Base configuration structure definition. + */ +typedef struct +{ + uint16_t Prescaler; /*!< Specifies the prescaler value used to divide the TIM clock. + This parameter can be a number between Min_Data=0x0000 and Max_Data=0xFFFF. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetPrescaler().*/ + + uint32_t CounterMode; /*!< Specifies the counter mode. + This parameter can be a value of @ref TIM_LL_EC_COUNTERMODE. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetCounterMode().*/ + + uint32_t Autoreload; /*!< Specifies the auto reload value to be loaded into the active + Auto-Reload Register at the next update event. + This parameter must be a number between Min_Data=0x0000 and Max_Data=0xFFFF. + Some timer instances may support 32 bits counters. In that case this parameter must + be a number between 0x0000 and 0xFFFFFFFF. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetAutoReload().*/ + + uint32_t ClockDivision; /*!< Specifies the clock division. + This parameter can be a value of @ref TIM_LL_EC_CLOCKDIVISION. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetClockDivision().*/ + + uint32_t RepetitionCounter; /*!< Specifies the repetition counter value. Each time the RCR downcounter + reaches zero, an update event is generated and counting restarts + from the RCR value (N). + This means in PWM mode that (N+1) corresponds to: + - the number of PWM periods in edge-aligned mode + - the number of half PWM period in center-aligned mode + GP timers: this parameter must be a number between Min_Data = 0x00 and + Max_Data = 0xFF. + Advanced timers: this parameter must be a number between Min_Data = 0x0000 and + Max_Data = 0xFFFF. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetRepetitionCounter().*/ +} LL_TIM_InitTypeDef; + +/** + * @brief TIM Output Compare configuration structure definition. 
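A sketch of how the time base structure is typically filled for a 1 kHz update rate, assuming an 84 MHz timer kernel clock, TIM3 as the instance and the LL_TIM_Init() helper provided by the full LL driver (USE_FULL_LL_DRIVER); none of these three assumptions come from this excerpt:

LL_TIM_InitTypeDef tim_init;

tim_init.Prescaler         = 84U - 1U;                  /* 84 MHz / 84 = 1 MHz counter clock */
tim_init.CounterMode       = LL_TIM_COUNTERMODE_UP;
tim_init.Autoreload        = 1000U - 1U;                /* 1 MHz / 1000 = 1 kHz update event */
tim_init.ClockDivision     = LL_TIM_CLOCKDIVISION_DIV1;
tim_init.RepetitionCounter = 0U;                        /* only meaningful on advanced timers */
(void)LL_TIM_Init(TIM3, &tim_init);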
+ */ +typedef struct +{ + uint32_t OCMode; /*!< Specifies the output mode. + This parameter can be a value of @ref TIM_LL_EC_OCMODE. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_OC_SetMode().*/ + + uint32_t OCState; /*!< Specifies the TIM Output Compare state. + This parameter can be a value of @ref TIM_LL_EC_OCSTATE. + + This feature can be modified afterwards using unitary functions + @ref LL_TIM_CC_EnableChannel() or @ref LL_TIM_CC_DisableChannel().*/ + + uint32_t OCNState; /*!< Specifies the TIM complementary Output Compare state. + This parameter can be a value of @ref TIM_LL_EC_OCSTATE. + + This feature can be modified afterwards using unitary functions + @ref LL_TIM_CC_EnableChannel() or @ref LL_TIM_CC_DisableChannel().*/ + + uint32_t CompareValue; /*!< Specifies the Compare value to be loaded into the Capture Compare Register. + This parameter can be a number between Min_Data=0x0000 and Max_Data=0xFFFF. + + This feature can be modified afterwards using unitary function + LL_TIM_OC_SetCompareCHx (x=1..6).*/ + + uint32_t OCPolarity; /*!< Specifies the output polarity. + This parameter can be a value of @ref TIM_LL_EC_OCPOLARITY. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_OC_SetPolarity().*/ + + uint32_t OCNPolarity; /*!< Specifies the complementary output polarity. + This parameter can be a value of @ref TIM_LL_EC_OCPOLARITY. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_OC_SetPolarity().*/ + + + uint32_t OCIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_LL_EC_OCIDLESTATE. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_OC_SetIdleState().*/ + + uint32_t OCNIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_LL_EC_OCIDLESTATE. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_OC_SetIdleState().*/ +} LL_TIM_OC_InitTypeDef; + +/** + * @brief TIM Input Capture configuration structure definition. + */ + +typedef struct +{ + + uint32_t ICPolarity; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_LL_EC_IC_POLARITY. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPolarity().*/ + + uint32_t ICActiveInput; /*!< Specifies the input. + This parameter can be a value of @ref TIM_LL_EC_ACTIVEINPUT. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetActiveInput().*/ + + uint32_t ICPrescaler; /*!< Specifies the Input Capture Prescaler. + This parameter can be a value of @ref TIM_LL_EC_ICPSC. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPrescaler().*/ + + uint32_t ICFilter; /*!< Specifies the input capture filter. + This parameter can be a value of @ref TIM_LL_EC_IC_FILTER. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetFilter().*/ +} LL_TIM_IC_InitTypeDef; + + +/** + * @brief TIM Encoder interface configuration structure definition. + */ +typedef struct +{ + uint32_t EncoderMode; /*!< Specifies the encoder resolution (x2 or x4). + This parameter can be a value of @ref TIM_LL_EC_ENCODERMODE. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetEncoderMode().*/ + + uint32_t IC1Polarity; /*!< Specifies the active edge of TI1 input. 
+ This parameter can be a value of @ref TIM_LL_EC_IC_POLARITY. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPolarity().*/ + + uint32_t IC1ActiveInput; /*!< Specifies the TI1 input source + This parameter can be a value of @ref TIM_LL_EC_ACTIVEINPUT. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetActiveInput().*/ + + uint32_t IC1Prescaler; /*!< Specifies the TI1 input prescaler value. + This parameter can be a value of @ref TIM_LL_EC_ICPSC. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPrescaler().*/ + + uint32_t IC1Filter; /*!< Specifies the TI1 input filter. + This parameter can be a value of @ref TIM_LL_EC_IC_FILTER. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetFilter().*/ + + uint32_t IC2Polarity; /*!< Specifies the active edge of TI2 input. + This parameter can be a value of @ref TIM_LL_EC_IC_POLARITY. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPolarity().*/ + + uint32_t IC2ActiveInput; /*!< Specifies the TI2 input source + This parameter can be a value of @ref TIM_LL_EC_ACTIVEINPUT. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetActiveInput().*/ + + uint32_t IC2Prescaler; /*!< Specifies the TI2 input prescaler value. + This parameter can be a value of @ref TIM_LL_EC_ICPSC. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPrescaler().*/ + + uint32_t IC2Filter; /*!< Specifies the TI2 input filter. + This parameter can be a value of @ref TIM_LL_EC_IC_FILTER. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetFilter().*/ + +} LL_TIM_ENCODER_InitTypeDef; + +/** + * @brief TIM Hall sensor interface configuration structure definition. + */ +typedef struct +{ + + uint32_t IC1Polarity; /*!< Specifies the active edge of TI1 input. + This parameter can be a value of @ref TIM_LL_EC_IC_POLARITY. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPolarity().*/ + + uint32_t IC1Prescaler; /*!< Specifies the TI1 input prescaler value. + Prescaler must be set to get a maximum counter period longer than the + time interval between 2 consecutive changes on the Hall inputs. + This parameter can be a value of @ref TIM_LL_EC_ICPSC. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetPrescaler().*/ + + uint32_t IC1Filter; /*!< Specifies the TI1 input filter. + This parameter can be a value of + @ref TIM_LL_EC_IC_FILTER. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_IC_SetFilter().*/ + + uint32_t CommutationDelay; /*!< Specifies the compare value to be loaded into the Capture Compare Register. + A positive pulse (TRGO event) is generated with a programmable delay every time + a change occurs on the Hall inputs. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_OC_SetCompareCH2().*/ +} LL_TIM_HALLSENSOR_InitTypeDef; + +/** + * @brief BDTR (Break and Dead Time) structure definition + */ +typedef struct +{ + uint32_t OSSRState; /*!< Specifies the Off-State selection used in Run mode. 
+ This parameter can be a value of @ref TIM_LL_EC_OSSR + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetOffStates() + + @note This bit-field cannot be modified as long as LOCK level 2 has been + programmed. */ + + uint32_t OSSIState; /*!< Specifies the Off-State used in Idle state. + This parameter can be a value of @ref TIM_LL_EC_OSSI + + This feature can be modified afterwards using unitary function + @ref LL_TIM_SetOffStates() + + @note This bit-field cannot be modified as long as LOCK level 2 has been + programmed. */ + + uint32_t LockLevel; /*!< Specifies the LOCK level parameters. + This parameter can be a value of @ref TIM_LL_EC_LOCKLEVEL + + @note The LOCK bits can be written only once after the reset. Once the TIMx_BDTR + register has been written, their content is frozen until the next reset.*/ + + uint8_t DeadTime; /*!< Specifies the delay time between the switching-off and the + switching-on of the outputs. + This parameter can be a number between Min_Data = 0x00 and Max_Data = 0xFF. + + This feature can be modified afterwards using unitary function + @ref LL_TIM_OC_SetDeadTime() + + @note This bit-field can not be modified as long as LOCK level 1, 2 or 3 has been + programmed. */ + + uint16_t BreakState; /*!< Specifies whether the TIM Break input is enabled or not. + This parameter can be a value of @ref TIM_LL_EC_BREAK_ENABLE + + This feature can be modified afterwards using unitary functions + @ref LL_TIM_EnableBRK() or @ref LL_TIM_DisableBRK() + + @note This bit-field can not be modified as long as LOCK level 1 has been + programmed. */ + + uint32_t BreakPolarity; /*!< Specifies the TIM Break Input pin polarity. + This parameter can be a value of @ref TIM_LL_EC_BREAK_POLARITY + + This feature can be modified afterwards using unitary function + @ref LL_TIM_ConfigBRK() + + @note This bit-field can not be modified as long as LOCK level 1 has been + programmed. */ + + uint32_t AutomaticOutput; /*!< Specifies whether the TIM Automatic Output feature is enabled or not. + This parameter can be a value of @ref TIM_LL_EC_AUTOMATICOUTPUT_ENABLE + + This feature can be modified afterwards using unitary functions + @ref LL_TIM_EnableAutomaticOutput() or @ref LL_TIM_DisableAutomaticOutput() + + @note This bit-field can not be modified as long as LOCK level 1 has been + programmed. */ +} LL_TIM_BDTR_InitTypeDef; + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup TIM_LL_Exported_Constants TIM Exported Constants + * @{ + */ + +/** @defgroup TIM_LL_EC_GET_FLAG Get Flags Defines + * @brief Flags defines which can be used with LL_TIM_ReadReg function. 
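A sketch of a break and dead-time configuration for an advanced timer, assuming the LL_TIM_BDTR_Init() helper from the full LL driver and the usual LL_TIM_OSSR_/LL_TIM_OSSI_/LL_TIM_BREAK_POLARITY_ constant names, which are not visible in this excerpt:

LL_TIM_BDTR_InitTypeDef bdtr_init;

bdtr_init.OSSRState       = LL_TIM_OSSR_ENABLE;
bdtr_init.OSSIState       = LL_TIM_OSSI_ENABLE;
bdtr_init.LockLevel       = LL_TIM_LOCKLEVEL_OFF;
bdtr_init.DeadTime        = 100U;                         /* raw DTG value; see __LL_TIM_CALC_DEADTIME further down */
bdtr_init.BreakState      = LL_TIM_BREAK_ENABLE;
bdtr_init.BreakPolarity   = LL_TIM_BREAK_POLARITY_HIGH;
bdtr_init.AutomaticOutput = LL_TIM_AUTOMATICOUTPUT_DISABLE;
(void)LL_TIM_BDTR_Init(TIM1, &bdtr_init);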
+ * @{ + */ +#define LL_TIM_SR_UIF TIM_SR_UIF /*!< Update interrupt flag */ +#define LL_TIM_SR_CC1IF TIM_SR_CC1IF /*!< Capture/compare 1 interrupt flag */ +#define LL_TIM_SR_CC2IF TIM_SR_CC2IF /*!< Capture/compare 2 interrupt flag */ +#define LL_TIM_SR_CC3IF TIM_SR_CC3IF /*!< Capture/compare 3 interrupt flag */ +#define LL_TIM_SR_CC4IF TIM_SR_CC4IF /*!< Capture/compare 4 interrupt flag */ +#define LL_TIM_SR_COMIF TIM_SR_COMIF /*!< COM interrupt flag */ +#define LL_TIM_SR_TIF TIM_SR_TIF /*!< Trigger interrupt flag */ +#define LL_TIM_SR_BIF TIM_SR_BIF /*!< Break interrupt flag */ +#define LL_TIM_SR_CC1OF TIM_SR_CC1OF /*!< Capture/Compare 1 overcapture flag */ +#define LL_TIM_SR_CC2OF TIM_SR_CC2OF /*!< Capture/Compare 2 overcapture flag */ +#define LL_TIM_SR_CC3OF TIM_SR_CC3OF /*!< Capture/Compare 3 overcapture flag */ +#define LL_TIM_SR_CC4OF TIM_SR_CC4OF /*!< Capture/Compare 4 overcapture flag */ +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup TIM_LL_EC_BREAK_ENABLE Break Enable + * @{ + */ +#define LL_TIM_BREAK_DISABLE 0x00000000U /*!< Break function disabled */ +#define LL_TIM_BREAK_ENABLE TIM_BDTR_BKE /*!< Break function enabled */ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_AUTOMATICOUTPUT_ENABLE Automatic output enable + * @{ + */ +#define LL_TIM_AUTOMATICOUTPUT_DISABLE 0x00000000U /*!< MOE can be set only by software */ +#define LL_TIM_AUTOMATICOUTPUT_ENABLE TIM_BDTR_AOE /*!< MOE can be set by software or automatically at the next update event */ +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** @defgroup TIM_LL_EC_IT IT Defines + * @brief IT defines which can be used with LL_TIM_ReadReg and LL_TIM_WriteReg functions. + * @{ + */ +#define LL_TIM_DIER_UIE TIM_DIER_UIE /*!< Update interrupt enable */ +#define LL_TIM_DIER_CC1IE TIM_DIER_CC1IE /*!< Capture/compare 1 interrupt enable */ +#define LL_TIM_DIER_CC2IE TIM_DIER_CC2IE /*!< Capture/compare 2 interrupt enable */ +#define LL_TIM_DIER_CC3IE TIM_DIER_CC3IE /*!< Capture/compare 3 interrupt enable */ +#define LL_TIM_DIER_CC4IE TIM_DIER_CC4IE /*!< Capture/compare 4 interrupt enable */ +#define LL_TIM_DIER_COMIE TIM_DIER_COMIE /*!< COM interrupt enable */ +#define LL_TIM_DIER_TIE TIM_DIER_TIE /*!< Trigger interrupt enable */ +#define LL_TIM_DIER_BIE TIM_DIER_BIE /*!< Break interrupt enable */ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_UPDATESOURCE Update Source + * @{ + */ +#define LL_TIM_UPDATESOURCE_REGULAR 0x00000000U /*!< Counter overflow/underflow, Setting the UG bit or Update generation through the slave mode controller generates an update request */ +#define LL_TIM_UPDATESOURCE_COUNTER TIM_CR1_URS /*!< Only counter overflow/underflow generates an update request */ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_ONEPULSEMODE One Pulse Mode + * @{ + */ +#define LL_TIM_ONEPULSEMODE_SINGLE TIM_CR1_OPM /*!< Counter stops counting at the next update event */ +#define LL_TIM_ONEPULSEMODE_REPETITIVE 0x00000000U /*!< Counter is not stopped at update event */ +/** + * @} + */ + +/** @defgroup TIM_LL_EC_COUNTERMODE Counter Mode + * @{ + */ +#define LL_TIM_COUNTERMODE_UP 0x00000000U /*!< Counter used as upcounter */ +#define LL_TIM_COUNTERMODE_DOWN TIM_CR1_DIR /*!< Counter used as downcounter */ +#define LL_TIM_COUNTERMODE_CENTER_DOWN TIM_CR1_CMS_0 /*!< The counter counts up and down alternatively. Output compare interrupt flags of output channels are set only when the counter is counting down. */ +#define LL_TIM_COUNTERMODE_CENTER_UP TIM_CR1_CMS_1 /*!< The counter counts up and down alternatively. 
Output compare interrupt flags of output channels are set only when the counter is counting up */
+#define LL_TIM_COUNTERMODE_CENTER_UP_DOWN TIM_CR1_CMS /*!< The counter counts up and down alternatively. Output compare interrupt flags of output channels are set only when the counter is counting up or down. */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_LL_EC_CLOCKDIVISION Clock Division
+ * @{
+ */
+#define LL_TIM_CLOCKDIVISION_DIV1 0x00000000U /*!< tDTS=tCK_INT */
+#define LL_TIM_CLOCKDIVISION_DIV2 TIM_CR1_CKD_0 /*!< tDTS=2*tCK_INT */
+#define LL_TIM_CLOCKDIVISION_DIV4 TIM_CR1_CKD_1 /*!< tDTS=4*tCK_INT */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_LL_EC_COUNTERDIRECTION Counter Direction
+ * @{
+ */
+#define LL_TIM_COUNTERDIRECTION_UP 0x00000000U /*!< Timer counter counts up */
+#define LL_TIM_COUNTERDIRECTION_DOWN TIM_CR1_DIR /*!< Timer counter counts down */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_LL_EC_CCUPDATESOURCE Capture Compare Update Source
+ * @{
+ */
+#define LL_TIM_CCUPDATESOURCE_COMG_ONLY 0x00000000U /*!< Capture/compare control bits are updated by setting the COMG bit only */
+#define LL_TIM_CCUPDATESOURCE_COMG_AND_TRGI TIM_CR2_CCUS /*!< Capture/compare control bits are updated by setting the COMG bit or when a rising edge occurs on trigger input (TRGI) */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_LL_EC_CCDMAREQUEST Capture Compare DMA Request
+ * @{
+ */
+#define LL_TIM_CCDMAREQUEST_CC 0x00000000U /*!< CCx DMA request sent when CCx event occurs */
+#define LL_TIM_CCDMAREQUEST_UPDATE TIM_CR2_CCDS /*!< CCx DMA requests sent when update event occurs */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_LL_EC_LOCKLEVEL Lock Level
+ * @{
+ */
+#define LL_TIM_LOCKLEVEL_OFF 0x00000000U /*!< LOCK OFF - No bit is write protected */
+#define LL_TIM_LOCKLEVEL_1 TIM_BDTR_LOCK_0 /*!< LOCK Level 1 */
+#define LL_TIM_LOCKLEVEL_2 TIM_BDTR_LOCK_1 /*!< LOCK Level 2 */
+#define LL_TIM_LOCKLEVEL_3 TIM_BDTR_LOCK /*!< LOCK Level 3 */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_LL_EC_CHANNEL Channel
+ * @{
+ */
+#define LL_TIM_CHANNEL_CH1 TIM_CCER_CC1E /*!< Timer input/output channel 1 */
+#define LL_TIM_CHANNEL_CH1N TIM_CCER_CC1NE /*!< Timer complementary output channel 1 */
+#define LL_TIM_CHANNEL_CH2 TIM_CCER_CC2E /*!< Timer input/output channel 2 */
+#define LL_TIM_CHANNEL_CH2N TIM_CCER_CC2NE /*!< Timer complementary output channel 2 */
+#define LL_TIM_CHANNEL_CH3 TIM_CCER_CC3E /*!< Timer input/output channel 3 */
+#define LL_TIM_CHANNEL_CH3N TIM_CCER_CC3NE /*!< Timer complementary output channel 3 */
+#define LL_TIM_CHANNEL_CH4 TIM_CCER_CC4E /*!< Timer input/output channel 4 */
+/**
+ * @}
+ */
+
+#if defined(USE_FULL_LL_DRIVER)
+/** @defgroup TIM_LL_EC_OCSTATE Output Configuration State
+ * @{
+ */
+#define LL_TIM_OCSTATE_DISABLE 0x00000000U /*!< OCx is not active */
+#define LL_TIM_OCSTATE_ENABLE TIM_CCER_CC1E /*!< OCx signal is output on the corresponding output pin */
+/**
+ * @}
+ */
+#endif /* USE_FULL_LL_DRIVER */
+
+/** @defgroup TIM_LL_EC_OCMODE Output Configuration Mode
+ * @{
+ */
+#define LL_TIM_OCMODE_FROZEN 0x00000000U /*!< The comparison between the output compare register TIMx_CCRy and the counter TIMx_CNT has no effect on the output channel level */
+#define LL_TIM_OCMODE_ACTIVE TIM_CCMR1_OC1M_0 /*!< OCyREF is forced high on compare match */
+#define LL_TIM_OCMODE_INACTIVE TIM_CCMR1_OC1M_1 /*!< OCyREF is forced low on compare match */
+#define LL_TIM_OCMODE_TOGGLE (TIM_CCMR1_OC1M_1 | TIM_CCMR1_OC1M_0) /*!< OCyREF toggles on compare match */
+#define LL_TIM_OCMODE_FORCED_INACTIVE TIM_CCMR1_OC1M_2 /*!< OCyREF is forced low */
+#define LL_TIM_OCMODE_FORCED_ACTIVE (TIM_CCMR1_OC1M_2 | TIM_CCMR1_OC1M_0) /*!< OCyREF is forced high */
+#define LL_TIM_OCMODE_PWM1 (TIM_CCMR1_OC1M_2 | TIM_CCMR1_OC1M_1) /*!< In upcounting, channel y is active as long as TIMx_CNT < TIMx_CCRy else inactive. In downcounting, channel y is inactive as long as TIMx_CNT > TIMx_CCRy else active. */
+#define LL_TIM_OCMODE_PWM2 (TIM_CCMR1_OC1M_2 | TIM_CCMR1_OC1M_1 | TIM_CCMR1_OC1M_0) /*!< In upcounting, channel y is inactive as long as TIMx_CNT < TIMx_CCRy else active. In downcounting, channel y is active as long as TIMx_CNT > TIMx_CCRy else inactive */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_LL_EC_OCPOLARITY Output Configuration Polarity
+ * @{
+ */
+#define LL_TIM_OCPOLARITY_HIGH 0x00000000U /*!< OCx active high */
+#define LL_TIM_OCPOLARITY_LOW TIM_CCER_CC1P /*!< OCx active low */
+/**
+ * @}
+ */
+
+/** @defgroup TIM_LL_EC_OCIDLESTATE Output Configuration Idle State
+ * @{
+ */
+#define LL_TIM_OCIDLESTATE_LOW
0x00000000U /*!__REG__, (__VALUE__)) + +/** + * @brief Read a value in TIM register. + * @param __INSTANCE__ TIM Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_TIM_ReadReg(__INSTANCE__, __REG__) READ_REG((__INSTANCE__)->__REG__) +/** + * @} + */ + +/** + * @brief HELPER macro calculating DTG[0:7] in the TIMx_BDTR register to achieve the requested dead time duration. + * @note ex: @ref __LL_TIM_CALC_DEADTIME (80000000, @ref LL_TIM_GetClockDivision (), 120); + * @param __TIMCLK__ timer input clock frequency (in Hz) + * @param __CKD__ This parameter can be one of the following values: + * @arg @ref LL_TIM_CLOCKDIVISION_DIV1 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV2 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV4 + * @param __DT__ deadtime duration (in ns) + * @retval DTG[0:7] + */ +#define __LL_TIM_CALC_DEADTIME(__TIMCLK__, __CKD__, __DT__) \ + ( (((uint64_t)((__DT__)*1000U)) < ((DT_DELAY_1+1U) * TIM_CALC_DTS((__TIMCLK__), (__CKD__)))) ? \ + (uint8_t)(((uint64_t)((__DT__)*1000U) / TIM_CALC_DTS((__TIMCLK__), (__CKD__))) & DT_DELAY_1) : \ + (((uint64_t)((__DT__)*1000U)) < ((64U + (DT_DELAY_2+1U)) * 2U * TIM_CALC_DTS((__TIMCLK__), (__CKD__)))) ? \ + (uint8_t)(DT_RANGE_2 | ((uint8_t)((uint8_t)((((uint64_t)((__DT__)*1000U))/ TIM_CALC_DTS((__TIMCLK__), \ + (__CKD__))) >> 1U) - (uint8_t) 64) & DT_DELAY_2)) :\ + (((uint64_t)((__DT__)*1000U)) < ((32U + (DT_DELAY_3+1U)) * 8U * TIM_CALC_DTS((__TIMCLK__), (__CKD__)))) ? \ + (uint8_t)(DT_RANGE_3 | ((uint8_t)((uint8_t)(((((uint64_t)(__DT__)*1000U))/ TIM_CALC_DTS((__TIMCLK__), \ + (__CKD__))) >> 3U) - (uint8_t) 32) & DT_DELAY_3)) :\ + (((uint64_t)((__DT__)*1000U)) < ((32U + (DT_DELAY_4+1U)) * 16U * TIM_CALC_DTS((__TIMCLK__), (__CKD__)))) ? \ + (uint8_t)(DT_RANGE_4 | ((uint8_t)((uint8_t)(((((uint64_t)(__DT__)*1000U))/ TIM_CALC_DTS((__TIMCLK__), \ + (__CKD__))) >> 4U) - (uint8_t) 32) & DT_DELAY_4)) :\ + 0U) + +/** + * @brief HELPER macro calculating the prescaler value to achieve the required counter clock frequency. + * @note ex: @ref __LL_TIM_CALC_PSC (80000000, 1000000); + * @param __TIMCLK__ timer input clock frequency (in Hz) + * @param __CNTCLK__ counter clock frequency (in Hz) + * @retval Prescaler value (between Min_Data=0 and Max_Data=65535) + */ +#define __LL_TIM_CALC_PSC(__TIMCLK__, __CNTCLK__) \ + (((__TIMCLK__) >= (__CNTCLK__)) ? (uint32_t)((((__TIMCLK__) + (__CNTCLK__)/2U)/(__CNTCLK__)) - 1U) : 0U) + +/** + * @brief HELPER macro calculating the auto-reload value to achieve the required output signal frequency. + * @note ex: @ref __LL_TIM_CALC_ARR (1000000, @ref LL_TIM_GetPrescaler (), 10000); + * @param __TIMCLK__ timer input clock frequency (in Hz) + * @param __PSC__ prescaler + * @param __FREQ__ output signal frequency (in Hz) + * @retval Auto-reload value (between Min_Data=0 and Max_Data=65535) + */ +#define __LL_TIM_CALC_ARR(__TIMCLK__, __PSC__, __FREQ__) \ + ((((__TIMCLK__)/((__PSC__) + 1U)) >= (__FREQ__)) ? (((__TIMCLK__)/((__FREQ__) * ((__PSC__) + 1U))) - 1U) : 0U) + +/** + * @brief HELPER macro calculating the compare value required to achieve the required timer output compare + * active/inactive delay. 
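The helper macros above are plain integer arithmetic; a sketch of what they evaluate to, assuming an 84 MHz timer kernel clock (the clock value depends on the RCC configuration and is not taken from this excerpt):

uint32_t psc = __LL_TIM_CALC_PSC(84000000U, 1000000U);      /* -> 83, i.e. a 1 MHz counter clock */
uint32_t arr = __LL_TIM_CALC_ARR(84000000U, psc, 20000U);   /* -> 49, i.e. a 20 kHz update rate  */
uint32_t cmp = __LL_TIM_CALC_DELAY(84000000U, psc, 100U);   /* -> 100 counts for a 100 us delay  */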
+ * @note ex: @ref __LL_TIM_CALC_DELAY (1000000, @ref LL_TIM_GetPrescaler (), 10); + * @param __TIMCLK__ timer input clock frequency (in Hz) + * @param __PSC__ prescaler + * @param __DELAY__ timer output compare active/inactive delay (in us) + * @retval Compare value (between Min_Data=0 and Max_Data=65535) + */ +#define __LL_TIM_CALC_DELAY(__TIMCLK__, __PSC__, __DELAY__) \ + ((uint32_t)(((uint64_t)(__TIMCLK__) * (uint64_t)(__DELAY__)) \ + / ((uint64_t)1000000U * (uint64_t)((__PSC__) + 1U)))) + +/** + * @brief HELPER macro calculating the auto-reload value to achieve the required pulse duration + * (when the timer operates in one pulse mode). + * @note ex: @ref __LL_TIM_CALC_PULSE (1000000, @ref LL_TIM_GetPrescaler (), 10, 20); + * @param __TIMCLK__ timer input clock frequency (in Hz) + * @param __PSC__ prescaler + * @param __DELAY__ timer output compare active/inactive delay (in us) + * @param __PULSE__ pulse duration (in us) + * @retval Auto-reload value (between Min_Data=0 and Max_Data=65535) + */ +#define __LL_TIM_CALC_PULSE(__TIMCLK__, __PSC__, __DELAY__, __PULSE__) \ + ((uint32_t)(__LL_TIM_CALC_DELAY((__TIMCLK__), (__PSC__), (__PULSE__)) \ + + __LL_TIM_CALC_DELAY((__TIMCLK__), (__PSC__), (__DELAY__)))) + +/** + * @brief HELPER macro retrieving the ratio of the input capture prescaler + * @note ex: @ref __LL_TIM_GET_ICPSC_RATIO (@ref LL_TIM_IC_GetPrescaler ()); + * @param __ICPSC__ This parameter can be one of the following values: + * @arg @ref LL_TIM_ICPSC_DIV1 + * @arg @ref LL_TIM_ICPSC_DIV2 + * @arg @ref LL_TIM_ICPSC_DIV4 + * @arg @ref LL_TIM_ICPSC_DIV8 + * @retval Input capture prescaler ratio (1, 2, 4 or 8) + */ +#define __LL_TIM_GET_ICPSC_RATIO(__ICPSC__) \ + ((uint32_t)(0x01U << (((__ICPSC__) >> 16U) >> TIM_CCMR1_IC1PSC_Pos))) + + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup TIM_LL_Exported_Functions TIM Exported Functions + * @{ + */ + +/** @defgroup TIM_LL_EF_Time_Base Time Base configuration + * @{ + */ +/** + * @brief Enable timer counter. + * @rmtoll CR1 CEN LL_TIM_EnableCounter + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableCounter(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->CR1, TIM_CR1_CEN); +} + +/** + * @brief Disable timer counter. + * @rmtoll CR1 CEN LL_TIM_DisableCounter + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableCounter(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->CR1, TIM_CR1_CEN); +} + +/** + * @brief Indicates whether the timer counter is enabled. + * @rmtoll CR1 CEN LL_TIM_IsEnabledCounter + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledCounter(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->CR1, TIM_CR1_CEN) == (TIM_CR1_CEN)) ? 1UL : 0UL); +} + +/** + * @brief Enable update event generation. + * @rmtoll CR1 UDIS LL_TIM_EnableUpdateEvent + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableUpdateEvent(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->CR1, TIM_CR1_UDIS); +} + +/** + * @brief Disable update event generation. + * @rmtoll CR1 UDIS LL_TIM_DisableUpdateEvent + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableUpdateEvent(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->CR1, TIM_CR1_UDIS); +} + +/** + * @brief Indicates whether update event generation is enabled. + * @rmtoll CR1 UDIS LL_TIM_IsEnabledUpdateEvent + * @param TIMx Timer instance + * @retval Inverted state of bit (0 or 1). 
+ */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledUpdateEvent(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->CR1, TIM_CR1_UDIS) == (uint32_t)RESET) ? 1UL : 0UL); +} + +/** + * @brief Set update event source + * @note Update event source set to LL_TIM_UPDATESOURCE_REGULAR: any of the following events + * generate an update interrupt or DMA request if enabled: + * - Counter overflow/underflow + * - Setting the UG bit + * - Update generation through the slave mode controller + * @note Update event source set to LL_TIM_UPDATESOURCE_COUNTER: only counter + * overflow/underflow generates an update interrupt or DMA request if enabled. + * @rmtoll CR1 URS LL_TIM_SetUpdateSource + * @param TIMx Timer instance + * @param UpdateSource This parameter can be one of the following values: + * @arg @ref LL_TIM_UPDATESOURCE_REGULAR + * @arg @ref LL_TIM_UPDATESOURCE_COUNTER + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetUpdateSource(TIM_TypeDef *TIMx, uint32_t UpdateSource) +{ + MODIFY_REG(TIMx->CR1, TIM_CR1_URS, UpdateSource); +} + +/** + * @brief Get actual event update source + * @rmtoll CR1 URS LL_TIM_GetUpdateSource + * @param TIMx Timer instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_UPDATESOURCE_REGULAR + * @arg @ref LL_TIM_UPDATESOURCE_COUNTER + */ +__STATIC_INLINE uint32_t LL_TIM_GetUpdateSource(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_BIT(TIMx->CR1, TIM_CR1_URS)); +} + +/** + * @brief Set one pulse mode (one shot v.s. repetitive). + * @rmtoll CR1 OPM LL_TIM_SetOnePulseMode + * @param TIMx Timer instance + * @param OnePulseMode This parameter can be one of the following values: + * @arg @ref LL_TIM_ONEPULSEMODE_SINGLE + * @arg @ref LL_TIM_ONEPULSEMODE_REPETITIVE + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetOnePulseMode(TIM_TypeDef *TIMx, uint32_t OnePulseMode) +{ + MODIFY_REG(TIMx->CR1, TIM_CR1_OPM, OnePulseMode); +} + +/** + * @brief Get actual one pulse mode. + * @rmtoll CR1 OPM LL_TIM_GetOnePulseMode + * @param TIMx Timer instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_ONEPULSEMODE_SINGLE + * @arg @ref LL_TIM_ONEPULSEMODE_REPETITIVE + */ +__STATIC_INLINE uint32_t LL_TIM_GetOnePulseMode(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_BIT(TIMx->CR1, TIM_CR1_OPM)); +} + +/** + * @brief Set the timer counter counting mode. + * @note Macro IS_TIM_COUNTER_MODE_SELECT_INSTANCE(TIMx) can be used to + * check whether or not the counter mode selection feature is supported + * by a timer instance. + * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * @rmtoll CR1 DIR LL_TIM_SetCounterMode\n + * CR1 CMS LL_TIM_SetCounterMode + * @param TIMx Timer instance + * @param CounterMode This parameter can be one of the following values: + * @arg @ref LL_TIM_COUNTERMODE_UP + * @arg @ref LL_TIM_COUNTERMODE_DOWN + * @arg @ref LL_TIM_COUNTERMODE_CENTER_UP + * @arg @ref LL_TIM_COUNTERMODE_CENTER_DOWN + * @arg @ref LL_TIM_COUNTERMODE_CENTER_UP_DOWN + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetCounterMode(TIM_TypeDef *TIMx, uint32_t CounterMode) +{ + MODIFY_REG(TIMx->CR1, (TIM_CR1_DIR | TIM_CR1_CMS), CounterMode); +} + +/** + * @brief Get actual counter mode. + * @note Macro IS_TIM_COUNTER_MODE_SELECT_INSTANCE(TIMx) can be used to + * check whether or not the counter mode selection feature is supported + * by a timer instance. 
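A short sketch combining the CR1 accessors above, with TIM4 as an arbitrary example instance:

/* Let only counter overflow/underflow raise the update request, so that
   re-initialising the counter via the UG bit does not, and count up
   repetitively (i.e. not in one-pulse mode). */
LL_TIM_SetUpdateSource(TIM4, LL_TIM_UPDATESOURCE_COUNTER);
LL_TIM_SetCounterMode(TIM4, LL_TIM_COUNTERMODE_UP);
LL_TIM_SetOnePulseMode(TIM4, LL_TIM_ONEPULSEMODE_REPETITIVE);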
+ * @rmtoll CR1 DIR LL_TIM_GetCounterMode\n + * CR1 CMS LL_TIM_GetCounterMode + * @param TIMx Timer instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_COUNTERMODE_UP + * @arg @ref LL_TIM_COUNTERMODE_DOWN + * @arg @ref LL_TIM_COUNTERMODE_CENTER_UP + * @arg @ref LL_TIM_COUNTERMODE_CENTER_DOWN + * @arg @ref LL_TIM_COUNTERMODE_CENTER_UP_DOWN + */ +__STATIC_INLINE uint32_t LL_TIM_GetCounterMode(const TIM_TypeDef *TIMx) +{ + uint32_t counter_mode; + + counter_mode = (uint32_t)(READ_BIT(TIMx->CR1, TIM_CR1_CMS)); + + if (counter_mode == 0U) + { + counter_mode = (uint32_t)(READ_BIT(TIMx->CR1, TIM_CR1_DIR)); + } + + return counter_mode; +} + +/** + * @brief Enable auto-reload (ARR) preload. + * @rmtoll CR1 ARPE LL_TIM_EnableARRPreload + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableARRPreload(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->CR1, TIM_CR1_ARPE); +} + +/** + * @brief Disable auto-reload (ARR) preload. + * @rmtoll CR1 ARPE LL_TIM_DisableARRPreload + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableARRPreload(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->CR1, TIM_CR1_ARPE); +} + +/** + * @brief Indicates whether auto-reload (ARR) preload is enabled. + * @rmtoll CR1 ARPE LL_TIM_IsEnabledARRPreload + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledARRPreload(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->CR1, TIM_CR1_ARPE) == (TIM_CR1_ARPE)) ? 1UL : 0UL); +} + +/** + * @brief Set the division ratio between the timer clock and the sampling clock used by the dead-time generators + * (when supported) and the digital filters. + * @note Macro IS_TIM_CLOCK_DIVISION_INSTANCE(TIMx) can be used to check + * whether or not the clock division feature is supported by the timer + * instance. + * @rmtoll CR1 CKD LL_TIM_SetClockDivision + * @param TIMx Timer instance + * @param ClockDivision This parameter can be one of the following values: + * @arg @ref LL_TIM_CLOCKDIVISION_DIV1 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV2 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetClockDivision(TIM_TypeDef *TIMx, uint32_t ClockDivision) +{ + MODIFY_REG(TIMx->CR1, TIM_CR1_CKD, ClockDivision); +} + +/** + * @brief Get the actual division ratio between the timer clock and the sampling clock used by the dead-time + * generators (when supported) and the digital filters. + * @note Macro IS_TIM_CLOCK_DIVISION_INSTANCE(TIMx) can be used to check + * whether or not the clock division feature is supported by the timer + * instance. + * @rmtoll CR1 CKD LL_TIM_GetClockDivision + * @param TIMx Timer instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_CLOCKDIVISION_DIV1 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV2 + * @arg @ref LL_TIM_CLOCKDIVISION_DIV4 + */ +__STATIC_INLINE uint32_t LL_TIM_GetClockDivision(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_BIT(TIMx->CR1, TIM_CR1_CKD)); +} + +/** + * @brief Set the counter value. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. 
+ * @rmtoll CNT CNT LL_TIM_SetCounter + * @param TIMx Timer instance + * @param Counter Counter value (between Min_Data=0 and Max_Data=0xFFFF or 0xFFFFFFFF) + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetCounter(TIM_TypeDef *TIMx, uint32_t Counter) +{ + WRITE_REG(TIMx->CNT, Counter); +} + +/** + * @brief Get the counter value. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @rmtoll CNT CNT LL_TIM_GetCounter + * @param TIMx Timer instance + * @retval Counter value (between Min_Data=0 and Max_Data=0xFFFF or 0xFFFFFFFF) + */ +__STATIC_INLINE uint32_t LL_TIM_GetCounter(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CNT)); +} + +/** + * @brief Get the current direction of the counter + * @rmtoll CR1 DIR LL_TIM_GetDirection + * @param TIMx Timer instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_COUNTERDIRECTION_UP + * @arg @ref LL_TIM_COUNTERDIRECTION_DOWN + */ +__STATIC_INLINE uint32_t LL_TIM_GetDirection(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_BIT(TIMx->CR1, TIM_CR1_DIR)); +} + +/** + * @brief Set the prescaler value. + * @note The counter clock frequency CK_CNT is equal to fCK_PSC / (PSC[15:0] + 1). + * @note The prescaler can be changed on the fly as this control register is buffered. The new + * prescaler ratio is taken into account at the next update event. + * @note Helper macro @ref __LL_TIM_CALC_PSC can be used to calculate the Prescaler parameter + * @rmtoll PSC PSC LL_TIM_SetPrescaler + * @param TIMx Timer instance + * @param Prescaler between Min_Data=0 and Max_Data=65535 + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetPrescaler(TIM_TypeDef *TIMx, uint32_t Prescaler) +{ + WRITE_REG(TIMx->PSC, Prescaler); +} + +/** + * @brief Get the prescaler value. + * @rmtoll PSC PSC LL_TIM_GetPrescaler + * @param TIMx Timer instance + * @retval Prescaler value between Min_Data=0 and Max_Data=65535 + */ +__STATIC_INLINE uint32_t LL_TIM_GetPrescaler(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->PSC)); +} + +/** + * @brief Set the auto-reload value. + * @note The counter is blocked while the auto-reload value is null. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Helper macro @ref __LL_TIM_CALC_ARR can be used to calculate the AutoReload parameter + * @rmtoll ARR ARR LL_TIM_SetAutoReload + * @param TIMx Timer instance + * @param AutoReload between Min_Data=0 and Max_Data=65535 + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetAutoReload(TIM_TypeDef *TIMx, uint32_t AutoReload) +{ + WRITE_REG(TIMx->ARR, AutoReload); +} + +/** + * @brief Get the auto-reload value. + * @rmtoll ARR ARR LL_TIM_GetAutoReload + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @param TIMx Timer instance + * @retval Auto-reload value + */ +__STATIC_INLINE uint32_t LL_TIM_GetAutoReload(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->ARR)); +} + +/** + * @brief Set the repetition counter value. + * @note Macro IS_TIM_REPETITION_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a repetition counter. + * @rmtoll RCR REP LL_TIM_SetRepetitionCounter + * @param TIMx Timer instance + * @param RepetitionCounter between Min_Data=0 and Max_Data=255 or 65535 for advanced timer. 
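Putting the accessors above together, a register-level time base sketch for a 1 kHz update rate; the 84 MHz kernel clock and the TIM3 instance are assumptions carried over from the earlier sketches:

LL_TIM_EnableARRPreload(TIM3);                        /* take new ARR values at the update event */
LL_TIM_SetPrescaler(TIM3, __LL_TIM_CALC_PSC(84000000U, 1000000U));
LL_TIM_SetAutoReload(TIM3, __LL_TIM_CALC_ARR(84000000U, LL_TIM_GetPrescaler(TIM3), 1000U));
LL_TIM_EnableCounter(TIM3);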
+ * @retval None + */ +__STATIC_INLINE void LL_TIM_SetRepetitionCounter(TIM_TypeDef *TIMx, uint32_t RepetitionCounter) +{ + WRITE_REG(TIMx->RCR, RepetitionCounter); +} + +/** + * @brief Get the repetition counter value. + * @note Macro IS_TIM_REPETITION_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a repetition counter. + * @rmtoll RCR REP LL_TIM_GetRepetitionCounter + * @param TIMx Timer instance + * @retval Repetition counter value + */ +__STATIC_INLINE uint32_t LL_TIM_GetRepetitionCounter(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->RCR)); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_Capture_Compare Capture Compare configuration + * @{ + */ +/** + * @brief Enable the capture/compare control bits (CCxE, CCxNE and OCxM) preload. + * @note CCxE, CCxNE and OCxM bits are preloaded, after having been written, + * they are updated only when a commutation event (COM) occurs. + * @note Only on channels that have a complementary output. + * @note Macro IS_TIM_COMMUTATION_EVENT_INSTANCE(TIMx) can be used to check + * whether or not a timer instance is able to generate a commutation event. + * @rmtoll CR2 CCPC LL_TIM_CC_EnablePreload + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_CC_EnablePreload(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->CR2, TIM_CR2_CCPC); +} + +/** + * @brief Disable the capture/compare control bits (CCxE, CCxNE and OCxM) preload. + * @note Macro IS_TIM_COMMUTATION_EVENT_INSTANCE(TIMx) can be used to check + * whether or not a timer instance is able to generate a commutation event. + * @rmtoll CR2 CCPC LL_TIM_CC_DisablePreload + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_CC_DisablePreload(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->CR2, TIM_CR2_CCPC); +} + +/** + * @brief Indicates whether the capture/compare control bits (CCxE, CCxNE and OCxM) preload is enabled. + * @rmtoll CR2 CCPC LL_TIM_CC_IsEnabledPreload + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_CC_IsEnabledPreload(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->CR2, TIM_CR2_CCPC) == (TIM_CR2_CCPC)) ? 1UL : 0UL); +} + +/** + * @brief Set the updated source of the capture/compare control bits (CCxE, CCxNE and OCxM). + * @note Macro IS_TIM_COMMUTATION_EVENT_INSTANCE(TIMx) can be used to check + * whether or not a timer instance is able to generate a commutation event. + * @rmtoll CR2 CCUS LL_TIM_CC_SetUpdate + * @param TIMx Timer instance + * @param CCUpdateSource This parameter can be one of the following values: + * @arg @ref LL_TIM_CCUPDATESOURCE_COMG_ONLY + * @arg @ref LL_TIM_CCUPDATESOURCE_COMG_AND_TRGI + * @retval None + */ +__STATIC_INLINE void LL_TIM_CC_SetUpdate(TIM_TypeDef *TIMx, uint32_t CCUpdateSource) +{ + MODIFY_REG(TIMx->CR2, TIM_CR2_CCUS, CCUpdateSource); +} + +/** + * @brief Set the trigger of the capture/compare DMA request. + * @rmtoll CR2 CCDS LL_TIM_CC_SetDMAReqTrigger + * @param TIMx Timer instance + * @param DMAReqTrigger This parameter can be one of the following values: + * @arg @ref LL_TIM_CCDMAREQUEST_CC + * @arg @ref LL_TIM_CCDMAREQUEST_UPDATE + * @retval None + */ +__STATIC_INLINE void LL_TIM_CC_SetDMAReqTrigger(TIM_TypeDef *TIMx, uint32_t DMAReqTrigger) +{ + MODIFY_REG(TIMx->CR2, TIM_CR2_CCDS, DMAReqTrigger); +} + +/** + * @brief Get actual trigger of the capture/compare DMA request. 
+ * @rmtoll CR2 CCDS LL_TIM_CC_GetDMAReqTrigger + * @param TIMx Timer instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_CCDMAREQUEST_CC + * @arg @ref LL_TIM_CCDMAREQUEST_UPDATE + */ +__STATIC_INLINE uint32_t LL_TIM_CC_GetDMAReqTrigger(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_BIT(TIMx->CR2, TIM_CR2_CCDS)); +} + +/** + * @brief Set the lock level to freeze the + * configuration of several capture/compare parameters. + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * the lock mechanism is supported by a timer instance. + * @rmtoll BDTR LOCK LL_TIM_CC_SetLockLevel + * @param TIMx Timer instance + * @param LockLevel This parameter can be one of the following values: + * @arg @ref LL_TIM_LOCKLEVEL_OFF + * @arg @ref LL_TIM_LOCKLEVEL_1 + * @arg @ref LL_TIM_LOCKLEVEL_2 + * @arg @ref LL_TIM_LOCKLEVEL_3 + * @retval None + */ +__STATIC_INLINE void LL_TIM_CC_SetLockLevel(TIM_TypeDef *TIMx, uint32_t LockLevel) +{ + MODIFY_REG(TIMx->BDTR, TIM_BDTR_LOCK, LockLevel); +} + +/** + * @brief Enable capture/compare channels. + * @rmtoll CCER CC1E LL_TIM_CC_EnableChannel\n + * CCER CC1NE LL_TIM_CC_EnableChannel\n + * CCER CC2E LL_TIM_CC_EnableChannel\n + * CCER CC2NE LL_TIM_CC_EnableChannel\n + * CCER CC3E LL_TIM_CC_EnableChannel\n + * CCER CC3NE LL_TIM_CC_EnableChannel\n + * CCER CC4E LL_TIM_CC_EnableChannel + * @param TIMx Timer instance + * @param Channels This parameter can be a combination of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_CC_EnableChannel(TIM_TypeDef *TIMx, uint32_t Channels) +{ + SET_BIT(TIMx->CCER, Channels); +} + +/** + * @brief Disable capture/compare channels. + * @rmtoll CCER CC1E LL_TIM_CC_DisableChannel\n + * CCER CC1NE LL_TIM_CC_DisableChannel\n + * CCER CC2E LL_TIM_CC_DisableChannel\n + * CCER CC2NE LL_TIM_CC_DisableChannel\n + * CCER CC3E LL_TIM_CC_DisableChannel\n + * CCER CC3NE LL_TIM_CC_DisableChannel\n + * CCER CC4E LL_TIM_CC_DisableChannel + * @param TIMx Timer instance + * @param Channels This parameter can be a combination of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_CC_DisableChannel(TIM_TypeDef *TIMx, uint32_t Channels) +{ + CLEAR_BIT(TIMx->CCER, Channels); +} + +/** + * @brief Indicate whether channel(s) is(are) enabled. + * @rmtoll CCER CC1E LL_TIM_CC_IsEnabledChannel\n + * CCER CC1NE LL_TIM_CC_IsEnabledChannel\n + * CCER CC2E LL_TIM_CC_IsEnabledChannel\n + * CCER CC2NE LL_TIM_CC_IsEnabledChannel\n + * CCER CC3E LL_TIM_CC_IsEnabledChannel\n + * CCER CC3NE LL_TIM_CC_IsEnabledChannel\n + * CCER CC4E LL_TIM_CC_IsEnabledChannel + * @param TIMx Timer instance + * @param Channels This parameter can be a combination of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval State of bit (1 or 0). 
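A sketch of the capture/compare preload mechanism as typically used for six-step commutation on an advanced timer (TIM1 assumed):

/* CCxE/CCxNE/OCxM take effect only on a commutation event (COMG bit or a
   rising edge on TRGI), so all channels switch simultaneously. */
LL_TIM_CC_EnablePreload(TIM1);
LL_TIM_CC_SetUpdate(TIM1, LL_TIM_CCUPDATESOURCE_COMG_AND_TRGI);
LL_TIM_CC_EnableChannel(TIM1, LL_TIM_CHANNEL_CH1 | LL_TIM_CHANNEL_CH1N);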
+ */ +__STATIC_INLINE uint32_t LL_TIM_CC_IsEnabledChannel(const TIM_TypeDef *TIMx, uint32_t Channels) +{ + return ((READ_BIT(TIMx->CCER, Channels) == (Channels)) ? 1UL : 0UL); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_Output_Channel Output channel configuration + * @{ + */ +/** + * @brief Configure an output channel. + * @rmtoll CCMR1 CC1S LL_TIM_OC_ConfigOutput\n + * CCMR1 CC2S LL_TIM_OC_ConfigOutput\n + * CCMR2 CC3S LL_TIM_OC_ConfigOutput\n + * CCMR2 CC4S LL_TIM_OC_ConfigOutput\n + * CCER CC1P LL_TIM_OC_ConfigOutput\n + * CCER CC2P LL_TIM_OC_ConfigOutput\n + * CCER CC3P LL_TIM_OC_ConfigOutput\n + * CCER CC4P LL_TIM_OC_ConfigOutput\n + * CR2 OIS1 LL_TIM_OC_ConfigOutput\n + * CR2 OIS2 LL_TIM_OC_ConfigOutput\n + * CR2 OIS3 LL_TIM_OC_ConfigOutput\n + * CR2 OIS4 LL_TIM_OC_ConfigOutput + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param Configuration This parameter must be a combination of all the following values: + * @arg @ref LL_TIM_OCPOLARITY_HIGH or @ref LL_TIM_OCPOLARITY_LOW + * @arg @ref LL_TIM_OCIDLESTATE_LOW or @ref LL_TIM_OCIDLESTATE_HIGH + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_ConfigOutput(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t Configuration) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + CLEAR_BIT(*pReg, (TIM_CCMR1_CC1S << SHIFT_TAB_OCxx[iChannel])); + MODIFY_REG(TIMx->CCER, (TIM_CCER_CC1P << SHIFT_TAB_CCxP[iChannel]), + (Configuration & TIM_CCER_CC1P) << SHIFT_TAB_CCxP[iChannel]); + MODIFY_REG(TIMx->CR2, (TIM_CR2_OIS1 << SHIFT_TAB_OISx[iChannel]), + (Configuration & TIM_CR2_OIS1) << SHIFT_TAB_OISx[iChannel]); +} + +/** + * @brief Define the behavior of the output reference signal OCxREF from which + * OCx and OCxN (when relevant) are derived. + * @rmtoll CCMR1 OC1M LL_TIM_OC_SetMode\n + * CCMR1 OC2M LL_TIM_OC_SetMode\n + * CCMR2 OC3M LL_TIM_OC_SetMode\n + * CCMR2 OC4M LL_TIM_OC_SetMode + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param Mode This parameter can be one of the following values: + * @arg @ref LL_TIM_OCMODE_FROZEN + * @arg @ref LL_TIM_OCMODE_ACTIVE + * @arg @ref LL_TIM_OCMODE_INACTIVE + * @arg @ref LL_TIM_OCMODE_TOGGLE + * @arg @ref LL_TIM_OCMODE_FORCED_INACTIVE + * @arg @ref LL_TIM_OCMODE_FORCED_ACTIVE + * @arg @ref LL_TIM_OCMODE_PWM1 + * @arg @ref LL_TIM_OCMODE_PWM2 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetMode(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t Mode) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + MODIFY_REG(*pReg, ((TIM_CCMR1_OC1M | TIM_CCMR1_CC1S) << SHIFT_TAB_OCxx[iChannel]), Mode << SHIFT_TAB_OCxx[iChannel]); +} + +/** + * @brief Get the output compare mode of an output channel. 
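+ * @note Hedged usage sketch (TIM3 and channel 2 are arbitrary example choices):
+ * select PWM mode 1 with LL_TIM_OC_SetMode() and verify it with this getter.
+ * @code
+ * LL_TIM_OC_SetMode(TIM3, LL_TIM_CHANNEL_CH2, LL_TIM_OCMODE_PWM1);
+ * uint32_t mode = LL_TIM_OC_GetMode(TIM3, LL_TIM_CHANNEL_CH2); // expected: LL_TIM_OCMODE_PWM1
+ * @endcode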
+ * @rmtoll CCMR1 OC1M LL_TIM_OC_GetMode\n + * CCMR1 OC2M LL_TIM_OC_GetMode\n + * CCMR2 OC3M LL_TIM_OC_GetMode\n + * CCMR2 OC4M LL_TIM_OC_GetMode + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_OCMODE_FROZEN + * @arg @ref LL_TIM_OCMODE_ACTIVE + * @arg @ref LL_TIM_OCMODE_INACTIVE + * @arg @ref LL_TIM_OCMODE_TOGGLE + * @arg @ref LL_TIM_OCMODE_FORCED_INACTIVE + * @arg @ref LL_TIM_OCMODE_FORCED_ACTIVE + * @arg @ref LL_TIM_OCMODE_PWM1 + * @arg @ref LL_TIM_OCMODE_PWM2 + */ +__STATIC_INLINE uint32_t LL_TIM_OC_GetMode(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + const __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + return (READ_BIT(*pReg, ((TIM_CCMR1_OC1M | TIM_CCMR1_CC1S) << SHIFT_TAB_OCxx[iChannel])) >> SHIFT_TAB_OCxx[iChannel]); +} + +/** + * @brief Set the polarity of an output channel. + * @rmtoll CCER CC1P LL_TIM_OC_SetPolarity\n + * CCER CC1NP LL_TIM_OC_SetPolarity\n + * CCER CC2P LL_TIM_OC_SetPolarity\n + * CCER CC2NP LL_TIM_OC_SetPolarity\n + * CCER CC3P LL_TIM_OC_SetPolarity\n + * CCER CC3NP LL_TIM_OC_SetPolarity\n + * CCER CC4P LL_TIM_OC_SetPolarity + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param Polarity This parameter can be one of the following values: + * @arg @ref LL_TIM_OCPOLARITY_HIGH + * @arg @ref LL_TIM_OCPOLARITY_LOW + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetPolarity(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t Polarity) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + MODIFY_REG(TIMx->CCER, (TIM_CCER_CC1P << SHIFT_TAB_CCxP[iChannel]), Polarity << SHIFT_TAB_CCxP[iChannel]); +} + +/** + * @brief Get the polarity of an output channel. + * @rmtoll CCER CC1P LL_TIM_OC_GetPolarity\n + * CCER CC1NP LL_TIM_OC_GetPolarity\n + * CCER CC2P LL_TIM_OC_GetPolarity\n + * CCER CC2NP LL_TIM_OC_GetPolarity\n + * CCER CC3P LL_TIM_OC_GetPolarity\n + * CCER CC3NP LL_TIM_OC_GetPolarity\n + * CCER CC4P LL_TIM_OC_GetPolarity + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_OCPOLARITY_HIGH + * @arg @ref LL_TIM_OCPOLARITY_LOW + */ +__STATIC_INLINE uint32_t LL_TIM_OC_GetPolarity(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + return (READ_BIT(TIMx->CCER, (TIM_CCER_CC1P << SHIFT_TAB_CCxP[iChannel])) >> SHIFT_TAB_CCxP[iChannel]); +} + +/** + * @brief Set the IDLE state of an output channel + * @note This function is significant only for the timer instances + * supporting the break feature. 
Macro IS_TIM_BREAK_INSTANCE(TIMx) + * can be used to check whether or not a timer instance provides + * a break input. + * @rmtoll CR2 OIS1 LL_TIM_OC_SetIdleState\n + * CR2 OIS1N LL_TIM_OC_SetIdleState\n + * CR2 OIS2 LL_TIM_OC_SetIdleState\n + * CR2 OIS2N LL_TIM_OC_SetIdleState\n + * CR2 OIS3 LL_TIM_OC_SetIdleState\n + * CR2 OIS3N LL_TIM_OC_SetIdleState\n + * CR2 OIS4 LL_TIM_OC_SetIdleState + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param IdleState This parameter can be one of the following values: + * @arg @ref LL_TIM_OCIDLESTATE_LOW + * @arg @ref LL_TIM_OCIDLESTATE_HIGH + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetIdleState(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t IdleState) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + MODIFY_REG(TIMx->CR2, (TIM_CR2_OIS1 << SHIFT_TAB_OISx[iChannel]), IdleState << SHIFT_TAB_OISx[iChannel]); +} + +/** + * @brief Get the IDLE state of an output channel + * @rmtoll CR2 OIS1 LL_TIM_OC_GetIdleState\n + * CR2 OIS1N LL_TIM_OC_GetIdleState\n + * CR2 OIS2 LL_TIM_OC_GetIdleState\n + * CR2 OIS2N LL_TIM_OC_GetIdleState\n + * CR2 OIS3 LL_TIM_OC_GetIdleState\n + * CR2 OIS3N LL_TIM_OC_GetIdleState\n + * CR2 OIS4 LL_TIM_OC_GetIdleState + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH1N + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH2N + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH3N + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_OCIDLESTATE_LOW + * @arg @ref LL_TIM_OCIDLESTATE_HIGH + */ +__STATIC_INLINE uint32_t LL_TIM_OC_GetIdleState(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + return (READ_BIT(TIMx->CR2, (TIM_CR2_OIS1 << SHIFT_TAB_OISx[iChannel])) >> SHIFT_TAB_OISx[iChannel]); +} + +/** + * @brief Enable fast mode for the output channel. + * @note Acts only if the channel is configured in PWM1 or PWM2 mode. + * @rmtoll CCMR1 OC1FE LL_TIM_OC_EnableFast\n + * CCMR1 OC2FE LL_TIM_OC_EnableFast\n + * CCMR2 OC3FE LL_TIM_OC_EnableFast\n + * CCMR2 OC4FE LL_TIM_OC_EnableFast + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_EnableFast(TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + SET_BIT(*pReg, (TIM_CCMR1_OC1FE << SHIFT_TAB_OCxx[iChannel])); + +} + +/** + * @brief Disable fast mode for the output channel. 
+ * @rmtoll CCMR1 OC1FE LL_TIM_OC_DisableFast\n + * CCMR1 OC2FE LL_TIM_OC_DisableFast\n + * CCMR2 OC3FE LL_TIM_OC_DisableFast\n + * CCMR2 OC4FE LL_TIM_OC_DisableFast + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_DisableFast(TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + CLEAR_BIT(*pReg, (TIM_CCMR1_OC1FE << SHIFT_TAB_OCxx[iChannel])); + +} + +/** + * @brief Indicates whether fast mode is enabled for the output channel. + * @rmtoll CCMR1 OC1FE LL_TIM_OC_IsEnabledFast\n + * CCMR1 OC2FE LL_TIM_OC_IsEnabledFast\n + * CCMR2 OC3FE LL_TIM_OC_IsEnabledFast\n + * CCMR2 OC4FE LL_TIM_OC_IsEnabledFast\n + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_OC_IsEnabledFast(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + const __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + uint32_t bitfield = TIM_CCMR1_OC1FE << SHIFT_TAB_OCxx[iChannel]; + return ((READ_BIT(*pReg, bitfield) == bitfield) ? 1UL : 0UL); +} + +/** + * @brief Enable compare register (TIMx_CCRx) preload for the output channel. + * @rmtoll CCMR1 OC1PE LL_TIM_OC_EnablePreload\n + * CCMR1 OC2PE LL_TIM_OC_EnablePreload\n + * CCMR2 OC3PE LL_TIM_OC_EnablePreload\n + * CCMR2 OC4PE LL_TIM_OC_EnablePreload + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_EnablePreload(TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + SET_BIT(*pReg, (TIM_CCMR1_OC1PE << SHIFT_TAB_OCxx[iChannel])); +} + +/** + * @brief Disable compare register (TIMx_CCRx) preload for the output channel. + * @rmtoll CCMR1 OC1PE LL_TIM_OC_DisablePreload\n + * CCMR1 OC2PE LL_TIM_OC_DisablePreload\n + * CCMR2 OC3PE LL_TIM_OC_DisablePreload\n + * CCMR2 OC4PE LL_TIM_OC_DisablePreload + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_DisablePreload(TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + CLEAR_BIT(*pReg, (TIM_CCMR1_OC1PE << SHIFT_TAB_OCxx[iChannel])); +} + +/** + * @brief Indicates whether compare register (TIMx_CCRx) preload is enabled for the output channel. 
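+ * @note Illustrative sketch (TIM3 and channel 1 are example choices): with the
+ * preload enabled, a compare value written with LL_TIM_OC_SetCompareCH1()
+ * is taken into account at the next update event only.
+ * @code
+ * LL_TIM_OC_EnablePreload(TIM3, LL_TIM_CHANNEL_CH1);
+ * LL_TIM_OC_SetCompareCH1(TIM3, 500U); // latched into the active CCR1 on the next update event
+ * @endcode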
+ * @rmtoll CCMR1 OC1PE LL_TIM_OC_IsEnabledPreload\n
+ * CCMR1 OC2PE LL_TIM_OC_IsEnabledPreload\n
+ * CCMR2 OC3PE LL_TIM_OC_IsEnabledPreload\n
+ * CCMR2 OC4PE LL_TIM_OC_IsEnabledPreload\n
+ * @param TIMx Timer instance
+ * @param Channel This parameter can be one of the following values:
+ * @arg @ref LL_TIM_CHANNEL_CH1
+ * @arg @ref LL_TIM_CHANNEL_CH2
+ * @arg @ref LL_TIM_CHANNEL_CH3
+ * @arg @ref LL_TIM_CHANNEL_CH4
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_TIM_OC_IsEnabledPreload(const TIM_TypeDef *TIMx, uint32_t Channel)
+{
+ uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel);
+ const __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel]));
+ uint32_t bitfield = TIM_CCMR1_OC1PE << SHIFT_TAB_OCxx[iChannel];
+ return ((READ_BIT(*pReg, bitfield) == bitfield) ? 1UL : 0UL);
+}
+
+/**
+ * @brief Enable clearing the output channel on an external event.
+ * @note This function can only be used in Output compare and PWM modes. It does not work in Forced mode.
+ * @note Macro IS_TIM_OCXREF_CLEAR_INSTANCE(TIMx) can be used to check whether
+ * or not a timer instance can clear the OCxREF signal on an external event.
+ * @rmtoll CCMR1 OC1CE LL_TIM_OC_EnableClear\n
+ * CCMR1 OC2CE LL_TIM_OC_EnableClear\n
+ * CCMR2 OC3CE LL_TIM_OC_EnableClear\n
+ * CCMR2 OC4CE LL_TIM_OC_EnableClear
+ * @param TIMx Timer instance
+ * @param Channel This parameter can be one of the following values:
+ * @arg @ref LL_TIM_CHANNEL_CH1
+ * @arg @ref LL_TIM_CHANNEL_CH2
+ * @arg @ref LL_TIM_CHANNEL_CH3
+ * @arg @ref LL_TIM_CHANNEL_CH4
+ * @retval None
+ */
+__STATIC_INLINE void LL_TIM_OC_EnableClear(TIM_TypeDef *TIMx, uint32_t Channel)
+{
+ uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel);
+ __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel]));
+ SET_BIT(*pReg, (TIM_CCMR1_OC1CE << SHIFT_TAB_OCxx[iChannel]));
+}
+
+/**
+ * @brief Disable clearing the output channel on an external event.
+ * @note Macro IS_TIM_OCXREF_CLEAR_INSTANCE(TIMx) can be used to check whether
+ * or not a timer instance can clear the OCxREF signal on an external event.
+ * @rmtoll CCMR1 OC1CE LL_TIM_OC_DisableClear\n
+ * CCMR1 OC2CE LL_TIM_OC_DisableClear\n
+ * CCMR2 OC3CE LL_TIM_OC_DisableClear\n
+ * CCMR2 OC4CE LL_TIM_OC_DisableClear
+ * @param TIMx Timer instance
+ * @param Channel This parameter can be one of the following values:
+ * @arg @ref LL_TIM_CHANNEL_CH1
+ * @arg @ref LL_TIM_CHANNEL_CH2
+ * @arg @ref LL_TIM_CHANNEL_CH3
+ * @arg @ref LL_TIM_CHANNEL_CH4
+ * @retval None
+ */
+__STATIC_INLINE void LL_TIM_OC_DisableClear(TIM_TypeDef *TIMx, uint32_t Channel)
+{
+ uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel);
+ __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel]));
+ CLEAR_BIT(*pReg, (TIM_CCMR1_OC1CE << SHIFT_TAB_OCxx[iChannel]));
+}
+
+/**
+ * @brief Indicates whether clearing the output channel on an external event is enabled for the output channel.
+ * @note This function only reports the setting; use LL_TIM_OC_EnableClear() or
+ * LL_TIM_OC_DisableClear() to change it.
+ * @note This function can only be used in Output compare and PWM modes. It does not work in Forced mode.
+ * @note Macro IS_TIM_OCXREF_CLEAR_INSTANCE(TIMx) can be used to check whether
+ * or not a timer instance can clear the OCxREF signal on an external event.
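+ * @note Hypothetical sketch (TIM1 and channel 1 chosen only for illustration):
+ * allow an active external event to clear the OCxREF signal, then read the
+ * setting back.
+ * @code
+ * LL_TIM_OC_EnableClear(TIM1, LL_TIM_CHANNEL_CH1);
+ * if (LL_TIM_OC_IsEnabledClear(TIM1, LL_TIM_CHANNEL_CH1) == 1UL)
+ * {
+ * // OC1REF is now cleared when the external event occurs
+ * }
+ * @endcode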
+ * @rmtoll CCMR1 OC1CE LL_TIM_OC_IsEnabledClear\n + * CCMR1 OC2CE LL_TIM_OC_IsEnabledClear\n + * CCMR2 OC3CE LL_TIM_OC_IsEnabledClear\n + * CCMR2 OC4CE LL_TIM_OC_IsEnabledClear\n + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_OC_IsEnabledClear(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + const __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + uint32_t bitfield = TIM_CCMR1_OC1CE << SHIFT_TAB_OCxx[iChannel]; + return ((READ_BIT(*pReg, bitfield) == bitfield) ? 1UL : 0UL); +} + +/** + * @brief Set the dead-time delay (delay inserted between the rising edge of the OCxREF signal and the rising edge of + * the Ocx and OCxN signals). + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * dead-time insertion feature is supported by a timer instance. + * @note Helper macro @ref __LL_TIM_CALC_DEADTIME can be used to calculate the DeadTime parameter + * @rmtoll BDTR DTG LL_TIM_OC_SetDeadTime + * @param TIMx Timer instance + * @param DeadTime between Min_Data=0 and Max_Data=255 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetDeadTime(TIM_TypeDef *TIMx, uint32_t DeadTime) +{ + MODIFY_REG(TIMx->BDTR, TIM_BDTR_DTG, DeadTime); +} + +/** + * @brief Set compare value for output channel 1 (TIMx_CCR1). + * @note In 32-bit timer implementations compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC1_INSTANCE(TIMx) can be used to check whether or not + * output channel 1 is supported by a timer instance. + * @rmtoll CCR1 CCR1 LL_TIM_OC_SetCompareCH1 + * @param TIMx Timer instance + * @param CompareValue between Min_Data=0 and Max_Data=65535 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetCompareCH1(TIM_TypeDef *TIMx, uint32_t CompareValue) +{ + WRITE_REG(TIMx->CCR1, CompareValue); +} + +/** + * @brief Set compare value for output channel 2 (TIMx_CCR2). + * @note In 32-bit timer implementations compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC2_INSTANCE(TIMx) can be used to check whether or not + * output channel 2 is supported by a timer instance. + * @rmtoll CCR2 CCR2 LL_TIM_OC_SetCompareCH2 + * @param TIMx Timer instance + * @param CompareValue between Min_Data=0 and Max_Data=65535 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetCompareCH2(TIM_TypeDef *TIMx, uint32_t CompareValue) +{ + WRITE_REG(TIMx->CCR2, CompareValue); +} + +/** + * @brief Set compare value for output channel 3 (TIMx_CCR3). + * @note In 32-bit timer implementations compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC3_INSTANCE(TIMx) can be used to check whether or not + * output channel is supported by a timer instance. 
+ * @rmtoll CCR3 CCR3 LL_TIM_OC_SetCompareCH3 + * @param TIMx Timer instance + * @param CompareValue between Min_Data=0 and Max_Data=65535 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetCompareCH3(TIM_TypeDef *TIMx, uint32_t CompareValue) +{ + WRITE_REG(TIMx->CCR3, CompareValue); +} + +/** + * @brief Set compare value for output channel 4 (TIMx_CCR4). + * @note In 32-bit timer implementations compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC4_INSTANCE(TIMx) can be used to check whether or not + * output channel 4 is supported by a timer instance. + * @rmtoll CCR4 CCR4 LL_TIM_OC_SetCompareCH4 + * @param TIMx Timer instance + * @param CompareValue between Min_Data=0 and Max_Data=65535 + * @retval None + */ +__STATIC_INLINE void LL_TIM_OC_SetCompareCH4(TIM_TypeDef *TIMx, uint32_t CompareValue) +{ + WRITE_REG(TIMx->CCR4, CompareValue); +} + +/** + * @brief Get compare value (TIMx_CCR1) set for output channel 1. + * @note In 32-bit timer implementations returned compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC1_INSTANCE(TIMx) can be used to check whether or not + * output channel 1 is supported by a timer instance. + * @rmtoll CCR1 CCR1 LL_TIM_OC_GetCompareCH1 + * @param TIMx Timer instance + * @retval CompareValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_OC_GetCompareCH1(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR1)); +} + +/** + * @brief Get compare value (TIMx_CCR2) set for output channel 2. + * @note In 32-bit timer implementations returned compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC2_INSTANCE(TIMx) can be used to check whether or not + * output channel 2 is supported by a timer instance. + * @rmtoll CCR2 CCR2 LL_TIM_OC_GetCompareCH2 + * @param TIMx Timer instance + * @retval CompareValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_OC_GetCompareCH2(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR2)); +} + +/** + * @brief Get compare value (TIMx_CCR3) set for output channel 3. + * @note In 32-bit timer implementations returned compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC3_INSTANCE(TIMx) can be used to check whether or not + * output channel 3 is supported by a timer instance. + * @rmtoll CCR3 CCR3 LL_TIM_OC_GetCompareCH3 + * @param TIMx Timer instance + * @retval CompareValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_OC_GetCompareCH3(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR3)); +} + +/** + * @brief Get compare value (TIMx_CCR4) set for output channel 4. + * @note In 32-bit timer implementations returned compare value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. 
+ * @note Macro IS_TIM_CC4_INSTANCE(TIMx) can be used to check whether or not + * output channel 4 is supported by a timer instance. + * @rmtoll CCR4 CCR4 LL_TIM_OC_GetCompareCH4 + * @param TIMx Timer instance + * @retval CompareValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_OC_GetCompareCH4(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR4)); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_Input_Channel Input channel configuration + * @{ + */ +/** + * @brief Configure input channel. + * @rmtoll CCMR1 CC1S LL_TIM_IC_Config\n + * CCMR1 IC1PSC LL_TIM_IC_Config\n + * CCMR1 IC1F LL_TIM_IC_Config\n + * CCMR1 CC2S LL_TIM_IC_Config\n + * CCMR1 IC2PSC LL_TIM_IC_Config\n + * CCMR1 IC2F LL_TIM_IC_Config\n + * CCMR2 CC3S LL_TIM_IC_Config\n + * CCMR2 IC3PSC LL_TIM_IC_Config\n + * CCMR2 IC3F LL_TIM_IC_Config\n + * CCMR2 CC4S LL_TIM_IC_Config\n + * CCMR2 IC4PSC LL_TIM_IC_Config\n + * CCMR2 IC4F LL_TIM_IC_Config\n + * CCER CC1P LL_TIM_IC_Config\n + * CCER CC1NP LL_TIM_IC_Config\n + * CCER CC2P LL_TIM_IC_Config\n + * CCER CC2NP LL_TIM_IC_Config\n + * CCER CC3P LL_TIM_IC_Config\n + * CCER CC3NP LL_TIM_IC_Config\n + * CCER CC4P LL_TIM_IC_Config\n + * CCER CC4NP LL_TIM_IC_Config + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param Configuration This parameter must be a combination of all the following values: + * @arg @ref LL_TIM_ACTIVEINPUT_DIRECTTI or @ref LL_TIM_ACTIVEINPUT_INDIRECTTI or @ref LL_TIM_ACTIVEINPUT_TRC + * @arg @ref LL_TIM_ICPSC_DIV1 or ... or @ref LL_TIM_ICPSC_DIV8 + * @arg @ref LL_TIM_IC_FILTER_FDIV1 or ... or @ref LL_TIM_IC_FILTER_FDIV32_N8 + * @arg @ref LL_TIM_IC_POLARITY_RISING or @ref LL_TIM_IC_POLARITY_FALLING or @ref LL_TIM_IC_POLARITY_BOTHEDGE + * @retval None + */ +__STATIC_INLINE void LL_TIM_IC_Config(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t Configuration) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + MODIFY_REG(*pReg, ((TIM_CCMR1_IC1F | TIM_CCMR1_IC1PSC | TIM_CCMR1_CC1S) << SHIFT_TAB_ICxx[iChannel]), + ((Configuration >> 16U) & (TIM_CCMR1_IC1F | TIM_CCMR1_IC1PSC | TIM_CCMR1_CC1S)) \ + << SHIFT_TAB_ICxx[iChannel]); + MODIFY_REG(TIMx->CCER, ((TIM_CCER_CC1NP | TIM_CCER_CC1P) << SHIFT_TAB_CCxP[iChannel]), + (Configuration & (TIM_CCER_CC1NP | TIM_CCER_CC1P)) << SHIFT_TAB_CCxP[iChannel]); +} + +/** + * @brief Set the active input. 
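+ * @note Sketch under assumptions (TIM2, channel 1, rising edge, no prescaler and
+ * light filtering are all example choices): the single LL_TIM_IC_Config()
+ * call above combines active input, prescaler, filter and polarity in one
+ * Configuration parameter, whereas this function changes the active input only.
+ * @code
+ * LL_TIM_IC_Config(TIM2, LL_TIM_CHANNEL_CH1,
+ * LL_TIM_ACTIVEINPUT_DIRECTTI | LL_TIM_ICPSC_DIV1 |
+ * LL_TIM_IC_FILTER_FDIV1_N4 | LL_TIM_IC_POLARITY_RISING);
+ * @endcode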
+ * @rmtoll CCMR1 CC1S LL_TIM_IC_SetActiveInput\n + * CCMR1 CC2S LL_TIM_IC_SetActiveInput\n + * CCMR2 CC3S LL_TIM_IC_SetActiveInput\n + * CCMR2 CC4S LL_TIM_IC_SetActiveInput + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param ICActiveInput This parameter can be one of the following values: + * @arg @ref LL_TIM_ACTIVEINPUT_DIRECTTI + * @arg @ref LL_TIM_ACTIVEINPUT_INDIRECTTI + * @arg @ref LL_TIM_ACTIVEINPUT_TRC + * @retval None + */ +__STATIC_INLINE void LL_TIM_IC_SetActiveInput(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ICActiveInput) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + MODIFY_REG(*pReg, ((TIM_CCMR1_CC1S) << SHIFT_TAB_ICxx[iChannel]), (ICActiveInput >> 16U) << SHIFT_TAB_ICxx[iChannel]); +} + +/** + * @brief Get the current active input. + * @rmtoll CCMR1 CC1S LL_TIM_IC_GetActiveInput\n + * CCMR1 CC2S LL_TIM_IC_GetActiveInput\n + * CCMR2 CC3S LL_TIM_IC_GetActiveInput\n + * CCMR2 CC4S LL_TIM_IC_GetActiveInput + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_ACTIVEINPUT_DIRECTTI + * @arg @ref LL_TIM_ACTIVEINPUT_INDIRECTTI + * @arg @ref LL_TIM_ACTIVEINPUT_TRC + */ +__STATIC_INLINE uint32_t LL_TIM_IC_GetActiveInput(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + const __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + return ((READ_BIT(*pReg, ((TIM_CCMR1_CC1S) << SHIFT_TAB_ICxx[iChannel])) >> SHIFT_TAB_ICxx[iChannel]) << 16U); +} + +/** + * @brief Set the prescaler of input channel. + * @rmtoll CCMR1 IC1PSC LL_TIM_IC_SetPrescaler\n + * CCMR1 IC2PSC LL_TIM_IC_SetPrescaler\n + * CCMR2 IC3PSC LL_TIM_IC_SetPrescaler\n + * CCMR2 IC4PSC LL_TIM_IC_SetPrescaler + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param ICPrescaler This parameter can be one of the following values: + * @arg @ref LL_TIM_ICPSC_DIV1 + * @arg @ref LL_TIM_ICPSC_DIV2 + * @arg @ref LL_TIM_ICPSC_DIV4 + * @arg @ref LL_TIM_ICPSC_DIV8 + * @retval None + */ +__STATIC_INLINE void LL_TIM_IC_SetPrescaler(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ICPrescaler) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + MODIFY_REG(*pReg, ((TIM_CCMR1_IC1PSC) << SHIFT_TAB_ICxx[iChannel]), (ICPrescaler >> 16U) << SHIFT_TAB_ICxx[iChannel]); +} + +/** + * @brief Get the current prescaler value acting on an input channel. 
+ * @rmtoll CCMR1 IC1PSC LL_TIM_IC_GetPrescaler\n + * CCMR1 IC2PSC LL_TIM_IC_GetPrescaler\n + * CCMR2 IC3PSC LL_TIM_IC_GetPrescaler\n + * CCMR2 IC4PSC LL_TIM_IC_GetPrescaler + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_ICPSC_DIV1 + * @arg @ref LL_TIM_ICPSC_DIV2 + * @arg @ref LL_TIM_ICPSC_DIV4 + * @arg @ref LL_TIM_ICPSC_DIV8 + */ +__STATIC_INLINE uint32_t LL_TIM_IC_GetPrescaler(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + const __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + return ((READ_BIT(*pReg, ((TIM_CCMR1_IC1PSC) << SHIFT_TAB_ICxx[iChannel])) >> SHIFT_TAB_ICxx[iChannel]) << 16U); +} + +/** + * @brief Set the input filter duration. + * @rmtoll CCMR1 IC1F LL_TIM_IC_SetFilter\n + * CCMR1 IC2F LL_TIM_IC_SetFilter\n + * CCMR2 IC3F LL_TIM_IC_SetFilter\n + * CCMR2 IC4F LL_TIM_IC_SetFilter + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param ICFilter This parameter can be one of the following values: + * @arg @ref LL_TIM_IC_FILTER_FDIV1 + * @arg @ref LL_TIM_IC_FILTER_FDIV1_N2 + * @arg @ref LL_TIM_IC_FILTER_FDIV1_N4 + * @arg @ref LL_TIM_IC_FILTER_FDIV1_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV2_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV2_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV4_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV4_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV8_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV8_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV16_N5 + * @arg @ref LL_TIM_IC_FILTER_FDIV16_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV16_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV32_N5 + * @arg @ref LL_TIM_IC_FILTER_FDIV32_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV32_N8 + * @retval None + */ +__STATIC_INLINE void LL_TIM_IC_SetFilter(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ICFilter) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + MODIFY_REG(*pReg, ((TIM_CCMR1_IC1F) << SHIFT_TAB_ICxx[iChannel]), (ICFilter >> 16U) << SHIFT_TAB_ICxx[iChannel]); +} + +/** + * @brief Get the input filter duration. 
+ * @rmtoll CCMR1 IC1F LL_TIM_IC_GetFilter\n + * CCMR1 IC2F LL_TIM_IC_GetFilter\n + * CCMR2 IC3F LL_TIM_IC_GetFilter\n + * CCMR2 IC4F LL_TIM_IC_GetFilter + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @retval Returned value can be one of the following values: + * @arg @ref LL_TIM_IC_FILTER_FDIV1 + * @arg @ref LL_TIM_IC_FILTER_FDIV1_N2 + * @arg @ref LL_TIM_IC_FILTER_FDIV1_N4 + * @arg @ref LL_TIM_IC_FILTER_FDIV1_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV2_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV2_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV4_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV4_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV8_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV8_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV16_N5 + * @arg @ref LL_TIM_IC_FILTER_FDIV16_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV16_N8 + * @arg @ref LL_TIM_IC_FILTER_FDIV32_N5 + * @arg @ref LL_TIM_IC_FILTER_FDIV32_N6 + * @arg @ref LL_TIM_IC_FILTER_FDIV32_N8 + */ +__STATIC_INLINE uint32_t LL_TIM_IC_GetFilter(const TIM_TypeDef *TIMx, uint32_t Channel) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + const __IO uint32_t *pReg = (__IO uint32_t *)((uint32_t)((uint32_t)(&TIMx->CCMR1) + OFFSET_TAB_CCMRx[iChannel])); + return ((READ_BIT(*pReg, ((TIM_CCMR1_IC1F) << SHIFT_TAB_ICxx[iChannel])) >> SHIFT_TAB_ICxx[iChannel]) << 16U); +} + +/** + * @brief Set the input channel polarity. + * @rmtoll CCER CC1P LL_TIM_IC_SetPolarity\n + * CCER CC1NP LL_TIM_IC_SetPolarity\n + * CCER CC2P LL_TIM_IC_SetPolarity\n + * CCER CC2NP LL_TIM_IC_SetPolarity\n + * CCER CC3P LL_TIM_IC_SetPolarity\n + * CCER CC3NP LL_TIM_IC_SetPolarity\n + * CCER CC4P LL_TIM_IC_SetPolarity\n + * CCER CC4NP LL_TIM_IC_SetPolarity + * @param TIMx Timer instance + * @param Channel This parameter can be one of the following values: + * @arg @ref LL_TIM_CHANNEL_CH1 + * @arg @ref LL_TIM_CHANNEL_CH2 + * @arg @ref LL_TIM_CHANNEL_CH3 + * @arg @ref LL_TIM_CHANNEL_CH4 + * @param ICPolarity This parameter can be one of the following values: + * @arg @ref LL_TIM_IC_POLARITY_RISING + * @arg @ref LL_TIM_IC_POLARITY_FALLING + * @arg @ref LL_TIM_IC_POLARITY_BOTHEDGE + * @retval None + */ +__STATIC_INLINE void LL_TIM_IC_SetPolarity(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ICPolarity) +{ + uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel); + MODIFY_REG(TIMx->CCER, ((TIM_CCER_CC1NP | TIM_CCER_CC1P) << SHIFT_TAB_CCxP[iChannel]), + ICPolarity << SHIFT_TAB_CCxP[iChannel]); +} + +/** + * @brief Get the current input channel polarity. 
+ * @rmtoll CCER CC1P LL_TIM_IC_GetPolarity\n
+ * CCER CC1NP LL_TIM_IC_GetPolarity\n
+ * CCER CC2P LL_TIM_IC_GetPolarity\n
+ * CCER CC2NP LL_TIM_IC_GetPolarity\n
+ * CCER CC3P LL_TIM_IC_GetPolarity\n
+ * CCER CC3NP LL_TIM_IC_GetPolarity\n
+ * CCER CC4P LL_TIM_IC_GetPolarity\n
+ * CCER CC4NP LL_TIM_IC_GetPolarity
+ * @param TIMx Timer instance
+ * @param Channel This parameter can be one of the following values:
+ * @arg @ref LL_TIM_CHANNEL_CH1
+ * @arg @ref LL_TIM_CHANNEL_CH2
+ * @arg @ref LL_TIM_CHANNEL_CH3
+ * @arg @ref LL_TIM_CHANNEL_CH4
+ * @retval Returned value can be one of the following values:
+ * @arg @ref LL_TIM_IC_POLARITY_RISING
+ * @arg @ref LL_TIM_IC_POLARITY_FALLING
+ * @arg @ref LL_TIM_IC_POLARITY_BOTHEDGE
+ */
+__STATIC_INLINE uint32_t LL_TIM_IC_GetPolarity(const TIM_TypeDef *TIMx, uint32_t Channel)
+{
+ uint8_t iChannel = TIM_GET_CHANNEL_INDEX(Channel);
+ return (READ_BIT(TIMx->CCER, ((TIM_CCER_CC1NP | TIM_CCER_CC1P) << SHIFT_TAB_CCxP[iChannel])) >>
+ SHIFT_TAB_CCxP[iChannel]);
+}
+
+/**
+ * @brief Connect the TIMx_CH1, CH2 and CH3 pins to the TI1 input (XOR combination).
+ * @note Macro IS_TIM_XOR_INSTANCE(TIMx) can be used to check whether or not
+ * a timer instance provides an XOR input.
+ * @rmtoll CR2 TI1S LL_TIM_IC_EnableXORCombination
+ * @param TIMx Timer instance
+ * @retval None
+ */
+__STATIC_INLINE void LL_TIM_IC_EnableXORCombination(TIM_TypeDef *TIMx)
+{
+ SET_BIT(TIMx->CR2, TIM_CR2_TI1S);
+}
+
+/**
+ * @brief Disconnect the TIMx_CH1, CH2 and CH3 pins from the TI1 input.
+ * @note Macro IS_TIM_XOR_INSTANCE(TIMx) can be used to check whether or not
+ * a timer instance provides an XOR input.
+ * @rmtoll CR2 TI1S LL_TIM_IC_DisableXORCombination
+ * @param TIMx Timer instance
+ * @retval None
+ */
+__STATIC_INLINE void LL_TIM_IC_DisableXORCombination(TIM_TypeDef *TIMx)
+{
+ CLEAR_BIT(TIMx->CR2, TIM_CR2_TI1S);
+}
+
+/**
+ * @brief Indicates whether the TIMx_CH1, CH2 and CH3 pins are connected to the TI1 input.
+ * @note Macro IS_TIM_XOR_INSTANCE(TIMx) can be used to check whether or not
+ * a timer instance provides an XOR input.
+ * @rmtoll CR2 TI1S LL_TIM_IC_IsEnabledXORCombination
+ * @param TIMx Timer instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_TIM_IC_IsEnabledXORCombination(const TIM_TypeDef *TIMx)
+{
+ return ((READ_BIT(TIMx->CR2, TIM_CR2_TI1S) == (TIM_CR2_TI1S)) ? 1UL : 0UL);
+}
+
+/**
+ * @brief Get captured value for input channel 1.
+ * @note In 32-bit timer implementations returned captured value can be between 0x00000000 and 0xFFFFFFFF.
+ * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check
+ * whether or not a timer instance supports a 32 bits counter.
+ * @note Macro IS_TIM_CC1_INSTANCE(TIMx) can be used to check whether or not
+ * input channel 1 is supported by a timer instance.
+ * @rmtoll CCR1 CCR1 LL_TIM_IC_GetCaptureCH1
+ * @param TIMx Timer instance
+ * @retval CapturedValue (between Min_Data=0 and Max_Data=65535)
+ */
+__STATIC_INLINE uint32_t LL_TIM_IC_GetCaptureCH1(const TIM_TypeDef *TIMx)
+{
+ return (uint32_t)(READ_REG(TIMx->CCR1));
+}
+
+/**
+ * @brief Get captured value for input channel 2.
+ * @note In 32-bit timer implementations returned captured value can be between 0x00000000 and 0xFFFFFFFF.
+ * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check
+ * whether or not a timer instance supports a 32 bits counter.
+ * @note Macro IS_TIM_CC2_INSTANCE(TIMx) can be used to check whether or not
+ * input channel 2 is supported by a timer instance.
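+ * @note Polling sketch (assumes TIM2 channel 2 has already been configured for
+ * input capture; the instance and channel are examples only):
+ * @code
+ * if (LL_TIM_IsActiveFlag_CC2(TIM2) == 1UL)
+ * {
+ * LL_TIM_ClearFlag_CC2(TIM2);
+ * uint32_t capture = LL_TIM_IC_GetCaptureCH2(TIM2); // timestamp of the latest CH2 edge
+ * }
+ * @endcode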
+ * @rmtoll CCR2 CCR2 LL_TIM_IC_GetCaptureCH2 + * @param TIMx Timer instance + * @retval CapturedValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_IC_GetCaptureCH2(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR2)); +} + +/** + * @brief Get captured value for input channel 3. + * @note In 32-bit timer implementations returned captured value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC3_INSTANCE(TIMx) can be used to check whether or not + * input channel 3 is supported by a timer instance. + * @rmtoll CCR3 CCR3 LL_TIM_IC_GetCaptureCH3 + * @param TIMx Timer instance + * @retval CapturedValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_IC_GetCaptureCH3(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR3)); +} + +/** + * @brief Get captured value for input channel 4. + * @note In 32-bit timer implementations returned captured value can be between 0x00000000 and 0xFFFFFFFF. + * @note Macro IS_TIM_32B_COUNTER_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports a 32 bits counter. + * @note Macro IS_TIM_CC4_INSTANCE(TIMx) can be used to check whether or not + * input channel 4 is supported by a timer instance. + * @rmtoll CCR4 CCR4 LL_TIM_IC_GetCaptureCH4 + * @param TIMx Timer instance + * @retval CapturedValue (between Min_Data=0 and Max_Data=65535) + */ +__STATIC_INLINE uint32_t LL_TIM_IC_GetCaptureCH4(const TIM_TypeDef *TIMx) +{ + return (uint32_t)(READ_REG(TIMx->CCR4)); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_Clock_Selection Counter clock selection + * @{ + */ +/** + * @brief Enable external clock mode 2. + * @note When external clock mode 2 is enabled the counter is clocked by any active edge on the ETRF signal. + * @note Macro IS_TIM_CLOCKSOURCE_ETRMODE2_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports external clock mode2. + * @rmtoll SMCR ECE LL_TIM_EnableExternalClock + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableExternalClock(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->SMCR, TIM_SMCR_ECE); +} + +/** + * @brief Disable external clock mode 2. + * @note Macro IS_TIM_CLOCKSOURCE_ETRMODE2_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports external clock mode2. + * @rmtoll SMCR ECE LL_TIM_DisableExternalClock + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableExternalClock(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->SMCR, TIM_SMCR_ECE); +} + +/** + * @brief Indicate whether external clock mode 2 is enabled. + * @note Macro IS_TIM_CLOCKSOURCE_ETRMODE2_INSTANCE(TIMx) can be used to check + * whether or not a timer instance supports external clock mode2. + * @rmtoll SMCR ECE LL_TIM_IsEnabledExternalClock + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledExternalClock(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SMCR, TIM_SMCR_ECE) == (TIM_SMCR_ECE)) ? 1UL : 0UL); +} + +/** + * @brief Set the clock source of the counter clock. + * @note when selected clock source is external clock mode 1, the timer input + * the external clock is applied is selected by calling the @ref LL_TIM_SetTriggerInput() + * function. This timer input must be configured by calling + * the @ref LL_TIM_IC_Config() function. 
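+ * @note A hedged example (TIM2 and external clock mode 2 are assumptions made
+ * for illustration; the ETR pin itself must be configured separately):
+ * @code
+ * LL_TIM_SetClockSource(TIM2, LL_TIM_CLOCKSOURCE_EXT_MODE2); // counter clocked by active ETRF edges
+ * @endcode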
+ * @note Macro IS_TIM_CLOCKSOURCE_ETRMODE1_INSTANCE(TIMx) can be used to check
+ * whether or not a timer instance supports external clock mode1.
+ * @note Macro IS_TIM_CLOCKSOURCE_ETRMODE2_INSTANCE(TIMx) can be used to check
+ * whether or not a timer instance supports external clock mode2.
+ * @rmtoll SMCR SMS LL_TIM_SetClockSource\n
+ * SMCR ECE LL_TIM_SetClockSource
+ * @param TIMx Timer instance
+ * @param ClockSource This parameter can be one of the following values:
+ * @arg @ref LL_TIM_CLOCKSOURCE_INTERNAL
+ * @arg @ref LL_TIM_CLOCKSOURCE_EXT_MODE1
+ * @arg @ref LL_TIM_CLOCKSOURCE_EXT_MODE2
+ * @retval None
+ */
+__STATIC_INLINE void LL_TIM_SetClockSource(TIM_TypeDef *TIMx, uint32_t ClockSource)
+{
+ MODIFY_REG(TIMx->SMCR, TIM_SMCR_SMS | TIM_SMCR_ECE, ClockSource);
+}
+
+/**
+ * @brief Set the encoder interface mode.
+ * @note Macro IS_TIM_ENCODER_INTERFACE_INSTANCE(TIMx) can be used to check
+ * whether or not a timer instance supports the encoder mode.
+ * @rmtoll SMCR SMS LL_TIM_SetEncoderMode
+ * @param TIMx Timer instance
+ * @param EncoderMode This parameter can be one of the following values:
+ * @arg @ref LL_TIM_ENCODERMODE_X2_TI1
+ * @arg @ref LL_TIM_ENCODERMODE_X2_TI2
+ * @arg @ref LL_TIM_ENCODERMODE_X4_TI12
+ * @retval None
+ */
+__STATIC_INLINE void LL_TIM_SetEncoderMode(TIM_TypeDef *TIMx, uint32_t EncoderMode)
+{
+ MODIFY_REG(TIMx->SMCR, TIM_SMCR_SMS, EncoderMode);
+}
+
+/**
+ * @}
+ */
+
+/** @defgroup TIM_LL_EF_Timer_Synchronization Timer synchronisation configuration
+ * @{
+ */
+/**
+ * @brief Set the trigger output (TRGO) used for timer synchronization.
+ * @note Macro IS_TIM_MASTER_INSTANCE(TIMx) can be used to check
+ * whether or not a timer instance can operate as a master timer.
+ * @rmtoll CR2 MMS LL_TIM_SetTriggerOutput
+ * @param TIMx Timer instance
+ * @param TimerSynchronization This parameter can be one of the following values:
+ * @arg @ref LL_TIM_TRGO_RESET
+ * @arg @ref LL_TIM_TRGO_ENABLE
+ * @arg @ref LL_TIM_TRGO_UPDATE
+ * @arg @ref LL_TIM_TRGO_CC1IF
+ * @arg @ref LL_TIM_TRGO_OC1REF
+ * @arg @ref LL_TIM_TRGO_OC2REF
+ * @arg @ref LL_TIM_TRGO_OC3REF
+ * @arg @ref LL_TIM_TRGO_OC4REF
+ * @retval None
+ */
+__STATIC_INLINE void LL_TIM_SetTriggerOutput(TIM_TypeDef *TIMx, uint32_t TimerSynchronization)
+{
+ MODIFY_REG(TIMx->CR2, TIM_CR2_MMS, TimerSynchronization);
+}
+
+/**
+ * @brief Set the synchronization mode of a slave timer.
+ * @note Macro IS_TIM_SLAVE_INSTANCE(TIMx) can be used to check whether or not
+ * a timer instance can operate as a slave timer.
+ * @rmtoll SMCR SMS LL_TIM_SetSlaveMode
+ * @param TIMx Timer instance
+ * @param SlaveMode This parameter can be one of the following values:
+ * @arg @ref LL_TIM_SLAVEMODE_DISABLED
+ * @arg @ref LL_TIM_SLAVEMODE_RESET
+ * @arg @ref LL_TIM_SLAVEMODE_GATED
+ * @arg @ref LL_TIM_SLAVEMODE_TRIGGER
+ * @retval None
+ */
+__STATIC_INLINE void LL_TIM_SetSlaveMode(TIM_TypeDef *TIMx, uint32_t SlaveMode)
+{
+ MODIFY_REG(TIMx->SMCR, TIM_SMCR_SMS, SlaveMode);
+}
+
+/**
+ * @brief Select the trigger input to be used to synchronize the counter.
+ * @note Macro IS_TIM_SLAVE_INSTANCE(TIMx) can be used to check whether or not
+ * a timer instance can operate as a slave timer.
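+ * @note Hypothetical master/slave pairing (TIM1 as master and TIM3 as slave on
+ * ITR0 are example choices; the actual ITRx mapping is device specific):
+ * @code
+ * LL_TIM_SetTriggerOutput(TIM1, LL_TIM_TRGO_UPDATE); // master: update event drives TRGO
+ * LL_TIM_SetTriggerInput(TIM3, LL_TIM_TS_ITR0); // slave: synchronize on the selected internal trigger
+ * LL_TIM_SetSlaveMode(TIM3, LL_TIM_SLAVEMODE_TRIGGER); // slave counter starts on a trigger rising edge
+ * @endcode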
+ * @rmtoll SMCR TS LL_TIM_SetTriggerInput + * @param TIMx Timer instance + * @param TriggerInput This parameter can be one of the following values: + * @arg @ref LL_TIM_TS_ITR0 + * @arg @ref LL_TIM_TS_ITR1 + * @arg @ref LL_TIM_TS_ITR2 + * @arg @ref LL_TIM_TS_ITR3 + * @arg @ref LL_TIM_TS_TI1F_ED + * @arg @ref LL_TIM_TS_TI1FP1 + * @arg @ref LL_TIM_TS_TI2FP2 + * @arg @ref LL_TIM_TS_ETRF + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetTriggerInput(TIM_TypeDef *TIMx, uint32_t TriggerInput) +{ + MODIFY_REG(TIMx->SMCR, TIM_SMCR_TS, TriggerInput); +} + +/** + * @brief Enable the Master/Slave mode. + * @note Macro IS_TIM_SLAVE_INSTANCE(TIMx) can be used to check whether or not + * a timer instance can operate as a slave timer. + * @rmtoll SMCR MSM LL_TIM_EnableMasterSlaveMode + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableMasterSlaveMode(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->SMCR, TIM_SMCR_MSM); +} + +/** + * @brief Disable the Master/Slave mode. + * @note Macro IS_TIM_SLAVE_INSTANCE(TIMx) can be used to check whether or not + * a timer instance can operate as a slave timer. + * @rmtoll SMCR MSM LL_TIM_DisableMasterSlaveMode + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableMasterSlaveMode(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->SMCR, TIM_SMCR_MSM); +} + +/** + * @brief Indicates whether the Master/Slave mode is enabled. + * @note Macro IS_TIM_SLAVE_INSTANCE(TIMx) can be used to check whether or not + * a timer instance can operate as a slave timer. + * @rmtoll SMCR MSM LL_TIM_IsEnabledMasterSlaveMode + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledMasterSlaveMode(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SMCR, TIM_SMCR_MSM) == (TIM_SMCR_MSM)) ? 1UL : 0UL); +} + +/** + * @brief Configure the external trigger (ETR) input. + * @note Macro IS_TIM_ETR_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides an external trigger input. 
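+ * @note Illustrative call (TIM2, the divide-by-4 prescaler and the filter value
+ * are assumptions made for the example only):
+ * @code
+ * LL_TIM_ConfigETR(TIM2, LL_TIM_ETR_POLARITY_NONINVERTED,
+ * LL_TIM_ETR_PRESCALER_DIV4, LL_TIM_ETR_FILTER_FDIV1_N8);
+ * @endcode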
+ * @rmtoll SMCR ETP LL_TIM_ConfigETR\n + * SMCR ETPS LL_TIM_ConfigETR\n + * SMCR ETF LL_TIM_ConfigETR + * @param TIMx Timer instance + * @param ETRPolarity This parameter can be one of the following values: + * @arg @ref LL_TIM_ETR_POLARITY_NONINVERTED + * @arg @ref LL_TIM_ETR_POLARITY_INVERTED + * @param ETRPrescaler This parameter can be one of the following values: + * @arg @ref LL_TIM_ETR_PRESCALER_DIV1 + * @arg @ref LL_TIM_ETR_PRESCALER_DIV2 + * @arg @ref LL_TIM_ETR_PRESCALER_DIV4 + * @arg @ref LL_TIM_ETR_PRESCALER_DIV8 + * @param ETRFilter This parameter can be one of the following values: + * @arg @ref LL_TIM_ETR_FILTER_FDIV1 + * @arg @ref LL_TIM_ETR_FILTER_FDIV1_N2 + * @arg @ref LL_TIM_ETR_FILTER_FDIV1_N4 + * @arg @ref LL_TIM_ETR_FILTER_FDIV1_N8 + * @arg @ref LL_TIM_ETR_FILTER_FDIV2_N6 + * @arg @ref LL_TIM_ETR_FILTER_FDIV2_N8 + * @arg @ref LL_TIM_ETR_FILTER_FDIV4_N6 + * @arg @ref LL_TIM_ETR_FILTER_FDIV4_N8 + * @arg @ref LL_TIM_ETR_FILTER_FDIV8_N6 + * @arg @ref LL_TIM_ETR_FILTER_FDIV8_N8 + * @arg @ref LL_TIM_ETR_FILTER_FDIV16_N5 + * @arg @ref LL_TIM_ETR_FILTER_FDIV16_N6 + * @arg @ref LL_TIM_ETR_FILTER_FDIV16_N8 + * @arg @ref LL_TIM_ETR_FILTER_FDIV32_N5 + * @arg @ref LL_TIM_ETR_FILTER_FDIV32_N6 + * @arg @ref LL_TIM_ETR_FILTER_FDIV32_N8 + * @retval None + */ +__STATIC_INLINE void LL_TIM_ConfigETR(TIM_TypeDef *TIMx, uint32_t ETRPolarity, uint32_t ETRPrescaler, + uint32_t ETRFilter) +{ + MODIFY_REG(TIMx->SMCR, TIM_SMCR_ETP | TIM_SMCR_ETPS | TIM_SMCR_ETF, ETRPolarity | ETRPrescaler | ETRFilter); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_Break_Function Break function configuration + * @{ + */ +/** + * @brief Enable the break function. + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR BKE LL_TIM_EnableBRK + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableBRK(TIM_TypeDef *TIMx) +{ + __IO uint32_t tmpreg; + SET_BIT(TIMx->BDTR, TIM_BDTR_BKE); + /* Note: Any write operation to this bit takes a delay of 1 APB clock cycle to become effective. */ + tmpreg = READ_REG(TIMx->BDTR); + (void)(tmpreg); +} + +/** + * @brief Disable the break function. + * @rmtoll BDTR BKE LL_TIM_DisableBRK + * @param TIMx Timer instance + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableBRK(TIM_TypeDef *TIMx) +{ + __IO uint32_t tmpreg; + CLEAR_BIT(TIMx->BDTR, TIM_BDTR_BKE); + /* Note: Any write operation to this bit takes a delay of 1 APB clock cycle to become effective. */ + tmpreg = READ_REG(TIMx->BDTR); + (void)(tmpreg); +} + +/** + * @brief Configure the break input. + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR BKP LL_TIM_ConfigBRK + * @param TIMx Timer instance + * @param BreakPolarity This parameter can be one of the following values: + * @arg @ref LL_TIM_BREAK_POLARITY_LOW + * @arg @ref LL_TIM_BREAK_POLARITY_HIGH + * @retval None + */ +__STATIC_INLINE void LL_TIM_ConfigBRK(TIM_TypeDef *TIMx, uint32_t BreakPolarity) +{ + __IO uint32_t tmpreg; + MODIFY_REG(TIMx->BDTR, TIM_BDTR_BKP, BreakPolarity); + /* Note: Any write operation to BKP bit takes a delay of 1 APB clock cycle to become effective. */ + tmpreg = READ_REG(TIMx->BDTR); + (void)(tmpreg); +} + +/** + * @brief Select the outputs off state (enabled v.s. disabled) in Idle and Run modes. 
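+ * @note Hedged break-configuration sketch (TIM1 and an active-high break input
+ * are assumptions for illustration only):
+ * @code
+ * LL_TIM_ConfigBRK(TIM1, LL_TIM_BREAK_POLARITY_HIGH); // break input is active high
+ * LL_TIM_EnableBRK(TIM1); // arm the break function
+ * @endcode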
+ * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR OSSI LL_TIM_SetOffStates\n + * BDTR OSSR LL_TIM_SetOffStates + * @param TIMx Timer instance + * @param OffStateIdle This parameter can be one of the following values: + * @arg @ref LL_TIM_OSSI_DISABLE + * @arg @ref LL_TIM_OSSI_ENABLE + * @param OffStateRun This parameter can be one of the following values: + * @arg @ref LL_TIM_OSSR_DISABLE + * @arg @ref LL_TIM_OSSR_ENABLE + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetOffStates(TIM_TypeDef *TIMx, uint32_t OffStateIdle, uint32_t OffStateRun) +{ + MODIFY_REG(TIMx->BDTR, TIM_BDTR_OSSI | TIM_BDTR_OSSR, OffStateIdle | OffStateRun); +} + +/** + * @brief Enable automatic output (MOE can be set by software or automatically when a break input is active). + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR AOE LL_TIM_EnableAutomaticOutput + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableAutomaticOutput(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->BDTR, TIM_BDTR_AOE); +} + +/** + * @brief Disable automatic output (MOE can be set only by software). + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR AOE LL_TIM_DisableAutomaticOutput + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableAutomaticOutput(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->BDTR, TIM_BDTR_AOE); +} + +/** + * @brief Indicate whether automatic output is enabled. + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR AOE LL_TIM_IsEnabledAutomaticOutput + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledAutomaticOutput(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->BDTR, TIM_BDTR_AOE) == (TIM_BDTR_AOE)) ? 1UL : 0UL); +} + +/** + * @brief Enable the outputs (set the MOE bit in TIMx_BDTR register). + * @note The MOE bit in TIMx_BDTR register allows to enable /disable the outputs by + * software and is reset in case of break or break2 event + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR MOE LL_TIM_EnableAllOutputs + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableAllOutputs(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->BDTR, TIM_BDTR_MOE); +} + +/** + * @brief Disable the outputs (reset the MOE bit in TIMx_BDTR register). + * @note The MOE bit in TIMx_BDTR register allows to enable /disable the outputs by + * software and is reset in case of break or break2 event. + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR MOE LL_TIM_DisableAllOutputs + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableAllOutputs(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->BDTR, TIM_BDTR_MOE); +} + +/** + * @brief Indicates whether outputs are enabled. + * @note Macro IS_TIM_BREAK_INSTANCE(TIMx) can be used to check whether or not + * a timer instance provides a break input. + * @rmtoll BDTR MOE LL_TIM_IsEnabledAllOutputs + * @param TIMx Timer instance + * @retval State of bit (1 or 0). 
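+ * @note Minimal sketch (TIM1 is an example of a timer with a break input): the
+ * outputs of such timers are only driven once MOE is set.
+ * @code
+ * LL_TIM_EnableAllOutputs(TIM1); // set MOE; cleared again by hardware on a break event
+ * if (LL_TIM_IsEnabledAllOutputs(TIM1) == 1UL)
+ * {
+ * // OCx/OCxN pins are now controlled by the timer
+ * }
+ * @endcode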
+ */
+__STATIC_INLINE uint32_t LL_TIM_IsEnabledAllOutputs(const TIM_TypeDef *TIMx)
+{
+ return ((READ_BIT(TIMx->BDTR, TIM_BDTR_MOE) == (TIM_BDTR_MOE)) ? 1UL : 0UL);
+}
+
+/**
+ * @}
+ */
+
+/** @defgroup TIM_LL_EF_DMA_Burst_Mode DMA burst mode configuration
+ * @{
+ */
+/**
+ * @brief Configures the timer DMA burst feature.
+ * @note Macro IS_TIM_DMABURST_INSTANCE(TIMx) can be used to check whether or
+ * not a timer instance supports the DMA burst mode.
+ * @rmtoll DCR DBL LL_TIM_ConfigDMABurst\n
+ * DCR DBA LL_TIM_ConfigDMABurst
+ * @param TIMx Timer instance
+ * @param DMABurstBaseAddress This parameter can be one of the following values:
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_CR1
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_CR2
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_SMCR
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_DIER
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_SR
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_EGR
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_CCMR1
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_CCMR2
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_CCER
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_CNT
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_PSC
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_ARR
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_RCR
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_CCR1
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_CCR2
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_CCR3
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_CCR4
+ * @arg @ref LL_TIM_DMABURST_BASEADDR_BDTR
+ * @param DMABurstLength This parameter can be one of the following values:
+ * @arg @ref LL_TIM_DMABURST_LENGTH_1TRANSFER
+ * @arg @ref LL_TIM_DMABURST_LENGTH_2TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_3TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_4TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_5TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_6TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_7TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_8TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_9TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_10TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_11TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_12TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_13TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_14TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_15TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_16TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_17TRANSFERS
+ * @arg @ref LL_TIM_DMABURST_LENGTH_18TRANSFERS
+ * @retval None
+ */
+__STATIC_INLINE void LL_TIM_ConfigDMABurst(TIM_TypeDef *TIMx, uint32_t DMABurstBaseAddress, uint32_t DMABurstLength)
+{
+ MODIFY_REG(TIMx->DCR, (TIM_DCR_DBL | TIM_DCR_DBA), (DMABurstBaseAddress | DMABurstLength));
+}
+
+/**
+ * @}
+ */
+
+/** @defgroup TIM_LL_EF_Timer_Inputs_Remapping Timer input remapping
+ * @{
+ */
+/**
+ * @brief Remap TIM inputs (input channel, internal/external triggers).
+ * @note Macro IS_TIM_REMAP_INSTANCE(TIMx) can be used to check whether or not
+ * some timer inputs can be remapped.
+ * @rmtoll TIM1_OR ITR2_RMP LL_TIM_SetRemap\n
+ * TIM2_OR ITR1_RMP LL_TIM_SetRemap\n
+ * TIM5_OR ITR1_RMP LL_TIM_SetRemap\n
+ * TIM5_OR TI4_RMP LL_TIM_SetRemap\n
+ * TIM9_OR ITR1_RMP LL_TIM_SetRemap\n
+ * TIM11_OR TI1_RMP LL_TIM_SetRemap\n
+ * LPTIM1_OR OR LL_TIM_SetRemap
+ * @param TIMx Timer instance
+ * @param Remap Remap param depends on the TIMx. Description available only
+ * in CHM version of the User Manual (not in .pdf).
+ * Otherwise see Reference Manual description of OR registers.
+ * + * Below description summarizes "Timer Instance" and "Remap" param combinations: + * + * TIM1: one of the following values + * + * ITR2_RMP can be one of the following values + * @arg @ref LL_TIM_TIM1_ITR2_RMP_TIM3_TRGO (*) + * @arg @ref LL_TIM_TIM1_ITR2_RMP_LPTIM (*) + * + * TIM2: one of the following values + * + * ITR1_RMP can be one of the following values + * @arg @ref LL_TIM_TIM2_ITR1_RMP_TIM8_TRGO + * @arg @ref LL_TIM_TIM2_ITR1_RMP_OTG_FS_SOF + * @arg @ref LL_TIM_TIM2_ITR1_RMP_OTG_HS_SOF + * + * TIM5: one of the following values + * + * @arg @ref LL_TIM_TIM5_TI4_RMP_GPIO + * @arg @ref LL_TIM_TIM5_TI4_RMP_LSI + * @arg @ref LL_TIM_TIM5_TI4_RMP_LSE + * @arg @ref LL_TIM_TIM5_TI4_RMP_RTC + * @arg @ref LL_TIM_TIM5_ITR1_RMP_TIM3_TRGO (*) + * @arg @ref LL_TIM_TIM5_ITR1_RMP_LPTIM (*) + * + * TIM9: one of the following values + * + * ITR1_RMP can be one of the following values + * @arg @ref LL_TIM_TIM9_ITR1_RMP_TIM3_TRGO (*) + * @arg @ref LL_TIM_TIM9_ITR1_RMP_LPTIM (*) + * + * TIM11: one of the following values + * + * @arg @ref LL_TIM_TIM11_TI1_RMP_GPIO + * @arg @ref LL_TIM_TIM11_TI1_RMP_GPIO1 (*) + * @arg @ref LL_TIM_TIM11_TI1_RMP_HSE_RTC + * @arg @ref LL_TIM_TIM11_TI1_RMP_GPIO2 + * @arg @ref LL_TIM_TIM11_TI1_RMP_SPDIFRX (*) + * + * (*) Value not defined in all devices. \n + * + * @retval None + */ +__STATIC_INLINE void LL_TIM_SetRemap(TIM_TypeDef *TIMx, uint32_t Remap) +{ +#if defined(LPTIM_OR_TIM1_ITR2_RMP) && defined(LPTIM_OR_TIM5_ITR1_RMP) && defined(LPTIM_OR_TIM9_ITR1_RMP) + if ((Remap & LL_TIM_LPTIM_REMAP_MASK) == LL_TIM_LPTIM_REMAP_MASK) + { + /* Connect TIMx internal trigger to LPTIM1 output */ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN); + MODIFY_REG(LPTIM1->OR, + (LPTIM_OR_TIM1_ITR2_RMP | LPTIM_OR_TIM5_ITR1_RMP | LPTIM_OR_TIM9_ITR1_RMP), + Remap & ~(LL_TIM_LPTIM_REMAP_MASK)); + } + else + { + MODIFY_REG(TIMx->OR, (Remap >> TIMx_OR_RMP_SHIFT), (Remap & TIMx_OR_RMP_MASK)); + } +#else + MODIFY_REG(TIMx->OR, (Remap >> TIMx_OR_RMP_SHIFT), (Remap & TIMx_OR_RMP_MASK)); +#endif /* LPTIM_OR_TIM1_ITR2_RMP && LPTIM_OR_TIM5_ITR1_RMP && LPTIM_OR_TIM9_ITR1_RMP */ +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_FLAG_Management FLAG-Management + * @{ + */ +/** + * @brief Clear the update interrupt flag (UIF). + * @rmtoll SR UIF LL_TIM_ClearFlag_UPDATE + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_UPDATE(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_UIF)); +} + +/** + * @brief Indicate whether update interrupt flag (UIF) is set (update interrupt is pending). + * @rmtoll SR UIF LL_TIM_IsActiveFlag_UPDATE + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_UPDATE(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_UIF) == (TIM_SR_UIF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 1 interrupt flag (CC1F). + * @rmtoll SR CC1IF LL_TIM_ClearFlag_CC1 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC1(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC1IF)); +} + +/** + * @brief Indicate whether Capture/Compare 1 interrupt flag (CC1F) is set (Capture/Compare 1 interrupt is pending). + * @rmtoll SR CC1IF LL_TIM_IsActiveFlag_CC1 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC1(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC1IF) == (TIM_SR_CC1IF)) ? 
1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 2 interrupt flag (CC2F). + * @rmtoll SR CC2IF LL_TIM_ClearFlag_CC2 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC2(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC2IF)); +} + +/** + * @brief Indicate whether Capture/Compare 2 interrupt flag (CC2F) is set (Capture/Compare 2 interrupt is pending). + * @rmtoll SR CC2IF LL_TIM_IsActiveFlag_CC2 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC2(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC2IF) == (TIM_SR_CC2IF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 3 interrupt flag (CC3F). + * @rmtoll SR CC3IF LL_TIM_ClearFlag_CC3 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC3(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC3IF)); +} + +/** + * @brief Indicate whether Capture/Compare 3 interrupt flag (CC3F) is set (Capture/Compare 3 interrupt is pending). + * @rmtoll SR CC3IF LL_TIM_IsActiveFlag_CC3 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC3(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC3IF) == (TIM_SR_CC3IF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 4 interrupt flag (CC4F). + * @rmtoll SR CC4IF LL_TIM_ClearFlag_CC4 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC4(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC4IF)); +} + +/** + * @brief Indicate whether Capture/Compare 4 interrupt flag (CC4F) is set (Capture/Compare 4 interrupt is pending). + * @rmtoll SR CC4IF LL_TIM_IsActiveFlag_CC4 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC4(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC4IF) == (TIM_SR_CC4IF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the commutation interrupt flag (COMIF). + * @rmtoll SR COMIF LL_TIM_ClearFlag_COM + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_COM(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_COMIF)); +} + +/** + * @brief Indicate whether commutation interrupt flag (COMIF) is set (commutation interrupt is pending). + * @rmtoll SR COMIF LL_TIM_IsActiveFlag_COM + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_COM(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_COMIF) == (TIM_SR_COMIF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the trigger interrupt flag (TIF). + * @rmtoll SR TIF LL_TIM_ClearFlag_TRIG + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_TRIG(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_TIF)); +} + +/** + * @brief Indicate whether trigger interrupt flag (TIF) is set (trigger interrupt is pending). + * @rmtoll SR TIF LL_TIM_IsActiveFlag_TRIG + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_TRIG(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_TIF) == (TIM_SR_TIF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the break interrupt flag (BIF). 
+ * @rmtoll SR BIF LL_TIM_ClearFlag_BRK + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_BRK(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_BIF)); +} + +/** + * @brief Indicate whether break interrupt flag (BIF) is set (break interrupt is pending). + * @rmtoll SR BIF LL_TIM_IsActiveFlag_BRK + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_BRK(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_BIF) == (TIM_SR_BIF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 1 over-capture interrupt flag (CC1OF). + * @rmtoll SR CC1OF LL_TIM_ClearFlag_CC1OVR + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC1OVR(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC1OF)); +} + +/** + * @brief Indicate whether Capture/Compare 1 over-capture interrupt flag (CC1OF) is set + * (Capture/Compare 1 interrupt is pending). + * @rmtoll SR CC1OF LL_TIM_IsActiveFlag_CC1OVR + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC1OVR(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC1OF) == (TIM_SR_CC1OF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 2 over-capture interrupt flag (CC2OF). + * @rmtoll SR CC2OF LL_TIM_ClearFlag_CC2OVR + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC2OVR(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC2OF)); +} + +/** + * @brief Indicate whether Capture/Compare 2 over-capture interrupt flag (CC2OF) is set + * (Capture/Compare 2 over-capture interrupt is pending). + * @rmtoll SR CC2OF LL_TIM_IsActiveFlag_CC2OVR + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC2OVR(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC2OF) == (TIM_SR_CC2OF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 3 over-capture interrupt flag (CC3OF). + * @rmtoll SR CC3OF LL_TIM_ClearFlag_CC3OVR + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC3OVR(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC3OF)); +} + +/** + * @brief Indicate whether Capture/Compare 3 over-capture interrupt flag (CC3OF) is set + * (Capture/Compare 3 over-capture interrupt is pending). + * @rmtoll SR CC3OF LL_TIM_IsActiveFlag_CC3OVR + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC3OVR(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC3OF) == (TIM_SR_CC3OF)) ? 1UL : 0UL); +} + +/** + * @brief Clear the Capture/Compare 4 over-capture interrupt flag (CC4OF). + * @rmtoll SR CC4OF LL_TIM_ClearFlag_CC4OVR + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_ClearFlag_CC4OVR(TIM_TypeDef *TIMx) +{ + WRITE_REG(TIMx->SR, ~(TIM_SR_CC4OF)); +} + +/** + * @brief Indicate whether Capture/Compare 4 over-capture interrupt flag (CC4OF) is set + * (Capture/Compare 4 over-capture interrupt is pending). + * @rmtoll SR CC4OF LL_TIM_IsActiveFlag_CC4OVR + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsActiveFlag_CC4OVR(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->SR, TIM_SR_CC4OF) == (TIM_SR_CC4OF)) ? 
1UL : 0UL); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_IT_Management IT-Management + * @{ + */ +/** + * @brief Enable update interrupt (UIE). + * @rmtoll DIER UIE LL_TIM_EnableIT_UPDATE + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_UPDATE(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_UIE); +} + +/** + * @brief Disable update interrupt (UIE). + * @rmtoll DIER UIE LL_TIM_DisableIT_UPDATE + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_UPDATE(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_UIE); +} + +/** + * @brief Indicates whether the update interrupt (UIE) is enabled. + * @rmtoll DIER UIE LL_TIM_IsEnabledIT_UPDATE + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_UPDATE(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_UIE) == (TIM_DIER_UIE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 1 interrupt (CC1IE). + * @rmtoll DIER CC1IE LL_TIM_EnableIT_CC1 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_CC1(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC1IE); +} + +/** + * @brief Disable capture/compare 1 interrupt (CC1IE). + * @rmtoll DIER CC1IE LL_TIM_DisableIT_CC1 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_CC1(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC1IE); +} + +/** + * @brief Indicates whether the capture/compare 1 interrupt (CC1IE) is enabled. + * @rmtoll DIER CC1IE LL_TIM_IsEnabledIT_CC1 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_CC1(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC1IE) == (TIM_DIER_CC1IE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 2 interrupt (CC2IE). + * @rmtoll DIER CC2IE LL_TIM_EnableIT_CC2 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_CC2(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC2IE); +} + +/** + * @brief Disable capture/compare 2 interrupt (CC2IE). + * @rmtoll DIER CC2IE LL_TIM_DisableIT_CC2 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_CC2(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC2IE); +} + +/** + * @brief Indicates whether the capture/compare 2 interrupt (CC2IE) is enabled. + * @rmtoll DIER CC2IE LL_TIM_IsEnabledIT_CC2 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_CC2(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC2IE) == (TIM_DIER_CC2IE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 3 interrupt (CC3IE). + * @rmtoll DIER CC3IE LL_TIM_EnableIT_CC3 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_CC3(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC3IE); +} + +/** + * @brief Disable capture/compare 3 interrupt (CC3IE). + * @rmtoll DIER CC3IE LL_TIM_DisableIT_CC3 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_CC3(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC3IE); +} + +/** + * @brief Indicates whether the capture/compare 3 interrupt (CC3IE) is enabled. + * @rmtoll DIER CC3IE LL_TIM_IsEnabledIT_CC3 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). 
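+ * @note Editor's illustrative sketch (not part of the original ST header): a common
+ *       interrupt-service pattern is to check both the status flag and its enable bit
+ *       before clearing and handling the event, e.g. for capture/compare 1 of TIM3:
+ * @code
+ *   if (LL_TIM_IsActiveFlag_CC1(TIM3) && LL_TIM_IsEnabledIT_CC1(TIM3))
+ *   {
+ *     LL_TIM_ClearFlag_CC1(TIM3);
+ *     // ... process the capture/compare event ...
+ *   }
+ * @endcode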
+ */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_CC3(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC3IE) == (TIM_DIER_CC3IE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 4 interrupt (CC4IE). + * @rmtoll DIER CC4IE LL_TIM_EnableIT_CC4 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_CC4(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC4IE); +} + +/** + * @brief Disable capture/compare 4 interrupt (CC4IE). + * @rmtoll DIER CC4IE LL_TIM_DisableIT_CC4 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_CC4(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC4IE); +} + +/** + * @brief Indicates whether the capture/compare 4 interrupt (CC4IE) is enabled. + * @rmtoll DIER CC4IE LL_TIM_IsEnabledIT_CC4 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_CC4(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC4IE) == (TIM_DIER_CC4IE)) ? 1UL : 0UL); +} + +/** + * @brief Enable commutation interrupt (COMIE). + * @rmtoll DIER COMIE LL_TIM_EnableIT_COM + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_COM(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_COMIE); +} + +/** + * @brief Disable commutation interrupt (COMIE). + * @rmtoll DIER COMIE LL_TIM_DisableIT_COM + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_COM(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_COMIE); +} + +/** + * @brief Indicates whether the commutation interrupt (COMIE) is enabled. + * @rmtoll DIER COMIE LL_TIM_IsEnabledIT_COM + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_COM(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_COMIE) == (TIM_DIER_COMIE)) ? 1UL : 0UL); +} + +/** + * @brief Enable trigger interrupt (TIE). + * @rmtoll DIER TIE LL_TIM_EnableIT_TRIG + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_TRIG(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_TIE); +} + +/** + * @brief Disable trigger interrupt (TIE). + * @rmtoll DIER TIE LL_TIM_DisableIT_TRIG + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_TRIG(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_TIE); +} + +/** + * @brief Indicates whether the trigger interrupt (TIE) is enabled. + * @rmtoll DIER TIE LL_TIM_IsEnabledIT_TRIG + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_TRIG(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_TIE) == (TIM_DIER_TIE)) ? 1UL : 0UL); +} + +/** + * @brief Enable break interrupt (BIE). + * @rmtoll DIER BIE LL_TIM_EnableIT_BRK + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableIT_BRK(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_BIE); +} + +/** + * @brief Disable break interrupt (BIE). + * @rmtoll DIER BIE LL_TIM_DisableIT_BRK + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableIT_BRK(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_BIE); +} + +/** + * @brief Indicates whether the break interrupt (BIE) is enabled. + * @rmtoll DIER BIE LL_TIM_IsEnabledIT_BRK + * @param TIMx Timer instance + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledIT_BRK(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_BIE) == (TIM_DIER_BIE)) ? 1UL : 0UL); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_DMA_Management DMA Management + * @{ + */ +/** + * @brief Enable update DMA request (UDE). + * @rmtoll DIER UDE LL_TIM_EnableDMAReq_UPDATE + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableDMAReq_UPDATE(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_UDE); +} + +/** + * @brief Disable update DMA request (UDE). + * @rmtoll DIER UDE LL_TIM_DisableDMAReq_UPDATE + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableDMAReq_UPDATE(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_UDE); +} + +/** + * @brief Indicates whether the update DMA request (UDE) is enabled. + * @rmtoll DIER UDE LL_TIM_IsEnabledDMAReq_UPDATE + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledDMAReq_UPDATE(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_UDE) == (TIM_DIER_UDE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 1 DMA request (CC1DE). + * @rmtoll DIER CC1DE LL_TIM_EnableDMAReq_CC1 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableDMAReq_CC1(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC1DE); +} + +/** + * @brief Disable capture/compare 1 DMA request (CC1DE). + * @rmtoll DIER CC1DE LL_TIM_DisableDMAReq_CC1 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableDMAReq_CC1(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC1DE); +} + +/** + * @brief Indicates whether the capture/compare 1 DMA request (CC1DE) is enabled. + * @rmtoll DIER CC1DE LL_TIM_IsEnabledDMAReq_CC1 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledDMAReq_CC1(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC1DE) == (TIM_DIER_CC1DE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 2 DMA request (CC2DE). + * @rmtoll DIER CC2DE LL_TIM_EnableDMAReq_CC2 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableDMAReq_CC2(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC2DE); +} + +/** + * @brief Disable capture/compare 2 DMA request (CC2DE). + * @rmtoll DIER CC2DE LL_TIM_DisableDMAReq_CC2 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableDMAReq_CC2(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC2DE); +} + +/** + * @brief Indicates whether the capture/compare 2 DMA request (CC2DE) is enabled. + * @rmtoll DIER CC2DE LL_TIM_IsEnabledDMAReq_CC2 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledDMAReq_CC2(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC2DE) == (TIM_DIER_CC2DE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 3 DMA request (CC3DE). + * @rmtoll DIER CC3DE LL_TIM_EnableDMAReq_CC3 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableDMAReq_CC3(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC3DE); +} + +/** + * @brief Disable capture/compare 3 DMA request (CC3DE). 
+ * @rmtoll DIER CC3DE LL_TIM_DisableDMAReq_CC3 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableDMAReq_CC3(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC3DE); +} + +/** + * @brief Indicates whether the capture/compare 3 DMA request (CC3DE) is enabled. + * @rmtoll DIER CC3DE LL_TIM_IsEnabledDMAReq_CC3 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledDMAReq_CC3(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC3DE) == (TIM_DIER_CC3DE)) ? 1UL : 0UL); +} + +/** + * @brief Enable capture/compare 4 DMA request (CC4DE). + * @rmtoll DIER CC4DE LL_TIM_EnableDMAReq_CC4 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableDMAReq_CC4(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_CC4DE); +} + +/** + * @brief Disable capture/compare 4 DMA request (CC4DE). + * @rmtoll DIER CC4DE LL_TIM_DisableDMAReq_CC4 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableDMAReq_CC4(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_CC4DE); +} + +/** + * @brief Indicates whether the capture/compare 4 DMA request (CC4DE) is enabled. + * @rmtoll DIER CC4DE LL_TIM_IsEnabledDMAReq_CC4 + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledDMAReq_CC4(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_CC4DE) == (TIM_DIER_CC4DE)) ? 1UL : 0UL); +} + +/** + * @brief Enable commutation DMA request (COMDE). + * @rmtoll DIER COMDE LL_TIM_EnableDMAReq_COM + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableDMAReq_COM(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_COMDE); +} + +/** + * @brief Disable commutation DMA request (COMDE). + * @rmtoll DIER COMDE LL_TIM_DisableDMAReq_COM + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableDMAReq_COM(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_COMDE); +} + +/** + * @brief Indicates whether the commutation DMA request (COMDE) is enabled. + * @rmtoll DIER COMDE LL_TIM_IsEnabledDMAReq_COM + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledDMAReq_COM(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_COMDE) == (TIM_DIER_COMDE)) ? 1UL : 0UL); +} + +/** + * @brief Enable trigger interrupt (TDE). + * @rmtoll DIER TDE LL_TIM_EnableDMAReq_TRIG + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_EnableDMAReq_TRIG(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->DIER, TIM_DIER_TDE); +} + +/** + * @brief Disable trigger interrupt (TDE). + * @rmtoll DIER TDE LL_TIM_DisableDMAReq_TRIG + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_DisableDMAReq_TRIG(TIM_TypeDef *TIMx) +{ + CLEAR_BIT(TIMx->DIER, TIM_DIER_TDE); +} + +/** + * @brief Indicates whether the trigger interrupt (TDE) is enabled. + * @rmtoll DIER TDE LL_TIM_IsEnabledDMAReq_TRIG + * @param TIMx Timer instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_TIM_IsEnabledDMAReq_TRIG(const TIM_TypeDef *TIMx) +{ + return ((READ_BIT(TIMx->DIER, TIM_DIER_TDE) == (TIM_DIER_TDE)) ? 1UL : 0UL); +} + +/** + * @} + */ + +/** @defgroup TIM_LL_EF_EVENT_Management EVENT-Management + * @{ + */ +/** + * @brief Generate an update event. 
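+ * @note Editor's illustrative sketch (not part of the original ST header): a software
+ *       update event is typically generated to transfer freshly written prescaler and
+ *       auto-reload values into the active registers before the counter is started
+ *       (LL_TIM_SetPrescaler()/LL_TIM_SetAutoReload() are assumed to be defined earlier
+ *       in this header):
+ * @code
+ *   LL_TIM_SetPrescaler(TIM3, 8399U);
+ *   LL_TIM_SetAutoReload(TIM3, 9999U);
+ *   LL_TIM_GenerateEvent_UPDATE(TIM3);  // load the new values immediately
+ * @endcode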
+ * @rmtoll EGR UG LL_TIM_GenerateEvent_UPDATE + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_UPDATE(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_UG); +} + +/** + * @brief Generate Capture/Compare 1 event. + * @rmtoll EGR CC1G LL_TIM_GenerateEvent_CC1 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_CC1(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_CC1G); +} + +/** + * @brief Generate Capture/Compare 2 event. + * @rmtoll EGR CC2G LL_TIM_GenerateEvent_CC2 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_CC2(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_CC2G); +} + +/** + * @brief Generate Capture/Compare 3 event. + * @rmtoll EGR CC3G LL_TIM_GenerateEvent_CC3 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_CC3(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_CC3G); +} + +/** + * @brief Generate Capture/Compare 4 event. + * @rmtoll EGR CC4G LL_TIM_GenerateEvent_CC4 + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_CC4(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_CC4G); +} + +/** + * @brief Generate commutation event. + * @rmtoll EGR COMG LL_TIM_GenerateEvent_COM + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_COM(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_COMG); +} + +/** + * @brief Generate trigger event. + * @rmtoll EGR TG LL_TIM_GenerateEvent_TRIG + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_TRIG(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_TG); +} + +/** + * @brief Generate break event. 
+ * @rmtoll EGR BG LL_TIM_GenerateEvent_BRK + * @param TIMx Timer instance + * @retval None + */ +__STATIC_INLINE void LL_TIM_GenerateEvent_BRK(TIM_TypeDef *TIMx) +{ + SET_BIT(TIMx->EGR, TIM_EGR_BG); +} + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup TIM_LL_EF_Init Initialisation and deinitialisation functions + * @{ + */ + +ErrorStatus LL_TIM_DeInit(const TIM_TypeDef *TIMx); +void LL_TIM_StructInit(LL_TIM_InitTypeDef *TIM_InitStruct); +ErrorStatus LL_TIM_Init(TIM_TypeDef *TIMx, const LL_TIM_InitTypeDef *TIM_InitStruct); +void LL_TIM_OC_StructInit(LL_TIM_OC_InitTypeDef *TIM_OC_InitStruct); +ErrorStatus LL_TIM_OC_Init(TIM_TypeDef *TIMx, uint32_t Channel, const LL_TIM_OC_InitTypeDef *TIM_OC_InitStruct); +void LL_TIM_IC_StructInit(LL_TIM_IC_InitTypeDef *TIM_ICInitStruct); +ErrorStatus LL_TIM_IC_Init(TIM_TypeDef *TIMx, uint32_t Channel, const LL_TIM_IC_InitTypeDef *TIM_IC_InitStruct); +void LL_TIM_ENCODER_StructInit(LL_TIM_ENCODER_InitTypeDef *TIM_EncoderInitStruct); +ErrorStatus LL_TIM_ENCODER_Init(TIM_TypeDef *TIMx, const LL_TIM_ENCODER_InitTypeDef *TIM_EncoderInitStruct); +void LL_TIM_HALLSENSOR_StructInit(LL_TIM_HALLSENSOR_InitTypeDef *TIM_HallSensorInitStruct); +ErrorStatus LL_TIM_HALLSENSOR_Init(TIM_TypeDef *TIMx, const LL_TIM_HALLSENSOR_InitTypeDef *TIM_HallSensorInitStruct); +void LL_TIM_BDTR_StructInit(LL_TIM_BDTR_InitTypeDef *TIM_BDTRInitStruct); +ErrorStatus LL_TIM_BDTR_Init(TIM_TypeDef *TIMx, const LL_TIM_BDTR_InitTypeDef *TIM_BDTRInitStruct); +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* TIM1 || TIM2 || TIM3 || TIM4 || TIM5 || TIM6 || TIM7 || TIM8 || TIM9 || TIM10 || TIM11 || TIM12 || TIM13 || TIM14 */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_TIM_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_usart.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_usart.h new file mode 100644 index 00000000..ed83b6c6 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_usart.h @@ -0,0 +1,2521 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_usart.h + * @author MCD Application Team + * @brief Header file of USART LL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_USART_H +#define __STM32F4xx_LL_USART_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (USART1) || defined (USART2) || defined (USART3) || defined (USART6) || defined (UART4) || defined (UART5) || defined (UART7) || defined (UART8) || defined (UART9) || defined (UART10) + +/** @defgroup USART_LL USART + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup USART_LL_Private_Constants USART Private Constants + * @{ + */ + +/* Defines used for the bit position in the register and perform offsets*/ +#define USART_POSITION_GTPR_GT USART_GTPR_GT_Pos +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup USART_LL_Private_Macros USART Private Macros + * @{ + */ +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ + +/* Exported types ------------------------------------------------------------*/ +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup USART_LL_ES_INIT USART Exported Init structures + * @{ + */ + +/** + * @brief LL USART Init Structure definition + */ +typedef struct +{ + uint32_t BaudRate; /*!< This field defines expected Usart communication baud rate. + + This feature can be modified afterwards using unitary function @ref LL_USART_SetBaudRate().*/ + + uint32_t DataWidth; /*!< Specifies the number of data bits transmitted or received in a frame. + This parameter can be a value of @ref USART_LL_EC_DATAWIDTH. + + This feature can be modified afterwards using unitary function @ref LL_USART_SetDataWidth().*/ + + uint32_t StopBits; /*!< Specifies the number of stop bits transmitted. + This parameter can be a value of @ref USART_LL_EC_STOPBITS. + + This feature can be modified afterwards using unitary function @ref LL_USART_SetStopBitsLength().*/ + + uint32_t Parity; /*!< Specifies the parity mode. + This parameter can be a value of @ref USART_LL_EC_PARITY. + + This feature can be modified afterwards using unitary function @ref LL_USART_SetParity().*/ + + uint32_t TransferDirection; /*!< Specifies whether the Receive and/or Transmit mode is enabled or disabled. + This parameter can be a value of @ref USART_LL_EC_DIRECTION. + + This feature can be modified afterwards using unitary function @ref LL_USART_SetTransferDirection().*/ + + uint32_t HardwareFlowControl; /*!< Specifies whether the hardware flow control mode is enabled or disabled. + This parameter can be a value of @ref USART_LL_EC_HWCONTROL. + + This feature can be modified afterwards using unitary function @ref LL_USART_SetHWFlowCtrl().*/ + + uint32_t OverSampling; /*!< Specifies whether USART oversampling mode is 16 or 8. + This parameter can be a value of @ref USART_LL_EC_OVERSAMPLING. 
+ + This feature can be modified afterwards using unitary function @ref LL_USART_SetOverSampling().*/ + +} LL_USART_InitTypeDef; + +/** + * @brief LL USART Clock Init Structure definition + */ +typedef struct +{ + uint32_t ClockOutput; /*!< Specifies whether the USART clock is enabled or disabled. + This parameter can be a value of @ref USART_LL_EC_CLOCK. + + USART HW configuration can be modified afterwards using unitary functions + @ref LL_USART_EnableSCLKOutput() or @ref LL_USART_DisableSCLKOutput(). + For more details, refer to description of this function. */ + + uint32_t ClockPolarity; /*!< Specifies the steady state of the serial clock. + This parameter can be a value of @ref USART_LL_EC_POLARITY. + + USART HW configuration can be modified afterwards using unitary functions @ref LL_USART_SetClockPolarity(). + For more details, refer to description of this function. */ + + uint32_t ClockPhase; /*!< Specifies the clock transition on which the bit capture is made. + This parameter can be a value of @ref USART_LL_EC_PHASE. + + USART HW configuration can be modified afterwards using unitary functions @ref LL_USART_SetClockPhase(). + For more details, refer to description of this function. */ + + uint32_t LastBitClockPulse; /*!< Specifies whether the clock pulse corresponding to the last transmitted + data bit (MSB) has to be output on the SCLK pin in synchronous mode. + This parameter can be a value of @ref USART_LL_EC_LASTCLKPULSE. + + USART HW configuration can be modified afterwards using unitary functions @ref LL_USART_SetLastClkPulseOutput(). + For more details, refer to description of this function. */ + +} LL_USART_ClockInitTypeDef; + +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup USART_LL_Exported_Constants USART Exported Constants + * @{ + */ + +/** @defgroup USART_LL_EC_GET_FLAG Get Flags Defines + * @brief Flags defines which can be used with LL_USART_ReadReg function + * @{ + */ +#define LL_USART_SR_PE USART_SR_PE /*!< Parity error flag */ +#define LL_USART_SR_FE USART_SR_FE /*!< Framing error flag */ +#define LL_USART_SR_NE USART_SR_NE /*!< Noise detected flag */ +#define LL_USART_SR_ORE USART_SR_ORE /*!< Overrun error flag */ +#define LL_USART_SR_IDLE USART_SR_IDLE /*!< Idle line detected flag */ +#define LL_USART_SR_RXNE USART_SR_RXNE /*!< Read data register not empty flag */ +#define LL_USART_SR_TC USART_SR_TC /*!< Transmission complete flag */ +#define LL_USART_SR_TXE USART_SR_TXE /*!< Transmit data register empty flag */ +#define LL_USART_SR_LBD USART_SR_LBD /*!< LIN break detection flag */ +#define LL_USART_SR_CTS USART_SR_CTS /*!< CTS flag */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_IT IT Defines + * @brief IT defines which can be used with LL_USART_ReadReg and LL_USART_WriteReg functions + * @{ + */ +#define LL_USART_CR1_IDLEIE USART_CR1_IDLEIE /*!< IDLE interrupt enable */ +#define LL_USART_CR1_RXNEIE USART_CR1_RXNEIE /*!< Read data register not empty interrupt enable */ +#define LL_USART_CR1_TCIE USART_CR1_TCIE /*!< Transmission complete interrupt enable */ +#define LL_USART_CR1_TXEIE USART_CR1_TXEIE /*!< Transmit data register empty interrupt enable */ +#define LL_USART_CR1_PEIE USART_CR1_PEIE /*!< Parity error */ +#define LL_USART_CR2_LBDIE USART_CR2_LBDIE /*!< LIN break detection interrupt enable */ +#define LL_USART_CR3_EIE USART_CR3_EIE /*!< Error interrupt enable */ +#define LL_USART_CR3_CTSIE USART_CR3_CTSIE /*!< CTS interrupt enable */ +/** + * @} + */ 
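+
+/* Editor's illustrative note (not part of the original ST header): the SR flag and CRx
+ * interrupt-enable defines above are intended for the generic LL_USART_ReadReg()/
+ * LL_USART_WriteReg() macros declared further down in this file, e.g. a blocking polled
+ * receive of one byte (USART2 instance assumed to exist on the target device):
+ *
+ *   while ((LL_USART_ReadReg(USART2, SR) & LL_USART_SR_RXNE) == 0U) { }
+ *   uint8_t byte = (uint8_t)LL_USART_ReadReg(USART2, DR);
+ */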
+ +/** @defgroup USART_LL_EC_DIRECTION Communication Direction + * @{ + */ +#define LL_USART_DIRECTION_NONE 0x00000000U /*!< Transmitter and Receiver are disabled */ +#define LL_USART_DIRECTION_RX USART_CR1_RE /*!< Transmitter is disabled and Receiver is enabled */ +#define LL_USART_DIRECTION_TX USART_CR1_TE /*!< Transmitter is enabled and Receiver is disabled */ +#define LL_USART_DIRECTION_TX_RX (USART_CR1_TE |USART_CR1_RE) /*!< Transmitter and Receiver are enabled */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_PARITY Parity Control + * @{ + */ +#define LL_USART_PARITY_NONE 0x00000000U /*!< Parity control disabled */ +#define LL_USART_PARITY_EVEN USART_CR1_PCE /*!< Parity control enabled and Even Parity is selected */ +#define LL_USART_PARITY_ODD (USART_CR1_PCE | USART_CR1_PS) /*!< Parity control enabled and Odd Parity is selected */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_WAKEUP Wakeup + * @{ + */ +#define LL_USART_WAKEUP_IDLELINE 0x00000000U /*!< USART wake up from Mute mode on Idle Line */ +#define LL_USART_WAKEUP_ADDRESSMARK USART_CR1_WAKE /*!< USART wake up from Mute mode on Address Mark */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_DATAWIDTH Datawidth + * @{ + */ +#define LL_USART_DATAWIDTH_8B 0x00000000U /*!< 8 bits word length : Start bit, 8 data bits, n stop bits */ +#define LL_USART_DATAWIDTH_9B USART_CR1_M /*!< 9 bits word length : Start bit, 9 data bits, n stop bits */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_OVERSAMPLING Oversampling + * @{ + */ +#define LL_USART_OVERSAMPLING_16 0x00000000U /*!< Oversampling by 16 */ +#define LL_USART_OVERSAMPLING_8 USART_CR1_OVER8 /*!< Oversampling by 8 */ +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup USART_LL_EC_CLOCK Clock Signal + * @{ + */ + +#define LL_USART_CLOCK_DISABLE 0x00000000U /*!< Clock signal not provided */ +#define LL_USART_CLOCK_ENABLE USART_CR2_CLKEN /*!< Clock signal provided */ +/** + * @} + */ +#endif /*USE_FULL_LL_DRIVER*/ + +/** @defgroup USART_LL_EC_LASTCLKPULSE Last Clock Pulse + * @{ + */ +#define LL_USART_LASTCLKPULSE_NO_OUTPUT 0x00000000U /*!< The clock pulse of the last data bit is not output to the SCLK pin */ +#define LL_USART_LASTCLKPULSE_OUTPUT USART_CR2_LBCL /*!< The clock pulse of the last data bit is output to the SCLK pin */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_PHASE Clock Phase + * @{ + */ +#define LL_USART_PHASE_1EDGE 0x00000000U /*!< The first clock transition is the first data capture edge */ +#define LL_USART_PHASE_2EDGE USART_CR2_CPHA /*!< The second clock transition is the first data capture edge */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_POLARITY Clock Polarity + * @{ + */ +#define LL_USART_POLARITY_LOW 0x00000000U /*!< Steady low value on SCLK pin outside transmission window*/ +#define LL_USART_POLARITY_HIGH USART_CR2_CPOL /*!< Steady high value on SCLK pin outside transmission window */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_STOPBITS Stop Bits + * @{ + */ +#define LL_USART_STOPBITS_0_5 USART_CR2_STOP_0 /*!< 0.5 stop bit */ +#define LL_USART_STOPBITS_1 0x00000000U /*!< 1 stop bit */ +#define LL_USART_STOPBITS_1_5 (USART_CR2_STOP_0 | USART_CR2_STOP_1) /*!< 1.5 stop bits */ +#define LL_USART_STOPBITS_2 USART_CR2_STOP_1 /*!< 2 stop bits */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_HWCONTROL Hardware Control + * @{ + */ +#define LL_USART_HWCONTROL_NONE 0x00000000U /*!< CTS and RTS hardware flow control disabled */ +#define LL_USART_HWCONTROL_RTS USART_CR3_RTSE /*!< RTS output enabled, data is only requested when there is space in the 
receive buffer */ +#define LL_USART_HWCONTROL_CTS USART_CR3_CTSE /*!< CTS mode enabled, data is only transmitted when the nCTS input is asserted (tied to 0) */ +#define LL_USART_HWCONTROL_RTS_CTS (USART_CR3_RTSE | USART_CR3_CTSE) /*!< CTS and RTS hardware flow control enabled */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_IRDA_POWER IrDA Power + * @{ + */ +#define LL_USART_IRDA_POWER_NORMAL 0x00000000U /*!< IrDA normal power mode */ +#define LL_USART_IRDA_POWER_LOW USART_CR3_IRLP /*!< IrDA low power mode */ +/** + * @} + */ + +/** @defgroup USART_LL_EC_LINBREAK_DETECT LIN Break Detection Length + * @{ + */ +#define LL_USART_LINBREAK_DETECT_10B 0x00000000U /*!< 10-bit break detection method selected */ +#define LL_USART_LINBREAK_DETECT_11B USART_CR2_LBDL /*!< 11-bit break detection method selected */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup USART_LL_Exported_Macros USART Exported Macros + * @{ + */ + +/** @defgroup USART_LL_EM_WRITE_READ Common Write and read registers Macros + * @{ + */ + +/** + * @brief Write a value in USART register + * @param __INSTANCE__ USART Instance + * @param __REG__ Register to be written + * @param __VALUE__ Value to be written in the register + * @retval None + */ +#define LL_USART_WriteReg(__INSTANCE__, __REG__, __VALUE__) WRITE_REG(__INSTANCE__->__REG__, (__VALUE__)) + +/** + * @brief Read a value in USART register + * @param __INSTANCE__ USART Instance + * @param __REG__ Register to be read + * @retval Register value + */ +#define LL_USART_ReadReg(__INSTANCE__, __REG__) READ_REG(__INSTANCE__->__REG__) +/** + * @} + */ + +/** @defgroup USART_LL_EM_Exported_Macros_Helper Exported Macros Helper + * @{ + */ + +/** + * @brief Compute USARTDIV value according to Peripheral Clock and + * expected Baud Rate in 8 bits sampling mode (32 bits value of USARTDIV is returned) + * @param __PERIPHCLK__ Peripheral Clock frequency used for USART instance + * @param __BAUDRATE__ Baud rate value to achieve + * @retval USARTDIV value to be used for BRR register filling in OverSampling_8 case + */ +#define __LL_USART_DIV_SAMPLING8_100(__PERIPHCLK__, __BAUDRATE__) ((uint32_t)((((uint64_t)(__PERIPHCLK__))*25)/(2*((uint64_t)(__BAUDRATE__))))) +#define __LL_USART_DIVMANT_SAMPLING8(__PERIPHCLK__, __BAUDRATE__) (__LL_USART_DIV_SAMPLING8_100((__PERIPHCLK__), (__BAUDRATE__))/100) +#define __LL_USART_DIVFRAQ_SAMPLING8(__PERIPHCLK__, __BAUDRATE__) ((((__LL_USART_DIV_SAMPLING8_100((__PERIPHCLK__), (__BAUDRATE__)) - (__LL_USART_DIVMANT_SAMPLING8((__PERIPHCLK__), (__BAUDRATE__)) * 100)) * 8)\ + + 50) / 100) +/* UART BRR = mantissa + overflow + fraction + = (UART DIVMANT << 4) + ((UART DIVFRAQ & 0xF8) << 1) + (UART DIVFRAQ & 0x07) */ +#define __LL_USART_DIV_SAMPLING8(__PERIPHCLK__, __BAUDRATE__) (((__LL_USART_DIVMANT_SAMPLING8((__PERIPHCLK__), (__BAUDRATE__)) << 4) + \ + ((__LL_USART_DIVFRAQ_SAMPLING8((__PERIPHCLK__), (__BAUDRATE__)) & 0xF8) << 1)) + \ + (__LL_USART_DIVFRAQ_SAMPLING8((__PERIPHCLK__), (__BAUDRATE__)) & 0x07)) + +/** + * @brief Compute USARTDIV value according to Peripheral Clock and + * expected Baud Rate in 16 bits sampling mode (32 bits value of USARTDIV is returned) + * @param __PERIPHCLK__ Peripheral Clock frequency used for USART instance + * @param __BAUDRATE__ Baud rate value to achieve + * @retval USARTDIV value to be used for BRR register filling in OverSampling_16 case + */ +#define __LL_USART_DIV_SAMPLING16_100(__PERIPHCLK__, __BAUDRATE__) 
((uint32_t)((((uint64_t)(__PERIPHCLK__))*25)/(4*((uint64_t)(__BAUDRATE__))))) +#define __LL_USART_DIVMANT_SAMPLING16(__PERIPHCLK__, __BAUDRATE__) (__LL_USART_DIV_SAMPLING16_100((__PERIPHCLK__), (__BAUDRATE__))/100) +#define __LL_USART_DIVFRAQ_SAMPLING16(__PERIPHCLK__, __BAUDRATE__) ((((__LL_USART_DIV_SAMPLING16_100((__PERIPHCLK__), (__BAUDRATE__)) - (__LL_USART_DIVMANT_SAMPLING16((__PERIPHCLK__), (__BAUDRATE__)) * 100)) * 16)\ + + 50) / 100) +/* USART BRR = mantissa + overflow + fraction + = (USART DIVMANT << 4) + (USART DIVFRAQ & 0xF0) + (USART DIVFRAQ & 0x0F) */ +#define __LL_USART_DIV_SAMPLING16(__PERIPHCLK__, __BAUDRATE__) (((__LL_USART_DIVMANT_SAMPLING16((__PERIPHCLK__), (__BAUDRATE__)) << 4) + \ + (__LL_USART_DIVFRAQ_SAMPLING16((__PERIPHCLK__), (__BAUDRATE__)) & 0xF0)) + \ + (__LL_USART_DIVFRAQ_SAMPLING16((__PERIPHCLK__), (__BAUDRATE__)) & 0x0F)) + +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup USART_LL_Exported_Functions USART Exported Functions + * @{ + */ + +/** @defgroup USART_LL_EF_Configuration Configuration functions + * @{ + */ + +/** + * @brief USART Enable + * @rmtoll CR1 UE LL_USART_Enable + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_Enable(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR1, USART_CR1_UE); +} + +/** + * @brief USART Disable (all USART prescalers and outputs are disabled) + * @note When USART is disabled, USART prescalers and outputs are stopped immediately, + * and current operations are discarded. The configuration of the USART is kept, but all the status + * flags, in the USARTx_SR are set to their default values. + * @rmtoll CR1 UE LL_USART_Disable + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_Disable(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR1, USART_CR1_UE); +} + +/** + * @brief Indicate if USART is enabled + * @rmtoll CR1 UE LL_USART_IsEnabled + * @param USARTx USART Instance + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_USART_IsEnabled(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_UE) == (USART_CR1_UE)); +} + +/** + * @brief Receiver Enable (Receiver is enabled and begins searching for a start bit) + * @rmtoll CR1 RE LL_USART_EnableDirectionRx + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableDirectionRx(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR1, USART_CR1_RE); +} + +/** + * @brief Receiver Disable + * @rmtoll CR1 RE LL_USART_DisableDirectionRx + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableDirectionRx(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR1, USART_CR1_RE); +} + +/** + * @brief Transmitter Enable + * @rmtoll CR1 TE LL_USART_EnableDirectionTx + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableDirectionTx(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR1, USART_CR1_TE); +} + +/** + * @brief Transmitter Disable + * @rmtoll CR1 TE LL_USART_DisableDirectionTx + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableDirectionTx(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR1, USART_CR1_TE); +} + +/** + * @brief Configure simultaneously enabled/disabled states + * of Transmitter and Receiver + * @rmtoll CR1 RE LL_USART_SetTransferDirection\n + * CR1 TE LL_USART_SetTransferDirection + * @param USARTx USART Instance + * @param TransferDirection This parameter can be one of the following values: + * @arg @ref LL_USART_DIRECTION_NONE + * @arg @ref LL_USART_DIRECTION_RX + * @arg @ref LL_USART_DIRECTION_TX + * @arg @ref LL_USART_DIRECTION_TX_RX + * @retval None + */ +__STATIC_INLINE void LL_USART_SetTransferDirection(USART_TypeDef *USARTx, uint32_t TransferDirection) +{ + ATOMIC_MODIFY_REG(USARTx->CR1, USART_CR1_RE | USART_CR1_TE, TransferDirection); +} + +/** + * @brief Return enabled/disabled states of Transmitter and Receiver + * @rmtoll CR1 RE LL_USART_GetTransferDirection\n + * CR1 TE LL_USART_GetTransferDirection + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_DIRECTION_NONE + * @arg @ref LL_USART_DIRECTION_RX + * @arg @ref LL_USART_DIRECTION_TX + * @arg @ref LL_USART_DIRECTION_TX_RX + */ +__STATIC_INLINE uint32_t LL_USART_GetTransferDirection(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR1, USART_CR1_RE | USART_CR1_TE)); +} + +/** + * @brief Configure Parity (enabled/disabled and parity mode if enabled). + * @note This function selects if hardware parity control (generation and detection) is enabled or disabled. + * When the parity control is enabled (Odd or Even), computed parity bit is inserted at the MSB position + * (9th or 8th bit depending on data width) and parity is checked on the received data. 
+ * @rmtoll CR1 PS LL_USART_SetParity\n + * CR1 PCE LL_USART_SetParity + * @param USARTx USART Instance + * @param Parity This parameter can be one of the following values: + * @arg @ref LL_USART_PARITY_NONE + * @arg @ref LL_USART_PARITY_EVEN + * @arg @ref LL_USART_PARITY_ODD + * @retval None + */ +__STATIC_INLINE void LL_USART_SetParity(USART_TypeDef *USARTx, uint32_t Parity) +{ + MODIFY_REG(USARTx->CR1, USART_CR1_PS | USART_CR1_PCE, Parity); +} + +/** + * @brief Return Parity configuration (enabled/disabled and parity mode if enabled) + * @rmtoll CR1 PS LL_USART_GetParity\n + * CR1 PCE LL_USART_GetParity + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_PARITY_NONE + * @arg @ref LL_USART_PARITY_EVEN + * @arg @ref LL_USART_PARITY_ODD + */ +__STATIC_INLINE uint32_t LL_USART_GetParity(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR1, USART_CR1_PS | USART_CR1_PCE)); +} + +/** + * @brief Set Receiver Wake Up method from Mute mode. + * @rmtoll CR1 WAKE LL_USART_SetWakeUpMethod + * @param USARTx USART Instance + * @param Method This parameter can be one of the following values: + * @arg @ref LL_USART_WAKEUP_IDLELINE + * @arg @ref LL_USART_WAKEUP_ADDRESSMARK + * @retval None + */ +__STATIC_INLINE void LL_USART_SetWakeUpMethod(USART_TypeDef *USARTx, uint32_t Method) +{ + MODIFY_REG(USARTx->CR1, USART_CR1_WAKE, Method); +} + +/** + * @brief Return Receiver Wake Up method from Mute mode + * @rmtoll CR1 WAKE LL_USART_GetWakeUpMethod + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_WAKEUP_IDLELINE + * @arg @ref LL_USART_WAKEUP_ADDRESSMARK + */ +__STATIC_INLINE uint32_t LL_USART_GetWakeUpMethod(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR1, USART_CR1_WAKE)); +} + +/** + * @brief Set Word length (i.e. nb of data bits, excluding start and stop bits) + * @rmtoll CR1 M LL_USART_SetDataWidth + * @param USARTx USART Instance + * @param DataWidth This parameter can be one of the following values: + * @arg @ref LL_USART_DATAWIDTH_8B + * @arg @ref LL_USART_DATAWIDTH_9B + * @retval None + */ +__STATIC_INLINE void LL_USART_SetDataWidth(USART_TypeDef *USARTx, uint32_t DataWidth) +{ + MODIFY_REG(USARTx->CR1, USART_CR1_M, DataWidth); +} + +/** + * @brief Return Word length (i.e. 
nb of data bits, excluding start and stop bits) + * @rmtoll CR1 M LL_USART_GetDataWidth + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_DATAWIDTH_8B + * @arg @ref LL_USART_DATAWIDTH_9B + */ +__STATIC_INLINE uint32_t LL_USART_GetDataWidth(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR1, USART_CR1_M)); +} + +/** + * @brief Set Oversampling to 8-bit or 16-bit mode + * @rmtoll CR1 OVER8 LL_USART_SetOverSampling + * @param USARTx USART Instance + * @param OverSampling This parameter can be one of the following values: + * @arg @ref LL_USART_OVERSAMPLING_16 + * @arg @ref LL_USART_OVERSAMPLING_8 + * @retval None + */ +__STATIC_INLINE void LL_USART_SetOverSampling(USART_TypeDef *USARTx, uint32_t OverSampling) +{ + MODIFY_REG(USARTx->CR1, USART_CR1_OVER8, OverSampling); +} + +/** + * @brief Return Oversampling mode + * @rmtoll CR1 OVER8 LL_USART_GetOverSampling + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_OVERSAMPLING_16 + * @arg @ref LL_USART_OVERSAMPLING_8 + */ +__STATIC_INLINE uint32_t LL_USART_GetOverSampling(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR1, USART_CR1_OVER8)); +} + +/** + * @brief Configure if Clock pulse of the last data bit is output to the SCLK pin or not + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @rmtoll CR2 LBCL LL_USART_SetLastClkPulseOutput + * @param USARTx USART Instance + * @param LastBitClockPulse This parameter can be one of the following values: + * @arg @ref LL_USART_LASTCLKPULSE_NO_OUTPUT + * @arg @ref LL_USART_LASTCLKPULSE_OUTPUT + * @retval None + */ +__STATIC_INLINE void LL_USART_SetLastClkPulseOutput(USART_TypeDef *USARTx, uint32_t LastBitClockPulse) +{ + MODIFY_REG(USARTx->CR2, USART_CR2_LBCL, LastBitClockPulse); +} + +/** + * @brief Retrieve Clock pulse of the last data bit output configuration + * (Last bit Clock pulse output to the SCLK pin or not) + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @rmtoll CR2 LBCL LL_USART_GetLastClkPulseOutput + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_LASTCLKPULSE_NO_OUTPUT + * @arg @ref LL_USART_LASTCLKPULSE_OUTPUT + */ +__STATIC_INLINE uint32_t LL_USART_GetLastClkPulseOutput(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR2, USART_CR2_LBCL)); +} + +/** + * @brief Select the phase of the clock output on the SCLK pin in synchronous mode + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @rmtoll CR2 CPHA LL_USART_SetClockPhase + * @param USARTx USART Instance + * @param ClockPhase This parameter can be one of the following values: + * @arg @ref LL_USART_PHASE_1EDGE + * @arg @ref LL_USART_PHASE_2EDGE + * @retval None + */ +__STATIC_INLINE void LL_USART_SetClockPhase(USART_TypeDef *USARTx, uint32_t ClockPhase) +{ + MODIFY_REG(USARTx->CR2, USART_CR2_CPHA, ClockPhase); +} + +/** + * @brief Return phase of the clock output on the SCLK pin in synchronous mode + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. 
+ * @rmtoll CR2 CPHA LL_USART_GetClockPhase + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_PHASE_1EDGE + * @arg @ref LL_USART_PHASE_2EDGE + */ +__STATIC_INLINE uint32_t LL_USART_GetClockPhase(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR2, USART_CR2_CPHA)); +} + +/** + * @brief Select the polarity of the clock output on the SCLK pin in synchronous mode + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @rmtoll CR2 CPOL LL_USART_SetClockPolarity + * @param USARTx USART Instance + * @param ClockPolarity This parameter can be one of the following values: + * @arg @ref LL_USART_POLARITY_LOW + * @arg @ref LL_USART_POLARITY_HIGH + * @retval None + */ +__STATIC_INLINE void LL_USART_SetClockPolarity(USART_TypeDef *USARTx, uint32_t ClockPolarity) +{ + MODIFY_REG(USARTx->CR2, USART_CR2_CPOL, ClockPolarity); +} + +/** + * @brief Return polarity of the clock output on the SCLK pin in synchronous mode + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @rmtoll CR2 CPOL LL_USART_GetClockPolarity + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_POLARITY_LOW + * @arg @ref LL_USART_POLARITY_HIGH + */ +__STATIC_INLINE uint32_t LL_USART_GetClockPolarity(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR2, USART_CR2_CPOL)); +} + +/** + * @brief Configure Clock signal format (Phase Polarity and choice about output of last bit clock pulse) + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @note Call of this function is equivalent to following function call sequence : + * - Clock Phase configuration using @ref LL_USART_SetClockPhase() function + * - Clock Polarity configuration using @ref LL_USART_SetClockPolarity() function + * - Output of Last bit Clock pulse configuration using @ref LL_USART_SetLastClkPulseOutput() function + * @rmtoll CR2 CPHA LL_USART_ConfigClock\n + * CR2 CPOL LL_USART_ConfigClock\n + * CR2 LBCL LL_USART_ConfigClock + * @param USARTx USART Instance + * @param Phase This parameter can be one of the following values: + * @arg @ref LL_USART_PHASE_1EDGE + * @arg @ref LL_USART_PHASE_2EDGE + * @param Polarity This parameter can be one of the following values: + * @arg @ref LL_USART_POLARITY_LOW + * @arg @ref LL_USART_POLARITY_HIGH + * @param LBCPOutput This parameter can be one of the following values: + * @arg @ref LL_USART_LASTCLKPULSE_NO_OUTPUT + * @arg @ref LL_USART_LASTCLKPULSE_OUTPUT + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigClock(USART_TypeDef *USARTx, uint32_t Phase, uint32_t Polarity, uint32_t LBCPOutput) +{ + MODIFY_REG(USARTx->CR2, USART_CR2_CPHA | USART_CR2_CPOL | USART_CR2_LBCL, Phase | Polarity | LBCPOutput); +} + +/** + * @brief Enable Clock output on SCLK pin + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. 
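+ * @note Editor's illustrative sketch (not part of the original ST header): synchronous
+ *       (clocked) mode is usually prepared by configuring the clock format first and only
+ *       then enabling the SCLK output, while the USART is still disabled, e.g. on USART1:
+ * @code
+ *   LL_USART_ConfigClock(USART1, LL_USART_PHASE_1EDGE, LL_USART_POLARITY_LOW,
+ *                        LL_USART_LASTCLKPULSE_OUTPUT);
+ *   LL_USART_EnableSCLKOutput(USART1);
+ * @endcode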
+ * @rmtoll CR2 CLKEN LL_USART_EnableSCLKOutput + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableSCLKOutput(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR2, USART_CR2_CLKEN); +} + +/** + * @brief Disable Clock output on SCLK pin + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @rmtoll CR2 CLKEN LL_USART_DisableSCLKOutput + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableSCLKOutput(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR2, USART_CR2_CLKEN); +} + +/** + * @brief Indicate if Clock output on SCLK pin is enabled + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. + * @rmtoll CR2 CLKEN LL_USART_IsEnabledSCLKOutput + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledSCLKOutput(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR2, USART_CR2_CLKEN) == (USART_CR2_CLKEN)); +} + +/** + * @brief Set the length of the stop bits + * @rmtoll CR2 STOP LL_USART_SetStopBitsLength + * @param USARTx USART Instance + * @param StopBits This parameter can be one of the following values: + * @arg @ref LL_USART_STOPBITS_0_5 + * @arg @ref LL_USART_STOPBITS_1 + * @arg @ref LL_USART_STOPBITS_1_5 + * @arg @ref LL_USART_STOPBITS_2 + * @retval None + */ +__STATIC_INLINE void LL_USART_SetStopBitsLength(USART_TypeDef *USARTx, uint32_t StopBits) +{ + MODIFY_REG(USARTx->CR2, USART_CR2_STOP, StopBits); +} + +/** + * @brief Retrieve the length of the stop bits + * @rmtoll CR2 STOP LL_USART_GetStopBitsLength + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_STOPBITS_0_5 + * @arg @ref LL_USART_STOPBITS_1 + * @arg @ref LL_USART_STOPBITS_1_5 + * @arg @ref LL_USART_STOPBITS_2 + */ +__STATIC_INLINE uint32_t LL_USART_GetStopBitsLength(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR2, USART_CR2_STOP)); +} + +/** + * @brief Configure Character frame format (Datawidth, Parity control, Stop Bits) + * @note Call of this function is equivalent to following function call sequence : + * - Data Width configuration using @ref LL_USART_SetDataWidth() function + * - Parity Control and mode configuration using @ref LL_USART_SetParity() function + * - Stop bits configuration using @ref LL_USART_SetStopBitsLength() function + * @rmtoll CR1 PS LL_USART_ConfigCharacter\n + * CR1 PCE LL_USART_ConfigCharacter\n + * CR1 M LL_USART_ConfigCharacter\n + * CR2 STOP LL_USART_ConfigCharacter + * @param USARTx USART Instance + * @param DataWidth This parameter can be one of the following values: + * @arg @ref LL_USART_DATAWIDTH_8B + * @arg @ref LL_USART_DATAWIDTH_9B + * @param Parity This parameter can be one of the following values: + * @arg @ref LL_USART_PARITY_NONE + * @arg @ref LL_USART_PARITY_EVEN + * @arg @ref LL_USART_PARITY_ODD + * @param StopBits This parameter can be one of the following values: + * @arg @ref LL_USART_STOPBITS_0_5 + * @arg @ref LL_USART_STOPBITS_1 + * @arg @ref LL_USART_STOPBITS_1_5 + * @arg @ref LL_USART_STOPBITS_2 + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigCharacter(USART_TypeDef *USARTx, uint32_t DataWidth, uint32_t Parity, + uint32_t StopBits) +{ + MODIFY_REG(USARTx->CR1, USART_CR1_PS | USART_CR1_PCE | USART_CR1_M, Parity | DataWidth); + MODIFY_REG(USARTx->CR2, USART_CR2_STOP, 
StopBits); +} + +/** + * @brief Set Address of the USART node. + * @note This is used in multiprocessor communication during Mute mode or Stop mode, + * for wake up with address mark detection. + * @rmtoll CR2 ADD LL_USART_SetNodeAddress + * @param USARTx USART Instance + * @param NodeAddress 4 bit Address of the USART node. + * @retval None + */ +__STATIC_INLINE void LL_USART_SetNodeAddress(USART_TypeDef *USARTx, uint32_t NodeAddress) +{ + MODIFY_REG(USARTx->CR2, USART_CR2_ADD, (NodeAddress & USART_CR2_ADD)); +} + +/** + * @brief Return 4 bit Address of the USART node as set in ADD field of CR2. + * @note only 4bits (b3-b0) of returned value are relevant (b31-b4 are not relevant) + * @rmtoll CR2 ADD LL_USART_GetNodeAddress + * @param USARTx USART Instance + * @retval Address of the USART node (Value between Min_Data=0 and Max_Data=255) + */ +__STATIC_INLINE uint32_t LL_USART_GetNodeAddress(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR2, USART_CR2_ADD)); +} + +/** + * @brief Enable RTS HW Flow Control + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll CR3 RTSE LL_USART_EnableRTSHWFlowCtrl + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableRTSHWFlowCtrl(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR3, USART_CR3_RTSE); +} + +/** + * @brief Disable RTS HW Flow Control + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll CR3 RTSE LL_USART_DisableRTSHWFlowCtrl + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableRTSHWFlowCtrl(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR3, USART_CR3_RTSE); +} + +/** + * @brief Enable CTS HW Flow Control + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll CR3 CTSE LL_USART_EnableCTSHWFlowCtrl + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableCTSHWFlowCtrl(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR3, USART_CR3_CTSE); +} + +/** + * @brief Disable CTS HW Flow Control + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll CR3 CTSE LL_USART_DisableCTSHWFlowCtrl + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableCTSHWFlowCtrl(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR3, USART_CR3_CTSE); +} + +/** + * @brief Configure HW Flow Control mode (both CTS and RTS) + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. 
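+ * @note   Illustrative usage (sketch only; USART1 is an assumption, and the RTS/CTS pins
+ *         are assumed to be configured as USART alternate functions beforehand):
+ * @code
+ *   LL_USART_SetHWFlowCtrl(USART1, LL_USART_HWCONTROL_RTS_CTS);
+ * @endcode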
+ * @rmtoll CR3 RTSE LL_USART_SetHWFlowCtrl\n + * CR3 CTSE LL_USART_SetHWFlowCtrl + * @param USARTx USART Instance + * @param HardwareFlowControl This parameter can be one of the following values: + * @arg @ref LL_USART_HWCONTROL_NONE + * @arg @ref LL_USART_HWCONTROL_RTS + * @arg @ref LL_USART_HWCONTROL_CTS + * @arg @ref LL_USART_HWCONTROL_RTS_CTS + * @retval None + */ +__STATIC_INLINE void LL_USART_SetHWFlowCtrl(USART_TypeDef *USARTx, uint32_t HardwareFlowControl) +{ + MODIFY_REG(USARTx->CR3, USART_CR3_RTSE | USART_CR3_CTSE, HardwareFlowControl); +} + +/** + * @brief Return HW Flow Control configuration (both CTS and RTS) + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll CR3 RTSE LL_USART_GetHWFlowCtrl\n + * CR3 CTSE LL_USART_GetHWFlowCtrl + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_HWCONTROL_NONE + * @arg @ref LL_USART_HWCONTROL_RTS + * @arg @ref LL_USART_HWCONTROL_CTS + * @arg @ref LL_USART_HWCONTROL_RTS_CTS + */ +__STATIC_INLINE uint32_t LL_USART_GetHWFlowCtrl(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR3, USART_CR3_RTSE | USART_CR3_CTSE)); +} + +/** + * @brief Enable One bit sampling method + * @rmtoll CR3 ONEBIT LL_USART_EnableOneBitSamp + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableOneBitSamp(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR3, USART_CR3_ONEBIT); +} + +/** + * @brief Disable One bit sampling method + * @rmtoll CR3 ONEBIT LL_USART_DisableOneBitSamp + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableOneBitSamp(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR3, USART_CR3_ONEBIT); +} + +/** + * @brief Indicate if One bit sampling method is enabled + * @rmtoll CR3 ONEBIT LL_USART_IsEnabledOneBitSamp + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledOneBitSamp(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_ONEBIT) == (USART_CR3_ONEBIT)); +} + +/** + * @brief Configure USART BRR register for achieving expected Baud Rate value. + * @note Compute and set USARTDIV value in BRR Register (full BRR content) + * according to used Peripheral Clock, Oversampling mode, and expected Baud Rate values + * @note Peripheral clock and Baud rate values provided as function parameters should be valid + * (Baud rate value != 0) + * @rmtoll BRR BRR LL_USART_SetBaudRate + * @param USARTx USART Instance + * @param PeriphClk Peripheral Clock + * @param OverSampling This parameter can be one of the following values: + * @arg @ref LL_USART_OVERSAMPLING_16 + * @arg @ref LL_USART_OVERSAMPLING_8 + * @param BaudRate Baud Rate + * @retval None + */ +__STATIC_INLINE void LL_USART_SetBaudRate(USART_TypeDef *USARTx, uint32_t PeriphClk, uint32_t OverSampling, + uint32_t BaudRate) +{ + if (OverSampling == LL_USART_OVERSAMPLING_8) + { + USARTx->BRR = (uint16_t)(__LL_USART_DIV_SAMPLING8(PeriphClk, BaudRate)); + } + else + { + USARTx->BRR = (uint16_t)(__LL_USART_DIV_SAMPLING16(PeriphClk, BaudRate)); + } +} + +/** + * @brief Return current Baud Rate value, according to USARTDIV present in BRR register + * (full BRR content), and to used Peripheral Clock and Oversampling mode values + * @note In case of non-initialized or invalid value stored in BRR register, value 0 will be returned. 
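+ * @note   Worked example (sketch; the 16 MHz peripheral clock and the 9600 bauds target
+ *         are assumptions): with oversampling by 16, LL_USART_SetBaudRate() stores
+ *         USARTDIV = 16000000 / 9600 = 1666.67, rounded to an integer BRR content of
+ *         about 0x683, and this function then returns 16000000 divided by that stored
+ *         value, i.e. approximately 9600 bauds.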
+ * @rmtoll BRR BRR LL_USART_GetBaudRate + * @param USARTx USART Instance + * @param PeriphClk Peripheral Clock + * @param OverSampling This parameter can be one of the following values: + * @arg @ref LL_USART_OVERSAMPLING_16 + * @arg @ref LL_USART_OVERSAMPLING_8 + * @retval Baud Rate + */ +__STATIC_INLINE uint32_t LL_USART_GetBaudRate(const USART_TypeDef *USARTx, uint32_t PeriphClk, uint32_t OverSampling) +{ + uint32_t usartdiv = 0x0U; + uint32_t brrresult = 0x0U; + + usartdiv = USARTx->BRR; + + if (OverSampling == LL_USART_OVERSAMPLING_8) + { + if ((usartdiv & 0xFFF7U) != 0U) + { + usartdiv = (uint16_t)((usartdiv & 0xFFF0U) | ((usartdiv & 0x0007U) << 1U)) ; + brrresult = (PeriphClk * 2U) / usartdiv; + } + } + else + { + if ((usartdiv & 0xFFFFU) != 0U) + { + brrresult = PeriphClk / usartdiv; + } + } + return (brrresult); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_Configuration_IRDA Configuration functions related to Irda feature + * @{ + */ + +/** + * @brief Enable IrDA mode + * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not + * IrDA feature is supported by the USARTx instance. + * @rmtoll CR3 IREN LL_USART_EnableIrda + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIrda(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR3, USART_CR3_IREN); +} + +/** + * @brief Disable IrDA mode + * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not + * IrDA feature is supported by the USARTx instance. + * @rmtoll CR3 IREN LL_USART_DisableIrda + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIrda(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR3, USART_CR3_IREN); +} + +/** + * @brief Indicate if IrDA mode is enabled + * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not + * IrDA feature is supported by the USARTx instance. + * @rmtoll CR3 IREN LL_USART_IsEnabledIrda + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIrda(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_IREN) == (USART_CR3_IREN)); +} + +/** + * @brief Configure IrDA Power Mode (Normal or Low Power) + * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not + * IrDA feature is supported by the USARTx instance. + * @rmtoll CR3 IRLP LL_USART_SetIrdaPowerMode + * @param USARTx USART Instance + * @param PowerMode This parameter can be one of the following values: + * @arg @ref LL_USART_IRDA_POWER_NORMAL + * @arg @ref LL_USART_IRDA_POWER_LOW + * @retval None + */ +__STATIC_INLINE void LL_USART_SetIrdaPowerMode(USART_TypeDef *USARTx, uint32_t PowerMode) +{ + MODIFY_REG(USARTx->CR3, USART_CR3_IRLP, PowerMode); +} + +/** + * @brief Retrieve IrDA Power Mode configuration (Normal or Low Power) + * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not + * IrDA feature is supported by the USARTx instance. 
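+ * @note   Illustrative IrDA low-power setup (sketch only; the UART5 instance and the
+ *         prescaler value are assumptions):
+ * @code
+ *   LL_USART_SetIrdaPrescaler(UART5, 0x01U);
+ *   LL_USART_SetIrdaPowerMode(UART5, LL_USART_IRDA_POWER_LOW);
+ *   LL_USART_EnableIrda(UART5);
+ * @endcode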
+ * @rmtoll CR3 IRLP LL_USART_GetIrdaPowerMode
+ * @param USARTx USART Instance
+ * @retval Returned value can be one of the following values:
+ *         @arg @ref LL_USART_IRDA_POWER_NORMAL
+ *         @arg @ref LL_USART_IRDA_POWER_LOW
+ */
+__STATIC_INLINE uint32_t LL_USART_GetIrdaPowerMode(const USART_TypeDef *USARTx)
+{
+  return (uint32_t)(READ_BIT(USARTx->CR3, USART_CR3_IRLP));
+}
+
+/**
+ * @brief Set Irda prescaler value, used for dividing the USART clock source
+ *        to achieve the Irda Low Power frequency (8 bits value)
+ * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not
+ *       IrDA feature is supported by the USARTx instance.
+ * @rmtoll GTPR PSC LL_USART_SetIrdaPrescaler
+ * @param USARTx USART Instance
+ * @param PrescalerValue Value between Min_Data=0x00 and Max_Data=0xFF
+ * @retval None
+ */
+__STATIC_INLINE void LL_USART_SetIrdaPrescaler(USART_TypeDef *USARTx, uint32_t PrescalerValue)
+{
+  MODIFY_REG(USARTx->GTPR, USART_GTPR_PSC, PrescalerValue);
+}
+
+/**
+ * @brief Return Irda prescaler value, used for dividing the USART clock source
+ *        to achieve the Irda Low Power frequency (8 bits value)
+ * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not
+ *       IrDA feature is supported by the USARTx instance.
+ * @rmtoll GTPR PSC LL_USART_GetIrdaPrescaler
+ * @param USARTx USART Instance
+ * @retval Irda prescaler value (Value between Min_Data=0x00 and Max_Data=0xFF)
+ */
+__STATIC_INLINE uint32_t LL_USART_GetIrdaPrescaler(const USART_TypeDef *USARTx)
+{
+  return (uint32_t)(READ_BIT(USARTx->GTPR, USART_GTPR_PSC));
+}
+
+/**
+ * @}
+ */
+
+/** @defgroup USART_LL_EF_Configuration_Smartcard Configuration functions related to Smartcard feature
+ * @{
+ */
+
+/**
+ * @brief Enable Smartcard NACK transmission
+ * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not
+ *       Smartcard feature is supported by the USARTx instance.
+ * @rmtoll CR3 NACK LL_USART_EnableSmartcardNACK
+ * @param USARTx USART Instance
+ * @retval None
+ */
+__STATIC_INLINE void LL_USART_EnableSmartcardNACK(USART_TypeDef *USARTx)
+{
+  SET_BIT(USARTx->CR3, USART_CR3_NACK);
+}
+
+/**
+ * @brief Disable Smartcard NACK transmission
+ * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not
+ *       Smartcard feature is supported by the USARTx instance.
+ * @rmtoll CR3 NACK LL_USART_DisableSmartcardNACK
+ * @param USARTx USART Instance
+ * @retval None
+ */
+__STATIC_INLINE void LL_USART_DisableSmartcardNACK(USART_TypeDef *USARTx)
+{
+  CLEAR_BIT(USARTx->CR3, USART_CR3_NACK);
+}
+
+/**
+ * @brief Indicate if Smartcard NACK transmission is enabled
+ * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not
+ *       Smartcard feature is supported by the USARTx instance.
+ * @rmtoll CR3 NACK LL_USART_IsEnabledSmartcardNACK
+ * @param USARTx USART Instance
+ * @retval State of bit (1 or 0).
+ */
+__STATIC_INLINE uint32_t LL_USART_IsEnabledSmartcardNACK(const USART_TypeDef *USARTx)
+{
+  return (READ_BIT(USARTx->CR3, USART_CR3_NACK) == (USART_CR3_NACK));
+}
+
+/**
+ * @brief Enable Smartcard mode
+ * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not
+ *       Smartcard feature is supported by the USARTx instance.
+ * @rmtoll CR3 SCEN LL_USART_EnableSmartcard + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableSmartcard(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR3, USART_CR3_SCEN); +} + +/** + * @brief Disable Smartcard mode + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. + * @rmtoll CR3 SCEN LL_USART_DisableSmartcard + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableSmartcard(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR3, USART_CR3_SCEN); +} + +/** + * @brief Indicate if Smartcard mode is enabled + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. + * @rmtoll CR3 SCEN LL_USART_IsEnabledSmartcard + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledSmartcard(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_SCEN) == (USART_CR3_SCEN)); +} + +/** + * @brief Set Smartcard prescaler value, used for dividing the USART clock + * source to provide the SMARTCARD Clock (5 bits value) + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. + * @rmtoll GTPR PSC LL_USART_SetSmartcardPrescaler + * @param USARTx USART Instance + * @param PrescalerValue Value between Min_Data=0 and Max_Data=31 + * @retval None + */ +__STATIC_INLINE void LL_USART_SetSmartcardPrescaler(USART_TypeDef *USARTx, uint32_t PrescalerValue) +{ + MODIFY_REG(USARTx->GTPR, USART_GTPR_PSC, PrescalerValue); +} + +/** + * @brief Return Smartcard prescaler value, used for dividing the USART clock + * source to provide the SMARTCARD Clock (5 bits value) + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. + * @rmtoll GTPR PSC LL_USART_GetSmartcardPrescaler + * @param USARTx USART Instance + * @retval Smartcard prescaler value (Value between Min_Data=0 and Max_Data=31) + */ +__STATIC_INLINE uint32_t LL_USART_GetSmartcardPrescaler(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->GTPR, USART_GTPR_PSC)); +} + +/** + * @brief Set Smartcard Guard time value, expressed in nb of baud clocks periods + * (GT[7:0] bits : Guard time value) + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. + * @rmtoll GTPR GT LL_USART_SetSmartcardGuardTime + * @param USARTx USART Instance + * @param GuardTime Value between Min_Data=0x00 and Max_Data=0xFF + * @retval None + */ +__STATIC_INLINE void LL_USART_SetSmartcardGuardTime(USART_TypeDef *USARTx, uint32_t GuardTime) +{ + MODIFY_REG(USARTx->GTPR, USART_GTPR_GT, GuardTime << USART_POSITION_GTPR_GT); +} + +/** + * @brief Return Smartcard Guard time value, expressed in nb of baud clocks periods + * (GT[7:0] bits : Guard time value) + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. 
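+ * @note   Illustrative Smartcard timing setup (sketch only; USART1 and the prescaler and
+ *         guard time values are assumptions tied to the card profile in use):
+ * @code
+ *   LL_USART_SetSmartcardPrescaler(USART1, 5U);
+ *   LL_USART_SetSmartcardGuardTime(USART1, 16U);
+ *   LL_USART_EnableSmartcardNACK(USART1);
+ * @endcode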
+ * @rmtoll GTPR GT LL_USART_GetSmartcardGuardTime + * @param USARTx USART Instance + * @retval Smartcard Guard time value (Value between Min_Data=0x00 and Max_Data=0xFF) + */ +__STATIC_INLINE uint32_t LL_USART_GetSmartcardGuardTime(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->GTPR, USART_GTPR_GT) >> USART_POSITION_GTPR_GT); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_Configuration_HalfDuplex Configuration functions related to Half Duplex feature + * @{ + */ + +/** + * @brief Enable Single Wire Half-Duplex mode + * @note Macro IS_UART_HALFDUPLEX_INSTANCE(USARTx) can be used to check whether or not + * Half-Duplex mode is supported by the USARTx instance. + * @rmtoll CR3 HDSEL LL_USART_EnableHalfDuplex + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableHalfDuplex(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR3, USART_CR3_HDSEL); +} + +/** + * @brief Disable Single Wire Half-Duplex mode + * @note Macro IS_UART_HALFDUPLEX_INSTANCE(USARTx) can be used to check whether or not + * Half-Duplex mode is supported by the USARTx instance. + * @rmtoll CR3 HDSEL LL_USART_DisableHalfDuplex + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableHalfDuplex(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR3, USART_CR3_HDSEL); +} + +/** + * @brief Indicate if Single Wire Half-Duplex mode is enabled + * @note Macro IS_UART_HALFDUPLEX_INSTANCE(USARTx) can be used to check whether or not + * Half-Duplex mode is supported by the USARTx instance. + * @rmtoll CR3 HDSEL LL_USART_IsEnabledHalfDuplex + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledHalfDuplex(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_HDSEL) == (USART_CR3_HDSEL)); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_Configuration_LIN Configuration functions related to LIN feature + * @{ + */ + +/** + * @brief Set LIN Break Detection Length + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll CR2 LBDL LL_USART_SetLINBrkDetectionLen + * @param USARTx USART Instance + * @param LINBDLength This parameter can be one of the following values: + * @arg @ref LL_USART_LINBREAK_DETECT_10B + * @arg @ref LL_USART_LINBREAK_DETECT_11B + * @retval None + */ +__STATIC_INLINE void LL_USART_SetLINBrkDetectionLen(USART_TypeDef *USARTx, uint32_t LINBDLength) +{ + MODIFY_REG(USARTx->CR2, USART_CR2_LBDL, LINBDLength); +} + +/** + * @brief Return LIN Break Detection Length + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll CR2 LBDL LL_USART_GetLINBrkDetectionLen + * @param USARTx USART Instance + * @retval Returned value can be one of the following values: + * @arg @ref LL_USART_LINBREAK_DETECT_10B + * @arg @ref LL_USART_LINBREAK_DETECT_11B + */ +__STATIC_INLINE uint32_t LL_USART_GetLINBrkDetectionLen(const USART_TypeDef *USARTx) +{ + return (uint32_t)(READ_BIT(USARTx->CR2, USART_CR2_LBDL)); +} + +/** + * @brief Enable LIN mode + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. 
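+ * @note   Illustrative LIN setup (sketch only; USART2 and the 11-bit break length are
+ *         assumptions):
+ * @code
+ *   LL_USART_ConfigLINMode(USART2);
+ *   LL_USART_SetLINBrkDetectionLen(USART2, LL_USART_LINBREAK_DETECT_11B);
+ *   LL_USART_EnableIT_LBD(USART2);
+ * @endcode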
+ * @rmtoll CR2 LINEN LL_USART_EnableLIN + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableLIN(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR2, USART_CR2_LINEN); +} + +/** + * @brief Disable LIN mode + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll CR2 LINEN LL_USART_DisableLIN + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableLIN(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR2, USART_CR2_LINEN); +} + +/** + * @brief Indicate if LIN mode is enabled + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll CR2 LINEN LL_USART_IsEnabledLIN + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledLIN(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR2, USART_CR2_LINEN) == (USART_CR2_LINEN)); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_AdvancedConfiguration Advanced Configurations services + * @{ + */ + +/** + * @brief Perform basic configuration of USART for enabling use in Asynchronous Mode (UART) + * @note In UART mode, the following bits must be kept cleared: + * - LINEN bit in the USART_CR2 register, + * - CLKEN bit in the USART_CR2 register, + * - SCEN bit in the USART_CR3 register, + * - IREN bit in the USART_CR3 register, + * - HDSEL bit in the USART_CR3 register. + * @note Call of this function is equivalent to following function call sequence : + * - Clear LINEN in CR2 using @ref LL_USART_DisableLIN() function + * - Clear CLKEN in CR2 using @ref LL_USART_DisableSCLKOutput() function + * - Clear SCEN in CR3 using @ref LL_USART_DisableSmartcard() function + * - Clear IREN in CR3 using @ref LL_USART_DisableIrda() function + * - Clear HDSEL in CR3 using @ref LL_USART_DisableHalfDuplex() function + * @note Other remaining configurations items related to Asynchronous Mode + * (as Baud Rate, Word length, Parity, ...) should be set using + * dedicated functions + * @rmtoll CR2 LINEN LL_USART_ConfigAsyncMode\n + * CR2 CLKEN LL_USART_ConfigAsyncMode\n + * CR3 SCEN LL_USART_ConfigAsyncMode\n + * CR3 IREN LL_USART_ConfigAsyncMode\n + * CR3 HDSEL LL_USART_ConfigAsyncMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigAsyncMode(USART_TypeDef *USARTx) +{ + /* In Asynchronous mode, the following bits must be kept cleared: + - LINEN, CLKEN bits in the USART_CR2 register, + - SCEN, IREN and HDSEL bits in the USART_CR3 register.*/ + CLEAR_BIT(USARTx->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(USARTx->CR3, (USART_CR3_SCEN | USART_CR3_IREN | USART_CR3_HDSEL)); +} + +/** + * @brief Perform basic configuration of USART for enabling use in Synchronous Mode + * @note In Synchronous mode, the following bits must be kept cleared: + * - LINEN bit in the USART_CR2 register, + * - SCEN bit in the USART_CR3 register, + * - IREN bit in the USART_CR3 register, + * - HDSEL bit in the USART_CR3 register. + * This function also sets the USART in Synchronous mode. + * @note Macro IS_USART_INSTANCE(USARTx) can be used to check whether or not + * Synchronous mode is supported by the USARTx instance. 
+ * @note Call of this function is equivalent to following function call sequence : + * - Clear LINEN in CR2 using @ref LL_USART_DisableLIN() function + * - Clear IREN in CR3 using @ref LL_USART_DisableIrda() function + * - Clear SCEN in CR3 using @ref LL_USART_DisableSmartcard() function + * - Clear HDSEL in CR3 using @ref LL_USART_DisableHalfDuplex() function + * - Set CLKEN in CR2 using @ref LL_USART_EnableSCLKOutput() function + * @note Other remaining configurations items related to Synchronous Mode + * (as Baud Rate, Word length, Parity, Clock Polarity, ...) should be set using + * dedicated functions + * @rmtoll CR2 LINEN LL_USART_ConfigSyncMode\n + * CR2 CLKEN LL_USART_ConfigSyncMode\n + * CR3 SCEN LL_USART_ConfigSyncMode\n + * CR3 IREN LL_USART_ConfigSyncMode\n + * CR3 HDSEL LL_USART_ConfigSyncMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigSyncMode(USART_TypeDef *USARTx) +{ + /* In Synchronous mode, the following bits must be kept cleared: + - LINEN bit in the USART_CR2 register, + - SCEN, IREN and HDSEL bits in the USART_CR3 register.*/ + CLEAR_BIT(USARTx->CR2, (USART_CR2_LINEN)); + CLEAR_BIT(USARTx->CR3, (USART_CR3_SCEN | USART_CR3_IREN | USART_CR3_HDSEL)); + /* set the UART/USART in Synchronous mode */ + SET_BIT(USARTx->CR2, USART_CR2_CLKEN); +} + +/** + * @brief Perform basic configuration of USART for enabling use in LIN Mode + * @note In LIN mode, the following bits must be kept cleared: + * - STOP and CLKEN bits in the USART_CR2 register, + * - SCEN bit in the USART_CR3 register, + * - IREN bit in the USART_CR3 register, + * - HDSEL bit in the USART_CR3 register. + * This function also set the UART/USART in LIN mode. + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @note Call of this function is equivalent to following function call sequence : + * - Clear CLKEN in CR2 using @ref LL_USART_DisableSCLKOutput() function + * - Clear STOP in CR2 using @ref LL_USART_SetStopBitsLength() function + * - Clear SCEN in CR3 using @ref LL_USART_DisableSmartcard() function + * - Clear IREN in CR3 using @ref LL_USART_DisableIrda() function + * - Clear HDSEL in CR3 using @ref LL_USART_DisableHalfDuplex() function + * - Set LINEN in CR2 using @ref LL_USART_EnableLIN() function + * @note Other remaining configurations items related to LIN Mode + * (as Baud Rate, Word length, LIN Break Detection Length, ...) 
should be set using + * dedicated functions + * @rmtoll CR2 CLKEN LL_USART_ConfigLINMode\n + * CR2 STOP LL_USART_ConfigLINMode\n + * CR2 LINEN LL_USART_ConfigLINMode\n + * CR3 IREN LL_USART_ConfigLINMode\n + * CR3 SCEN LL_USART_ConfigLINMode\n + * CR3 HDSEL LL_USART_ConfigLINMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigLINMode(USART_TypeDef *USARTx) +{ + /* In LIN mode, the following bits must be kept cleared: + - STOP and CLKEN bits in the USART_CR2 register, + - IREN, SCEN and HDSEL bits in the USART_CR3 register.*/ + CLEAR_BIT(USARTx->CR2, (USART_CR2_CLKEN | USART_CR2_STOP)); + CLEAR_BIT(USARTx->CR3, (USART_CR3_IREN | USART_CR3_SCEN | USART_CR3_HDSEL)); + /* Set the UART/USART in LIN mode */ + SET_BIT(USARTx->CR2, USART_CR2_LINEN); +} + +/** + * @brief Perform basic configuration of USART for enabling use in Half Duplex Mode + * @note In Half Duplex mode, the following bits must be kept cleared: + * - LINEN bit in the USART_CR2 register, + * - CLKEN bit in the USART_CR2 register, + * - SCEN bit in the USART_CR3 register, + * - IREN bit in the USART_CR3 register, + * This function also sets the UART/USART in Half Duplex mode. + * @note Macro IS_UART_HALFDUPLEX_INSTANCE(USARTx) can be used to check whether or not + * Half-Duplex mode is supported by the USARTx instance. + * @note Call of this function is equivalent to following function call sequence : + * - Clear LINEN in CR2 using @ref LL_USART_DisableLIN() function + * - Clear CLKEN in CR2 using @ref LL_USART_DisableSCLKOutput() function + * - Clear SCEN in CR3 using @ref LL_USART_DisableSmartcard() function + * - Clear IREN in CR3 using @ref LL_USART_DisableIrda() function + * - Set HDSEL in CR3 using @ref LL_USART_EnableHalfDuplex() function + * @note Other remaining configurations items related to Half Duplex Mode + * (as Baud Rate, Word length, Parity, ...) should be set using + * dedicated functions + * @rmtoll CR2 LINEN LL_USART_ConfigHalfDuplexMode\n + * CR2 CLKEN LL_USART_ConfigHalfDuplexMode\n + * CR3 HDSEL LL_USART_ConfigHalfDuplexMode\n + * CR3 SCEN LL_USART_ConfigHalfDuplexMode\n + * CR3 IREN LL_USART_ConfigHalfDuplexMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigHalfDuplexMode(USART_TypeDef *USARTx) +{ + /* In Half Duplex mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - SCEN and IREN bits in the USART_CR3 register.*/ + CLEAR_BIT(USARTx->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(USARTx->CR3, (USART_CR3_SCEN | USART_CR3_IREN)); + /* set the UART/USART in Half Duplex mode */ + SET_BIT(USARTx->CR3, USART_CR3_HDSEL); +} + +/** + * @brief Perform basic configuration of USART for enabling use in Smartcard Mode + * @note In Smartcard mode, the following bits must be kept cleared: + * - LINEN bit in the USART_CR2 register, + * - IREN bit in the USART_CR3 register, + * - HDSEL bit in the USART_CR3 register. + * This function also configures Stop bits to 1.5 bits and + * sets the USART in Smartcard mode (SCEN bit). + * Clock Output is also enabled (CLKEN). + * @note Macro IS_SMARTCARD_INSTANCE(USARTx) can be used to check whether or not + * Smartcard feature is supported by the USARTx instance. 
+ * @note Call of this function is equivalent to following function call sequence : + * - Clear LINEN in CR2 using @ref LL_USART_DisableLIN() function + * - Clear IREN in CR3 using @ref LL_USART_DisableIrda() function + * - Clear HDSEL in CR3 using @ref LL_USART_DisableHalfDuplex() function + * - Configure STOP in CR2 using @ref LL_USART_SetStopBitsLength() function + * - Set CLKEN in CR2 using @ref LL_USART_EnableSCLKOutput() function + * - Set SCEN in CR3 using @ref LL_USART_EnableSmartcard() function + * @note Other remaining configurations items related to Smartcard Mode + * (as Baud Rate, Word length, Parity, ...) should be set using + * dedicated functions + * @rmtoll CR2 LINEN LL_USART_ConfigSmartcardMode\n + * CR2 STOP LL_USART_ConfigSmartcardMode\n + * CR2 CLKEN LL_USART_ConfigSmartcardMode\n + * CR3 HDSEL LL_USART_ConfigSmartcardMode\n + * CR3 SCEN LL_USART_ConfigSmartcardMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigSmartcardMode(USART_TypeDef *USARTx) +{ + /* In Smartcard mode, the following bits must be kept cleared: + - LINEN bit in the USART_CR2 register, + - IREN and HDSEL bits in the USART_CR3 register.*/ + CLEAR_BIT(USARTx->CR2, (USART_CR2_LINEN)); + CLEAR_BIT(USARTx->CR3, (USART_CR3_IREN | USART_CR3_HDSEL)); + /* Configure Stop bits to 1.5 bits */ + /* Synchronous mode is activated by default */ + SET_BIT(USARTx->CR2, (USART_CR2_STOP_0 | USART_CR2_STOP_1 | USART_CR2_CLKEN)); + /* set the UART/USART in Smartcard mode */ + SET_BIT(USARTx->CR3, USART_CR3_SCEN); +} + +/** + * @brief Perform basic configuration of USART for enabling use in Irda Mode + * @note In IRDA mode, the following bits must be kept cleared: + * - LINEN bit in the USART_CR2 register, + * - STOP and CLKEN bits in the USART_CR2 register, + * - SCEN bit in the USART_CR3 register, + * - HDSEL bit in the USART_CR3 register. + * This function also sets the UART/USART in IRDA mode (IREN bit). + * @note Macro IS_IRDA_INSTANCE(USARTx) can be used to check whether or not + * IrDA feature is supported by the USARTx instance. + * @note Call of this function is equivalent to following function call sequence : + * - Clear LINEN in CR2 using @ref LL_USART_DisableLIN() function + * - Clear CLKEN in CR2 using @ref LL_USART_DisableSCLKOutput() function + * - Clear SCEN in CR3 using @ref LL_USART_DisableSmartcard() function + * - Clear HDSEL in CR3 using @ref LL_USART_DisableHalfDuplex() function + * - Configure STOP in CR2 using @ref LL_USART_SetStopBitsLength() function + * - Set IREN in CR3 using @ref LL_USART_EnableIrda() function + * @note Other remaining configurations items related to Irda Mode + * (as Baud Rate, Word length, Power mode, ...) 
should be set using + * dedicated functions + * @rmtoll CR2 LINEN LL_USART_ConfigIrdaMode\n + * CR2 CLKEN LL_USART_ConfigIrdaMode\n + * CR2 STOP LL_USART_ConfigIrdaMode\n + * CR3 SCEN LL_USART_ConfigIrdaMode\n + * CR3 HDSEL LL_USART_ConfigIrdaMode\n + * CR3 IREN LL_USART_ConfigIrdaMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigIrdaMode(USART_TypeDef *USARTx) +{ + /* In IRDA mode, the following bits must be kept cleared: + - LINEN, STOP and CLKEN bits in the USART_CR2 register, + - SCEN and HDSEL bits in the USART_CR3 register.*/ + CLEAR_BIT(USARTx->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN | USART_CR2_STOP)); + CLEAR_BIT(USARTx->CR3, (USART_CR3_SCEN | USART_CR3_HDSEL)); + /* set the UART/USART in IRDA mode */ + SET_BIT(USARTx->CR3, USART_CR3_IREN); +} + +/** + * @brief Perform basic configuration of USART for enabling use in Multi processor Mode + * (several USARTs connected in a network, one of the USARTs can be the master, + * its TX output connected to the RX inputs of the other slaves USARTs). + * @note In MultiProcessor mode, the following bits must be kept cleared: + * - LINEN bit in the USART_CR2 register, + * - CLKEN bit in the USART_CR2 register, + * - SCEN bit in the USART_CR3 register, + * - IREN bit in the USART_CR3 register, + * - HDSEL bit in the USART_CR3 register. + * @note Call of this function is equivalent to following function call sequence : + * - Clear LINEN in CR2 using @ref LL_USART_DisableLIN() function + * - Clear CLKEN in CR2 using @ref LL_USART_DisableSCLKOutput() function + * - Clear SCEN in CR3 using @ref LL_USART_DisableSmartcard() function + * - Clear IREN in CR3 using @ref LL_USART_DisableIrda() function + * - Clear HDSEL in CR3 using @ref LL_USART_DisableHalfDuplex() function + * @note Other remaining configurations items related to Multi processor Mode + * (as Baud Rate, Wake Up Method, Node address, ...) should be set using + * dedicated functions + * @rmtoll CR2 LINEN LL_USART_ConfigMultiProcessMode\n + * CR2 CLKEN LL_USART_ConfigMultiProcessMode\n + * CR3 SCEN LL_USART_ConfigMultiProcessMode\n + * CR3 HDSEL LL_USART_ConfigMultiProcessMode\n + * CR3 IREN LL_USART_ConfigMultiProcessMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ConfigMultiProcessMode(USART_TypeDef *USARTx) +{ + /* In Multi Processor mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - IREN, SCEN and HDSEL bits in the USART_CR3 register.*/ + CLEAR_BIT(USARTx->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(USARTx->CR3, (USART_CR3_SCEN | USART_CR3_HDSEL | USART_CR3_IREN)); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_FLAG_Management FLAG_Management + * @{ + */ + +/** + * @brief Check if the USART Parity Error Flag is set or not + * @rmtoll SR PE LL_USART_IsActiveFlag_PE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_PE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_PE) == (USART_SR_PE)); +} + +/** + * @brief Check if the USART Framing Error Flag is set or not + * @rmtoll SR FE LL_USART_IsActiveFlag_FE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_FE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_FE) == (USART_SR_FE)); +} + +/** + * @brief Check if the USART Noise error detected Flag is set or not + * @rmtoll SR NF LL_USART_IsActiveFlag_NE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_NE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_NE) == (USART_SR_NE)); +} + +/** + * @brief Check if the USART OverRun Error Flag is set or not + * @rmtoll SR ORE LL_USART_IsActiveFlag_ORE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_ORE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_ORE) == (USART_SR_ORE)); +} + +/** + * @brief Check if the USART IDLE line detected Flag is set or not + * @rmtoll SR IDLE LL_USART_IsActiveFlag_IDLE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_IDLE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_IDLE) == (USART_SR_IDLE)); +} + +/** + * @brief Check if the USART Read Data Register Not Empty Flag is set or not + * @rmtoll SR RXNE LL_USART_IsActiveFlag_RXNE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_RXNE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_RXNE) == (USART_SR_RXNE)); +} + +/** + * @brief Check if the USART Transmission Complete Flag is set or not + * @rmtoll SR TC LL_USART_IsActiveFlag_TC + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_TC(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_TC) == (USART_SR_TC)); +} + +/** + * @brief Check if the USART Transmit Data Register Empty Flag is set or not + * @rmtoll SR TXE LL_USART_IsActiveFlag_TXE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_TXE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_TXE) == (USART_SR_TXE)); +} + +/** + * @brief Check if the USART LIN Break Detection Flag is set or not + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll SR LBD LL_USART_IsActiveFlag_LBD + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_LBD(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_LBD) == (USART_SR_LBD)); +} + +/** + * @brief Check if the USART CTS Flag is set or not + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll SR CTS LL_USART_IsActiveFlag_nCTS + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_nCTS(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->SR, USART_SR_CTS) == (USART_SR_CTS)); +} + +/** + * @brief Check if the USART Send Break Flag is set or not + * @rmtoll CR1 SBK LL_USART_IsActiveFlag_SBK + * @param USARTx USART Instance + * @retval State of bit (1 or 0). 
+ */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_SBK(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_SBK) == (USART_CR1_SBK)); +} + +/** + * @brief Check if the USART Receive Wake Up from mute mode Flag is set or not + * @rmtoll CR1 RWU LL_USART_IsActiveFlag_RWU + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsActiveFlag_RWU(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_RWU) == (USART_CR1_RWU)); +} + +/** + * @brief Clear Parity Error Flag + * @note Clearing this flag is done by a read access to the USARTx_SR + * register followed by a read access to the USARTx_DR register. + * @note Please also consider that when clearing this flag, other flags as + * NE, FE, ORE, IDLE would also be cleared. + * @rmtoll SR PE LL_USART_ClearFlag_PE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_PE(USART_TypeDef *USARTx) +{ + __IO uint32_t tmpreg; + tmpreg = USARTx->SR; + (void) tmpreg; + tmpreg = USARTx->DR; + (void) tmpreg; +} + +/** + * @brief Clear Framing Error Flag + * @note Clearing this flag is done by a read access to the USARTx_SR + * register followed by a read access to the USARTx_DR register. + * @note Please also consider that when clearing this flag, other flags as + * PE, NE, ORE, IDLE would also be cleared. + * @rmtoll SR FE LL_USART_ClearFlag_FE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_FE(USART_TypeDef *USARTx) +{ + __IO uint32_t tmpreg; + tmpreg = USARTx->SR; + (void) tmpreg; + tmpreg = USARTx->DR; + (void) tmpreg; +} + +/** + * @brief Clear Noise detected Flag + * @note Clearing this flag is done by a read access to the USARTx_SR + * register followed by a read access to the USARTx_DR register. + * @note Please also consider that when clearing this flag, other flags as + * PE, FE, ORE, IDLE would also be cleared. + * @rmtoll SR NF LL_USART_ClearFlag_NE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_NE(USART_TypeDef *USARTx) +{ + __IO uint32_t tmpreg; + tmpreg = USARTx->SR; + (void) tmpreg; + tmpreg = USARTx->DR; + (void) tmpreg; +} + +/** + * @brief Clear OverRun Error Flag + * @note Clearing this flag is done by a read access to the USARTx_SR + * register followed by a read access to the USARTx_DR register. + * @note Please also consider that when clearing this flag, other flags as + * PE, NE, FE, IDLE would also be cleared. + * @rmtoll SR ORE LL_USART_ClearFlag_ORE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_ORE(USART_TypeDef *USARTx) +{ + __IO uint32_t tmpreg; + tmpreg = USARTx->SR; + (void) tmpreg; + tmpreg = USARTx->DR; + (void) tmpreg; +} + +/** + * @brief Clear IDLE line detected Flag + * @note Clearing this flag is done by a read access to the USARTx_SR + * register followed by a read access to the USARTx_DR register. + * @note Please also consider that when clearing this flag, other flags as + * PE, NE, FE, ORE would also be cleared. 
+ * @rmtoll SR IDLE LL_USART_ClearFlag_IDLE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_IDLE(USART_TypeDef *USARTx) +{ + __IO uint32_t tmpreg; + tmpreg = USARTx->SR; + (void) tmpreg; + tmpreg = USARTx->DR; + (void) tmpreg; +} + +/** + * @brief Clear Transmission Complete Flag + * @rmtoll SR TC LL_USART_ClearFlag_TC + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_TC(USART_TypeDef *USARTx) +{ + WRITE_REG(USARTx->SR, ~(USART_SR_TC)); +} + +/** + * @brief Clear RX Not Empty Flag + * @rmtoll SR RXNE LL_USART_ClearFlag_RXNE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_RXNE(USART_TypeDef *USARTx) +{ + WRITE_REG(USARTx->SR, ~(USART_SR_RXNE)); +} + +/** + * @brief Clear LIN Break Detection Flag + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll SR LBD LL_USART_ClearFlag_LBD + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_LBD(USART_TypeDef *USARTx) +{ + WRITE_REG(USARTx->SR, ~(USART_SR_LBD)); +} + +/** + * @brief Clear CTS Interrupt Flag + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll SR CTS LL_USART_ClearFlag_nCTS + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_ClearFlag_nCTS(USART_TypeDef *USARTx) +{ + WRITE_REG(USARTx->SR, ~(USART_SR_CTS)); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_IT_Management IT_Management + * @{ + */ + +/** + * @brief Enable IDLE Interrupt + * @rmtoll CR1 IDLEIE LL_USART_EnableIT_IDLE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_IDLE(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR1, USART_CR1_IDLEIE); +} + +/** + * @brief Enable RX Not Empty Interrupt + * @rmtoll CR1 RXNEIE LL_USART_EnableIT_RXNE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_RXNE(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR1, USART_CR1_RXNEIE); +} + +/** + * @brief Enable Transmission Complete Interrupt + * @rmtoll CR1 TCIE LL_USART_EnableIT_TC + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_TC(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR1, USART_CR1_TCIE); +} + +/** + * @brief Enable TX Empty Interrupt + * @rmtoll CR1 TXEIE LL_USART_EnableIT_TXE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_TXE(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR1, USART_CR1_TXEIE); +} + +/** + * @brief Enable Parity Error Interrupt + * @rmtoll CR1 PEIE LL_USART_EnableIT_PE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_PE(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR1, USART_CR1_PEIE); +} + +/** + * @brief Enable LIN Break Detection Interrupt + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. 
+ * @rmtoll CR2 LBDIE LL_USART_EnableIT_LBD + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_LBD(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR2, USART_CR2_LBDIE); +} + +/** + * @brief Enable Error Interrupt + * @note When set, Error Interrupt Enable Bit is enabling interrupt generation in case of a framing + * error, overrun error or noise flag (FE=1 or ORE=1 or NF=1 in the USARTx_SR register). + * 0: Interrupt is inhibited + * 1: An interrupt is generated when FE=1 or ORE=1 or NF=1 in the USARTx_SR register. + * @rmtoll CR3 EIE LL_USART_EnableIT_ERROR + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_ERROR(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR3, USART_CR3_EIE); +} + +/** + * @brief Enable CTS Interrupt + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll CR3 CTSIE LL_USART_EnableIT_CTS + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableIT_CTS(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR3, USART_CR3_CTSIE); +} + +/** + * @brief Disable IDLE Interrupt + * @rmtoll CR1 IDLEIE LL_USART_DisableIT_IDLE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_IDLE(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR1, USART_CR1_IDLEIE); +} + +/** + * @brief Disable RX Not Empty Interrupt + * @rmtoll CR1 RXNEIE LL_USART_DisableIT_RXNE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_RXNE(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR1, USART_CR1_RXNEIE); +} + +/** + * @brief Disable Transmission Complete Interrupt + * @rmtoll CR1 TCIE LL_USART_DisableIT_TC + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_TC(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR1, USART_CR1_TCIE); +} + +/** + * @brief Disable TX Empty Interrupt + * @rmtoll CR1 TXEIE LL_USART_DisableIT_TXE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_TXE(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR1, USART_CR1_TXEIE); +} + +/** + * @brief Disable Parity Error Interrupt + * @rmtoll CR1 PEIE LL_USART_DisableIT_PE + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_PE(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR1, USART_CR1_PEIE); +} + +/** + * @brief Disable LIN Break Detection Interrupt + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll CR2 LBDIE LL_USART_DisableIT_LBD + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_LBD(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR2, USART_CR2_LBDIE); +} + +/** + * @brief Disable Error Interrupt + * @note When set, Error Interrupt Enable Bit is enabling interrupt generation in case of a framing + * error, overrun error or noise flag (FE=1 or ORE=1 or NF=1 in the USARTx_SR register). + * 0: Interrupt is inhibited + * 1: An interrupt is generated when FE=1 or ORE=1 or NF=1 in the USARTx_SR register. 
+ * @rmtoll CR3 EIE LL_USART_DisableIT_ERROR + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_ERROR(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR3, USART_CR3_EIE); +} + +/** + * @brief Disable CTS Interrupt + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. + * @rmtoll CR3 CTSIE LL_USART_DisableIT_CTS + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableIT_CTS(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR3, USART_CR3_CTSIE); +} + +/** + * @brief Check if the USART IDLE Interrupt source is enabled or disabled. + * @rmtoll CR1 IDLEIE LL_USART_IsEnabledIT_IDLE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_IDLE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_IDLEIE) == (USART_CR1_IDLEIE)); +} + +/** + * @brief Check if the USART RX Not Empty Interrupt is enabled or disabled. + * @rmtoll CR1 RXNEIE LL_USART_IsEnabledIT_RXNE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_RXNE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_RXNEIE) == (USART_CR1_RXNEIE)); +} + +/** + * @brief Check if the USART Transmission Complete Interrupt is enabled or disabled. + * @rmtoll CR1 TCIE LL_USART_IsEnabledIT_TC + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_TC(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_TCIE) == (USART_CR1_TCIE)); +} + +/** + * @brief Check if the USART TX Empty Interrupt is enabled or disabled. + * @rmtoll CR1 TXEIE LL_USART_IsEnabledIT_TXE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_TXE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_TXEIE) == (USART_CR1_TXEIE)); +} + +/** + * @brief Check if the USART Parity Error Interrupt is enabled or disabled. + * @rmtoll CR1 PEIE LL_USART_IsEnabledIT_PE + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_PE(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR1, USART_CR1_PEIE) == (USART_CR1_PEIE)); +} + +/** + * @brief Check if the USART LIN Break Detection Interrupt is enabled or disabled. + * @note Macro IS_UART_LIN_INSTANCE(USARTx) can be used to check whether or not + * LIN feature is supported by the USARTx instance. + * @rmtoll CR2 LBDIE LL_USART_IsEnabledIT_LBD + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_LBD(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR2, USART_CR2_LBDIE) == (USART_CR2_LBDIE)); +} + +/** + * @brief Check if the USART Error Interrupt is enabled or disabled. + * @rmtoll CR3 EIE LL_USART_IsEnabledIT_ERROR + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_ERROR(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_EIE) == (USART_CR3_EIE)); +} + +/** + * @brief Check if the USART CTS Interrupt is enabled or disabled. + * @note Macro IS_UART_HWFLOW_INSTANCE(USARTx) can be used to check whether or not + * Hardware Flow control feature is supported by the USARTx instance. 
+ * @rmtoll CR3 CTSIE LL_USART_IsEnabledIT_CTS + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledIT_CTS(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_CTSIE) == (USART_CR3_CTSIE)); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_DMA_Management DMA_Management + * @{ + */ + +/** + * @brief Enable DMA Mode for reception + * @rmtoll CR3 DMAR LL_USART_EnableDMAReq_RX + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableDMAReq_RX(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR3, USART_CR3_DMAR); +} + +/** + * @brief Disable DMA Mode for reception + * @rmtoll CR3 DMAR LL_USART_DisableDMAReq_RX + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableDMAReq_RX(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR3, USART_CR3_DMAR); +} + +/** + * @brief Check if DMA Mode is enabled for reception + * @rmtoll CR3 DMAR LL_USART_IsEnabledDMAReq_RX + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledDMAReq_RX(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_DMAR) == (USART_CR3_DMAR)); +} + +/** + * @brief Enable DMA Mode for transmission + * @rmtoll CR3 DMAT LL_USART_EnableDMAReq_TX + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_EnableDMAReq_TX(USART_TypeDef *USARTx) +{ + ATOMIC_SET_BIT(USARTx->CR3, USART_CR3_DMAT); +} + +/** + * @brief Disable DMA Mode for transmission + * @rmtoll CR3 DMAT LL_USART_DisableDMAReq_TX + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_DisableDMAReq_TX(USART_TypeDef *USARTx) +{ + ATOMIC_CLEAR_BIT(USARTx->CR3, USART_CR3_DMAT); +} + +/** + * @brief Check if DMA Mode is enabled for transmission + * @rmtoll CR3 DMAT LL_USART_IsEnabledDMAReq_TX + * @param USARTx USART Instance + * @retval State of bit (1 or 0). + */ +__STATIC_INLINE uint32_t LL_USART_IsEnabledDMAReq_TX(const USART_TypeDef *USARTx) +{ + return (READ_BIT(USARTx->CR3, USART_CR3_DMAT) == (USART_CR3_DMAT)); +} + +/** + * @brief Get the data register address used for DMA transfer + * @rmtoll DR DR LL_USART_DMA_GetRegAddr + * @note Address of Data Register is valid for both Transmit and Receive transfers. 
+ * @param USARTx USART Instance + * @retval Address of data register + */ +__STATIC_INLINE uint32_t LL_USART_DMA_GetRegAddr(const USART_TypeDef *USARTx) +{ + /* return address of DR register */ + return ((uint32_t) &(USARTx->DR)); +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_Data_Management Data_Management + * @{ + */ + +/** + * @brief Read Receiver Data register (Receive Data value, 8 bits) + * @rmtoll DR DR LL_USART_ReceiveData8 + * @param USARTx USART Instance + * @retval Value between Min_Data=0x00 and Max_Data=0xFF + */ +__STATIC_INLINE uint8_t LL_USART_ReceiveData8(const USART_TypeDef *USARTx) +{ + return (uint8_t)(READ_BIT(USARTx->DR, USART_DR_DR)); +} + +/** + * @brief Read Receiver Data register (Receive Data value, 9 bits) + * @rmtoll DR DR LL_USART_ReceiveData9 + * @param USARTx USART Instance + * @retval Value between Min_Data=0x00 and Max_Data=0x1FF + */ +__STATIC_INLINE uint16_t LL_USART_ReceiveData9(const USART_TypeDef *USARTx) +{ + return (uint16_t)(READ_BIT(USARTx->DR, USART_DR_DR)); +} + +/** + * @brief Write in Transmitter Data Register (Transmit Data value, 8 bits) + * @rmtoll DR DR LL_USART_TransmitData8 + * @param USARTx USART Instance + * @param Value between Min_Data=0x00 and Max_Data=0xFF + * @retval None + */ +__STATIC_INLINE void LL_USART_TransmitData8(USART_TypeDef *USARTx, uint8_t Value) +{ + USARTx->DR = Value; +} + +/** + * @brief Write in Transmitter Data Register (Transmit Data value, 9 bits) + * @rmtoll DR DR LL_USART_TransmitData9 + * @param USARTx USART Instance + * @param Value between Min_Data=0x00 and Max_Data=0x1FF + * @retval None + */ +__STATIC_INLINE void LL_USART_TransmitData9(USART_TypeDef *USARTx, uint16_t Value) +{ + USARTx->DR = Value & 0x1FFU; +} + +/** + * @} + */ + +/** @defgroup USART_LL_EF_Execution Execution + * @{ + */ + +/** + * @brief Request Break sending + * @rmtoll CR1 SBK LL_USART_RequestBreakSending + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_RequestBreakSending(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR1, USART_CR1_SBK); +} + +/** + * @brief Put USART in Mute mode + * @rmtoll CR1 RWU LL_USART_RequestEnterMuteMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_RequestEnterMuteMode(USART_TypeDef *USARTx) +{ + SET_BIT(USARTx->CR1, USART_CR1_RWU); +} + +/** + * @brief Put USART in Active mode + * @rmtoll CR1 RWU LL_USART_RequestExitMuteMode + * @param USARTx USART Instance + * @retval None + */ +__STATIC_INLINE void LL_USART_RequestExitMuteMode(USART_TypeDef *USARTx) +{ + CLEAR_BIT(USARTx->CR1, USART_CR1_RWU); +} + +/** + * @} + */ + +#if defined(USE_FULL_LL_DRIVER) +/** @defgroup USART_LL_EF_Init Initialization and de-initialization functions + * @{ + */ +ErrorStatus LL_USART_DeInit(const USART_TypeDef *USARTx); +ErrorStatus LL_USART_Init(USART_TypeDef *USARTx, const LL_USART_InitTypeDef *USART_InitStruct); +void LL_USART_StructInit(LL_USART_InitTypeDef *USART_InitStruct); +ErrorStatus LL_USART_ClockInit(USART_TypeDef *USARTx, const LL_USART_ClockInitTypeDef *USART_ClockInitStruct); +void LL_USART_ClockStructInit(LL_USART_ClockInitTypeDef *USART_ClockInitStruct); +/** + * @} + */ +#endif /* USE_FULL_LL_DRIVER */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* USART1 || USART2 || USART3 || USART6 || UART4 || UART5 || UART7 || UART8 || UART9 || UART10 */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_USART_H */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_usb.h 
b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_usb.h index 302ab815..caabcf33 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_usb.h +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_usb.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -23,7 +22,7 @@ #ifdef __cplusplus extern "C" { -#endif +#endif /* __cplusplus */ /* Includes ------------------------------------------------------------------*/ #include "stm32f4xx_hal_def.h" @@ -38,18 +37,24 @@ extern "C" { */ /* Exported types ------------------------------------------------------------*/ +#ifndef HAL_USB_TIMEOUT +#define HAL_USB_TIMEOUT 0xF000000U +#endif /* define HAL_USB_TIMEOUT */ + +#ifndef HAL_USB_CURRENT_MODE_MAX_DELAY_MS +#define HAL_USB_CURRENT_MODE_MAX_DELAY_MS 200U +#endif /* define HAL_USB_CURRENT_MODE_MAX_DELAY_MS */ /** * @brief USB Mode definition */ -#if defined (USB_OTG_FS) || defined (USB_OTG_HS) typedef enum { - USB_DEVICE_MODE = 0, - USB_HOST_MODE = 1, - USB_DRD_MODE = 2 -} USB_OTG_ModeTypeDef; + USB_DEVICE_MODE = 0, + USB_HOST_MODE = 1, + USB_DRD_MODE = 2 +} USB_ModeTypeDef; /** * @brief URB States definition @@ -62,7 +67,7 @@ typedef enum URB_NYET, URB_ERROR, URB_STALL -} USB_OTG_URBStateTypeDef; +} USB_URBStateTypeDef; /** * @brief Host channel States definition @@ -72,52 +77,56 @@ typedef enum HC_IDLE = 0, HC_XFRC, HC_HALTED, + HC_ACK, HC_NAK, HC_NYET, HC_STALL, HC_XACTERR, HC_BBLERR, HC_DATATGLERR -} USB_OTG_HCStateTypeDef; +} USB_HCStateTypeDef; + /** * @brief USB Instance Initialization Structure definition */ typedef struct { - uint32_t dev_endpoints; /*!< Device Endpoints number. + uint8_t dev_endpoints; /*!< Device Endpoints number. This parameter depends on the used USB core. This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ - uint32_t Host_channels; /*!< Host Channels number. + uint8_t Host_channels; /*!< Host Channels number. This parameter Depends on the used USB core. This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ - uint32_t speed; /*!< USB Core speed. - This parameter can be any value of @ref USB_Core_Speed */ + uint8_t dma_enable; /*!< USB DMA state. + If DMA is not supported this parameter shall be set by default to zero */ - uint32_t dma_enable; /*!< Enable or disable of the USB embedded DMA used only for OTG HS. */ + uint8_t speed; /*!< USB Core speed. + This parameter can be any value of @ref PCD_Speed/HCD_Speed + (HCD_SPEED_xxx, HCD_SPEED_xxx) */ - uint32_t ep0_mps; /*!< Set the Endpoint 0 Max Packet size. */ + uint8_t ep0_mps; /*!< Set the Endpoint 0 Max Packet size. */ - uint32_t phy_itface; /*!< Select the used PHY interface. - This parameter can be any value of @ref USB_Core_PHY */ + uint8_t phy_itface; /*!< Select the used PHY interface. + This parameter can be any value of @ref PCD_PHY_Module/HCD_PHY_Module */ - uint32_t Sof_enable; /*!< Enable or disable the output of the SOF signal. */ + uint8_t Sof_enable; /*!< Enable or disable the output of the SOF signal. */ - uint32_t low_power_enable; /*!< Enable or disable the low power mode. */ + uint8_t low_power_enable; /*!< Enable or disable the low Power Mode. */ - uint32_t lpm_enable; /*!< Enable or disable Link Power Management. 
*/ + uint8_t lpm_enable; /*!< Enable or disable Link Power Management. */ - uint32_t battery_charging_enable; /*!< Enable or disable Battery charging. */ + uint8_t battery_charging_enable; /*!< Enable or disable Battery charging. */ - uint32_t vbus_sensing_enable; /*!< Enable or disable the VBUS Sensing feature. */ + uint8_t vbus_sensing_enable; /*!< Enable or disable the VBUS Sensing feature. */ - uint32_t use_dedicated_ep1; /*!< Enable or disable the use of the dedicated EP1 interrupt. */ + uint8_t use_dedicated_ep1; /*!< Enable or disable the use of the dedicated EP1 interrupt. */ - uint32_t use_external_vbus; /*!< Enable or disable the use of the external VBUS. */ + uint8_t use_external_vbus; /*!< Enable or disable the use of the external VBUS. */ -} USB_OTG_CfgTypeDef; +} USB_CfgTypeDef; typedef struct { @@ -130,29 +139,34 @@ typedef struct uint8_t is_stall; /*!< Endpoint stall condition This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + uint8_t is_iso_incomplete; /*!< Endpoint isoc condition + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + uint8_t type; /*!< Endpoint type - This parameter can be any value of @ref USB_EP_Type_ */ + This parameter can be any value of @ref USB_LL_EP_Type */ uint8_t data_pid_start; /*!< Initial data PID This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ - uint8_t even_odd_frame; /*!< IFrame parity - This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ - - uint16_t tx_fifo_num; /*!< Transmission FIFO number - This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ - uint32_t maxpacket; /*!< Endpoint Max packet size This parameter must be a number between Min_Data = 0 and Max_Data = 64KB */ uint8_t *xfer_buff; /*!< Pointer to transfer buffer */ - uint32_t dma_addr; /*!< 32 bits aligned transfer buffer address */ - uint32_t xfer_len; /*!< Current transfer length */ uint32_t xfer_count; /*!< Partial transfer length in case of multi packet transfer */ -} USB_OTG_EPTypeDef; + + uint8_t even_odd_frame; /*!< IFrame parity + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint16_t tx_fifo_num; /*!< Transmission FIFO number + This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ + + uint32_t dma_addr; /*!< 32 bits aligned transfer buffer address */ + + uint32_t xfer_size; /*!< requested transfer size */ +} USB_EPTypeDef; typedef struct { @@ -168,15 +182,21 @@ typedef struct uint8_t ep_is_in; /*!< Endpoint direction This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ - uint8_t speed; /*!< USB Host speed. - This parameter can be any value of @ref USB_Core_Speed_ */ + uint8_t speed; /*!< USB Host Channel speed. + This parameter can be any value of @ref HCD_Device_Speed: + (HCD_DEVICE_SPEED_xxx) */ uint8_t do_ping; /*!< Enable or disable the use of the PING protocol for HS mode. */ + uint8_t do_ssplit; /*!< Enable start split transaction in HS mode. */ + uint8_t do_csplit; /*!< Enable complete split transaction in HS mode. */ + uint8_t ep_ss_schedule; /*!< Enable periodic endpoint start split schedule . */ + uint32_t iso_splt_xactPos; /*!< iso split transfer transaction position. */ - uint8_t process_ping; /*!< Execute the PING protocol for HS mode. */ + uint8_t hub_port_nbr; /*!< USB HUB port number */ + uint8_t hub_addr; /*!< USB HUB address */ uint8_t ep_type; /*!< Endpoint Type. 
- This parameter can be any value of @ref USB_EP_Type_ */ + This parameter can be any value of @ref USB_LL_EP_Type */ uint16_t max_packet; /*!< Endpoint Max packet size. This parameter must be a number between Min_Data = 0 and Max_Data = 64KB */ @@ -186,6 +206,8 @@ typedef struct uint8_t *xfer_buff; /*!< Pointer to transfer buffer. */ + uint32_t XferSize; /*!< OTG Channel transfer size. */ + uint32_t xfer_len; /*!< Current transfer length. */ uint32_t xfer_count; /*!< Partial transfer length in case of multi packet transfer. */ @@ -199,15 +221,21 @@ typedef struct uint32_t dma_addr; /*!< 32 bits aligned transfer buffer address. */ uint32_t ErrCnt; /*!< Host channel error count. */ + uint32_t NyetErrCnt; /*!< Complete Split NYET Host channel error count. */ - USB_OTG_URBStateTypeDef urb_state; /*!< URB state. - This parameter can be any value of @ref USB_OTG_URBStateTypeDef */ + USB_URBStateTypeDef urb_state; /*!< URB state. + This parameter can be any value of @ref USB_URBStateTypeDef */ - USB_OTG_HCStateTypeDef state; /*!< Host Channel state. - This parameter can be any value of @ref USB_OTG_HCStateTypeDef */ -} USB_OTG_HCTypeDef; -#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + USB_HCStateTypeDef state; /*!< Host Channel state. + This parameter can be any value of @ref USB_HCStateTypeDef */ +} USB_HCTypeDef; +typedef USB_ModeTypeDef USB_OTG_ModeTypeDef; +typedef USB_CfgTypeDef USB_OTG_CfgTypeDef; +typedef USB_EPTypeDef USB_OTG_EPTypeDef; +typedef USB_URBStateTypeDef USB_OTG_URBStateTypeDef; +typedef USB_HCStateTypeDef USB_OTG_HCStateTypeDef; +typedef USB_HCTypeDef USB_OTG_HCTypeDef; /* Exported constants --------------------------------------------------------*/ @@ -235,18 +263,6 @@ typedef struct * @} */ -/** @defgroup USB_LL Device Speed - * @{ - */ -#define USBD_HS_SPEED 0U -#define USBD_HSINFS_SPEED 1U -#define USBH_HS_SPEED 0U -#define USBD_FS_SPEED 2U -#define USBH_FSLS_SPEED 1U -/** - * @} - */ - /** @defgroup USB_LL_Core_Speed USB Low Layer Core Speed * @{ */ @@ -310,14 +326,26 @@ typedef struct /** * @} */ - +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ /** @defgroup USB_LL_EP0_MPS USB Low Layer EP0 MPS * @{ */ -#define EP_MPS_64 0U -#define EP_MPS_32 1U -#define EP_MPS_16 2U -#define EP_MPS_8 3U +#define EP_MPS_64 0U +#define EP_MPS_32 1U +#define EP_MPS_16 2U +#define EP_MPS_8 3U +/** + * @} + */ + +/** @defgroup USB_LL_EP_Type USB Low Layer EP Type + * @{ + */ +#define EP_TYPE_CTRL 0U +#define EP_TYPE_ISOC 1U +#define EP_TYPE_BULK 2U +#define EP_TYPE_INTR 3U +#define EP_TYPE_MSK 3U /** * @} */ @@ -332,18 +360,30 @@ typedef struct * @} */ -/** @defgroup USB_LL_EP_Type USB Low Layer EP Type +/** @defgroup USB_LL_CH_PID_Type USB Low Layer Channel PID Type * @{ */ -#define EP_TYPE_CTRL 0U -#define EP_TYPE_ISOC 1U -#define EP_TYPE_BULK 2U -#define EP_TYPE_INTR 3U -#define EP_TYPE_MSK 3U +#define HC_PID_DATA0 0U +#define HC_PID_DATA2 1U +#define HC_PID_DATA1 2U +#define HC_PID_SETUP 3U /** * @} */ +/** @defgroup USB_LL Device Speed + * @{ + */ +#define USBD_HS_SPEED 0U +#define USBD_HSINFS_SPEED 1U +#define USBH_HS_SPEED 0U +#define USBD_FS_SPEED 2U +#define USBH_FSLS_SPEED 1U +/** + * @} + */ + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) /** @defgroup USB_LL_STS_Defines USB Low Layer STS Defines * @{ */ @@ -366,6 +406,16 @@ typedef struct * @} */ +/** @defgroup USB_LL_HFIR_Defines USB Low Layer frame interval Defines + * @{ + */ +#define HFIR_6_MHZ 6000U +#define HFIR_60_MHZ 60000U +#define HFIR_48_MHZ 48000U +/** + * @} + */ + /** @defgroup 
USB_LL_HPRT0_PRTSPD_SPEED_Defines USB Low Layer HPRT0 PRTSPD Speed Defines * @{ */ @@ -381,29 +431,47 @@ typedef struct #define HCCHAR_BULK 2U #define HCCHAR_INTR 3U -#define HC_PID_DATA0 0U -#define HC_PID_DATA2 1U -#define HC_PID_DATA1 2U -#define HC_PID_SETUP 3U - #define GRXSTS_PKTSTS_IN 2U #define GRXSTS_PKTSTS_IN_XFER_COMP 3U #define GRXSTS_PKTSTS_DATA_TOGGLE_ERR 5U #define GRXSTS_PKTSTS_CH_HALTED 7U +#define CLEAR_INTERRUPT_MASK 0xFFFFFFFFU + +#define HC_MAX_PKT_CNT 256U +#define ISO_SPLT_MPS 188U + +#define HCSPLT_BEGIN 1U +#define HCSPLT_MIDDLE 2U +#define HCSPLT_END 3U +#define HCSPLT_FULL 4U + +#define TEST_J 1U +#define TEST_K 2U +#define TEST_SE0_NAK 3U +#define TEST_PACKET 4U +#define TEST_FORCE_EN 5U + #define USBx_PCGCCTL *(__IO uint32_t *)((uint32_t)USBx_BASE + USB_OTG_PCGCCTL_BASE) #define USBx_HPRT0 *(__IO uint32_t *)((uint32_t)USBx_BASE + USB_OTG_HOST_PORT_BASE) #define USBx_DEVICE ((USB_OTG_DeviceTypeDef *)(USBx_BASE + USB_OTG_DEVICE_BASE)) -#define USBx_INEP(i) ((USB_OTG_INEndpointTypeDef *)(USBx_BASE + USB_OTG_IN_ENDPOINT_BASE + ((i) * USB_OTG_EP_REG_SIZE))) -#define USBx_OUTEP(i) ((USB_OTG_OUTEndpointTypeDef *)(USBx_BASE + USB_OTG_OUT_ENDPOINT_BASE + ((i) * USB_OTG_EP_REG_SIZE))) +#define USBx_INEP(i) ((USB_OTG_INEndpointTypeDef *)(USBx_BASE\ + + USB_OTG_IN_ENDPOINT_BASE + ((i) * USB_OTG_EP_REG_SIZE))) + +#define USBx_OUTEP(i) ((USB_OTG_OUTEndpointTypeDef *)(USBx_BASE\ + + USB_OTG_OUT_ENDPOINT_BASE + ((i) * USB_OTG_EP_REG_SIZE))) + #define USBx_DFIFO(i) *(__IO uint32_t *)(USBx_BASE + USB_OTG_FIFO_BASE + ((i) * USB_OTG_FIFO_SIZE)) #define USBx_HOST ((USB_OTG_HostTypeDef *)(USBx_BASE + USB_OTG_HOST_BASE)) -#define USBx_HC(i) ((USB_OTG_HostChannelTypeDef *)(USBx_BASE + USB_OTG_HOST_CHANNEL_BASE + ((i) * USB_OTG_HOST_CHANNEL_SIZE))) -#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ +#define USBx_HC(i) ((USB_OTG_HostChannelTypeDef *)(USBx_BASE\ + + USB_OTG_HOST_CHANNEL_BASE\ + + ((i) * USB_OTG_HOST_CHANNEL_SIZE))) + #define EP_ADDR_MSK 0xFU +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ /** * @} */ @@ -434,54 +502,55 @@ HAL_StatusTypeDef USB_EnableGlobalInt(USB_OTG_GlobalTypeDef *USBx); HAL_StatusTypeDef USB_DisableGlobalInt(USB_OTG_GlobalTypeDef *USBx); HAL_StatusTypeDef USB_SetTurnaroundTime(USB_OTG_GlobalTypeDef *USBx, uint32_t hclk, uint8_t speed); HAL_StatusTypeDef USB_SetCurrentMode(USB_OTG_GlobalTypeDef *USBx, USB_OTG_ModeTypeDef mode); -HAL_StatusTypeDef USB_SetDevSpeed(USB_OTG_GlobalTypeDef *USBx, uint8_t speed); +HAL_StatusTypeDef USB_SetDevSpeed(const USB_OTG_GlobalTypeDef *USBx, uint8_t speed); HAL_StatusTypeDef USB_FlushRxFifo(USB_OTG_GlobalTypeDef *USBx); HAL_StatusTypeDef USB_FlushTxFifo(USB_OTG_GlobalTypeDef *USBx, uint32_t num); -HAL_StatusTypeDef USB_ActivateEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); -HAL_StatusTypeDef USB_DeactivateEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); -HAL_StatusTypeDef USB_ActivateDedicatedEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); -HAL_StatusTypeDef USB_DeactivateDedicatedEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_ActivateEndpoint(const USB_OTG_GlobalTypeDef *USBx, const USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_DeactivateEndpoint(const USB_OTG_GlobalTypeDef *USBx, const USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_ActivateDedicatedEndpoint(const USB_OTG_GlobalTypeDef *USBx, const USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_DeactivateDedicatedEndpoint(const USB_OTG_GlobalTypeDef *USBx, const 
USB_OTG_EPTypeDef *ep); HAL_StatusTypeDef USB_EPStartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep, uint8_t dma); -HAL_StatusTypeDef USB_EP0StartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep, uint8_t dma); -HAL_StatusTypeDef USB_WritePacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *src, +HAL_StatusTypeDef USB_WritePacket(const USB_OTG_GlobalTypeDef *USBx, uint8_t *src, uint8_t ch_ep_num, uint16_t len, uint8_t dma); -void *USB_ReadPacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *dest, uint16_t len); -HAL_StatusTypeDef USB_EPSetStall(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); -HAL_StatusTypeDef USB_EPClearStall(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); -HAL_StatusTypeDef USB_SetDevAddress(USB_OTG_GlobalTypeDef *USBx, uint8_t address); -HAL_StatusTypeDef USB_DevConnect(USB_OTG_GlobalTypeDef *USBx); -HAL_StatusTypeDef USB_DevDisconnect(USB_OTG_GlobalTypeDef *USBx); +void *USB_ReadPacket(const USB_OTG_GlobalTypeDef *USBx, uint8_t *dest, uint16_t len); +HAL_StatusTypeDef USB_EPSetStall(const USB_OTG_GlobalTypeDef *USBx, const USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_EPClearStall(const USB_OTG_GlobalTypeDef *USBx, const USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_EPStopXfer(const USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_SetDevAddress(const USB_OTG_GlobalTypeDef *USBx, uint8_t address); +HAL_StatusTypeDef USB_DevConnect(const USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_DevDisconnect(const USB_OTG_GlobalTypeDef *USBx); HAL_StatusTypeDef USB_StopDevice(USB_OTG_GlobalTypeDef *USBx); -HAL_StatusTypeDef USB_ActivateSetup(USB_OTG_GlobalTypeDef *USBx); -HAL_StatusTypeDef USB_EP0_OutStart(USB_OTG_GlobalTypeDef *USBx, uint8_t dma, uint8_t *psetup); -uint8_t USB_GetDevSpeed(USB_OTG_GlobalTypeDef *USBx); -uint32_t USB_GetMode(USB_OTG_GlobalTypeDef *USBx); -uint32_t USB_ReadInterrupts(USB_OTG_GlobalTypeDef *USBx); -uint32_t USB_ReadDevAllOutEpInterrupt(USB_OTG_GlobalTypeDef *USBx); -uint32_t USB_ReadDevOutEPInterrupt(USB_OTG_GlobalTypeDef *USBx, uint8_t epnum); -uint32_t USB_ReadDevAllInEpInterrupt(USB_OTG_GlobalTypeDef *USBx); -uint32_t USB_ReadDevInEPInterrupt(USB_OTG_GlobalTypeDef *USBx, uint8_t epnum); +HAL_StatusTypeDef USB_ActivateSetup(const USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_EP0_OutStart(const USB_OTG_GlobalTypeDef *USBx, uint8_t dma, const uint8_t *psetup); +uint8_t USB_GetDevSpeed(const USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_GetMode(const USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_ReadInterrupts(USB_OTG_GlobalTypeDef const *USBx); +uint32_t USB_ReadChInterrupts(const USB_OTG_GlobalTypeDef *USBx, uint8_t chnum); +uint32_t USB_ReadDevAllOutEpInterrupt(const USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_ReadDevOutEPInterrupt(const USB_OTG_GlobalTypeDef *USBx, uint8_t epnum); +uint32_t USB_ReadDevAllInEpInterrupt(const USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_ReadDevInEPInterrupt(const USB_OTG_GlobalTypeDef *USBx, uint8_t epnum); void USB_ClearInterrupts(USB_OTG_GlobalTypeDef *USBx, uint32_t interrupt); HAL_StatusTypeDef USB_HostInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg); -HAL_StatusTypeDef USB_InitFSLSPClkSel(USB_OTG_GlobalTypeDef *USBx, uint8_t freq); -HAL_StatusTypeDef USB_ResetPort(USB_OTG_GlobalTypeDef *USBx); -HAL_StatusTypeDef USB_DriveVbus(USB_OTG_GlobalTypeDef *USBx, uint8_t state); -uint32_t USB_GetHostSpeed(USB_OTG_GlobalTypeDef *USBx); -uint32_t USB_GetCurrentFrame(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_InitFSLSPClkSel(const USB_OTG_GlobalTypeDef *USBx, 
uint8_t freq); +HAL_StatusTypeDef USB_ResetPort(const USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_DriveVbus(const USB_OTG_GlobalTypeDef *USBx, uint8_t state); +uint32_t USB_GetHostSpeed(USB_OTG_GlobalTypeDef const *USBx); +uint32_t USB_GetCurrentFrame(USB_OTG_GlobalTypeDef const *USBx); HAL_StatusTypeDef USB_HC_Init(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num, uint8_t epnum, uint8_t dev_address, uint8_t speed, uint8_t ep_type, uint16_t mps); HAL_StatusTypeDef USB_HC_StartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_HCTypeDef *hc, uint8_t dma); -uint32_t USB_HC_ReadInterrupt(USB_OTG_GlobalTypeDef *USBx); -HAL_StatusTypeDef USB_HC_Halt(USB_OTG_GlobalTypeDef *USBx, uint8_t hc_num); -HAL_StatusTypeDef USB_DoPing(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num); +uint32_t USB_HC_ReadInterrupt(const USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_HC_Halt(const USB_OTG_GlobalTypeDef *USBx, uint8_t hc_num); +HAL_StatusTypeDef USB_DoPing(const USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num); HAL_StatusTypeDef USB_StopHost(USB_OTG_GlobalTypeDef *USBx); -HAL_StatusTypeDef USB_ActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx); -HAL_StatusTypeDef USB_DeActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_ActivateRemoteWakeup(const USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_DeActivateRemoteWakeup(const USB_OTG_GlobalTypeDef *USBx); #endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ /** @@ -503,9 +572,7 @@ HAL_StatusTypeDef USB_DeActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx); #ifdef __cplusplus } -#endif +#endif /* __cplusplus */ #endif /* STM32F4xx_LL_USB_H */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_utils.h b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_utils.h new file mode 100644 index 00000000..accdac7b --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Inc/stm32f4xx_ll_utils.h @@ -0,0 +1,307 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_utils.h + * @author MCD Application Team + * @brief Header file of UTILS LL module. + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The LL UTILS driver contains a set of generic APIs that can be + used by user: + (+) Device electronic signature + (+) Timing functions + (+) PLL configuration functions + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F4xx_LL_UTILS_H +#define __STM32F4xx_LL_UTILS_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx.h" + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +/** @defgroup UTILS_LL UTILS + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup UTILS_LL_Private_Constants UTILS Private Constants + * @{ + */ + +/* Max delay can be used in LL_mDelay */ +#define LL_MAX_DELAY 0xFFFFFFFFU + +/** + * @brief Unique device ID register base address + */ +#define UID_BASE_ADDRESS UID_BASE + +/** + * @brief Flash size data register base address + */ +#define FLASHSIZE_BASE_ADDRESS FLASHSIZE_BASE + +/** + * @brief Package data register base address + */ +#define PACKAGE_BASE_ADDRESS PACKAGE_BASE + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup UTILS_LL_Private_Macros UTILS Private Macros + * @{ + */ +/** + * @} + */ +/* Exported types ------------------------------------------------------------*/ +/** @defgroup UTILS_LL_ES_INIT UTILS Exported structures + * @{ + */ +/** + * @brief UTILS PLL structure definition + */ +typedef struct +{ + uint32_t PLLM; /*!< Division factor for PLL VCO input clock. + This parameter can be a value of @ref RCC_LL_EC_PLLM_DIV + + This feature can be modified afterwards using unitary function + @ref LL_RCC_PLL_ConfigDomain_SYS(). */ + + uint32_t PLLN; /*!< Multiplication factor for PLL VCO output clock. + This parameter must be a number between Min_Data = @ref RCC_PLLN_MIN_VALUE + and Max_Data = @ref RCC_PLLN_MIN_VALUE + + This feature can be modified afterwards using unitary function + @ref LL_RCC_PLL_ConfigDomain_SYS(). */ + + uint32_t PLLP; /*!< Division for the main system clock. + This parameter can be a value of @ref RCC_LL_EC_PLLP_DIV + + This feature can be modified afterwards using unitary function + @ref LL_RCC_PLL_ConfigDomain_SYS(). */ +} LL_UTILS_PLLInitTypeDef; + +/** + * @brief UTILS System, AHB and APB buses clock configuration structure definition + */ +typedef struct +{ + uint32_t AHBCLKDivider; /*!< The AHB clock (HCLK) divider. This clock is derived from the system clock (SYSCLK). + This parameter can be a value of @ref RCC_LL_EC_SYSCLK_DIV + + This feature can be modified afterwards using unitary function + @ref LL_RCC_SetAHBPrescaler(). */ + + uint32_t APB1CLKDivider; /*!< The APB1 clock (PCLK1) divider. This clock is derived from the AHB clock (HCLK). + This parameter can be a value of @ref RCC_LL_EC_APB1_DIV + + This feature can be modified afterwards using unitary function + @ref LL_RCC_SetAPB1Prescaler(). */ + + uint32_t APB2CLKDivider; /*!< The APB2 clock (PCLK2) divider. This clock is derived from the AHB clock (HCLK). + This parameter can be a value of @ref RCC_LL_EC_APB2_DIV + + This feature can be modified afterwards using unitary function + @ref LL_RCC_SetAPB2Prescaler(). 
*/ + +} LL_UTILS_ClkInitTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup UTILS_LL_Exported_Constants UTILS Exported Constants + * @{ + */ + +/** @defgroup UTILS_EC_HSE_BYPASS HSE Bypass activation + * @{ + */ +#define LL_UTILS_HSEBYPASS_OFF 0x00000000U /*!< HSE Bypass is not enabled */ +#define LL_UTILS_HSEBYPASS_ON 0x00000001U /*!< HSE Bypass is enabled */ +/** + * @} + */ + +/** @defgroup UTILS_EC_PACKAGETYPE PACKAGE TYPE + * @{ + */ +#define LL_UTILS_PACKAGETYPE_WLCSP36_UFQFPN48_LQFP64 0x00000000U /*!< WLCSP36 or UFQFPN48 or LQFP64 package type */ +#define LL_UTILS_PACKAGETYPE_WLCSP168_FBGA169_LQFP100_LQFP64_UFQFPN48 0x00000100U /*!< WLCSP168 or FBGA169 or LQFP100 or LQFP64 or UFQFPN48 package type */ +#define LL_UTILS_PACKAGETYPE_WLCSP64_WLCSP81_LQFP176_UFBGA176 0x00000200U /*!< WLCSP64 or WLCSP81 or LQFP176 or UFBGA176 package type */ +#define LL_UTILS_PACKAGETYPE_LQFP144_UFBGA144_UFBGA144_UFBGA100 0x00000300U /*!< LQFP144 or UFBGA144 or UFBGA144 or UFBGA100 package type */ +#define LL_UTILS_PACKAGETYPE_LQFP100_LQFP208_TFBGA216 0x00000400U /*!< LQFP100 or LQFP208 or TFBGA216 package type */ +#define LL_UTILS_PACKAGETYPE_LQFP208_TFBGA216 0x00000500U /*!< LQFP208 or TFBGA216 package type */ +#define LL_UTILS_PACKAGETYPE_TQFP64_UFBGA144_LQFP144 0x00000700U /*!< TQFP64 or UFBGA144 or LQFP144 package type */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup UTILS_LL_Exported_Functions UTILS Exported Functions + * @{ + */ + +/** @defgroup UTILS_EF_DEVICE_ELECTRONIC_SIGNATURE DEVICE ELECTRONIC SIGNATURE + * @{ + */ + +/** + * @brief Get Word0 of the unique device identifier (UID based on 96 bits) + * @retval UID[31:0] + */ +__STATIC_INLINE uint32_t LL_GetUID_Word0(void) +{ + return (uint32_t)(READ_REG(*((uint32_t *)UID_BASE_ADDRESS))); +} + +/** + * @brief Get Word1 of the unique device identifier (UID based on 96 bits) + * @retval UID[63:32] + */ +__STATIC_INLINE uint32_t LL_GetUID_Word1(void) +{ + return (uint32_t)(READ_REG(*((uint32_t *)(UID_BASE_ADDRESS + 4U)))); +} + +/** + * @brief Get Word2 of the unique device identifier (UID based on 96 bits) + * @retval UID[95:64] + */ +__STATIC_INLINE uint32_t LL_GetUID_Word2(void) +{ + return (uint32_t)(READ_REG(*((uint32_t *)(UID_BASE_ADDRESS + 8U)))); +} + +/** + * @brief Get Flash memory size + * @note This bitfield indicates the size of the device Flash memory expressed in + * Kbytes. As an example, 0x040 corresponds to 64 Kbytes. + * @retval FLASH_SIZE[15:0]: Flash memory size + */ +__STATIC_INLINE uint32_t LL_GetFlashSize(void) +{ + return (uint32_t)(READ_REG(*((uint32_t *)FLASHSIZE_BASE_ADDRESS)) & 0xFFFF); +} + +/** + * @brief Get Package type + * @retval Returned value can be one of the following values: + * @arg @ref LL_UTILS_PACKAGETYPE_WLCSP36_UFQFPN48_LQFP64 (*) + * @arg @ref LL_UTILS_PACKAGETYPE_WLCSP168_FBGA169_LQFP100_LQFP64_UFQFPN48 (*) + * @arg @ref LL_UTILS_PACKAGETYPE_WLCSP64_WLCSP81_LQFP176_UFBGA176 (*) + * @arg @ref LL_UTILS_PACKAGETYPE_LQFP144_UFBGA144_UFBGA144_UFBGA100 (*) + * @arg @ref LL_UTILS_PACKAGETYPE_LQFP100_LQFP208_TFBGA216 (*) + * @arg @ref LL_UTILS_PACKAGETYPE_LQFP208_TFBGA216 (*) + * @arg @ref LL_UTILS_PACKAGETYPE_TQFP64_UFBGA144_LQFP144 (*) + * + * (*) value not defined in all devices. 
+ */ +__STATIC_INLINE uint32_t LL_GetPackageType(void) +{ + return (uint32_t)(READ_REG(*((uint32_t *)PACKAGE_BASE_ADDRESS)) & 0x0700U); +} + +/** + * @} + */ + +/** @defgroup UTILS_LL_EF_DELAY DELAY + * @{ + */ + +/** + * @brief This function configures the Cortex-M SysTick source of the time base. + * @param HCLKFrequency HCLK frequency in Hz (can be calculated thanks to RCC helper macro) + * @note When a RTOS is used, it is recommended to avoid changing the SysTick + * configuration by calling this function, for a delay use rather osDelay RTOS service. + * @param Ticks Frequency of Ticks (Hz) + * @retval None + */ +__STATIC_INLINE void LL_InitTick(uint32_t HCLKFrequency, uint32_t Ticks) +{ + /* Configure the SysTick to have interrupt in 1ms time base */ + SysTick->LOAD = (uint32_t)((HCLKFrequency / Ticks) - 1UL); /* set reload register */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable the Systick Timer */ +} + +void LL_Init1msTick(uint32_t HCLKFrequency); +void LL_mDelay(uint32_t Delay); + +/** + * @} + */ + +/** @defgroup UTILS_EF_SYSTEM SYSTEM + * @{ + */ + +void LL_SetSystemCoreClock(uint32_t HCLKFrequency); +ErrorStatus LL_SetFlashLatency(uint32_t HCLK_Frequency); +ErrorStatus LL_PLL_ConfigSystemClock_HSI(LL_UTILS_PLLInitTypeDef *UTILS_PLLInitStruct, + LL_UTILS_ClkInitTypeDef *UTILS_ClkInitStruct); +ErrorStatus LL_PLL_ConfigSystemClock_HSE(uint32_t HSEFrequency, uint32_t HSEBypass, + LL_UTILS_PLLInitTypeDef *UTILS_PLLInitStruct, LL_UTILS_ClkInitTypeDef *UTILS_ClkInitStruct); + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F4xx_LL_UTILS_H */ diff --git a/Drivers/STM32F4xx_HAL_Driver/LICENSE.txt b/Drivers/STM32F4xx_HAL_Driver/LICENSE.txt new file mode 100644 index 00000000..3edc4d14 --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/LICENSE.txt @@ -0,0 +1,6 @@ +This software component is provided to you as part of a software package and +applicable license terms are in the Package_license file. If you received this +software component outside of a package or without applicable license terms, +the terms of the BSD-3-Clause license shall apply. +You may obtain a copy of the BSD-3-Clause at: +https://opensource.org/licenses/BSD-3-Clause diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal.c index 918c28f5..4497de66 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal.c @@ -5,6 +5,17 @@ * @brief HAL module driver. * This is the common part of the HAL initialization * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim ============================================================================== ##### How to use this driver ##### @@ -19,17 +30,6 @@ @endverbatim ****************************************************************************** - * @attention - * - *
© Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * - ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ @@ -50,11 +50,11 @@ * @{ */ /** - * @brief STM32F4xx HAL Driver version number V1.7.10 + * @brief STM32F4xx HAL Driver version number V1.8.3 */ #define __STM32F4xx_HAL_VERSION_MAIN (0x01U) /*!< [31:24] main version */ -#define __STM32F4xx_HAL_VERSION_SUB1 (0x07U) /*!< [23:16] sub1 version */ -#define __STM32F4xx_HAL_VERSION_SUB2 (0x0AU) /*!< [15:8] sub2 version */ +#define __STM32F4xx_HAL_VERSION_SUB1 (0x08U) /*!< [23:16] sub1 version */ +#define __STM32F4xx_HAL_VERSION_SUB2 (0x03U) /*!< [15:8] sub2 version */ #define __STM32F4xx_HAL_VERSION_RC (0x00U) /*!< [7:0] release candidate */ #define __STM32F4xx_HAL_VERSION ((__STM32F4xx_HAL_VERSION_MAIN << 24U)\ |(__STM32F4xx_HAL_VERSION_SUB1 << 16U)\ @@ -156,7 +156,7 @@ HAL_TickFreqTypeDef uwTickFreq = HAL_TICK_FREQ_DEFAULT; /* 1KHz */ */ HAL_StatusTypeDef HAL_Init(void) { - /* Configure Flash prefetch, Instruction cache, Data cache */ + /* Configure Flash prefetch, Instruction cache, Data cache */ #if (INSTRUCTION_CACHE_ENABLE != 0U) __HAL_FLASH_INSTRUCTION_CACHE_ENABLE(); #endif /* INSTRUCTION_CACHE_ENABLE */ @@ -368,7 +368,8 @@ HAL_StatusTypeDef HAL_SetTickFreq(HAL_TickFreqTypeDef Freq) /** * @brief Return tick frequency. - * @retval tick period in Hz + * @retval Tick frequency. + * Value of @ref HAL_TickFreqTypeDef. */ HAL_TickFreqTypeDef HAL_GetTickFreq(void) { @@ -612,4 +613,4 @@ void HAL_DisableMemorySwappingBank(void) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_adc.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_adc.c index cf3df802..9ad943d8 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_adc.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_adc.c @@ -2,31 +2,42 @@ ****************************************************************************** * @file stm32f4xx_hal_adc.c * @author MCD Application Team - * @brief This file provides firmware functions to manage the following + * @brief This file provides firmware functions to manage the following * functionalities of the Analog to Digital Converter (ADC) peripheral: * + Initialization and de-initialization functions - * + IO operation functions - * + State and errors functions - * + * + Peripheral Control functions + * + Peripheral State functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim ============================================================================== ##### ADC Peripheral features ##### ============================================================================== - [..] + [..] (#) 12-bit, 10-bit, 8-bit or 6-bit configurable resolution. 
- (#) Interrupt generation at the end of conversion, end of injected conversion, + (#) Interrupt generation at the end of conversion, end of injected conversion, and in case of analog watchdog or overrun events (#) Single and continuous conversion modes. (#) Scan mode for automatic conversion of channel 0 to channel x. (#) Data alignment with in-built data coherency. (#) Channel-wise programmable sampling time. - (#) External trigger option with configurable polarity for both regular and + (#) External trigger option with configurable polarity for both regular and injected conversion. (#) Dual/Triple mode (on devices with 2 ADCs or more). - (#) Configurable DMA data storage in Dual/Triple ADC mode. + (#) Configurable DMA data storage in Dual/Triple ADC mode. (#) Configurable delay between conversions in Dual/Triple interleaved mode. (#) ADC conversion type (refer to the datasheets). - (#) ADC supply requirements: 2.4 V to 3.6 V at full speed and down to 1.8 V at + (#) ADC supply requirements: 2.4 V to 3.6 V at full speed and down to 1.8 V at slower speed. (#) ADC input range: VREF(minus) = VIN = VREF(plus). (#) DMA request generation during regular channel conversion. @@ -39,8 +50,8 @@ (##) Enable the ADC interface clock using __HAL_RCC_ADC_CLK_ENABLE() (##) ADC pins configuration (+++) Enable the clock for the ADC GPIOs using the following function: - __HAL_RCC_GPIOx_CLK_ENABLE() - (+++) Configure these ADC pins in analog mode using HAL_GPIO_Init() + __HAL_RCC_GPIOx_CLK_ENABLE() + (+++) Configure these ADC pins in analog mode using HAL_GPIO_Init() (##) In case of using interrupts (e.g. HAL_ADC_Start_IT()) (+++) Configure the ADC interrupt priority using HAL_NVIC_SetPriority() (+++) Enable the ADC IRQ handler using HAL_NVIC_EnableIRQ() @@ -54,7 +65,7 @@ (+++) Configure the priority and enable the NVIC for the transfer complete interrupt on the two DMA Streams. The output stream should have higher priority than the input stream. - + *** Configuration of ADC, groups regular/injected, channels parameters *** ============================================================================== [..] @@ -62,64 +73,64 @@ and regular group parameters (conversion trigger, sequencer, ...) using function HAL_ADC_Init(). - (#) Configure the channels for regular group parameters (channel number, + (#) Configure the channels for regular group parameters (channel number, channel rank into sequencer, ..., into regular group) using function HAL_ADC_ConfigChannel(). - (#) Optionally, configure the injected group parameters (conversion trigger, + (#) Optionally, configure the injected group parameters (conversion trigger, sequencer, ..., of injected group) - and the channels for injected group parameters (channel number, + and the channels for injected group parameters (channel number, channel rank into sequencer, ..., into injected group) using function HAL_ADCEx_InjectedConfigChannel(). (#) Optionally, configure the analog watchdog parameters (channels monitored, thresholds, ...) using function HAL_ADC_AnalogWDGConfig(). - (#) Optionally, for devices with several ADC instances: configure the + (#) Optionally, for devices with several ADC instances: configure the multimode parameters using function HAL_ADCEx_MultiModeConfigChannel(). *** Execution of ADC conversions *** ============================================================================== - [..] + [..] (#) ADC driver can be used among three modes: polling, interruption, - transfer by DMA. + transfer by DMA. 
*** Polling mode IO operation *** ================================= - [..] - (+) Start the ADC peripheral using HAL_ADC_Start() + [..] + (+) Start the ADC peripheral using HAL_ADC_Start() (+) Wait for end of conversion using HAL_ADC_PollForConversion(), at this stage - user can specify the value of timeout according to his end application + user can specify the value of timeout according to his end application (+) To read the ADC converted values, use the HAL_ADC_GetValue() function. (+) Stop the ADC peripheral using HAL_ADC_Stop() - - *** Interrupt mode IO operation *** + + *** Interrupt mode IO operation *** =================================== - [..] - (+) Start the ADC peripheral using HAL_ADC_Start_IT() + [..] + (+) Start the ADC peripheral using HAL_ADC_Start_IT() (+) Use HAL_ADC_IRQHandler() called under ADC_IRQHandler() Interrupt subroutine - (+) At ADC end of conversion HAL_ADC_ConvCpltCallback() function is executed and user can - add his own code by customization of function pointer HAL_ADC_ConvCpltCallback - (+) In case of ADC Error, HAL_ADC_ErrorCallback() function is executed and user can + (+) At ADC end of conversion HAL_ADC_ConvCpltCallback() function is executed and user can + add his own code by customization of function pointer HAL_ADC_ConvCpltCallback + (+) In case of ADC Error, HAL_ADC_ErrorCallback() function is executed and user can add his own code by customization of function pointer HAL_ADC_ErrorCallback - (+) Stop the ADC peripheral using HAL_ADC_Stop_IT() + (+) Stop the ADC peripheral using HAL_ADC_Stop_IT() - *** DMA mode IO operation *** + *** DMA mode IO operation *** ============================== - [..] - (+) Start the ADC peripheral using HAL_ADC_Start_DMA(), at this stage the user specify the length - of data to be transferred at each end of conversion - (+) At The end of data transfer by HAL_ADC_ConvCpltCallback() function is executed and user can - add his own code by customization of function pointer HAL_ADC_ConvCpltCallback - (+) In case of transfer Error, HAL_ADC_ErrorCallback() function is executed and user can + [..] + (+) Start the ADC peripheral using HAL_ADC_Start_DMA(), at this stage the user specify the length + of data to be transferred at each end of conversion + (+) At The end of data transfer by HAL_ADC_ConvCpltCallback() function is executed and user can + add his own code by customization of function pointer HAL_ADC_ConvCpltCallback + (+) In case of transfer Error, HAL_ADC_ErrorCallback() function is executed and user can add his own code by customization of function pointer HAL_ADC_ErrorCallback (+) Stop the ADC peripheral using HAL_ADC_Stop_DMA() - + *** ADC HAL driver macros list *** - ============================================= + ============================================= [..] Below the list of most used macros in ADC HAL driver. - + (+) __HAL_ADC_ENABLE : Enable the ADC peripheral (+) __HAL_ADC_DISABLE : Disable the ADC peripheral (+) __HAL_ADC_ENABLE_IT: Enable the ADC end of conversion interrupt @@ -127,10 +138,10 @@ (+) __HAL_ADC_GET_IT_SOURCE: Check if the specified ADC interrupt source is enabled or disabled (+) __HAL_ADC_CLEAR_FLAG: Clear the ADC's pending flags (+) __HAL_ADC_GET_FLAG: Get the selected ADC's flag status - (+) ADC_GET_RESOLUTION: Return resolution bits in CR1 register - - [..] - (@) You can refer to the ADC HAL driver header file for more useful macros + (+) ADC_GET_RESOLUTION: Return resolution bits in CR1 register + + [..] 
+ (@) You can refer to the ADC HAL driver header file for more useful macros *** Deinitialization of ADC *** ============================================================================== @@ -156,7 +167,7 @@ (#) Optionally, in case of usage of DMA: (++) Deinitialize the DMA using function HAL_DMA_DeInit(). - (++) Disable the NVIC for DMA using function HAL_NVIC_DisableIRQ(DMAx_Channelx_IRQn) + (++) Disable the NVIC for DMA using function HAL_NVIC_DisableIRQ(DMAx_Channelx_IRQn) *** Callback registration *** ============================================================================== [..] @@ -231,19 +242,7 @@ are set to the corresponding weak functions. @endverbatim - ****************************************************************************** - * @attention - * - *
© Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * - ****************************************************************************** - */ + */ /* Includes ------------------------------------------------------------------*/ #include "stm32f4xx_hal.h" @@ -255,10 +254,10 @@ /** @defgroup ADC ADC * @brief ADC driver modules * @{ - */ + */ #ifdef HAL_ADC_MODULE_ENABLED - + /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ /* Private macro -------------------------------------------------------------*/ @@ -267,7 +266,7 @@ * @{ */ /* Private function prototypes -----------------------------------------------*/ -static void ADC_Init(ADC_HandleTypeDef* hadc); +static void ADC_Init(ADC_HandleTypeDef *hadc); static void ADC_DMAConvCplt(DMA_HandleTypeDef *hdma); static void ADC_DMAError(DMA_HandleTypeDef *hdma); static void ADC_DMAHalfConvCplt(DMA_HandleTypeDef *hdma); @@ -279,46 +278,46 @@ static void ADC_DMAHalfConvCplt(DMA_HandleTypeDef *hdma); * @{ */ -/** @defgroup ADC_Exported_Functions_Group1 Initialization and de-initialization functions - * @brief Initialization and Configuration functions +/** @defgroup ADC_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions * -@verbatim +@verbatim =============================================================================== ##### Initialization and de-initialization functions ##### =============================================================================== [..] This section provides functions allowing to: - (+) Initialize and configure the ADC. - (+) De-initialize the ADC. - + (+) Initialize and configure the ADC. + (+) De-initialize the ADC. + @endverbatim * @{ */ /** - * @brief Initializes the ADCx peripheral according to the specified parameters + * @brief Initializes the ADCx peripheral according to the specified parameters * in the ADC_InitStruct and initializes the ADC MSP. - * - * @note This function is used to configure the global features of the ADC ( + * + * @note This function is used to configure the global features of the ADC ( * ClockPrescaler, Resolution, Data Alignment and number of conversion), however, * the rest of the configuration parameters are specific to the regular * channels group (scan mode activation, continuous mode activation, - * External trigger source and edge, DMA continuous request after the + * External trigger source and edge, DMA continuous request after the * last transfer and End of conversion selection). - * + * * @param hadc pointer to a ADC_HandleTypeDef structure that contains - * the configuration information for the specified ADC. + * the configuration information for the specified ADC. 
* @retval HAL status */ -HAL_StatusTypeDef HAL_ADC_Init(ADC_HandleTypeDef* hadc) +HAL_StatusTypeDef HAL_ADC_Init(ADC_HandleTypeDef *hadc) { HAL_StatusTypeDef tmp_hal_status = HAL_OK; - + /* Check ADC handle */ - if(hadc == NULL) + if (hadc == NULL) { return HAL_ERROR; } - + /* Check the parameters */ assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); assert_param(IS_ADC_CLOCKPRESCALER(hadc->Init.ClockPrescaler)); @@ -331,13 +330,13 @@ HAL_StatusTypeDef HAL_ADC_Init(ADC_HandleTypeDef* hadc) assert_param(IS_FUNCTIONAL_STATE(hadc->Init.DMAContinuousRequests)); assert_param(IS_ADC_EOCSelection(hadc->Init.EOCSelection)); assert_param(IS_FUNCTIONAL_STATE(hadc->Init.DiscontinuousConvMode)); - - if(hadc->Init.ExternalTrigConv != ADC_SOFTWARE_START) + + if (hadc->Init.ExternalTrigConv != ADC_SOFTWARE_START) { assert_param(IS_ADC_EXT_TRIG_EDGE(hadc->Init.ExternalTrigConvEdge)); } - - if(hadc->State == HAL_ADC_STATE_RESET) + + if (hadc->State == HAL_ADC_STATE_RESET) { #if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) /* Init the ADC Callback settings */ @@ -360,12 +359,12 @@ HAL_StatusTypeDef HAL_ADC_Init(ADC_HandleTypeDef* hadc) /* Initialize ADC error code */ ADC_CLEAR_ERRORCODE(hadc); - + /* Allocate lock resource and initialize it */ hadc->Lock = HAL_UNLOCKED; } - - /* Configuration of ADC parameters if previous preliminary actions are */ + + /* Configuration of ADC parameters if previous preliminary actions are */ /* correctly completed. */ if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_ERROR_INTERNAL)) { @@ -373,13 +372,13 @@ HAL_StatusTypeDef HAL_ADC_Init(ADC_HandleTypeDef* hadc) ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_REG_BUSY | HAL_ADC_STATE_INJ_BUSY, HAL_ADC_STATE_BUSY_INTERNAL); - + /* Set ADC parameters */ ADC_Init(hadc); - + /* Set ADC error code to none */ ADC_CLEAR_ERRORCODE(hadc); - + /* Set the ADC state */ ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_BUSY_INTERNAL, @@ -389,7 +388,7 @@ HAL_StatusTypeDef HAL_ADC_Init(ADC_HandleTypeDef* hadc) { tmp_hal_status = HAL_ERROR; } - + /* Release Lock */ __HAL_UNLOCK(hadc); @@ -398,58 +397,58 @@ HAL_StatusTypeDef HAL_ADC_Init(ADC_HandleTypeDef* hadc) } /** - * @brief Deinitializes the ADCx peripheral registers to their default reset values. + * @brief Deinitializes the ADCx peripheral registers to their default reset values. * @param hadc pointer to a ADC_HandleTypeDef structure that contains - * the configuration information for the specified ADC. + * the configuration information for the specified ADC. * @retval HAL status */ -HAL_StatusTypeDef HAL_ADC_DeInit(ADC_HandleTypeDef* hadc) +HAL_StatusTypeDef HAL_ADC_DeInit(ADC_HandleTypeDef *hadc) { HAL_StatusTypeDef tmp_hal_status = HAL_OK; - + /* Check ADC handle */ - if(hadc == NULL) + if (hadc == NULL) { return HAL_ERROR; } - + /* Check the parameters */ assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); - + /* Set ADC state */ SET_BIT(hadc->State, HAL_ADC_STATE_BUSY_INTERNAL); - + /* Stop potential conversion on going, on regular and injected groups */ /* Disable ADC peripheral */ __HAL_ADC_DISABLE(hadc); - - /* Configuration of ADC parameters if previous preliminary actions are */ + + /* Configuration of ADC parameters if previous preliminary actions are */ /* correctly completed. 
*/ - if(HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) + if (HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) { #if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) - if (hadc->MspDeInitCallback == NULL) - { - hadc->MspDeInitCallback = HAL_ADC_MspDeInit; /* Legacy weak MspDeInit */ - } + if (hadc->MspDeInitCallback == NULL) + { + hadc->MspDeInitCallback = HAL_ADC_MspDeInit; /* Legacy weak MspDeInit */ + } - /* DeInit the low level hardware: RCC clock, NVIC */ - hadc->MspDeInitCallback(hadc); + /* DeInit the low level hardware: RCC clock, NVIC */ + hadc->MspDeInitCallback(hadc); #else - /* DeInit the low level hardware: RCC clock, NVIC */ - HAL_ADC_MspDeInit(hadc); + /* DeInit the low level hardware: RCC clock, NVIC */ + HAL_ADC_MspDeInit(hadc); #endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ - + /* Set ADC error code to none */ ADC_CLEAR_ERRORCODE(hadc); - + /* Set ADC state */ hadc->State = HAL_ADC_STATE_RESET; } - + /* Process unlocked */ __HAL_UNLOCK(hadc); - + /* Return function status */ return tmp_hal_status; } @@ -657,31 +656,31 @@ HAL_StatusTypeDef HAL_ADC_UnRegisterCallback(ADC_HandleTypeDef *hadc, HAL_ADC_Ca /** * @brief Initializes the ADC MSP. * @param hadc pointer to a ADC_HandleTypeDef structure that contains - * the configuration information for the specified ADC. + * the configuration information for the specified ADC. * @retval None */ -__weak void HAL_ADC_MspInit(ADC_HandleTypeDef* hadc) +__weak void HAL_ADC_MspInit(ADC_HandleTypeDef *hadc) { /* Prevent unused argument(s) compilation warning */ UNUSED(hadc); /* NOTE : This function Should not be modified, when the callback is needed, the HAL_ADC_MspInit could be implemented in the user file - */ + */ } /** * @brief DeInitializes the ADC MSP. * @param hadc pointer to a ADC_HandleTypeDef structure that contains - * the configuration information for the specified ADC. + * the configuration information for the specified ADC. * @retval None */ -__weak void HAL_ADC_MspDeInit(ADC_HandleTypeDef* hadc) +__weak void HAL_ADC_MspDeInit(ADC_HandleTypeDef *hadc) { /* Prevent unused argument(s) compilation warning */ UNUSED(hadc); /* NOTE : This function Should not be modified, when the callback is needed, the HAL_ADC_MspDeInit could be implemented in the user file - */ + */ } /** @@ -689,12 +688,12 @@ __weak void HAL_ADC_MspDeInit(ADC_HandleTypeDef* hadc) */ /** @defgroup ADC_Exported_Functions_Group2 IO operation functions - * @brief IO operation functions + * @brief IO operation functions * -@verbatim +@verbatim =============================================================================== ##### IO operation functions ##### - =============================================================================== + =============================================================================== [..] This section provides functions allowing to: (+) Start conversion of regular channel. (+) Stop conversion of regular channel. @@ -702,8 +701,8 @@ __weak void HAL_ADC_MspDeInit(ADC_HandleTypeDef* hadc) (+) Stop conversion of regular channel and disable interrupt. (+) Start conversion of regular channel and enable DMA transfer. (+) Stop conversion of regular channel and disable DMA transfer. - (+) Handle ADC interrupt request. - + (+) Handle ADC interrupt request. + @endverbatim * @{ */ @@ -714,37 +713,37 @@ __weak void HAL_ADC_MspDeInit(ADC_HandleTypeDef* hadc) * the configuration information for the specified ADC. 
* @retval HAL status */ -HAL_StatusTypeDef HAL_ADC_Start(ADC_HandleTypeDef* hadc) +HAL_StatusTypeDef HAL_ADC_Start(ADC_HandleTypeDef *hadc) { __IO uint32_t counter = 0U; ADC_Common_TypeDef *tmpADC_Common; - + /* Check the parameters */ assert_param(IS_FUNCTIONAL_STATE(hadc->Init.ContinuousConvMode)); - assert_param(IS_ADC_EXT_TRIG_EDGE(hadc->Init.ExternalTrigConvEdge)); - + assert_param(IS_ADC_EXT_TRIG_EDGE(hadc->Init.ExternalTrigConvEdge)); + /* Process locked */ __HAL_LOCK(hadc); - + /* Enable the ADC peripheral */ - /* Check if ADC peripheral is disabled in order to enable it and wait during + /* Check if ADC peripheral is disabled in order to enable it and wait during Tstab time the ADC's stabilization */ - if((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) - { + if ((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) + { /* Enable the Peripheral */ __HAL_ADC_ENABLE(hadc); - + /* Delay for ADC stabilization time */ /* Compute number of CPU cycles to wait for */ counter = (ADC_STAB_DELAY_US * (SystemCoreClock / 1000000U)); - while(counter != 0U) + while (counter != 0U) { counter--; } } - + /* Start conversion if ADC is effectively enabled */ - if(HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) + if (HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) { /* Set ADC state */ /* - Clear state bitfield related to regular group conversion results */ @@ -752,25 +751,25 @@ HAL_StatusTypeDef HAL_ADC_Start(ADC_HandleTypeDef* hadc) ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_READY | HAL_ADC_STATE_REG_EOC | HAL_ADC_STATE_REG_OVR, HAL_ADC_STATE_REG_BUSY); - + /* If conversions on group regular are also triggering group injected, */ /* update ADC state. */ if (READ_BIT(hadc->Instance->CR1, ADC_CR1_JAUTO) != RESET) { - ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_INJ_EOC, HAL_ADC_STATE_INJ_BUSY); + ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_INJ_EOC, HAL_ADC_STATE_INJ_BUSY); } - + /* State machine update: Check if an injected conversion is ongoing */ if (HAL_IS_BIT_SET(hadc->State, HAL_ADC_STATE_INJ_BUSY)) { /* Reset ADC error code fields related to conversions on group regular */ - CLEAR_BIT(hadc->ErrorCode, (HAL_ADC_ERROR_OVR | HAL_ADC_ERROR_DMA)); + CLEAR_BIT(hadc->ErrorCode, (HAL_ADC_ERROR_OVR | HAL_ADC_ERROR_DMA)); } else { /* Reset ADC all error code fields */ ADC_CLEAR_ERRORCODE(hadc); - } + } /* Process unlocked */ /* Unlock before starting ADC conversions: in case of potential */ @@ -785,17 +784,17 @@ HAL_StatusTypeDef HAL_ADC_Start(ADC_HandleTypeDef* hadc) /* Clear regular group conversion flag and overrun flag */ /* (To ensure of no unknown state from potential previous ADC operations) */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_EOC | ADC_FLAG_OVR); - + /* Check if Multimode enabled */ - if(HAL_IS_BIT_CLR(tmpADC_Common->CCR, ADC_CCR_MULTI)) + if (HAL_IS_BIT_CLR(tmpADC_Common->CCR, ADC_CCR_MULTI)) { #if defined(ADC2) && defined(ADC3) - if((hadc->Instance == ADC1) || ((hadc->Instance == ADC2) && ((ADC->CCR & ADC_CCR_MULTI_Msk) < ADC_CCR_MULTI_0)) \ - || ((hadc->Instance == ADC3) && ((ADC->CCR & ADC_CCR_MULTI_Msk) < ADC_CCR_MULTI_4))) + if ((hadc->Instance == ADC1) || ((hadc->Instance == ADC2) && ((ADC->CCR & ADC_CCR_MULTI_Msk) < ADC_CCR_MULTI_0)) \ + || ((hadc->Instance == ADC3) && ((ADC->CCR & ADC_CCR_MULTI_Msk) < ADC_CCR_MULTI_4))) { #endif /* ADC2 || ADC3 */ /* if no external trigger present enable software conversion of regular channels */ - if((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET) + if ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET) { /* Enable the selected ADC software 
conversion for regular group */ hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; @@ -807,10 +806,10 @@ HAL_StatusTypeDef HAL_ADC_Start(ADC_HandleTypeDef* hadc) else { /* if instance of handle correspond to ADC1 and no external trigger present enable software conversion of regular channels */ - if((hadc->Instance == ADC1) && ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET)) + if ((hadc->Instance == ADC1) && ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET)) { /* Enable the selected ADC software conversion for regular group */ - hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; + hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; } } } @@ -822,45 +821,45 @@ HAL_StatusTypeDef HAL_ADC_Start(ADC_HandleTypeDef* hadc) /* Set ADC error code to ADC IP internal error */ SET_BIT(hadc->ErrorCode, HAL_ADC_ERROR_INTERNAL); } - + /* Return function status */ return HAL_OK; } /** * @brief Disables ADC and stop conversion of regular channels. - * - * @note Caution: This function will stop also injected channels. + * + * @note Caution: This function will stop also injected channels. * * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. * * @retval HAL status. */ -HAL_StatusTypeDef HAL_ADC_Stop(ADC_HandleTypeDef* hadc) +HAL_StatusTypeDef HAL_ADC_Stop(ADC_HandleTypeDef *hadc) { /* Check the parameters */ assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); - + /* Process locked */ __HAL_LOCK(hadc); - + /* Stop potential conversion on going, on regular and injected groups */ /* Disable ADC peripheral */ __HAL_ADC_DISABLE(hadc); - + /* Check if ADC is effectively disabled */ - if(HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) + if (HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) { /* Set ADC state */ ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_REG_BUSY | HAL_ADC_STATE_INJ_BUSY, HAL_ADC_STATE_READY); } - + /* Process unlocked */ __HAL_UNLOCK(hadc); - + /* Return function status */ return HAL_OK; } @@ -869,21 +868,21 @@ HAL_StatusTypeDef HAL_ADC_Stop(ADC_HandleTypeDef* hadc) * @brief Poll for regular conversion complete * @note ADC conversion flags EOS (end of sequence) and EOC (end of * conversion) are cleared by this function. - * @note This function cannot be used in a particular setup: ADC configured + * @note This function cannot be used in a particular setup: ADC configured * in DMA mode and polling for end of each conversion (ADC init * parameter "EOCSelection" set to ADC_EOC_SINGLE_CONV). * In this case, DMA resets the flag EOC and polling cannot be - * performed on each conversion. Nevertheless, polling can still + * performed on each conversion. Nevertheless, polling can still * be performed on the complete sequence. * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. - * @param Timeout Timeout value in millisecond. + * @param Timeout Timeout value in millisecond. 
* @retval HAL status */ -HAL_StatusTypeDef HAL_ADC_PollForConversion(ADC_HandleTypeDef* hadc, uint32_t Timeout) +HAL_StatusTypeDef HAL_ADC_PollForConversion(ADC_HandleTypeDef *hadc, uint32_t Timeout) { uint32_t tickstart = 0U; - + /* Verification that ADC configuration is compliant with polling for */ /* each conversion: */ /* Particular case is ADC configured in DMA mode and ADC sequencer with */ @@ -891,69 +890,69 @@ HAL_StatusTypeDef HAL_ADC_PollForConversion(ADC_HandleTypeDef* hadc, uint32_t Ti /* For code simplicity sake, this particular case is generalized to */ /* ADC configured in DMA mode and polling for end of each conversion. */ if (HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_EOCS) && - HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_DMA) ) + HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_DMA)) { /* Update ADC state machine to error */ SET_BIT(hadc->State, HAL_ADC_STATE_ERROR_CONFIG); - + /* Process unlocked */ __HAL_UNLOCK(hadc); - + return HAL_ERROR; } - /* Get tick */ + /* Get tick */ tickstart = HAL_GetTick(); /* Check End of conversion flag */ - while(!(__HAL_ADC_GET_FLAG(hadc, ADC_FLAG_EOC))) + while (!(__HAL_ADC_GET_FLAG(hadc, ADC_FLAG_EOC))) { /* Check if timeout is disabled (set to infinite wait) */ - if(Timeout != HAL_MAX_DELAY) + if (Timeout != HAL_MAX_DELAY) { - if((Timeout == 0U) || ((HAL_GetTick() - tickstart ) > Timeout)) + if ((Timeout == 0U) || ((HAL_GetTick() - tickstart) > Timeout)) { /* New check to avoid false timeout detection in case of preemption */ - if(!(__HAL_ADC_GET_FLAG(hadc, ADC_FLAG_EOC))) + if (!(__HAL_ADC_GET_FLAG(hadc, ADC_FLAG_EOC))) { /* Update ADC state machine to timeout */ SET_BIT(hadc->State, HAL_ADC_STATE_TIMEOUT); - + /* Process unlocked */ __HAL_UNLOCK(hadc); - + return HAL_TIMEOUT; } } } } - + /* Clear regular group conversion flag */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_STRT | ADC_FLAG_EOC); - + /* Update ADC state machine */ SET_BIT(hadc->State, HAL_ADC_STATE_REG_EOC); - + /* Determine whether any further conversion upcoming on group regular */ /* by external trigger, continuous mode or scan sequence on going. */ /* Note: On STM32F4, there is no independent flag of end of sequence. */ /* The test of scan sequence on going is done either with scan */ /* sequence disabled or with end of conversion flag set to */ /* of end of sequence. */ - if(ADC_IS_SOFTWARE_START_REGULAR(hadc) && - (hadc->Init.ContinuousConvMode == DISABLE) && - (HAL_IS_BIT_CLR(hadc->Instance->SQR1, ADC_SQR1_L) || - HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS) ) ) + if (ADC_IS_SOFTWARE_START_REGULAR(hadc) && + (hadc->Init.ContinuousConvMode == DISABLE) && + (HAL_IS_BIT_CLR(hadc->Instance->SQR1, ADC_SQR1_L) || + HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS))) { /* Set ADC state */ - CLEAR_BIT(hadc->State, HAL_ADC_STATE_REG_BUSY); - + CLEAR_BIT(hadc->State, HAL_ADC_STATE_REG_BUSY); + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_INJ_BUSY)) - { + { SET_BIT(hadc->State, HAL_ADC_STATE_READY); } } - + /* Return ADC state */ return HAL_OK; } @@ -966,13 +965,13 @@ HAL_StatusTypeDef HAL_ADC_PollForConversion(ADC_HandleTypeDef* hadc, uint32_t Ti * This parameter can be one of the following values: * @arg ADC_AWD_EVENT: ADC Analog watch Dog event. * @arg ADC_OVR_EVENT: ADC Overrun event. - * @param Timeout Timeout value in millisecond. + * @param Timeout Timeout value in millisecond. 
* @retval HAL status */ -HAL_StatusTypeDef HAL_ADC_PollForEvent(ADC_HandleTypeDef* hadc, uint32_t EventType, uint32_t Timeout) +HAL_StatusTypeDef HAL_ADC_PollForEvent(ADC_HandleTypeDef *hadc, uint32_t EventType, uint32_t Timeout) { uint32_t tickstart = 0U; - + /* Check the parameters */ assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); assert_param(IS_ADC_EVENT_TYPE(EventType)); @@ -981,34 +980,34 @@ HAL_StatusTypeDef HAL_ADC_PollForEvent(ADC_HandleTypeDef* hadc, uint32_t EventTy tickstart = HAL_GetTick(); /* Check selected event flag */ - while(!(__HAL_ADC_GET_FLAG(hadc,EventType))) + while (!(__HAL_ADC_GET_FLAG(hadc, EventType))) { /* Check for the Timeout */ - if(Timeout != HAL_MAX_DELAY) + if (Timeout != HAL_MAX_DELAY) { - if((Timeout == 0U) || ((HAL_GetTick() - tickstart ) > Timeout)) + if ((Timeout == 0U) || ((HAL_GetTick() - tickstart) > Timeout)) { /* New check to avoid false timeout detection in case of preemption */ - if(!(__HAL_ADC_GET_FLAG(hadc,EventType))) + if (!(__HAL_ADC_GET_FLAG(hadc, EventType))) { /* Update ADC state machine to timeout */ SET_BIT(hadc->State, HAL_ADC_STATE_TIMEOUT); - + /* Process unlocked */ __HAL_UNLOCK(hadc); - + return HAL_TIMEOUT; } } } } - + /* Analog watchdog (level out of window) event */ - if(EventType == ADC_AWD_EVENT) + if (EventType == ADC_AWD_EVENT) { /* Set ADC state */ SET_BIT(hadc->State, HAL_ADC_STATE_AWD1); - + /* Clear ADC analog watchdog flag */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_AWD); } @@ -1019,11 +1018,11 @@ HAL_StatusTypeDef HAL_ADC_PollForEvent(ADC_HandleTypeDef* hadc, uint32_t EventTy SET_BIT(hadc->State, HAL_ADC_STATE_REG_OVR); /* Set ADC error code to overrun */ SET_BIT(hadc->ErrorCode, HAL_ADC_ERROR_OVR); - + /* Clear ADC overrun flag */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_OVR); } - + /* Return ADC state */ return HAL_OK; } @@ -1035,37 +1034,37 @@ HAL_StatusTypeDef HAL_ADC_PollForEvent(ADC_HandleTypeDef* hadc, uint32_t EventTy * the configuration information for the specified ADC. * @retval HAL status. 
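/*
 * A minimal interrupt-mode sketch, for illustration only. Assumes a handle
 * hadc1 and that ADC_IRQn was enabled in the NVIC during MSP initialization;
 * the vector name comes from the CMSIS startup file.
 */
#include "stm32f4xx_hal.h"

extern ADC_HandleTypeDef hadc1;          /* assumed application handle */

void adc_start_it_example(void)
{
  /* Enables the EOC/OVR interrupts and starts the regular group. */
  (void)HAL_ADC_Start_IT(&hadc1);
}

void ADC_IRQHandler(void)
{
  /* Dispatches flags to HAL_ADC_ConvCpltCallback() and the other callbacks. */
  HAL_ADC_IRQHandler(&hadc1);
}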
*/ -HAL_StatusTypeDef HAL_ADC_Start_IT(ADC_HandleTypeDef* hadc) +HAL_StatusTypeDef HAL_ADC_Start_IT(ADC_HandleTypeDef *hadc) { __IO uint32_t counter = 0U; ADC_Common_TypeDef *tmpADC_Common; - + /* Check the parameters */ assert_param(IS_FUNCTIONAL_STATE(hadc->Init.ContinuousConvMode)); - assert_param(IS_ADC_EXT_TRIG_EDGE(hadc->Init.ExternalTrigConvEdge)); - + assert_param(IS_ADC_EXT_TRIG_EDGE(hadc->Init.ExternalTrigConvEdge)); + /* Process locked */ __HAL_LOCK(hadc); - + /* Enable the ADC peripheral */ - /* Check if ADC peripheral is disabled in order to enable it and wait during + /* Check if ADC peripheral is disabled in order to enable it and wait during Tstab time the ADC's stabilization */ - if((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) - { + if ((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) + { /* Enable the Peripheral */ __HAL_ADC_ENABLE(hadc); - + /* Delay for ADC stabilization time */ /* Compute number of CPU cycles to wait for */ counter = (ADC_STAB_DELAY_US * (SystemCoreClock / 1000000U)); - while(counter != 0U) + while (counter != 0U) { counter--; } } - + /* Start conversion if ADC is effectively enabled */ - if(HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) + if (HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) { /* Set ADC state */ /* - Clear state bitfield related to regular group conversion results */ @@ -1073,19 +1072,19 @@ HAL_StatusTypeDef HAL_ADC_Start_IT(ADC_HandleTypeDef* hadc) ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_READY | HAL_ADC_STATE_REG_EOC | HAL_ADC_STATE_REG_OVR, HAL_ADC_STATE_REG_BUSY); - + /* If conversions on group regular are also triggering group injected, */ /* update ADC state. */ if (READ_BIT(hadc->Instance->CR1, ADC_CR1_JAUTO) != RESET) { - ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_INJ_EOC, HAL_ADC_STATE_INJ_BUSY); + ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_INJ_EOC, HAL_ADC_STATE_INJ_BUSY); } - + /* State machine update: Check if an injected conversion is ongoing */ if (HAL_IS_BIT_SET(hadc->State, HAL_ADC_STATE_INJ_BUSY)) { /* Reset ADC error code fields related to conversions on group regular */ - CLEAR_BIT(hadc->ErrorCode, (HAL_ADC_ERROR_OVR | HAL_ADC_ERROR_DMA)); + CLEAR_BIT(hadc->ErrorCode, (HAL_ADC_ERROR_OVR | HAL_ADC_ERROR_DMA)); } else { @@ -1106,20 +1105,20 @@ HAL_StatusTypeDef HAL_ADC_Start_IT(ADC_HandleTypeDef* hadc) /* Clear regular group conversion flag and overrun flag */ /* (To ensure of no unknown state from potential previous ADC operations) */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_EOC | ADC_FLAG_OVR); - + /* Enable end of conversion interrupt for regular group */ __HAL_ADC_ENABLE_IT(hadc, (ADC_IT_EOC | ADC_IT_OVR)); - + /* Check if Multimode enabled */ - if(HAL_IS_BIT_CLR(tmpADC_Common->CCR, ADC_CCR_MULTI)) + if (HAL_IS_BIT_CLR(tmpADC_Common->CCR, ADC_CCR_MULTI)) { #if defined(ADC2) && defined(ADC3) - if((hadc->Instance == ADC1) || ((hadc->Instance == ADC2) && ((ADC->CCR & ADC_CCR_MULTI_Msk) < ADC_CCR_MULTI_0)) \ - || ((hadc->Instance == ADC3) && ((ADC->CCR & ADC_CCR_MULTI_Msk) < ADC_CCR_MULTI_4))) + if ((hadc->Instance == ADC1) || ((hadc->Instance == ADC2) && ((ADC->CCR & ADC_CCR_MULTI_Msk) < ADC_CCR_MULTI_0)) \ + || ((hadc->Instance == ADC3) && ((ADC->CCR & ADC_CCR_MULTI_Msk) < ADC_CCR_MULTI_4))) { #endif /* ADC2 || ADC3 */ /* if no external trigger present enable software conversion of regular channels */ - if((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET) + if ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET) { /* Enable the selected ADC software conversion for regular group */ 
hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; @@ -1131,10 +1130,10 @@ HAL_StatusTypeDef HAL_ADC_Start_IT(ADC_HandleTypeDef* hadc) else { /* if instance of handle correspond to ADC1 and no external trigger present enable software conversion of regular channels */ - if((hadc->Instance == ADC1) && ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET)) + if ((hadc->Instance == ADC1) && ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET)) { /* Enable the selected ADC software conversion for regular group */ - hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; + hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; } } } @@ -1146,36 +1145,36 @@ HAL_StatusTypeDef HAL_ADC_Start_IT(ADC_HandleTypeDef* hadc) /* Set ADC error code to ADC IP internal error */ SET_BIT(hadc->ErrorCode, HAL_ADC_ERROR_INTERNAL); } - + /* Return function status */ return HAL_OK; } /** * @brief Disables the interrupt and stop ADC conversion of regular channels. - * - * @note Caution: This function will stop also injected channels. + * + * @note Caution: This function will stop also injected channels. * * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. * @retval HAL status. */ -HAL_StatusTypeDef HAL_ADC_Stop_IT(ADC_HandleTypeDef* hadc) +HAL_StatusTypeDef HAL_ADC_Stop_IT(ADC_HandleTypeDef *hadc) { /* Check the parameters */ assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); - + /* Process locked */ __HAL_LOCK(hadc); - + /* Stop potential conversion on going, on regular and injected groups */ /* Disable ADC peripheral */ __HAL_ADC_DISABLE(hadc); - + /* Check if ADC is effectively disabled */ - if(HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) + if (HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) { - /* Disable ADC end of conversion interrupt for regular group */ + /* Disable ADC end of conversion interrupt for regular group */ __HAL_ADC_DISABLE_IT(hadc, (ADC_IT_EOC | ADC_IT_OVR)); /* Set ADC state */ @@ -1183,82 +1182,85 @@ HAL_StatusTypeDef HAL_ADC_Stop_IT(ADC_HandleTypeDef* hadc) HAL_ADC_STATE_REG_BUSY | HAL_ADC_STATE_INJ_BUSY, HAL_ADC_STATE_READY); } - + /* Process unlocked */ __HAL_UNLOCK(hadc); - + /* Return function status */ return HAL_OK; } /** - * @brief Handles ADC interrupt request + * @brief Handles ADC interrupt request * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. * @retval None */ -void HAL_ADC_IRQHandler(ADC_HandleTypeDef* hadc) +void HAL_ADC_IRQHandler(ADC_HandleTypeDef *hadc) { uint32_t tmp1 = 0U, tmp2 = 0U; - + + uint32_t tmp_sr = hadc->Instance->SR; + uint32_t tmp_cr1 = hadc->Instance->CR1; + /* Check the parameters */ assert_param(IS_FUNCTIONAL_STATE(hadc->Init.ContinuousConvMode)); assert_param(IS_ADC_REGULAR_LENGTH(hadc->Init.NbrOfConversion)); assert_param(IS_ADC_EOCSelection(hadc->Init.EOCSelection)); - - tmp1 = __HAL_ADC_GET_FLAG(hadc, ADC_FLAG_EOC); - tmp2 = __HAL_ADC_GET_IT_SOURCE(hadc, ADC_IT_EOC); + + tmp1 = tmp_sr & ADC_FLAG_EOC; + tmp2 = tmp_cr1 & ADC_IT_EOC; /* Check End of conversion flag for regular channels */ - if(tmp1 && tmp2) + if (tmp1 && tmp2) { /* Update state machine on conversion status if not in error state */ if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_ERROR_INTERNAL)) { /* Set ADC state */ - SET_BIT(hadc->State, HAL_ADC_STATE_REG_EOC); + SET_BIT(hadc->State, HAL_ADC_STATE_REG_EOC); } - + /* Determine whether any further conversion upcoming on group regular */ /* by external trigger, continuous mode or scan sequence on going. 
*/ /* Note: On STM32F4, there is no independent flag of end of sequence. */ /* The test of scan sequence on going is done either with scan */ /* sequence disabled or with end of conversion flag set to */ /* of end of sequence. */ - if(ADC_IS_SOFTWARE_START_REGULAR(hadc) && - (hadc->Init.ContinuousConvMode == DISABLE) && - (HAL_IS_BIT_CLR(hadc->Instance->SQR1, ADC_SQR1_L) || - HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS) ) ) + if (ADC_IS_SOFTWARE_START_REGULAR(hadc) && + (hadc->Init.ContinuousConvMode == DISABLE) && + (HAL_IS_BIT_CLR(hadc->Instance->SQR1, ADC_SQR1_L) || + HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS))) { /* Disable ADC end of single conversion interrupt on group regular */ /* Note: Overrun interrupt was enabled with EOC interrupt in */ /* HAL_ADC_Start_IT(), but is not disabled here because can be used */ /* by overrun IRQ process below. */ __HAL_ADC_DISABLE_IT(hadc, ADC_IT_EOC); - + /* Set ADC state */ CLEAR_BIT(hadc->State, HAL_ADC_STATE_REG_BUSY); - + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_INJ_BUSY)) { SET_BIT(hadc->State, HAL_ADC_STATE_READY); } } - + /* Conversion complete callback */ #if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) hadc->ConvCpltCallback(hadc); #else HAL_ADC_ConvCpltCallback(hadc); #endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ - + /* Clear regular group conversion flag */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_STRT | ADC_FLAG_EOC); } - - tmp1 = __HAL_ADC_GET_FLAG(hadc, ADC_FLAG_JEOC); - tmp2 = __HAL_ADC_GET_IT_SOURCE(hadc, ADC_IT_JEOC); + + tmp1 = tmp_sr & ADC_FLAG_JEOC; + tmp2 = tmp_cr1 & ADC_IT_JEOC; /* Check End of conversion flag for injected channels */ - if(tmp1 && tmp2) + if (tmp1 && tmp2) { /* Update state machine on conversion status if not in error state */ if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_ERROR_INTERNAL)) @@ -1271,132 +1273,132 @@ void HAL_ADC_IRQHandler(ADC_HandleTypeDef* hadc) /* by external trigger, scan sequence on going or by automatic injected */ /* conversion from group regular (same conditions as group regular */ /* interruption disabling above). 
*/ - if(ADC_IS_SOFTWARE_START_INJECTED(hadc) && - (HAL_IS_BIT_CLR(hadc->Instance->JSQR, ADC_JSQR_JL) || - HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS) ) && - (HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO) && - (ADC_IS_SOFTWARE_START_REGULAR(hadc) && - (hadc->Init.ContinuousConvMode == DISABLE) ) ) ) + if (ADC_IS_SOFTWARE_START_INJECTED(hadc) && + (HAL_IS_BIT_CLR(hadc->Instance->JSQR, ADC_JSQR_JL) || + HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS)) && + (HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO) && + (ADC_IS_SOFTWARE_START_REGULAR(hadc) && + (hadc->Init.ContinuousConvMode == DISABLE)))) { /* Disable ADC end of single conversion interrupt on group injected */ __HAL_ADC_DISABLE_IT(hadc, ADC_IT_JEOC); - + /* Set ADC state */ - CLEAR_BIT(hadc->State, HAL_ADC_STATE_INJ_BUSY); + CLEAR_BIT(hadc->State, HAL_ADC_STATE_INJ_BUSY); if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_REG_BUSY)) - { + { SET_BIT(hadc->State, HAL_ADC_STATE_READY); } } - /* Conversion complete callback */ - /* Conversion complete callback */ + /* Conversion complete callback */ + /* Conversion complete callback */ #if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) - hadc->InjectedConvCpltCallback(hadc); + hadc->InjectedConvCpltCallback(hadc); #else - HAL_ADCEx_InjectedConvCpltCallback(hadc); + HAL_ADCEx_InjectedConvCpltCallback(hadc); #endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ - + /* Clear injected group conversion flag */ __HAL_ADC_CLEAR_FLAG(hadc, (ADC_FLAG_JSTRT | ADC_FLAG_JEOC)); } - - tmp1 = __HAL_ADC_GET_FLAG(hadc, ADC_FLAG_AWD); - tmp2 = __HAL_ADC_GET_IT_SOURCE(hadc, ADC_IT_AWD); + + tmp1 = tmp_sr & ADC_FLAG_AWD; + tmp2 = tmp_cr1 & ADC_IT_AWD; /* Check Analog watchdog flag */ - if(tmp1 && tmp2) + if (tmp1 && tmp2) { - if(__HAL_ADC_GET_FLAG(hadc, ADC_FLAG_AWD)) + if (__HAL_ADC_GET_FLAG(hadc, ADC_FLAG_AWD)) { /* Set ADC state */ SET_BIT(hadc->State, HAL_ADC_STATE_AWD1); - + /* Level out of window callback */ #if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) hadc->LevelOutOfWindowCallback(hadc); #else HAL_ADC_LevelOutOfWindowCallback(hadc); #endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ - + /* Clear the ADC analog watchdog flag */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_AWD); } } - - tmp1 = __HAL_ADC_GET_FLAG(hadc, ADC_FLAG_OVR); - tmp2 = __HAL_ADC_GET_IT_SOURCE(hadc, ADC_IT_OVR); + + tmp1 = tmp_sr & ADC_FLAG_OVR; + tmp2 = tmp_cr1 & ADC_IT_OVR; /* Check Overrun flag */ - if(tmp1 && tmp2) + if (tmp1 && tmp2) { /* Note: On STM32F4, ADC overrun can be set through other parameters */ /* refer to description of parameter "EOCSelection" for more */ /* details. */ - + /* Set ADC error code to overrun */ SET_BIT(hadc->ErrorCode, HAL_ADC_ERROR_OVR); - + /* Clear ADC overrun flag */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_OVR); - - /* Error callback */ + + /* Error callback */ #if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) - hadc->ErrorCallback(hadc); + hadc->ErrorCallback(hadc); #else - HAL_ADC_ErrorCallback(hadc); + HAL_ADC_ErrorCallback(hadc); #endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ - + /* Clear the Overrun flag */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_OVR); } } /** - * @brief Enables ADC DMA request after last transfer (Single-ADC mode) and enables ADC peripheral + * @brief Enables ADC DMA request after last transfer (Single-ADC mode) and enables ADC peripheral * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. * @param pData The destination Buffer address. * @param Length The length of data to be transferred from ADC peripheral to memory. 
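/*
 * A minimal DMA-mode sketch, for illustration only: continuous acquisition
 * into a buffer with the DMA API documented above. Assumes hadc1 was
 * initialized with DMAContinuousRequests enabled and its DMA stream linked
 * with __HAL_LINKDMA() in the MSP initialization code.
 */
#include "stm32f4xx_hal.h"

extern ADC_HandleTypeDef hadc1;          /* assumed application handle */
uint32_t adc_buf[16];                    /* DMA destination buffer      */

void adc_start_dma_example(void)
{
  /* Conversion results are written to adc_buf; the half-transfer and
     transfer-complete callbacks fire as the buffer fills. */
  if (HAL_ADC_Start_DMA(&hadc1, adc_buf, 16U) != HAL_OK)
  {
    /* Handle the error in an application-specific way. */
  }
}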
* @retval HAL status */ -HAL_StatusTypeDef HAL_ADC_Start_DMA(ADC_HandleTypeDef* hadc, uint32_t* pData, uint32_t Length) +HAL_StatusTypeDef HAL_ADC_Start_DMA(ADC_HandleTypeDef *hadc, uint32_t *pData, uint32_t Length) { __IO uint32_t counter = 0U; ADC_Common_TypeDef *tmpADC_Common; - + /* Check the parameters */ assert_param(IS_FUNCTIONAL_STATE(hadc->Init.ContinuousConvMode)); - assert_param(IS_ADC_EXT_TRIG_EDGE(hadc->Init.ExternalTrigConvEdge)); - + assert_param(IS_ADC_EXT_TRIG_EDGE(hadc->Init.ExternalTrigConvEdge)); + /* Process locked */ __HAL_LOCK(hadc); - + /* Enable the ADC peripheral */ - /* Check if ADC peripheral is disabled in order to enable it and wait during + /* Check if ADC peripheral is disabled in order to enable it and wait during Tstab time the ADC's stabilization */ - if((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) - { + if ((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) + { /* Enable the Peripheral */ __HAL_ADC_ENABLE(hadc); - + /* Delay for ADC stabilization time */ /* Compute number of CPU cycles to wait for */ counter = (ADC_STAB_DELAY_US * (SystemCoreClock / 1000000U)); - while(counter != 0U) + while (counter != 0U) { counter--; } } - + /* Check ADC DMA Mode */ /* - disable the DMA Mode if it is already enabled */ - if((hadc->Instance->CR2 & ADC_CR2_DMA) == ADC_CR2_DMA) + if ((hadc->Instance->CR2 & ADC_CR2_DMA) == ADC_CR2_DMA) { CLEAR_BIT(hadc->Instance->CR2, ADC_CR2_DMA); } - + /* Start conversion if ADC is effectively enabled */ - if(HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) + if (HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) { /* Set ADC state */ /* - Clear state bitfield related to regular group conversion results */ @@ -1404,19 +1406,19 @@ HAL_StatusTypeDef HAL_ADC_Start_DMA(ADC_HandleTypeDef* hadc, uint32_t* pData, ui ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_READY | HAL_ADC_STATE_REG_EOC | HAL_ADC_STATE_REG_OVR, HAL_ADC_STATE_REG_BUSY); - + /* If conversions on group regular are also triggering group injected, */ /* update ADC state. */ if (READ_BIT(hadc->Instance->CR1, ADC_CR1_JAUTO) != RESET) { - ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_INJ_EOC, HAL_ADC_STATE_INJ_BUSY); + ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_INJ_EOC, HAL_ADC_STATE_INJ_BUSY); } - + /* State machine update: Check if an injected conversion is ongoing */ if (HAL_IS_BIT_SET(hadc->State, HAL_ADC_STATE_INJ_BUSY)) { /* Reset ADC error code fields related to conversions on group regular */ - CLEAR_BIT(hadc->ErrorCode, (HAL_ADC_ERROR_OVR | HAL_ADC_ERROR_DMA)); + CLEAR_BIT(hadc->ErrorCode, (HAL_ADC_ERROR_OVR | HAL_ADC_ERROR_DMA)); } else { @@ -1427,7 +1429,7 @@ HAL_StatusTypeDef HAL_ADC_Start_DMA(ADC_HandleTypeDef* hadc, uint32_t* pData, ui /* Process unlocked */ /* Unlock before starting ADC conversions: in case of potential */ /* interruption, to let the process to ADC IRQ Handler. 
*/ - __HAL_UNLOCK(hadc); + __HAL_UNLOCK(hadc); /* Pointer to the common control register to which is belonging hadc */ /* (Depending on STM32F4 product, there may be up to 3 ADCs and 1 common */ @@ -1439,37 +1441,37 @@ HAL_StatusTypeDef HAL_ADC_Start_DMA(ADC_HandleTypeDef* hadc, uint32_t* pData, ui /* Set the DMA half transfer complete callback */ hadc->DMA_Handle->XferHalfCpltCallback = ADC_DMAHalfConvCplt; - + /* Set the DMA error callback */ hadc->DMA_Handle->XferErrorCallback = ADC_DMAError; - + /* Manage ADC and DMA start: ADC overrun interruption, DMA start, ADC */ /* start (in case of SW start): */ - + /* Clear regular group conversion flag and overrun flag */ /* (To ensure of no unknown state from potential previous ADC operations) */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_EOC | ADC_FLAG_OVR); /* Enable ADC overrun interrupt */ __HAL_ADC_ENABLE_IT(hadc, ADC_IT_OVR); - + /* Enable ADC DMA mode */ hadc->Instance->CR2 |= ADC_CR2_DMA; - + /* Start the DMA channel */ HAL_DMA_Start_IT(hadc->DMA_Handle, (uint32_t)&hadc->Instance->DR, (uint32_t)pData, Length); - + /* Check if Multimode enabled */ - if(HAL_IS_BIT_CLR(tmpADC_Common->CCR, ADC_CCR_MULTI)) + if (HAL_IS_BIT_CLR(tmpADC_Common->CCR, ADC_CCR_MULTI)) { #if defined(ADC2) && defined(ADC3) - if((hadc->Instance == ADC1) || ((hadc->Instance == ADC2) && ((ADC->CCR & ADC_CCR_MULTI_Msk) < ADC_CCR_MULTI_0)) \ - || ((hadc->Instance == ADC3) && ((ADC->CCR & ADC_CCR_MULTI_Msk) < ADC_CCR_MULTI_4))) + if ((hadc->Instance == ADC1) || ((hadc->Instance == ADC2) && ((ADC->CCR & ADC_CCR_MULTI_Msk) < ADC_CCR_MULTI_0)) \ + || ((hadc->Instance == ADC3) && ((ADC->CCR & ADC_CCR_MULTI_Msk) < ADC_CCR_MULTI_4))) { #endif /* ADC2 || ADC3 */ /* if no external trigger present enable software conversion of regular channels */ - if((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET) + if ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET) { /* Enable the selected ADC software conversion for regular group */ hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; @@ -1481,10 +1483,10 @@ HAL_StatusTypeDef HAL_ADC_Start_DMA(ADC_HandleTypeDef* hadc, uint32_t* pData, ui else { /* if instance of handle correspond to ADC1 and no external trigger present enable software conversion of regular channels */ - if((hadc->Instance == ADC1) && ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET)) + if ((hadc->Instance == ADC1) && ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET)) { /* Enable the selected ADC software conversion for regular group */ - hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; + hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; } } } @@ -1496,43 +1498,43 @@ HAL_StatusTypeDef HAL_ADC_Start_DMA(ADC_HandleTypeDef* hadc, uint32_t* pData, ui /* Set ADC error code to ADC IP internal error */ SET_BIT(hadc->ErrorCode, HAL_ADC_ERROR_INTERNAL); } - + /* Return function status */ return HAL_OK; } /** - * @brief Disables ADC DMA (Single-ADC mode) and disables ADC peripheral + * @brief Disables ADC DMA (Single-ADC mode) and disables ADC peripheral * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. 
* @retval HAL status */ -HAL_StatusTypeDef HAL_ADC_Stop_DMA(ADC_HandleTypeDef* hadc) +HAL_StatusTypeDef HAL_ADC_Stop_DMA(ADC_HandleTypeDef *hadc) { HAL_StatusTypeDef tmp_hal_status = HAL_OK; - + /* Check the parameters */ assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); - + /* Process locked */ __HAL_LOCK(hadc); - + /* Stop potential conversion on going, on regular and injected groups */ /* Disable ADC peripheral */ __HAL_ADC_DISABLE(hadc); - + /* Check if ADC is effectively disabled */ - if(HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) + if (HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) { /* Disable the selected ADC DMA mode */ hadc->Instance->CR2 &= ~ADC_CR2_DMA; - + /* Disable the DMA channel (in case of DMA in circular mode or stop while */ /* DMA transfer is on going) */ if (hadc->DMA_Handle->State == HAL_DMA_STATE_BUSY) { tmp_hal_status = HAL_DMA_Abort(hadc->DMA_Handle); - + /* Check if DMA channel effectively disabled */ if (tmp_hal_status != HAL_OK) { @@ -1540,19 +1542,19 @@ HAL_StatusTypeDef HAL_ADC_Stop_DMA(ADC_HandleTypeDef* hadc) SET_BIT(hadc->State, HAL_ADC_STATE_ERROR_DMA); } } - + /* Disable ADC overrun interrupt */ __HAL_ADC_DISABLE_IT(hadc, ADC_IT_OVR); - + /* Set ADC state */ ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_REG_BUSY | HAL_ADC_STATE_INJ_BUSY, HAL_ADC_STATE_READY); } - + /* Process unlocked */ __HAL_UNLOCK(hadc); - + /* Return function status */ return tmp_hal_status; } @@ -1563,19 +1565,19 @@ HAL_StatusTypeDef HAL_ADC_Stop_DMA(ADC_HandleTypeDef* hadc) * the configuration information for the specified ADC. * @retval Converted value */ -uint32_t HAL_ADC_GetValue(ADC_HandleTypeDef* hadc) -{ - /* Return the selected ADC converted value */ +uint32_t HAL_ADC_GetValue(ADC_HandleTypeDef *hadc) +{ + /* Return the selected ADC converted value */ return hadc->Instance->DR; } /** - * @brief Regular conversion complete callback in non blocking mode + * @brief Regular conversion complete callback in non blocking mode * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. * @retval None */ -__weak void HAL_ADC_ConvCpltCallback(ADC_HandleTypeDef* hadc) +__weak void HAL_ADC_ConvCpltCallback(ADC_HandleTypeDef *hadc) { /* Prevent unused argument(s) compilation warning */ UNUSED(hadc); @@ -1585,12 +1587,12 @@ __weak void HAL_ADC_ConvCpltCallback(ADC_HandleTypeDef* hadc) } /** - * @brief Regular conversion half DMA transfer callback in non blocking mode + * @brief Regular conversion half DMA transfer callback in non blocking mode * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. * @retval None */ -__weak void HAL_ADC_ConvHalfCpltCallback(ADC_HandleTypeDef* hadc) +__weak void HAL_ADC_ConvHalfCpltCallback(ADC_HandleTypeDef *hadc) { /* Prevent unused argument(s) compilation warning */ UNUSED(hadc); @@ -1600,12 +1602,12 @@ __weak void HAL_ADC_ConvHalfCpltCallback(ADC_HandleTypeDef* hadc) } /** - * @brief Analog watchdog callback in non blocking mode + * @brief Analog watchdog callback in non blocking mode * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. 
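/*
 * A minimal sketch, for illustration only, of overriding the __weak
 * callbacks above from application code. With a circular DMA buffer, the
 * half-transfer callback marks the first half as stable while DMA fills the
 * second half, and the transfer-complete callback marks the second half.
 */
#include "stm32f4xx_hal.h"

void HAL_ADC_ConvHalfCpltCallback(ADC_HandleTypeDef *hadc)
{
  (void)hadc;
  /* First half of the DMA buffer can be processed here. */
}

void HAL_ADC_ConvCpltCallback(ADC_HandleTypeDef *hadc)
{
  (void)hadc;
  /* Second half of the DMA buffer can be processed here. */
}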
* @retval None */ -__weak void HAL_ADC_LevelOutOfWindowCallback(ADC_HandleTypeDef* hadc) +__weak void HAL_ADC_LevelOutOfWindowCallback(ADC_HandleTypeDef *hadc) { /* Prevent unused argument(s) compilation warning */ UNUSED(hadc); @@ -1616,8 +1618,8 @@ __weak void HAL_ADC_LevelOutOfWindowCallback(ADC_HandleTypeDef* hadc) /** * @brief Error ADC callback. - * @note In case of error due to overrun when using ADC with DMA transfer - * (HAL ADC handle paramater "ErrorCode" to state "HAL_ADC_ERROR_OVR"): + * @note In case of error due to overrun when using ADC with DMA transfer + * (HAL ADC handle parameter "ErrorCode" to state "HAL_ADC_ERROR_OVR"): * - Reinitialize the DMA using function "HAL_ADC_Stop_DMA()". * - If needed, restart a new ADC conversion using function * "HAL_ADC_Start_DMA()" @@ -1638,51 +1640,51 @@ __weak void HAL_ADC_ErrorCallback(ADC_HandleTypeDef *hadc) /** * @} */ - + /** @defgroup ADC_Exported_Functions_Group3 Peripheral Control functions - * @brief Peripheral Control functions + * @brief Peripheral Control functions * -@verbatim +@verbatim =============================================================================== ##### Peripheral Control functions ##### - =============================================================================== + =============================================================================== [..] This section provides functions allowing to: - (+) Configure regular channels. + (+) Configure regular channels. (+) Configure injected channels. (+) Configure multimode. (+) Configure the analog watch dog. - + @endverbatim * @{ */ - /** - * @brief Configures for the selected ADC regular channel its corresponding - * rank in the sequencer and its sample time. - * @param hadc pointer to a ADC_HandleTypeDef structure that contains - * the configuration information for the specified ADC. - * @param sConfig ADC configuration structure. - * @retval HAL status - */ -HAL_StatusTypeDef HAL_ADC_ConfigChannel(ADC_HandleTypeDef* hadc, ADC_ChannelConfTypeDef* sConfig) +/** +* @brief Configures for the selected ADC regular channel its corresponding +* rank in the sequencer and its sample time. +* @param hadc pointer to a ADC_HandleTypeDef structure that contains +* the configuration information for the specified ADC. +* @param sConfig ADC configuration structure. +* @retval HAL status +*/ +HAL_StatusTypeDef HAL_ADC_ConfigChannel(ADC_HandleTypeDef *hadc, ADC_ChannelConfTypeDef *sConfig) { __IO uint32_t counter = 0U; ADC_Common_TypeDef *tmpADC_Common; - + /* Check the parameters */ assert_param(IS_ADC_CHANNEL(sConfig->Channel)); assert_param(IS_ADC_REGULAR_RANK(sConfig->Rank)); assert_param(IS_ADC_SAMPLE_TIME(sConfig->SamplingTime)); - + /* Process locked */ __HAL_LOCK(hadc); - + /* if ADC_Channel_10 ... 
ADC_Channel_18 is selected */ if (sConfig->Channel > ADC_CHANNEL_9) { /* Clear the old sample time */ hadc->Instance->SMPR1 &= ~ADC_SMPR1(ADC_SMPR1_SMP10, sConfig->Channel); - + /* Set the new sample time */ hadc->Instance->SMPR1 |= ADC_SMPR1(sConfig->SamplingTime, sConfig->Channel); } @@ -1690,17 +1692,17 @@ HAL_StatusTypeDef HAL_ADC_ConfigChannel(ADC_HandleTypeDef* hadc, ADC_ChannelConf { /* Clear the old sample time */ hadc->Instance->SMPR2 &= ~ADC_SMPR2(ADC_SMPR2_SMP0, sConfig->Channel); - + /* Set the new sample time */ hadc->Instance->SMPR2 |= ADC_SMPR2(sConfig->SamplingTime, sConfig->Channel); } - + /* For Rank 1 to 6 */ if (sConfig->Rank < 7U) { /* Clear the old SQx bits for the selected rank */ hadc->Instance->SQR3 &= ~ADC_SQR3_RK(ADC_SQR3_SQ1, sConfig->Rank); - + /* Set the SQx bits for the selected rank */ hadc->Instance->SQR3 |= ADC_SQR3_RK(sConfig->Channel, sConfig->Rank); } @@ -1709,7 +1711,7 @@ HAL_StatusTypeDef HAL_ADC_ConfigChannel(ADC_HandleTypeDef* hadc, ADC_ChannelConf { /* Clear the old SQx bits for the selected rank */ hadc->Instance->SQR2 &= ~ADC_SQR2_RK(ADC_SQR2_SQ7, sConfig->Rank); - + /* Set the SQx bits for the selected rank */ hadc->Instance->SQR2 |= ADC_SQR2_RK(sConfig->Channel, sConfig->Rank); } @@ -1718,20 +1720,20 @@ HAL_StatusTypeDef HAL_ADC_ConfigChannel(ADC_HandleTypeDef* hadc, ADC_ChannelConf { /* Clear the old SQx bits for the selected rank */ hadc->Instance->SQR1 &= ~ADC_SQR1_RK(ADC_SQR1_SQ13, sConfig->Rank); - + /* Set the SQx bits for the selected rank */ hadc->Instance->SQR1 |= ADC_SQR1_RK(sConfig->Channel, sConfig->Rank); } - /* Pointer to the common control register to which is belonging hadc */ - /* (Depending on STM32F4 product, there may be up to 3 ADCs and 1 common */ - /* control register) */ - tmpADC_Common = ADC_COMMON_REGISTER(hadc); + /* Pointer to the common control register to which is belonging hadc */ + /* (Depending on STM32F4 product, there may be up to 3 ADCs and 1 common */ + /* control register) */ + tmpADC_Common = ADC_COMMON_REGISTER(hadc); /* if ADC1 Channel_18 is selected for VBAT Channel ennable VBATE */ if ((hadc->Instance == ADC1) && (sConfig->Channel == ADC_CHANNEL_VBAT)) { - /* Disable the TEMPSENSOR channel in case of using board with multiplixed ADC_CHANNEL_VBAT & ADC_CHANNEL_TEMPSENSOR*/ + /* Disable the TEMPSENSOR channel in case of using board with multiplixed ADC_CHANNEL_VBAT & ADC_CHANNEL_TEMPSENSOR*/ if ((uint16_t)ADC_CHANNEL_TEMPSENSOR == (uint16_t)ADC_CHANNEL_VBAT) { tmpADC_Common->CCR &= ~ADC_CCR_TSVREFE; @@ -1739,8 +1741,8 @@ HAL_StatusTypeDef HAL_ADC_ConfigChannel(ADC_HandleTypeDef* hadc, ADC_ChannelConf /* Enable the VBAT channel*/ tmpADC_Common->CCR |= ADC_CCR_VBATE; } - - /* if ADC1 Channel_16 or Channel_18 is selected for Temperature sensor or + + /* if ADC1 Channel_16 or Channel_18 is selected for Temperature sensor or Channel_17 is selected for VREFINT enable TSVREFE */ if ((hadc->Instance == ADC1) && ((sConfig->Channel == ADC_CHANNEL_TEMPSENSOR) || (sConfig->Channel == ADC_CHANNEL_VREFINT))) { @@ -1751,22 +1753,22 @@ HAL_StatusTypeDef HAL_ADC_ConfigChannel(ADC_HandleTypeDef* hadc, ADC_ChannelConf } /* Enable the Temperature sensor and VREFINT channel*/ tmpADC_Common->CCR |= ADC_CCR_TSVREFE; - - if(sConfig->Channel == ADC_CHANNEL_TEMPSENSOR) + + if (sConfig->Channel == ADC_CHANNEL_TEMPSENSOR) { /* Delay for temperature sensor stabilization time */ /* Compute number of CPU cycles to wait for */ counter = (ADC_TEMPSENSOR_DELAY_US * (SystemCoreClock / 1000000U)); - while(counter != 0U) + while (counter != 
0U) { counter--; } } } - + /* Process unlocked */ __HAL_UNLOCK(hadc); - + /* Return function status */ return HAL_OK; } @@ -1783,31 +1785,31 @@ HAL_StatusTypeDef HAL_ADC_ConfigChannel(ADC_HandleTypeDef* hadc, ADC_ChannelConf * effective timing of the new programmed threshold values. * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. - * @param AnalogWDGConfig pointer to an ADC_AnalogWDGConfTypeDef structure + * @param AnalogWDGConfig pointer to an ADC_AnalogWDGConfTypeDef structure * that contains the configuration information of ADC analog watchdog. - * @retval HAL status + * @retval HAL status */ -HAL_StatusTypeDef HAL_ADC_AnalogWDGConfig(ADC_HandleTypeDef* hadc, ADC_AnalogWDGConfTypeDef* AnalogWDGConfig) +HAL_StatusTypeDef HAL_ADC_AnalogWDGConfig(ADC_HandleTypeDef *hadc, ADC_AnalogWDGConfTypeDef *AnalogWDGConfig) { -#ifdef USE_FULL_ASSERT +#ifdef USE_FULL_ASSERT uint32_t tmp = 0U; -#endif /* USE_FULL_ASSERT */ - +#endif /* USE_FULL_ASSERT */ + /* Check the parameters */ assert_param(IS_ADC_ANALOG_WATCHDOG(AnalogWDGConfig->WatchdogMode)); assert_param(IS_ADC_CHANNEL(AnalogWDGConfig->Channel)); assert_param(IS_FUNCTIONAL_STATE(AnalogWDGConfig->ITMode)); -#ifdef USE_FULL_ASSERT +#ifdef USE_FULL_ASSERT tmp = ADC_GET_RESOLUTION(hadc); assert_param(IS_ADC_RANGE(tmp, AnalogWDGConfig->HighThreshold)); assert_param(IS_ADC_RANGE(tmp, AnalogWDGConfig->LowThreshold)); #endif /* USE_FULL_ASSERT */ - + /* Process locked */ __HAL_LOCK(hadc); - - if(AnalogWDGConfig->ITMode == ENABLE) + + if (AnalogWDGConfig->ITMode == ENABLE) { /* Enable the ADC Analog watchdog interrupt */ __HAL_ADC_ENABLE_IT(hadc, ADC_IT_AWD); @@ -1817,28 +1819,28 @@ HAL_StatusTypeDef HAL_ADC_AnalogWDGConfig(ADC_HandleTypeDef* hadc, ADC_AnalogWDG /* Disable the ADC Analog watchdog interrupt */ __HAL_ADC_DISABLE_IT(hadc, ADC_IT_AWD); } - + /* Clear AWDEN, JAWDEN and AWDSGL bits */ hadc->Instance->CR1 &= ~(ADC_CR1_AWDSGL | ADC_CR1_JAWDEN | ADC_CR1_AWDEN); - + /* Set the analog watchdog enable mode */ hadc->Instance->CR1 |= AnalogWDGConfig->WatchdogMode; - + /* Set the high threshold */ hadc->Instance->HTR = AnalogWDGConfig->HighThreshold; - + /* Set the low threshold */ hadc->Instance->LTR = AnalogWDGConfig->LowThreshold; - + /* Clear the Analog watchdog channel select bits */ hadc->Instance->CR1 &= ~ADC_CR1_AWDCH; - + /* Set the Analog watchdog channel */ hadc->Instance->CR1 |= (uint32_t)((uint16_t)(AnalogWDGConfig->Channel)); - + /* Process unlocked */ __HAL_UNLOCK(hadc); - + /* Return function status */ return HAL_OK; } @@ -1848,28 +1850,28 @@ HAL_StatusTypeDef HAL_ADC_AnalogWDGConfig(ADC_HandleTypeDef* hadc, ADC_AnalogWDG */ /** @defgroup ADC_Exported_Functions_Group4 ADC Peripheral State functions - * @brief ADC Peripheral State functions + * @brief ADC Peripheral State functions * -@verbatim +@verbatim =============================================================================== ##### Peripheral State and errors functions ##### - =============================================================================== + =============================================================================== [..] This subsection provides functions allowing to (+) Check the ADC state (+) Check the ADC Error - + @endverbatim * @{ */ - + /** * @brief return the ADC state * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. 
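/*
 * A minimal configuration sketch, for illustration only: one regular channel
 * plus the analog watchdog configured with the functions above. The channel
 * number and thresholds are placeholders; adapt them to the board. Assumes a
 * handle hadc1 configured elsewhere.
 */
#include "stm32f4xx_hal.h"

extern ADC_HandleTypeDef hadc1;          /* assumed application handle */

void adc_config_example(void)
{
  ADC_ChannelConfTypeDef   sConfig = {0};
  ADC_AnalogWDGConfTypeDef sAWD    = {0};

  /* Rank 1 of the regular sequence samples channel 5 for 56 ADC clock cycles. */
  sConfig.Channel      = ADC_CHANNEL_5;
  sConfig.Rank         = 1U;
  sConfig.SamplingTime = ADC_SAMPLETIME_56CYCLES;
  (void)HAL_ADC_ConfigChannel(&hadc1, &sConfig);

  /* Watchdog on that single regular channel, interrupt on out-of-window. */
  sAWD.WatchdogMode  = ADC_ANALOGWATCHDOG_SINGLE_REG;
  sAWD.Channel       = ADC_CHANNEL_5;
  sAWD.HighThreshold = 3000U;            /* 12-bit counts, placeholder */
  sAWD.LowThreshold  = 100U;
  sAWD.ITMode        = ENABLE;
  (void)HAL_ADC_AnalogWDGConfig(&hadc1, &sAWD);
}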
* @retval HAL state */ -uint32_t HAL_ADC_GetState(ADC_HandleTypeDef* hadc) +uint32_t HAL_ADC_GetState(ADC_HandleTypeDef *hadc) { /* Return ADC state */ return hadc->State; @@ -1895,49 +1897,49 @@ uint32_t HAL_ADC_GetError(ADC_HandleTypeDef *hadc) */ /** - * @brief Initializes the ADCx peripheral according to the specified parameters - * in the ADC_InitStruct without initializing the ADC MSP. + * @brief Initializes the ADCx peripheral according to the specified parameters + * in the ADC_InitStruct without initializing the ADC MSP. * @param hadc pointer to a ADC_HandleTypeDef structure that contains - * the configuration information for the specified ADC. + * the configuration information for the specified ADC. * @retval None */ -static void ADC_Init(ADC_HandleTypeDef* hadc) +static void ADC_Init(ADC_HandleTypeDef *hadc) { ADC_Common_TypeDef *tmpADC_Common; - + /* Set ADC parameters */ /* Pointer to the common control register to which is belonging hadc */ /* (Depending on STM32F4 product, there may be up to 3 ADCs and 1 common */ /* control register) */ tmpADC_Common = ADC_COMMON_REGISTER(hadc); - + /* Set the ADC clock prescaler */ tmpADC_Common->CCR &= ~(ADC_CCR_ADCPRE); tmpADC_Common->CCR |= hadc->Init.ClockPrescaler; - + /* Set ADC scan mode */ hadc->Instance->CR1 &= ~(ADC_CR1_SCAN); hadc->Instance->CR1 |= ADC_CR1_SCANCONV(hadc->Init.ScanConvMode); - + /* Set ADC resolution */ hadc->Instance->CR1 &= ~(ADC_CR1_RES); hadc->Instance->CR1 |= hadc->Init.Resolution; - + /* Set ADC data alignment */ hadc->Instance->CR2 &= ~(ADC_CR2_ALIGN); hadc->Instance->CR2 |= hadc->Init.DataAlign; - + /* Enable external trigger if trigger selection is different of software */ /* start. */ /* Note: This configuration keeps the hardware feature of parameter */ /* ExternalTrigConvEdge "trigger edge none" equivalent to */ /* software start. 
*/ - if(hadc->Init.ExternalTrigConv != ADC_SOFTWARE_START) + if (hadc->Init.ExternalTrigConv != ADC_SOFTWARE_START) { /* Select external trigger to start conversion */ hadc->Instance->CR2 &= ~(ADC_CR2_EXTSEL); hadc->Instance->CR2 |= hadc->Init.ExternalTrigConv; - + /* Select external trigger polarity */ hadc->Instance->CR2 &= ~(ADC_CR2_EXTEN); hadc->Instance->CR2 |= hadc->Init.ExternalTrigConvEdge; @@ -1948,18 +1950,18 @@ static void ADC_Init(ADC_HandleTypeDef* hadc) hadc->Instance->CR2 &= ~(ADC_CR2_EXTSEL); hadc->Instance->CR2 &= ~(ADC_CR2_EXTEN); } - + /* Enable or disable ADC continuous conversion mode */ hadc->Instance->CR2 &= ~(ADC_CR2_CONT); hadc->Instance->CR2 |= ADC_CR2_CONTINUOUS((uint32_t)hadc->Init.ContinuousConvMode); - - if(hadc->Init.DiscontinuousConvMode != DISABLE) + + if (hadc->Init.DiscontinuousConvMode != DISABLE) { assert_param(IS_ADC_REGULAR_DISC_NUMBER(hadc->Init.NbrOfDiscConversion)); - + /* Enable the selected ADC regular discontinuous mode */ hadc->Instance->CR1 |= (uint32_t)ADC_CR1_DISCEN; - + /* Set the number of channels to be converted in discontinuous mode */ hadc->Instance->CR1 &= ~(ADC_CR1_DISCNUM); hadc->Instance->CR1 |= ADC_CR1_DISCONTINUOUS(hadc->Init.NbrOfDiscConversion); @@ -1969,63 +1971,63 @@ static void ADC_Init(ADC_HandleTypeDef* hadc) /* Disable the selected ADC regular discontinuous mode */ hadc->Instance->CR1 &= ~(ADC_CR1_DISCEN); } - + /* Set ADC number of conversion */ hadc->Instance->SQR1 &= ~(ADC_SQR1_L); hadc->Instance->SQR1 |= ADC_SQR1(hadc->Init.NbrOfConversion); - + /* Enable or disable ADC DMA continuous request */ hadc->Instance->CR2 &= ~(ADC_CR2_DDS); hadc->Instance->CR2 |= ADC_CR2_DMAContReq((uint32_t)hadc->Init.DMAContinuousRequests); - + /* Enable or disable ADC end of conversion selection */ hadc->Instance->CR2 &= ~(ADC_CR2_EOCS); hadc->Instance->CR2 |= ADC_CR2_EOCSelection(hadc->Init.EOCSelection); } /** - * @brief DMA transfer complete callback. + * @brief DMA transfer complete callback. * @param hdma pointer to a DMA_HandleTypeDef structure that contains * the configuration information for the specified DMA module. * @retval None */ -static void ADC_DMAConvCplt(DMA_HandleTypeDef *hdma) +static void ADC_DMAConvCplt(DMA_HandleTypeDef *hdma) { /* Retrieve ADC handle corresponding to current DMA handle */ - ADC_HandleTypeDef* hadc = ( ADC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; - + ADC_HandleTypeDef *hadc = (ADC_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + /* Update state machine on conversion status if not in error state */ if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_ERROR_INTERNAL | HAL_ADC_STATE_ERROR_DMA)) { /* Update ADC state machine */ SET_BIT(hadc->State, HAL_ADC_STATE_REG_EOC); - + /* Determine whether any further conversion upcoming on group regular */ /* by external trigger, continuous mode or scan sequence on going. */ /* Note: On STM32F4, there is no independent flag of end of sequence. */ /* The test of scan sequence on going is done either with scan */ /* sequence disabled or with end of conversion flag set to */ /* of end of sequence. 
*/ - if(ADC_IS_SOFTWARE_START_REGULAR(hadc) && - (hadc->Init.ContinuousConvMode == DISABLE) && - (HAL_IS_BIT_CLR(hadc->Instance->SQR1, ADC_SQR1_L) || - HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS) ) ) + if (ADC_IS_SOFTWARE_START_REGULAR(hadc) && + (hadc->Init.ContinuousConvMode == DISABLE) && + (HAL_IS_BIT_CLR(hadc->Instance->SQR1, ADC_SQR1_L) || + HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS))) { /* Disable ADC end of single conversion interrupt on group regular */ /* Note: Overrun interrupt was enabled with EOC interrupt in */ /* HAL_ADC_Start_IT(), but is not disabled here because can be used */ /* by overrun IRQ process below. */ __HAL_ADC_DISABLE_IT(hadc, ADC_IT_EOC); - + /* Set ADC state */ - CLEAR_BIT(hadc->State, HAL_ADC_STATE_REG_BUSY); - + CLEAR_BIT(hadc->State, HAL_ADC_STATE_REG_BUSY); + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_INJ_BUSY)) { SET_BIT(hadc->State, HAL_ADC_STATE_READY); } } - + /* Conversion complete callback */ #if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) hadc->ConvCpltCallback(hadc); @@ -2044,8 +2046,8 @@ static void ADC_DMAConvCplt(DMA_HandleTypeDef *hdma) HAL_ADC_ErrorCallback(hadc); #endif /* USE_HAL_ADC_REGISTER_CALLBACKS */ } - else - { + else + { /* Call DMA error callback */ hadc->DMA_Handle->XferErrorCallback(hdma); } @@ -2053,15 +2055,15 @@ static void ADC_DMAConvCplt(DMA_HandleTypeDef *hdma) } /** - * @brief DMA half transfer complete callback. + * @brief DMA half transfer complete callback. * @param hdma pointer to a DMA_HandleTypeDef structure that contains * the configuration information for the specified DMA module. * @retval None */ -static void ADC_DMAHalfConvCplt(DMA_HandleTypeDef *hdma) +static void ADC_DMAHalfConvCplt(DMA_HandleTypeDef *hdma) { - ADC_HandleTypeDef* hadc = ( ADC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; - /* Half conversion callback */ + ADC_HandleTypeDef *hadc = (ADC_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + /* Half conversion callback */ #if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) hadc->ConvHalfCpltCallback(hadc); #else @@ -2070,18 +2072,18 @@ static void ADC_DMAHalfConvCplt(DMA_HandleTypeDef *hdma) } /** - * @brief DMA error callback + * @brief DMA error callback * @param hdma pointer to a DMA_HandleTypeDef structure that contains * the configuration information for the specified DMA module. 
* @retval None */ -static void ADC_DMAError(DMA_HandleTypeDef *hdma) +static void ADC_DMAError(DMA_HandleTypeDef *hdma) { - ADC_HandleTypeDef* hadc = ( ADC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; - hadc->State= HAL_ADC_STATE_ERROR_DMA; + ADC_HandleTypeDef *hadc = (ADC_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + hadc->State = HAL_ADC_STATE_ERROR_DMA; /* Set ADC error code to DMA error */ hadc->ErrorCode |= HAL_ADC_ERROR_DMA; - /* Error callback */ + /* Error callback */ #if (USE_HAL_ADC_REGISTER_CALLBACKS == 1) hadc->ErrorCallback(hadc); #else @@ -2100,10 +2102,9 @@ static void ADC_DMAError(DMA_HandleTypeDef *hdma) #endif /* HAL_ADC_MODULE_ENABLED */ /** * @} - */ + */ /** * @} - */ + */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_adc_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_adc_ex.c index c5489522..1d8ad7b5 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_adc_ex.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_adc_ex.c @@ -6,6 +6,17 @@ * functionalities of the ADC extension peripheral: * + Extended features functions * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim ============================================================================== ##### How to use this driver ##### @@ -53,8 +64,8 @@ (+) Start the ADC peripheral using HAL_ADCEx_InjectedStart_IT() (+) Use HAL_ADC_IRQHandler() called under ADC_IRQHandler() Interrupt subroutine (+) At ADC end of conversion HAL_ADCEx_InjectedConvCpltCallback() function is executed and user can - add his own code by customization of function pointer HAL_ADCEx_InjectedConvCpltCallback - (+) In case of ADC Error, HAL_ADCEx_InjectedErrorCallback() function is executed and user can + add his own code by customization of function pointer HAL_ADCEx_InjectedConvCpltCallback + (+) In case of ADC Error, HAL_ADCEx_InjectedErrorCallback() function is executed and user can add his own code by customization of function pointer HAL_ADCEx_InjectedErrorCallback (+) Stop the ADC peripheral using HAL_ADCEx_InjectedStop_IT() @@ -69,18 +80,6 @@ @endverbatim - ****************************************************************************** - * @attention - * - *

© Copyright (c) 2017 STMicroelectronics. - * All rights reserved.

- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * - ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ @@ -93,12 +92,12 @@ /** @defgroup ADCEx ADCEx * @brief ADC Extended driver modules * @{ - */ + */ #ifdef HAL_ADC_MODULE_ENABLED - + /* Private typedef -----------------------------------------------------------*/ -/* Private define ------------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ /* Private macro -------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /** @addtogroup ADCEx_Private_Functions @@ -107,7 +106,7 @@ /* Private function prototypes -----------------------------------------------*/ static void ADC_MultiModeDMAConvCplt(DMA_HandleTypeDef *hdma); static void ADC_MultiModeDMAError(DMA_HandleTypeDef *hdma); -static void ADC_MultiModeDMAHalfConvCplt(DMA_HandleTypeDef *hdma); +static void ADC_MultiModeDMAHalfConvCplt(DMA_HandleTypeDef *hdma); /** * @} */ @@ -117,13 +116,13 @@ static void ADC_MultiModeDMAHalfConvCplt(DMA_HandleTypeDef *hdma); * @{ */ -/** @defgroup ADCEx_Exported_Functions_Group1 Extended features functions - * @brief Extended features functions +/** @defgroup ADCEx_Exported_Functions_Group1 Extended features functions + * @brief Extended features functions * -@verbatim +@verbatim =============================================================================== ##### Extended features functions ##### - =============================================================================== + =============================================================================== [..] This section provides functions allowing to: (+) Start conversion of injected channel. (+) Stop conversion of injected channel. @@ -133,7 +132,7 @@ static void ADC_MultiModeDMAHalfConvCplt(DMA_HandleTypeDef *hdma); (+) Get result of multimode conversion. (+) Configure injected channels. (+) Configure multimode. - + @endverbatim * @{ */ @@ -144,35 +143,35 @@ static void ADC_MultiModeDMAHalfConvCplt(DMA_HandleTypeDef *hdma); * the configuration information for the specified ADC. 
* @retval HAL status */ -HAL_StatusTypeDef HAL_ADCEx_InjectedStart(ADC_HandleTypeDef* hadc) +HAL_StatusTypeDef HAL_ADCEx_InjectedStart(ADC_HandleTypeDef *hadc) { __IO uint32_t counter = 0U; uint32_t tmp1 = 0U, tmp2 = 0U; ADC_Common_TypeDef *tmpADC_Common; - + /* Process locked */ __HAL_LOCK(hadc); - + /* Enable the ADC peripheral */ - - /* Check if ADC peripheral is disabled in order to enable it and wait during + + /* Check if ADC peripheral is disabled in order to enable it and wait during Tstab time the ADC's stabilization */ - if((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) - { + if ((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) + { /* Enable the Peripheral */ __HAL_ADC_ENABLE(hadc); - + /* Delay for ADC stabilization time */ /* Compute number of CPU cycles to wait for */ counter = (ADC_STAB_DELAY_US * (SystemCoreClock / 1000000U)); - while(counter != 0U) + while (counter != 0U) { counter--; } } - + /* Start conversion if ADC is effectively enabled */ - if(HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) + if (HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) { /* Set ADC state */ /* - Clear state bitfield related to injected group conversion results */ @@ -180,7 +179,7 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStart(ADC_HandleTypeDef* hadc) ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_READY | HAL_ADC_STATE_INJ_EOC, HAL_ADC_STATE_INJ_BUSY); - + /* Check if a regular conversion is ongoing */ /* Note: On this device, there is no ADC error code fields related to */ /* conversions on group injected only. In case of conversion on */ @@ -190,12 +189,12 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStart(ADC_HandleTypeDef* hadc) /* Reset ADC all error code fields */ ADC_CLEAR_ERRORCODE(hadc); } - + /* Process unlocked */ /* Unlock before starting ADC conversions: in case of potential */ /* interruption, to let the process to ADC IRQ Handler. */ __HAL_UNLOCK(hadc); - + /* Clear injected group conversion flag */ /* (To ensure of no unknown state from potential previous ADC operations) */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_JEOC); @@ -206,11 +205,11 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStart(ADC_HandleTypeDef* hadc) tmpADC_Common = ADC_COMMON_REGISTER(hadc); /* Check if Multimode enabled */ - if(HAL_IS_BIT_CLR(tmpADC_Common->CCR, ADC_CCR_MULTI)) + if (HAL_IS_BIT_CLR(tmpADC_Common->CCR, ADC_CCR_MULTI)) { tmp1 = HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_JEXTEN); tmp2 = HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO); - if(tmp1 && tmp2) + if (tmp1 && tmp2) { /* Enable the selected ADC software conversion for injected group */ hadc->Instance->CR2 |= ADC_CR2_JSWSTART; @@ -220,7 +219,7 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStart(ADC_HandleTypeDef* hadc) { tmp1 = HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_JEXTEN); tmp2 = HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO); - if((hadc->Instance == ADC1) && tmp1 && tmp2) + if ((hadc->Instance == ADC1) && tmp1 && tmp2) { /* Enable the selected ADC software conversion for injected group */ hadc->Instance->CR2 |= ADC_CR2_JSWSTART; @@ -235,7 +234,7 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStart(ADC_HandleTypeDef* hadc) /* Set ADC error code to ADC IP internal error */ SET_BIT(hadc->ErrorCode, HAL_ADC_ERROR_INTERNAL); } - + /* Return function status */ return HAL_OK; } @@ -247,35 +246,35 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStart(ADC_HandleTypeDef* hadc) * * @retval HAL status. 
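/*
 * A minimal injected-group sketch, for illustration only: one blocking
 * injected conversion using the API above. Assumes the injected channel was
 * already configured with HAL_ADCEx_InjectedConfigChannel() on handle hadc1.
 */
#include "stm32f4xx_hal.h"

extern ADC_HandleTypeDef hadc1;          /* assumed application handle */

uint32_t adc_injected_read_once(void)
{
  uint32_t value = 0U;

  if (HAL_ADCEx_InjectedStart(&hadc1) == HAL_OK)
  {
    if (HAL_ADCEx_InjectedPollForConversion(&hadc1, 10U) == HAL_OK)
    {
      value = HAL_ADCEx_InjectedGetValue(&hadc1, ADC_INJECTED_RANK_1);
    }
    (void)HAL_ADCEx_InjectedStop(&hadc1);
  }
  return value;
}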
*/ -HAL_StatusTypeDef HAL_ADCEx_InjectedStart_IT(ADC_HandleTypeDef* hadc) +HAL_StatusTypeDef HAL_ADCEx_InjectedStart_IT(ADC_HandleTypeDef *hadc) { __IO uint32_t counter = 0U; uint32_t tmp1 = 0U, tmp2 = 0U; ADC_Common_TypeDef *tmpADC_Common; - + /* Process locked */ __HAL_LOCK(hadc); - + /* Enable the ADC peripheral */ - - /* Check if ADC peripheral is disabled in order to enable it and wait during + + /* Check if ADC peripheral is disabled in order to enable it and wait during Tstab time the ADC's stabilization */ - if((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) - { + if ((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) + { /* Enable the Peripheral */ __HAL_ADC_ENABLE(hadc); - + /* Delay for ADC stabilization time */ /* Compute number of CPU cycles to wait for */ counter = (ADC_STAB_DELAY_US * (SystemCoreClock / 1000000U)); - while(counter != 0U) + while (counter != 0U) { counter--; } } - + /* Start conversion if ADC is effectively enabled */ - if(HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) + if (HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) { /* Set ADC state */ /* - Clear state bitfield related to injected group conversion results */ @@ -283,7 +282,7 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStart_IT(ADC_HandleTypeDef* hadc) ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_READY | HAL_ADC_STATE_INJ_EOC, HAL_ADC_STATE_INJ_BUSY); - + /* Check if a regular conversion is ongoing */ /* Note: On this device, there is no ADC error code fields related to */ /* conversions on group injected only. In case of conversion on */ @@ -293,16 +292,16 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStart_IT(ADC_HandleTypeDef* hadc) /* Reset ADC all error code fields */ ADC_CLEAR_ERRORCODE(hadc); } - + /* Process unlocked */ /* Unlock before starting ADC conversions: in case of potential */ /* interruption, to let the process to ADC IRQ Handler. 
*/ __HAL_UNLOCK(hadc); - + /* Clear injected group conversion flag */ /* (To ensure of no unknown state from potential previous ADC operations) */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_JEOC); - + /* Enable end of conversion interrupt for injected channels */ __HAL_ADC_ENABLE_IT(hadc, ADC_IT_JEOC); @@ -310,13 +309,13 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStart_IT(ADC_HandleTypeDef* hadc) /* (Depending on STM32F4 product, there may be up to 3 ADC and 1 common */ /* control register) */ tmpADC_Common = ADC_COMMON_REGISTER(hadc); - + /* Check if Multimode enabled */ - if(HAL_IS_BIT_CLR(tmpADC_Common->CCR, ADC_CCR_MULTI)) + if (HAL_IS_BIT_CLR(tmpADC_Common->CCR, ADC_CCR_MULTI)) { tmp1 = HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_JEXTEN); tmp2 = HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO); - if(tmp1 && tmp2) + if (tmp1 && tmp2) { /* Enable the selected ADC software conversion for injected group */ hadc->Instance->CR2 |= ADC_CR2_JSWSTART; @@ -326,7 +325,7 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStart_IT(ADC_HandleTypeDef* hadc) { tmp1 = HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_JEXTEN); tmp2 = HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO); - if((hadc->Instance == ADC1) && tmp1 && tmp2) + if ((hadc->Instance == ADC1) && tmp1 && tmp2) { /* Enable the selected ADC software conversion for injected group */ hadc->Instance->CR2 |= ADC_CR2_JSWSTART; @@ -341,7 +340,7 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStart_IT(ADC_HandleTypeDef* hadc) /* Set ADC error code to ADC IP internal error */ SET_BIT(hadc->ErrorCode, HAL_ADC_ERROR_INTERNAL); } - + /* Return function status */ return HAL_OK; } @@ -349,7 +348,7 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStart_IT(ADC_HandleTypeDef* hadc) /** * @brief Stop conversion of injected channels. Disable ADC peripheral if * no regular conversion is on going. - * @note If ADC must be disabled and if conversion is on going on + * @note If ADC must be disabled and if conversion is on going on * regular group, function HAL_ADC_Stop must be used to stop both * injected and regular groups, and disable the ADC. * @note If injected group mode auto-injection is enabled, @@ -358,31 +357,31 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStart_IT(ADC_HandleTypeDef* hadc) * @param hadc ADC handle * @retval None */ -HAL_StatusTypeDef HAL_ADCEx_InjectedStop(ADC_HandleTypeDef* hadc) +HAL_StatusTypeDef HAL_ADCEx_InjectedStop(ADC_HandleTypeDef *hadc) { HAL_StatusTypeDef tmp_hal_status = HAL_OK; - + /* Check the parameters */ assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); /* Process locked */ __HAL_LOCK(hadc); - + /* Stop potential conversion and disable ADC peripheral */ /* Conditioned to: */ /* - No conversion on the other group (regular group) is intended to */ /* continue (injected and regular groups stop conversion and ADC disable */ /* are common) */ /* - In case of auto-injection mode, HAL_ADC_Stop must be used. 
*/ - if(((hadc->State & HAL_ADC_STATE_REG_BUSY) == RESET) && - HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO) ) + if (((hadc->State & HAL_ADC_STATE_REG_BUSY) == RESET) && + HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO)) { /* Stop potential conversion on going, on regular and injected groups */ /* Disable ADC peripheral */ __HAL_ADC_DISABLE(hadc); - + /* Check if ADC is effectively disabled */ - if(HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) + if (HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) { /* Set ADC state */ ADC_STATE_CLR_SET(hadc->State, @@ -394,13 +393,13 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStop(ADC_HandleTypeDef* hadc) { /* Update ADC state machine to error */ SET_BIT(hadc->State, HAL_ADC_STATE_ERROR_CONFIG); - + tmp_hal_status = HAL_ERROR; } - + /* Process unlocked */ __HAL_UNLOCK(hadc); - + /* Return function status */ return tmp_hal_status; } @@ -409,28 +408,28 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStop(ADC_HandleTypeDef* hadc) * @brief Poll for injected conversion complete * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. - * @param Timeout Timeout value in millisecond. + * @param Timeout Timeout value in millisecond. * @retval HAL status */ -HAL_StatusTypeDef HAL_ADCEx_InjectedPollForConversion(ADC_HandleTypeDef* hadc, uint32_t Timeout) +HAL_StatusTypeDef HAL_ADCEx_InjectedPollForConversion(ADC_HandleTypeDef *hadc, uint32_t Timeout) { uint32_t tickstart = 0U; - /* Get tick */ + /* Get tick */ tickstart = HAL_GetTick(); /* Check End of conversion flag */ - while(!(__HAL_ADC_GET_FLAG(hadc, ADC_FLAG_JEOC))) + while (!(__HAL_ADC_GET_FLAG(hadc, ADC_FLAG_JEOC))) { /* Check for the Timeout */ - if(Timeout != HAL_MAX_DELAY) + if (Timeout != HAL_MAX_DELAY) { - if((Timeout == 0U)||((HAL_GetTick() - tickstart ) > Timeout)) + if ((Timeout == 0U) || ((HAL_GetTick() - tickstart) > Timeout)) { /* New check to avoid false timeout detection in case of preemption */ - if(!(__HAL_ADC_GET_FLAG(hadc, ADC_FLAG_JEOC))) + if (!(__HAL_ADC_GET_FLAG(hadc, ADC_FLAG_JEOC))) { - hadc->State= HAL_ADC_STATE_TIMEOUT; + hadc->State = HAL_ADC_STATE_TIMEOUT; /* Process unlocked */ __HAL_UNLOCK(hadc); return HAL_TIMEOUT; @@ -438,44 +437,44 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedPollForConversion(ADC_HandleTypeDef* hadc, u } } } - + /* Clear injected group conversion flag */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_JSTRT | ADC_FLAG_JEOC); - + /* Update ADC state machine */ SET_BIT(hadc->State, HAL_ADC_STATE_INJ_EOC); - + /* Determine whether any further conversion upcoming on group injected */ /* by external trigger, continuous mode or scan sequence on going. */ /* Note: On STM32F4, there is no independent flag of end of sequence. */ /* The test of scan sequence on going is done either with scan */ /* sequence disabled or with end of conversion flag set to */ /* of end of sequence. 
*/ - if(ADC_IS_SOFTWARE_START_INJECTED(hadc) && - (HAL_IS_BIT_CLR(hadc->Instance->JSQR, ADC_JSQR_JL) || - HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS) ) && - (HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO) && - (ADC_IS_SOFTWARE_START_REGULAR(hadc) && - (hadc->Init.ContinuousConvMode == DISABLE) ) ) ) + if (ADC_IS_SOFTWARE_START_INJECTED(hadc) && + (HAL_IS_BIT_CLR(hadc->Instance->JSQR, ADC_JSQR_JL) || + HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS)) && + (HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO) && + (ADC_IS_SOFTWARE_START_REGULAR(hadc) && + (hadc->Init.ContinuousConvMode == DISABLE)))) { /* Set ADC state */ CLEAR_BIT(hadc->State, HAL_ADC_STATE_INJ_BUSY); - + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_REG_BUSY)) - { + { SET_BIT(hadc->State, HAL_ADC_STATE_READY); } } - + /* Return ADC state */ return HAL_OK; -} - +} + /** - * @brief Stop conversion of injected channels, disable interruption of + * @brief Stop conversion of injected channels, disable interruption of * end-of-conversion. Disable ADC peripheral if no regular conversion * is on going. - * @note If ADC must be disabled and if conversion is on going on + * @note If ADC must be disabled and if conversion is on going on * regular group, function HAL_ADC_Stop must be used to stop both * injected and regular groups, and disable the ADC. * @note If injected group mode auto-injection is enabled, @@ -483,35 +482,35 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedPollForConversion(ADC_HandleTypeDef* hadc, u * @param hadc ADC handle * @retval None */ -HAL_StatusTypeDef HAL_ADCEx_InjectedStop_IT(ADC_HandleTypeDef* hadc) +HAL_StatusTypeDef HAL_ADCEx_InjectedStop_IT(ADC_HandleTypeDef *hadc) { HAL_StatusTypeDef tmp_hal_status = HAL_OK; - + /* Check the parameters */ assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); /* Process locked */ __HAL_LOCK(hadc); - + /* Stop potential conversion and disable ADC peripheral */ /* Conditioned to: */ /* - No conversion on the other group (regular group) is intended to */ /* continue (injected and regular groups stop conversion and ADC disable */ /* are common) */ - /* - In case of auto-injection mode, HAL_ADC_Stop must be used. */ - if(((hadc->State & HAL_ADC_STATE_REG_BUSY) == RESET) && - HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO) ) + /* - In case of auto-injection mode, HAL_ADC_Stop must be used. 
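/* Editor's note: illustrative sketch only, not part of the upstream patch. It shows one way
   the injected-group APIs reworked above are typically combined on an STM32F4: configure a
   single software-triggered injected channel, start it, poll for completion and read the
   result. The handle hadc1 and the channel/sample-time choices are assumptions made for this
   example; "stm32f4xx_hal.h" is assumed to be included. */
extern ADC_HandleTypeDef hadc1;

static void Example_InjectedSingleConversion(void)
{
  ADC_InjectionConfTypeDef sConfigInjected = {0};

  sConfigInjected.InjectedChannel               = ADC_CHANNEL_5;               /* assumed channel */
  sConfigInjected.InjectedRank                  = ADC_INJECTED_RANK_1;
  sConfigInjected.InjectedNbrOfConversion       = 1U;
  sConfigInjected.InjectedSamplingTime          = ADC_SAMPLETIME_84CYCLES;
  sConfigInjected.ExternalTrigInjecConv         = ADC_INJECTED_SOFTWARE_START; /* no HW trigger */
  sConfigInjected.AutoInjectedConv              = DISABLE;
  sConfigInjected.InjectedDiscontinuousConvMode = DISABLE;
  sConfigInjected.InjectedOffset                = 0U;

  if (HAL_ADCEx_InjectedConfigChannel(&hadc1, &sConfigInjected) != HAL_OK)
  {
    return;                                       /* configuration failed */
  }

  if ((HAL_ADCEx_InjectedStart(&hadc1) == HAL_OK) &&
      (HAL_ADCEx_InjectedPollForConversion(&hadc1, 10U) == HAL_OK))
  {
    uint32_t sample = HAL_ADCEx_InjectedGetValue(&hadc1, ADC_INJECTED_RANK_1);
    (void)sample;                                 /* use the converted value */
  }

  (void)HAL_ADCEx_InjectedStop(&hadc1);           /* disables the ADC if no regular conversion runs */
}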
*/ + if (((hadc->State & HAL_ADC_STATE_REG_BUSY) == RESET) && + HAL_IS_BIT_CLR(hadc->Instance->CR1, ADC_CR1_JAUTO)) { /* Stop potential conversion on going, on regular and injected groups */ /* Disable ADC peripheral */ __HAL_ADC_DISABLE(hadc); - + /* Check if ADC is effectively disabled */ - if(HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) + if (HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) { /* Disable ADC end of conversion interrupt for injected channels */ __HAL_ADC_DISABLE_IT(hadc, ADC_IT_JEOC); - + /* Set ADC state */ ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_REG_BUSY | HAL_ADC_STATE_INJ_BUSY, @@ -522,13 +521,13 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStop_IT(ADC_HandleTypeDef* hadc) { /* Update ADC state machine to error */ SET_BIT(hadc->State, HAL_ADC_STATE_ERROR_CONFIG); - + tmp_hal_status = HAL_ERROR; } - + /* Process unlocked */ __HAL_UNLOCK(hadc); - + /* Return function status */ return tmp_hal_status; } @@ -545,32 +544,32 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedStop_IT(ADC_HandleTypeDef* hadc) * @arg ADC_INJECTED_RANK_4: Injected Channel4 selected * @retval None */ -uint32_t HAL_ADCEx_InjectedGetValue(ADC_HandleTypeDef* hadc, uint32_t InjectedRank) +uint32_t HAL_ADCEx_InjectedGetValue(ADC_HandleTypeDef *hadc, uint32_t InjectedRank) { __IO uint32_t tmp = 0U; - + /* Check the parameters */ assert_param(IS_ADC_INJECTED_RANK(InjectedRank)); - + /* Clear injected group conversion flag to have similar behaviour as */ /* regular group: reading data register also clears end of conversion flag. */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_JEOC); - - /* Return the selected ADC converted value */ - switch(InjectedRank) - { + + /* Return the selected ADC converted value */ + switch (InjectedRank) + { case ADC_INJECTED_RANK_4: { tmp = hadc->Instance->JDR4; - } + } break; - case ADC_INJECTED_RANK_3: - { + case ADC_INJECTED_RANK_3: + { tmp = hadc->Instance->JDR3; - } + } break; - case ADC_INJECTED_RANK_2: - { + case ADC_INJECTED_RANK_2: + { tmp = hadc->Instance->JDR2; } break; @@ -580,53 +579,53 @@ uint32_t HAL_ADCEx_InjectedGetValue(ADC_HandleTypeDef* hadc, uint32_t InjectedRa } break; default: - break; + break; } return tmp; } /** * @brief Enables ADC DMA request after last transfer (Multi-ADC mode) and enables ADC peripheral - * - * @note Caution: This function must be used only with the ADC master. + * + * @note Caution: This function must be used only with the ADC master. * * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. - * @param pData Pointer to buffer in which transferred from ADC peripheral to memory will be stored. - * @param Length The length of data to be transferred from ADC peripheral to memory. + * @param pData Pointer to buffer in which transferred from ADC peripheral to memory will be stored. + * @param Length The length of data to be transferred from ADC peripheral to memory. 
* @retval HAL status */ -HAL_StatusTypeDef HAL_ADCEx_MultiModeStart_DMA(ADC_HandleTypeDef* hadc, uint32_t* pData, uint32_t Length) +HAL_StatusTypeDef HAL_ADCEx_MultiModeStart_DMA(ADC_HandleTypeDef *hadc, uint32_t *pData, uint32_t Length) { __IO uint32_t counter = 0U; ADC_Common_TypeDef *tmpADC_Common; - + /* Check the parameters */ assert_param(IS_FUNCTIONAL_STATE(hadc->Init.ContinuousConvMode)); assert_param(IS_ADC_EXT_TRIG_EDGE(hadc->Init.ExternalTrigConvEdge)); assert_param(IS_FUNCTIONAL_STATE(hadc->Init.DMAContinuousRequests)); - + /* Process locked */ __HAL_LOCK(hadc); - - /* Check if ADC peripheral is disabled in order to enable it and wait during + + /* Check if ADC peripheral is disabled in order to enable it and wait during Tstab time the ADC's stabilization */ - if((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) - { + if ((hadc->Instance->CR2 & ADC_CR2_ADON) != ADC_CR2_ADON) + { /* Enable the Peripheral */ __HAL_ADC_ENABLE(hadc); - + /* Delay for temperature sensor stabilization time */ /* Compute number of CPU cycles to wait for */ counter = (ADC_STAB_DELAY_US * (SystemCoreClock / 1000000U)); - while(counter != 0U) + while (counter != 0U) { counter--; } } - + /* Start conversion if ADC is effectively enabled */ - if(HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) + if (HAL_IS_BIT_SET(hadc->Instance->CR2, ADC_CR2_ADON)) { /* Set ADC state */ /* - Clear state bitfield related to regular group conversion results */ @@ -634,43 +633,43 @@ HAL_StatusTypeDef HAL_ADCEx_MultiModeStart_DMA(ADC_HandleTypeDef* hadc, uint32_t ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_READY | HAL_ADC_STATE_REG_EOC | HAL_ADC_STATE_REG_OVR, HAL_ADC_STATE_REG_BUSY); - + /* If conversions on group regular are also triggering group injected, */ /* update ADC state. */ if (READ_BIT(hadc->Instance->CR1, ADC_CR1_JAUTO) != RESET) { - ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_INJ_EOC, HAL_ADC_STATE_INJ_BUSY); + ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_INJ_EOC, HAL_ADC_STATE_INJ_BUSY); } - + /* State machine update: Check if an injected conversion is ongoing */ if (HAL_IS_BIT_SET(hadc->State, HAL_ADC_STATE_INJ_BUSY)) { /* Reset ADC error code fields related to conversions on group regular */ - CLEAR_BIT(hadc->ErrorCode, (HAL_ADC_ERROR_OVR | HAL_ADC_ERROR_DMA)); + CLEAR_BIT(hadc->ErrorCode, (HAL_ADC_ERROR_OVR | HAL_ADC_ERROR_DMA)); } else { /* Reset ADC all error code fields */ ADC_CLEAR_ERRORCODE(hadc); } - + /* Process unlocked */ /* Unlock before starting ADC conversions: in case of potential */ /* interruption, to let the process to ADC IRQ Handler. 
*/ __HAL_UNLOCK(hadc); - + /* Set the DMA transfer complete callback */ hadc->DMA_Handle->XferCpltCallback = ADC_MultiModeDMAConvCplt; - + /* Set the DMA half transfer complete callback */ hadc->DMA_Handle->XferHalfCpltCallback = ADC_MultiModeDMAHalfConvCplt; - + /* Set the DMA error callback */ hadc->DMA_Handle->XferErrorCallback = ADC_MultiModeDMAError ; - + /* Manage ADC and DMA start: ADC overrun interruption, DMA start, ADC */ /* start (in case of SW start): */ - + /* Clear regular group conversion flag and overrun flag */ /* (To ensure of no unknown state from potential previous ADC operations) */ __HAL_ADC_CLEAR_FLAG(hadc, ADC_FLAG_EOC); @@ -693,12 +692,12 @@ HAL_StatusTypeDef HAL_ADCEx_MultiModeStart_DMA(ADC_HandleTypeDef* hadc, uint32_t /* Disable the selected ADC EOC rising on each regular channel conversion */ tmpADC_Common->CCR &= ~ADC_CCR_DDS; } - + /* Enable the DMA Stream */ HAL_DMA_Start_IT(hadc->DMA_Handle, (uint32_t)&tmpADC_Common->CDR, (uint32_t)pData, Length); - + /* if no external trigger present enable software conversion of regular channels */ - if((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET) + if ((hadc->Instance->CR2 & ADC_CR2_EXTEN) == RESET) { /* Enable the selected ADC software conversion for regular group */ hadc->Instance->CR2 |= (uint32_t)ADC_CR2_SWSTART; @@ -712,28 +711,28 @@ HAL_StatusTypeDef HAL_ADCEx_MultiModeStart_DMA(ADC_HandleTypeDef* hadc, uint32_t /* Set ADC error code to ADC IP internal error */ SET_BIT(hadc->ErrorCode, HAL_ADC_ERROR_INTERNAL); } - + /* Return function status */ return HAL_OK; } /** - * @brief Disables ADC DMA (multi-ADC mode) and disables ADC peripheral + * @brief Disables ADC DMA (multi-ADC mode) and disables ADC peripheral * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. * @retval HAL status */ -HAL_StatusTypeDef HAL_ADCEx_MultiModeStop_DMA(ADC_HandleTypeDef* hadc) +HAL_StatusTypeDef HAL_ADCEx_MultiModeStop_DMA(ADC_HandleTypeDef *hadc) { HAL_StatusTypeDef tmp_hal_status = HAL_OK; ADC_Common_TypeDef *tmpADC_Common; - + /* Check the parameters */ assert_param(IS_ADC_ALL_INSTANCE(hadc->Instance)); - + /* Process locked */ __HAL_LOCK(hadc); - + /* Stop potential conversion on going, on regular and injected groups */ /* Disable ADC peripheral */ __HAL_ADC_DISABLE(hadc); @@ -744,42 +743,45 @@ HAL_StatusTypeDef HAL_ADCEx_MultiModeStop_DMA(ADC_HandleTypeDef* hadc) tmpADC_Common = ADC_COMMON_REGISTER(hadc); /* Check if ADC is effectively disabled */ - if(HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) + if (HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_ADON)) { /* Disable the selected ADC DMA mode for multimode */ tmpADC_Common->CCR &= ~ADC_CCR_DDS; - + /* Disable the DMA channel (in case of DMA in circular mode or stop while */ /* DMA transfer is on going) */ tmp_hal_status = HAL_DMA_Abort(hadc->DMA_Handle); - + /* Disable ADC overrun interrupt */ __HAL_ADC_DISABLE_IT(hadc, ADC_IT_OVR); - + /* Set ADC state */ ADC_STATE_CLR_SET(hadc->State, HAL_ADC_STATE_REG_BUSY | HAL_ADC_STATE_INJ_BUSY, HAL_ADC_STATE_READY); } - + /* Process unlocked */ __HAL_UNLOCK(hadc); - + /* Return function status */ return tmp_hal_status; } /** - * @brief Returns the last ADC1, ADC2 and ADC3 regular conversions results + * @brief Returns the last ADC1, ADC2 and ADC3 regular conversions results * data in the selected multi mode. * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. * @retval The converted data value. 
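/* Editor's note: illustrative sketch only, not part of the upstream patch. It outlines the
   usual call order for the multi-ADC APIs above: configure dual regular-simultaneous mode on
   the master ADC, start the slave with HAL_ADC_Start(), then start the master with
   HAL_ADCEx_MultiModeStart_DMA() so both results end up in one buffer. The handles
   hadc1/hadc2, the buffer length and the DMA/MSP setup are assumptions for this example. */
extern ADC_HandleTypeDef hadc1;                   /* master */
extern ADC_HandleTypeDef hadc2;                   /* slave  */
static uint32_t dualAdcBuffer[32];                /* packed ADC1/ADC2 results */

static void Example_DualModeStart(void)
{
  ADC_MultiModeTypeDef multimode = {0};

  multimode.Mode             = ADC_DUALMODE_REGSIMULT;
  multimode.DMAAccessMode    = ADC_DMAACCESSMODE_2;
  multimode.TwoSamplingDelay = ADC_TWOSAMPLINGDELAY_5CYCLES;

  if (HAL_ADCEx_MultiModeConfigChannel(&hadc1, &multimode) != HAL_OK)
  {
    return;
  }

  /* Slave first, then master: the master's trigger starts both converters. */
  if (HAL_ADC_Start(&hadc2) != HAL_OK)
  {
    return;
  }

  (void)HAL_ADCEx_MultiModeStart_DMA(&hadc1, dualAdcBuffer, 32U);
}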
*/ -uint32_t HAL_ADCEx_MultiModeGetValue(ADC_HandleTypeDef* hadc) +uint32_t HAL_ADCEx_MultiModeGetValue(ADC_HandleTypeDef *hadc) { ADC_Common_TypeDef *tmpADC_Common; + /* Prevent unused argument(s) compilation warning */ + UNUSED(hadc); + /* Pointer to the common control register to which is belonging hadc */ /* (Depending on STM32F4 product, there may be up to 3 ADC and 1 common */ /* control register) */ @@ -790,12 +792,12 @@ uint32_t HAL_ADCEx_MultiModeGetValue(ADC_HandleTypeDef* hadc) } /** - * @brief Injected conversion complete callback in non blocking mode + * @brief Injected conversion complete callback in non blocking mode * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. * @retval None */ -__weak void HAL_ADCEx_InjectedConvCpltCallback(ADC_HandleTypeDef* hadc) +__weak void HAL_ADCEx_InjectedConvCpltCallback(ADC_HandleTypeDef *hadc) { /* Prevent unused argument(s) compilation warning */ UNUSED(hadc); @@ -809,15 +811,15 @@ __weak void HAL_ADCEx_InjectedConvCpltCallback(ADC_HandleTypeDef* hadc) * rank in the sequencer and its sample time. * @param hadc pointer to a ADC_HandleTypeDef structure that contains * the configuration information for the specified ADC. - * @param sConfigInjected ADC configuration structure for injected channel. + * @param sConfigInjected ADC configuration structure for injected channel. * @retval None */ -HAL_StatusTypeDef HAL_ADCEx_InjectedConfigChannel(ADC_HandleTypeDef* hadc, ADC_InjectionConfTypeDef* sConfigInjected) +HAL_StatusTypeDef HAL_ADCEx_InjectedConfigChannel(ADC_HandleTypeDef *hadc, ADC_InjectionConfTypeDef *sConfigInjected) { - -#ifdef USE_FULL_ASSERT + +#ifdef USE_FULL_ASSERT uint32_t tmp = 0U; - + #endif /* USE_FULL_ASSERT */ ADC_Common_TypeDef *tmpADC_Common; @@ -836,20 +838,20 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedConfigChannel(ADC_HandleTypeDef* hadc, ADC_I assert_param(IS_ADC_RANGE(tmp, sConfigInjected->InjectedOffset)); #endif /* USE_FULL_ASSERT */ - if(sConfigInjected->ExternalTrigInjecConv != ADC_INJECTED_SOFTWARE_START) + if (sConfigInjected->ExternalTrigInjecConv != ADC_INJECTED_SOFTWARE_START) { assert_param(IS_ADC_EXT_INJEC_TRIG_EDGE(sConfigInjected->ExternalTrigInjecConvEdge)); } /* Process locked */ __HAL_LOCK(hadc); - + /* if ADC_Channel_10 ... 
ADC_Channel_18 is selected */ if (sConfigInjected->InjectedChannel > ADC_CHANNEL_9) { /* Clear the old sample time */ hadc->Instance->SMPR1 &= ~ADC_SMPR1(ADC_SMPR1_SMP10, sConfigInjected->InjectedChannel); - + /* Set the new sample time */ hadc->Instance->SMPR1 |= ADC_SMPR1(sConfigInjected->InjectedSamplingTime, sConfigInjected->InjectedChannel); } @@ -857,34 +859,34 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedConfigChannel(ADC_HandleTypeDef* hadc, ADC_I { /* Clear the old sample time */ hadc->Instance->SMPR2 &= ~ADC_SMPR2(ADC_SMPR2_SMP0, sConfigInjected->InjectedChannel); - + /* Set the new sample time */ hadc->Instance->SMPR2 |= ADC_SMPR2(sConfigInjected->InjectedSamplingTime, sConfigInjected->InjectedChannel); } - + /*---------------------------- ADCx JSQR Configuration -----------------*/ hadc->Instance->JSQR &= ~(ADC_JSQR_JL); hadc->Instance->JSQR |= ADC_SQR1(sConfigInjected->InjectedNbrOfConversion); - + /* Rank configuration */ - + /* Clear the old SQx bits for the selected rank */ - hadc->Instance->JSQR &= ~ADC_JSQR(ADC_JSQR_JSQ1, sConfigInjected->InjectedRank,sConfigInjected->InjectedNbrOfConversion); - + hadc->Instance->JSQR &= ~ADC_JSQR(ADC_JSQR_JSQ1, sConfigInjected->InjectedRank, sConfigInjected->InjectedNbrOfConversion); + /* Set the SQx bits for the selected rank */ - hadc->Instance->JSQR |= ADC_JSQR(sConfigInjected->InjectedChannel, sConfigInjected->InjectedRank,sConfigInjected->InjectedNbrOfConversion); + hadc->Instance->JSQR |= ADC_JSQR(sConfigInjected->InjectedChannel, sConfigInjected->InjectedRank, sConfigInjected->InjectedNbrOfConversion); /* Enable external trigger if trigger selection is different of software */ /* start. */ /* Note: This configuration keeps the hardware feature of parameter */ /* ExternalTrigConvEdge "trigger edge none" equivalent to */ - /* software start. */ - if(sConfigInjected->ExternalTrigInjecConv != ADC_INJECTED_SOFTWARE_START) - { + /* software start. 
*/ + if (sConfigInjected->ExternalTrigInjecConv != ADC_INJECTED_SOFTWARE_START) + { /* Select external trigger to start conversion */ hadc->Instance->CR2 &= ~(ADC_CR2_JEXTSEL); hadc->Instance->CR2 |= sConfigInjected->ExternalTrigInjecConv; - + /* Select external trigger polarity */ hadc->Instance->CR2 &= ~(ADC_CR2_JEXTEN); hadc->Instance->CR2 |= sConfigInjected->ExternalTrigInjecConvEdge; @@ -893,9 +895,9 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedConfigChannel(ADC_HandleTypeDef* hadc, ADC_I { /* Reset the external trigger */ hadc->Instance->CR2 &= ~(ADC_CR2_JEXTSEL); - hadc->Instance->CR2 &= ~(ADC_CR2_JEXTEN); + hadc->Instance->CR2 &= ~(ADC_CR2_JEXTEN); } - + if (sConfigInjected->AutoInjectedConv != DISABLE) { /* Enable the selected ADC automatic injected group conversion */ @@ -906,7 +908,7 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedConfigChannel(ADC_HandleTypeDef* hadc, ADC_I /* Disable the selected ADC automatic injected group conversion */ hadc->Instance->CR1 &= ~(ADC_CR1_JAUTO); } - + if (sConfigInjected->InjectedDiscontinuousConvMode != DISABLE) { /* Enable the selected ADC injected discontinuous mode */ @@ -917,8 +919,8 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedConfigChannel(ADC_HandleTypeDef* hadc, ADC_I /* Disable the selected ADC injected discontinuous mode */ hadc->Instance->CR1 &= ~(ADC_CR1_JDISCEN); } - - switch(sConfigInjected->InjectedRank) + + switch (sConfigInjected->InjectedRank) { case 1U: /* Set injected channel 1 offset */ @@ -945,7 +947,7 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedConfigChannel(ADC_HandleTypeDef* hadc, ADC_I /* Pointer to the common control register to which is belonging hadc */ /* (Depending on STM32F4 product, there may be up to 3 ADC and 1 common */ /* control register) */ - tmpADC_Common = ADC_COMMON_REGISTER(hadc); + tmpADC_Common = ADC_COMMON_REGISTER(hadc); /* if ADC1 Channel_18 is selected enable VBAT Channel */ if ((hadc->Instance == ADC1) && (sConfigInjected->InjectedChannel == ADC_CHANNEL_VBAT)) @@ -953,30 +955,30 @@ HAL_StatusTypeDef HAL_ADCEx_InjectedConfigChannel(ADC_HandleTypeDef* hadc, ADC_I /* Enable the VBAT channel*/ tmpADC_Common->CCR |= ADC_CCR_VBATE; } - + /* if ADC1 Channel_16 or Channel_17 is selected enable TSVREFE Channel(Temperature sensor and VREFINT) */ if ((hadc->Instance == ADC1) && ((sConfigInjected->InjectedChannel == ADC_CHANNEL_TEMPSENSOR) || (sConfigInjected->InjectedChannel == ADC_CHANNEL_VREFINT))) { /* Enable the TSVREFE channel*/ tmpADC_Common->CCR |= ADC_CCR_TSVREFE; } - + /* Process unlocked */ __HAL_UNLOCK(hadc); - + /* Return function status */ return HAL_OK; } /** - * @brief Configures the ADC multi-mode + * @brief Configures the ADC multi-mode * @param hadc pointer to a ADC_HandleTypeDef structure that contains - * the configuration information for the specified ADC. - * @param multimode pointer to an ADC_MultiModeTypeDef structure that contains + * the configuration information for the specified ADC. + * @param multimode pointer to an ADC_MultiModeTypeDef structure that contains * the configuration information for multimode. 
* @retval HAL status */ -HAL_StatusTypeDef HAL_ADCEx_MultiModeConfigChannel(ADC_HandleTypeDef* hadc, ADC_MultiModeTypeDef* multimode) +HAL_StatusTypeDef HAL_ADCEx_MultiModeConfigChannel(ADC_HandleTypeDef *hadc, ADC_MultiModeTypeDef *multimode) { ADC_Common_TypeDef *tmpADC_Common; @@ -985,7 +987,7 @@ HAL_StatusTypeDef HAL_ADCEx_MultiModeConfigChannel(ADC_HandleTypeDef* hadc, ADC_ assert_param(IS_ADC_MODE(multimode->Mode)); assert_param(IS_ADC_DMA_ACCESS_MODE(multimode->DMAAccessMode)); assert_param(IS_ADC_SAMPLING_DELAY(multimode->TwoSamplingDelay)); - + /* Process locked */ __HAL_LOCK(hadc); @@ -997,18 +999,18 @@ HAL_StatusTypeDef HAL_ADCEx_MultiModeConfigChannel(ADC_HandleTypeDef* hadc, ADC_ /* Set ADC mode */ tmpADC_Common->CCR &= ~(ADC_CCR_MULTI); tmpADC_Common->CCR |= multimode->Mode; - + /* Set the ADC DMA access mode */ tmpADC_Common->CCR &= ~(ADC_CCR_DMA); tmpADC_Common->CCR |= multimode->DMAAccessMode; - + /* Set delay between two sampling phases */ tmpADC_Common->CCR &= ~(ADC_CCR_DELAY); tmpADC_Common->CCR |= multimode->TwoSamplingDelay; - + /* Process unlocked */ __HAL_UNLOCK(hadc); - + /* Return function status */ return HAL_OK; } @@ -1018,48 +1020,48 @@ HAL_StatusTypeDef HAL_ADCEx_MultiModeConfigChannel(ADC_HandleTypeDef* hadc, ADC_ */ /** - * @brief DMA transfer complete callback. + * @brief DMA transfer complete callback. * @param hdma pointer to a DMA_HandleTypeDef structure that contains * the configuration information for the specified DMA module. * @retval None */ -static void ADC_MultiModeDMAConvCplt(DMA_HandleTypeDef *hdma) +static void ADC_MultiModeDMAConvCplt(DMA_HandleTypeDef *hdma) { /* Retrieve ADC handle corresponding to current DMA handle */ - ADC_HandleTypeDef* hadc = ( ADC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; - + ADC_HandleTypeDef *hadc = (ADC_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + /* Update state machine on conversion status if not in error state */ if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_ERROR_INTERNAL | HAL_ADC_STATE_ERROR_DMA)) { /* Update ADC state machine */ SET_BIT(hadc->State, HAL_ADC_STATE_REG_EOC); - + /* Determine whether any further conversion upcoming on group regular */ /* by external trigger, continuous mode or scan sequence on going. */ /* Note: On STM32F4, there is no independent flag of end of sequence. */ /* The test of scan sequence on going is done either with scan */ /* sequence disabled or with end of conversion flag set to */ /* of end of sequence. */ - if(ADC_IS_SOFTWARE_START_REGULAR(hadc) && - (hadc->Init.ContinuousConvMode == DISABLE) && - (HAL_IS_BIT_CLR(hadc->Instance->SQR1, ADC_SQR1_L) || - HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS) ) ) + if (ADC_IS_SOFTWARE_START_REGULAR(hadc) && + (hadc->Init.ContinuousConvMode == DISABLE) && + (HAL_IS_BIT_CLR(hadc->Instance->SQR1, ADC_SQR1_L) || + HAL_IS_BIT_CLR(hadc->Instance->CR2, ADC_CR2_EOCS))) { /* Disable ADC end of single conversion interrupt on group regular */ /* Note: Overrun interrupt was enabled with EOC interrupt in */ /* HAL_ADC_Start_IT(), but is not disabled here because can be used */ /* by overrun IRQ process below. 
*/ __HAL_ADC_DISABLE_IT(hadc, ADC_IT_EOC); - + /* Set ADC state */ - CLEAR_BIT(hadc->State, HAL_ADC_STATE_REG_BUSY); - + CLEAR_BIT(hadc->State, HAL_ADC_STATE_REG_BUSY); + if (HAL_IS_BIT_CLR(hadc->State, HAL_ADC_STATE_INJ_BUSY)) { SET_BIT(hadc->State, HAL_ADC_STATE_READY); } } - + /* Conversion complete callback */ HAL_ADC_ConvCpltCallback(hadc); } @@ -1071,31 +1073,31 @@ static void ADC_MultiModeDMAConvCplt(DMA_HandleTypeDef *hdma) } /** - * @brief DMA half transfer complete callback. + * @brief DMA half transfer complete callback. * @param hdma pointer to a DMA_HandleTypeDef structure that contains * the configuration information for the specified DMA module. * @retval None */ -static void ADC_MultiModeDMAHalfConvCplt(DMA_HandleTypeDef *hdma) +static void ADC_MultiModeDMAHalfConvCplt(DMA_HandleTypeDef *hdma) { - ADC_HandleTypeDef* hadc = ( ADC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; - /* Conversion complete callback */ - HAL_ADC_ConvHalfCpltCallback(hadc); + ADC_HandleTypeDef *hadc = (ADC_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + /* Conversion complete callback */ + HAL_ADC_ConvHalfCpltCallback(hadc); } /** - * @brief DMA error callback + * @brief DMA error callback * @param hdma pointer to a DMA_HandleTypeDef structure that contains * the configuration information for the specified DMA module. * @retval None */ -static void ADC_MultiModeDMAError(DMA_HandleTypeDef *hdma) +static void ADC_MultiModeDMAError(DMA_HandleTypeDef *hdma) { - ADC_HandleTypeDef* hadc = ( ADC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; - hadc->State= HAL_ADC_STATE_ERROR_DMA; - /* Set ADC error code to DMA error */ - hadc->ErrorCode |= HAL_ADC_ERROR_DMA; - HAL_ADC_ErrorCallback(hadc); + ADC_HandleTypeDef *hadc = (ADC_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + hadc->State = HAL_ADC_STATE_ERROR_DMA; + /* Set ADC error code to DMA error */ + hadc->ErrorCode |= HAL_ADC_ERROR_DMA; + HAL_ADC_ErrorCallback(hadc); } /** @@ -1105,10 +1107,9 @@ static void ADC_MultiModeDMAError(DMA_HandleTypeDef *hdma) #endif /* HAL_ADC_MODULE_ENABLED */ /** * @} - */ + */ /** * @} - */ + */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_cortex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_cortex.c index 2efb9864..c3d2ba8e 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_cortex.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_cortex.c @@ -68,14 +68,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** */ @@ -291,9 +289,41 @@ void HAL_MPU_Enable(uint32_t MPU_Control) __ISB(); } +/** + * @brief Enables the MPU Region. + * @retval None + */ +void HAL_MPU_EnableRegion(uint32_t RegionNumber) +{ + /* Check the parameters */ + assert_param(IS_MPU_REGION_NUMBER(RegionNumber)); + + /* Set the Region number */ + MPU->RNR = RegionNumber; + + /* Enable the Region */ + SET_BIT(MPU->RASR, MPU_RASR_ENABLE_Msk); +} + +/** + * @brief Disables the MPU Region. + * @retval None + */ +void HAL_MPU_DisableRegion(uint32_t RegionNumber) +{ + /* Check the parameters */ + assert_param(IS_MPU_REGION_NUMBER(RegionNumber)); + + /* Set the Region number */ + MPU->RNR = RegionNumber; + + /* Disable the Region */ + CLEAR_BIT(MPU->RASR, MPU_RASR_ENABLE_Msk); +} + /** * @brief Initializes and configures the Region and the memory to be protected. - * @param MPU_Init Pointer to a MPU_Region_InitTypeDef structure that contains + * @param MPU_Init Pointer to a MPU_Region_InitTypeDef structure that contains * the initialization and configuration information. * @retval None */ @@ -302,41 +332,45 @@ void HAL_MPU_ConfigRegion(MPU_Region_InitTypeDef *MPU_Init) /* Check the parameters */ assert_param(IS_MPU_REGION_NUMBER(MPU_Init->Number)); assert_param(IS_MPU_REGION_ENABLE(MPU_Init->Enable)); + assert_param(IS_MPU_INSTRUCTION_ACCESS(MPU_Init->DisableExec)); + assert_param(IS_MPU_REGION_PERMISSION_ATTRIBUTE(MPU_Init->AccessPermission)); + assert_param(IS_MPU_TEX_LEVEL(MPU_Init->TypeExtField)); + assert_param(IS_MPU_ACCESS_SHAREABLE(MPU_Init->IsShareable)); + assert_param(IS_MPU_ACCESS_CACHEABLE(MPU_Init->IsCacheable)); + assert_param(IS_MPU_ACCESS_BUFFERABLE(MPU_Init->IsBufferable)); + assert_param(IS_MPU_SUB_REGION_DISABLE(MPU_Init->SubRegionDisable)); + assert_param(IS_MPU_REGION_SIZE(MPU_Init->Size)); /* Set the Region number */ MPU->RNR = MPU_Init->Number; - if ((MPU_Init->Enable) != RESET) - { - /* Check the parameters */ - assert_param(IS_MPU_INSTRUCTION_ACCESS(MPU_Init->DisableExec)); - assert_param(IS_MPU_REGION_PERMISSION_ATTRIBUTE(MPU_Init->AccessPermission)); - assert_param(IS_MPU_TEX_LEVEL(MPU_Init->TypeExtField)); - assert_param(IS_MPU_ACCESS_SHAREABLE(MPU_Init->IsShareable)); - assert_param(IS_MPU_ACCESS_CACHEABLE(MPU_Init->IsCacheable)); - assert_param(IS_MPU_ACCESS_BUFFERABLE(MPU_Init->IsBufferable)); - assert_param(IS_MPU_SUB_REGION_DISABLE(MPU_Init->SubRegionDisable)); - assert_param(IS_MPU_REGION_SIZE(MPU_Init->Size)); - - MPU->RBAR = MPU_Init->BaseAddress; - MPU->RASR = ((uint32_t)MPU_Init->DisableExec << MPU_RASR_XN_Pos) | - ((uint32_t)MPU_Init->AccessPermission << MPU_RASR_AP_Pos) | - ((uint32_t)MPU_Init->TypeExtField << MPU_RASR_TEX_Pos) | - ((uint32_t)MPU_Init->IsShareable << MPU_RASR_S_Pos) | - ((uint32_t)MPU_Init->IsCacheable << MPU_RASR_C_Pos) | - ((uint32_t)MPU_Init->IsBufferable << MPU_RASR_B_Pos) | - ((uint32_t)MPU_Init->SubRegionDisable << MPU_RASR_SRD_Pos) | - ((uint32_t)MPU_Init->Size << MPU_RASR_SIZE_Pos) 
| - ((uint32_t)MPU_Init->Enable << MPU_RASR_ENABLE_Pos); - } - else - { - MPU->RBAR = 0x00U; - MPU->RASR = 0x00U; - } + /* Disable the Region */ + CLEAR_BIT(MPU->RASR, MPU_RASR_ENABLE_Msk); + + /* Apply configuration */ + MPU->RBAR = MPU_Init->BaseAddress; + MPU->RASR = ((uint32_t)MPU_Init->DisableExec << MPU_RASR_XN_Pos) | + ((uint32_t)MPU_Init->AccessPermission << MPU_RASR_AP_Pos) | + ((uint32_t)MPU_Init->TypeExtField << MPU_RASR_TEX_Pos) | + ((uint32_t)MPU_Init->IsShareable << MPU_RASR_S_Pos) | + ((uint32_t)MPU_Init->IsCacheable << MPU_RASR_C_Pos) | + ((uint32_t)MPU_Init->IsBufferable << MPU_RASR_B_Pos) | + ((uint32_t)MPU_Init->SubRegionDisable << MPU_RASR_SRD_Pos) | + ((uint32_t)MPU_Init->Size << MPU_RASR_SIZE_Pos) | + ((uint32_t)MPU_Init->Enable << MPU_RASR_ENABLE_Pos); } #endif /* __MPU_PRESENT */ +/** + * @brief Clear pending events. + * @retval None + */ +void HAL_CORTEX_ClearEvent(void) +{ + __SEV(); + __WFE(); +} + /** * @brief Gets the priority grouping field from the NVIC Interrupt Controller. * @retval Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field) @@ -502,4 +536,3 @@ __weak void HAL_SYSTICK_Callback(void) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dac.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dac.c index 5efc497f..8953638e 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dac.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dac.c @@ -11,6 +11,17 @@ * + Peripheral State and Errors functions * * + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim ============================================================================== ##### DAC Peripheral features ##### @@ -88,9 +99,9 @@ A DMA request can be generated when an external trigger (but not a software trigger) occurs if DMA1 requests are enabled using HAL_DAC_Start_DMA(). DMA1 requests are mapped as following: - (#) DAC channel1 mapped on DMA1 Stream5 channel7 which must be + (#) DAC channel1 mapped on DMA1 Stream5 channel7 which must be already configured - (#) DAC channel2 mapped on DMA1 Stream6 channel7 which must be + (#) DAC channel2 mapped on DMA1 Stream6 channel7 which must be already configured [..] @@ -157,7 +168,7 @@ and a pointer to the user callback function. Use function HAL_DAC_UnRegisterCallback() to reset a callback to the default - weak (surcharged) function. It allows to reset following callbacks: + weak (overridden) function. It allows to reset following callbacks: (+) ConvCpltCallbackCh1 : callback when a half transfer is completed on Ch1. (+) ConvHalfCpltCallbackCh1 : callback when a transfer is completed on Ch1. (+) ErrorCallbackCh1 : callback when an error occurs on Ch1. @@ -172,9 +183,9 @@ This function) takes as parameters the HAL peripheral handle and the Callback ID. By default, after the HAL_DAC_Init and if the state is HAL_DAC_STATE_RESET - all callbacks are reset to the corresponding legacy weak (surcharged) functions. + all callbacks are reset to the corresponding legacy weak (overridden) functions. 
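/* Editor's note: illustrative sketch only, not part of the upstream patch. With
   USE_HAL_DAC_REGISTER_CALLBACKS set to 1, a user function can replace the weak
   conversion-complete callback as described above. The handle hdac and the callback
   name are assumptions made for this example. */
extern DAC_HandleTypeDef hdac;

static void MyDacCh1CpltCallback(DAC_HandleTypeDef *hdac_cb)
{
  (void)hdac_cb;                                  /* application handling of end of transfer */
}

static void Example_RegisterDacCallback(void)
{
  /* Called after HAL_DAC_Init() so the handle state is HAL_DAC_STATE_READY. */
  (void)HAL_DAC_RegisterCallback(&hdac, HAL_DAC_CH1_COMPLETE_CB_ID,
                                 MyDacCh1CpltCallback);
}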
Exception done for MspInit and MspDeInit callbacks that are respectively - reset to the legacy weak (surcharged) functions in the HAL_DAC_Init + reset to the legacy weak (overridden) functions in the HAL_DAC_Init and HAL_DAC_DeInit only when these callbacks are null (not registered beforehand). If not, MspInit or MspDeInit are not null, the HAL_DAC_Init and HAL_DAC_DeInit keep and use the user MspInit/MspDeInit callbacks (registered beforehand) @@ -189,7 +200,7 @@ When The compilation define USE_HAL_DAC_REGISTER_CALLBACKS is set to 0 or not defined, the callback registering feature is not available - and weak (surcharged) callbacks are used. + and weak (overridden) callbacks are used. *** DAC HAL driver macros list *** ============================================= @@ -205,17 +216,6 @@ (@) You can refer to the DAC HAL driver header file for more useful macros @endverbatim - ****************************************************************************** - * @attention - * - *
© Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * ****************************************************************************** */ @@ -270,7 +270,7 @@ */ HAL_StatusTypeDef HAL_DAC_Init(DAC_HandleTypeDef *hdac) { - /* Check DAC handle */ + /* Check the DAC peripheral handle */ if (hdac == NULL) { return HAL_ERROR; @@ -331,7 +331,7 @@ HAL_StatusTypeDef HAL_DAC_Init(DAC_HandleTypeDef *hdac) */ HAL_StatusTypeDef HAL_DAC_DeInit(DAC_HandleTypeDef *hdac) { - /* Check DAC handle */ + /* Check the DAC peripheral handle */ if (hdac == NULL) { return HAL_ERROR; @@ -434,6 +434,12 @@ __weak void HAL_DAC_MspDeInit(DAC_HandleTypeDef *hdac) */ HAL_StatusTypeDef HAL_DAC_Start(DAC_HandleTypeDef *hdac, uint32_t Channel) { + /* Check the DAC peripheral handle */ + if (hdac == NULL) + { + return HAL_ERROR; + } + /* Check the parameters */ assert_param(IS_DAC_CHANNEL(Channel)); @@ -489,6 +495,12 @@ HAL_StatusTypeDef HAL_DAC_Start(DAC_HandleTypeDef *hdac, uint32_t Channel) */ HAL_StatusTypeDef HAL_DAC_Stop(DAC_HandleTypeDef *hdac, uint32_t Channel) { + /* Check the DAC peripheral handle */ + if (hdac == NULL) + { + return HAL_ERROR; + } + /* Check the parameters */ assert_param(IS_DAC_CHANNEL(Channel)); @@ -519,11 +531,17 @@ HAL_StatusTypeDef HAL_DAC_Stop(DAC_HandleTypeDef *hdac, uint32_t Channel) * @arg DAC_ALIGN_12B_R: 12bit right data alignment selected * @retval HAL status */ -HAL_StatusTypeDef HAL_DAC_Start_DMA(DAC_HandleTypeDef *hdac, uint32_t Channel, uint32_t *pData, uint32_t Length, +HAL_StatusTypeDef HAL_DAC_Start_DMA(DAC_HandleTypeDef *hdac, uint32_t Channel, const uint32_t *pData, uint32_t Length, uint32_t Alignment) { - HAL_StatusTypeDef status = HAL_OK; - uint32_t tmpreg = 0U; + HAL_StatusTypeDef status = HAL_ERROR; + uint32_t tmpreg; + + /* Check the DAC peripheral handle */ + if (hdac == NULL) + { + return HAL_ERROR; + } /* Check the parameters */ assert_param(IS_DAC_CHANNEL(Channel)); @@ -560,12 +578,10 @@ HAL_StatusTypeDef HAL_DAC_Start_DMA(DAC_HandleTypeDef *hdac, uint32_t Channel, u /* Get DHR12L1 address */ tmpreg = (uint32_t)&hdac->Instance->DHR12L1; break; - case DAC_ALIGN_8B_R: + default: /* case DAC_ALIGN_8B_R */ /* Get DHR8R1 address */ tmpreg = (uint32_t)&hdac->Instance->DHR8R1; break; - default: - break; } } #if defined(DAC_CHANNEL2_SUPPORT) @@ -594,17 +610,13 @@ HAL_StatusTypeDef HAL_DAC_Start_DMA(DAC_HandleTypeDef *hdac, uint32_t Channel, u /* Get DHR12L2 address */ tmpreg = (uint32_t)&hdac->Instance->DHR12L2; break; - case DAC_ALIGN_8B_R: + default: /* case DAC_ALIGN_8B_R */ /* Get DHR8R2 address */ tmpreg = (uint32_t)&hdac->Instance->DHR8R2; break; - default: - break; } } #endif /* DAC_CHANNEL2_SUPPORT */ - - /* Enable the DMA Stream */ if (Channel == DAC_CHANNEL_1) { /* Enable the DAC DMA underrun interrupt */ @@ -653,6 +665,12 @@ HAL_StatusTypeDef HAL_DAC_Start_DMA(DAC_HandleTypeDef *hdac, uint32_t Channel, u */ HAL_StatusTypeDef HAL_DAC_Stop_DMA(DAC_HandleTypeDef *hdac, uint32_t Channel) { + /* Check the DAC peripheral handle */ + if (hdac == NULL) + { + return HAL_ERROR; + } + /* Check the parameters */ assert_param(IS_DAC_CHANNEL(Channel)); @@ -701,10 +719,13 @@ HAL_StatusTypeDef HAL_DAC_Stop_DMA(DAC_HandleTypeDef *hdac, uint32_t Channel) */ void HAL_DAC_IRQHandler(DAC_HandleTypeDef *hdac) { - if (__HAL_DAC_GET_IT_SOURCE(hdac, DAC_IT_DMAUDR1)) + uint32_t itsource = 
hdac->Instance->CR; + uint32_t itflag = hdac->Instance->SR; + + if ((itsource & DAC_IT_DMAUDR1) == DAC_IT_DMAUDR1) { /* Check underrun flag of DAC channel 1 */ - if (__HAL_DAC_GET_FLAG(hdac, DAC_FLAG_DMAUDR1)) + if ((itflag & DAC_FLAG_DMAUDR1) == DAC_FLAG_DMAUDR1) { /* Change DAC state to error state */ hdac->State = HAL_DAC_STATE_ERROR; @@ -716,7 +737,7 @@ void HAL_DAC_IRQHandler(DAC_HandleTypeDef *hdac) __HAL_DAC_CLEAR_FLAG(hdac, DAC_FLAG_DMAUDR1); /* Disable the selected DAC channel1 DMA request */ - CLEAR_BIT(hdac->Instance->CR, DAC_CR_DMAEN1); + __HAL_DAC_DISABLE_IT(hdac, DAC_CR_DMAEN1); /* Error callback */ #if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) @@ -728,10 +749,10 @@ void HAL_DAC_IRQHandler(DAC_HandleTypeDef *hdac) } #if defined(DAC_CHANNEL2_SUPPORT) - if (__HAL_DAC_GET_IT_SOURCE(hdac, DAC_IT_DMAUDR2)) + if ((itsource & DAC_IT_DMAUDR2) == DAC_IT_DMAUDR2) { /* Check underrun flag of DAC channel 2 */ - if (__HAL_DAC_GET_FLAG(hdac, DAC_FLAG_DMAUDR2)) + if ((itflag & DAC_FLAG_DMAUDR2) == DAC_FLAG_DMAUDR2) { /* Change DAC state to error state */ hdac->State = HAL_DAC_STATE_ERROR; @@ -743,7 +764,7 @@ void HAL_DAC_IRQHandler(DAC_HandleTypeDef *hdac) __HAL_DAC_CLEAR_FLAG(hdac, DAC_FLAG_DMAUDR2); /* Disable the selected DAC channel2 DMA request */ - CLEAR_BIT(hdac->Instance->CR, DAC_CR_DMAEN2); + __HAL_DAC_DISABLE_IT(hdac, DAC_CR_DMAEN2); /* Error callback */ #if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) @@ -776,6 +797,12 @@ HAL_StatusTypeDef HAL_DAC_SetValue(DAC_HandleTypeDef *hdac, uint32_t Channel, ui { __IO uint32_t tmp = 0UL; + /* Check the DAC peripheral handle */ + if (hdac == NULL) + { + return HAL_ERROR; + } + /* Check the parameters */ assert_param(IS_DAC_CHANNEL(Channel)); assert_param(IS_DAC_ALIGN(Alignment)); @@ -893,10 +920,13 @@ __weak void HAL_DAC_DMAUnderrunCallbackCh1(DAC_HandleTypeDef *hdac) * @arg DAC_CHANNEL_2: DAC Channel2 selected * @retval The selected DAC channel data output value. 
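/* Editor's note: illustrative sketch only, not part of the upstream patch. A minimal
   software-driven output using the APIs above: load a 12-bit value, enable the channel,
   read back the data output register. The handle hdac is an assumption, and the channel
   is expected to have been configured with DAC_TRIGGER_NONE beforehand. */
extern DAC_HandleTypeDef hdac;

static void Example_DacSingleValue(void)
{
  (void)HAL_DAC_SetValue(&hdac, DAC_CHANNEL_1, DAC_ALIGN_12B_R, 2048U); /* mid-scale output */
  (void)HAL_DAC_Start(&hdac, DAC_CHANNEL_1);

  uint32_t dout = HAL_DAC_GetValue(&hdac, DAC_CHANNEL_1);
  (void)dout;                                     /* last value loaded on channel 1 */
}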
*/ -uint32_t HAL_DAC_GetValue(DAC_HandleTypeDef *hdac, uint32_t Channel) +uint32_t HAL_DAC_GetValue(const DAC_HandleTypeDef *hdac, uint32_t Channel) { uint32_t result = 0; + /* Check the DAC peripheral handle */ + assert_param(hdac != NULL); + /* Check the parameters */ assert_param(IS_DAC_CHANNEL(Channel)); @@ -925,11 +955,19 @@ uint32_t HAL_DAC_GetValue(DAC_HandleTypeDef *hdac, uint32_t Channel) * @arg DAC_CHANNEL_2: DAC Channel2 selected * @retval HAL status */ -HAL_StatusTypeDef HAL_DAC_ConfigChannel(DAC_HandleTypeDef *hdac, DAC_ChannelConfTypeDef *sConfig, uint32_t Channel) +HAL_StatusTypeDef HAL_DAC_ConfigChannel(DAC_HandleTypeDef *hdac, + const DAC_ChannelConfTypeDef *sConfig, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpreg1; uint32_t tmpreg2; + /* Check the DAC peripheral handle and channel configuration struct */ + if ((hdac == NULL) || (sConfig == NULL)) + { + return HAL_ERROR; + } + /* Check the DAC parameters */ assert_param(IS_DAC_TRIGGER(sConfig->DAC_Trigger)); assert_param(IS_DAC_OUTPUT_BUFFER_STATE(sConfig->DAC_OutputBuffer)); @@ -944,7 +982,8 @@ HAL_StatusTypeDef HAL_DAC_ConfigChannel(DAC_HandleTypeDef *hdac, DAC_ChannelConf /* Get the DAC CR value */ tmpreg1 = hdac->Instance->CR; /* Clear BOFFx, TENx, TSELx, WAVEx and MAMPx bits */ - tmpreg1 &= ~(((uint32_t)(DAC_CR_MAMP1 | DAC_CR_WAVE1 | DAC_CR_TSEL1 | DAC_CR_TEN1 | DAC_CR_BOFF1)) << (Channel & 0x10UL)); + tmpreg1 &= ~(((uint32_t)(DAC_CR_MAMP1 | DAC_CR_WAVE1 | DAC_CR_TSEL1 | DAC_CR_TEN1 | DAC_CR_BOFF1)) + << (Channel & 0x10UL)); /* Configure for the selected DAC channel: buffer output, trigger */ /* Set TSELx and TENx bits according to DAC_Trigger value */ /* Set BOFFx bit according to DAC_OutputBuffer value */ @@ -963,7 +1002,7 @@ HAL_StatusTypeDef HAL_DAC_ConfigChannel(DAC_HandleTypeDef *hdac, DAC_ChannelConf __HAL_UNLOCK(hdac); /* Return function status */ - return HAL_OK; + return status; } /** @@ -992,7 +1031,7 @@ HAL_StatusTypeDef HAL_DAC_ConfigChannel(DAC_HandleTypeDef *hdac, DAC_ChannelConf * the configuration information for the specified DAC. * @retval HAL state */ -HAL_DAC_StateTypeDef HAL_DAC_GetState(DAC_HandleTypeDef *hdac) +HAL_DAC_StateTypeDef HAL_DAC_GetState(const DAC_HandleTypeDef *hdac) { /* Return DAC handle state */ return hdac->State; @@ -1005,7 +1044,7 @@ HAL_DAC_StateTypeDef HAL_DAC_GetState(DAC_HandleTypeDef *hdac) * the configuration information for the specified DAC. 
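/* Editor's note: illustrative sketch only, not part of the upstream patch. It shows the
   usual sequence behind the APIs changed above: configure a triggered channel, then stream
   a waveform with HAL_DAC_Start_DMA(); the newly const-qualified pData parameter lets the
   table live in flash. The handle hdac, the TIM6 trigger choice and the table contents are
   assumptions made for this example. */
extern DAC_HandleTypeDef hdac;
static const uint32_t waveTable[32] = {0U};       /* 12-bit samples, filled by the application */

static void Example_DacWaveformDma(void)
{
  DAC_ChannelConfTypeDef sConfig = {0};

  sConfig.DAC_Trigger      = DAC_TRIGGER_T6_TRGO; /* assumed TIM6 TRGO sets the update rate */
  sConfig.DAC_OutputBuffer = DAC_OUTPUTBUFFER_ENABLE;

  if (HAL_DAC_ConfigChannel(&hdac, &sConfig, DAC_CHANNEL_1) != HAL_OK)
  {
    return;
  }

  (void)HAL_DAC_Start_DMA(&hdac, DAC_CHANNEL_1, waveTable, 32U, DAC_ALIGN_12B_R);
}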
* @retval DAC Error Code */ -uint32_t HAL_DAC_GetError(DAC_HandleTypeDef *hdac) +uint32_t HAL_DAC_GetError(const DAC_HandleTypeDef *hdac) { return hdac->ErrorCode; } @@ -1028,7 +1067,9 @@ uint32_t HAL_DAC_GetError(DAC_HandleTypeDef *hdac) #if (USE_HAL_DAC_REGISTER_CALLBACKS == 1) /** * @brief Register a User DAC Callback - * To be used instead of the weak (surcharged) predefined callback + * To be used instead of the weak (overridden) predefined callback + * @note The HAL_DAC_RegisterCallback() may be called before HAL_DAC_Init() in HAL_DAC_STATE_RESET to register + * callbacks for HAL_DAC_MSPINIT_CB_ID and HAL_DAC_MSPDEINIT_CB_ID * @param hdac DAC handle * @param CallbackID ID of the callback to be registered * This parameter can be one of the following values: @@ -1052,6 +1093,12 @@ HAL_StatusTypeDef HAL_DAC_RegisterCallback(DAC_HandleTypeDef *hdac, HAL_DAC_Call { HAL_StatusTypeDef status = HAL_OK; + /* Check the DAC peripheral handle */ + if (hdac == NULL) + { + return HAL_ERROR; + } + if (pCallback == NULL) { /* Update the error code */ @@ -1059,9 +1106,6 @@ HAL_StatusTypeDef HAL_DAC_RegisterCallback(DAC_HandleTypeDef *hdac, HAL_DAC_Call return HAL_ERROR; } - /* Process locked */ - __HAL_LOCK(hdac); - if (hdac->State == HAL_DAC_STATE_READY) { switch (CallbackID) @@ -1132,14 +1176,14 @@ HAL_StatusTypeDef HAL_DAC_RegisterCallback(DAC_HandleTypeDef *hdac, HAL_DAC_Call status = HAL_ERROR; } - /* Release Lock */ - __HAL_UNLOCK(hdac); return status; } /** * @brief Unregister a User DAC Callback - * DAC Callback is redirected to the weak (surcharged) predefined callback + * DAC Callback is redirected to the weak (overridden) predefined callback + * @note The HAL_DAC_UnRegisterCallback() may be called before HAL_DAC_Init() in HAL_DAC_STATE_RESET to un-register + * callbacks for HAL_DAC_MSPINIT_CB_ID and HAL_DAC_MSPDEINIT_CB_ID * @param hdac DAC handle * @param CallbackID ID of the callback to be unregistered * This parameter can be one of the following values: @@ -1160,8 +1204,11 @@ HAL_StatusTypeDef HAL_DAC_UnRegisterCallback(DAC_HandleTypeDef *hdac, HAL_DAC_Ca { HAL_StatusTypeDef status = HAL_OK; - /* Process locked */ - __HAL_LOCK(hdac); + /* Check the DAC peripheral handle */ + if (hdac == NULL) + { + return HAL_ERROR; + } if (hdac->State == HAL_DAC_STATE_READY) { @@ -1247,8 +1294,6 @@ HAL_StatusTypeDef HAL_DAC_UnRegisterCallback(DAC_HandleTypeDef *hdac, HAL_DAC_Ca status = HAL_ERROR; } - /* Release Lock */ - __HAL_UNLOCK(hdac); return status; } #endif /* USE_HAL_DAC_REGISTER_CALLBACKS */ @@ -1334,9 +1379,6 @@ void DAC_DMAErrorCh1(DMA_HandleTypeDef *hdma) #endif /* DAC */ #endif /* HAL_DAC_MODULE_ENABLED */ - /** * @} */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dac_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dac_ex.c index 4a05998f..6e5ab512 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dac_ex.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dac_ex.c @@ -7,20 +7,22 @@ * functionalities of the DAC peripheral. * * + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** @verbatim ============================================================================== ##### How to use this driver ##### ============================================================================== [..] - - *** Dual mode IO operation *** - ============================== - [..] - (+) When Dual mode is enabled (i.e. DAC Channel1 and Channel2 are used simultaneously) : - Use HAL_DACEx_DualGetValue() to get digital data to be converted and use - HAL_DACEx_DualSetValue() to set digital value to converted simultaneously in - Channel 1 and Channel 2. - *** Signal generation operation *** =================================== [..] @@ -28,17 +30,6 @@ (+) Use HAL_DACEx_NoiseWaveGenerate() to generate Noise signal. @endverbatim - ****************************************************************************** - * @attention - * - *
© Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * ****************************************************************************** */ @@ -61,6 +52,7 @@ /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ + /* Private macro -------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /* Private function prototypes -----------------------------------------------*/ @@ -100,6 +92,12 @@ HAL_StatusTypeDef HAL_DACEx_DualStart(DAC_HandleTypeDef *hdac) { uint32_t tmp_swtrig = 0UL; + /* Check the DAC peripheral handle */ + if (hdac == NULL) + { + return HAL_ERROR; + } + /* Process locked */ __HAL_LOCK(hdac); @@ -141,6 +139,12 @@ HAL_StatusTypeDef HAL_DACEx_DualStart(DAC_HandleTypeDef *hdac) */ HAL_StatusTypeDef HAL_DACEx_DualStop(DAC_HandleTypeDef *hdac) { + /* Check the DAC peripheral handle */ + if (hdac == NULL) + { + return HAL_ERROR; + } + /* Disable the Peripheral */ __HAL_DAC_DISABLE(hdac, DAC_CHANNEL_1); @@ -180,6 +184,12 @@ HAL_StatusTypeDef HAL_DACEx_DualStop(DAC_HandleTypeDef *hdac) */ HAL_StatusTypeDef HAL_DACEx_TriangleWaveGenerate(DAC_HandleTypeDef *hdac, uint32_t Channel, uint32_t Amplitude) { + /* Check the DAC peripheral handle */ + if (hdac == NULL) + { + return HAL_ERROR; + } + /* Check the parameters */ assert_param(IS_DAC_CHANNEL(Channel)); assert_param(IS_DAC_LFSR_UNMASK_TRIANGLE_AMPLITUDE(Amplitude)); @@ -230,6 +240,12 @@ HAL_StatusTypeDef HAL_DACEx_TriangleWaveGenerate(DAC_HandleTypeDef *hdac, uint32 */ HAL_StatusTypeDef HAL_DACEx_NoiseWaveGenerate(DAC_HandleTypeDef *hdac, uint32_t Channel, uint32_t Amplitude) { + /* Check the DAC peripheral handle */ + if (hdac == NULL) + { + return HAL_ERROR; + } + /* Check the parameters */ assert_param(IS_DAC_CHANNEL(Channel)); assert_param(IS_DAC_LFSR_UNMASK_TRIANGLE_AMPLITUDE(Amplitude)); @@ -275,6 +291,12 @@ HAL_StatusTypeDef HAL_DACEx_DualSetValue(DAC_HandleTypeDef *hdac, uint32_t Align uint32_t data; uint32_t tmp; + /* Check the DAC peripheral handle */ + if (hdac == NULL) + { + return HAL_ERROR; + } + /* Check the parameters */ assert_param(IS_DAC_ALIGN(Alignment)); assert_param(IS_DAC_DATA(Data1)); @@ -391,7 +413,7 @@ __weak void HAL_DACEx_DMAUnderrunCallbackCh2(DAC_HandleTypeDef *hdac) * the configuration information for the specified DAC. * @retval The selected DAC channel data output value. */ -uint32_t HAL_DACEx_DualGetValue(DAC_HandleTypeDef *hdac) +uint32_t HAL_DACEx_DualGetValue(const DAC_HandleTypeDef *hdac) { uint32_t tmp = 0UL; @@ -492,5 +514,3 @@ void DAC_DMAErrorCh2(DMA_HandleTypeDef *hdma) /** * @} */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma.c index 69d848f3..3dbb4776 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma.c @@ -83,13 +83,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -199,12 +198,12 @@ HAL_StatusTypeDef HAL_DMA_Init(DMA_HandleTypeDef *hdma) assert_param(IS_DMA_MEMORY_BURST(hdma->Init.MemBurst)); assert_param(IS_DMA_PERIPHERAL_BURST(hdma->Init.PeriphBurst)); } - - /* Allocate lock resource */ - __HAL_UNLOCK(hdma); /* Change DMA peripheral state */ hdma->State = HAL_DMA_STATE_BUSY; + + /* Allocate lock resource */ + __HAL_UNLOCK(hdma); /* Disable the peripheral */ __HAL_DMA_DISABLE(hdma); @@ -550,12 +549,12 @@ HAL_StatusTypeDef HAL_DMA_Abort(DMA_HandleTypeDef *hdma) /* Update error code */ hdma->ErrorCode = HAL_DMA_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hdma); - /* Change the DMA state */ hdma->State = HAL_DMA_STATE_TIMEOUT; + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + return HAL_TIMEOUT; } } @@ -563,11 +562,11 @@ HAL_StatusTypeDef HAL_DMA_Abort(DMA_HandleTypeDef *hdma) /* Clear all interrupt flags at correct offset within the register */ regs->IFCR = 0x3FU << hdma->StreamIndex; - /* Process Unlocked */ - __HAL_UNLOCK(hdma); - /* Change the DMA state*/ hdma->State = HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); } return HAL_OK; } @@ -602,7 +601,7 @@ HAL_StatusTypeDef HAL_DMA_Abort_IT(DMA_HandleTypeDef *hdma) * @param hdma pointer to a DMA_HandleTypeDef structure that contains * the configuration information for the specified DMA Stream. * @param CompleteLevel Specifies the DMA level complete. - * @note The polling mode is kept in this version for legacy. it is recommanded to use the IT model instead. + * @note The polling mode is kept in this version for legacy. it is recommended to use the IT model instead. * This model could be used for debug purpose. * @note The HAL_DMA_PollForTransfer API cannot be used in circular and double buffering mode (automatic circular mode). * @param Timeout Timeout duration. 
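/* Editor's note: illustrative sketch only, not part of the upstream patch. Polling mode is
   kept for legacy use, as the note above says; this shows a blocking memory-to-memory
   transfer checked with HAL_DMA_PollForTransfer(). The handle hdma_memtomem and the buffers
   are assumptions made for this example. */
extern DMA_HandleTypeDef hdma_memtomem;
static uint32_t srcWords[64];
static uint32_t dstWords[64];

static void Example_DmaPolledCopy(void)
{
  if (HAL_DMA_Start(&hdma_memtomem, (uint32_t)srcWords, (uint32_t)dstWords, 64U) != HAL_OK)
  {
    return;
  }

  if (HAL_DMA_PollForTransfer(&hdma_memtomem, HAL_DMA_FULL_TRANSFER, 100U) != HAL_OK)
  {
    /* Timeout or transfer error: hdma_memtomem.ErrorCode holds the reason. */
  }
}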
@@ -657,13 +656,13 @@ HAL_StatusTypeDef HAL_DMA_PollForTransfer(DMA_HandleTypeDef *hdma, HAL_DMA_Level { /* Update error code */ hdma->ErrorCode = HAL_DMA_ERROR_TIMEOUT; - - /* Process Unlocked */ - __HAL_UNLOCK(hdma); /* Change the DMA state */ hdma->State = HAL_DMA_STATE_READY; + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + return HAL_TIMEOUT; } } @@ -708,12 +707,12 @@ HAL_StatusTypeDef HAL_DMA_PollForTransfer(DMA_HandleTypeDef *hdma, HAL_DMA_Level /* Clear the half transfer and transfer complete flags */ regs->IFCR = (DMA_FLAG_HTIF0_4 | DMA_FLAG_TCIF0_4) << hdma->StreamIndex; - /* Process Unlocked */ - __HAL_UNLOCK(hdma); - /* Change the DMA state */ hdma->State= HAL_DMA_STATE_READY; + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + return HAL_ERROR; } } @@ -724,10 +723,10 @@ HAL_StatusTypeDef HAL_DMA_PollForTransfer(DMA_HandleTypeDef *hdma, HAL_DMA_Level /* Clear the half transfer and transfer complete flags */ regs->IFCR = (DMA_FLAG_HTIF0_4 | DMA_FLAG_TCIF0_4) << hdma->StreamIndex; + hdma->State = HAL_DMA_STATE_READY; + /* Process Unlocked */ __HAL_UNLOCK(hdma); - - hdma->State = HAL_DMA_STATE_READY; } else { @@ -863,12 +862,12 @@ void HAL_DMA_IRQHandler(DMA_HandleTypeDef *hdma) /* Clear all interrupt flags at correct offset within the register */ regs->IFCR = 0x3FU << hdma->StreamIndex; - /* Process Unlocked */ - __HAL_UNLOCK(hdma); - /* Change the DMA state */ hdma->State = HAL_DMA_STATE_READY; + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + if(hdma->XferAbortCallback != NULL) { hdma->XferAbortCallback(hdma); @@ -905,11 +904,11 @@ void HAL_DMA_IRQHandler(DMA_HandleTypeDef *hdma) /* Disable the transfer complete interrupt */ hdma->Instance->CR &= ~(DMA_IT_TC); - /* Process Unlocked */ - __HAL_UNLOCK(hdma); - /* Change the DMA state */ hdma->State = HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); } if(hdma->XferCpltCallback != NULL) @@ -940,11 +939,11 @@ void HAL_DMA_IRQHandler(DMA_HandleTypeDef *hdma) } while((hdma->Instance->CR & DMA_SxCR_EN) != RESET); - /* Process Unlocked */ - __HAL_UNLOCK(hdma); - /* Change the DMA state */ hdma->State = HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); } if(hdma->XferErrorCallback != NULL) @@ -959,9 +958,9 @@ void HAL_DMA_IRQHandler(DMA_HandleTypeDef *hdma) * @brief Register callbacks * @param hdma pointer to a DMA_HandleTypeDef structure that contains * the configuration information for the specified DMA Stream. - * @param CallbackID User Callback identifer + * @param CallbackID User Callback identifier * a DMA_HandleTypeDef structure as parameter. - * @param pCallback pointer to private callbacsk function which has pointer to + * @param pCallback pointer to private callback function which has pointer to * a DMA_HandleTypeDef structure as parameter. * @retval HAL status */ @@ -1002,6 +1001,8 @@ HAL_StatusTypeDef HAL_DMA_RegisterCallback(DMA_HandleTypeDef *hdma, HAL_DMA_Call break; default: + /* Return error status */ + status = HAL_ERROR; break; } } @@ -1021,7 +1022,7 @@ HAL_StatusTypeDef HAL_DMA_RegisterCallback(DMA_HandleTypeDef *hdma, HAL_DMA_Call * @brief UnRegister callbacks * @param hdma pointer to a DMA_HandleTypeDef structure that contains * the configuration information for the specified DMA Stream. - * @param CallbackID User Callback identifer + * @param CallbackID User Callback identifier * a HAL_DMA_CallbackIDTypeDef ENUM as parameter. 
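/* Editor's note: illustrative sketch only, not part of the upstream patch. It demonstrates
   the callback-registration API documented above. The handle hdma_adc and the callback name
   are assumptions made for this example. */
extern DMA_HandleTypeDef hdma_adc;

static void MyDmaXferCpltCallback(DMA_HandleTypeDef *hdma_cb)
{
  (void)hdma_cb;                                  /* application handling of transfer complete */
}

static void Example_RegisterDmaCallback(void)
{
  (void)HAL_DMA_RegisterCallback(&hdma_adc, HAL_DMA_XFER_CPLT_CB_ID,
                                 MyDmaXferCpltCallback);
}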
* @retval HAL status */ @@ -1302,4 +1303,3 @@ static HAL_StatusTypeDef DMA_CheckFifoParam(DMA_HandleTypeDef *hdma) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma_ex.c index 6e073768..7167e77e 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma_ex.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_dma_ex.c @@ -25,13 +25,12 @@ ****************************************************************************** * @attention * - *
- * © Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -312,4 +311,3 @@ static void DMA_MultiBufferSetConfig(DMA_HandleTypeDef *hdma, uint32_t SrcAddres * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_eth.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_eth.c new file mode 100644 index 00000000..f1a00e3f --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_eth.c @@ -0,0 +1,3238 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_eth.c + * @author MCD Application Team + * @brief ETH HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Ethernet (ETH) peripheral: + * + Initialization and deinitialization functions + * + IO operation functions + * + Peripheral Control functions + * + Peripheral State and Errors functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The ETH HAL driver can be used as follows: + + (#)Declare a ETH_HandleTypeDef handle structure, for example: + ETH_HandleTypeDef heth; + + (#)Fill parameters of Init structure in heth handle + + (#)Call HAL_ETH_Init() API to initialize the Ethernet peripheral (MAC, DMA, ...) 
+ + (#)Initialize the ETH low level resources through the HAL_ETH_MspInit() API: + (##) Enable the Ethernet interface clock using + (+++) __HAL_RCC_ETH1MAC_CLK_ENABLE() + (+++) __HAL_RCC_ETH1TX_CLK_ENABLE() + (+++) __HAL_RCC_ETH1RX_CLK_ENABLE() + + (##) Initialize the related GPIO clocks + (##) Configure Ethernet pinout + (##) Configure Ethernet NVIC interrupt (in Interrupt mode) + + (#) Ethernet data reception is asynchronous, so call the following API + to start the listening mode: + (##) HAL_ETH_Start(): + This API starts the MAC and DMA transmission and reception process, + without enabling end of transfer interrupts, in this mode user + has to poll for data reception by calling HAL_ETH_ReadData() + (##) HAL_ETH_Start_IT(): + This API starts the MAC and DMA transmission and reception process, + end of transfer interrupts are enabled in this mode, + HAL_ETH_RxCpltCallback() will be executed when an Ethernet packet is received + + (#) When data is received user can call the following API to get received data: + (##) HAL_ETH_ReadData(): Read a received packet + + (#) For transmission path, two APIs are available: + (##) HAL_ETH_Transmit(): Transmit an ETH frame in blocking mode + (##) HAL_ETH_Transmit_IT(): Transmit an ETH frame in interrupt mode, + HAL_ETH_TxCpltCallback() will be executed when end of transfer occur + + (#) Communication with an external PHY device: + (##) HAL_ETH_ReadPHYRegister(): Read a register from an external PHY + (##) HAL_ETH_WritePHYRegister(): Write data to an external RHY register + + (#) Configure the Ethernet MAC after ETH peripheral initialization + (##) HAL_ETH_GetMACConfig(): Get MAC actual configuration into ETH_MACConfigTypeDef + (##) HAL_ETH_SetMACConfig(): Set MAC configuration based on ETH_MACConfigTypeDef + + (#) Configure the Ethernet DMA after ETH peripheral initialization + (##) HAL_ETH_GetDMAConfig(): Get DMA actual configuration into ETH_DMAConfigTypeDef + (##) HAL_ETH_SetDMAConfig(): Set DMA configuration based on ETH_DMAConfigTypeDef + + (#) Configure the Ethernet PTP after ETH peripheral initialization + (##) Define HAL_ETH_USE_PTP to use PTP APIs. + (##) HAL_ETH_PTP_GetConfig(): Get PTP actual configuration into ETH_PTP_ConfigTypeDef + (##) HAL_ETH_PTP_SetConfig(): Set PTP configuration based on ETH_PTP_ConfigTypeDef + (##) HAL_ETH_PTP_GetTime(): Get Seconds and Nanoseconds for the Ethernet PTP registers + (##) HAL_ETH_PTP_SetTime(): Set Seconds and Nanoseconds for the Ethernet PTP registers + (##) HAL_ETH_PTP_AddTimeOffset(): Add Seconds and Nanoseconds offset for the Ethernet PTP registers + (##) HAL_ETH_PTP_InsertTxTimestamp(): Insert Timestamp in transmission + (##) HAL_ETH_PTP_GetTxTimestamp(): Get transmission timestamp + (##) HAL_ETH_PTP_GetRxTimestamp(): Get reception timestamp + + -@- The ARP offload feature is not supported in this driver. + + -@- The PTP offload feature is not supported in this driver. + + *** Callback registration *** + ============================================= + + The compilation define USE_HAL_ETH_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + Use Function HAL_ETH_RegisterCallback() to register an interrupt callback. + + Function HAL_ETH_RegisterCallback() allows to register following callbacks: + (+) TxCpltCallback : Tx Complete Callback. + (+) RxCpltCallback : Rx Complete Callback. + (+) ErrorCallback : Error Callback. + (+) PMTCallback : Power Management Callback + (+) EEECallback : EEE Callback. 
+ (+) WakeUpCallback : Wake UP Callback + (+) MspInitCallback : MspInit Callback. + (+) MspDeInitCallback: MspDeInit Callback. + + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + + For specific callbacks RxAllocateCallback use dedicated register callbacks: + respectively HAL_ETH_RegisterRxAllocateCallback(). + + For specific callbacks RxLinkCallback use dedicated register callbacks: + respectively HAL_ETH_RegisterRxLinkCallback(). + + For specific callbacks TxFreeCallback use dedicated register callbacks: + respectively HAL_ETH_RegisterTxFreeCallback(). + + For specific callbacks TxPtpCallback use dedicated register callbacks: + respectively HAL_ETH_RegisterTxPtpCallback(). + + Use function HAL_ETH_UnRegisterCallback() to reset a callback to the default + weak function. + HAL_ETH_UnRegisterCallback takes as parameters the HAL peripheral handle, + and the Callback ID. + This function allows to reset following callbacks: + (+) TxCpltCallback : Tx Complete Callback. + (+) RxCpltCallback : Rx Complete Callback. + (+) ErrorCallback : Error Callback. + (+) PMTCallback : Power Management Callback + (+) EEECallback : EEE Callback. + (+) WakeUpCallback : Wake UP Callback + (+) MspInitCallback : MspInit Callback. + (+) MspDeInitCallback: MspDeInit Callback. + + For specific callbacks RxAllocateCallback use dedicated unregister callbacks: + respectively HAL_ETH_UnRegisterRxAllocateCallback(). + + For specific callbacks RxLinkCallback use dedicated unregister callbacks: + respectively HAL_ETH_UnRegisterRxLinkCallback(). + + For specific callbacks TxFreeCallback use dedicated unregister callbacks: + respectively HAL_ETH_UnRegisterTxFreeCallback(). + + For specific callbacks TxPtpCallback use dedicated unregister callbacks: + respectively HAL_ETH_UnRegisterTxPtpCallback(). + + By default, after the HAL_ETH_Init and when the state is HAL_ETH_STATE_RESET + all callbacks are set to the corresponding weak functions: + examples HAL_ETH_TxCpltCallback(), HAL_ETH_RxCpltCallback(). + Exception done for MspInit and MspDeInit functions that are + reset to the legacy weak function in the HAL_ETH_Init/ HAL_ETH_DeInit only when + these callbacks are null (not registered beforehand). + if not, MspInit or MspDeInit are not null, the HAL_ETH_Init/ HAL_ETH_DeInit + keep and use the user MspInit/MspDeInit callbacks (registered beforehand) + + Callbacks can be registered/unregistered in HAL_ETH_STATE_READY state only. + Exception done MspInit/MspDeInit that can be registered/unregistered + in HAL_ETH_STATE_READY or HAL_ETH_STATE_RESET state, + thus registered (user) MspInit/DeInit callbacks can be used during the Init/DeInit. + In that case first register the MspInit/MspDeInit user callbacks + using HAL_ETH_RegisterCallback() before calling HAL_ETH_DeInit + or HAL_ETH_Init function. + + When The compilation define USE_HAL_ETH_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available and all callbacks + are set to the corresponding weak functions. 
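A minimal registration sketch, assuming USE_HAL_ETH_REGISTER_CALLBACKS is set to 1 and the handle has already gone through HAL_ETH_Init() (MyTxDoneHook and EthUserSetup are hypothetical user functions):

  static void MyTxDoneHook(ETH_HandleTypeDef *heth)
  {
    /* e.g. signal the task that queued the frame */
  }

  void EthUserSetup(ETH_HandleTypeDef *heth)
  {
    /* Registration is accepted while the handle is in HAL_ETH_STATE_READY
       (i.e. after a successful HAL_ETH_Init()).                          */
    if (HAL_ETH_RegisterCallback(heth, HAL_ETH_TX_COMPLETE_CB_ID,
                                 MyTxDoneHook) != HAL_OK)
    {
      /* registration refused: wrong state or NULL callback pointer */
    }
  }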
+ + @endverbatim + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ +#ifdef HAL_ETH_MODULE_ENABLED + +#if defined(ETH) + +/** @defgroup ETH ETH + * @brief ETH HAL module driver + * @{ + */ + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup ETH_Private_Constants ETH Private Constants + * @{ + */ +#define ETH_MACCR_MASK 0xFFFB7F7CU +#define ETH_MACECR_MASK 0x3F077FFFU +#define ETH_MACFFR_MASK 0x800007FFU +#define ETH_MACWTR_MASK 0x0000010FU +#define ETH_MACTFCR_MASK 0xFFFF00F2U +#define ETH_MACRFCR_MASK 0x00000003U +#define ETH_MTLTQOMR_MASK 0x00000072U +#define ETH_MTLRQOMR_MASK 0x0000007BU + +#define ETH_DMAMR_MASK 0x00007802U +#define ETH_DMASBMR_MASK 0x0000D001U +#define ETH_DMACCR_MASK 0x00013FFFU +#define ETH_DMACTCR_MASK 0x003F1010U +#define ETH_DMACRCR_MASK 0x803F0000U +#define ETH_MACPMTCSR_MASK (ETH_MACPMTCSR_PD | ETH_MACPMTCSR_WFE | \ + ETH_MACPMTCSR_MPE | ETH_MACPMTCSR_GU) + +/* Timeout values */ +#define ETH_SWRESET_TIMEOUT 500U +#define ETH_MDIO_BUS_TIMEOUT 1000U + +#define ETH_DMARXDESC_ERRORS_MASK ((uint32_t)(ETH_DMARXDESC_DBE | ETH_DMARXDESC_RE | \ + ETH_DMARXDESC_OE | ETH_DMARXDESC_RWT |\ + ETH_DMARXDESC_LC | ETH_DMARXDESC_CE |\ + ETH_DMARXDESC_DE | ETH_DMARXDESC_IPV4HCE)) + +#define ETH_MAC_US_TICK 1000000U + +#define ETH_MACTSCR_MASK 0x0087FF2FU + +#define ETH_PTPTSHR_VALUE 0xFFFFFFFFU +#define ETH_PTPTSLR_VALUE 0xBB9ACA00U + +/* Ethernet MACMIIAR register Mask */ +#define ETH_MACMIIAR_CR_MASK 0xFFFFFFE3U + +/* Delay to wait when writing to some Ethernet registers */ +#define ETH_REG_WRITE_DELAY 0x00000001U + +/* ETHERNET MACCR register Mask */ +#define ETH_MACCR_CLEAR_MASK 0xFD20810FU + +/* ETHERNET MACFCR register Mask */ +#define ETH_MACFCR_CLEAR_MASK 0x0000FF41U + +/* ETHERNET DMAOMR register Mask */ +#define ETH_DMAOMR_CLEAR_MASK 0xF8DE3F23U + +/* ETHERNET MAC address offsets */ +#define ETH_MAC_ADDR_HBASE (uint32_t)(ETH_MAC_BASE + 0x40U) /* ETHERNET MAC address high offset */ +#define ETH_MAC_ADDR_LBASE (uint32_t)(ETH_MAC_BASE + 0x44U) /* ETHERNET MAC address low offset */ + +/* ETHERNET DMA Rx descriptors Frame length Shift */ +#define ETH_DMARXDESC_FRAMELENGTHSHIFT 16U +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup ETH_Private_Macros ETH Private Macros + * @{ + */ +/* Helper macros for TX descriptor handling */ +#define INCR_TX_DESC_INDEX(inx, offset) do {\ + (inx) += (offset);\ + if ((inx) >= (uint32_t)ETH_TX_DESC_CNT){\ + (inx) = ((inx) - (uint32_t)ETH_TX_DESC_CNT);}\ + } while (0) + +/* Helper macros for RX descriptor handling */ +#define INCR_RX_DESC_INDEX(inx, offset) do {\ + (inx) += (offset);\ + if ((inx) >= (uint32_t)ETH_RX_DESC_CNT){\ + (inx) = ((inx) - (uint32_t)ETH_RX_DESC_CNT);}\ + } while (0) +/** + * @} + */ +/* Private function prototypes -----------------------------------------------*/ +/** @defgroup ETH_Private_Functions ETH Private Functions + * @{ + */ +static void ETH_SetMACConfig(ETH_HandleTypeDef *heth, const ETH_MACConfigTypeDef *macconf); +static void ETH_SetDMAConfig(ETH_HandleTypeDef *heth, const ETH_DMAConfigTypeDef *dmaconf); +static void ETH_MACDMAConfig(ETH_HandleTypeDef *heth); +static void ETH_DMATxDescListInit(ETH_HandleTypeDef *heth); 
+static void ETH_DMARxDescListInit(ETH_HandleTypeDef *heth); +static uint32_t ETH_Prepare_Tx_Descriptors(ETH_HandleTypeDef *heth, const ETH_TxPacketConfigTypeDef *pTxConfig, + uint32_t ItMode); +static void ETH_UpdateDescriptor(ETH_HandleTypeDef *heth); +static void ETH_FlushTransmitFIFO(ETH_HandleTypeDef *heth); +static void ETH_MACAddressConfig(ETH_HandleTypeDef *heth, uint32_t MacAddr, uint8_t *Addr); + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +static void ETH_InitCallbacksToDefault(ETH_HandleTypeDef *heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ +/** + * @} + */ + +/* Exported functions ---------------------------------------------------------*/ +/** @defgroup ETH_Exported_Functions ETH Exported Functions + * @{ + */ + +/** @defgroup ETH_Exported_Functions_Group1 Initialization and deinitialization functions + * @brief Initialization and Configuration functions + * +@verbatim +=============================================================================== + ##### Initialization and Configuration functions ##### + =============================================================================== + [..] This subsection provides a set of functions allowing to initialize and + deinitialize the ETH peripheral: + + (+) User must Implement HAL_ETH_MspInit() function in which he configures + all related peripherals resources (CLOCK, GPIO and NVIC ). + + (+) Call the function HAL_ETH_Init() to configure the selected device with + the selected configuration: + (++) MAC address + (++) Media interface (MII or RMII) + (++) Rx DMA Descriptors Tab + (++) Tx DMA Descriptors Tab + (++) Length of Rx Buffers + + (+) Call the function HAL_ETH_DeInit() to restore the default configuration + of the selected ETH peripheral. + +@endverbatim + * @{ + */ + +/** + * @brief Initialize the Ethernet peripheral registers. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Init(ETH_HandleTypeDef *heth) +{ + uint32_t tickstart; + + if (heth == NULL) + { + return HAL_ERROR; + } + if (heth->gState == HAL_ETH_STATE_RESET) + { + heth->gState = HAL_ETH_STATE_BUSY; + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + + ETH_InitCallbacksToDefault(heth); + + if (heth->MspInitCallback == NULL) + { + heth->MspInitCallback = HAL_ETH_MspInit; + } + + /* Init the low level hardware */ + heth->MspInitCallback(heth); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC. 
*/ + HAL_ETH_MspInit(heth); + +#endif /* (USE_HAL_ETH_REGISTER_CALLBACKS) */ + } + + __HAL_RCC_SYSCFG_CLK_ENABLE(); + + /* Select MII or RMII Mode*/ + SYSCFG->PMC &= ~(SYSCFG_PMC_MII_RMII_SEL); + SYSCFG->PMC |= (uint32_t)heth->Init.MediaInterface; + /* Dummy read to sync SYSCFG with ETH */ + (void)SYSCFG->PMC; + + /* Ethernet Software reset */ + /* Set the SWR bit: resets all MAC subsystem internal registers and logic */ + /* After reset all the registers holds their respective reset values */ + SET_BIT(heth->Instance->DMABMR, ETH_DMABMR_SR); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait for software reset */ + while (READ_BIT(heth->Instance->DMABMR, ETH_DMABMR_SR) > 0U) + { + if (((HAL_GetTick() - tickstart) > ETH_SWRESET_TIMEOUT)) + { + /* Set Error Code */ + heth->ErrorCode = HAL_ETH_ERROR_TIMEOUT; + /* Set State as Error */ + heth->gState = HAL_ETH_STATE_ERROR; + /* Return Error */ + return HAL_ERROR; + } + } + + + /*------------------ MAC, MTL and DMA default Configuration ----------------*/ + ETH_MACDMAConfig(heth); + + + /*------------------ DMA Tx Descriptors Configuration ----------------------*/ + ETH_DMATxDescListInit(heth); + + /*------------------ DMA Rx Descriptors Configuration ----------------------*/ + ETH_DMARxDescListInit(heth); + + /*--------------------- ETHERNET MAC Address Configuration ------------------*/ + ETH_MACAddressConfig(heth, ETH_MAC_ADDRESS0, heth->Init.MACAddr); + + /* Disable MMC Interrupts */ + SET_BIT(heth->Instance->MACIMR, ETH_MACIMR_TSTIM | ETH_MACIMR_PMTIM); + + /* Disable Rx MMC Interrupts */ + SET_BIT(heth->Instance->MMCRIMR, ETH_MMCRIMR_RGUFM | ETH_MMCRIMR_RFAEM | \ + ETH_MMCRIMR_RFCEM); + + /* Disable Tx MMC Interrupts */ + SET_BIT(heth->Instance->MMCTIMR, ETH_MMCTIMR_TGFM | ETH_MMCTIMR_TGFMSCM | \ + ETH_MMCTIMR_TGFSCM); + + heth->ErrorCode = HAL_ETH_ERROR_NONE; + heth->gState = HAL_ETH_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the ETH peripheral. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_DeInit(ETH_HandleTypeDef *heth) +{ + /* Set the ETH peripheral state to BUSY */ + heth->gState = HAL_ETH_STATE_BUSY; + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + + if (heth->MspDeInitCallback == NULL) + { + heth->MspDeInitCallback = HAL_ETH_MspDeInit; + } + /* DeInit the low level hardware */ + heth->MspDeInitCallback(heth); +#else + + /* De-Init the low level hardware : GPIO, CLOCK, NVIC. */ + HAL_ETH_MspDeInit(heth); + +#endif /* (USE_HAL_ETH_REGISTER_CALLBACKS) */ + + /* Set ETH HAL state to Disabled */ + heth->gState = HAL_ETH_STATE_RESET; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Initializes the ETH MSP. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_MspInit(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes ETH MSP. 
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_MspDeInit(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_MspDeInit could be implemented in the user file + */ +} + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User ETH Callback + * To be used instead of the weak predefined callback + * @param heth eth handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_ETH_TX_COMPLETE_CB_ID Tx Complete Callback ID + * @arg @ref HAL_ETH_RX_COMPLETE_CB_ID Rx Complete Callback ID + * @arg @ref HAL_ETH_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_ETH_PMT_CB_ID Power Management Callback ID + * @arg @ref HAL_ETH_WAKEUP_CB_ID Wake UP Callback ID + * @arg @ref HAL_ETH_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_ETH_MSPDEINIT_CB_ID MspDeInit callback ID + * @param pCallback pointer to the Callback function + * @retval status + */ +HAL_StatusTypeDef HAL_ETH_RegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID, + pETH_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + heth->ErrorCode |= HAL_ETH_ERROR_INVALID_CALLBACK; + return HAL_ERROR; + } + + if (heth->gState == HAL_ETH_STATE_READY) + { + switch (CallbackID) + { + case HAL_ETH_TX_COMPLETE_CB_ID : + heth->TxCpltCallback = pCallback; + break; + + case HAL_ETH_RX_COMPLETE_CB_ID : + heth->RxCpltCallback = pCallback; + break; + + case HAL_ETH_ERROR_CB_ID : + heth->ErrorCallback = pCallback; + break; + + case HAL_ETH_PMT_CB_ID : + heth->PMTCallback = pCallback; + break; + + + case HAL_ETH_WAKEUP_CB_ID : + heth->WakeUpCallback = pCallback; + break; + + case HAL_ETH_MSPINIT_CB_ID : + heth->MspInitCallback = pCallback; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + heth->ErrorCode |= HAL_ETH_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (heth->gState == HAL_ETH_STATE_RESET) + { + switch (CallbackID) + { + case HAL_ETH_MSPINIT_CB_ID : + heth->MspInitCallback = pCallback; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + heth->ErrorCode |= HAL_ETH_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + heth->ErrorCode |= HAL_ETH_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + return status; +} + +/** + * @brief Unregister an ETH Callback + * ETH callback is redirected to the weak predefined callback + * @param heth eth handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_ETH_TX_COMPLETE_CB_ID Tx Complete Callback ID + * @arg @ref HAL_ETH_RX_COMPLETE_CB_ID Rx Complete Callback ID + * @arg @ref HAL_ETH_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_ETH_PMT_CB_ID Power Management Callback ID + * @arg @ref HAL_ETH_WAKEUP_CB_ID Wake UP Callback ID + * @arg @ref HAL_ETH_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_ETH_MSPDEINIT_CB_ID MspDeInit callback ID + * @retval 
status + */ +HAL_StatusTypeDef HAL_ETH_UnRegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (heth->gState == HAL_ETH_STATE_READY) + { + switch (CallbackID) + { + case HAL_ETH_TX_COMPLETE_CB_ID : + heth->TxCpltCallback = HAL_ETH_TxCpltCallback; + break; + + case HAL_ETH_RX_COMPLETE_CB_ID : + heth->RxCpltCallback = HAL_ETH_RxCpltCallback; + break; + + case HAL_ETH_ERROR_CB_ID : + heth->ErrorCallback = HAL_ETH_ErrorCallback; + break; + + case HAL_ETH_PMT_CB_ID : + heth->PMTCallback = HAL_ETH_PMTCallback; + break; + + + case HAL_ETH_WAKEUP_CB_ID : + heth->WakeUpCallback = HAL_ETH_WakeUpCallback; + break; + + case HAL_ETH_MSPINIT_CB_ID : + heth->MspInitCallback = HAL_ETH_MspInit; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = HAL_ETH_MspDeInit; + break; + + default : + /* Update the error code */ + heth->ErrorCode |= HAL_ETH_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (heth->gState == HAL_ETH_STATE_RESET) + { + switch (CallbackID) + { + case HAL_ETH_MSPINIT_CB_ID : + heth->MspInitCallback = HAL_ETH_MspInit; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = HAL_ETH_MspDeInit; + break; + + default : + /* Update the error code */ + heth->ErrorCode |= HAL_ETH_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + heth->ErrorCode |= HAL_ETH_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + return status; +} +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup ETH_Exported_Functions_Group2 IO operation functions + * @brief ETH Transmit and Receive functions + * +@verbatim + ============================================================================== + ##### IO operation functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to manage the ETH + data transfer. 
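A minimal sketch of the polled receive path, assuming HAL_ETH_Start() has been called beforehand and ProcessFrame() is a hypothetical user consumer; the buffer handling behind HAL_ETH_ReadData() relies on the HAL_ETH_RxAllocateCallback()/HAL_ETH_RxLinkCallback() hooks described later in this file:

  void EthPollOnce(ETH_HandleTypeDef *heth)
  {
    void *frame = NULL;

    if (HAL_ETH_ReadData(heth, &frame) == HAL_OK)
    {
      ProcessFrame(frame);   /* hypothetical user function */
    }
    /* HAL_ERROR here simply means no complete packet is pending yet
       (or the peripheral has not been started).                     */
  }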
+ +@endverbatim + * @{ + */ + +/** + * @brief Enables Ethernet MAC and DMA reception and transmission + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Start(ETH_HandleTypeDef *heth) +{ + uint32_t tmpreg1; + + if (heth->gState == HAL_ETH_STATE_READY) + { + heth->gState = HAL_ETH_STATE_BUSY; + + /* Set number of descriptors to build */ + heth->RxDescList.RxBuildDescCnt = ETH_RX_DESC_CNT; + + /* Build all descriptors */ + ETH_UpdateDescriptor(heth); + + /* Enable the MAC transmission */ + SET_BIT(heth->Instance->MACCR, ETH_MACCR_TE); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /* Enable the MAC reception */ + SET_BIT(heth->Instance->MACCR, ETH_MACCR_RE); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /* Flush Transmit FIFO */ + ETH_FlushTransmitFIFO(heth); + + /* Enable the DMA transmission */ + SET_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_ST); + + /* Enable the DMA reception */ + SET_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_SR); + + heth->gState = HAL_ETH_STATE_STARTED; + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Enables Ethernet MAC and DMA reception/transmission in Interrupt mode + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Start_IT(ETH_HandleTypeDef *heth) +{ + uint32_t tmpreg1; + + if (heth->gState == HAL_ETH_STATE_READY) + { + heth->gState = HAL_ETH_STATE_BUSY; + + /* save IT mode to ETH Handle */ + heth->RxDescList.ItMode = 1U; + + /* Set number of descriptors to build */ + heth->RxDescList.RxBuildDescCnt = ETH_RX_DESC_CNT; + + /* Build all descriptors */ + ETH_UpdateDescriptor(heth); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /* Enable the DMA transmission */ + SET_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_ST); + + /* Enable the DMA reception */ + SET_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_SR); + + /* Flush Transmit FIFO */ + ETH_FlushTransmitFIFO(heth); + + + /* Enable the MAC transmission */ + SET_BIT(heth->Instance->MACCR, ETH_MACCR_TE); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /* Enable the MAC reception */ + SET_BIT(heth->Instance->MACCR, ETH_MACCR_RE); + + /* Enable ETH DMA interrupts: + - Tx complete interrupt + - Rx complete interrupt + - Fatal bus interrupt + */ + __HAL_ETH_DMA_ENABLE_IT(heth, (ETH_DMAIER_NISE | ETH_DMAIER_RIE | ETH_DMAIER_TIE | + ETH_DMAIER_FBEIE | ETH_DMAIER_AISE | ETH_DMAIER_RBUIE)); + + heth->gState = HAL_ETH_STATE_STARTED; + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Stop Ethernet MAC and DMA reception/transmission + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information 
for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Stop(ETH_HandleTypeDef *heth) +{ + uint32_t tmpreg1; + + if (heth->gState == HAL_ETH_STATE_STARTED) + { + /* Set the ETH peripheral state to BUSY */ + heth->gState = HAL_ETH_STATE_BUSY; + + /* Disable the DMA transmission */ + CLEAR_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_ST); + + /* Disable the DMA reception */ + CLEAR_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_SR); + + /* Disable the MAC reception */ + CLEAR_BIT(heth->Instance->MACCR, ETH_MACCR_RE); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /* Flush Transmit FIFO */ + ETH_FlushTransmitFIFO(heth); + + /* Disable the MAC transmission */ + CLEAR_BIT(heth->Instance->MACCR, ETH_MACCR_TE); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + heth->gState = HAL_ETH_STATE_READY; + + /* Return function status */ + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Stop Ethernet MAC and DMA reception/transmission in Interrupt mode + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Stop_IT(ETH_HandleTypeDef *heth) +{ + ETH_DMADescTypeDef *dmarxdesc; + uint32_t descindex; + uint32_t tmpreg1; + + if (heth->gState == HAL_ETH_STATE_STARTED) + { + /* Set the ETH peripheral state to BUSY */ + heth->gState = HAL_ETH_STATE_BUSY; + + __HAL_ETH_DMA_DISABLE_IT(heth, (ETH_DMAIER_NISE | ETH_DMAIER_RIE | ETH_DMAIER_TIE | + ETH_DMAIER_FBEIE | ETH_DMAIER_AISE | ETH_DMAIER_RBUIE)); + + /* Disable the DMA transmission */ + CLEAR_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_ST); + + /* Disable the DMA reception */ + CLEAR_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_SR); + + /* Disable the MAC reception */ + CLEAR_BIT(heth->Instance->MACCR, ETH_MACCR_RE); + + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /* Flush Transmit FIFO */ + ETH_FlushTransmitFIFO(heth); + + /* Disable the MAC transmission */ + CLEAR_BIT(heth->Instance->MACCR, ETH_MACCR_TE); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /* Clear IOC bit to all Rx descriptors */ + for (descindex = 0; descindex < (uint32_t)ETH_RX_DESC_CNT; descindex++) + { + dmarxdesc = (ETH_DMADescTypeDef *)heth->RxDescList.RxDesc[descindex]; + SET_BIT(dmarxdesc->DESC1, ETH_DMARXDESC_DIC); + } + + heth->RxDescList.ItMode = 0U; + + heth->gState = HAL_ETH_STATE_READY; + + /* Return function status */ + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Sends an Ethernet Packet in polling mode. 
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pTxConfig: Hold the configuration of packet to be transmitted + * @param Timeout: timeout value + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Transmit(ETH_HandleTypeDef *heth, ETH_TxPacketConfigTypeDef *pTxConfig, uint32_t Timeout) +{ + uint32_t tickstart; + ETH_DMADescTypeDef *dmatxdesc; + + if (pTxConfig == NULL) + { + heth->ErrorCode |= HAL_ETH_ERROR_PARAM; + return HAL_ERROR; + } + + if (heth->gState == HAL_ETH_STATE_STARTED) + { + /* Config DMA Tx descriptor by Tx Packet info */ + if (ETH_Prepare_Tx_Descriptors(heth, pTxConfig, 0) != HAL_ETH_ERROR_NONE) + { + /* Set the ETH error code */ + heth->ErrorCode |= HAL_ETH_ERROR_BUSY; + return HAL_ERROR; + } + + /* Ensure completion of descriptor preparation before transmission start */ + __DSB(); + + dmatxdesc = (ETH_DMADescTypeDef *)(&heth->TxDescList)->TxDesc[heth->TxDescList.CurTxDesc]; + + /* Incr current tx desc index */ + INCR_TX_DESC_INDEX(heth->TxDescList.CurTxDesc, 1U); + + /* Start transmission */ + /* issue a poll command to Tx DMA by writing address of next immediate free descriptor */ + WRITE_REG(heth->Instance->DMATPDR, (uint32_t)(heth->TxDescList.TxDesc[heth->TxDescList.CurTxDesc])); + + tickstart = HAL_GetTick(); + + /* Wait for data to be transmitted or timeout occurred */ + while ((dmatxdesc->DESC0 & ETH_DMATXDESC_OWN) != (uint32_t)RESET) + { + if ((heth->Instance->DMASR & ETH_DMASR_FBES) != (uint32_t)RESET) + { + heth->ErrorCode |= HAL_ETH_ERROR_DMA; + heth->DMAErrorCode = heth->Instance->DMASR; + /* Return function status */ + return HAL_ERROR; + } + + /* Check for the Timeout */ + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - tickstart) > Timeout) || (Timeout == 0U)) + { + heth->ErrorCode |= HAL_ETH_ERROR_TIMEOUT; + /* Clear TX descriptor so that we can proceed */ + dmatxdesc->DESC0 = (ETH_DMATXDESC_FS | ETH_DMATXDESC_LS); + return HAL_ERROR; + } + } + } + + /* Return function status */ + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Sends an Ethernet Packet in interrupt mode. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pTxConfig: Hold the configuration of packet to be transmitted + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Transmit_IT(ETH_HandleTypeDef *heth, ETH_TxPacketConfigTypeDef *pTxConfig) +{ + if (pTxConfig == NULL) + { + heth->ErrorCode |= HAL_ETH_ERROR_PARAM; + return HAL_ERROR; + } + + if (heth->gState == HAL_ETH_STATE_STARTED) + { + /* Save the packet pointer to release. 
*/ + heth->TxDescList.CurrentPacketAddress = (uint32_t *)pTxConfig->pData; + + /* Config DMA Tx descriptor by Tx Packet info */ + if (ETH_Prepare_Tx_Descriptors(heth, pTxConfig, 1) != HAL_ETH_ERROR_NONE) + { + heth->ErrorCode |= HAL_ETH_ERROR_BUSY; + return HAL_ERROR; + } + + /* Ensure completion of descriptor preparation before transmission start */ + __DSB(); + + /* Incr current tx desc index */ + INCR_TX_DESC_INDEX(heth->TxDescList.CurTxDesc, 1U); + + /* Start transmission */ + /* issue a poll command to Tx DMA by writing address of next immediate free descriptor */ + if (((heth->Instance)->DMASR & ETH_DMASR_TBUS) != (uint32_t)RESET) + { + /* Clear TBUS ETHERNET DMA flag */ + (heth->Instance)->DMASR = ETH_DMASR_TBUS; + /* Resume DMA transmission*/ + (heth->Instance)->DMATPDR = 0U; + } + + return HAL_OK; + + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Read a received packet. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pAppBuff: Pointer to an application buffer to receive the packet. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_ReadData(ETH_HandleTypeDef *heth, void **pAppBuff) +{ + uint32_t descidx; + ETH_DMADescTypeDef *dmarxdesc; + uint32_t desccnt = 0U; + uint32_t desccntmax; + uint32_t bufflength; + uint8_t rxdataready = 0U; + + if (pAppBuff == NULL) + { + heth->ErrorCode |= HAL_ETH_ERROR_PARAM; + return HAL_ERROR; + } + + if (heth->gState != HAL_ETH_STATE_STARTED) + { + return HAL_ERROR; + } + + descidx = heth->RxDescList.RxDescIdx; + dmarxdesc = (ETH_DMADescTypeDef *)heth->RxDescList.RxDesc[descidx]; + desccntmax = ETH_RX_DESC_CNT - heth->RxDescList.RxBuildDescCnt; + + /* Check if descriptor is not owned by DMA */ + while ((READ_BIT(dmarxdesc->DESC0, ETH_DMARXDESC_OWN) == (uint32_t)RESET) && (desccnt < desccntmax) + && (rxdataready == 0U)) + { + if (READ_BIT(dmarxdesc->DESC0, ETH_DMARXDESC_LS) != (uint32_t)RESET) + { + /* Get timestamp high */ + heth->RxDescList.TimeStamp.TimeStampHigh = dmarxdesc->DESC7; + /* Get timestamp low */ + heth->RxDescList.TimeStamp.TimeStampLow = dmarxdesc->DESC6; + } + if ((READ_BIT(dmarxdesc->DESC0, ETH_DMARXDESC_FS) != (uint32_t)RESET) || (heth->RxDescList.pRxStart != NULL)) + { + /* Check first descriptor */ + if (READ_BIT(dmarxdesc->DESC0, ETH_DMARXDESC_FS) != (uint32_t)RESET) + { + heth->RxDescList.RxDescCnt = 0; + heth->RxDescList.RxDataLength = 0; + } + + /* Get the Frame Length of the received packet: substruct 4 bytes of the CRC */ + bufflength = ((dmarxdesc->DESC0 & ETH_DMARXDESC_FL) >> ETH_DMARXDESC_FRAMELENGTHSHIFT); + + /* Check if last descriptor */ + if (READ_BIT(dmarxdesc->DESC0, ETH_DMARXDESC_LS) != (uint32_t)RESET) + { + /* Save Last descriptor index */ + heth->RxDescList.pRxLastRxDesc = dmarxdesc->DESC0; + + /* Packet ready */ + rxdataready = 1; + } + + /* Link data */ + WRITE_REG(dmarxdesc->BackupAddr0, dmarxdesc->DESC2); +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /*Call registered Link callback*/ + heth->rxLinkCallback(&heth->RxDescList.pRxStart, &heth->RxDescList.pRxEnd, + (uint8_t *)dmarxdesc->BackupAddr0, bufflength); +#else + /* Link callback */ + HAL_ETH_RxLinkCallback(&heth->RxDescList.pRxStart, &heth->RxDescList.pRxEnd, + (uint8_t *)dmarxdesc->BackupAddr0, (uint16_t) bufflength); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + heth->RxDescList.RxDescCnt++; + heth->RxDescList.RxDataLength += bufflength; + + /* Clear buffer pointer */ + dmarxdesc->BackupAddr0 = 0; + } + + /* Increment current rx descriptor index 
*/ + INCR_RX_DESC_INDEX(descidx, 1U); + /* Get current descriptor address */ + dmarxdesc = (ETH_DMADescTypeDef *)heth->RxDescList.RxDesc[descidx]; + desccnt++; + } + + heth->RxDescList.RxBuildDescCnt += desccnt; + if ((heth->RxDescList.RxBuildDescCnt) != 0U) + { + /* Update Descriptors */ + ETH_UpdateDescriptor(heth); + } + + heth->RxDescList.RxDescIdx = descidx; + + if (rxdataready == 1U) + { + /* Return received packet */ + *pAppBuff = heth->RxDescList.pRxStart; + /* Reset first element */ + heth->RxDescList.pRxStart = NULL; + + return HAL_OK; + } + + /* Packet not ready */ + return HAL_ERROR; +} + +/** + * @brief This function gives back Rx Desc of the last received Packet + * to the DMA, so ETH DMA will be able to use these descriptors + * to receive next Packets. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +static void ETH_UpdateDescriptor(ETH_HandleTypeDef *heth) +{ + uint32_t descidx; + uint32_t tailidx; + uint32_t desccount; + ETH_DMADescTypeDef *dmarxdesc; + uint8_t *buff = NULL; + uint8_t allocStatus = 1U; + + descidx = heth->RxDescList.RxBuildDescIdx; + dmarxdesc = (ETH_DMADescTypeDef *)heth->RxDescList.RxDesc[descidx]; + desccount = heth->RxDescList.RxBuildDescCnt; + + while ((desccount > 0U) && (allocStatus != 0U)) + { + /* Check if a buffer's attached the descriptor */ + if (READ_REG(dmarxdesc->BackupAddr0) == 0U) + { + /* Get a new buffer. */ +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /*Call registered Allocate callback*/ + heth->rxAllocateCallback(&buff); +#else + /* Allocate callback */ + HAL_ETH_RxAllocateCallback(&buff); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + if (buff == NULL) + { + allocStatus = 0U; + } + else + { + WRITE_REG(dmarxdesc->BackupAddr0, (uint32_t)buff); + WRITE_REG(dmarxdesc->DESC2, (uint32_t)buff); + } + } + + if (allocStatus != 0U) + { + if (heth->RxDescList.ItMode == 0U) + { + WRITE_REG(dmarxdesc->DESC1, heth->Init.RxBuffLen | ETH_DMARXDESC_DIC | ETH_DMARXDESC_RCH); + } + else + { + WRITE_REG(dmarxdesc->DESC1, heth->Init.RxBuffLen | ETH_DMARXDESC_RCH); + } + + SET_BIT(dmarxdesc->DESC0, ETH_DMARXDESC_OWN); + + /* Increment current rx descriptor index */ + INCR_RX_DESC_INDEX(descidx, 1U); + /* Get current descriptor address */ + dmarxdesc = (ETH_DMADescTypeDef *)heth->RxDescList.RxDesc[descidx]; + desccount--; + } + } + + if (heth->RxDescList.RxBuildDescCnt != desccount) + { + /* Set the tail pointer index */ + tailidx = (descidx + 1U) % ETH_RX_DESC_CNT; + + /* DMB instruction to avoid race condition */ + __DMB(); + + /* Set the Tail pointer address */ + WRITE_REG(heth->Instance->DMARPDR, ((uint32_t)(heth->Init.RxDesc + (tailidx)))); + + heth->RxDescList.RxBuildDescIdx = descidx; + heth->RxDescList.RxBuildDescCnt = desccount; + } +} + +/** + * @brief Register the Rx alloc callback. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param rxAllocateCallback: pointer to function to alloc buffer + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_RegisterRxAllocateCallback(ETH_HandleTypeDef *heth, + pETH_rxAllocateCallbackTypeDef rxAllocateCallback) +{ + if (rxAllocateCallback == NULL) + { + /* No buffer to save */ + return HAL_ERROR; + } + + /* Set function to allocate buffer */ + heth->rxAllocateCallback = rxAllocateCallback; + + return HAL_OK; +} + +/** + * @brief Unregister the Rx alloc callback. 
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_UnRegisterRxAllocateCallback(ETH_HandleTypeDef *heth) +{ + /* Set function to allocate buffer */ + heth->rxAllocateCallback = HAL_ETH_RxAllocateCallback; + + return HAL_OK; +} + +/** + * @brief Rx Allocate callback. + * @param buff: pointer to allocated buffer + * @retval None + */ +__weak void HAL_ETH_RxAllocateCallback(uint8_t **buff) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(buff); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_RxAllocateCallback could be implemented in the user file + */ +} + +/** + * @brief Rx Link callback. + * @param pStart: pointer to packet start + * @param pEnd: pointer to packet end + * @param buff: pointer to received data + * @param Length: received data length + * @retval None + */ +__weak void HAL_ETH_RxLinkCallback(void **pStart, void **pEnd, uint8_t *buff, uint16_t Length) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(pStart); + UNUSED(pEnd); + UNUSED(buff); + UNUSED(Length); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_RxLinkCallback could be implemented in the user file + */ +} + +/** + * @brief Set the Rx link data function. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param rxLinkCallback: pointer to function to link data + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_RegisterRxLinkCallback(ETH_HandleTypeDef *heth, pETH_rxLinkCallbackTypeDef rxLinkCallback) +{ + if (rxLinkCallback == NULL) + { + /* No buffer to save */ + return HAL_ERROR; + } + + /* Set function to link data */ + heth->rxLinkCallback = rxLinkCallback; + + return HAL_OK; +} + +/** + * @brief Unregister the Rx link callback. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_UnRegisterRxLinkCallback(ETH_HandleTypeDef *heth) +{ + /* Set function to allocate buffer */ + heth->rxLinkCallback = HAL_ETH_RxLinkCallback; + + return HAL_OK; +} + +/** + * @brief Get the error state of the last received packet. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pErrorCode: pointer to uint32_t to hold the error code + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_GetRxDataErrorCode(const ETH_HandleTypeDef *heth, uint32_t *pErrorCode) +{ + /* Get error bits. */ + *pErrorCode = READ_BIT(heth->RxDescList.pRxLastRxDesc, ETH_DMARXDESC_ERRORS_MASK); + + return HAL_OK; +} + +/** + * @brief Set the Tx free function. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param txFreeCallback: pointer to function to release the packet + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_RegisterTxFreeCallback(ETH_HandleTypeDef *heth, pETH_txFreeCallbackTypeDef txFreeCallback) +{ + if (txFreeCallback == NULL) + { + /* No buffer to save */ + return HAL_ERROR; + } + + /* Set function to free transmmitted packet */ + heth->txFreeCallback = txFreeCallback; + + return HAL_OK; +} + +/** + * @brief Unregister the Tx free callback. 
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_UnRegisterTxFreeCallback(ETH_HandleTypeDef *heth) +{ + /* Set function to allocate buffer */ + heth->txFreeCallback = HAL_ETH_TxFreeCallback; + + return HAL_OK; +} + +/** + * @brief Tx Free callback. + * @param buff: pointer to buffer to free + * @retval None + */ +__weak void HAL_ETH_TxFreeCallback(uint32_t *buff) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(buff); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_TxFreeCallback could be implemented in the user file + */ +} + +/** + * @brief Release transmitted Tx packets. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_ReleaseTxPacket(ETH_HandleTypeDef *heth) +{ + ETH_TxDescListTypeDef *dmatxdesclist = &heth->TxDescList; + uint32_t numOfBuf = dmatxdesclist->BuffersInUse; + uint32_t idx = dmatxdesclist->releaseIndex; + uint8_t pktTxStatus = 1U; + uint8_t pktInUse; +#ifdef HAL_ETH_USE_PTP + ETH_TimeStampTypeDef *timestamp = &heth->TxTimestamp; +#endif /* HAL_ETH_USE_PTP */ + + /* Loop through buffers in use. */ + while ((numOfBuf != 0U) && (pktTxStatus != 0U)) + { + pktInUse = 1U; + numOfBuf--; + /* If no packet, just examine the next packet. */ + if (dmatxdesclist->PacketAddress[idx] == NULL) + { + /* No packet in use, skip to next. */ + INCR_TX_DESC_INDEX(idx, 1U); + pktInUse = 0U; + } + + if (pktInUse != 0U) + { + /* Determine if the packet has been transmitted. */ + if ((heth->Init.TxDesc[idx].DESC0 & ETH_DMATXDESC_OWN) == 0U) + { +#ifdef HAL_ETH_USE_PTP + if ((heth->Init.TxDesc[idx].DESC3 & ETH_DMATXDESC_LS) + && (heth->Init.TxDesc[idx].DESC3 & ETH_DMATXDESC_TTSS)) + { + /* Get timestamp low */ + timestamp->TimeStampLow = heth->Init.TxDesc[idx].DESC6; + /* Get timestamp high */ + timestamp->TimeStampHigh = heth->Init.TxDesc[idx].DESC7; + } + else + { + timestamp->TimeStampHigh = timestamp->TimeStampLow = UINT32_MAX; + } +#endif /* HAL_ETH_USE_PTP */ + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /*Call registered callbacks*/ +#ifdef HAL_ETH_USE_PTP + /* Handle Ptp */ + if (timestamp->TimeStampHigh != UINT32_MAX && timestamp->TimeStampLow != UINT32_MAX) + { + heth->txPtpCallback(dmatxdesclist->PacketAddress[idx], timestamp); + } +#endif /* HAL_ETH_USE_PTP */ + /* Release the packet. */ + heth->txFreeCallback(dmatxdesclist->PacketAddress[idx]); +#else + /* Call callbacks */ +#ifdef HAL_ETH_USE_PTP + /* Handle Ptp */ + if (timestamp->TimeStampHigh != UINT32_MAX && timestamp->TimeStampLow != UINT32_MAX) + { + HAL_ETH_TxPtpCallback(dmatxdesclist->PacketAddress[idx], timestamp); + } +#endif /* HAL_ETH_USE_PTP */ + /* Release the packet. */ + HAL_ETH_TxFreeCallback(dmatxdesclist->PacketAddress[idx]); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + + /* Clear the entry in the in-use array. */ + dmatxdesclist->PacketAddress[idx] = NULL; + + /* Update the transmit relesae index and number of buffers in use. */ + INCR_TX_DESC_INDEX(idx, 1U); + dmatxdesclist->BuffersInUse = numOfBuf; + dmatxdesclist->releaseIndex = idx; + } + else + { + /* Get out of the loop! */ + pktTxStatus = 0U; + } + } + } + return HAL_OK; +} + +#ifdef HAL_ETH_USE_PTP +/** + * @brief Set the Ethernet PTP configuration. 
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param ptpconfig: pointer to a ETH_PTP_ConfigTypeDef structure that contains + * the configuration information for PTP + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_SetConfig(ETH_HandleTypeDef *heth, ETH_PTP_ConfigTypeDef *ptpconfig) +{ + uint32_t tmpTSCR; + ETH_TimeTypeDef time; + + if (ptpconfig == NULL) + { + return HAL_ERROR; + } + + tmpTSCR = ptpconfig->Timestamp | + ((uint32_t)ptpconfig->TimestampUpdate << ETH_PTPTSCR_TSFCU_Pos) | + ((uint32_t)ptpconfig->TimestampAll << ETH_PTPTSCR_TSSARFE_Pos) | + ((uint32_t)ptpconfig->TimestampRolloverMode << ETH_PTPTSCR_TSSSR_Pos) | + ((uint32_t)ptpconfig->TimestampV2 << ETH_PTPTSCR_TSPTPPSV2E_Pos) | + ((uint32_t)ptpconfig->TimestampEthernet << ETH_PTPTSCR_TSSPTPOEFE_Pos) | + ((uint32_t)ptpconfig->TimestampIPv6 << ETH_PTPTSCR_TSSIPV6FE_Pos) | + ((uint32_t)ptpconfig->TimestampIPv4 << ETH_PTPTSCR_TSSIPV4FE_Pos) | + ((uint32_t)ptpconfig->TimestampEvent << ETH_PTPTSCR_TSSEME_Pos) | + ((uint32_t)ptpconfig->TimestampMaster << ETH_PTPTSCR_TSSMRME_Pos) | + ((uint32_t)ptpconfig->TimestampFilter << ETH_PTPTSCR_TSPFFMAE_Pos) | + ((uint32_t)ptpconfig->TimestampClockType << ETH_PTPTSCR_TSCNT_Pos); + + /* Write to MACTSCR */ + MODIFY_REG(heth->Instance->PTPTSCR, ETH_MACTSCR_MASK, tmpTSCR); + + /* Enable Timestamp */ + SET_BIT(heth->Instance->PTPTSCR, ETH_PTPTSCR_TSE); + WRITE_REG(heth->Instance->PTPSSIR, ptpconfig->TimestampSubsecondInc); + WRITE_REG(heth->Instance->PTPTSAR, ptpconfig->TimestampAddend); + + /* Enable Timestamp */ + if (ptpconfig->TimestampAddendUpdate == ENABLE) + { + SET_BIT(heth->Instance->PTPTSCR, ETH_PTPTSCR_TSARU); + while ((heth->Instance->PTPTSCR & ETH_PTPTSCR_TSARU) != 0) + { + + } + } + + /* Ptp Init */ + SET_BIT(heth->Instance->PTPTSCR, ETH_PTPTSCR_TSSTI); + + /* Set PTP Configuration done */ + heth->IsPtpConfigured = HAL_ETH_PTP_CONFIGURED; + + /* Set Seconds */ + time.Seconds = heth->Instance->PTPTSHR; + /* Set NanoSeconds */ + time.NanoSeconds = heth->Instance->PTPTSLR; + + HAL_ETH_PTP_SetTime(heth, &time); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Get the Ethernet PTP configuration. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param ptpconfig: pointer to a ETH_PTP_ConfigTypeDef structure that contains + * the configuration information for PTP + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_GetConfig(ETH_HandleTypeDef *heth, ETH_PTP_ConfigTypeDef *ptpconfig) +{ + if (ptpconfig == NULL) + { + return HAL_ERROR; + } + ptpconfig->Timestamp = READ_BIT(heth->Instance->PTPTSCR, ETH_PTPTSCR_TSE); + ptpconfig->TimestampUpdate = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSFCU) >> ETH_PTPTSCR_TSFCU_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampAll = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSSARFE) >> ETH_PTPTSCR_TSSARFE_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampRolloverMode = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSSSR) >> ETH_PTPTSCR_TSSSR_Pos) > 0U) + ? ENABLE : DISABLE; + ptpconfig->TimestampV2 = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSPTPPSV2E) >> ETH_PTPTSCR_TSPTPPSV2E_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampEthernet = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSSPTPOEFE) >> ETH_PTPTSCR_TSSPTPOEFE_Pos) > 0U) + ? 
ENABLE : DISABLE; + ptpconfig->TimestampIPv6 = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSSIPV6FE) >> ETH_PTPTSCR_TSSIPV6FE_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampIPv4 = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSSIPV4FE) >> ETH_PTPTSCR_TSSIPV4FE_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampEvent = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSSEME) >> ETH_PTPTSCR_TSSEME_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampMaster = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSSMRME) >> ETH_PTPTSCR_TSSMRME_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampFilter = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSPFFMAE) >> ETH_PTPTSCR_TSPFFMAE_Pos) > 0U) ? ENABLE : DISABLE; + ptpconfig->TimestampClockType = ((READ_BIT(heth->Instance->PTPTSCR, + ETH_PTPTSCR_TSCNT) >> ETH_PTPTSCR_TSCNT_Pos) > 0U) ? ENABLE : DISABLE; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Set Seconds and Nanoseconds for the Ethernet PTP registers. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param time: pointer to a ETH_TimeTypeDef structure that contains + * time to set + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_SetTime(ETH_HandleTypeDef *heth, ETH_TimeTypeDef *time) +{ + if (heth->IsPtpConfigured == HAL_ETH_PTP_CONFIGURED) + { + /* Set Seconds */ + heth->Instance->PTPTSHUR = time->Seconds; + + /* Set NanoSeconds */ + heth->Instance->PTPTSLUR = time->NanoSeconds; + + /* the system time is updated */ + SET_BIT(heth->Instance->PTPTSCR, ETH_PTPTSCR_TSSTU); + + /* Return function status */ + return HAL_OK; + } + else + { + /* Return function status */ + return HAL_ERROR; + } +} + +/** + * @brief Get Seconds and Nanoseconds for the Ethernet PTP registers. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param time: pointer to a ETH_TimeTypeDef structure that contains + * time to get + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_GetTime(ETH_HandleTypeDef *heth, ETH_TimeTypeDef *time) +{ + if (heth->IsPtpConfigured == HAL_ETH_PTP_CONFIGURED) + { + /* Get Seconds */ + time->Seconds = heth->Instance->PTPTSHR; + /* Get NanoSeconds */ + time->NanoSeconds = heth->Instance->PTPTSLR; + + /* Return function status */ + return HAL_OK; + } + else + { + /* Return function status */ + return HAL_ERROR; + } +} + +/** + * @brief Update time for the Ethernet PTP registers. 
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param timeoffset: pointer to a ETH_PtpUpdateTypeDef structure that contains + * the time update information + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_AddTimeOffset(ETH_HandleTypeDef *heth, ETH_PtpUpdateTypeDef ptpoffsettype, + ETH_TimeTypeDef *timeoffset) +{ + if (heth->IsPtpConfigured == HAL_ETH_PTP_CONFIGURED) + { + if (ptpoffsettype == HAL_ETH_PTP_NEGATIVE_UPDATE) + { + /* Set Seconds update */ + heth->Instance->PTPTSHUR = ETH_PTPTSHR_VALUE - timeoffset->Seconds + 1U; + + if (READ_BIT(heth->Instance->PTPTSCR, ETH_PTPTSCR_TSSSR) == ETH_PTPTSCR_TSSSR) + { + /* Set nanoSeconds update */ + heth->Instance->PTPTSLUR = ETH_PTPTSLR_VALUE - timeoffset->NanoSeconds; + } + else + { + heth->Instance->PTPTSLUR = ETH_PTPTSHR_VALUE - timeoffset->NanoSeconds + 1U; + } + } + else + { + /* Set Seconds update */ + heth->Instance->PTPTSHUR = timeoffset->Seconds; + /* Set nanoSeconds update */ + heth->Instance->PTPTSLUR = timeoffset->NanoSeconds; + } + + SET_BIT(heth->Instance->PTPTSCR, ETH_PTPTSCR_TSSTU); + + /* Return function status */ + return HAL_OK; + } + else + { + /* Return function status */ + return HAL_ERROR; + } +} + +/** + * @brief Insert Timestamp in transmission. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_InsertTxTimestamp(ETH_HandleTypeDef *heth) +{ + ETH_TxDescListTypeDef *dmatxdesclist = &heth->TxDescList; + uint32_t descidx = dmatxdesclist->CurTxDesc; + ETH_DMADescTypeDef *dmatxdesc = (ETH_DMADescTypeDef *)dmatxdesclist->TxDesc[descidx]; + + if (heth->IsPtpConfigured == HAL_ETH_PTP_CONFIGURED) + { + /* Enable Time Stamp transmission */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_TTSE); + + /* Return function status */ + return HAL_OK; + } + else + { + /* Return function status */ + return HAL_ERROR; + } +} + +/** + * @brief Get transmission timestamp. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param timestamp: pointer to ETH_TIMESTAMPTypeDef structure that contains + * transmission timestamp + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_GetTxTimestamp(ETH_HandleTypeDef *heth, ETH_TimeStampTypeDef *timestamp) +{ + ETH_TxDescListTypeDef *dmatxdesclist = &heth->TxDescList; + uint32_t idx = dmatxdesclist->releaseIndex; + ETH_DMADescTypeDef *dmatxdesc = (ETH_DMADescTypeDef *)dmatxdesclist->TxDesc[idx]; + + if (heth->IsPtpConfigured == HAL_ETH_PTP_CONFIGURED) + { + /* Get timestamp low */ + timestamp->TimeStampLow = dmatxdesc->DESC0; + /* Get timestamp high */ + timestamp->TimeStampHigh = dmatxdesc->DESC1; + + /* Return function status */ + return HAL_OK; + } + else + { + /* Return function status */ + return HAL_ERROR; + } +} + +/** + * @brief Get receive timestamp. 
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param timestamp: pointer to ETH_TIMESTAMPTypeDef structure that contains + * receive timestamp + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_PTP_GetRxTimestamp(ETH_HandleTypeDef *heth, ETH_TimeStampTypeDef *timestamp) +{ + if (heth->IsPtpConfigured == HAL_ETH_PTP_CONFIGURED) + { + /* Get timestamp low */ + timestamp->TimeStampLow = heth->RxDescList.TimeStamp.TimeStampLow; + /* Get timestamp high */ + timestamp->TimeStampHigh = heth->RxDescList.TimeStamp.TimeStampHigh; + + /* Return function status */ + return HAL_OK; + } + else + { + /* Return function status */ + return HAL_ERROR; + } +} + +/** + * @brief Register the Tx Ptp callback. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param txPtpCallback: Function to handle Ptp transmission + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_RegisterTxPtpCallback(ETH_HandleTypeDef *heth, pETH_txPtpCallbackTypeDef txPtpCallback) +{ + if (txPtpCallback == NULL) + { + /* No buffer to save */ + return HAL_ERROR; + } + /* Set Function to handle Tx Ptp */ + heth->txPtpCallback = txPtpCallback; + + return HAL_OK; +} + +/** + * @brief Unregister the Tx Ptp callback. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_UnRegisterTxPtpCallback(ETH_HandleTypeDef *heth) +{ + /* Set function to allocate buffer */ + heth->txPtpCallback = HAL_ETH_TxPtpCallback; + + return HAL_OK; +} + +/** + * @brief Tx Ptp callback. + * @param buff: pointer to application buffer + * @param timestamp: pointer to ETH_TimeStampTypeDef structure that contains + * transmission timestamp + * @retval None + */ +__weak void HAL_ETH_TxPtpCallback(uint32_t *buff, ETH_TimeStampTypeDef *timestamp) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(buff); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_TxPtpCallback could be implemented in the user file + */ +} +#endif /* HAL_ETH_USE_PTP */ + +/** + * @brief This function handles ETH interrupt request. 
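+ * @note  This handler is intended to be called from the ETH interrupt vector,
+ *        typically in stm32f4xx_it.c (sketch; the handle name heth is an
+ *        application choice):
+ * @code
+ * extern ETH_HandleTypeDef heth;
+ *
+ * void ETH_IRQHandler(void)
+ * {
+ *   HAL_ETH_IRQHandler(&heth);
+ * }
+ * @endcode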
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +void HAL_ETH_IRQHandler(ETH_HandleTypeDef *heth) +{ + uint32_t mac_flag = READ_REG(heth->Instance->MACSR); + uint32_t dma_flag = READ_REG(heth->Instance->DMASR); + uint32_t dma_itsource = READ_REG(heth->Instance->DMAIER); + uint32_t exti_flag = READ_REG(EXTI->PR); + + /* Packet received */ + if (((dma_flag & ETH_DMASR_RS) != 0U) && ((dma_itsource & ETH_DMAIER_RIE) != 0U)) + { + /* Clear the Eth DMA Rx IT pending bits */ + __HAL_ETH_DMA_CLEAR_IT(heth, ETH_DMASR_RS | ETH_DMASR_NIS); + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /*Call registered Receive complete callback*/ + heth->RxCpltCallback(heth); +#else + /* Receive complete callback */ + HAL_ETH_RxCpltCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + } + + /* Packet transmitted */ + if (((dma_flag & ETH_DMASR_TS) != 0U) && ((dma_itsource & ETH_DMAIER_TIE) != 0U)) + { + /* Clear the Eth DMA Tx IT pending bits */ + __HAL_ETH_DMA_CLEAR_IT(heth, ETH_DMASR_TS | ETH_DMASR_NIS); + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /*Call registered Transmit complete callback*/ + heth->TxCpltCallback(heth); +#else + /* Transfer complete callback */ + HAL_ETH_TxCpltCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + } + + /* ETH DMA Error */ + if (((dma_flag & ETH_DMASR_AIS) != 0U) && ((dma_itsource & ETH_DMAIER_AISE) != 0U)) + { + heth->ErrorCode |= HAL_ETH_ERROR_DMA; + /* if fatal bus error occurred */ + if ((dma_flag & ETH_DMASR_FBES) != 0U) + { + /* Get DMA error code */ + heth->DMAErrorCode = READ_BIT(heth->Instance->DMASR, (ETH_DMASR_FBES | ETH_DMASR_TPS | ETH_DMASR_RPS)); + + /* Disable all interrupts */ + __HAL_ETH_DMA_DISABLE_IT(heth, ETH_DMAIER_NISE | ETH_DMAIER_AISE); + + /* Set HAL state to ERROR */ + heth->gState = HAL_ETH_STATE_ERROR; + } + else + { + /* Get DMA error status */ + heth->DMAErrorCode = READ_BIT(heth->Instance->DMASR, (ETH_DMASR_ETS | ETH_DMASR_RWTS | + ETH_DMASR_RBUS | ETH_DMASR_AIS)); + + /* Clear the interrupt summary flag */ + __HAL_ETH_DMA_CLEAR_IT(heth, (ETH_DMASR_ETS | ETH_DMASR_RWTS | + ETH_DMASR_RBUS | ETH_DMASR_AIS)); + } +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /* Call registered Error callback*/ + heth->ErrorCallback(heth); +#else + /* Ethernet DMA Error callback */ + HAL_ETH_ErrorCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + } + + + /* ETH PMT IT */ + if ((mac_flag & ETH_MAC_PMT_IT) != 0U) + { + /* Get MAC Wake-up source and clear the status register pending bit */ + heth->MACWakeUpEvent = READ_BIT(heth->Instance->MACPMTCSR, (ETH_MACPMTCSR_WFR | ETH_MACPMTCSR_MPR)); + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /* Call registered PMT callback*/ + heth->PMTCallback(heth); +#else + /* Ethernet PMT callback */ + HAL_ETH_PMTCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + + heth->MACWakeUpEvent = (uint32_t)(0x0U); + } + + + /* check ETH WAKEUP exti flag */ + if ((exti_flag & ETH_WAKEUP_EXTI_LINE) != 0U) + { + /* Clear ETH WAKEUP Exti pending bit */ + __HAL_ETH_WAKEUP_EXTI_CLEAR_FLAG(ETH_WAKEUP_EXTI_LINE); +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /* Call registered WakeUp callback*/ + heth->WakeUpCallback(heth); +#else + /* ETH WAKEUP callback */ + HAL_ETH_WakeUpCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + } +} + +/** + * @brief Tx Transfer completed callbacks. 
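+ * @note  As stated in the function body below, this weak callback should not be
+ *        modified here; it can instead be redefined in the user application,
+ *        e.g. (sketch):
+ * @code
+ * void HAL_ETH_TxCpltCallback(ETH_HandleTypeDef *heth)
+ * {
+ *   UNUSED(heth);
+ *   /* e.g. release transmitted buffers or signal the TX task here */
+ * }
+ * @endcode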
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_TxCpltCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_TxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Rx Transfer completed callbacks. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_RxCpltCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_RxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Ethernet transfer error callbacks + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_ErrorCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_ErrorCallback could be implemented in the user file + */ +} + +/** + * @brief Ethernet Power Management module IT callback + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_PMTCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_PMTCallback could be implemented in the user file + */ +} + + +/** + * @brief ETH WAKEUP interrupt callback + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_WakeUpCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_WakeUpCallback could be implemented in the user file + */ +} + +/** + * @brief Read a PHY register + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param PHYAddr: PHY port address, must be a value from 0 to 31 + * @param PHYReg: PHY register address, must be a value from 0 to 31 + * @param pRegValue: parameter to hold read value + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_ReadPHYRegister(ETH_HandleTypeDef *heth, uint32_t PHYAddr, uint32_t PHYReg, + uint32_t *pRegValue) +{ + uint32_t tmpreg1; + uint32_t tickstart; + + /* Get the ETHERNET MACMIIAR value */ + tmpreg1 = heth->Instance->MACMIIAR; + + /* Keep only the CSR Clock Range CR[2:0] bits value */ + tmpreg1 &= ~ETH_MACMIIAR_CR_MASK; + + /* Prepare the MII address register value */ + tmpreg1 |= ((PHYAddr << 11U) & ETH_MACMIIAR_PA); /* Set the PHY device address */ + tmpreg1 |= (((uint32_t)PHYReg << 6U) & ETH_MACMIIAR_MR); /* Set the PHY register address */ + tmpreg1 &= ~ETH_MACMIIAR_MW; /* Set the read mode */ + tmpreg1 |= ETH_MACMIIAR_MB; /* Set the MII Busy bit */ + + /* Write the result value into the MII Address register */ + heth->Instance->MACMIIAR = 
tmpreg1; + + + tickstart = HAL_GetTick(); + + /* Check for the Busy flag */ + while ((tmpreg1 & ETH_MACMIIAR_MB) == ETH_MACMIIAR_MB) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > PHY_READ_TO) + { + return HAL_ERROR; + } + + tmpreg1 = heth->Instance->MACMIIAR; + } + + /* Get MACMIIDR value */ + *pRegValue = (uint16_t)(heth->Instance->MACMIIDR); + + return HAL_OK; +} + +/** + * @brief Writes to a PHY register. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param PHYAddr: PHY port address, must be a value from 0 to 31 + * @param PHYReg: PHY register address, must be a value from 0 to 31 + * @param RegValue: the value to write + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_WritePHYRegister(const ETH_HandleTypeDef *heth, uint32_t PHYAddr, uint32_t PHYReg, + uint32_t RegValue) +{ + uint32_t tmpreg1; + uint32_t tickstart; + + /* Get the ETHERNET MACMIIAR value */ + tmpreg1 = heth->Instance->MACMIIAR; + + /* Keep only the CSR Clock Range CR[2:0] bits value */ + tmpreg1 &= ~ETH_MACMIIAR_CR_MASK; + + /* Prepare the MII register address value */ + tmpreg1 |= ((PHYAddr << 11U) & ETH_MACMIIAR_PA); /* Set the PHY device address */ + tmpreg1 |= (((uint32_t)PHYReg << 6U) & ETH_MACMIIAR_MR); /* Set the PHY register address */ + tmpreg1 |= ETH_MACMIIAR_MW; /* Set the write mode */ + tmpreg1 |= ETH_MACMIIAR_MB; /* Set the MII Busy bit */ + + /* Give the value to the MII data register */ + heth->Instance->MACMIIDR = (uint16_t)RegValue; + + /* Write the result value into the MII Address register */ + heth->Instance->MACMIIAR = tmpreg1; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check for the Busy flag */ + while ((tmpreg1 & ETH_MACMIIAR_MB) == ETH_MACMIIAR_MB) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > PHY_WRITE_TO) + { + return HAL_ERROR; + } + + tmpreg1 = heth->Instance->MACMIIAR; + } + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup ETH_Exported_Functions_Group3 Peripheral Control functions + * @brief ETH control functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to control the ETH + peripheral. + +@endverbatim + * @{ + */ +/** + * @brief Get the configuration of the MAC and MTL subsystems. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param macconf: pointer to a ETH_MACConfigTypeDef structure that will hold + * the configuration of the MAC. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_ETH_GetMACConfig(const ETH_HandleTypeDef *heth, ETH_MACConfigTypeDef *macconf) +{ + if (macconf == NULL) + { + return HAL_ERROR; + } + + /* Get MAC parameters */ + macconf->DeferralCheck = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_DC) >> 4) > 0U) ? ENABLE : DISABLE; + macconf->BackOffLimit = READ_BIT(heth->Instance->MACCR, ETH_MACCR_BL); + macconf->RetryTransmission = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_RD) >> 9) == 0U) ? ENABLE : DISABLE; + macconf->CarrierSenseDuringTransmit = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_CSD) >> 16) > 0U) + ? ENABLE : DISABLE; + macconf->ReceiveOwn = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_ROD) >> 13) == 0U) ? 
ENABLE : DISABLE; + macconf->LoopbackMode = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_LM) >> 12) > 0U) ? ENABLE : DISABLE; + macconf->DuplexMode = READ_BIT(heth->Instance->MACCR, ETH_MACCR_DM); + macconf->Speed = READ_BIT(heth->Instance->MACCR, ETH_MACCR_FES); + macconf->Jabber = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_JD) >> 22) == 0U) ? ENABLE : DISABLE; + macconf->Watchdog = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_WD) >> 23) == 0U) ? ENABLE : DISABLE; + macconf->AutomaticPadCRCStrip = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_APCS) >> 7) > 0U) ? ENABLE : DISABLE; + macconf->InterPacketGapVal = READ_BIT(heth->Instance->MACCR, ETH_MACCR_IFG); + macconf->ChecksumOffload = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_IPCO) >> 10U) > 0U) ? ENABLE : DISABLE; + macconf->CRCStripTypePacket = ((READ_BIT(heth->Instance->MACCR, ETH_MACCR_CSTF) >> 25U) > 0U) ? ENABLE : DISABLE; + + macconf->TransmitFlowControl = ((READ_BIT(heth->Instance->MACFCR, ETH_MACFCR_TFCE) >> 1) > 0U) ? ENABLE : DISABLE; + macconf->ZeroQuantaPause = ((READ_BIT(heth->Instance->MACFCR, ETH_MACFCR_ZQPD) >> 7) == 0U) ? ENABLE : DISABLE; + macconf->PauseLowThreshold = READ_BIT(heth->Instance->MACFCR, ETH_MACFCR_PLT); + macconf->PauseTime = (READ_BIT(heth->Instance->MACFCR, ETH_MACFCR_PT) >> 16); + macconf->ReceiveFlowControl = ((READ_BIT(heth->Instance->MACFCR, ETH_MACFCR_RFCE) >> 2U) > 0U) ? ENABLE : DISABLE; + macconf->UnicastPausePacketDetect = ((READ_BIT(heth->Instance->MACFCR, ETH_MACFCR_UPFD) >> 3U) > 0U) + ? ENABLE : DISABLE; + + return HAL_OK; +} + +/** + * @brief Get the configuration of the DMA. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param dmaconf: pointer to a ETH_DMAConfigTypeDef structure that will hold + * the configuration of the ETH DMA. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_ETH_GetDMAConfig(const ETH_HandleTypeDef *heth, ETH_DMAConfigTypeDef *dmaconf) +{ + if (dmaconf == NULL) + { + return HAL_ERROR; + } + + dmaconf->DMAArbitration = READ_BIT(heth->Instance->DMABMR, + (ETH_DMAARBITRATION_RXPRIORTX | ETH_DMAARBITRATION_ROUNDROBIN_RXTX_4_1)); + dmaconf->AddressAlignedBeats = ((READ_BIT(heth->Instance->DMABMR, ETH_DMABMR_AAB) >> 25U) > 0U) ? ENABLE : DISABLE; + dmaconf->BurstMode = READ_BIT(heth->Instance->DMABMR, ETH_DMABMR_FB | ETH_DMABMR_MB); + dmaconf->RxDMABurstLength = READ_BIT(heth->Instance->DMABMR, ETH_DMABMR_RDP); + dmaconf->TxDMABurstLength = READ_BIT(heth->Instance->DMABMR, ETH_DMABMR_PBL); + dmaconf->EnhancedDescriptorFormat = ((READ_BIT(heth->Instance->DMABMR, ETH_DMABMR_EDE) >> 7) > 0U) ? ENABLE : DISABLE; + dmaconf->DescriptorSkipLength = READ_BIT(heth->Instance->DMABMR, ETH_DMABMR_DSL) >> 2; + + dmaconf->DropTCPIPChecksumErrorFrame = ((READ_BIT(heth->Instance->DMAOMR, + ETH_DMAOMR_DTCEFD) >> 26) > 0U) ? DISABLE : ENABLE; + dmaconf->ReceiveStoreForward = ((READ_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_RSF) >> 25) > 0U) ? ENABLE : DISABLE; + dmaconf->FlushRxPacket = ((READ_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_FTF) >> 20) > 0U) ? DISABLE : ENABLE; + dmaconf->TransmitStoreForward = ((READ_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_TSF) >> 21) > 0U) ? ENABLE : DISABLE; + dmaconf->TransmitThresholdControl = READ_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_TTC); + dmaconf->ForwardErrorFrames = ((READ_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_FEF) >> 7) > 0U) ? ENABLE : DISABLE; + dmaconf->ForwardUndersizedGoodFrames = ((READ_BIT(heth->Instance->DMAOMR, + ETH_DMAOMR_FUGF) >> 6) > 0U) ? 
ENABLE : DISABLE; + dmaconf->ReceiveThresholdControl = READ_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_RTC); + dmaconf->SecondFrameOperate = ((READ_BIT(heth->Instance->DMAOMR, ETH_DMAOMR_OSF) >> 2) > 0U) ? ENABLE : DISABLE; + + return HAL_OK; +} + +/** + * @brief Set the MAC configuration. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param macconf: pointer to a ETH_MACConfigTypeDef structure that contains + * the configuration of the MAC. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_SetMACConfig(ETH_HandleTypeDef *heth, ETH_MACConfigTypeDef *macconf) +{ + if (macconf == NULL) + { + return HAL_ERROR; + } + + if (heth->gState == HAL_ETH_STATE_READY) + { + ETH_SetMACConfig(heth, macconf); + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Set the ETH DMA configuration. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param dmaconf: pointer to a ETH_DMAConfigTypeDef structure that will hold + * the configuration of the ETH DMA. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_SetDMAConfig(ETH_HandleTypeDef *heth, ETH_DMAConfigTypeDef *dmaconf) +{ + if (dmaconf == NULL) + { + return HAL_ERROR; + } + + if (heth->gState == HAL_ETH_STATE_READY) + { + ETH_SetDMAConfig(heth, dmaconf); + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Configures the Clock range of ETH MDIO interface. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +void HAL_ETH_SetMDIOClockRange(ETH_HandleTypeDef *heth) +{ + uint32_t hclk; + uint32_t tmpreg; + + /* Get the ETHERNET MACMIIAR value */ + tmpreg = (heth->Instance)->MACMIIAR; + /* Clear CSR Clock Range CR[2:0] bits */ + tmpreg &= ETH_MACMIIAR_CR_MASK; + + /* Get hclk frequency value */ + hclk = HAL_RCC_GetHCLKFreq(); + + /* Set CR bits depending on hclk value */ + if (hclk < 35000000U) + { + /* CSR Clock Range between 0-35 MHz */ + tmpreg |= (uint32_t)ETH_MACMIIAR_CR_Div16; + } + else if (hclk < 60000000U) + { + /* CSR Clock Range between 35-60 MHz */ + tmpreg |= (uint32_t)ETH_MACMIIAR_CR_Div26; + } + else if (hclk < 100000000U) + { + /* CSR Clock Range between 60-100 MHz */ + tmpreg |= (uint32_t)ETH_MACMIIAR_CR_Div42; + } + else if (hclk < 150000000U) + { + /* CSR Clock Range between 100-150 MHz */ + tmpreg |= (uint32_t)ETH_MACMIIAR_CR_Div62; + } + else /* (hclk >= 150000000) */ + { + /* CSR Clock >= 150 MHz */ + tmpreg |= (uint32_t)ETH_MACMIIAR_CR_Div102; + } + + /* Write to ETHERNET MAC MIIAR: Configure the ETHERNET CSR Clock Range */ + (heth->Instance)->MACMIIAR = (uint32_t)tmpreg; +} + +/** + * @brief Set the ETH MAC (L2) Filters configuration. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pFilterConfig: pointer to a ETH_MACFilterConfigTypeDef structure that contains + * the configuration of the ETH MAC filters. 
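+ * @note  Minimal configuration sketch (remaining fields are left at 0/DISABLE by
+ *        the zero initializer; heth is assumed to be an initialized handle):
+ * @code
+ * ETH_MACFilterConfigTypeDef filter_cfg = {0};
+ * filter_cfg.PassAllMulticast = ENABLE;
+ * filter_cfg.BroadcastFilter  = ENABLE;   /* keep receiving broadcast frames */
+ * (void)HAL_ETH_SetMACFilterConfig(&heth, &filter_cfg);
+ * @endcode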
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_SetMACFilterConfig(ETH_HandleTypeDef *heth, const ETH_MACFilterConfigTypeDef *pFilterConfig) +{ + uint32_t filterconfig; + uint32_t tmpreg1; + + if (pFilterConfig == NULL) + { + return HAL_ERROR; + } + + filterconfig = ((uint32_t)pFilterConfig->PromiscuousMode | + ((uint32_t)pFilterConfig->HashUnicast << 1) | + ((uint32_t)pFilterConfig->HashMulticast << 2) | + ((uint32_t)pFilterConfig->DestAddrInverseFiltering << 3) | + ((uint32_t)pFilterConfig->PassAllMulticast << 4) | + ((uint32_t)((pFilterConfig->BroadcastFilter == DISABLE) ? 1U : 0U) << 5) | + ((uint32_t)pFilterConfig->SrcAddrInverseFiltering << 8) | + ((uint32_t)pFilterConfig->SrcAddrFiltering << 9) | + ((uint32_t)pFilterConfig->HachOrPerfectFilter << 10) | + ((uint32_t)pFilterConfig->ReceiveAllMode << 31) | + pFilterConfig->ControlPacketsFilter); + + MODIFY_REG(heth->Instance->MACFFR, ETH_MACFFR_MASK, filterconfig); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACFFR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACFFR = tmpreg1; + + return HAL_OK; +} + +/** + * @brief Get the ETH MAC (L2) Filters configuration. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pFilterConfig: pointer to a ETH_MACFilterConfigTypeDef structure that will hold + * the configuration of the ETH MAC filters. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_GetMACFilterConfig(const ETH_HandleTypeDef *heth, ETH_MACFilterConfigTypeDef *pFilterConfig) +{ + if (pFilterConfig == NULL) + { + return HAL_ERROR; + } + + pFilterConfig->PromiscuousMode = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_PM)) > 0U) ? ENABLE : DISABLE; + pFilterConfig->HashUnicast = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_HU) >> 1) > 0U) ? ENABLE : DISABLE; + pFilterConfig->HashMulticast = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_HM) >> 2) > 0U) ? ENABLE : DISABLE; + pFilterConfig->DestAddrInverseFiltering = ((READ_BIT(heth->Instance->MACFFR, + ETH_MACFFR_DAIF) >> 3) > 0U) ? ENABLE : DISABLE; + pFilterConfig->PassAllMulticast = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_PAM) >> 4) > 0U) ? ENABLE : DISABLE; + pFilterConfig->BroadcastFilter = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_BFD) >> 5) == 0U) ? ENABLE : DISABLE; + pFilterConfig->ControlPacketsFilter = READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_PCF); + pFilterConfig->SrcAddrInverseFiltering = ((READ_BIT(heth->Instance->MACFFR, + ETH_MACFFR_SAIF) >> 8) > 0U) ? ENABLE : DISABLE; + pFilterConfig->SrcAddrFiltering = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_SAF) >> 9) > 0U) ? ENABLE : DISABLE; + pFilterConfig->HachOrPerfectFilter = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_HPF) >> 10) > 0U) + ? ENABLE : DISABLE; + pFilterConfig->ReceiveAllMode = ((READ_BIT(heth->Instance->MACFFR, ETH_MACFFR_RA) >> 31) > 0U) ? ENABLE : DISABLE; + + return HAL_OK; +} + +/** + * @brief Set the source MAC Address to be matched. 
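+ * @note  Usage sketch (the address value below is only an example; heth is an
+ *        assumed, already initialized handle):
+ * @code
+ * static const uint8_t src_mac[6] = {0x00U, 0x80U, 0xE1U, 0x00U, 0x00U, 0x01U};
+ * (void)HAL_ETH_SetSourceMACAddrMatch(&heth, ETH_MAC_ADDRESS1, src_mac);
+ * @endcode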
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param AddrNbr: The MAC address to configure + * This parameter must be a value of the following: + * ETH_MAC_ADDRESS1 + * ETH_MAC_ADDRESS2 + * ETH_MAC_ADDRESS3 + * @param pMACAddr: Pointer to MAC address buffer data (6 bytes) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_SetSourceMACAddrMatch(const ETH_HandleTypeDef *heth, uint32_t AddrNbr, + const uint8_t *pMACAddr) +{ + uint32_t macaddrlr; + uint32_t macaddrhr; + + if (pMACAddr == NULL) + { + return HAL_ERROR; + } + + /* Get mac addr high reg offset */ + macaddrhr = ((uint32_t) &(heth->Instance->MACA0HR) + AddrNbr); + /* Get mac addr low reg offset */ + macaddrlr = ((uint32_t) &(heth->Instance->MACA0LR) + AddrNbr); + + /* Set MAC addr bits 32 to 47 */ + (*(__IO uint32_t *)macaddrhr) = (((uint32_t)(pMACAddr[5]) << 8) | (uint32_t)pMACAddr[4]); + /* Set MAC addr bits 0 to 31 */ + (*(__IO uint32_t *)macaddrlr) = (((uint32_t)(pMACAddr[3]) << 24) | ((uint32_t)(pMACAddr[2]) << 16) | + ((uint32_t)(pMACAddr[1]) << 8) | (uint32_t)pMACAddr[0]); + + /* Enable address and set source address bit */ + (*(__IO uint32_t *)macaddrhr) |= (ETH_MACA1HR_AE | ETH_MACA1HR_SA); + + return HAL_OK; +} + +/** + * @brief Set the ETH Hash Table Value. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pHashTable: pointer to a table of two 32 bit values, that contains + * the 64 bits of the hash table. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_SetHashTable(ETH_HandleTypeDef *heth, uint32_t *pHashTable) +{ + uint32_t tmpreg1; + if (pHashTable == NULL) + { + return HAL_ERROR; + } + + heth->Instance->MACHTHR = pHashTable[0]; + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACHTHR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACHTHR = tmpreg1; + + heth->Instance->MACHTLR = pHashTable[1]; + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACHTLR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACHTLR = tmpreg1; + + return HAL_OK; +} + +/** + * @brief Set the VLAN Identifier for Rx packets + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param ComparisonBits: 12 or 16 bit comparison mode + must be a value of @ref ETH_VLAN_Tag_Comparison + * @param VLANIdentifier: VLAN Identifier value + * @retval None + */ +void HAL_ETH_SetRxVLANIdentifier(ETH_HandleTypeDef *heth, uint32_t ComparisonBits, uint32_t VLANIdentifier) +{ + uint32_t tmpreg1; + MODIFY_REG(heth->Instance->MACVLANTR, ETH_MACVLANTR_VLANTI, VLANIdentifier); + if (ComparisonBits == ETH_VLANTAGCOMPARISON_16BIT) + { + CLEAR_BIT(heth->Instance->MACVLANTR, ETH_MACVLANTR_VLANTC); + } + else + { + SET_BIT(heth->Instance->MACVLANTR, ETH_MACVLANTR_VLANTC); + } + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACVLANTR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACVLANTR = tmpreg1; +} + +/** + * @brief Enters the Power down mode. 
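+ * @note  Configuration sketch (wake-up on magic packet only; any other fields of
+ *        the structure are left at their zero-initialized value; heth assumed
+ *        initialized):
+ * @code
+ * ETH_PowerDownConfigTypeDef pd_cfg = {0};
+ * pd_cfg.MagicPacket = ENABLE;
+ * HAL_ETH_EnterPowerDownMode(&heth, &pd_cfg);
+ * @endcode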
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pPowerDownConfig: a pointer to ETH_PowerDownConfigTypeDef structure + * that contains the Power Down configuration + * @retval None. + */ +void HAL_ETH_EnterPowerDownMode(ETH_HandleTypeDef *heth, const ETH_PowerDownConfigTypeDef *pPowerDownConfig) +{ + uint32_t powerdownconfig; + + powerdownconfig = (((uint32_t)pPowerDownConfig->MagicPacket << ETH_MACPMTCSR_MPE_Pos) | + ((uint32_t)pPowerDownConfig->WakeUpPacket << ETH_MACPMTCSR_WFE_Pos) | + ((uint32_t)pPowerDownConfig->GlobalUnicast << ETH_MACPMTCSR_GU_Pos) | + ETH_MACPMTCSR_PD); + + MODIFY_REG(heth->Instance->MACPMTCSR, ETH_MACPMTCSR_MASK, powerdownconfig); +} + +/** + * @brief Exits from the Power down mode. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None. + */ +void HAL_ETH_ExitPowerDownMode(ETH_HandleTypeDef *heth) +{ + uint32_t tmpreg1; + + /* clear wake up sources */ + CLEAR_BIT(heth->Instance->MACPMTCSR, ETH_MACPMTCSR_WFE | ETH_MACPMTCSR_MPE | ETH_MACPMTCSR_GU); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACPMTCSR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACPMTCSR = tmpreg1; + + if (READ_BIT(heth->Instance->MACPMTCSR, ETH_MACPMTCSR_PD) != 0U) + { + /* Exit power down mode */ + CLEAR_BIT(heth->Instance->MACPMTCSR, ETH_MACPMTCSR_PD); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACPMTCSR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACPMTCSR = tmpreg1; + } + + /* Disable PMT interrupt */ + SET_BIT(heth->Instance->MACIMR, ETH_MACIMR_PMTIM); +} + +/** + * @brief Set the WakeUp filter. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pFilter: pointer to filter registers values + * @param Count: number of filter registers, must be from 1 to 8. + * @retval None. + */ +HAL_StatusTypeDef HAL_ETH_SetWakeUpFilter(ETH_HandleTypeDef *heth, uint32_t *pFilter, uint32_t Count) +{ + uint32_t regindex; + + if (pFilter == NULL) + { + return HAL_ERROR; + } + + /* Reset Filter Pointer */ + SET_BIT(heth->Instance->MACPMTCSR, ETH_MACPMTCSR_WFFRPR); + + /* Wake up packet filter config */ + for (regindex = 0; regindex < Count; regindex++) + { + /* Write filter regs */ + WRITE_REG(heth->Instance->MACRWUFFR, pFilter[regindex]); + } + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup ETH_Exported_Functions_Group4 Peripheral State and Errors functions + * @brief ETH State and Errors functions + * +@verbatim + ============================================================================== + ##### Peripheral State and Errors functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to return the State of + ETH communication process, return Peripheral Errors occurred during communication + process + + +@endverbatim + * @{ + */ + +/** + * @brief Returns the ETH state. 
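+ * @note  Typical use (sketch): poll the state before issuing a new request.
+ * @code
+ * if (HAL_ETH_GetState(&heth) == HAL_ETH_STATE_READY)
+ * {
+ *   /* the peripheral can accept a new configuration or transmit request */
+ * }
+ * @endcode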
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL state + */ +HAL_ETH_StateTypeDef HAL_ETH_GetState(const ETH_HandleTypeDef *heth) +{ + return heth->gState; +} + +/** + * @brief Returns the ETH error code + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval ETH Error Code + */ +uint32_t HAL_ETH_GetError(const ETH_HandleTypeDef *heth) +{ + return heth->ErrorCode; +} + +/** + * @brief Returns the ETH DMA error code + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval ETH DMA Error Code + */ +uint32_t HAL_ETH_GetDMAError(const ETH_HandleTypeDef *heth) +{ + return heth->DMAErrorCode; +} + +/** + * @brief Returns the ETH MAC error code + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval ETH MAC Error Code + */ +uint32_t HAL_ETH_GetMACError(const ETH_HandleTypeDef *heth) +{ + return heth->MACErrorCode; +} + +/** + * @brief Returns the ETH MAC WakeUp event source + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval ETH MAC WakeUp event source + */ +uint32_t HAL_ETH_GetMACWakeUpSource(const ETH_HandleTypeDef *heth) +{ + return heth->MACWakeUpEvent; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup ETH_Private_Functions ETH Private Functions + * @{ + */ + +/** + * @brief Clears the ETHERNET transmit FIFO. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_FlushTransmitFIFO(ETH_HandleTypeDef *heth) +{ + __IO uint32_t tmpreg = 0; + + /* Set the Flush Transmit FIFO bit */ + (heth->Instance)->DMAOMR |= ETH_DMAOMR_FTF; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->DMAOMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMAOMR = tmpreg; +} + +static void ETH_SetMACConfig(ETH_HandleTypeDef *heth, const ETH_MACConfigTypeDef *macconf) +{ + uint32_t tmpreg1; + + /*------------------------ ETHERNET MACCR Configuration --------------------*/ + /* Get the ETHERNET MACCR value */ + tmpreg1 = (heth->Instance)->MACCR; + /* Clear CSTF, WD, PCE, PS, TE and RE bits */ + tmpreg1 &= ETH_MACCR_CLEAR_MASK; + + tmpreg1 |= (uint32_t)(((uint32_t)macconf->CRCStripTypePacket << 25U) | + ((uint32_t)((macconf->Watchdog == DISABLE) ? 1U : 0U) << 23U) | + ((uint32_t)((macconf->Jabber == DISABLE) ? 1U : 0U) << 22U) | + (uint32_t)macconf->InterPacketGapVal | + ((uint32_t)macconf->CarrierSenseDuringTransmit << 16U) | + macconf->Speed | + ((uint32_t)((macconf->ReceiveOwn == DISABLE) ? 1U : 0U) << 13U) | + ((uint32_t)macconf->LoopbackMode << 12U) | + macconf->DuplexMode | + ((uint32_t)macconf->ChecksumOffload << 10U) | + ((uint32_t)((macconf->RetryTransmission == DISABLE) ? 
1U : 0U) << 9U) | + ((uint32_t)macconf->AutomaticPadCRCStrip << 7U) | + macconf->BackOffLimit | + ((uint32_t)macconf->DeferralCheck << 4U)); + + /* Write to ETHERNET MACCR */ + (heth->Instance)->MACCR = (uint32_t)tmpreg1; + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg1; + + /*----------------------- ETHERNET MACFCR Configuration --------------------*/ + + /* Get the ETHERNET MACFCR value */ + tmpreg1 = (heth->Instance)->MACFCR; + /* Clear xx bits */ + tmpreg1 &= ETH_MACFCR_CLEAR_MASK; + + tmpreg1 |= (uint32_t)((macconf->PauseTime << 16U) | + ((uint32_t)((macconf->ZeroQuantaPause == DISABLE) ? 1U : 0U) << 7U) | + macconf->PauseLowThreshold | + ((uint32_t)((macconf->UnicastPausePacketDetect == ENABLE) ? 1U : 0U) << 3U) | + ((uint32_t)((macconf->ReceiveFlowControl == ENABLE) ? 1U : 0U) << 2U) | + ((uint32_t)((macconf->TransmitFlowControl == ENABLE) ? 1U : 0U) << 1U)); + + /* Write to ETHERNET MACFCR */ + (heth->Instance)->MACFCR = (uint32_t)tmpreg1; + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->MACFCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACFCR = tmpreg1; +} + +static void ETH_SetDMAConfig(ETH_HandleTypeDef *heth, const ETH_DMAConfigTypeDef *dmaconf) +{ + uint32_t tmpreg1; + + /*----------------------- ETHERNET DMAOMR Configuration --------------------*/ + /* Get the ETHERNET DMAOMR value */ + tmpreg1 = (heth->Instance)->DMAOMR; + /* Clear xx bits */ + tmpreg1 &= ETH_DMAOMR_CLEAR_MASK; + + tmpreg1 |= (uint32_t)(((uint32_t)((dmaconf->DropTCPIPChecksumErrorFrame == DISABLE) ? 1U : 0U) << 26U) | + ((uint32_t)dmaconf->ReceiveStoreForward << 25U) | + ((uint32_t)((dmaconf->FlushRxPacket == DISABLE) ? 1U : 0U) << 20U) | + ((uint32_t)dmaconf->TransmitStoreForward << 21U) | + dmaconf->TransmitThresholdControl | + ((uint32_t)dmaconf->ForwardErrorFrames << 7U) | + ((uint32_t)dmaconf->ForwardUndersizedGoodFrames << 6U) | + dmaconf->ReceiveThresholdControl | + ((uint32_t)dmaconf->SecondFrameOperate << 2U)); + + /* Write to ETHERNET DMAOMR */ + (heth->Instance)->DMAOMR = (uint32_t)tmpreg1; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->DMAOMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMAOMR = tmpreg1; + + /*----------------------- ETHERNET DMABMR Configuration --------------------*/ + (heth->Instance)->DMABMR = (uint32_t)(((uint32_t)dmaconf->AddressAlignedBeats << 25U) | + dmaconf->BurstMode | + dmaconf->RxDMABurstLength | /* !! if 4xPBL is selected for Tx or + Rx it is applied for the other */ + dmaconf->TxDMABurstLength | + ((uint32_t)dmaconf->EnhancedDescriptorFormat << 7U) | + (dmaconf->DescriptorSkipLength << 2U) | + dmaconf->DMAArbitration | + ETH_DMABMR_USP); /* Enable use of separate PBL for Rx and Tx */ + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg1 = (heth->Instance)->DMABMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMABMR = tmpreg1; +} + +/** + * @brief Configures Ethernet MAC and DMA with default parameters. + * called by HAL_ETH_Init() API. 
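+ * @note  After initialization, the application can adjust these defaults with
+ *        the get/modify/set pattern exposed above (sketch; heth is an assumed,
+ *        already initialized handle):
+ * @code
+ * ETH_MACConfigTypeDef mac_cfg;
+ * if (HAL_ETH_GetMACConfig(&heth, &mac_cfg) == HAL_OK)
+ * {
+ *   mac_cfg.DuplexMode = ETH_FULLDUPLEX_MODE;
+ *   mac_cfg.Speed      = ETH_SPEED_100M;
+ *   (void)HAL_ETH_SetMACConfig(&heth, &mac_cfg);
+ * }
+ * @endcode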
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +static void ETH_MACDMAConfig(ETH_HandleTypeDef *heth) +{ + ETH_MACConfigTypeDef macDefaultConf; + ETH_DMAConfigTypeDef dmaDefaultConf; + + /*--------------- ETHERNET MAC registers default Configuration --------------*/ + macDefaultConf.Watchdog = ENABLE; + macDefaultConf.Jabber = ENABLE; + macDefaultConf.InterPacketGapVal = ETH_INTERFRAMEGAP_96BIT; + macDefaultConf.CarrierSenseDuringTransmit = DISABLE; + macDefaultConf.ReceiveOwn = ENABLE; + macDefaultConf.LoopbackMode = DISABLE; + macDefaultConf.CRCStripTypePacket = ENABLE; + macDefaultConf.ChecksumOffload = ENABLE; + macDefaultConf.RetryTransmission = DISABLE; + macDefaultConf.AutomaticPadCRCStrip = DISABLE; + macDefaultConf.BackOffLimit = ETH_BACKOFFLIMIT_10; + macDefaultConf.DeferralCheck = DISABLE; + macDefaultConf.PauseTime = 0x0U; + macDefaultConf.ZeroQuantaPause = DISABLE; + macDefaultConf.PauseLowThreshold = ETH_PAUSELOWTHRESHOLD_MINUS4; + macDefaultConf.ReceiveFlowControl = DISABLE; + macDefaultConf.TransmitFlowControl = DISABLE; + macDefaultConf.Speed = ETH_SPEED_100M; + macDefaultConf.DuplexMode = ETH_FULLDUPLEX_MODE; + macDefaultConf.UnicastPausePacketDetect = DISABLE; + + /* MAC default configuration */ + ETH_SetMACConfig(heth, &macDefaultConf); + + /*--------------- ETHERNET DMA registers default Configuration --------------*/ + dmaDefaultConf.DropTCPIPChecksumErrorFrame = ENABLE; + dmaDefaultConf.ReceiveStoreForward = ENABLE; + dmaDefaultConf.FlushRxPacket = ENABLE; + dmaDefaultConf.TransmitStoreForward = ENABLE; + dmaDefaultConf.TransmitThresholdControl = ETH_TRANSMITTHRESHOLDCONTROL_64BYTES; + dmaDefaultConf.ForwardErrorFrames = DISABLE; + dmaDefaultConf.ForwardUndersizedGoodFrames = DISABLE; + dmaDefaultConf.ReceiveThresholdControl = ETH_RECEIVEDTHRESHOLDCONTROL_64BYTES; + dmaDefaultConf.SecondFrameOperate = ENABLE; + dmaDefaultConf.AddressAlignedBeats = ENABLE; + dmaDefaultConf.BurstMode = ETH_BURSTLENGTH_FIXED; + dmaDefaultConf.RxDMABurstLength = ETH_RXDMABURSTLENGTH_32BEAT; + dmaDefaultConf.TxDMABurstLength = ETH_TXDMABURSTLENGTH_32BEAT; + dmaDefaultConf.EnhancedDescriptorFormat = ENABLE; + dmaDefaultConf.DescriptorSkipLength = 0x0U; + dmaDefaultConf.DMAArbitration = ETH_DMAARBITRATION_ROUNDROBIN_RXTX_1_1; + + /* DMA default configuration */ + ETH_SetDMAConfig(heth, &dmaDefaultConf); +} +/** + * @brief Configures the selected MAC address. 
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param MacAddr The MAC address to configure + * This parameter can be one of the following values: + * @arg ETH_MAC_Address0: MAC Address0 + * @arg ETH_MAC_Address1: MAC Address1 + * @arg ETH_MAC_Address2: MAC Address2 + * @arg ETH_MAC_Address3: MAC Address3 + * @param Addr Pointer to MAC address buffer data (6 bytes) + * @retval HAL status + */ +static void ETH_MACAddressConfig(ETH_HandleTypeDef *heth, uint32_t MacAddr, uint8_t *Addr) +{ + uint32_t tmpreg1; + + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + + /* Calculate the selected MAC address high register */ + tmpreg1 = ((uint32_t)Addr[5U] << 8U) | (uint32_t)Addr[4U]; + /* Load the selected MAC address high register */ + (*(__IO uint32_t *)((uint32_t)(ETH_MAC_ADDR_HBASE + MacAddr))) = tmpreg1; + /* Calculate the selected MAC address low register */ + tmpreg1 = ((uint32_t)Addr[3U] << 24U) | ((uint32_t)Addr[2U] << 16U) | ((uint32_t)Addr[1U] << 8U) | Addr[0U]; + + /* Load the selected MAC address low register */ + (*(__IO uint32_t *)((uint32_t)(ETH_MAC_ADDR_LBASE + MacAddr))) = tmpreg1; +} + +/** + * @brief Initializes the DMA Tx descriptors. + * called by HAL_ETH_Init() API. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_DMATxDescListInit(ETH_HandleTypeDef *heth) +{ + ETH_DMADescTypeDef *dmatxdesc; + uint32_t i; + + /* Fill each DMATxDesc descriptor with the right values */ + for (i = 0; i < (uint32_t)ETH_TX_DESC_CNT; i++) + { + dmatxdesc = heth->Init.TxDesc + i; + + WRITE_REG(dmatxdesc->DESC0, 0x0U); + WRITE_REG(dmatxdesc->DESC1, 0x0U); + WRITE_REG(dmatxdesc->DESC2, 0x0U); + WRITE_REG(dmatxdesc->DESC3, 0x0U); + + WRITE_REG(heth->TxDescList.TxDesc[i], (uint32_t)dmatxdesc); + + /* Set Second Address Chained bit */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_TCH); + + if (i < ((uint32_t)ETH_TX_DESC_CNT - 1U)) + { + WRITE_REG(dmatxdesc->DESC3, (uint32_t)(heth->Init.TxDesc + i + 1U)); + } + else + { + WRITE_REG(dmatxdesc->DESC3, (uint32_t)(heth->Init.TxDesc)); + } + + /* Set the DMA Tx descriptors checksum insertion */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_CHECKSUMTCPUDPICMPFULL); + } + + heth->TxDescList.CurTxDesc = 0; + + /* Set Transmit Descriptor List Address */ + WRITE_REG(heth->Instance->DMATDLAR, (uint32_t) heth->Init.TxDesc); +} + +/** + * @brief Initializes the DMA Rx descriptors in chain mode. + * called by HAL_ETH_Init() API. 
+ * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_DMARxDescListInit(ETH_HandleTypeDef *heth) +{ + ETH_DMADescTypeDef *dmarxdesc; + uint32_t i; + + for (i = 0; i < (uint32_t)ETH_RX_DESC_CNT; i++) + { + dmarxdesc = heth->Init.RxDesc + i; + + WRITE_REG(dmarxdesc->DESC0, 0x0U); + WRITE_REG(dmarxdesc->DESC1, 0x0U); + WRITE_REG(dmarxdesc->DESC2, 0x0U); + WRITE_REG(dmarxdesc->DESC3, 0x0U); + WRITE_REG(dmarxdesc->BackupAddr0, 0x0U); + WRITE_REG(dmarxdesc->BackupAddr1, 0x0U); + + /* Set Own bit of the Rx descriptor Status */ + dmarxdesc->DESC0 = ETH_DMARXDESC_OWN; + + /* Set Buffer1 size and Second Address Chained bit */ + dmarxdesc->DESC1 = heth->Init.RxBuffLen | ETH_DMARXDESC_RCH; + + /* Enable Ethernet DMA Rx Descriptor interrupt */ + dmarxdesc->DESC1 &= ~ETH_DMARXDESC_DIC; + /* Set Rx descritors addresses */ + WRITE_REG(heth->RxDescList.RxDesc[i], (uint32_t)dmarxdesc); + + if (i < ((uint32_t)ETH_RX_DESC_CNT - 1U)) + { + WRITE_REG(dmarxdesc->DESC3, (uint32_t)(heth->Init.RxDesc + i + 1U)); + } + else + { + WRITE_REG(dmarxdesc->DESC3, (uint32_t)(heth->Init.RxDesc)); + } + } + + WRITE_REG(heth->RxDescList.RxDescIdx, 0U); + WRITE_REG(heth->RxDescList.RxDescCnt, 0U); + WRITE_REG(heth->RxDescList.RxBuildDescIdx, 0U); + WRITE_REG(heth->RxDescList.RxBuildDescCnt, 0U); + WRITE_REG(heth->RxDescList.ItMode, 0U); + + /* Set Receive Descriptor List Address */ + WRITE_REG(heth->Instance->DMARDLAR, (uint32_t) heth->Init.RxDesc); +} + +/** + * @brief Prepare Tx DMA descriptor before transmission. + * called by HAL_ETH_Transmit_IT and HAL_ETH_Transmit_IT() API. + * @param heth: pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param pTxConfig: Tx packet configuration + * @param ItMode: Enable or disable Tx EOT interrept + * @retval Status + */ +static uint32_t ETH_Prepare_Tx_Descriptors(ETH_HandleTypeDef *heth, const ETH_TxPacketConfigTypeDef *pTxConfig, + uint32_t ItMode) +{ + ETH_TxDescListTypeDef *dmatxdesclist = &heth->TxDescList; + uint32_t descidx = dmatxdesclist->CurTxDesc; + uint32_t firstdescidx = dmatxdesclist->CurTxDesc; + uint32_t idx; + uint32_t descnbr = 0; + ETH_DMADescTypeDef *dmatxdesc = (ETH_DMADescTypeDef *)dmatxdesclist->TxDesc[descidx]; + + ETH_BufferTypeDef *txbuffer = pTxConfig->TxBuffer; + uint32_t bd_count = 0; + uint32_t primask_bit; + + /* Current Tx Descriptor Owned by DMA: cannot be used by the application */ + if ((READ_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_OWN) == ETH_DMATXDESC_OWN) + || (dmatxdesclist->PacketAddress[descidx] != NULL)) + { + return HAL_ETH_ERROR_BUSY; + } + + + descnbr += 1U; + + /* Set header or buffer 1 address */ + WRITE_REG(dmatxdesc->DESC2, (uint32_t)txbuffer->buffer); + + /* Set header or buffer 1 Length */ + MODIFY_REG(dmatxdesc->DESC1, ETH_DMATXDESC_TBS1, txbuffer->len); + + if (READ_BIT(pTxConfig->Attributes, ETH_TX_PACKETS_FEATURES_CSUM) != 0U) + { + MODIFY_REG(dmatxdesc->DESC0, ETH_DMATXDESC_CIC, pTxConfig->ChecksumCtrl); + } + + if (READ_BIT(pTxConfig->Attributes, ETH_TX_PACKETS_FEATURES_CRCPAD) != 0U) + { + MODIFY_REG(dmatxdesc->DESC0, ETH_CRC_PAD_DISABLE, pTxConfig->CRCPadCtrl); + } + + + if (READ_BIT(pTxConfig->Attributes, ETH_TX_PACKETS_FEATURES_VLANTAG) != 0U) + { + /* Set Vlan Type */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_VF); + } + + /* Mark it as First Descriptor */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_FS); + + /* only if the packet is split into more than one 
descriptors > 1 */ + while (txbuffer->next != NULL) + { + /* Clear the LD bit of previous descriptor */ + CLEAR_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_LS); + if (ItMode != ((uint32_t)RESET)) + { + /* Set Interrupt on completion bit */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_IC); + } + else + { + /* Clear Interrupt on completion bit */ + CLEAR_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_IC); + } + /* Increment current tx descriptor index */ + INCR_TX_DESC_INDEX(descidx, 1U); + /* Get current descriptor address */ + dmatxdesc = (ETH_DMADescTypeDef *)dmatxdesclist->TxDesc[descidx]; + + /* Current Tx Descriptor Owned by DMA: cannot be used by the application */ + if ((READ_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_OWN) == ETH_DMATXDESC_OWN) + || (dmatxdesclist->PacketAddress[descidx] != NULL)) + { + descidx = firstdescidx; + dmatxdesc = (ETH_DMADescTypeDef *)dmatxdesclist->TxDesc[descidx]; + + /* clear previous desc own bit */ + for (idx = 0; idx < descnbr; idx ++) + { + /* Ensure rest of descriptor is written to RAM before the OWN bit */ + __DMB(); + + CLEAR_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_OWN); + + /* Increment current tx descriptor index */ + INCR_TX_DESC_INDEX(descidx, 1U); + /* Get current descriptor address */ + dmatxdesc = (ETH_DMADescTypeDef *)dmatxdesclist->TxDesc[descidx]; + } + + return HAL_ETH_ERROR_BUSY; + } + + /* Clear the FD bit of new Descriptor */ + CLEAR_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_FS); + + descnbr += 1U; + + /* Get the next Tx buffer in the list */ + txbuffer = txbuffer->next; + + /* Set header or buffer 1 address */ + WRITE_REG(dmatxdesc->DESC2, (uint32_t)txbuffer->buffer); + + /* Set header or buffer 1 Length */ + MODIFY_REG(dmatxdesc->DESC1, ETH_DMATXDESC_TBS1, txbuffer->len); + + bd_count += 1U; + + /* Ensure rest of descriptor is written to RAM before the OWN bit */ + __DMB(); + /* Set Own bit */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_OWN); + } + + if (ItMode != ((uint32_t)RESET)) + { + /* Set Interrupt on completion bit */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_IC); + } + else + { + /* Clear Interrupt on completion bit */ + CLEAR_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_IC); + } + + /* Mark it as LAST descriptor */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_LS); + + /* Get address of first descriptor */ + dmatxdesc = (ETH_DMADescTypeDef *)dmatxdesclist->TxDesc[firstdescidx]; + /* Ensure rest of descriptor is written to RAM before the OWN bit */ + __DMB(); + /* set OWN bit of FIRST descriptor */ + SET_BIT(dmatxdesc->DESC0, ETH_DMATXDESC_OWN); + /* Save the current packet address to expose it to the application */ + dmatxdesclist->PacketAddress[descidx] = dmatxdesclist->CurrentPacketAddress; + + dmatxdesclist->CurTxDesc = descidx; + + /* Enter critical section */ + primask_bit = __get_PRIMASK(); + __set_PRIMASK(1); + + dmatxdesclist->BuffersInUse += bd_count + 1U; + + /* Exit critical section: restore previous priority mask */ + __set_PRIMASK(primask_bit); + + /* Return function status */ + return HAL_ETH_ERROR_NONE; +} + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +static void ETH_InitCallbacksToDefault(ETH_HandleTypeDef *heth) +{ + /* Init the ETH Callback settings */ + heth->TxCpltCallback = HAL_ETH_TxCpltCallback; /* Legacy weak TxCpltCallback */ + heth->RxCpltCallback = HAL_ETH_RxCpltCallback; /* Legacy weak RxCpltCallback */ + heth->ErrorCallback = HAL_ETH_ErrorCallback; /* Legacy weak ErrorCallback */ + heth->PMTCallback = HAL_ETH_PMTCallback; /* Legacy weak PMTCallback */ + heth->WakeUpCallback = HAL_ETH_WakeUpCallback; /* Legacy weak WakeUpCallback */ + 
heth->rxLinkCallback = HAL_ETH_RxLinkCallback; /* Legacy weak RxLinkCallback */ + heth->txFreeCallback = HAL_ETH_TxFreeCallback; /* Legacy weak TxFreeCallback */ +#ifdef HAL_ETH_USE_PTP + heth->txPtpCallback = HAL_ETH_TxPtpCallback; /* Legacy weak TxPtpCallback */ +#endif /* HAL_ETH_USE_PTP */ + heth->rxAllocateCallback = HAL_ETH_RxAllocateCallback; /* Legacy weak RxAllocateCallback */ +} +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* ETH */ + +#endif /* HAL_ETH_MODULE_ENABLED */ + +/** + * @} + */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_exti.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_exti.c index 7cf3e092..3e463122 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_exti.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_exti.c @@ -8,6 +8,17 @@ * + Initialization and de-initialization functions * + IO operation functions * + ****************************************************************************** + * @attention + * + * Copyright (c) 2018 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim ============================================================================== ##### EXTI Peripheral features ##### @@ -53,7 +64,7 @@ (++) Provide exiting handle as parameter. (++) Provide pointer on EXTI_ConfigTypeDef structure as second parameter. - (#) Clear Exti configuration of a dedicated line using HAL_EXTI_GetConfigLine(). + (#) Clear Exti configuration of a dedicated line using HAL_EXTI_ClearConfigLine(). (++) Provide exiting handle as parameter. (#) Register callback to treat Exti interrupts using HAL_EXTI_RegisterCallback(). @@ -64,23 +75,11 @@ (#) Get interrupt pending bit using HAL_EXTI_GetPending(). - (#) Clear interrupt pending bit using HAL_EXTI_GetPending(). + (#) Clear interrupt pending bit using HAL_EXTI_ClearPending(). (#) Generate software interrupt using HAL_EXTI_GenerateSWI(). @endverbatim - ****************************************************************************** - * @attention - * - *

- * <h2><center>&copy; Copyright (c) 2018 STMicroelectronics.
- * All rights reserved.</center></h2>

- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * - ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ @@ -276,6 +275,10 @@ HAL_StatusTypeDef HAL_EXTI_GetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigT pExtiConfig->Mode |= EXTI_MODE_EVENT; } + /* Get default Trigger and GPIOSel configuration */ + pExtiConfig->Trigger = EXTI_TRIGGER_NONE; + pExtiConfig->GPIOSel = 0x00u; + /* 2] Get trigger for configurable lines : rising */ if ((pExtiConfig->Line & EXTI_CONFIG) != 0x00u) { @@ -284,10 +287,6 @@ HAL_StatusTypeDef HAL_EXTI_GetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigT { pExtiConfig->Trigger = EXTI_TRIGGER_RISING; } - else - { - pExtiConfig->Trigger = EXTI_TRIGGER_NONE; - } /* Get falling configuration */ /* Check if configuration of selected line is enable */ @@ -302,19 +301,9 @@ HAL_StatusTypeDef HAL_EXTI_GetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigT assert_param(IS_EXTI_GPIO_PIN(linepos)); regval = SYSCFG->EXTICR[linepos >> 2u]; - pExtiConfig->GPIOSel = ((regval << (SYSCFG_EXTICR1_EXTI1_Pos * (3uL - (linepos & 0x03u)))) >> 24); - } - else - { - pExtiConfig->GPIOSel = 0x00u; + pExtiConfig->GPIOSel = (regval >> (SYSCFG_EXTICR1_EXTI1_Pos * (linepos & 0x03u))) & SYSCFG_EXTICR1_EXTI0; } } - else - { - /* No Trigger selected */ - pExtiConfig->Trigger = EXTI_TRIGGER_NONE; - pExtiConfig->GPIOSel = 0x00u; - } return HAL_OK; } @@ -480,6 +469,9 @@ uint32_t HAL_EXTI_GetPending(EXTI_HandleTypeDef *hexti, uint32_t Edge) uint32_t linepos; uint32_t maskline; + /* Prevent unused argument(s) compilation warning */ + UNUSED(Edge); + /* Check parameters */ assert_param(IS_EXTI_LINE(hexti->Line)); assert_param(IS_EXTI_CONFIG_LINE(hexti->Line)); @@ -507,6 +499,9 @@ void HAL_EXTI_ClearPending(EXTI_HandleTypeDef *hexti, uint32_t Edge) { uint32_t maskline; + /* Prevent unused argument(s) compilation warning */ + UNUSED(Edge); + /* Check parameters */ assert_param(IS_EXTI_LINE(hexti->Line)); assert_param(IS_EXTI_CONFIG_LINE(hexti->Line)); @@ -556,4 +551,3 @@ void HAL_EXTI_GenerateSWI(EXTI_HandleTypeDef *hexti) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash.c index 69b47a6e..39f19f2d 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash.c @@ -3,23 +3,23 @@ * @file stm32f4xx_hal_flash.c * @author MCD Application Team * @brief FLASH HAL module driver. - * This file provides firmware functions to manage the following + * This file provides firmware functions to manage the following * functionalities of the internal FLASH memory: * + Program operations functions - * + Memory Control functions + * + Memory Control functions * + Peripheral Errors functions - * + * @verbatim ============================================================================== ##### FLASH peripheral features ##### ============================================================================== - - [..] The Flash memory interface manages CPU AHB I-Code and D-Code accesses - to the Flash memory. It implements the erase and program Flash memory operations + + [..] 
The Flash memory interface manages CPU AHB I-Code and D-Code accesses + to the Flash memory. It implements the erase and program Flash memory operations and the read and write protection mechanisms. - + [..] The Flash memory interface accelerates code execution with a system of instruction - prefetch and cache lines. + prefetch and cache lines. [..] The FLASH main features are: (+) Flash memory read operations @@ -28,28 +28,28 @@ (+) Prefetch on I-Code (+) 64 cache lines of 128 bits on I-Code (+) 8 cache lines of 128 bits on D-Code - - + + ##### How to use this driver ##### ============================================================================== - [..] - This driver provides functions and macros to configure and program the FLASH + [..] + This driver provides functions and macros to configure and program the FLASH memory of all STM32F4xx devices. - - (#) FLASH Memory IO Programming functions: - (++) Lock and Unlock the FLASH interface using HAL_FLASH_Unlock() and + + (#) FLASH Memory IO Programming functions: + (++) Lock and Unlock the FLASH interface using HAL_FLASH_Unlock() and HAL_FLASH_Lock() functions (++) Program functions: byte, half word, word and double word (++) There Two modes of programming : (+++) Polling mode using HAL_FLASH_Program() function (+++) Interrupt mode using HAL_FLASH_Program_IT() function - - (#) Interrupts and flags management functions : + + (#) Interrupts and flags management functions : (++) Handle FLASH interrupts by calling HAL_FLASH_IRQHandler() (++) Wait for last FLASH operation according to its status - (++) Get error flag status by calling HAL_SetErrorCode() + (++) Get error flag status by calling HAL_SetErrorCode() - [..] + [..] In addition to these functions, this driver includes a set of macros allowing to handle the following operations: (+) Set the latency @@ -58,21 +58,19 @@ (+) Reset the Instruction cache and the Data cache (+) Enable/Disable the FLASH interrupts (+) Monitor the FLASH flags status - + @endverbatim ****************************************************************************** * @attention * - *

- * <h2><center>&copy; Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.</center></h2>

- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** - */ + */ /* Includes ------------------------------------------------------------------*/ #include "stm32f4xx_hal.h" @@ -96,7 +94,7 @@ #define FLASH_TIMEOUT_VALUE 50000U /* 50 s */ /** * @} - */ + */ /* Private macro -------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /** @addtogroup FLASH_Private_Variables @@ -128,16 +126,16 @@ HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); /** @defgroup FLASH_Exported_Functions FLASH Exported Functions * @{ */ - -/** @defgroup FLASH_Exported_Functions_Group1 Programming operation functions - * @brief Programming operation functions - * -@verbatim + +/** @defgroup FLASH_Exported_Functions_Group1 Programming operation functions + * @brief Programming operation functions + * +@verbatim =============================================================================== ##### Programming operation functions ##### - =============================================================================== + =============================================================================== [..] - This subsection provides a set of functions allowing to manage the FLASH + This subsection provides a set of functions allowing to manage the FLASH program operations. @endverbatim @@ -150,35 +148,35 @@ HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); * This parameter can be a value of @ref FLASH_Type_Program * @param Address specifies the address to be programmed. 
* @param Data specifies the data to be programmed - * + * * @retval HAL_StatusTypeDef HAL Status */ HAL_StatusTypeDef HAL_FLASH_Program(uint32_t TypeProgram, uint32_t Address, uint64_t Data) { HAL_StatusTypeDef status = HAL_ERROR; - + /* Process Locked */ __HAL_LOCK(&pFlash); - + /* Check the parameters */ assert_param(IS_FLASH_TYPEPROGRAM(TypeProgram)); - + /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - - if(status == HAL_OK) + + if (status == HAL_OK) { - if(TypeProgram == FLASH_TYPEPROGRAM_BYTE) + if (TypeProgram == FLASH_TYPEPROGRAM_BYTE) { /*Program byte (8-bit) at a specified address.*/ FLASH_Program_Byte(Address, (uint8_t) Data); } - else if(TypeProgram == FLASH_TYPEPROGRAM_HALFWORD) + else if (TypeProgram == FLASH_TYPEPROGRAM_HALFWORD) { /*Program halfword (16-bit) at a specified address.*/ FLASH_Program_HalfWord(Address, (uint16_t) Data); } - else if(TypeProgram == FLASH_TYPEPROGRAM_WORD) + else if (TypeProgram == FLASH_TYPEPROGRAM_WORD) { /*Program word (32-bit) at a specified address.*/ FLASH_Program_Word(Address, (uint32_t) Data); @@ -188,17 +186,17 @@ HAL_StatusTypeDef HAL_FLASH_Program(uint32_t TypeProgram, uint32_t Address, uint /*Program double word (64-bit) at a specified address.*/ FLASH_Program_DoubleWord(Address, Data); } - + /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - + /* If the program operation is completed, disable the PG Bit */ - FLASH->CR &= (~FLASH_CR_PG); + FLASH->CR &= (~FLASH_CR_PG); } - + /* Process Unlocked */ __HAL_UNLOCK(&pFlash); - + return status; } @@ -208,39 +206,36 @@ HAL_StatusTypeDef HAL_FLASH_Program(uint32_t TypeProgram, uint32_t Address, uint * This parameter can be a value of @ref FLASH_Type_Program * @param Address specifies the address to be programmed. 
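/* A minimal polling-mode sketch of the programming flow shown above: unlock the
 * interface, program one word with HAL_FLASH_Program(), check the error code and
 * relock. The target address and data value are illustrative assumptions; the
 * sector containing the address is assumed to have been erased beforehand. */
#include "stm32f4xx_hal.h"

void User_ProgramWordExample(void)
{
  uint32_t address = 0x08060000U;   /* assumed free, previously erased flash location */
  uint32_t data    = 0x12345678U;   /* arbitrary payload */

  HAL_FLASH_Unlock();               /* open FLASH control register access */

  if (HAL_FLASH_Program(FLASH_TYPEPROGRAM_WORD, address, (uint64_t)data) != HAL_OK)
  {
    uint32_t err = HAL_FLASH_GetError();   /* combination of HAL_FLASH_ERROR_xxx flags */
    (void)err;                             /* handle or log the error as needed */
  }

  HAL_FLASH_Lock();                 /* relock when done */
}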
* @param Data specifies the data to be programmed - * + * * @retval HAL Status */ HAL_StatusTypeDef HAL_FLASH_Program_IT(uint32_t TypeProgram, uint32_t Address, uint64_t Data) { HAL_StatusTypeDef status = HAL_OK; - - /* Process Locked */ - __HAL_LOCK(&pFlash); /* Check the parameters */ assert_param(IS_FLASH_TYPEPROGRAM(TypeProgram)); /* Enable End of FLASH Operation interrupt */ __HAL_FLASH_ENABLE_IT(FLASH_IT_EOP); - + /* Enable Error source interrupt */ __HAL_FLASH_ENABLE_IT(FLASH_IT_ERR); pFlash.ProcedureOnGoing = FLASH_PROC_PROGRAM; pFlash.Address = Address; - if(TypeProgram == FLASH_TYPEPROGRAM_BYTE) + if (TypeProgram == FLASH_TYPEPROGRAM_BYTE) { /*Program byte (8-bit) at a specified address.*/ - FLASH_Program_Byte(Address, (uint8_t) Data); + FLASH_Program_Byte(Address, (uint8_t) Data); } - else if(TypeProgram == FLASH_TYPEPROGRAM_HALFWORD) + else if (TypeProgram == FLASH_TYPEPROGRAM_HALFWORD) { /*Program halfword (16-bit) at a specified address.*/ FLASH_Program_HalfWord(Address, (uint16_t) Data); } - else if(TypeProgram == FLASH_TYPEPROGRAM_WORD) + else if (TypeProgram == FLASH_TYPEPROGRAM_WORD) { /*Program word (32-bit) at a specified address.*/ FLASH_Program_Word(Address, (uint32_t) Data); @@ -261,23 +256,23 @@ HAL_StatusTypeDef HAL_FLASH_Program_IT(uint32_t TypeProgram, uint32_t Address, u void HAL_FLASH_IRQHandler(void) { uint32_t addresstmp = 0U; - + /* Check FLASH operation error flags */ -#if defined(FLASH_SR_RDERR) - if(__HAL_FLASH_GET_FLAG((FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ - FLASH_FLAG_PGPERR | FLASH_FLAG_PGSERR | FLASH_FLAG_RDERR)) != RESET) +#if defined(FLASH_SR_RDERR) + if (__HAL_FLASH_GET_FLAG((FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ + FLASH_FLAG_PGPERR | FLASH_FLAG_PGSERR | FLASH_FLAG_RDERR)) != RESET) #else - if(__HAL_FLASH_GET_FLAG((FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ - FLASH_FLAG_PGPERR | FLASH_FLAG_PGSERR)) != RESET) + if (__HAL_FLASH_GET_FLAG((FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ + FLASH_FLAG_PGPERR | FLASH_FLAG_PGSERR)) != RESET) #endif /* FLASH_SR_RDERR */ { - if(pFlash.ProcedureOnGoing == FLASH_PROC_SECTERASE) + if (pFlash.ProcedureOnGoing == FLASH_PROC_SECTERASE) { /*return the faulty sector*/ addresstmp = pFlash.Sector; pFlash.Sector = 0xFFFFFFFFU; } - else if(pFlash.ProcedureOnGoing == FLASH_PROC_MASSERASE) + else if (pFlash.ProcedureOnGoing == FLASH_PROC_MASSERASE) { /*return the faulty bank*/ addresstmp = pFlash.Bank; @@ -287,35 +282,35 @@ void HAL_FLASH_IRQHandler(void) /*return the faulty address*/ addresstmp = pFlash.Address; } - + /*Save the Error code*/ FLASH_SetErrorCode(); - + /* FLASH error interrupt user callback */ HAL_FLASH_OperationErrorCallback(addresstmp); - + /*Stop the procedure ongoing*/ pFlash.ProcedureOnGoing = FLASH_PROC_NONE; } - + /* Check FLASH End of Operation flag */ - if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_EOP) != RESET) + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_EOP) != RESET) { /* Clear FLASH End of Operation pending bit */ __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP); - - if(pFlash.ProcedureOnGoing == FLASH_PROC_SECTERASE) + + if (pFlash.ProcedureOnGoing == FLASH_PROC_SECTERASE) { /*Nb of sector to erased can be decreased*/ pFlash.NbSectorsToErase--; - + /* Check if there are still sectors to erase*/ - if(pFlash.NbSectorsToErase != 0U) + if (pFlash.NbSectorsToErase != 0U) { addresstmp = pFlash.Sector; /*Indicate user which sector has been erased*/ HAL_FLASH_EndOfOperationCallback(addresstmp); - + /*Increment sector number*/ pFlash.Sector++; 
addresstmp = pFlash.Sector; @@ -327,21 +322,21 @@ void HAL_FLASH_IRQHandler(void) /*Reset Sector and stop Erase sectors procedure*/ pFlash.Sector = addresstmp = 0xFFFFFFFFU; pFlash.ProcedureOnGoing = FLASH_PROC_NONE; - + /* Flush the caches to be sure of the data consistency */ - FLASH_FlushCaches() ; - + FLASH_FlushCaches(); + /* FLASH EOP interrupt user callback */ HAL_FLASH_EndOfOperationCallback(addresstmp); } } - else + else { - if(pFlash.ProcedureOnGoing == FLASH_PROC_MASSERASE) + if (pFlash.ProcedureOnGoing == FLASH_PROC_MASSERASE) { /* MassErase ended. Return the selected bank */ /* Flush the caches to be sure of the data consistency */ - FLASH_FlushCaches() ; + FLASH_FlushCaches(); /* FLASH EOP interrupt user callback */ HAL_FLASH_EndOfOperationCallback(pFlash.Bank); @@ -355,20 +350,17 @@ void HAL_FLASH_IRQHandler(void) pFlash.ProcedureOnGoing = FLASH_PROC_NONE; } } - - if(pFlash.ProcedureOnGoing == FLASH_PROC_NONE) + + if (pFlash.ProcedureOnGoing == FLASH_PROC_NONE) { /* Operation is completed, disable the PG, SER, SNB and MER Bits */ CLEAR_BIT(FLASH->CR, (FLASH_CR_PG | FLASH_CR_SER | FLASH_CR_SNB | FLASH_MER_BIT)); /* Disable End of FLASH Operation interrupt */ __HAL_FLASH_DISABLE_IT(FLASH_IT_EOP); - + /* Disable Error source interrupt */ __HAL_FLASH_DISABLE_IT(FLASH_IT_ERR); - - /* Process Unlocked */ - __HAL_UNLOCK(&pFlash); } } @@ -376,7 +368,7 @@ void HAL_FLASH_IRQHandler(void) * @brief FLASH end of operation interrupt callback * @param ReturnValue The value saved in this parameter depends on the ongoing procedure * Mass Erase: Bank number which has been requested to erase - * Sectors Erase: Sector which has been erased + * Sectors Erase: Sector which has been erased * (if 0xFFFFFFFFU, it means that all the selected sectors have been erased) * Program: Address which was selected for data program * @retval None @@ -387,7 +379,7 @@ __weak void HAL_FLASH_EndOfOperationCallback(uint32_t ReturnValue) UNUSED(ReturnValue); /* NOTE : This function Should not be modified, when the callback is needed, the HAL_FLASH_EndOfOperationCallback could be implemented in the user file - */ + */ } /** @@ -404,22 +396,22 @@ __weak void HAL_FLASH_OperationErrorCallback(uint32_t ReturnValue) UNUSED(ReturnValue); /* NOTE : This function Should not be modified, when the callback is needed, the HAL_FLASH_OperationErrorCallback could be implemented in the user file - */ + */ } /** * @} */ -/** @defgroup FLASH_Exported_Functions_Group2 Peripheral Control functions - * @brief management functions - * -@verbatim +/** @defgroup FLASH_Exported_Functions_Group2 Peripheral Control functions + * @brief management functions + * +@verbatim =============================================================================== ##### Peripheral Control functions ##### - =============================================================================== + =============================================================================== [..] - This subsection provides a set of functions allowing to control the FLASH + This subsection provides a set of functions allowing to control the FLASH memory operations. 
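/* Interrupt-mode sketch matching the IRQ handler and weak callbacks shown above.
 * FLASH_IRQn must be enabled in the NVIC and the vector must forward to
 * HAL_FLASH_IRQHandler(); the address, data and priority values are illustrative
 * assumptions, and the target location is assumed to be erased beforehand. */
#include "stm32f4xx_hal.h"

static volatile uint8_t flash_op_done = 0U;   /* 0 = busy, 1 = done, 2 = error */

void User_ProgramWordItExample(void)
{
  HAL_NVIC_SetPriority(FLASH_IRQn, 5U, 0U);   /* arbitrary priority */
  HAL_NVIC_EnableIRQ(FLASH_IRQn);

  HAL_FLASH_Unlock();
  flash_op_done = 0U;
  HAL_FLASH_Program_IT(FLASH_TYPEPROGRAM_WORD, 0x08060000U, 0x12345678U);
  /* ... continue with other work; completion is signalled by the callbacks ... */
}

void FLASH_IRQHandler(void)                   /* typically placed in stm32f4xx_it.c */
{
  HAL_FLASH_IRQHandler();
}

void HAL_FLASH_EndOfOperationCallback(uint32_t ReturnValue)
{
  (void)ReturnValue;                          /* programmed address for a program operation */
  flash_op_done = 1U;
  HAL_FLASH_Lock();
}

void HAL_FLASH_OperationErrorCallback(uint32_t ReturnValue)
{
  (void)ReturnValue;                          /* inspect HAL_FLASH_GetError() for details */
  flash_op_done = 2U;
  HAL_FLASH_Lock();
}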
@endverbatim @@ -434,14 +426,14 @@ HAL_StatusTypeDef HAL_FLASH_Unlock(void) { HAL_StatusTypeDef status = HAL_OK; - if(READ_BIT(FLASH->CR, FLASH_CR_LOCK) != RESET) + if (READ_BIT(FLASH->CR, FLASH_CR_LOCK) != RESET) { /* Authorize the FLASH Registers access */ WRITE_REG(FLASH->KEYR, FLASH_KEY1); WRITE_REG(FLASH->KEYR, FLASH_KEY2); /* Verify Flash is unlocked */ - if(READ_BIT(FLASH->CR, FLASH_CR_LOCK) != RESET) + if (READ_BIT(FLASH->CR, FLASH_CR_LOCK) != RESET) { status = HAL_ERROR; } @@ -458,8 +450,8 @@ HAL_StatusTypeDef HAL_FLASH_Lock(void) { /* Set the LOCK Bit to lock the FLASH Registers access */ FLASH->CR |= FLASH_CR_LOCK; - - return HAL_OK; + + return HAL_OK; } /** @@ -468,7 +460,7 @@ HAL_StatusTypeDef HAL_FLASH_Lock(void) */ HAL_StatusTypeDef HAL_FLASH_OB_Unlock(void) { - if((FLASH->OPTCR & FLASH_OPTCR_OPTLOCK) != RESET) + if ((FLASH->OPTCR & FLASH_OPTCR_OPTLOCK) != RESET) { /* Authorizes the Option Byte register programming */ FLASH->OPTKEYR = FLASH_OPT_KEY1; @@ -477,21 +469,21 @@ HAL_StatusTypeDef HAL_FLASH_OB_Unlock(void) else { return HAL_ERROR; - } - - return HAL_OK; + } + + return HAL_OK; } /** * @brief Lock the FLASH Option Control Registers access. - * @retval HAL Status + * @retval HAL Status */ HAL_StatusTypeDef HAL_FLASH_OB_Lock(void) { /* Set the OPTLOCK Bit to lock the FLASH Option Byte Registers access */ FLASH->OPTCR |= FLASH_OPTCR_OPTLOCK; - - return HAL_OK; + + return HAL_OK; } /** @@ -504,20 +496,20 @@ HAL_StatusTypeDef HAL_FLASH_OB_Launch(void) *(__IO uint8_t *)OPTCR_BYTE0_ADDRESS |= FLASH_OPTCR_OPTSTRT; /* Wait for last operation to be completed */ - return(FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE)); + return (FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE)); } /** * @} */ -/** @defgroup FLASH_Exported_Functions_Group3 Peripheral State and Errors functions - * @brief Peripheral Errors functions - * -@verbatim +/** @defgroup FLASH_Exported_Functions_Group3 Peripheral State and Errors functions + * @brief Peripheral Errors functions + * +@verbatim =============================================================================== ##### Peripheral Errors functions ##### - =============================================================================== + =============================================================================== [..] This subsection permits to get in run-time Errors of the FLASH peripheral. @@ -529,20 +521,20 @@ HAL_StatusTypeDef HAL_FLASH_OB_Launch(void) * @brief Get the specific FLASH error flag. * @retval FLASH_ErrorCode: The returned value can be a combination of: * @arg HAL_FLASH_ERROR_RD: FLASH Read Protection error flag (PCROP) - * @arg HAL_FLASH_ERROR_PGS: FLASH Programming Sequence error flag - * @arg HAL_FLASH_ERROR_PGP: FLASH Programming Parallelism error flag + * @arg HAL_FLASH_ERROR_PGS: FLASH Programming Sequence error flag + * @arg HAL_FLASH_ERROR_PGP: FLASH Programming Parallelism error flag * @arg HAL_FLASH_ERROR_PGA: FLASH Programming Alignment error flag * @arg HAL_FLASH_ERROR_WRP: FLASH Write protected error flag - * @arg HAL_FLASH_ERROR_OPERATION: FLASH operation Error flag + * @arg HAL_FLASH_ERROR_OPERATION: FLASH operation Error flag */ uint32_t HAL_FLASH_GetError(void) -{ - return pFlash.ErrorCode; -} - +{ + return pFlash.ErrorCode; +} + /** * @} - */ + */ /** * @brief Wait for a FLASH operation to complete. 
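/* Sketch of the unlock/relock pairing provided by the control functions above,
 * with error decoding through HAL_FLASH_GetError(). Purely illustrative; the
 * commented call in the middle stands for any programming or erase service. */
#include "stm32f4xx_hal.h"

void User_FlashAccessExample(void)
{
  if (HAL_FLASH_Unlock() != HAL_OK)
  {
    return;                                   /* CR stayed locked: no operation will succeed */
  }

  /* ... call HAL_FLASH_Program() or HAL_FLASHEx_Erase() here ... */

  if ((HAL_FLASH_GetError() & HAL_FLASH_ERROR_WRP) != 0U)
  {
    /* target sector is write-protected: clear WRP through the option bytes first */
  }

  HAL_FLASH_Lock();
}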
@@ -550,27 +542,27 @@ uint32_t HAL_FLASH_GetError(void) * @retval HAL Status */ HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout) -{ +{ uint32_t tickstart = 0U; - + /* Clear Error Code */ pFlash.ErrorCode = HAL_FLASH_ERROR_NONE; - + /* Wait for the FLASH operation to complete by polling on BUSY flag to be reset. Even if the FLASH operation fails, the BUSY flag will be reset and an error flag will be set */ /* Get tick */ tickstart = HAL_GetTick(); - while(__HAL_FLASH_GET_FLAG(FLASH_FLAG_BSY) != RESET) - { - if(Timeout != HAL_MAX_DELAY) + while (__HAL_FLASH_GET_FLAG(FLASH_FLAG_BSY) != RESET) + { + if (Timeout != HAL_MAX_DELAY) { - if((Timeout == 0U)||((HAL_GetTick() - tickstart ) > Timeout)) + if ((Timeout == 0U) || ((HAL_GetTick() - tickstart) > Timeout)) { return HAL_TIMEOUT; } - } + } } /* Check FLASH End of Operation flag */ @@ -579,12 +571,12 @@ HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout) /* Clear FLASH End of Operation pending bit */ __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP); } -#if defined(FLASH_SR_RDERR) - if(__HAL_FLASH_GET_FLAG((FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ - FLASH_FLAG_PGPERR | FLASH_FLAG_PGSERR | FLASH_FLAG_RDERR)) != RESET) +#if defined(FLASH_SR_RDERR) + if (__HAL_FLASH_GET_FLAG((FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ + FLASH_FLAG_PGPERR | FLASH_FLAG_PGSERR | FLASH_FLAG_RDERR)) != RESET) #else - if(__HAL_FLASH_GET_FLAG((FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ - FLASH_FLAG_PGPERR | FLASH_FLAG_PGSERR)) != RESET) + if (__HAL_FLASH_GET_FLAG((FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ + FLASH_FLAG_PGPERR | FLASH_FLAG_PGSERR)) != RESET) #endif /* FLASH_SR_RDERR */ { /*Save the error code*/ @@ -594,17 +586,17 @@ HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout) /* If there is no error flag set */ return HAL_OK; - -} + +} /** * @brief Program a double word (64-bit) at a specified address. * @note This function must be used when the device voltage range is from * 2.7V to 3.6V and Vpp in the range 7V to 9V. * - * @note If an erase and a program operations are requested simultaneously, + * @note If an erase and a program operations are requested simultaneously, * the erase operation is performed before the program one. - * + * * @param Address specifies the address to be programmed. * @param Data specifies the data to be programmed. * @retval None @@ -613,21 +605,21 @@ static void FLASH_Program_DoubleWord(uint32_t Address, uint64_t Data) { /* Check the parameters */ assert_param(IS_FLASH_ADDRESS(Address)); - + /* If the previous operation is completed, proceed to program the new data */ CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE); FLASH->CR |= FLASH_PSIZE_DOUBLE_WORD; FLASH->CR |= FLASH_CR_PG; /* Program first word */ - *(__IO uint32_t*)Address = (uint32_t)Data; + *(__IO uint32_t *)Address = (uint32_t)Data; /* Barrier to ensure programming is performed in 2 steps, in right order (independently of compiler optimization behavior) */ __ISB(); /* Program second word */ - *(__IO uint32_t*)(Address+4) = (uint32_t)(Data >> 32); + *(__IO uint32_t *)(Address + 4) = (uint32_t)(Data >> 32); } @@ -636,9 +628,9 @@ static void FLASH_Program_DoubleWord(uint32_t Address, uint64_t Data) * @note This function must be used when the device voltage range is from * 2.7V to 3.6V. 
* - * @note If an erase and a program operations are requested simultaneously, + * @note If an erase and a program operations are requested simultaneously, * the erase operation is performed before the program one. - * + * * @param Address specifies the address to be programmed. * @param Data specifies the data to be programmed. * @retval None @@ -647,13 +639,13 @@ static void FLASH_Program_Word(uint32_t Address, uint32_t Data) { /* Check the parameters */ assert_param(IS_FLASH_ADDRESS(Address)); - + /* If the previous operation is completed, proceed to program the new data */ CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE); FLASH->CR |= FLASH_PSIZE_WORD; FLASH->CR |= FLASH_CR_PG; - *(__IO uint32_t*)Address = Data; + *(__IO uint32_t *)Address = Data; } /** @@ -661,9 +653,9 @@ static void FLASH_Program_Word(uint32_t Address, uint32_t Data) * @note This function must be used when the device voltage range is from * 2.1V to 3.6V. * - * @note If an erase and a program operations are requested simultaneously, + * @note If an erase and a program operations are requested simultaneously, * the erase operation is performed before the program one. - * + * * @param Address specifies the address to be programmed. * @param Data specifies the data to be programmed. * @retval None @@ -672,13 +664,13 @@ static void FLASH_Program_HalfWord(uint32_t Address, uint16_t Data) { /* Check the parameters */ assert_param(IS_FLASH_ADDRESS(Address)); - + /* If the previous operation is completed, proceed to program the new data */ CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE); FLASH->CR |= FLASH_PSIZE_HALF_WORD; FLASH->CR |= FLASH_CR_PG; - *(__IO uint16_t*)Address = Data; + *(__IO uint16_t *)Address = Data; } /** @@ -686,9 +678,9 @@ static void FLASH_Program_HalfWord(uint32_t Address, uint16_t Data) * @note This function must be used when the device voltage range is from * 1.8V to 3.6V. * - * @note If an erase and a program operations are requested simultaneously, + * @note If an erase and a program operations are requested simultaneously, * the erase operation is performed before the program one. - * + * * @param Address specifies the address to be programmed. * @param Data specifies the data to be programmed. 
* @retval None @@ -697,13 +689,13 @@ static void FLASH_Program_Byte(uint32_t Address, uint8_t Data) { /* Check the parameters */ assert_param(IS_FLASH_ADDRESS(Address)); - + /* If the previous operation is completed, proceed to program the new data */ CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE); FLASH->CR |= FLASH_PSIZE_BYTE; FLASH->CR |= FLASH_CR_PG; - *(__IO uint8_t*)Address = Data; + *(__IO uint8_t *)Address = Data; } /** @@ -711,51 +703,51 @@ static void FLASH_Program_Byte(uint32_t Address, uint8_t Data) * @retval None */ static void FLASH_SetErrorCode(void) -{ - if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_WRPERR) != RESET) +{ + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_WRPERR) != RESET) { - pFlash.ErrorCode |= HAL_FLASH_ERROR_WRP; - - /* Clear FLASH write protection error pending bit */ - __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_WRPERR); + pFlash.ErrorCode |= HAL_FLASH_ERROR_WRP; + + /* Clear FLASH write protection error pending bit */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_WRPERR); } - - if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_PGAERR) != RESET) + + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_PGAERR) != RESET) { - pFlash.ErrorCode |= HAL_FLASH_ERROR_PGA; - - /* Clear FLASH Programming alignment error pending bit */ - __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_PGAERR); + pFlash.ErrorCode |= HAL_FLASH_ERROR_PGA; + + /* Clear FLASH Programming alignment error pending bit */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_PGAERR); } - - if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_PGPERR) != RESET) + + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_PGPERR) != RESET) { pFlash.ErrorCode |= HAL_FLASH_ERROR_PGP; - + /* Clear FLASH Programming parallelism error pending bit */ __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_PGPERR); } - - if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_PGSERR) != RESET) + + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_PGSERR) != RESET) { pFlash.ErrorCode |= HAL_FLASH_ERROR_PGS; - + /* Clear FLASH Programming sequence error pending bit */ __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_PGSERR); } -#if defined(FLASH_SR_RDERR) - if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_RDERR) != RESET) +#if defined(FLASH_SR_RDERR) + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_RDERR) != RESET) { pFlash.ErrorCode |= HAL_FLASH_ERROR_RD; - + /* Clear FLASH Proprietary readout protection error pending bit */ __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_RDERR); } -#endif /* FLASH_SR_RDERR */ - if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_OPERR) != RESET) +#endif /* FLASH_SR_RDERR */ + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_OPERR) != RESET) { pFlash.ErrorCode |= HAL_FLASH_ERROR_OPERATION; - + /* Clear FLASH Operation error pending bit */ __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_OPERR); } @@ -775,4 +767,3 @@ static void FLASH_SetErrorCode(void) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ex.c index 066aa502..f919fea5 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ex.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ex.c @@ -3,62 +3,60 @@ * @file stm32f4xx_hal_flash_ex.c * @author MCD Application Team * @brief Extended FLASH HAL module driver. 
- * This file provides firmware functions to manage the following + * This file provides firmware functions to manage the following * functionalities of the FLASH extension peripheral: * + Extended programming operations functions - * + * @verbatim ============================================================================== ##### Flash Extension features ##### ============================================================================== - - [..] Comparing to other previous devices, the FLASH interface for STM32F427xx/437xx and - STM32F429xx/439xx devices contains the following additional features - + + [..] Comparing to other previous devices, the FLASH interface for STM32F427xx/437xx and + STM32F429xx/439xx devices contains the following additional features + (+) Capacity up to 2 Mbyte with dual bank architecture supporting read-while-write capability (RWW) - (+) Dual bank memory organization + (+) Dual bank memory organization (+) PCROP protection for all banks - + ##### How to use this driver ##### ============================================================================== - [..] This driver provides functions to configure and program the FLASH memory - of all STM32F427xx/437xx, STM32F429xx/439xx, STM32F469xx/479xx and STM32F446xx + [..] This driver provides functions to configure and program the FLASH memory + of all STM32F427xx/437xx, STM32F429xx/439xx, STM32F469xx/479xx and STM32F446xx devices. It includes - (#) FLASH Memory Erase functions: - (++) Lock and Unlock the FLASH interface using HAL_FLASH_Unlock() and + (#) FLASH Memory Erase functions: + (++) Lock and Unlock the FLASH interface using HAL_FLASH_Unlock() and HAL_FLASH_Lock() functions (++) Erase function: Erase sector, erase all sectors (++) There are two modes of erase : (+++) Polling Mode using HAL_FLASHEx_Erase() (+++) Interrupt Mode using HAL_FLASHEx_Erase_IT() - + (#) Option Bytes Programming functions: Use HAL_FLASHEx_OBProgram() to : (++) Set/Reset the write protection (++) Set the Read protection Level (++) Set the BOR level (++) Program the user Option Bytes - (#) Advanced Option Bytes Programming functions: Use HAL_FLASHEx_AdvOBProgram() to : + (#) Advanced Option Bytes Programming functions: Use HAL_FLASHEx_AdvOBProgram() to : (++) Extended space (bank 2) erase function (++) Full FLASH space (2 Mo) erase (bank 1 and bank 2) (++) Dual Boot activation (++) Write protection configuration for bank 2 (++) PCROP protection configuration and control for both banks - + @endverbatim ****************************************************************************** * @attention * - *

© Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** - */ + */ /* Includes ------------------------------------------------------------------*/ #include "stm32f4xx_hal.h" @@ -78,17 +76,17 @@ /* Private define ------------------------------------------------------------*/ /** @addtogroup FLASHEx_Private_Constants * @{ - */ + */ #define FLASH_TIMEOUT_VALUE 50000U /* 50 s */ /** * @} */ - + /* Private macro -------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /** @addtogroup FLASHEx_Private_Variables * @{ - */ + */ extern FLASH_ProcessTypeDef pFlash; /** * @} @@ -118,7 +116,7 @@ static HAL_StatusTypeDef FLASH_OB_DisablePCROP(uint32_t Sector); #endif /* STM32F401xC || STM32F401xE || STM32F410xx || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx STM32F413xx || STM32F423xx */ -#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) +#if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) static HAL_StatusTypeDef FLASH_OB_EnablePCROP(uint32_t SectorBank1, uint32_t SectorBank2, uint32_t Banks); static HAL_StatusTypeDef FLASH_OB_DisablePCROP(uint32_t SectorBank1, uint32_t SectorBank2, uint32_t Banks); static HAL_StatusTypeDef FLASH_OB_BootConfig(uint8_t BootConfig); @@ -135,35 +133,35 @@ extern HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); */ /** @defgroup FLASHEx_Exported_Functions_Group1 Extended IO operation functions - * @brief Extended IO operation functions - * -@verbatim + * @brief Extended IO operation functions + * +@verbatim =============================================================================== ##### Extended programming operation functions ##### - =============================================================================== + =============================================================================== [..] - This subsection provides a set of functions allowing to manage the Extension FLASH + This subsection provides a set of functions allowing to manage the Extension FLASH programming operations. @endverbatim * @{ */ /** - * @brief Perform a mass erase or erase the specified FLASH memory sectors + * @brief Perform a mass erase or erase the specified FLASH memory sectors * @param[in] pEraseInit pointer to an FLASH_EraseInitTypeDef structure that * contains the configuration information for the erasing. 
- * + * * @param[out] SectorError pointer to variable that - * contains the configuration information on faulty sector in case of error + * contains the configuration information on faulty sector in case of error * (0xFFFFFFFFU means that all the sectors have been correctly erased) - * + * * @retval HAL Status */ HAL_StatusTypeDef HAL_FLASHEx_Erase(FLASH_EraseInitTypeDef *pEraseInit, uint32_t *SectorError) { HAL_StatusTypeDef status = HAL_ERROR; uint32_t index = 0U; - + /* Process Locked */ __HAL_LOCK(&pFlash); @@ -173,19 +171,19 @@ HAL_StatusTypeDef HAL_FLASHEx_Erase(FLASH_EraseInitTypeDef *pEraseInit, uint32_t /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - if(status == HAL_OK) + if (status == HAL_OK) { /*Initialization of SectorError variable*/ *SectorError = 0xFFFFFFFFU; - - if(pEraseInit->TypeErase == FLASH_TYPEERASE_MASSERASE) + + if (pEraseInit->TypeErase == FLASH_TYPEERASE_MASSERASE) { /*Mass erase to be done*/ FLASH_MassErase((uint8_t) pEraseInit->VoltageRange, pEraseInit->Banks); /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - + /* if the erase operation is completed, disable the MER Bit */ FLASH->CR &= (~FLASH_MER_BIT); } @@ -195,17 +193,17 @@ HAL_StatusTypeDef HAL_FLASHEx_Erase(FLASH_EraseInitTypeDef *pEraseInit, uint32_t assert_param(IS_FLASH_NBSECTORS(pEraseInit->NbSectors + pEraseInit->Sector)); /* Erase by sector by sector to be done*/ - for(index = pEraseInit->Sector; index < (pEraseInit->NbSectors + pEraseInit->Sector); index++) + for (index = pEraseInit->Sector; index < (pEraseInit->NbSectors + pEraseInit->Sector); index++) { FLASH_Erase_Sector(index, (uint8_t) pEraseInit->VoltageRange); /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - + /* If the erase operation is completed, disable the SER and SNB Bits */ CLEAR_BIT(FLASH->CR, (FLASH_CR_SER | FLASH_CR_SNB)); - if(status != HAL_OK) + if (status != HAL_OK) { /* In case of error, stop erase procedure and return the faulty sector*/ *SectorError = index; @@ -214,7 +212,7 @@ HAL_StatusTypeDef HAL_FLASHEx_Erase(FLASH_EraseInitTypeDef *pEraseInit, uint32_t } } /* Flush the caches to be sure of the data consistency */ - FLASH_FlushCaches(); + FLASH_FlushCaches(); } /* Process Unlocked */ @@ -227,30 +225,27 @@ HAL_StatusTypeDef HAL_FLASHEx_Erase(FLASH_EraseInitTypeDef *pEraseInit, uint32_t * @brief Perform a mass erase or erase the specified FLASH memory sectors with interrupt enabled * @param pEraseInit pointer to an FLASH_EraseInitTypeDef structure that * contains the configuration information for the erasing. 
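/* A minimal polling sector-erase sketch for HAL_FLASHEx_Erase() above. The sector
 * number, sector count and voltage range are illustrative assumptions for a
 * 2.7V-3.6V supply; SectorError returns the faulty sector, or 0xFFFFFFFFU when
 * every requested sector erased correctly. */
#include "stm32f4xx_hal.h"

void User_EraseSectorsExample(void)
{
  FLASH_EraseInitTypeDef erase = {0};
  uint32_t sector_error = 0U;

  erase.TypeErase    = FLASH_TYPEERASE_SECTORS;
  erase.Sector       = FLASH_SECTOR_6;          /* assumed first sector to erase */
  erase.NbSectors    = 2U;                      /* assumed number of consecutive sectors */
  erase.VoltageRange = FLASH_VOLTAGE_RANGE_3;   /* erase with word (32-bit) parallelism */

  HAL_FLASH_Unlock();

  if (HAL_FLASHEx_Erase(&erase, &sector_error) != HAL_OK)
  {
    /* sector_error holds the index of the sector that failed */
  }

  HAL_FLASH_Lock();
}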
- * + * * @retval HAL Status */ HAL_StatusTypeDef HAL_FLASHEx_Erase_IT(FLASH_EraseInitTypeDef *pEraseInit) { HAL_StatusTypeDef status = HAL_OK; - /* Process Locked */ - __HAL_LOCK(&pFlash); - /* Check the parameters */ assert_param(IS_FLASH_TYPEERASE(pEraseInit->TypeErase)); /* Enable End of FLASH Operation interrupt */ __HAL_FLASH_ENABLE_IT(FLASH_IT_EOP); - + /* Enable Error source interrupt */ __HAL_FLASH_ENABLE_IT(FLASH_IT_ERR); - - /* Clear pending flags (if any) */ - __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP | FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR |\ - FLASH_FLAG_PGAERR | FLASH_FLAG_PGPERR| FLASH_FLAG_PGSERR); - - if(pEraseInit->TypeErase == FLASH_TYPEERASE_MASSERASE) + + /* Clear pending flags (if any) */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP | FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | \ + FLASH_FLAG_PGAERR | FLASH_FLAG_PGPERR | FLASH_FLAG_PGSERR); + + if (pEraseInit->TypeErase == FLASH_TYPEERASE_MASSERASE) { /*Mass erase to be done*/ pFlash.ProcedureOnGoing = FLASH_PROC_MASSERASE; @@ -280,13 +275,13 @@ HAL_StatusTypeDef HAL_FLASHEx_Erase_IT(FLASH_EraseInitTypeDef *pEraseInit) * @brief Program option bytes * @param pOBInit pointer to an FLASH_OBInitStruct structure that * contains the configuration information for the programming. - * + * * @retval HAL Status */ HAL_StatusTypeDef HAL_FLASHEx_OBProgram(FLASH_OBProgramInitTypeDef *pOBInit) { HAL_StatusTypeDef status = HAL_ERROR; - + /* Process Locked */ __HAL_LOCK(&pFlash); @@ -294,10 +289,10 @@ HAL_StatusTypeDef HAL_FLASHEx_OBProgram(FLASH_OBProgramInitTypeDef *pOBInit) assert_param(IS_OPTIONBYTE(pOBInit->OptionType)); /*Write protection configuration*/ - if((pOBInit->OptionType & OPTIONBYTE_WRP) == OPTIONBYTE_WRP) + if ((pOBInit->OptionType & OPTIONBYTE_WRP) == OPTIONBYTE_WRP) { assert_param(IS_WRPSTATE(pOBInit->WRPState)); - if(pOBInit->WRPState == OB_WRPSTATE_ENABLE) + if (pOBInit->WRPState == OB_WRPSTATE_ENABLE) { /*Enable of Write protection on the selected Sector*/ status = FLASH_OB_EnableWRP(pOBInit->WRPSector, pOBInit->Banks); @@ -310,21 +305,21 @@ HAL_StatusTypeDef HAL_FLASHEx_OBProgram(FLASH_OBProgramInitTypeDef *pOBInit) } /*Read protection configuration*/ - if((pOBInit->OptionType & OPTIONBYTE_RDP) == OPTIONBYTE_RDP) + if ((pOBInit->OptionType & OPTIONBYTE_RDP) == OPTIONBYTE_RDP) { status = FLASH_OB_RDP_LevelConfig(pOBInit->RDPLevel); } /*USER configuration*/ - if((pOBInit->OptionType & OPTIONBYTE_USER) == OPTIONBYTE_USER) + if ((pOBInit->OptionType & OPTIONBYTE_USER) == OPTIONBYTE_USER) { - status = FLASH_OB_UserConfig(pOBInit->USERConfig&OB_IWDG_SW, - pOBInit->USERConfig&OB_STOP_NO_RST, - pOBInit->USERConfig&OB_STDBY_NO_RST); + status = FLASH_OB_UserConfig(pOBInit->USERConfig & OB_IWDG_SW, + pOBInit->USERConfig & OB_STOP_NO_RST, + pOBInit->USERConfig & OB_STDBY_NO_RST); } /*BOR Level configuration*/ - if((pOBInit->OptionType & OPTIONBYTE_BOR) == OPTIONBYTE_BOR) + if ((pOBInit->OptionType & OPTIONBYTE_BOR) == OPTIONBYTE_BOR) { status = FLASH_OB_BOR_LevelConfig(pOBInit->BORLevel); } @@ -339,7 +334,7 @@ HAL_StatusTypeDef HAL_FLASHEx_OBProgram(FLASH_OBProgramInitTypeDef *pOBInit) * @brief Get the Option byte configuration * @param pOBInit pointer to an FLASH_OBInitStruct structure that * contains the configuration information for the programming. 
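/* Option-byte programming sketch for HAL_FLASHEx_OBProgram() above: read the
 * current configuration, enable write protection on one sector and raise the BOR
 * threshold, then launch the option-byte load. The chosen sector, bank and BOR
 * level are illustrative assumptions. */
#include "stm32f4xx_hal.h"

void User_OptionBytesExample(void)
{
  FLASH_OBProgramInitTypeDef ob = {0};

  HAL_FLASH_OB_Unlock();                        /* allow OPTCR access */

  HAL_FLASHEx_OBGetConfig(&ob);                 /* start from the current settings */

  ob.OptionType = OPTIONBYTE_WRP | OPTIONBYTE_BOR;
  ob.WRPState   = OB_WRPSTATE_ENABLE;
  ob.WRPSector  = OB_WRP_SECTOR_0;              /* assumed sector to write-protect */
  ob.Banks      = FLASH_BANK_1;
  ob.BORLevel   = OB_BOR_LEVEL3;                /* highest brown-out reset threshold */

  if (HAL_FLASHEx_OBProgram(&ob) == HAL_OK)
  {
    HAL_FLASH_OB_Launch();                      /* set OPTSTRT and wait for completion */
  }

  HAL_FLASH_OB_Lock();                          /* relock the option-byte registers */
}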
- * + * * @retval None */ void HAL_FLASHEx_OBGetConfig(FLASH_OBProgramInitTypeDef *pOBInit) @@ -368,22 +363,22 @@ void HAL_FLASHEx_OBGetConfig(FLASH_OBProgramInitTypeDef *pOBInit) * @brief Program option bytes * @param pAdvOBInit pointer to an FLASH_AdvOBProgramInitTypeDef structure that * contains the configuration information for the programming. - * + * * @retval HAL Status */ -HAL_StatusTypeDef HAL_FLASHEx_AdvOBProgram (FLASH_AdvOBProgramInitTypeDef *pAdvOBInit) +HAL_StatusTypeDef HAL_FLASHEx_AdvOBProgram(FLASH_AdvOBProgramInitTypeDef *pAdvOBInit) { HAL_StatusTypeDef status = HAL_ERROR; - + /* Check the parameters */ assert_param(IS_OBEX(pAdvOBInit->OptionType)); /*Program PCROP option byte*/ - if(((pAdvOBInit->OptionType) & OPTIONBYTE_PCROP) == OPTIONBYTE_PCROP) + if (((pAdvOBInit->OptionType) & OPTIONBYTE_PCROP) == OPTIONBYTE_PCROP) { /* Check the parameters */ assert_param(IS_PCROPSTATE(pAdvOBInit->PCROPState)); - if((pAdvOBInit->PCROPState) == OB_PCROP_STATE_ENABLE) + if ((pAdvOBInit->PCROPState) == OB_PCROP_STATE_ENABLE) { /*Enable of Write protection on the selected Sector*/ #if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F410Tx) || defined(STM32F410Cx) || defined(STM32F410Rx) ||\ @@ -408,10 +403,10 @@ HAL_StatusTypeDef HAL_FLASHEx_AdvOBProgram (FLASH_AdvOBProgramInitTypeDef *pAdvO STM32F413xx || STM32F423xx */ } } - + #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) /*Program BOOT config option byte*/ - if(((pAdvOBInit->OptionType) & OPTIONBYTE_BOOTCONFIG) == OPTIONBYTE_BOOTCONFIG) + if (((pAdvOBInit->OptionType) & OPTIONBYTE_BOOTCONFIG) == OPTIONBYTE_BOOTCONFIG) { status = FLASH_OB_BootConfig(pAdvOBInit->BootConfig); } @@ -424,7 +419,7 @@ HAL_StatusTypeDef HAL_FLASHEx_AdvOBProgram (FLASH_AdvOBProgramInitTypeDef *pAdvO * @brief Get the OBEX byte configuration * @param pAdvOBInit pointer to an FLASH_AdvOBProgramInitTypeDef structure that * contains the configuration information for the programming. - * + * * @retval None */ void HAL_FLASHEx_AdvOBGetConfig(FLASH_AdvOBProgramInitTypeDef *pAdvOBInit) @@ -448,15 +443,15 @@ void HAL_FLASHEx_AdvOBGetConfig(FLASH_AdvOBProgramInitTypeDef *pAdvOBInit) } /** - * @brief Select the Protection Mode - * - * @note After PCROP activated Option Byte modification NOT POSSIBLE! excepted - * Global Read Out Protection modification (from level1 to level0) - * @note Once SPRMOD bit is active unprotection of a protected sector is not possible + * @brief Select the Protection Mode + * + * @note After PCROP activated Option Byte modification NOT POSSIBLE! excepted + * Global Read Out Protection modification (from level1 to level0) + * @note Once SPRMOD bit is active unprotection of a protected sector is not possible * @note Read a protected sector will set RDERR Flag and write a protected sector will set WRPERR Flag * @note This function can be used only for STM32F42xxx/STM32F43xxx/STM32F401xx/STM32F411xx/STM32F446xx/ * STM32F469xx/STM32F479xx/STM32F412xx/STM32F413xx devices. 
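/* PCROP configuration skeleton built from the advanced option-byte functions
 * above, for devices that support the SPRMOD bit. Note the caution in the
 * comments above: once SPRMOD is active the nWRPi bits change meaning and a
 * protected sector can no longer be unprotected, so treat this as a one-way
 * decision. The PCROP sector field(s) of FLASH_AdvOBProgramInitTypeDef differ
 * between single-bank and dual-bank parts, so they are only indicated by a
 * placeholder comment here. */
#include "stm32f4xx_hal.h"

void User_PcropSkeleton(void)
{
  FLASH_AdvOBProgramInitTypeDef adv_ob = {0};

  HAL_FLASH_OB_Unlock();

  HAL_FLASHEx_AdvOBGetConfig(&adv_ob);          /* read back the current PCROP setup */

  adv_ob.OptionType = OPTIONBYTE_PCROP;
  adv_ob.PCROPState = OB_PCROP_STATE_ENABLE;
  /* ... set the PCROP sector field(s) appropriate for the target device ... */

  HAL_FLASHEx_OB_SelectPCROP();                 /* set SPRMOD so the nWRPi bits act as PCROP bits */

  if (HAL_FLASHEx_AdvOBProgram(&adv_ob) == HAL_OK)
  {
    HAL_FLASH_OB_Launch();
  }

  HAL_FLASH_OB_Lock();
}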
- * + * * @retval HAL Status */ HAL_StatusTypeDef HAL_FLASHEx_OB_SelectPCROP(void) @@ -464,36 +459,36 @@ HAL_StatusTypeDef HAL_FLASHEx_OB_SelectPCROP(void) uint8_t optiontmp = 0xFF; /* Mask SPRMOD bit */ - optiontmp = (uint8_t)((*(__IO uint8_t *)OPTCR_BYTE3_ADDRESS) & (uint8_t)0x7F); - + optiontmp = (uint8_t)((*(__IO uint8_t *)OPTCR_BYTE3_ADDRESS) & (uint8_t)0x7F); + /* Update Option Byte */ - *(__IO uint8_t *)OPTCR_BYTE3_ADDRESS = (uint8_t)(OB_PCROP_SELECTED | optiontmp); - + *(__IO uint8_t *)OPTCR_BYTE3_ADDRESS = (uint8_t)(OB_PCROP_SELECTED | optiontmp); + return HAL_OK; } /** - * @brief Deselect the Protection Mode - * - * @note After PCROP activated Option Byte modification NOT POSSIBLE! excepted - * Global Read Out Protection modification (from level1 to level0) - * @note Once SPRMOD bit is active unprotection of a protected sector is not possible + * @brief Deselect the Protection Mode + * + * @note After PCROP activated Option Byte modification NOT POSSIBLE! excepted + * Global Read Out Protection modification (from level1 to level0) + * @note Once SPRMOD bit is active unprotection of a protected sector is not possible * @note Read a protected sector will set RDERR Flag and write a protected sector will set WRPERR Flag * @note This function can be used only for STM32F42xxx/STM32F43xxx/STM32F401xx/STM32F411xx/STM32F446xx/ * STM32F469xx/STM32F479xx/STM32F412xx/STM32F413xx devices. - * + * * @retval HAL Status */ HAL_StatusTypeDef HAL_FLASHEx_OB_DeSelectPCROP(void) { uint8_t optiontmp = 0xFF; - + /* Mask SPRMOD bit */ - optiontmp = (uint8_t)((*(__IO uint8_t *)OPTCR_BYTE3_ADDRESS) & (uint8_t)0x7F); - + optiontmp = (uint8_t)((*(__IO uint8_t *)OPTCR_BYTE3_ADDRESS) & (uint8_t)0x7F); + /* Update Option Byte */ - *(__IO uint8_t *)OPTCR_BYTE3_ADDRESS = (uint8_t)(OB_PCROP_DESELECTED | optiontmp); - + *(__IO uint8_t *)OPTCR_BYTE3_ADDRESS = (uint8_t)(OB_PCROP_DESELECTED | optiontmp); + return HAL_OK; } #endif /* STM32F427xx || STM32F437xx || STM32F429xx || STM32F439xx || STM32F401xC || STM32F401xE || STM32F410xx ||\ @@ -503,11 +498,11 @@ HAL_StatusTypeDef HAL_FLASHEx_OB_DeSelectPCROP(void) #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx)|| defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) /** * @brief Returns the FLASH Write Protection Option Bytes value for Bank 2 - * @note This function can be used only for STM32F42xxx/STM32F43xxx/STM32F469xx/STM32F479xx devices. + * @note This function can be used only for STM32F42xxx/STM32F43xxx/STM32F469xx/STM32F479xx devices. * @retval The FLASH Write Protection Option Bytes value */ uint16_t HAL_FLASHEx_OB_GetBank2WRP(void) -{ +{ /* Return the FLASH write protection Register value */ return (*(__IO uint16_t *)(OPTCR1_BYTE2_ADDRESS)); } @@ -516,21 +511,21 @@ uint16_t HAL_FLASHEx_OB_GetBank2WRP(void) /** * @} */ - + #if defined(STM32F427xx) || defined(STM32F437xx) || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) /** - * @brief Full erase of FLASH memory sectors - * @param VoltageRange The device voltage range which defines the erase parallelism. + * @brief Full erase of FLASH memory sectors + * @param VoltageRange The device voltage range which defines the erase parallelism. 
* This parameter can be one of the following values: - * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, - * the operation will be done by byte (8-bit) + * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) * @arg FLASH_VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, * the operation will be done by half word (16-bit) * @arg FLASH_VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, * the operation will be done by word (32-bit) - * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, * the operation will be done by double word (64-bit) - * + * * @param Banks Banks to be erased * This parameter can be one of the following values: * @arg FLASH_BANK_1: Bank1 to be erased @@ -548,12 +543,12 @@ static void FLASH_MassErase(uint8_t VoltageRange, uint32_t Banks) /* if the previous operation is completed, proceed to erase all sectors */ CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE); - if(Banks == FLASH_BANK_BOTH) + if (Banks == FLASH_BANK_BOTH) { /* bank1 & bank2 will be erased*/ FLASH->CR |= FLASH_MER_BIT; } - else if(Banks == FLASH_BANK_1) + else if (Banks == FLASH_BANK_1) { /*Only bank1 will be erased*/ FLASH->CR |= FLASH_CR_MER1; @@ -563,24 +558,24 @@ static void FLASH_MassErase(uint8_t VoltageRange, uint32_t Banks) /*Only bank2 will be erased*/ FLASH->CR |= FLASH_CR_MER2; } - FLASH->CR |= FLASH_CR_STRT | ((uint32_t)VoltageRange <<8U); + FLASH->CR |= FLASH_CR_STRT | ((uint32_t)VoltageRange << 8U); } /** * @brief Erase the specified FLASH memory sector * @param Sector FLASH sector to erase - * The value of this parameter depend on device used within the same series - * @param VoltageRange The device voltage range which defines the erase parallelism. + * The value of this parameter depend on device used within the same series + * @param VoltageRange The device voltage range which defines the erase parallelism. 
* This parameter can be one of the following values: - * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, - * the operation will be done by byte (8-bit) + * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) * @arg FLASH_VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, * the operation will be done by half word (16-bit) * @arg FLASH_VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, * the operation will be done by word (32-bit) - * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, * the operation will be done by double word (64-bit) - * + * * @retval None */ void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange) @@ -590,16 +585,16 @@ void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange) /* Check the parameters */ assert_param(IS_FLASH_SECTOR(Sector)); assert_param(IS_VOLTAGERANGE(VoltageRange)); - - if(VoltageRange == FLASH_VOLTAGE_RANGE_1) + + if (VoltageRange == FLASH_VOLTAGE_RANGE_1) { - tmp_psize = FLASH_PSIZE_BYTE; + tmp_psize = FLASH_PSIZE_BYTE; } - else if(VoltageRange == FLASH_VOLTAGE_RANGE_2) + else if (VoltageRange == FLASH_VOLTAGE_RANGE_2) { tmp_psize = FLASH_PSIZE_HALF_WORD; } - else if(VoltageRange == FLASH_VOLTAGE_RANGE_3) + else if (VoltageRange == FLASH_VOLTAGE_RANGE_3) { tmp_psize = FLASH_PSIZE_WORD; } @@ -609,7 +604,7 @@ void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange) } /* Need to add offset of 4 when sector higher than FLASH_SECTOR_11 */ - if(Sector > FLASH_SECTOR_11) + if (Sector > FLASH_SECTOR_11) { Sector += 4U; } @@ -624,11 +619,11 @@ void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange) /** * @brief Enable the write protection of the desired bank1 or bank 2 sectors * - * @note When the memory read protection level is selected (RDP level = 1), - * it is not possible to program or erase the flash sector i if CortexM4 - * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 - * @note Active value of nWRPi bits is inverted when PCROP mode is active (SPRMOD =1). - * + * @note When the memory read protection level is selected (RDP level = 1), + * it is not possible to program or erase the flash sector i if CortexM4 + * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 + * @note Active value of nWRPi bits is inverted when PCROP mode is active (SPRMOD =1). + * * @param WRPSector specifies the sector(s) to be write protected. 
* This parameter can be one of the following values: * @arg WRPSector: A value between OB_WRP_SECTOR_0 and OB_WRP_SECTOR_23 @@ -641,53 +636,53 @@ void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange) * @arg FLASH_BANK_2: WRP on all sectors of bank2 * @arg FLASH_BANK_BOTH: WRP on all sectors of bank1 & bank2 * - * @retval HAL FLASH State + * @retval HAL FLASH State */ static HAL_StatusTypeDef FLASH_OB_EnableWRP(uint32_t WRPSector, uint32_t Banks) { HAL_StatusTypeDef status = HAL_OK; - + /* Check the parameters */ assert_param(IS_OB_WRP_SECTOR(WRPSector)); assert_param(IS_FLASH_BANK(Banks)); - + /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - if(status == HAL_OK) + if (status == HAL_OK) { - if(((WRPSector == OB_WRP_SECTOR_All) && ((Banks == FLASH_BANK_1) || (Banks == FLASH_BANK_BOTH))) || - (WRPSector < OB_WRP_SECTOR_12)) + if (((WRPSector == OB_WRP_SECTOR_All) && ((Banks == FLASH_BANK_1) || (Banks == FLASH_BANK_BOTH))) || + (WRPSector < OB_WRP_SECTOR_12)) { - if(WRPSector == OB_WRP_SECTOR_All) - { - /*Write protection on all sector of BANK1*/ - *(__IO uint16_t*)OPTCR_BYTE2_ADDRESS &= (~(WRPSector>>12)); - } - else - { - /*Write protection done on sectors of BANK1*/ - *(__IO uint16_t*)OPTCR_BYTE2_ADDRESS &= (~WRPSector); - } + if (WRPSector == OB_WRP_SECTOR_All) + { + /*Write protection on all sector of BANK1*/ + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS &= (~(WRPSector >> 12)); + } + else + { + /*Write protection done on sectors of BANK1*/ + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS &= (~WRPSector); + } } - else + else { /*Write protection done on sectors of BANK2*/ - *(__IO uint16_t*)OPTCR1_BYTE2_ADDRESS &= (~(WRPSector>>12)); + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS &= (~(WRPSector >> 12)); } /*Write protection on all sector of BANK2*/ - if((WRPSector == OB_WRP_SECTOR_All) && (Banks == FLASH_BANK_BOTH)) + if ((WRPSector == OB_WRP_SECTOR_All) && (Banks == FLASH_BANK_BOTH)) { /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - - if(status == HAL_OK) - { - *(__IO uint16_t*)OPTCR1_BYTE2_ADDRESS &= (~(WRPSector>>12)); + + if (status == HAL_OK) + { + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS &= (~(WRPSector >> 12)); } } - + } return status; } @@ -695,11 +690,11 @@ static HAL_StatusTypeDef FLASH_OB_EnableWRP(uint32_t WRPSector, uint32_t Banks) /** * @brief Disable the write protection of the desired bank1 or bank 2 sectors * - * @note When the memory read protection level is selected (RDP level = 1), - * it is not possible to program or erase the flash sector i if CortexM4 - * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 - * @note Active value of nWRPi bits is inverted when PCROP mode is active (SPRMOD =1). - * + * @note When the memory read protection level is selected (RDP level = 1), + * it is not possible to program or erase the flash sector i if CortexM4 + * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 + * @note Active value of nWRPi bits is inverted when PCROP mode is active (SPRMOD =1). + * * @param WRPSector specifies the sector(s) to be write protected. 
* This parameter can be one of the following values: * @arg WRPSector: A value between OB_WRP_SECTOR_0 and OB_WRP_SECTOR_23 @@ -712,53 +707,53 @@ static HAL_StatusTypeDef FLASH_OB_EnableWRP(uint32_t WRPSector, uint32_t Banks) * @arg FLASH_BANK_2: Bank2 to be erased * @arg FLASH_BANK_BOTH: Bank1 and Bank2 to be erased * - * @retval HAL Status + * @retval HAL Status */ static HAL_StatusTypeDef FLASH_OB_DisableWRP(uint32_t WRPSector, uint32_t Banks) { HAL_StatusTypeDef status = HAL_OK; - + /* Check the parameters */ assert_param(IS_OB_WRP_SECTOR(WRPSector)); assert_param(IS_FLASH_BANK(Banks)); - + /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - if(status == HAL_OK) + if (status == HAL_OK) { - if(((WRPSector == OB_WRP_SECTOR_All) && ((Banks == FLASH_BANK_1) || (Banks == FLASH_BANK_BOTH))) || - (WRPSector < OB_WRP_SECTOR_12)) + if (((WRPSector == OB_WRP_SECTOR_All) && ((Banks == FLASH_BANK_1) || (Banks == FLASH_BANK_BOTH))) || + (WRPSector < OB_WRP_SECTOR_12)) { - if(WRPSector == OB_WRP_SECTOR_All) - { - /*Write protection on all sector of BANK1*/ - *(__IO uint16_t*)OPTCR_BYTE2_ADDRESS |= (uint16_t)(WRPSector>>12); - } - else - { - /*Write protection done on sectors of BANK1*/ - *(__IO uint16_t*)OPTCR_BYTE2_ADDRESS |= (uint16_t)WRPSector; - } + if (WRPSector == OB_WRP_SECTOR_All) + { + /*Write protection on all sector of BANK1*/ + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS |= (uint16_t)(WRPSector >> 12); + } + else + { + /*Write protection done on sectors of BANK1*/ + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS |= (uint16_t)WRPSector; + } } - else + else { /*Write protection done on sectors of BANK2*/ - *(__IO uint16_t*)OPTCR1_BYTE2_ADDRESS |= (uint16_t)(WRPSector>>12); + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS |= (uint16_t)(WRPSector >> 12); } /*Write protection on all sector of BANK2*/ - if((WRPSector == OB_WRP_SECTOR_All) && (Banks == FLASH_BANK_BOTH)) + if ((WRPSector == OB_WRP_SECTOR_All) && (Banks == FLASH_BANK_BOTH)) { /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - - if(status == HAL_OK) - { - *(__IO uint16_t*)OPTCR1_BYTE2_ADDRESS |= (uint16_t)(WRPSector>>12); + + if (status == HAL_OK) + { + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS |= (uint16_t)(WRPSector >> 12); } } - + } return status; @@ -766,9 +761,9 @@ static HAL_StatusTypeDef FLASH_OB_DisableWRP(uint32_t WRPSector, uint32_t Banks) /** * @brief Configure the Dual Bank Boot. - * + * * @note This function can be used only for STM32F42xxx/43xxx devices. - * + * * @param BootConfig specifies the Dual Bank Boot Option byte. * This parameter can be one of the following values: * @arg OB_Dual_BootEnabled: Dual Bank Boot Enable @@ -782,77 +777,77 @@ static HAL_StatusTypeDef FLASH_OB_BootConfig(uint8_t BootConfig) /* Check the parameters */ assert_param(IS_OB_BOOT(BootConfig)); - /* Wait for last operation to be completed */ + /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - if(status == HAL_OK) - { + if (status == HAL_OK) + { /* Set Dual Bank Boot */ *(__IO uint8_t *)OPTCR_BYTE0_ADDRESS &= (~FLASH_OPTCR_BFB2); *(__IO uint8_t *)OPTCR_BYTE0_ADDRESS |= BootConfig; } - + return status; } /** - * @brief Enable the read/write protection (PCROP) of the desired + * @brief Enable the read/write protection (PCROP) of the desired * sectors of Bank 1 and/or Bank 2. * @note This function can be used only for STM32F42xxx/43xxx devices. 
* @param SectorBank1 Specifies the sector(s) to be read/write protected or unprotected for bank1. * This parameter can be one of the following values: * @arg OB_PCROP: A value between OB_PCROP_SECTOR_0 and OB_PCROP_SECTOR_11 - * @arg OB_PCROP_SECTOR__All + * @arg OB_PCROP_SECTOR__All * @param SectorBank2 Specifies the sector(s) to be read/write protected or unprotected for bank2. * This parameter can be one of the following values: * @arg OB_PCROP: A value between OB_PCROP_SECTOR_12 and OB_PCROP_SECTOR_23 - * @arg OB_PCROP_SECTOR__All + * @arg OB_PCROP_SECTOR__All * @param Banks Enable PCROP protection on all the sectors for the specific bank * This parameter can be one of the following values: * @arg FLASH_BANK_1: WRP on all sectors of bank1 * @arg FLASH_BANK_2: WRP on all sectors of bank2 * @arg FLASH_BANK_BOTH: WRP on all sectors of bank1 & bank2 * - * @retval HAL Status + * @retval HAL Status */ static HAL_StatusTypeDef FLASH_OB_EnablePCROP(uint32_t SectorBank1, uint32_t SectorBank2, uint32_t Banks) { HAL_StatusTypeDef status = HAL_OK; - + assert_param(IS_FLASH_BANK(Banks)); - + /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - if(status == HAL_OK) + if (status == HAL_OK) { - if((Banks == FLASH_BANK_1) || (Banks == FLASH_BANK_BOTH)) + if ((Banks == FLASH_BANK_1) || (Banks == FLASH_BANK_BOTH)) { assert_param(IS_OB_PCROP(SectorBank1)); /*Write protection done on sectors of BANK1*/ - *(__IO uint16_t*)OPTCR_BYTE2_ADDRESS |= (uint16_t)SectorBank1; + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS |= (uint16_t)SectorBank1; } - else + else { assert_param(IS_OB_PCROP(SectorBank2)); /*Write protection done on sectors of BANK2*/ - *(__IO uint16_t*)OPTCR1_BYTE2_ADDRESS |= (uint16_t)SectorBank2; + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS |= (uint16_t)SectorBank2; } /*Write protection on all sector of BANK2*/ - if(Banks == FLASH_BANK_BOTH) + if (Banks == FLASH_BANK_BOTH) { assert_param(IS_OB_PCROP(SectorBank2)); /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - - if(status == HAL_OK) - { + + if (status == HAL_OK) + { /*Write protection done on sectors of BANK2*/ - *(__IO uint16_t*)OPTCR1_BYTE2_ADDRESS |= (uint16_t)SectorBank2; + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS |= (uint16_t)SectorBank2; } } - + } return status; @@ -860,66 +855,66 @@ static HAL_StatusTypeDef FLASH_OB_EnablePCROP(uint32_t SectorBank1, uint32_t Sec /** - * @brief Disable the read/write protection (PCROP) of the desired + * @brief Disable the read/write protection (PCROP) of the desired * sectors of Bank 1 and/or Bank 2. * @note This function can be used only for STM32F42xxx/43xxx devices. * @param SectorBank1 specifies the sector(s) to be read/write protected or unprotected for bank1. * This parameter can be one of the following values: * @arg OB_PCROP: A value between OB_PCROP_SECTOR_0 and OB_PCROP_SECTOR_11 - * @arg OB_PCROP_SECTOR__All + * @arg OB_PCROP_SECTOR__All * @param SectorBank2 Specifies the sector(s) to be read/write protected or unprotected for bank2. 
* This parameter can be one of the following values: * @arg OB_PCROP: A value between OB_PCROP_SECTOR_12 and OB_PCROP_SECTOR_23 - * @arg OB_PCROP_SECTOR__All + * @arg OB_PCROP_SECTOR__All * @param Banks Disable PCROP protection on all the sectors for the specific bank * This parameter can be one of the following values: * @arg FLASH_BANK_1: WRP on all sectors of bank1 * @arg FLASH_BANK_2: WRP on all sectors of bank2 * @arg FLASH_BANK_BOTH: WRP on all sectors of bank1 & bank2 * - * @retval HAL Status + * @retval HAL Status */ static HAL_StatusTypeDef FLASH_OB_DisablePCROP(uint32_t SectorBank1, uint32_t SectorBank2, uint32_t Banks) -{ +{ HAL_StatusTypeDef status = HAL_OK; - + /* Check the parameters */ assert_param(IS_FLASH_BANK(Banks)); - + /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - if(status == HAL_OK) + if (status == HAL_OK) { - if((Banks == FLASH_BANK_1) || (Banks == FLASH_BANK_BOTH)) + if ((Banks == FLASH_BANK_1) || (Banks == FLASH_BANK_BOTH)) { assert_param(IS_OB_PCROP(SectorBank1)); /*Write protection done on sectors of BANK1*/ - *(__IO uint16_t*)OPTCR_BYTE2_ADDRESS &= (~SectorBank1); + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS &= (~SectorBank1); } - else + else { /*Write protection done on sectors of BANK2*/ assert_param(IS_OB_PCROP(SectorBank2)); - *(__IO uint16_t*)OPTCR1_BYTE2_ADDRESS &= (~SectorBank2); + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS &= (~SectorBank2); } /*Write protection on all sector of BANK2*/ - if(Banks == FLASH_BANK_BOTH) + if (Banks == FLASH_BANK_BOTH) { assert_param(IS_OB_PCROP(SectorBank2)); - /* Wait for last operation to be completed */ + /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - - if(status == HAL_OK) - { + + if (status == HAL_OK) + { /*Write protection done on sectors of BANK2*/ - *(__IO uint16_t*)OPTCR1_BYTE2_ADDRESS &= (~SectorBank2); + *(__IO uint16_t *)OPTCR1_BYTE2_ADDRESS &= (~SectorBank2); } } - + } - + return status; } @@ -933,17 +928,17 @@ static HAL_StatusTypeDef FLASH_OB_DisablePCROP(uint32_t SectorBank1, uint32_t Se defined(STM32F423xx) /** * @brief Mass erase of FLASH memory - * @param VoltageRange The device voltage range which defines the erase parallelism. + * @param VoltageRange The device voltage range which defines the erase parallelism. 
* This parameter can be one of the following values: - * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, - * the operation will be done by byte (8-bit) + * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) * @arg FLASH_VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, * the operation will be done by half word (16-bit) * @arg FLASH_VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, * the operation will be done by word (32-bit) - * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, * the operation will be done by double word (64-bit) - * + * * @param Banks Banks to be erased * This parameter can be one of the following values: * @arg FLASH_BANK_1: Bank1 to be erased @@ -955,28 +950,28 @@ static void FLASH_MassErase(uint8_t VoltageRange, uint32_t Banks) /* Check the parameters */ assert_param(IS_VOLTAGERANGE(VoltageRange)); assert_param(IS_FLASH_BANK(Banks)); - + /* If the previous operation is completed, proceed to erase all sectors */ CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE); FLASH->CR |= FLASH_CR_MER; - FLASH->CR |= FLASH_CR_STRT | ((uint32_t)VoltageRange <<8U); + FLASH->CR |= FLASH_CR_STRT | ((uint32_t)VoltageRange << 8U); } /** * @brief Erase the specified FLASH memory sector * @param Sector FLASH sector to erase - * The value of this parameter depend on device used within the same series - * @param VoltageRange The device voltage range which defines the erase parallelism. + * The value of this parameter depend on device used within the same series + * @param VoltageRange The device voltage range which defines the erase parallelism. 
* This parameter can be one of the following values: - * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, - * the operation will be done by byte (8-bit) + * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) * @arg FLASH_VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, * the operation will be done by half word (16-bit) * @arg FLASH_VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, * the operation will be done by word (32-bit) - * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, * the operation will be done by double word (64-bit) - * + * * @retval None */ void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange) @@ -986,16 +981,16 @@ void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange) /* Check the parameters */ assert_param(IS_FLASH_SECTOR(Sector)); assert_param(IS_VOLTAGERANGE(VoltageRange)); - - if(VoltageRange == FLASH_VOLTAGE_RANGE_1) + + if (VoltageRange == FLASH_VOLTAGE_RANGE_1) { - tmp_psize = FLASH_PSIZE_BYTE; + tmp_psize = FLASH_PSIZE_BYTE; } - else if(VoltageRange == FLASH_VOLTAGE_RANGE_2) + else if (VoltageRange == FLASH_VOLTAGE_RANGE_2) { tmp_psize = FLASH_PSIZE_HALF_WORD; } - else if(VoltageRange == FLASH_VOLTAGE_RANGE_3) + else if (VoltageRange == FLASH_VOLTAGE_RANGE_3) { tmp_psize = FLASH_PSIZE_WORD; } @@ -1015,72 +1010,72 @@ void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange) /** * @brief Enable the write protection of the desired bank 1 sectors * - * @note When the memory read protection level is selected (RDP level = 1), - * it is not possible to program or erase the flash sector i if CortexM4 - * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 - * @note Active value of nWRPi bits is inverted when PCROP mode is active (SPRMOD =1). - * + * @note When the memory read protection level is selected (RDP level = 1), + * it is not possible to program or erase the flash sector i if CortexM4 + * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 + * @note Active value of nWRPi bits is inverted when PCROP mode is active (SPRMOD =1). + * * @param WRPSector specifies the sector(s) to be write protected. 
- * The value of this parameter depend on device used within the same series - * + * The value of this parameter depend on device used within the same series + * * @param Banks Enable write protection on all the sectors for the specific bank * This parameter can be one of the following values: * @arg FLASH_BANK_1: WRP on all sectors of bank1 * - * @retval HAL Status + * @retval HAL Status */ static HAL_StatusTypeDef FLASH_OB_EnableWRP(uint32_t WRPSector, uint32_t Banks) { HAL_StatusTypeDef status = HAL_OK; - + /* Check the parameters */ assert_param(IS_OB_WRP_SECTOR(WRPSector)); assert_param(IS_FLASH_BANK(Banks)); - + /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - if(status == HAL_OK) - { - *(__IO uint16_t*)OPTCR_BYTE2_ADDRESS &= (~WRPSector); + if (status == HAL_OK) + { + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS &= (~WRPSector); } - + return status; } /** * @brief Disable the write protection of the desired bank 1 sectors * - * @note When the memory read protection level is selected (RDP level = 1), - * it is not possible to program or erase the flash sector i if CortexM4 - * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 - * @note Active value of nWRPi bits is inverted when PCROP mode is active (SPRMOD =1). - * + * @note When the memory read protection level is selected (RDP level = 1), + * it is not possible to program or erase the flash sector i if CortexM4 + * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 + * @note Active value of nWRPi bits is inverted when PCROP mode is active (SPRMOD =1). + * * @param WRPSector specifies the sector(s) to be write protected. - * The value of this parameter depend on device used within the same series - * + * The value of this parameter depend on device used within the same series + * * @param Banks Enable write protection on all the sectors for the specific bank * This parameter can be one of the following values: * @arg FLASH_BANK_1: WRP on all sectors of bank1 * - * @retval HAL Status + * @retval HAL Status */ static HAL_StatusTypeDef FLASH_OB_DisableWRP(uint32_t WRPSector, uint32_t Banks) { HAL_StatusTypeDef status = HAL_OK; - + /* Check the parameters */ assert_param(IS_OB_WRP_SECTOR(WRPSector)); assert_param(IS_FLASH_BANK(Banks)); - + /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - if(status == HAL_OK) - { - *(__IO uint16_t*)OPTCR_BYTE2_ADDRESS |= (uint16_t)WRPSector; + if (status == HAL_OK) + { + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS |= (uint16_t)WRPSector; } - + return status; } #endif /* STM32F40xxx || STM32F41xxx || STM32F401xx || STM32F410xx || STM32F411xE || STM32F446xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx @@ -1095,24 +1090,24 @@ static HAL_StatusTypeDef FLASH_OB_DisableWRP(uint32_t WRPSector, uint32_t Banks) * @param Sector specifies the sector(s) to be read/write protected or unprotected. 
* This parameter can be one of the following values: * @arg OB_PCROP: A value between OB_PCROP_Sector0 and OB_PCROP_Sector5 - * @arg OB_PCROP_Sector_All - * @retval HAL Status + * @arg OB_PCROP_Sector_All + * @retval HAL Status */ static HAL_StatusTypeDef FLASH_OB_EnablePCROP(uint32_t Sector) { HAL_StatusTypeDef status = HAL_OK; - + /* Check the parameters */ assert_param(IS_OB_PCROP(Sector)); - - /* Wait for last operation to be completed */ + + /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - if(status == HAL_OK) - { - *(__IO uint16_t*)OPTCR_BYTE2_ADDRESS |= (uint16_t)Sector; + if (status == HAL_OK) + { + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS |= (uint16_t)Sector; } - + return status; } @@ -1123,24 +1118,24 @@ static HAL_StatusTypeDef FLASH_OB_EnablePCROP(uint32_t Sector) * @param Sector specifies the sector(s) to be read/write protected or unprotected. * This parameter can be one of the following values: * @arg OB_PCROP: A value between OB_PCROP_Sector0 and OB_PCROP_Sector5 - * @arg OB_PCROP_Sector_All - * @retval HAL Status + * @arg OB_PCROP_Sector_All + * @retval HAL Status */ static HAL_StatusTypeDef FLASH_OB_DisablePCROP(uint32_t Sector) -{ +{ HAL_StatusTypeDef status = HAL_OK; - + /* Check the parameters */ assert_param(IS_OB_PCROP(Sector)); - - /* Wait for last operation to be completed */ + + /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - if(status == HAL_OK) - { - *(__IO uint16_t*)OPTCR_BYTE2_ADDRESS &= (~Sector); + if (status == HAL_OK) + { + *(__IO uint16_t *)OPTCR_BYTE2_ADDRESS &= (~Sector); } - + return status; } @@ -1154,31 +1149,31 @@ static HAL_StatusTypeDef FLASH_OB_DisablePCROP(uint32_t Sector) * @arg OB_RDP_LEVEL_0: No protection * @arg OB_RDP_LEVEL_1: Read protection of the memory * @arg OB_RDP_LEVEL_2: Full chip protection - * + * * @note WARNING: When enabling OB_RDP level 2 it's no more possible to go back to level 1 or 0 - * + * * @retval HAL Status */ static HAL_StatusTypeDef FLASH_OB_RDP_LevelConfig(uint8_t Level) { HAL_StatusTypeDef status = HAL_OK; - + /* Check the parameters */ assert_param(IS_OB_RDP_LEVEL(Level)); - + /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - if(status == HAL_OK) - { - *(__IO uint8_t*)OPTCR_BYTE1_ADDRESS = Level; + if (status == HAL_OK) + { + *(__IO uint8_t *)OPTCR_BYTE1_ADDRESS = Level; } - + return status; } /** - * @brief Program the FLASH User Option Byte: IWDG_SW / RST_STOP / RST_STDBY. + * @brief Program the FLASH User Option Byte: IWDG_SW / RST_STOP / RST_STDBY. 
* @param Iwdg Selects the IWDG mode * This parameter can be one of the following values: * @arg OB_IWDG_SW: Software IWDG selected @@ -1205,21 +1200,21 @@ static HAL_StatusTypeDef FLASH_OB_UserConfig(uint8_t Iwdg, uint8_t Stop, uint8_t /* Wait for last operation to be completed */ status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); - - if(status == HAL_OK) - { + + if (status == HAL_OK) + { /* Mask OPTLOCK, OPTSTRT, BOR_LEV and BFB2 bits */ - optiontmp = (uint8_t)((*(__IO uint8_t *)OPTCR_BYTE0_ADDRESS) & (uint8_t)0x1F); + optiontmp = (uint8_t)((*(__IO uint8_t *)OPTCR_BYTE0_ADDRESS) & (uint8_t)0x1F); /* Update User Option Byte */ - *(__IO uint8_t *)OPTCR_BYTE0_ADDRESS = Iwdg | (uint8_t)(Stdby | (uint8_t)(Stop | ((uint8_t)optiontmp))); + *(__IO uint8_t *)OPTCR_BYTE0_ADDRESS = Iwdg | (uint8_t)(Stdby | (uint8_t)(Stop | ((uint8_t)optiontmp))); } - - return status; + + return status; } /** - * @brief Set the BOR Level. + * @brief Set the BOR Level. * @param Level specifies the Option Bytes BOR Reset Level. * This parameter can be one of the following values: * @arg OB_BOR_LEVEL3: Supply voltage ranges from 2.7 to 3.6 V @@ -1236,9 +1231,9 @@ static HAL_StatusTypeDef FLASH_OB_BOR_LevelConfig(uint8_t Level) /* Set the BOR Level */ *(__IO uint8_t *)OPTCR_BYTE0_ADDRESS &= (~FLASH_OPTCR_BOR_LEV); *(__IO uint8_t *)OPTCR_BYTE0_ADDRESS |= Level; - + return HAL_OK; - + } /** @@ -1274,15 +1269,15 @@ static uint8_t FLASH_OB_GetRDP(void) { uint8_t readstatus = OB_RDP_LEVEL_0; - if((*(__IO uint8_t*)(OPTCR_BYTE1_ADDRESS) == (uint8_t)OB_RDP_LEVEL_2)) + if (*(__IO uint8_t *)(OPTCR_BYTE1_ADDRESS) == (uint8_t)OB_RDP_LEVEL_2) { readstatus = OB_RDP_LEVEL_2; } - else if((*(__IO uint8_t*)(OPTCR_BYTE1_ADDRESS) == (uint8_t)OB_RDP_LEVEL_0)) + else if (*(__IO uint8_t *)(OPTCR_BYTE1_ADDRESS) == (uint8_t)OB_RDP_LEVEL_0) { readstatus = OB_RDP_LEVEL_0; } - else + else { readstatus = OB_RDP_LEVEL_1; } @@ -1296,7 +1291,7 @@ static uint8_t FLASH_OB_GetRDP(void) * - OB_BOR_LEVEL3: Supply voltage ranges from 2.7 to 3.6 V * - OB_BOR_LEVEL2: Supply voltage ranges from 2.4 to 2.7 V * - OB_BOR_LEVEL1: Supply voltage ranges from 2.1 to 2.4 V - * - OB_BOR_OFF : Supply voltage ranges from 1.62 to 2.1 V + * - OB_BOR_OFF : Supply voltage ranges from 1.62 to 2.1 V */ static uint8_t FLASH_OB_GetBOR(void) { @@ -1311,7 +1306,7 @@ static uint8_t FLASH_OB_GetBOR(void) void FLASH_FlushCaches(void) { /* Flush instruction cache */ - if(READ_BIT(FLASH->ACR, FLASH_ACR_ICEN)!= RESET) + if (READ_BIT(FLASH->ACR, FLASH_ACR_ICEN) != RESET) { /* Disable instruction cache */ __HAL_FLASH_INSTRUCTION_CACHE_DISABLE(); @@ -1320,9 +1315,9 @@ void FLASH_FlushCaches(void) /* Enable instruction cache */ __HAL_FLASH_INSTRUCTION_CACHE_ENABLE(); } - + /* Flush data cache */ - if(READ_BIT(FLASH->ACR, FLASH_ACR_DCEN) != RESET) + if (READ_BIT(FLASH->ACR, FLASH_ACR_DCEN) != RESET) { /* Disable data cache */ __HAL_FLASH_DATA_CACHE_DISABLE(); @@ -1336,7 +1331,7 @@ void FLASH_FlushCaches(void) /** * @} */ - + #endif /* HAL_FLASH_MODULE_ENABLED */ /** @@ -1347,4 +1342,3 @@ void FLASH_FlushCaches(void) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ramfunc.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ramfunc.c index 12db458b..e6ab3ac8 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ramfunc.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_flash_ramfunc.c @@ -3,7 +3,7 @@ * @file stm32f4xx_hal_flash_ramfunc.c * @author MCD 
Application Team * @brief FLASH RAMFUNC module driver. - * This file provides a FLASH firmware functions which should be + * This file provides a FLASH firmware functions which should be * executed from internal SRAM * + Stop/Start the flash interface while System Run * + Enable/Disable the flash sleep while System Run @@ -14,11 +14,11 @@ [..] *** ARM Compiler *** -------------------- - [..] RAM functions are defined using the toolchain options. + [..] RAM functions are defined using the toolchain options. Functions that are be executed in RAM should reside in a separate source module. Using the 'Options for File' dialog you can simply change the 'Code / Const' area of a module to a memory space in physical RAM. - Available memory areas are declared in the 'Target' tab of the + Available memory areas are declared in the 'Target' tab of the Options for Target' dialog. *** ICCARM Compiler *** @@ -29,21 +29,19 @@ -------------------- [..] RAM functions are defined using a specific toolchain attribute "__attribute__((section(".RamFunc")))". - - @endverbatim + + @endverbatim ****************************************************************************** * @attention * - *

© Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** - */ + */ /* Includes ------------------------------------------------------------------*/ #include "stm32f4xx_hal.h" @@ -70,44 +68,44 @@ * @{ */ -/** @defgroup FLASH_RAMFUNC_Exported_Functions_Group1 Peripheral features functions executed from internal RAM - * @brief Peripheral Extended features functions +/** @defgroup FLASH_RAMFUNC_Exported_Functions_Group1 Peripheral features functions executed from internal RAM + * @brief Peripheral Extended features functions * -@verbatim +@verbatim =============================================================================== ##### ramfunc functions ##### - =============================================================================== + =============================================================================== [..] - This subsection provides a set of functions that should be executed from RAM + This subsection provides a set of functions that should be executed from RAM transfers. - + @endverbatim * @{ */ /** * @brief Stop the flash interface while System Run - * @note This mode is only available for STM32F41xxx/STM32F446xx devices. - * @note This mode couldn't be set while executing with the flash itself. - * It should be done with specific routine executed from RAM. + * @note This mode is only available for STM32F41xxx/STM32F446xx devices. + * @note This mode couldn't be set while executing with the flash itself. + * It should be done with specific routine executed from RAM. * @retval HAL status */ __RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_StopFlashInterfaceClk(void) { /* Enable Power ctrl clock */ __HAL_RCC_PWR_CLK_ENABLE(); - /* Stop the flash interface while System Run */ + /* Stop the flash interface while System Run */ SET_BIT(PWR->CR, PWR_CR_FISSR); - + return HAL_OK; } /** * @brief Start the flash interface while System Run - * @note This mode is only available for STM32F411xx/STM32F446xx devices. - * @note This mode couldn't be set while executing with the flash itself. - * It should be done with specific routine executed from RAM. + * @note This mode is only available for STM32F411xx/STM32F446xx devices. + * @note This mode couldn't be set while executing with the flash itself. + * It should be done with specific routine executed from RAM. * @retval HAL status */ __RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_StartFlashInterfaceClk(void) @@ -122,9 +120,9 @@ __RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_StartFlashInterfaceClk(void) /** * @brief Enable the flash sleep while System Run - * @note This mode is only available for STM32F41xxx/STM32F446xx devices. - * @note This mode could n't be set while executing with the flash itself. - * It should be done with specific routine executed from RAM. + * @note This mode is only available for STM32F41xxx/STM32F446xx devices. + * @note This mode could n't be set while executing with the flash itself. + * It should be done with specific routine executed from RAM. 
* @retval HAL status */ __RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_EnableFlashSleepMode(void) @@ -139,9 +137,9 @@ __RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_EnableFlashSleepMode(void) /** * @brief Disable the flash sleep while System Run - * @note This mode is only available for STM32F41xxx/STM32F446xx devices. - * @note This mode couldn't be set while executing with the flash itself. - * It should be done with specific routine executed from RAM. + * @note This mode is only available for STM32F41xxx/STM32F446xx devices. + * @note This mode couldn't be set while executing with the flash itself. + * It should be done with specific routine executed from RAM. * @retval HAL status */ __RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_DisableFlashSleepMode(void) @@ -150,7 +148,7 @@ __RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_DisableFlashSleepMode(void) __HAL_RCC_PWR_CLK_ENABLE(); /* Disable the flash sleep while System Run */ CLEAR_BIT(PWR->CR, PWR_CR_FMSSR); - + return HAL_OK; } @@ -172,4 +170,3 @@ __RAM_FUNC HAL_StatusTypeDef HAL_FLASHEx_DisableFlashSleepMode(void) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_gpio.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_gpio.c index 89090d6d..b3ce9bbc 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_gpio.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_gpio.c @@ -8,6 +8,17 @@ * + Initialization and de-initialization functions * + IO operation functions * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim ============================================================================== ##### GPIO Peripheral features ##### @@ -91,17 +102,6 @@ @endverbatim ****************************************************************************** - * @attention - * - *

© Copyright (c) 2017 STMicroelectronics.
- * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * - ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ @@ -123,13 +123,6 @@ /** @addtogroup GPIO_Private_Constants GPIO Private Constants * @{ */ -#define GPIO_MODE 0x00000003U -#define EXTI_MODE 0x10000000U -#define GPIO_MODE_IT 0x00010000U -#define GPIO_MODE_EVT 0x00020000U -#define RISING_EDGE 0x00100000U -#define FALLING_EDGE 0x00200000U -#define GPIO_OUTPUT_TYPE 0x00000010U #define GPIO_NUMBER 16U /** @@ -179,7 +172,6 @@ void HAL_GPIO_Init(GPIO_TypeDef *GPIOx, GPIO_InitTypeDef *GPIO_Init) assert_param(IS_GPIO_ALL_INSTANCE(GPIOx)); assert_param(IS_GPIO_PIN(GPIO_Init->Pin)); assert_param(IS_GPIO_MODE(GPIO_Init->Mode)); - assert_param(IS_GPIO_PULL(GPIO_Init->Pull)); /* Configure the port pins */ for(position = 0U; position < GPIO_NUMBER; position++) @@ -193,8 +185,8 @@ void HAL_GPIO_Init(GPIO_TypeDef *GPIOx, GPIO_InitTypeDef *GPIO_Init) { /*--------------------- GPIO Mode Configuration ------------------------*/ /* In case of Output or Alternate function mode selection */ - if((GPIO_Init->Mode == GPIO_MODE_OUTPUT_PP) || (GPIO_Init->Mode == GPIO_MODE_AF_PP) || - (GPIO_Init->Mode == GPIO_MODE_OUTPUT_OD) || (GPIO_Init->Mode == GPIO_MODE_AF_OD)) + if(((GPIO_Init->Mode & GPIO_MODE) == MODE_OUTPUT) || \ + (GPIO_Init->Mode & GPIO_MODE) == MODE_AF) { /* Check the Speed parameter */ assert_param(IS_GPIO_SPEED(GPIO_Init->Speed)); @@ -207,18 +199,24 @@ void HAL_GPIO_Init(GPIO_TypeDef *GPIOx, GPIO_InitTypeDef *GPIO_Init) /* Configure the IO Output Type */ temp = GPIOx->OTYPER; temp &= ~(GPIO_OTYPER_OT_0 << position) ; - temp |= (((GPIO_Init->Mode & GPIO_OUTPUT_TYPE) >> 4U) << position); + temp |= (((GPIO_Init->Mode & OUTPUT_TYPE) >> OUTPUT_TYPE_Pos) << position); GPIOx->OTYPER = temp; } - /* Activate the Pull-up or Pull down resistor for the current IO */ - temp = GPIOx->PUPDR; - temp &= ~(GPIO_PUPDR_PUPDR0 << (position * 2U)); - temp |= ((GPIO_Init->Pull) << (position * 2U)); - GPIOx->PUPDR = temp; + if((GPIO_Init->Mode & GPIO_MODE) != MODE_ANALOG) + { + /* Check the parameters */ + assert_param(IS_GPIO_PULL(GPIO_Init->Pull)); + + /* Activate the Pull-up or Pull down resistor for the current IO */ + temp = GPIOx->PUPDR; + temp &= ~(GPIO_PUPDR_PUPDR0 << (position * 2U)); + temp |= ((GPIO_Init->Pull) << (position * 2U)); + GPIOx->PUPDR = temp; + } /* In case of Alternate function mode selection */ - if((GPIO_Init->Mode == GPIO_MODE_AF_PP) || (GPIO_Init->Mode == GPIO_MODE_AF_OD)) + if((GPIO_Init->Mode & GPIO_MODE) == MODE_AF) { /* Check the Alternate function parameter */ assert_param(IS_GPIO_AF(GPIO_Init->Alternate)); @@ -237,7 +235,7 @@ void HAL_GPIO_Init(GPIO_TypeDef *GPIOx, GPIO_InitTypeDef *GPIO_Init) /*--------------------- EXTI Mode Configuration ------------------------*/ /* Configure the External Interrupt or event for the current IO */ - if((GPIO_Init->Mode & EXTI_MODE) == EXTI_MODE) + if((GPIO_Init->Mode & EXTI_MODE) != 0x00U) { /* Enable SYSCFG Clock */ __HAL_RCC_SYSCFG_CLK_ENABLE(); @@ -247,39 +245,39 @@ void HAL_GPIO_Init(GPIO_TypeDef *GPIOx, GPIO_InitTypeDef *GPIO_Init) temp |= ((uint32_t)(GPIO_GET_INDEX(GPIOx)) << (4U * (position & 0x03U))); SYSCFG->EXTICR[position >> 2U] = temp; - /* Clear EXTI line configuration */ - temp = 
EXTI->IMR; + /* Clear Rising Falling edge configuration */ + temp = EXTI->RTSR; temp &= ~((uint32_t)iocurrent); - if((GPIO_Init->Mode & GPIO_MODE_IT) == GPIO_MODE_IT) + if((GPIO_Init->Mode & TRIGGER_RISING) != 0x00U) { temp |= iocurrent; } - EXTI->IMR = temp; + EXTI->RTSR = temp; - temp = EXTI->EMR; + temp = EXTI->FTSR; temp &= ~((uint32_t)iocurrent); - if((GPIO_Init->Mode & GPIO_MODE_EVT) == GPIO_MODE_EVT) + if((GPIO_Init->Mode & TRIGGER_FALLING) != 0x00U) { temp |= iocurrent; } - EXTI->EMR = temp; + EXTI->FTSR = temp; - /* Clear Rising Falling edge configuration */ - temp = EXTI->RTSR; + temp = EXTI->EMR; temp &= ~((uint32_t)iocurrent); - if((GPIO_Init->Mode & RISING_EDGE) == RISING_EDGE) + if((GPIO_Init->Mode & EXTI_EVT) != 0x00U) { temp |= iocurrent; } - EXTI->RTSR = temp; + EXTI->EMR = temp; - temp = EXTI->FTSR; + /* Clear EXTI line configuration */ + temp = EXTI->IMR; temp &= ~((uint32_t)iocurrent); - if((GPIO_Init->Mode & FALLING_EDGE) == FALLING_EDGE) + if((GPIO_Init->Mode & EXTI_IT) != 0x00U) { temp |= iocurrent; } - EXTI->FTSR = temp; + EXTI->IMR = temp; } } } @@ -323,8 +321,8 @@ void HAL_GPIO_DeInit(GPIO_TypeDef *GPIOx, uint32_t GPIO_Pin) EXTI->EMR &= ~((uint32_t)iocurrent); /* Clear Rising Falling edge configuration */ - EXTI->RTSR &= ~((uint32_t)iocurrent); EXTI->FTSR &= ~((uint32_t)iocurrent); + EXTI->RTSR &= ~((uint32_t)iocurrent); /* Configure the External Interrupt or event for the current IO */ tmp = 0x0FU << (4U * (position & 0x03U)); @@ -434,17 +432,16 @@ void HAL_GPIO_WritePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin, GPIO_PinState Pin */ void HAL_GPIO_TogglePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin) { + uint32_t odr; + /* Check the parameters */ assert_param(IS_GPIO_PIN(GPIO_Pin)); - if ((GPIOx->ODR & GPIO_Pin) == GPIO_Pin) - { - GPIOx->BSRR = (uint32_t)GPIO_Pin << GPIO_NUMBER; - } - else - { - GPIOx->BSRR = GPIO_Pin; - } + /* get current Output Data Register value */ + odr = GPIOx->ODR; + + /* Set selected pins that were at low level, and reset ones that were high */ + GPIOx->BSRR = ((odr & GPIO_Pin) << GPIO_NUMBER) | (~odr & GPIO_Pin); } /** @@ -534,4 +531,3 @@ __weak void HAL_GPIO_EXTI_Callback(uint16_t GPIO_Pin) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2c.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2c.c index 3fb6c3cb..dce9ab11 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2c.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2c.c @@ -9,6 +9,17 @@ * + IO operation functions * + Peripheral State, Mode and Error functions * + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
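Before moving on to the I2C driver, a minimal usage sketch of the reworked GPIO behaviour shown above: the EXTI trigger and interrupt/event selection are now decoded from bit fields inside GPIO_Init->Mode, and HAL_GPIO_TogglePin() toggles through a single BSRR write based on an ODR snapshot. The pin choices (PA5 as an LED output, PC13 as a rising-edge interrupt input) are illustrative assumptions, not taken from this patch.

    #include "stm32f4xx_hal.h"

    void EXTI_And_Toggle_Example(void)
    {
      GPIO_InitTypeDef init = {0};

      /* Clocks for the ports used in this sketch */
      __HAL_RCC_GPIOA_CLK_ENABLE();
      __HAL_RCC_GPIOC_CLK_ENABLE();

      /* PA5 as push-pull output (e.g. an LED) */
      init.Pin   = GPIO_PIN_5;
      init.Mode  = GPIO_MODE_OUTPUT_PP;
      init.Pull  = GPIO_NOPULL;
      init.Speed = GPIO_SPEED_FREQ_LOW;
      HAL_GPIO_Init(GPIOA, &init);

      /* PC13 as rising-edge external interrupt: the interrupt and rising-trigger
         bits embedded in GPIO_MODE_IT_RISING drive the IMR/RTSR programming
         shown in the reworked HAL_GPIO_Init() above */
      init.Pin  = GPIO_PIN_13;
      init.Mode = GPIO_MODE_IT_RISING;
      init.Pull = GPIO_NOPULL;
      HAL_GPIO_Init(GPIOC, &init);
      HAL_NVIC_EnableIRQ(EXTI15_10_IRQn);

      /* One atomic BSRR write per call, whatever the previous pin state */
      HAL_GPIO_TogglePin(GPIOA, GPIO_PIN_5);
    }

    /* Invoked by HAL_GPIO_EXTI_IRQHandler() from the EXTI15_10 ISR */
    void HAL_GPIO_EXTI_Callback(uint16_t GPIO_Pin)
    {
      if (GPIO_Pin == GPIO_PIN_13)
      {
        HAL_GPIO_TogglePin(GPIOA, GPIO_PIN_5);
      }
    }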
+ * + ****************************************************************************** @verbatim ============================================================================== ##### How to use this driver ##### @@ -19,7 +30,7 @@ (#) Declare a I2C_HandleTypeDef handle structure, for example: I2C_HandleTypeDef hi2c; - (#)Initialize the I2C low level resources by implementing the @ref HAL_I2C_MspInit() API: + (#)Initialize the I2C low level resources by implementing the HAL_I2C_MspInit() API: (##) Enable the I2Cx interface clock (##) I2C pins configuration (+++) Enable the clock for the I2C GPIOs @@ -39,48 +50,48 @@ (#) Configure the Communication Speed, Duty cycle, Addressing mode, Own Address1, Dual Addressing mode, Own Address2, General call and Nostretch mode in the hi2c Init structure. - (#) Initialize the I2C registers by calling the @ref HAL_I2C_Init(), configures also the low level Hardware - (GPIO, CLOCK, NVIC...etc) by calling the customized @ref HAL_I2C_MspInit() API. + (#) Initialize the I2C registers by calling the HAL_I2C_Init(), configures also the low level Hardware + (GPIO, CLOCK, NVIC...etc) by calling the customized HAL_I2C_MspInit() API. - (#) To check if target device is ready for communication, use the function @ref HAL_I2C_IsDeviceReady() + (#) To check if target device is ready for communication, use the function HAL_I2C_IsDeviceReady() (#) For I2C IO and IO MEM operations, three operation modes are available within this driver : *** Polling mode IO operation *** ================================= [..] - (+) Transmit in master mode an amount of data in blocking mode using @ref HAL_I2C_Master_Transmit() - (+) Receive in master mode an amount of data in blocking mode using @ref HAL_I2C_Master_Receive() - (+) Transmit in slave mode an amount of data in blocking mode using @ref HAL_I2C_Slave_Transmit() - (+) Receive in slave mode an amount of data in blocking mode using @ref HAL_I2C_Slave_Receive() + (+) Transmit in master mode an amount of data in blocking mode using HAL_I2C_Master_Transmit() + (+) Receive in master mode an amount of data in blocking mode using HAL_I2C_Master_Receive() + (+) Transmit in slave mode an amount of data in blocking mode using HAL_I2C_Slave_Transmit() + (+) Receive in slave mode an amount of data in blocking mode using HAL_I2C_Slave_Receive() *** Polling mode IO MEM operation *** ===================================== [..] - (+) Write an amount of data in blocking mode to a specific memory address using @ref HAL_I2C_Mem_Write() - (+) Read an amount of data in blocking mode from a specific memory address using @ref HAL_I2C_Mem_Read() + (+) Write an amount of data in blocking mode to a specific memory address using HAL_I2C_Mem_Write() + (+) Read an amount of data in blocking mode from a specific memory address using HAL_I2C_Mem_Read() *** Interrupt mode IO operation *** =================================== [..] 
- (+) Transmit in master mode an amount of data in non-blocking mode using @ref HAL_I2C_Master_Transmit_IT() - (+) At transmission end of transfer, @ref HAL_I2C_MasterTxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_MasterTxCpltCallback() - (+) Receive in master mode an amount of data in non-blocking mode using @ref HAL_I2C_Master_Receive_IT() - (+) At reception end of transfer, @ref HAL_I2C_MasterRxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_MasterRxCpltCallback() - (+) Transmit in slave mode an amount of data in non-blocking mode using @ref HAL_I2C_Slave_Transmit_IT() - (+) At transmission end of transfer, @ref HAL_I2C_SlaveTxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_SlaveTxCpltCallback() - (+) Receive in slave mode an amount of data in non-blocking mode using @ref HAL_I2C_Slave_Receive_IT() - (+) At reception end of transfer, @ref HAL_I2C_SlaveRxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_SlaveRxCpltCallback() - (+) In case of transfer Error, @ref HAL_I2C_ErrorCallback() function is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_ErrorCallback() - (+) Abort a master I2C process communication with Interrupt using @ref HAL_I2C_Master_Abort_IT() - (+) End of abort process, @ref HAL_I2C_AbortCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_AbortCpltCallback() + (+) Transmit in master mode an amount of data in non-blocking mode using HAL_I2C_Master_Transmit_IT() + (+) At transmission end of transfer, HAL_I2C_MasterTxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_MasterTxCpltCallback() + (+) Receive in master mode an amount of data in non-blocking mode using HAL_I2C_Master_Receive_IT() + (+) At reception end of transfer, HAL_I2C_MasterRxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_MasterRxCpltCallback() + (+) Transmit in slave mode an amount of data in non-blocking mode using HAL_I2C_Slave_Transmit_IT() + (+) At transmission end of transfer, HAL_I2C_SlaveTxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_SlaveTxCpltCallback() + (+) Receive in slave mode an amount of data in non-blocking mode using HAL_I2C_Slave_Receive_IT() + (+) At reception end of transfer, HAL_I2C_SlaveRxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_SlaveRxCpltCallback() + (+) In case of transfer Error, HAL_I2C_ErrorCallback() function is executed and user can + add his own code by customization of function pointer HAL_I2C_ErrorCallback() + (+) Abort a master or memory I2C process communication with Interrupt using HAL_I2C_Master_Abort_IT() + (+) End of abort process, HAL_I2C_AbortCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_AbortCpltCallback() *** Interrupt mode or DMA mode IO sequential operation *** ========================================================== @@ -89,14 +100,14 @@ when a direction change during transfer [..] 
(+) A specific option field manage the different steps of a sequential transfer - (+) Option field values are defined through @ref I2C_XferOptions_definition and are listed below: - (++) I2C_FIRST_AND_LAST_FRAME: No sequential usage, functionnal is same as associated interfaces in no sequential mode + (+) Option field values are defined through I2C_XferOptions_definition and are listed below: + (++) I2C_FIRST_AND_LAST_FRAME: No sequential usage, functional is same as associated interfaces in no sequential mode (++) I2C_FIRST_FRAME: Sequential usage, this option allow to manage a sequence with start condition, address and data to transfer without a final stop condition (++) I2C_FIRST_AND_NEXT_FRAME: Sequential usage (Master only), this option allow to manage a sequence with start condition, address and data to transfer without a final stop condition, an then permit a call the same master sequential interface - several times (like @ref HAL_I2C_Master_Seq_Transmit_IT() then @ref HAL_I2C_Master_Seq_Transmit_IT() - or @ref HAL_I2C_Master_Seq_Transmit_DMA() then @ref HAL_I2C_Master_Seq_Transmit_DMA()) + several times (like HAL_I2C_Master_Seq_Transmit_IT() then HAL_I2C_Master_Seq_Transmit_IT() + or HAL_I2C_Master_Seq_Transmit_DMA() then HAL_I2C_Master_Seq_Transmit_DMA()) (++) I2C_NEXT_FRAME: Sequential usage, this option allow to manage a sequence with a restart condition, address and with new data to transfer if the direction change or manage only the new data to transfer if no direction change and without a final stop condition in both cases @@ -109,7 +120,7 @@ or HAL_I2C_Master_Seq_Receive_IT(option I2C_FIRST_AND_NEXT_FRAME then I2C_NEXT_FRAME) or HAL_I2C_Master_Seq_Transmit_DMA(option I2C_FIRST_AND_NEXT_FRAME then I2C_NEXT_FRAME) or HAL_I2C_Master_Seq_Receive_DMA(option I2C_FIRST_AND_NEXT_FRAME then I2C_NEXT_FRAME). - Then usage of this option I2C_LAST_FRAME_NO_STOP at the last Transmit or Receive sequence permit to call the oposite interface Receive or Transmit + Then usage of this option I2C_LAST_FRAME_NO_STOP at the last Transmit or Receive sequence permit to call the opposite interface Receive or Transmit without stopping the communication and so generate a restart condition. (++) I2C_OTHER_FRAME: Sequential usage (Master only), this option allow to manage a restart condition after each call of the same master sequential interface. @@ -119,86 +130,86 @@ or HAL_I2C_Master_Seq_Receive_DMA(option I2C_FIRST_FRAME then I2C_OTHER_FRAME). Then usage of this option I2C_OTHER_AND_LAST_FRAME at the last frame to help automatic generation of STOP condition. 
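As a concrete illustration of the XferOptions described above, the following sketch performs a master write followed by a read with a repeated start between the two frames. It is a sketch only: the hi2c1 handle, the 0x50 device address and the buffer sizes are assumptions rather than part of this patch, the I2C event/error interrupts are assumed to be enabled in the NVIC, and error handling is reduced to early returns.

    #include "stm32f4xx_hal.h"

    extern I2C_HandleTypeDef hi2c1;   /* assumed to be initialized elsewhere */

    /* Write one register address, then read back two bytes without releasing
       the bus in between (restart condition instead of a stop). */
    HAL_StatusTypeDef Sequential_WriteThenRead(uint8_t reg, uint8_t *rx)
    {
      uint16_t dev = (uint16_t)(0x50U << 1);   /* illustrative 7-bit address, shifted */

      /* First frame: start + address + data, no stop condition */
      if (HAL_I2C_Master_Seq_Transmit_IT(&hi2c1, dev, &reg, 1U, I2C_FIRST_FRAME) != HAL_OK)
      {
        return HAL_ERROR;
      }
      /* Crude wait for the interrupt-driven transfer to finish */
      while (HAL_I2C_GetState(&hi2c1) != HAL_I2C_STATE_READY) { }

      /* Last frame: restart (direction change) + address + data + stop */
      if (HAL_I2C_Master_Seq_Receive_IT(&hi2c1, dev, rx, 2U, I2C_LAST_FRAME) != HAL_OK)
      {
        return HAL_ERROR;
      }
      while (HAL_I2C_GetState(&hi2c1) != HAL_I2C_STATE_READY) { }

      return HAL_OK;
    }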
- (+) Differents sequential I2C interfaces are listed below: - (++) Sequential transmit in master I2C mode an amount of data in non-blocking mode using @ref HAL_I2C_Master_Seq_Transmit_IT() - or using @ref HAL_I2C_Master_Seq_Transmit_DMA() - (+++) At transmission end of current frame transfer, @ref HAL_I2C_MasterTxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_MasterTxCpltCallback() - (++) Sequential receive in master I2C mode an amount of data in non-blocking mode using @ref HAL_I2C_Master_Seq_Receive_IT() - or using @ref HAL_I2C_Master_Seq_Receive_DMA() - (+++) At reception end of current frame transfer, @ref HAL_I2C_MasterRxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_MasterRxCpltCallback() - (++) Abort a master IT or DMA I2C process communication with Interrupt using @ref HAL_I2C_Master_Abort_IT() - (+++) End of abort process, @ref HAL_I2C_AbortCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_AbortCpltCallback() - (++) Enable/disable the Address listen mode in slave I2C mode using @ref HAL_I2C_EnableListen_IT() @ref HAL_I2C_DisableListen_IT() - (+++) When address slave I2C match, @ref HAL_I2C_AddrCallback() is executed and user can + (+) Different sequential I2C interfaces are listed below: + (++) Sequential transmit in master I2C mode an amount of data in non-blocking mode using HAL_I2C_Master_Seq_Transmit_IT() + or using HAL_I2C_Master_Seq_Transmit_DMA() + (+++) At transmission end of current frame transfer, HAL_I2C_MasterTxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_MasterTxCpltCallback() + (++) Sequential receive in master I2C mode an amount of data in non-blocking mode using HAL_I2C_Master_Seq_Receive_IT() + or using HAL_I2C_Master_Seq_Receive_DMA() + (+++) At reception end of current frame transfer, HAL_I2C_MasterRxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_MasterRxCpltCallback() + (++) Abort a master or memory IT or DMA I2C process communication with Interrupt using HAL_I2C_Master_Abort_IT() + (+++) End of abort process, HAL_I2C_AbortCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_AbortCpltCallback() + (++) Enable/disable the Address listen mode in slave I2C mode using HAL_I2C_EnableListen_IT() HAL_I2C_DisableListen_IT() + (+++) When address slave I2C match, HAL_I2C_AddrCallback() is executed and user can add his own code to check the Address Match Code and the transmission direction request by master (Write/Read). 
- (+++) At Listen mode end @ref HAL_I2C_ListenCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_ListenCpltCallback() - (++) Sequential transmit in slave I2C mode an amount of data in non-blocking mode using @ref HAL_I2C_Slave_Seq_Transmit_IT() - or using @ref HAL_I2C_Slave_Seq_Transmit_DMA() - (+++) At transmission end of current frame transfer, @ref HAL_I2C_SlaveTxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_SlaveTxCpltCallback() - (++) Sequential receive in slave I2C mode an amount of data in non-blocking mode using @ref HAL_I2C_Slave_Seq_Receive_IT() - or using @ref HAL_I2C_Slave_Seq_Receive_DMA() - (+++) At reception end of current frame transfer, @ref HAL_I2C_SlaveRxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_SlaveRxCpltCallback() - (++) In case of transfer Error, @ref HAL_I2C_ErrorCallback() function is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_ErrorCallback() + (+++) At Listen mode end HAL_I2C_ListenCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_ListenCpltCallback() + (++) Sequential transmit in slave I2C mode an amount of data in non-blocking mode using HAL_I2C_Slave_Seq_Transmit_IT() + or using HAL_I2C_Slave_Seq_Transmit_DMA() + (+++) At transmission end of current frame transfer, HAL_I2C_SlaveTxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_SlaveTxCpltCallback() + (++) Sequential receive in slave I2C mode an amount of data in non-blocking mode using HAL_I2C_Slave_Seq_Receive_IT() + or using HAL_I2C_Slave_Seq_Receive_DMA() + (+++) At reception end of current frame transfer, HAL_I2C_SlaveRxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_SlaveRxCpltCallback() + (++) In case of transfer Error, HAL_I2C_ErrorCallback() function is executed and user can + add his own code by customization of function pointer HAL_I2C_ErrorCallback() *** Interrupt mode IO MEM operation *** ======================================= [..] 
(+) Write an amount of data in non-blocking mode with Interrupt to a specific memory address using - @ref HAL_I2C_Mem_Write_IT() - (+) At Memory end of write transfer, @ref HAL_I2C_MemTxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_MemTxCpltCallback() + HAL_I2C_Mem_Write_IT() + (+) At Memory end of write transfer, HAL_I2C_MemTxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_MemTxCpltCallback() (+) Read an amount of data in non-blocking mode with Interrupt from a specific memory address using - @ref HAL_I2C_Mem_Read_IT() - (+) At Memory end of read transfer, @ref HAL_I2C_MemRxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_MemRxCpltCallback() - (+) In case of transfer Error, @ref HAL_I2C_ErrorCallback() function is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_ErrorCallback() + HAL_I2C_Mem_Read_IT() + (+) At Memory end of read transfer, HAL_I2C_MemRxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_MemRxCpltCallback() + (+) In case of transfer Error, HAL_I2C_ErrorCallback() function is executed and user can + add his own code by customization of function pointer HAL_I2C_ErrorCallback() *** DMA mode IO operation *** ============================== [..] (+) Transmit in master mode an amount of data in non-blocking mode (DMA) using - @ref HAL_I2C_Master_Transmit_DMA() - (+) At transmission end of transfer, @ref HAL_I2C_MasterTxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_MasterTxCpltCallback() + HAL_I2C_Master_Transmit_DMA() + (+) At transmission end of transfer, HAL_I2C_MasterTxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_MasterTxCpltCallback() (+) Receive in master mode an amount of data in non-blocking mode (DMA) using - @ref HAL_I2C_Master_Receive_DMA() - (+) At reception end of transfer, @ref HAL_I2C_MasterRxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_MasterRxCpltCallback() + HAL_I2C_Master_Receive_DMA() + (+) At reception end of transfer, HAL_I2C_MasterRxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_MasterRxCpltCallback() (+) Transmit in slave mode an amount of data in non-blocking mode (DMA) using - @ref HAL_I2C_Slave_Transmit_DMA() - (+) At transmission end of transfer, @ref HAL_I2C_SlaveTxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_SlaveTxCpltCallback() + HAL_I2C_Slave_Transmit_DMA() + (+) At transmission end of transfer, HAL_I2C_SlaveTxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_SlaveTxCpltCallback() (+) Receive in slave mode an amount of data in non-blocking mode (DMA) using - @ref HAL_I2C_Slave_Receive_DMA() - (+) At reception end of transfer, @ref HAL_I2C_SlaveRxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_SlaveRxCpltCallback() - (+) In case of transfer Error, @ref HAL_I2C_ErrorCallback() function is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_ErrorCallback() - (+) Abort a master I2C process communication with Interrupt using @ref 
HAL_I2C_Master_Abort_IT() - (+) End of abort process, @ref HAL_I2C_AbortCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_AbortCpltCallback() + HAL_I2C_Slave_Receive_DMA() + (+) At reception end of transfer, HAL_I2C_SlaveRxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_SlaveRxCpltCallback() + (+) In case of transfer Error, HAL_I2C_ErrorCallback() function is executed and user can + add his own code by customization of function pointer HAL_I2C_ErrorCallback() + (+) Abort a master or memory I2C process communication with Interrupt using HAL_I2C_Master_Abort_IT() + (+) End of abort process, HAL_I2C_AbortCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_AbortCpltCallback() *** DMA mode IO MEM operation *** ================================= [..] (+) Write an amount of data in non-blocking mode with DMA to a specific memory address using - @ref HAL_I2C_Mem_Write_DMA() - (+) At Memory end of write transfer, @ref HAL_I2C_MemTxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_MemTxCpltCallback() + HAL_I2C_Mem_Write_DMA() + (+) At Memory end of write transfer, HAL_I2C_MemTxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_MemTxCpltCallback() (+) Read an amount of data in non-blocking mode with DMA from a specific memory address using - @ref HAL_I2C_Mem_Read_DMA() - (+) At Memory end of read transfer, @ref HAL_I2C_MemRxCpltCallback() is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_MemRxCpltCallback() - (+) In case of transfer Error, @ref HAL_I2C_ErrorCallback() function is executed and user can - add his own code by customization of function pointer @ref HAL_I2C_ErrorCallback() + HAL_I2C_Mem_Read_DMA() + (+) At Memory end of read transfer, HAL_I2C_MemRxCpltCallback() is executed and user can + add his own code by customization of function pointer HAL_I2C_MemRxCpltCallback() + (+) In case of transfer Error, HAL_I2C_ErrorCallback() function is executed and user can + add his own code by customization of function pointer HAL_I2C_ErrorCallback() *** I2C HAL driver macros list *** @@ -206,22 +217,22 @@ [..] Below the list of most used macros in I2C HAL driver. - (+) @ref __HAL_I2C_ENABLE: Enable the I2C peripheral - (+) @ref __HAL_I2C_DISABLE: Disable the I2C peripheral - (+) @ref __HAL_I2C_GET_FLAG: Checks whether the specified I2C flag is set or not - (+) @ref __HAL_I2C_CLEAR_FLAG: Clear the specified I2C pending flag - (+) @ref __HAL_I2C_ENABLE_IT: Enable the specified I2C interrupt - (+) @ref __HAL_I2C_DISABLE_IT: Disable the specified I2C interrupt + (+) __HAL_I2C_ENABLE: Enable the I2C peripheral + (+) __HAL_I2C_DISABLE: Disable the I2C peripheral + (+) __HAL_I2C_GET_FLAG: Checks whether the specified I2C flag is set or not + (+) __HAL_I2C_CLEAR_FLAG: Clear the specified I2C pending flag + (+) __HAL_I2C_ENABLE_IT: Enable the specified I2C interrupt + (+) __HAL_I2C_DISABLE_IT: Disable the specified I2C interrupt *** Callback registration *** ============================================= [..] The compilation flag USE_HAL_I2C_REGISTER_CALLBACKS when set to 1 allows the user to configure dynamically the driver callbacks. 
- Use Functions @ref HAL_I2C_RegisterCallback() or @ref HAL_I2C_RegisterAddrCallback() + Use Functions HAL_I2C_RegisterCallback() or HAL_I2C_RegisterAddrCallback() to register an interrupt callback. [..] - Function @ref HAL_I2C_RegisterCallback() allows to register following callbacks: + Function HAL_I2C_RegisterCallback() allows to register following callbacks: (+) MasterTxCpltCallback : callback for Master transmission end of transfer. (+) MasterRxCpltCallback : callback for Master reception end of transfer. (+) SlaveTxCpltCallback : callback for Slave transmission end of transfer. @@ -236,11 +247,11 @@ This function takes as parameters the HAL peripheral handle, the Callback ID and a pointer to the user callback function. [..] - For specific callback AddrCallback use dedicated register callbacks : @ref HAL_I2C_RegisterAddrCallback(). + For specific callback AddrCallback use dedicated register callbacks : HAL_I2C_RegisterAddrCallback(). [..] - Use function @ref HAL_I2C_UnRegisterCallback to reset a callback to the default + Use function HAL_I2C_UnRegisterCallback to reset a callback to the default weak function. - @ref HAL_I2C_UnRegisterCallback takes as parameters the HAL peripheral handle, + HAL_I2C_UnRegisterCallback takes as parameters the HAL peripheral handle, and the Callback ID. This function allows to reset following callbacks: (+) MasterTxCpltCallback : callback for Master transmission end of transfer. @@ -255,24 +266,24 @@ (+) MspInitCallback : callback for Msp Init. (+) MspDeInitCallback : callback for Msp DeInit. [..] - For callback AddrCallback use dedicated register callbacks : @ref HAL_I2C_UnRegisterAddrCallback(). + For callback AddrCallback use dedicated register callbacks : HAL_I2C_UnRegisterAddrCallback(). [..] - By default, after the @ref HAL_I2C_Init() and when the state is @ref HAL_I2C_STATE_RESET + By default, after the HAL_I2C_Init() and when the state is HAL_I2C_STATE_RESET all callbacks are set to the corresponding weak functions: - examples @ref HAL_I2C_MasterTxCpltCallback(), @ref HAL_I2C_MasterRxCpltCallback(). + examples HAL_I2C_MasterTxCpltCallback(), HAL_I2C_MasterRxCpltCallback(). Exception done for MspInit and MspDeInit functions that are - reset to the legacy weak functions in the @ref HAL_I2C_Init()/ @ref HAL_I2C_DeInit() only when + reset to the legacy weak functions in the HAL_I2C_Init()/ HAL_I2C_DeInit() only when these callbacks are null (not registered beforehand). - If MspInit or MspDeInit are not null, the @ref HAL_I2C_Init()/ @ref HAL_I2C_DeInit() + If MspInit or MspDeInit are not null, the HAL_I2C_Init()/ HAL_I2C_DeInit() keep and use the user MspInit/MspDeInit callbacks (registered beforehand) whatever the state. [..] - Callbacks can be registered/unregistered in @ref HAL_I2C_STATE_READY state only. + Callbacks can be registered/unregistered in HAL_I2C_STATE_READY state only. Exception done MspInit/MspDeInit functions that can be registered/unregistered - in @ref HAL_I2C_STATE_READY or @ref HAL_I2C_STATE_RESET state, + in HAL_I2C_STATE_READY or HAL_I2C_STATE_RESET state, thus registered (user) MspInit/DeInit callbacks can be used during the Init/DeInit. Then, the user first registers the MspInit/MspDeInit user callbacks - using @ref HAL_I2C_RegisterCallback() before calling @ref HAL_I2C_DeInit() - or @ref HAL_I2C_Init() function. + using HAL_I2C_RegisterCallback() before calling HAL_I2C_DeInit() + or HAL_I2C_Init() function. [..] 
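A short sketch of the registration flow described above, assuming the project builds with USE_HAL_I2C_REGISTER_CALLBACKS defined as 1; the handle name and the callback body are illustrative only.

    #include "stm32f4xx_hal.h"

    #if (USE_HAL_I2C_REGISTER_CALLBACKS == 1)
    extern I2C_HandleTypeDef hi2c1;   /* assumed to be initialized elsewhere */

    static void MyMasterTxCplt(I2C_HandleTypeDef *hi2c)
    {
      /* user code executed at the end of a master transmission */
      (void)hi2c;
    }

    void Register_I2C_Callbacks(void)
    {
      /* MspInit/MspDeInit may be registered before HAL_I2C_Init(); the other
         callbacks require HAL_I2C_STATE_READY, i.e. after HAL_I2C_Init(). */
      if (HAL_I2C_RegisterCallback(&hi2c1, HAL_I2C_MASTER_TX_COMPLETE_CB_ID,
                                   MyMasterTxCplt) != HAL_OK)
      {
        /* registration refused: wrong state or NULL pointer */
      }

      /* Restore the default weak callback when it is no longer needed */
      (void)HAL_I2C_UnRegisterCallback(&hi2c1, HAL_I2C_MASTER_TX_COMPLETE_CB_ID);
    }
    #endif /* USE_HAL_I2C_REGISTER_CALLBACKS */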
When the compilation flag USE_HAL_I2C_REGISTER_CALLBACKS is set to 0 or not defined, the callback registration feature is not available and all callbacks @@ -284,18 +295,6 @@ (@) You can refer to the I2C HAL driver header file for more useful macros @endverbatim - ****************************************************************************** - * @attention - * - *

© Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * - ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ @@ -314,7 +313,7 @@ /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ -/** @addtogroup I2C_Private_Define +/** @defgroup I2C_Private_Define I2C Private Define * @{ */ #define I2C_TIMEOUT_FLAG 35U /*!< Timeout 35 ms */ @@ -335,6 +334,14 @@ */ /* Private macro -------------------------------------------------------------*/ +/** @addtogroup I2C_Private_Macros + * @{ + */ +/* Macro to get remaining data to transfer on DMA side */ +#define I2C_GET_DMA_REMAIN_DATA(__HANDLE__) __HAL_DMA_GET_COUNTER(__HANDLE__) +/** + * @} + */ /* Private variables ---------------------------------------------------------*/ /* Private function prototypes -----------------------------------------------*/ @@ -384,6 +391,9 @@ static void I2C_MemoryTransmit_TXE_BTF(I2C_HandleTypeDef *hi2c); /* Private function to Convert Specific options */ static void I2C_ConvertOtherXferOptions(I2C_HandleTypeDef *hi2c); + +/* Private function to flush DR register */ +static void I2C_Flush_DR(I2C_HandleTypeDef *hi2c); /** * @} */ @@ -941,6 +951,20 @@ HAL_StatusTypeDef HAL_I2C_UnRegisterAddrCallback(I2C_HandleTypeDef *hi2c) #endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ +/** + * @brief I2C data register flush process. + * @param hi2c I2C handle. + * @retval None + */ +static void I2C_Flush_DR(I2C_HandleTypeDef *hi2c) +{ + /* Write a dummy data in DR to clear TXE flag */ + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_TXE) != RESET) + { + hi2c->Instance->DR = 0x00U; + } +} + /** * @} */ @@ -1358,6 +1382,13 @@ HAL_StatusTypeDef HAL_I2C_Master_Receive(I2C_HandleTypeDef *hi2c, uint16_t DevAd if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BTF) == SET) { + + if (hi2c->XferSize == 3U) + { + /* Disable Acknowledge */ + CLEAR_BIT(hi2c->Instance->CR1, I2C_CR1_ACK); + } + /* Read data from DR */ *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->DR; @@ -1663,10 +1694,7 @@ HAL_StatusTypeDef HAL_I2C_Master_Transmit_IT(I2C_HandleTypeDef *hi2c, uint16_t D hi2c->Mode = HAL_I2C_MODE_NONE; hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); - - return HAL_ERROR; + return HAL_BUSY; } } while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) != RESET); @@ -1695,9 +1723,6 @@ HAL_StatusTypeDef HAL_I2C_Master_Transmit_IT(I2C_HandleTypeDef *hi2c, uint16_t D hi2c->XferOptions = I2C_NO_OPTION_FRAME; hi2c->Devaddress = DevAddress; - /* Generate Start */ - SET_BIT(hi2c->Instance->CR1, I2C_CR1_START); - /* Process Unlocked */ __HAL_UNLOCK(hi2c); @@ -1707,6 +1732,9 @@ HAL_StatusTypeDef HAL_I2C_Master_Transmit_IT(I2C_HandleTypeDef *hi2c, uint16_t D /* Enable EVT, BUF and ERR interrupt */ __HAL_I2C_ENABLE_IT(hi2c, I2C_IT_EVT | I2C_IT_BUF | I2C_IT_ERR); + /* Generate Start */ + SET_BIT(hi2c->Instance->CR1, I2C_CR1_START); + return HAL_OK; } else @@ -1743,10 +1771,7 @@ HAL_StatusTypeDef HAL_I2C_Master_Receive_IT(I2C_HandleTypeDef *hi2c, uint16_t De hi2c->Mode = HAL_I2C_MODE_NONE; hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); - - return HAL_ERROR; + return HAL_BUSY; } } while (__HAL_I2C_GET_FLAG(hi2c, 
I2C_FLAG_BUSY) != RESET); @@ -1775,11 +1800,6 @@ HAL_StatusTypeDef HAL_I2C_Master_Receive_IT(I2C_HandleTypeDef *hi2c, uint16_t De hi2c->XferOptions = I2C_NO_OPTION_FRAME; hi2c->Devaddress = DevAddress; - /* Enable Acknowledge */ - SET_BIT(hi2c->Instance->CR1, I2C_CR1_ACK); - - /* Generate Start */ - SET_BIT(hi2c->Instance->CR1, I2C_CR1_START); /* Process Unlocked */ __HAL_UNLOCK(hi2c); @@ -1791,6 +1811,12 @@ HAL_StatusTypeDef HAL_I2C_Master_Receive_IT(I2C_HandleTypeDef *hi2c, uint16_t De /* Enable EVT, BUF and ERR interrupt */ __HAL_I2C_ENABLE_IT(hi2c, I2C_IT_EVT | I2C_IT_BUF | I2C_IT_ERR); + /* Enable Acknowledge */ + SET_BIT(hi2c->Instance->CR1, I2C_CR1_ACK); + + /* Generate Start */ + SET_BIT(hi2c->Instance->CR1, I2C_CR1_START); + return HAL_OK; } else @@ -1952,10 +1978,7 @@ HAL_StatusTypeDef HAL_I2C_Master_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint16_t hi2c->Mode = HAL_I2C_MODE_NONE; hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); - - return HAL_ERROR; + return HAL_BUSY; } } while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) != RESET); @@ -1986,29 +2009,40 @@ HAL_StatusTypeDef HAL_I2C_Master_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint16_t if (hi2c->XferSize > 0U) { - /* Set the I2C DMA transfer complete callback */ - hi2c->hdmatx->XferCpltCallback = I2C_DMAXferCplt; - - /* Set the DMA error callback */ - hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMAXferCplt; - /* Set the unused DMA callbacks to NULL */ - hi2c->hdmatx->XferHalfCpltCallback = NULL; - hi2c->hdmatx->XferM1CpltCallback = NULL; - hi2c->hdmatx->XferM1HalfCpltCallback = NULL; - hi2c->hdmatx->XferAbortCallback = NULL; + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; - /* Enable the DMA stream */ - dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)hi2c->pBuffPtr, (uint32_t)&hi2c->Instance->DR, hi2c->XferSize); + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferM1CpltCallback = NULL; + hi2c->hdmatx->XferM1HalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; - if (dmaxferstatus == HAL_OK) + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)hi2c->pBuffPtr, (uint32_t)&hi2c->Instance->DR, hi2c->XferSize); + } + else { - /* Enable Acknowledge */ - SET_BIT(hi2c->Instance->CR1, I2C_CR1_ACK); + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; - /* Generate Start */ - SET_BIT(hi2c->Instance->CR1, I2C_CR1_START); + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + if (dmaxferstatus == HAL_OK) + { /* Process Unlocked */ __HAL_UNLOCK(hi2c); @@ -2021,6 +2055,12 @@ HAL_StatusTypeDef HAL_I2C_Master_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint16_t /* Enable DMA Request */ SET_BIT(hi2c->Instance->CR2, I2C_CR2_DMAEN); + + /* Enable Acknowledge */ + SET_BIT(hi2c->Instance->CR1, I2C_CR1_ACK); + + /* Generate Start */ + SET_BIT(hi2c->Instance->CR1, I2C_CR1_START); } else { @@ -2093,10 +2133,7 @@ HAL_StatusTypeDef HAL_I2C_Master_Receive_DMA(I2C_HandleTypeDef *hi2c, uint16_t D hi2c->Mode = HAL_I2C_MODE_NONE; hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); - - return HAL_ERROR; + return HAL_BUSY; } } while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) != RESET); @@ 
-2127,20 +2164,37 @@ HAL_StatusTypeDef HAL_I2C_Master_Receive_DMA(I2C_HandleTypeDef *hi2c, uint16_t D if (hi2c->XferSize > 0U) { - /* Set the I2C DMA transfer complete callback */ - hi2c->hdmarx->XferCpltCallback = I2C_DMAXferCplt; + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMAXferCplt; - /* Set the DMA error callback */ - hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; - /* Set the unused DMA callbacks to NULL */ - hi2c->hdmarx->XferHalfCpltCallback = NULL; - hi2c->hdmarx->XferM1CpltCallback = NULL; - hi2c->hdmarx->XferM1HalfCpltCallback = NULL; - hi2c->hdmarx->XferAbortCallback = NULL; + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferM1CpltCallback = NULL; + hi2c->hdmarx->XferM1HalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; - /* Enable the DMA stream */ - dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->DR, (uint32_t)hi2c->pBuffPtr, hi2c->XferSize); + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->DR, (uint32_t)hi2c->pBuffPtr, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } if (dmaxferstatus == HAL_OK) { @@ -2180,12 +2234,6 @@ HAL_StatusTypeDef HAL_I2C_Master_Receive_DMA(I2C_HandleTypeDef *hi2c, uint16_t D } else { - /* Enable Acknowledge */ - SET_BIT(hi2c->Instance->CR1, I2C_CR1_ACK); - - /* Generate Start */ - SET_BIT(hi2c->Instance->CR1, I2C_CR1_START); - /* Process Unlocked */ __HAL_UNLOCK(hi2c); @@ -2195,6 +2243,12 @@ HAL_StatusTypeDef HAL_I2C_Master_Receive_DMA(I2C_HandleTypeDef *hi2c, uint16_t D /* Enable EVT, BUF and ERR interrupt */ __HAL_I2C_ENABLE_IT(hi2c, I2C_IT_EVT | I2C_IT_BUF | I2C_IT_ERR); + + /* Enable Acknowledge */ + SET_BIT(hi2c->Instance->CR1, I2C_CR1_ACK); + + /* Generate Start */ + SET_BIT(hi2c->Instance->CR1, I2C_CR1_START); } return HAL_OK; @@ -2247,20 +2301,37 @@ HAL_StatusTypeDef HAL_I2C_Slave_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint8_t *p hi2c->XferSize = hi2c->XferCount; hi2c->XferOptions = I2C_NO_OPTION_FRAME; - /* Set the I2C DMA transfer complete callback */ - hi2c->hdmatx->XferCpltCallback = I2C_DMAXferCplt; + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMAXferCplt; - /* Set the DMA error callback */ - hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; - /* Set the unused DMA callbacks to NULL */ - hi2c->hdmatx->XferHalfCpltCallback = NULL; - hi2c->hdmatx->XferM1CpltCallback = NULL; - hi2c->hdmatx->XferM1HalfCpltCallback = NULL; - hi2c->hdmatx->XferAbortCallback = NULL; + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferM1CpltCallback = NULL; + hi2c->hdmatx->XferM1HalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; - /* Enable the DMA stream */ - dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)hi2c->pBuffPtr, (uint32_t)&hi2c->Instance->DR, hi2c->XferSize); + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)hi2c->pBuffPtr, 
(uint32_t)&hi2c->Instance->DR, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } if (dmaxferstatus == HAL_OK) { @@ -2344,20 +2415,37 @@ HAL_StatusTypeDef HAL_I2C_Slave_Receive_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pD hi2c->XferSize = hi2c->XferCount; hi2c->XferOptions = I2C_NO_OPTION_FRAME; - /* Set the I2C DMA transfer complete callback */ - hi2c->hdmarx->XferCpltCallback = I2C_DMAXferCplt; + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMAXferCplt; - /* Set the DMA error callback */ - hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; - /* Set the unused DMA callbacks to NULL */ - hi2c->hdmarx->XferHalfCpltCallback = NULL; - hi2c->hdmarx->XferM1CpltCallback = NULL; - hi2c->hdmarx->XferM1HalfCpltCallback = NULL; - hi2c->hdmarx->XferAbortCallback = NULL; + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferM1CpltCallback = NULL; + hi2c->hdmarx->XferM1HalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; - /* Enable the DMA stream */ - dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->DR, (uint32_t)hi2c->pBuffPtr, hi2c->XferSize); + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->DR, (uint32_t)hi2c->pBuffPtr, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } if (dmaxferstatus == HAL_OK) { @@ -2743,6 +2831,11 @@ HAL_StatusTypeDef HAL_I2C_Mem_Read(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BTF) == SET) { + if (hi2c->XferSize == 3U) + { + /* Disable Acknowledge */ + CLEAR_BIT(hi2c->Instance->CR1, I2C_CR1_ACK); + } /* Read data from DR */ *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->DR; @@ -2803,10 +2896,7 @@ HAL_StatusTypeDef HAL_I2C_Mem_Write_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddr hi2c->Mode = HAL_I2C_MODE_NONE; hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); - - return HAL_ERROR; + return HAL_BUSY; } } while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) != RESET); @@ -2891,10 +2981,7 @@ HAL_StatusTypeDef HAL_I2C_Mem_Read_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddre hi2c->Mode = HAL_I2C_MODE_NONE; hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); - - return HAL_ERROR; + return HAL_BUSY; } } while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) != RESET); @@ -2989,10 +3076,7 @@ HAL_StatusTypeDef HAL_I2C_Mem_Write_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAdd hi2c->Mode = HAL_I2C_MODE_NONE; hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); - - return HAL_ERROR; + return HAL_BUSY; } } while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) != RESET); @@ -3019,23 +3103,44 @@ HAL_StatusTypeDef HAL_I2C_Mem_Write_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAdd hi2c->XferCount = Size; hi2c->XferSize = hi2c->XferCount; hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->Devaddress = DevAddress; + 
hi2c->Memaddress = MemAddress; + hi2c->MemaddSize = MemAddSize; + hi2c->EventCount = 0U; if (hi2c->XferSize > 0U) { - /* Set the I2C DMA transfer complete callback */ - hi2c->hdmatx->XferCpltCallback = I2C_DMAXferCplt; + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMAXferCplt; - /* Set the DMA error callback */ - hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; - /* Set the unused DMA callbacks to NULL */ - hi2c->hdmatx->XferHalfCpltCallback = NULL; - hi2c->hdmatx->XferM1CpltCallback = NULL; - hi2c->hdmatx->XferM1HalfCpltCallback = NULL; - hi2c->hdmatx->XferAbortCallback = NULL; + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferM1CpltCallback = NULL; + hi2c->hdmatx->XferM1HalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; - /* Enable the DMA stream */ - dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)hi2c->pBuffPtr, (uint32_t)&hi2c->Instance->DR, hi2c->XferSize); + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)hi2c->pBuffPtr, (uint32_t)&hi2c->Instance->DR, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } if (dmaxferstatus == HAL_OK) { @@ -3048,11 +3153,8 @@ HAL_StatusTypeDef HAL_I2C_Mem_Write_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAdd /* Prevent unused argument(s) compilation and MISRA warning */ UNUSED(dmaxferstatus); - /* Clear directly Complete callback as no XferAbortCallback is used to finalize Abort treatment */ - if (hi2c->hdmatx != NULL) - { - hi2c->hdmatx->XferCpltCallback = NULL; - } + /* Set the unused I2C DMA transfer complete callback to NULL */ + hi2c->hdmatx->XferCpltCallback = NULL; /* Disable Acknowledge */ CLEAR_BIT(hi2c->Instance->CR1, I2C_CR1_ACK); @@ -3155,10 +3257,7 @@ HAL_StatusTypeDef HAL_I2C_Mem_Read_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddr hi2c->Mode = HAL_I2C_MODE_NONE; hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); - - return HAL_ERROR; + return HAL_BUSY; } } while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) != RESET); @@ -3185,23 +3284,44 @@ HAL_StatusTypeDef HAL_I2C_Mem_Read_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddr hi2c->XferCount = Size; hi2c->XferSize = hi2c->XferCount; hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->Devaddress = DevAddress; + hi2c->Memaddress = MemAddress; + hi2c->MemaddSize = MemAddSize; + hi2c->EventCount = 0U; if (hi2c->XferSize > 0U) { - /* Set the I2C DMA transfer complete callback */ - hi2c->hdmarx->XferCpltCallback = I2C_DMAXferCplt; + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMAXferCplt; - /* Set the DMA error callback */ - hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; - /* Set the unused DMA callbacks to NULL */ - hi2c->hdmarx->XferHalfCpltCallback = NULL; - hi2c->hdmarx->XferM1CpltCallback = NULL; - hi2c->hdmarx->XferM1HalfCpltCallback = NULL; - hi2c->hdmarx->XferAbortCallback = NULL; + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferM1CpltCallback = 
NULL; + hi2c->hdmarx->XferM1HalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->DR, (uint32_t)hi2c->pBuffPtr, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; - /* Enable the DMA stream */ - dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->DR, (uint32_t)hi2c->pBuffPtr, hi2c->XferSize); + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } if (dmaxferstatus == HAL_OK) { @@ -3214,11 +3334,8 @@ HAL_StatusTypeDef HAL_I2C_Mem_Read_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddr /* Prevent unused argument(s) compilation and MISRA warning */ UNUSED(dmaxferstatus); - /* Clear directly Complete callback as no XferAbortCallback is used to finalize Abort treatment */ - if (hi2c->hdmarx != NULL) - { - hi2c->hdmarx->XferCpltCallback = NULL; - } + /* Set the unused I2C DMA transfer complete callback to NULL */ + hi2c->hdmarx->XferCpltCallback = NULL; /* Disable Acknowledge */ CLEAR_BIT(hi2c->Instance->CR1, I2C_CR1_ACK); @@ -3316,7 +3433,7 @@ HAL_StatusTypeDef HAL_I2C_IsDeviceReady(I2C_HandleTypeDef *hi2c, uint16_t DevAdd { /* Get tick */ uint32_t tickstart = HAL_GetTick(); - uint32_t I2C_Trials = 1U; + uint32_t I2C_Trials = 0U; FlagStatus tmp1; FlagStatus tmp2; @@ -3473,10 +3590,7 @@ HAL_StatusTypeDef HAL_I2C_Master_Seq_Transmit_IT(I2C_HandleTypeDef *hi2c, uint16 hi2c->Mode = HAL_I2C_MODE_NONE; hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); - - return HAL_ERROR; + return HAL_BUSY; } } while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) != RESET); @@ -3572,10 +3686,7 @@ HAL_StatusTypeDef HAL_I2C_Master_Seq_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint1 hi2c->Mode = HAL_I2C_MODE_NONE; hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); - - return HAL_ERROR; + return HAL_BUSY; } } while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) != RESET); @@ -3609,18 +3720,35 @@ HAL_StatusTypeDef HAL_I2C_Master_Seq_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint1 if (hi2c->XferSize > 0U) { - /* Set the I2C DMA transfer complete callback */ - hi2c->hdmatx->XferCpltCallback = I2C_DMAXferCplt; + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMAXferCplt; - /* Set the DMA error callback */ - hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; - /* Set the unused DMA callbacks to NULL */ - hi2c->hdmatx->XferHalfCpltCallback = NULL; - hi2c->hdmatx->XferAbortCallback = NULL; + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; - /* Enable the DMA stream */ - dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)hi2c->pBuffPtr, (uint32_t)&hi2c->Instance->DR, hi2c->XferSize); + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)hi2c->pBuffPtr, (uint32_t)&hi2c->Instance->DR, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } if (dmaxferstatus == HAL_OK) { @@ 
-3738,10 +3866,7 @@ HAL_StatusTypeDef HAL_I2C_Master_Seq_Receive_IT(I2C_HandleTypeDef *hi2c, uint16_ hi2c->Mode = HAL_I2C_MODE_NONE; hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); - - return HAL_ERROR; + return HAL_BUSY; } } while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) != RESET); @@ -3863,10 +3988,7 @@ HAL_StatusTypeDef HAL_I2C_Master_Seq_Receive_DMA(I2C_HandleTypeDef *hi2c, uint16 hi2c->Mode = HAL_I2C_MODE_NONE; hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); - - return HAL_ERROR; + return HAL_BUSY; } } while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) != RESET); @@ -3933,20 +4055,35 @@ HAL_StatusTypeDef HAL_I2C_Master_Seq_Receive_DMA(I2C_HandleTypeDef *hi2c, uint16 SET_BIT(hi2c->Instance->CR2, I2C_CR2_LAST); } } + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMAXferCplt; - /* Set the I2C DMA transfer complete callback */ - hi2c->hdmarx->XferCpltCallback = I2C_DMAXferCplt; + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; - /* Set the DMA error callback */ - hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; - /* Set the unused DMA callbacks to NULL */ - hi2c->hdmarx->XferHalfCpltCallback = NULL; - hi2c->hdmarx->XferAbortCallback = NULL; + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->DR, (uint32_t)hi2c->pBuffPtr, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; - /* Enable the DMA stream */ - dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->DR, (uint32_t)hi2c->pBuffPtr, hi2c->XferSize); + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } if (dmaxferstatus == HAL_OK) { /* If transfer direction not change and there is no request to start another frame, do not generate Restart Condition */ @@ -4196,18 +4333,35 @@ HAL_StatusTypeDef HAL_I2C_Slave_Seq_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint8_ hi2c->XferSize = hi2c->XferCount; hi2c->XferOptions = XferOptions; - /* Set the I2C DMA transfer complete callback */ - hi2c->hdmatx->XferCpltCallback = I2C_DMAXferCplt; + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMAXferCplt; - /* Set the DMA error callback */ - hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; - /* Set the unused DMA callbacks to NULL */ - hi2c->hdmatx->XferHalfCpltCallback = NULL; - hi2c->hdmatx->XferAbortCallback = NULL; + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)hi2c->pBuffPtr, (uint32_t)&hi2c->Instance->DR, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); - /* Enable the DMA stream */ - dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)hi2c->pBuffPtr, 
(uint32_t)&hi2c->Instance->DR, hi2c->XferSize); + return HAL_ERROR; + } if (dmaxferstatus == HAL_OK) { @@ -4419,18 +4573,35 @@ HAL_StatusTypeDef HAL_I2C_Slave_Seq_Receive_DMA(I2C_HandleTypeDef *hi2c, uint8_t hi2c->XferSize = hi2c->XferCount; hi2c->XferOptions = XferOptions; - /* Set the I2C DMA transfer complete callback */ - hi2c->hdmarx->XferCpltCallback = I2C_DMAXferCplt; + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMAXferCplt; - /* Set the DMA error callback */ - hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; - /* Set the unused DMA callbacks to NULL */ - hi2c->hdmarx->XferHalfCpltCallback = NULL; - hi2c->hdmarx->XferAbortCallback = NULL; + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->DR, (uint32_t)hi2c->pBuffPtr, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); - /* Enable the DMA stream */ - dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->DR, (uint32_t)hi2c->pBuffPtr, hi2c->XferSize); + return HAL_ERROR; + } if (dmaxferstatus == HAL_OK) { @@ -4542,7 +4713,7 @@ HAL_StatusTypeDef HAL_I2C_DisableListen_IT(I2C_HandleTypeDef *hi2c) } /** - * @brief Abort a master I2C IT or DMA process communication with Interrupt. + * @brief Abort a master or memory I2C IT or DMA process communication with Interrupt. * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains * the configuration information for the specified I2C. 
* @param DevAddress Target device address: The device 7 bits address value @@ -4558,7 +4729,8 @@ HAL_StatusTypeDef HAL_I2C_Master_Abort_IT(I2C_HandleTypeDef *hi2c, uint16_t DevA UNUSED(DevAddress); /* Abort Master transfer during Receive or Transmit process */ - if ((__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) != RESET) && (CurrentMode == HAL_I2C_MODE_MASTER)) + if ((__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) != RESET) && ((CurrentMode == HAL_I2C_MODE_MASTER) || + (CurrentMode == HAL_I2C_MODE_MEM))) { /* Process Locked */ __HAL_LOCK(hi2c); @@ -4661,13 +4833,16 @@ void HAL_I2C_EV_IRQHandler(I2C_HandleTypeDef *hi2c) /* BTF set -------------------------------------------------------------*/ else if ((I2C_CHECK_FLAG(sr1itflags, I2C_FLAG_BTF) != RESET) && (I2C_CHECK_IT_SOURCE(itsources, I2C_IT_EVT) != RESET)) { - if (CurrentMode == HAL_I2C_MODE_MASTER) + if (CurrentState == HAL_I2C_STATE_BUSY_TX) { I2C_MasterTransmit_BTF(hi2c); } else /* HAL_I2C_MODE_MEM */ { - I2C_MemoryTransmit_TXE_BTF(hi2c); + if (CurrentMode == HAL_I2C_MODE_MEM) + { + I2C_MemoryTransmit_TXE_BTF(hi2c); + } } } else @@ -5221,13 +5396,25 @@ static void I2C_MasterTransmit_BTF(I2C_HandleTypeDef *hi2c) hi2c->PreviousState = I2C_STATE_NONE; hi2c->State = HAL_I2C_STATE_READY; - hi2c->Mode = HAL_I2C_MODE_NONE; + if (hi2c->Mode == HAL_I2C_MODE_MEM) + { + hi2c->Mode = HAL_I2C_MODE_NONE; +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->MemTxCpltCallback(hi2c); +#else + HAL_I2C_MemTxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + else + { + hi2c->Mode = HAL_I2C_MODE_NONE; #if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) - hi2c->MasterTxCpltCallback(hi2c); + hi2c->MasterTxCpltCallback(hi2c); #else - HAL_I2C_MasterTxCpltCallback(hi2c); + HAL_I2C_MasterTxCpltCallback(hi2c); #endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } } } } @@ -5280,6 +5467,8 @@ static void I2C_MemoryTransmit_TXE_BTF(I2C_HandleTypeDef *hi2c) { /* Generate Restart */ hi2c->Instance->CR1 |= I2C_CR1_START; + + hi2c->EventCount++; } else if ((hi2c->XferCount > 0U) && (CurrentState == HAL_I2C_STATE_BUSY_TX)) { @@ -5317,7 +5506,8 @@ static void I2C_MemoryTransmit_TXE_BTF(I2C_HandleTypeDef *hi2c) } else { - /* Do nothing */ + /* Clear TXE and BTF flags */ + I2C_Flush_DR(hi2c); } } @@ -5332,7 +5522,9 @@ static void I2C_MasterReceive_RXNE(I2C_HandleTypeDef *hi2c) if (hi2c->State == HAL_I2C_STATE_BUSY_RX) { uint32_t tmp; + uint32_t CurrentXferOptions; + CurrentXferOptions = hi2c->XferOptions; tmp = hi2c->XferCount; if (tmp > 3U) { @@ -5388,7 +5580,14 @@ static void I2C_MasterReceive_RXNE(I2C_HandleTypeDef *hi2c) else { hi2c->Mode = HAL_I2C_MODE_NONE; - hi2c->PreviousState = I2C_STATE_MASTER_BUSY_RX; + if ((CurrentXferOptions == I2C_FIRST_AND_LAST_FRAME) || (CurrentXferOptions == I2C_LAST_FRAME)) + { + hi2c->PreviousState = I2C_STATE_NONE; + } + else + { + hi2c->PreviousState = I2C_STATE_MASTER_BUSY_RX; + } #if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) hi2c->MasterRxCpltCallback(hi2c); @@ -5424,7 +5623,9 @@ static void I2C_MasterReceive_RXNE(I2C_HandleTypeDef *hi2c) } else { - /* Do nothing */ + /* Disable BUF interrupt, this help to treat correctly the last 2 bytes + on BTF subroutine if there is a reception delay between N-1 and N byte */ + __HAL_I2C_DISABLE_IT(hi2c, I2C_IT_BUF); } } } @@ -5534,7 +5735,14 @@ static void I2C_MasterReceive_BTF(I2C_HandleTypeDef *hi2c) else { hi2c->Mode = HAL_I2C_MODE_NONE; - hi2c->PreviousState = I2C_STATE_MASTER_BUSY_RX; + if ((CurrentXferOptions == I2C_FIRST_AND_LAST_FRAME) || (CurrentXferOptions == I2C_LAST_FRAME)) + { + 
hi2c->PreviousState = I2C_STATE_NONE; + } + else + { + hi2c->PreviousState = I2C_STATE_MASTER_BUSY_RX; + } #if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) hi2c->MasterRxCpltCallback(hi2c); #else @@ -5627,13 +5835,11 @@ static void I2C_Master_ADD10(I2C_HandleTypeDef *hi2c) /* Send slave address */ hi2c->Instance->DR = I2C_10BIT_ADDRESS(hi2c->Devaddress); - if ((hi2c->hdmatx != NULL) || (hi2c->hdmarx != NULL)) + if (((hi2c->hdmatx != NULL) && (hi2c->hdmatx->XferCpltCallback != NULL)) + || ((hi2c->hdmarx != NULL) && (hi2c->hdmarx->XferCpltCallback != NULL))) { - if ((hi2c->hdmatx->XferCpltCallback != NULL) || (hi2c->hdmarx->XferCpltCallback != NULL)) - { - /* Enable DMA Request */ - SET_BIT(hi2c->Instance->CR2, I2C_CR2_DMAEN); - } + /* Enable DMA Request */ + SET_BIT(hi2c->Instance->CR2, I2C_CR2_DMAEN); } } @@ -5951,7 +6157,7 @@ static void I2C_Slave_ADDR(I2C_HandleTypeDef *hi2c, uint32_t IT2Flags) else { /* Clear ADDR flag */ - __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + __HAL_I2C_CLEAR_ADDRFLAG(hi2c); /* Process Unlocked */ __HAL_UNLOCK(hi2c); @@ -5983,7 +6189,7 @@ static void I2C_Slave_STOPF(I2C_HandleTypeDef *hi2c) { if ((CurrentState == HAL_I2C_STATE_BUSY_RX) || (CurrentState == HAL_I2C_STATE_BUSY_RX_LISTEN)) { - hi2c->XferCount = (uint16_t)(__HAL_DMA_GET_COUNTER(hi2c->hdmarx)); + hi2c->XferCount = (uint16_t)(I2C_GET_DMA_REMAIN_DATA(hi2c->hdmarx)); if (hi2c->XferCount != 0U) { @@ -6011,7 +6217,7 @@ static void I2C_Slave_STOPF(I2C_HandleTypeDef *hi2c) } else { - hi2c->XferCount = (uint16_t)(__HAL_DMA_GET_COUNTER(hi2c->hdmatx)); + hi2c->XferCount = (uint16_t)(I2C_GET_DMA_REMAIN_DATA(hi2c->hdmatx)); if (hi2c->XferCount != 0U) { @@ -6180,6 +6386,9 @@ static void I2C_Slave_AF(I2C_HandleTypeDef *hi2c) /* Disable Acknowledge */ CLEAR_BIT(hi2c->Instance->CR1, I2C_CR1_ACK); + /* Clear TXE flag */ + I2C_Flush_DR(hi2c); + #if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) hi2c->SlaveTxCpltCallback(hi2c); #else @@ -6841,7 +7050,14 @@ static void I2C_DMAXferCplt(DMA_HandleTypeDef *hdma) else { hi2c->Mode = HAL_I2C_MODE_NONE; - hi2c->PreviousState = I2C_STATE_MASTER_BUSY_RX; + if ((CurrentXferOptions == I2C_FIRST_AND_LAST_FRAME) || (CurrentXferOptions == I2C_LAST_FRAME)) + { + hi2c->PreviousState = I2C_STATE_NONE; + } + else + { + hi2c->PreviousState = I2C_STATE_MASTER_BUSY_RX; + } #if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) hi2c->MasterRxCpltCallback(hi2c); @@ -7016,15 +7232,18 @@ static HAL_StatusTypeDef I2C_WaitOnFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uin { if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) { - hi2c->PreviousState = I2C_STATE_NONE; - hi2c->State = HAL_I2C_STATE_READY; - hi2c->Mode = HAL_I2C_MODE_NONE; - hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + if ((__HAL_I2C_GET_FLAG(hi2c, Flag) == Status)) + { + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); - return HAL_ERROR; + return HAL_ERROR; + } } } } @@ -7068,15 +7287,18 @@ static HAL_StatusTypeDef I2C_WaitOnMasterAddressFlagUntilTimeout(I2C_HandleTypeD { if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) { - hi2c->PreviousState = I2C_STATE_NONE; - hi2c->State = HAL_I2C_STATE_READY; - hi2c->Mode = HAL_I2C_MODE_NONE; - hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + if ((__HAL_I2C_GET_FLAG(hi2c, Flag) == RESET)) + { + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + 
hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); - return HAL_ERROR; + return HAL_ERROR; + } } } } @@ -7106,15 +7328,18 @@ static HAL_StatusTypeDef I2C_WaitOnTXEFlagUntilTimeout(I2C_HandleTypeDef *hi2c, { if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) { - hi2c->PreviousState = I2C_STATE_NONE; - hi2c->State = HAL_I2C_STATE_READY; - hi2c->Mode = HAL_I2C_MODE_NONE; - hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + if ((__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_TXE) == RESET)) + { + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); - return HAL_ERROR; + return HAL_ERROR; + } } } } @@ -7144,15 +7369,18 @@ static HAL_StatusTypeDef I2C_WaitOnBTFFlagUntilTimeout(I2C_HandleTypeDef *hi2c, { if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) { - hi2c->PreviousState = I2C_STATE_NONE; - hi2c->State = HAL_I2C_STATE_READY; - hi2c->Mode = HAL_I2C_MODE_NONE; - hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + if ((__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BTF) == RESET)) + { + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); - return HAL_ERROR; + return HAL_ERROR; + } } } } @@ -7180,15 +7408,18 @@ static HAL_StatusTypeDef I2C_WaitOnSTOPFlagUntilTimeout(I2C_HandleTypeDef *hi2c, /* Check for the Timeout */ if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) { - hi2c->PreviousState = I2C_STATE_NONE; - hi2c->State = HAL_I2C_STATE_READY; - hi2c->Mode = HAL_I2C_MODE_NONE; - hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + if ((__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_STOPF) == RESET)) + { + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); - return HAL_ERROR; + return HAL_ERROR; + } } } return HAL_OK; @@ -7254,15 +7485,18 @@ static HAL_StatusTypeDef I2C_WaitOnRXNEFlagUntilTimeout(I2C_HandleTypeDef *hi2c, /* Check for the Timeout */ if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) { - hi2c->PreviousState = I2C_STATE_NONE; - hi2c->State = HAL_I2C_STATE_READY; - hi2c->Mode = HAL_I2C_MODE_NONE; - hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + if ((__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_RXNE) == RESET)) + { + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; - /* Process Unlocked */ - __HAL_UNLOCK(hi2c); + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); - return HAL_ERROR; + return HAL_ERROR; + } } } return HAL_OK; @@ -7295,7 +7529,7 @@ static HAL_StatusTypeDef I2C_IsAcknowledgeFailed(I2C_HandleTypeDef *hi2c) } /** - * @brief Convert I2Cx OTHER_xxx XferOptions to functionnal XferOptions. + * @brief Convert I2Cx OTHER_xxx XferOptions to functional XferOptions. * @param hi2c I2C handle. 
* @retval None */ @@ -7335,4 +7569,3 @@ static void I2C_ConvertOtherXferOptions(I2C_HandleTypeDef *hi2c) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2c_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2c_ex.c index ed503320..64aabaa4 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2c_ex.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2c_ex.c @@ -7,6 +7,17 @@ * functionalities of I2C extension peripheral: * + Extension features functions * + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim ============================================================================== ##### I2C peripheral extension features ##### @@ -25,18 +36,6 @@ (#) Configure I2C Digital noise filter using the function HAL_I2C_DigitalFilter_Config() @endverbatim - ****************************************************************************** - * @attention - * - *

© Copyright (c) 2016 STMicroelectronics. - * All rights reserved.

- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * - ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ @@ -181,4 +180,3 @@ HAL_StatusTypeDef HAL_I2CEx_ConfigDigitalFilter(I2C_HandleTypeDef *hi2c, uint32_ * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2s.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2s.c index 35e29f87..d6643c8d 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2s.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2s.c @@ -8,6 +8,17 @@ * + Initialization and de-initialization functions * + IO operation functions * + Peripheral State and Errors functions + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim =============================================================================== ##### How to use this driver ##### @@ -90,7 +101,7 @@ (+) Stop the DMA Transfer using HAL_I2S_DMAStop() In Slave mode, if HAL_I2S_DMAStop is used to stop the communication, an error HAL_I2S_ERROR_BUSY_LINE_RX is raised as the master continue to transmit data. - In this case __HAL_I2S_FLUSH_RX_DR macro must be used to flush the remaining data + In this case __HAL_I2S_FLUSH_RX_DR macro must be used to flush the remaining data inside DR register and avoid using DeInit/Init process for the next transfer. *** I2S HAL driver macros list *** @@ -103,8 +114,8 @@ (+) __HAL_I2S_ENABLE_IT : Enable the specified I2S interrupts (+) __HAL_I2S_DISABLE_IT : Disable the specified I2S interrupts (+) __HAL_I2S_GET_FLAG: Check whether the specified I2S flag is set or not - (+) __HAL_I2S_FLUSH_RX_DR: Read DR Register to Flush RX Data + [..] (@) You can refer to the I2S HAL driver header file for more useful macros @@ -169,18 +180,7 @@ and weak (surcharged) callbacks are used. @endverbatim - ****************************************************************************** - * @attention - * - *

© Copyright (c) 2016 STMicroelectronics. - * All rights reserved.

- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * - ****************************************************************************** + */ /* Includes ------------------------------------------------------------------*/ @@ -303,12 +303,12 @@ HAL_StatusTypeDef HAL_I2S_Init(I2S_HandleTypeDef *hi2s) hi2s->RxCpltCallback = HAL_I2S_RxCpltCallback; /* Legacy weak RxCpltCallback */ #if defined (SPI_I2S_FULLDUPLEX_SUPPORT) hi2s->TxRxCpltCallback = HAL_I2SEx_TxRxCpltCallback; /* Legacy weak TxRxCpltCallback */ -#endif +#endif /* SPI_I2S_FULLDUPLEX_SUPPORT */ hi2s->TxHalfCpltCallback = HAL_I2S_TxHalfCpltCallback; /* Legacy weak TxHalfCpltCallback */ hi2s->RxHalfCpltCallback = HAL_I2S_RxHalfCpltCallback; /* Legacy weak RxHalfCpltCallback */ #if defined (SPI_I2S_FULLDUPLEX_SUPPORT) hi2s->TxRxHalfCpltCallback = HAL_I2SEx_TxRxHalfCpltCallback; /* Legacy weak TxRxHalfCpltCallback */ -#endif +#endif /* SPI_I2S_FULLDUPLEX_SUPPORT */ hi2s->ErrorCallback = HAL_I2S_ErrorCallback; /* Legacy weak ErrorCallback */ if (hi2s->MspInitCallback == NULL) @@ -352,7 +352,7 @@ HAL_StatusTypeDef HAL_I2S_Init(I2S_HandleTypeDef *hi2s) /* I2S standard */ if (hi2s->Init.Standard <= I2S_STANDARD_LSB) { - /* In I2S standard packet lenght is multiplied by 2 */ + /* In I2S standard packet length is multiplied by 2 */ packetlength = packetlength * 2U; } @@ -368,7 +368,7 @@ HAL_StatusTypeDef HAL_I2S_Init(I2S_HandleTypeDef *hi2s) } #else i2sclk = HAL_RCCEx_GetPeriphCLKFreq(RCC_PERIPHCLK_I2S); -#endif +#endif /* I2S_APB1_APB2_FEATURE */ /* Compute the Real divider depending on the MCLK output state, with a floating point */ if (hi2s->Init.MCLKOutput == I2S_MCLKOUTPUT_ENABLE) @@ -469,9 +469,11 @@ HAL_StatusTypeDef HAL_I2S_Init(I2S_HandleTypeDef *hi2s) } /* Configure the I2S Slave with the I2S Master parameter values */ - tmpreg |= (uint16_t)((uint16_t)SPI_I2SCFGR_I2SMOD | (uint16_t)(tmp | \ - (uint16_t)(hi2s->Init.Standard | (uint16_t)(hi2s->Init.DataFormat | \ - (uint16_t)hi2s->Init.CPOL)))); + tmpreg |= (uint16_t)((uint16_t)SPI_I2SCFGR_I2SMOD | \ + (uint16_t)tmp | \ + (uint16_t)hi2s->Init.Standard | \ + (uint16_t)hi2s->Init.DataFormat | \ + (uint16_t)hi2s->Init.CPOL); /* Write to SPIx I2SCFGR */ WRITE_REG(I2SxEXT(hi2s->Instance)->I2SCFGR, tmpreg); @@ -601,7 +603,7 @@ HAL_StatusTypeDef HAL_I2S_RegisterCallback(I2S_HandleTypeDef *hi2s, HAL_I2S_Call case HAL_I2S_TX_RX_COMPLETE_CB_ID : hi2s->TxRxCpltCallback = pCallback; break; -#endif +#endif /* SPI_I2S_FULLDUPLEX_SUPPORT */ case HAL_I2S_TX_HALF_COMPLETE_CB_ID : hi2s->TxHalfCpltCallback = pCallback; @@ -615,7 +617,7 @@ HAL_StatusTypeDef HAL_I2S_RegisterCallback(I2S_HandleTypeDef *hi2s, HAL_I2S_Call case HAL_I2S_TX_RX_HALF_COMPLETE_CB_ID : hi2s->TxRxHalfCpltCallback = pCallback; break; -#endif +#endif /* SPI_I2S_FULLDUPLEX_SUPPORT */ case HAL_I2S_ERROR_CB_ID : hi2s->ErrorCallback = pCallback; @@ -704,7 +706,7 @@ HAL_StatusTypeDef HAL_I2S_UnRegisterCallback(I2S_HandleTypeDef *hi2s, HAL_I2S_Ca case HAL_I2S_TX_RX_COMPLETE_CB_ID : hi2s->TxRxCpltCallback = HAL_I2SEx_TxRxCpltCallback; /* Legacy weak TxRxCpltCallback */ break; -#endif +#endif /* SPI_I2S_FULLDUPLEX_SUPPORT */ case HAL_I2S_TX_HALF_COMPLETE_CB_ID : hi2s->TxHalfCpltCallback = HAL_I2S_TxHalfCpltCallback; /* Legacy weak TxHalfCpltCallback */ @@ -718,7 +720,7 @@ HAL_StatusTypeDef HAL_I2S_UnRegisterCallback(I2S_HandleTypeDef *hi2s, 
HAL_I2S_Ca case HAL_I2S_TX_RX_HALF_COMPLETE_CB_ID : hi2s->TxRxHalfCpltCallback = HAL_I2SEx_TxRxHalfCpltCallback; /* Legacy weak TxRxHalfCpltCallback */ break; -#endif +#endif /* SPI_I2S_FULLDUPLEX_SUPPORT */ case HAL_I2S_ERROR_CB_ID : hi2s->ErrorCallback = HAL_I2S_ErrorCallback; /* Legacy weak ErrorCallback */ @@ -831,7 +833,7 @@ HAL_StatusTypeDef HAL_I2S_UnRegisterCallback(I2S_HandleTypeDef *hi2s, HAL_I2S_Ca * @note When a 16-bit data frame or a 16-bit data frame extended is selected during the I2S * configuration phase, the Size parameter means the number of 16-bit data length * in the transaction and when a 24-bit data frame or a 32-bit data frame is selected - * the Size parameter means the number of 16-bit data length. + * the Size parameter means the number of 24-bit or 32-bit data length. * @param Timeout Timeout duration * @note The I2S is kept enabled at the end of transaction to avoid the clock de-synchronization * between Master and Slave(example: audio streaming). @@ -846,15 +848,14 @@ HAL_StatusTypeDef HAL_I2S_Transmit(I2S_HandleTypeDef *hi2s, uint16_t *pData, uin return HAL_ERROR; } - /* Process Locked */ - __HAL_LOCK(hi2s); - if (hi2s->State != HAL_I2S_STATE_READY) { - __HAL_UNLOCK(hi2s); return HAL_BUSY; } + /* Process Locked */ + __HAL_LOCK(hi2s); + /* Set state and reset error code */ hi2s->State = HAL_I2S_STATE_BUSY_TX; hi2s->ErrorCode = HAL_I2S_ERROR_NONE; @@ -948,7 +949,7 @@ HAL_StatusTypeDef HAL_I2S_Transmit(I2S_HandleTypeDef *hi2s, uint16_t *pData, uin * @note When a 16-bit data frame or a 16-bit data frame extended is selected during the I2S * configuration phase, the Size parameter means the number of 16-bit data length * in the transaction and when a 24-bit data frame or a 32-bit data frame is selected - * the Size parameter means the number of 16-bit data length. + * the Size parameter means the number of 24-bit or 32-bit data length. * @param Timeout Timeout duration * @note The I2S is kept enabled at the end of transaction to avoid the clock de-synchronization * between Master and Slave(example: audio streaming). @@ -965,15 +966,14 @@ HAL_StatusTypeDef HAL_I2S_Receive(I2S_HandleTypeDef *hi2s, uint16_t *pData, uint return HAL_ERROR; } - /* Process Locked */ - __HAL_LOCK(hi2s); - if (hi2s->State != HAL_I2S_STATE_READY) { - __HAL_UNLOCK(hi2s); return HAL_BUSY; } + /* Process Locked */ + __HAL_LOCK(hi2s); + /* Set state and reset error code */ hi2s->State = HAL_I2S_STATE_BUSY_RX; hi2s->ErrorCode = HAL_I2S_ERROR_NONE; @@ -1049,7 +1049,7 @@ HAL_StatusTypeDef HAL_I2S_Receive(I2S_HandleTypeDef *hi2s, uint16_t *pData, uint * @note When a 16-bit data frame or a 16-bit data frame extended is selected during the I2S * configuration phase, the Size parameter means the number of 16-bit data length * in the transaction and when a 24-bit data frame or a 32-bit data frame is selected - * the Size parameter means the number of 16-bit data length. + * the Size parameter means the number of 24-bit or 32-bit data length. * @note The I2S is kept enabled at the end of transaction to avoid the clock de-synchronization * between Master and Slave(example: audio streaming). 
* @retval HAL status @@ -1063,15 +1063,14 @@ HAL_StatusTypeDef HAL_I2S_Transmit_IT(I2S_HandleTypeDef *hi2s, uint16_t *pData, return HAL_ERROR; } - /* Process Locked */ - __HAL_LOCK(hi2s); - if (hi2s->State != HAL_I2S_STATE_READY) { - __HAL_UNLOCK(hi2s); return HAL_BUSY; } + /* Process Locked */ + __HAL_LOCK(hi2s); + /* Set state and reset error code */ hi2s->State = HAL_I2S_STATE_BUSY_TX; hi2s->ErrorCode = HAL_I2S_ERROR_NONE; @@ -1090,6 +1089,8 @@ HAL_StatusTypeDef HAL_I2S_Transmit_IT(I2S_HandleTypeDef *hi2s, uint16_t *pData, hi2s->TxXferCount = Size; } + __HAL_UNLOCK(hi2s); + /* Enable TXE and ERR interrupt */ __HAL_I2S_ENABLE_IT(hi2s, (I2S_IT_TXE | I2S_IT_ERR)); @@ -1100,7 +1101,6 @@ HAL_StatusTypeDef HAL_I2S_Transmit_IT(I2S_HandleTypeDef *hi2s, uint16_t *pData, __HAL_I2S_ENABLE(hi2s); } - __HAL_UNLOCK(hi2s); return HAL_OK; } @@ -1113,7 +1113,7 @@ HAL_StatusTypeDef HAL_I2S_Transmit_IT(I2S_HandleTypeDef *hi2s, uint16_t *pData, * @note When a 16-bit data frame or a 16-bit data frame extended is selected during the I2S * configuration phase, the Size parameter means the number of 16-bit data length * in the transaction and when a 24-bit data frame or a 32-bit data frame is selected - * the Size parameter means the number of 16-bit data length. + * the Size parameter means the number of 24-bit or 32-bit data length. * @note The I2S is kept enabled at the end of transaction to avoid the clock de-synchronization * between Master and Slave(example: audio streaming). * @note It is recommended to use DMA for the I2S receiver to avoid de-synchronization @@ -1129,15 +1129,14 @@ HAL_StatusTypeDef HAL_I2S_Receive_IT(I2S_HandleTypeDef *hi2s, uint16_t *pData, u return HAL_ERROR; } - /* Process Locked */ - __HAL_LOCK(hi2s); - if (hi2s->State != HAL_I2S_STATE_READY) { - __HAL_UNLOCK(hi2s); return HAL_BUSY; } + /* Process Locked */ + __HAL_LOCK(hi2s); + /* Set state and reset error code */ hi2s->State = HAL_I2S_STATE_BUSY_RX; hi2s->ErrorCode = HAL_I2S_ERROR_NONE; @@ -1156,6 +1155,8 @@ HAL_StatusTypeDef HAL_I2S_Receive_IT(I2S_HandleTypeDef *hi2s, uint16_t *pData, u hi2s->RxXferCount = Size; } + __HAL_UNLOCK(hi2s); + /* Enable RXNE and ERR interrupt */ __HAL_I2S_ENABLE_IT(hi2s, (I2S_IT_RXNE | I2S_IT_ERR)); @@ -1166,7 +1167,6 @@ HAL_StatusTypeDef HAL_I2S_Receive_IT(I2S_HandleTypeDef *hi2s, uint16_t *pData, u __HAL_I2S_ENABLE(hi2s); } - __HAL_UNLOCK(hi2s); return HAL_OK; } @@ -1179,7 +1179,7 @@ HAL_StatusTypeDef HAL_I2S_Receive_IT(I2S_HandleTypeDef *hi2s, uint16_t *pData, u * @note When a 16-bit data frame or a 16-bit data frame extended is selected during the I2S * configuration phase, the Size parameter means the number of 16-bit data length * in the transaction and when a 24-bit data frame or a 32-bit data frame is selected - * the Size parameter means the number of 16-bit data length. + * the Size parameter means the number of 24-bit or 32-bit data length. * @note The I2S is kept enabled at the end of transaction to avoid the clock de-synchronization * between Master and Slave(example: audio streaming). 
* @retval HAL status @@ -1193,15 +1193,14 @@ HAL_StatusTypeDef HAL_I2S_Transmit_DMA(I2S_HandleTypeDef *hi2s, uint16_t *pData, return HAL_ERROR; } - /* Process Locked */ - __HAL_LOCK(hi2s); - if (hi2s->State != HAL_I2S_STATE_READY) { - __HAL_UNLOCK(hi2s); return HAL_BUSY; } + /* Process Locked */ + __HAL_LOCK(hi2s); + /* Set state and reset error code */ hi2s->State = HAL_I2S_STATE_BUSY_TX; hi2s->ErrorCode = HAL_I2S_ERROR_NONE; @@ -1243,12 +1242,7 @@ HAL_StatusTypeDef HAL_I2S_Transmit_DMA(I2S_HandleTypeDef *hi2s, uint16_t *pData, return HAL_ERROR; } - /* Check if the I2S is already enabled */ - if (HAL_IS_BIT_CLR(hi2s->Instance->I2SCFGR, SPI_I2SCFGR_I2SE)) - { - /* Enable I2S peripheral */ - __HAL_I2S_ENABLE(hi2s); - } + __HAL_UNLOCK(hi2s); /* Check if the I2S Tx request is already enabled */ if (HAL_IS_BIT_CLR(hi2s->Instance->CR2, SPI_CR2_TXDMAEN)) @@ -1257,7 +1251,13 @@ HAL_StatusTypeDef HAL_I2S_Transmit_DMA(I2S_HandleTypeDef *hi2s, uint16_t *pData, SET_BIT(hi2s->Instance->CR2, SPI_CR2_TXDMAEN); } - __HAL_UNLOCK(hi2s); + /* Check if the I2S is already enabled */ + if (HAL_IS_BIT_CLR(hi2s->Instance->I2SCFGR, SPI_I2SCFGR_I2SE)) + { + /* Enable I2S peripheral */ + __HAL_I2S_ENABLE(hi2s); + } + return HAL_OK; } @@ -1270,7 +1270,7 @@ HAL_StatusTypeDef HAL_I2S_Transmit_DMA(I2S_HandleTypeDef *hi2s, uint16_t *pData, * @note When a 16-bit data frame or a 16-bit data frame extended is selected during the I2S * configuration phase, the Size parameter means the number of 16-bit data length * in the transaction and when a 24-bit data frame or a 32-bit data frame is selected - * the Size parameter means the number of 16-bit data length. + * the Size parameter means the number of 24-bit or 32-bit data length. * @note The I2S is kept enabled at the end of transaction to avoid the clock de-synchronization * between Master and Slave(example: audio streaming). 
* @retval HAL status @@ -1284,15 +1284,14 @@ HAL_StatusTypeDef HAL_I2S_Receive_DMA(I2S_HandleTypeDef *hi2s, uint16_t *pData, return HAL_ERROR; } - /* Process Locked */ - __HAL_LOCK(hi2s); - if (hi2s->State != HAL_I2S_STATE_READY) { - __HAL_UNLOCK(hi2s); return HAL_BUSY; } + /* Process Locked */ + __HAL_LOCK(hi2s); + /* Set state and reset error code */ hi2s->State = HAL_I2S_STATE_BUSY_RX; hi2s->ErrorCode = HAL_I2S_ERROR_NONE; @@ -1340,12 +1339,7 @@ HAL_StatusTypeDef HAL_I2S_Receive_DMA(I2S_HandleTypeDef *hi2s, uint16_t *pData, return HAL_ERROR; } - /* Check if the I2S is already enabled */ - if (HAL_IS_BIT_CLR(hi2s->Instance->I2SCFGR, SPI_I2SCFGR_I2SE)) - { - /* Enable I2S peripheral */ - __HAL_I2S_ENABLE(hi2s); - } + __HAL_UNLOCK(hi2s); /* Check if the I2S Rx request is already enabled */ if (HAL_IS_BIT_CLR(hi2s->Instance->CR2, SPI_CR2_RXDMAEN)) @@ -1354,7 +1348,13 @@ HAL_StatusTypeDef HAL_I2S_Receive_DMA(I2S_HandleTypeDef *hi2s, uint16_t *pData, SET_BIT(hi2s->Instance->CR2, SPI_CR2_RXDMAEN); } - __HAL_UNLOCK(hi2s); + /* Check if the I2S is already enabled */ + if (HAL_IS_BIT_CLR(hi2s->Instance->I2SCFGR, SPI_I2SCFGR_I2SE)) + { + /* Enable I2S peripheral */ + __HAL_I2S_ENABLE(hi2s); + } + return HAL_OK; } @@ -2090,4 +2090,3 @@ static HAL_StatusTypeDef I2S_WaitFlagStateUntilTimeout(I2S_HandleTypeDef *hi2s, #endif /* HAL_I2S_MODULE_ENABLED */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2s_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2s_ex.c index 791c2192..823a8cc5 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2s_ex.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_i2s_ex.c @@ -6,7 +6,17 @@ * This file provides firmware functions to manage the following * functionalities of I2S extension peripheral: * + Extension features Functions + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim ============================================================================== ##### I2S Extension features ##### @@ -73,18 +83,6 @@ | | I2Sx_ext |------------------->I2Sx_extSD(in/out) +----->| | +-----------------------+ - ****************************************************************************** - * @attention - * - *

© Copyright (c) 2016 STMicroelectronics. - * All rights reserved.

- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * - ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ @@ -129,8 +127,11 @@ static void I2SEx_RxISR_I2S(I2S_HandleTypeDef *hi2s); static void I2SEx_RxISR_I2SExt(I2S_HandleTypeDef *hi2s); static void I2SEx_TxISR_I2S(I2S_HandleTypeDef *hi2s); static void I2SEx_TxISR_I2SExt(I2S_HandleTypeDef *hi2s); -static HAL_StatusTypeDef I2SEx_FullDuplexWaitFlagStateUntilTimeout(I2S_HandleTypeDef *hi2s, uint32_t Flag, - uint32_t State, uint32_t Timeout, I2S_UseTypeDef i2sUsed); +static HAL_StatusTypeDef I2SEx_FullDuplexWaitFlagStateUntilTimeout(I2S_HandleTypeDef *hi2s, + uint32_t Flag, + uint32_t State, + uint32_t Timeout, + I2S_UseTypeDef i2sUsed); /** * @} */ @@ -202,21 +203,22 @@ static HAL_StatusTypeDef I2SEx_FullDuplexWaitFlagStateUntilTimeout(I2S_HandleTyp * between Master and Slave(example: audio streaming). * @retval HAL status */ -HAL_StatusTypeDef HAL_I2SEx_TransmitReceive(I2S_HandleTypeDef *hi2s, uint16_t *pTxData, uint16_t *pRxData, - uint16_t Size, uint32_t Timeout) +HAL_StatusTypeDef HAL_I2SEx_TransmitReceive(I2S_HandleTypeDef *hi2s, + uint16_t *pTxData, + uint16_t *pRxData, + uint16_t Size, + uint32_t Timeout) { uint32_t tmp1 = 0U; - HAL_StatusTypeDef errorcode = HAL_OK; if (hi2s->State != HAL_I2S_STATE_READY) { - errorcode = HAL_BUSY; - goto error; + return HAL_BUSY; } if ((pTxData == NULL) || (pRxData == NULL) || (Size == 0U)) { - return HAL_ERROR; + return HAL_ERROR; } /* Process Locked */ @@ -277,8 +279,11 @@ HAL_StatusTypeDef HAL_I2SEx_TransmitReceive(I2S_HandleTypeDef *hi2s, uint16_t *p { /* Set the error code */ SET_BIT(hi2s->ErrorCode, HAL_I2S_ERROR_TIMEOUT); - errorcode = HAL_ERROR; - goto error; + hi2s->State = HAL_I2S_STATE_READY; + + /* Process UnLock */ + __HAL_UNLOCK(hi2s); + return HAL_ERROR; } /* Write Data on DR register */ hi2s->Instance->DR = (*pTxData++); @@ -301,8 +306,11 @@ HAL_StatusTypeDef HAL_I2SEx_TransmitReceive(I2S_HandleTypeDef *hi2s, uint16_t *p { /* Set the error code */ SET_BIT(hi2s->ErrorCode, HAL_I2S_ERROR_TIMEOUT); - errorcode = HAL_ERROR; - goto error; + hi2s->State = HAL_I2S_STATE_READY; + + /* Process UnLock */ + __HAL_UNLOCK(hi2s); + return HAL_ERROR; } /* Read Data from DR register */ (*pRxData++) = I2SxEXT(hi2s->Instance)->DR; @@ -350,8 +358,11 @@ HAL_StatusTypeDef HAL_I2SEx_TransmitReceive(I2S_HandleTypeDef *hi2s, uint16_t *p { /* Set the error code */ SET_BIT(hi2s->ErrorCode, HAL_I2S_ERROR_TIMEOUT); - errorcode = HAL_ERROR; - goto error; + hi2s->State = HAL_I2S_STATE_READY; + + /* Process UnLock */ + __HAL_UNLOCK(hi2s); + return HAL_ERROR; } /* Write Data on DR register */ I2SxEXT(hi2s->Instance)->DR = (*pTxData++); @@ -374,8 +385,11 @@ HAL_StatusTypeDef HAL_I2SEx_TransmitReceive(I2S_HandleTypeDef *hi2s, uint16_t *p { /* Set the error code */ SET_BIT(hi2s->ErrorCode, HAL_I2S_ERROR_TIMEOUT); - errorcode = HAL_ERROR; - goto error; + hi2s->State = HAL_I2S_STATE_READY; + + /* Process UnLock */ + __HAL_UNLOCK(hi2s); + return HAL_ERROR; } /* Read Data from DR register */ (*pRxData++) = hi2s->Instance->DR; @@ -394,15 +408,17 @@ HAL_StatusTypeDef HAL_I2SEx_TransmitReceive(I2S_HandleTypeDef *hi2s, uint16_t *p } } + hi2s->State = HAL_I2S_STATE_READY; + __HAL_UNLOCK(hi2s); + if (hi2s->ErrorCode != 
HAL_I2S_ERROR_NONE) { - errorcode = HAL_ERROR; + return HAL_ERROR; + } + else + { + return HAL_OK; } - -error : - hi2s->State = HAL_I2S_STATE_READY; - __HAL_UNLOCK(hi2s); - return errorcode; } /** @@ -420,16 +436,16 @@ error : * between Master and Slave(example: audio streaming). * @retval HAL status */ -HAL_StatusTypeDef HAL_I2SEx_TransmitReceive_IT(I2S_HandleTypeDef *hi2s, uint16_t *pTxData, uint16_t *pRxData, +HAL_StatusTypeDef HAL_I2SEx_TransmitReceive_IT(I2S_HandleTypeDef *hi2s, + uint16_t *pTxData, + uint16_t *pRxData, uint16_t Size) { uint32_t tmp1 = 0U; - HAL_StatusTypeDef errorcode = HAL_OK; if (hi2s->State != HAL_I2S_STATE_READY) { - errorcode = HAL_BUSY; - goto error; + return HAL_BUSY; } if ((pTxData == NULL) || (pRxData == NULL) || (Size == 0U)) @@ -504,15 +520,14 @@ HAL_StatusTypeDef HAL_I2SEx_TransmitReceive_IT(I2S_HandleTypeDef *hi2s, uint16_t } } + __HAL_UNLOCK(hi2s); /* Enable I2Sext peripheral */ __HAL_I2SEXT_ENABLE(hi2s); /* Enable I2S peripheral */ __HAL_I2S_ENABLE(hi2s); -error : - __HAL_UNLOCK(hi2s); - return errorcode; + return HAL_OK; } /** @@ -530,17 +545,17 @@ error : * between Master and Slave(example: audio streaming). * @retval HAL status */ -HAL_StatusTypeDef HAL_I2SEx_TransmitReceive_DMA(I2S_HandleTypeDef *hi2s, uint16_t *pTxData, uint16_t *pRxData, +HAL_StatusTypeDef HAL_I2SEx_TransmitReceive_DMA(I2S_HandleTypeDef *hi2s, + uint16_t *pTxData, + uint16_t *pRxData, uint16_t Size) { uint32_t *tmp = NULL; uint32_t tmp1 = 0U; - HAL_StatusTypeDef errorcode = HAL_OK; if (hi2s->State != HAL_I2S_STATE_READY) { - errorcode = HAL_BUSY; - goto error; + return HAL_BUSY; } if ((pTxData == NULL) || (pRxData == NULL) || (Size == 0U)) @@ -586,11 +601,11 @@ HAL_StatusTypeDef HAL_I2SEx_TransmitReceive_DMA(I2S_HandleTypeDef *hi2s, uint16_ /* Set the I2S Rx DMA error callback */ hi2s->hdmarx->XferErrorCallback = I2SEx_TxRxDMAError; - /* Set the I2S Tx DMA Half transfer complete callback */ - hi2s->hdmatx->XferHalfCpltCallback = I2SEx_TxRxDMAHalfCplt; + /* Set the I2S Tx DMA Half transfer complete callback as NULL */ + hi2s->hdmatx->XferHalfCpltCallback = NULL; - /* Set the I2S Tx DMA transfer complete callback */ - hi2s->hdmatx->XferCpltCallback = I2SEx_TxRxDMACplt; + /* Set the I2S Tx DMA transfer complete callback as NULL */ + hi2s->hdmatx->XferCpltCallback = NULL; /* Set the I2S Tx DMA error callback */ hi2s->hdmatx->XferErrorCallback = I2SEx_TxRxDMAError; @@ -612,16 +627,6 @@ HAL_StatusTypeDef HAL_I2SEx_TransmitReceive_DMA(I2S_HandleTypeDef *hi2s, uint16_ /* Enable Tx DMA Request */ SET_BIT(hi2s->Instance->CR2, SPI_CR2_TXDMAEN); - - /* Check if the I2S is already enabled */ - if ((hi2s->Instance->I2SCFGR & SPI_I2SCFGR_I2SE) != SPI_I2SCFGR_I2SE) - { - /* Enable I2Sext(receiver) before enabling I2Sx peripheral */ - __HAL_I2SEXT_ENABLE(hi2s); - - /* Enable I2S peripheral after the I2Sext */ - __HAL_I2S_ENABLE(hi2s); - } } else { @@ -645,20 +650,19 @@ HAL_StatusTypeDef HAL_I2SEx_TransmitReceive_DMA(I2S_HandleTypeDef *hi2s, uint16_ /* Enable Rx DMA Request */ SET_BIT(hi2s->Instance->CR2, SPI_CR2_RXDMAEN); - - /* Check if the I2S is already enabled */ - if ((hi2s->Instance->I2SCFGR & SPI_I2SCFGR_I2SE) != SPI_I2SCFGR_I2SE) - { - /* Enable I2Sext(transmitter) before enabling I2Sx peripheral */ - __HAL_I2SEXT_ENABLE(hi2s); - /* Enable I2S peripheral before the I2Sext */ - __HAL_I2S_ENABLE(hi2s); - } } -error : __HAL_UNLOCK(hi2s); - return errorcode; + /* Check if the I2S is already enabled */ + if ((hi2s->Instance->I2SCFGR & SPI_I2SCFGR_I2SE) != SPI_I2SCFGR_I2SE) + { + /* Enable 
I2Sext(transmitter) before enabling I2Sx peripheral */ + __HAL_I2SEXT_ENABLE(hi2s); + /* Enable I2S peripheral before the I2Sext */ + __HAL_I2S_ENABLE(hi2s); + } + + return HAL_OK; } /** @@ -877,65 +881,34 @@ static void I2SEx_TxRxDMACplt(DMA_HandleTypeDef *hdma) { I2S_HandleTypeDef *hi2s = (I2S_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; - /* if DMA is configured in DMA_NORMAL mode */ + /* If DMA is configured in DMA_NORMAL mode */ if (hdma->Init.Mode == DMA_NORMAL) { - if (hi2s->hdmarx == hdma) + if (((hi2s->Instance->I2SCFGR & SPI_I2SCFGR_I2SCFG) == I2S_MODE_MASTER_TX) || \ + ((hi2s->Instance->I2SCFGR & SPI_I2SCFGR_I2SCFG) == I2S_MODE_SLAVE_TX)) + /* Disable Tx & Rx DMA Requests */ { - /* Disable Rx DMA Request */ - if (((hi2s->Instance->I2SCFGR & SPI_I2SCFGR_I2SCFG) == I2S_MODE_MASTER_TX) || \ - ((hi2s->Instance->I2SCFGR & SPI_I2SCFGR_I2SCFG) == I2S_MODE_SLAVE_TX)) - { - CLEAR_BIT(I2SxEXT(hi2s->Instance)->CR2, SPI_CR2_RXDMAEN); - } - else - { - CLEAR_BIT(hi2s->Instance->CR2, SPI_CR2_RXDMAEN); - } - - hi2s->RxXferCount = 0U; - - if (hi2s->TxXferCount == 0U) - { - hi2s->State = HAL_I2S_STATE_READY; - - /* Call user TxRx complete callback */ -#if (USE_HAL_I2S_REGISTER_CALLBACKS == 1U) - hi2s->TxRxCpltCallback(hi2s); -#else - HAL_I2SEx_TxRxCpltCallback(hi2s); -#endif /* USE_HAL_I2S_REGISTER_CALLBACKS */ - } + CLEAR_BIT(I2SxEXT(hi2s->Instance)->CR2, SPI_CR2_RXDMAEN); + CLEAR_BIT(hi2s->Instance->CR2, SPI_CR2_TXDMAEN); } - - if (hi2s->hdmatx == hdma) + else { - /* Disable Tx DMA Request */ - if (((hi2s->Instance->I2SCFGR & SPI_I2SCFGR_I2SCFG) == I2S_MODE_MASTER_TX) || \ - ((hi2s->Instance->I2SCFGR & SPI_I2SCFGR_I2SCFG) == I2S_MODE_SLAVE_TX)) - { - CLEAR_BIT(hi2s->Instance->CR2, SPI_CR2_TXDMAEN); - } - else - { - CLEAR_BIT(I2SxEXT(hi2s->Instance)->CR2, SPI_CR2_TXDMAEN); - } + CLEAR_BIT(hi2s->Instance->CR2, SPI_CR2_RXDMAEN); + CLEAR_BIT(I2SxEXT(hi2s->Instance)->CR2, SPI_CR2_TXDMAEN); + } - hi2s->TxXferCount = 0U; + hi2s->RxXferCount = 0U; + hi2s->TxXferCount = 0U; - if (hi2s->RxXferCount == 0U) - { - hi2s->State = HAL_I2S_STATE_READY; + hi2s->State = HAL_I2S_STATE_READY; + } - /* Call user TxRx complete callback */ + /* Call user TxRx complete callback */ #if (USE_HAL_I2S_REGISTER_CALLBACKS == 1U) - hi2s->TxRxCpltCallback(hi2s); + hi2s->TxRxCpltCallback(hi2s); #else - HAL_I2SEx_TxRxCpltCallback(hi2s); + HAL_I2SEx_TxRxCpltCallback(hi2s); #endif /* USE_HAL_I2S_REGISTER_CALLBACKS */ - } - } - } } /** @@ -1091,8 +1064,11 @@ static void I2SEx_RxISR_I2SExt(I2S_HandleTypeDef *hi2s) * @param i2sUsed I2S instance reference * @retval HAL status */ -static HAL_StatusTypeDef I2SEx_FullDuplexWaitFlagStateUntilTimeout(I2S_HandleTypeDef *hi2s, uint32_t Flag, - uint32_t State, uint32_t Timeout, I2S_UseTypeDef i2sUsed) +static HAL_StatusTypeDef I2SEx_FullDuplexWaitFlagStateUntilTimeout(I2S_HandleTypeDef *hi2s, + uint32_t Flag, + uint32_t State, + uint32_t Timeout, + I2S_UseTypeDef i2sUsed) { uint32_t tickstart = HAL_GetTick(); @@ -1153,4 +1129,3 @@ static HAL_StatusTypeDef I2SEx_FullDuplexWaitFlagStateUntilTimeout(I2S_HandleTyp * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_mmc.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_mmc.c new file mode 100644 index 00000000..466eab5f --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_mmc.c @@ -0,0 +1,3201 @@ +/** + ****************************************************************************** + * @file stm32f4xx_hal_mmc.c + * @author MCD 
Application Team + * @brief MMC card HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Secure Digital (MMC) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + MMC card Control functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + This driver implements a high level communication layer for read and write from/to + this memory. The needed STM32 hardware resources (SDMMC and GPIO) are performed by + the user in HAL_MMC_MspInit() function (MSP layer). + Basically, the MSP layer configuration should be the same as we provide in the + examples. + You can easily tailor this configuration according to hardware resources. + + [..] + This driver is a generic layered driver for SDMMC memories which uses the HAL + SDMMC driver functions to interface with MMC and eMMC cards devices. + It is used as follows: + + (#)Initialize the SDMMC low level resources by implement the HAL_MMC_MspInit() API: + (##) Enable the SDMMC interface clock using __HAL_RCC_SDMMC_CLK_ENABLE(); + (##) SDMMC pins configuration for MMC card + (+++) Enable the clock for the SDMMC GPIOs using the functions __HAL_RCC_GPIOx_CLK_ENABLE(); + (+++) Configure these SDMMC pins as alternate function pull-up using HAL_GPIO_Init() + and according to your pin assignment; + (##) DMA Configuration if you need to use DMA process (HAL_MMC_ReadBlocks_DMA() + and HAL_MMC_WriteBlocks_DMA() APIs). + (+++) Enable the DMAx interface clock using __HAL_RCC_DMAx_CLK_ENABLE(); + (+++) Configure the DMA using the function HAL_DMA_Init() with predeclared and filled. + (##) NVIC configuration if you need to use interrupt process when using DMA transfer. + (+++) Configure the SDMMC and DMA interrupt priorities using function HAL_NVIC_SetPriority(); + DMA priority is superior to SDMMC's priority + (+++) Enable the NVIC DMA and SDMMC IRQs using function HAL_NVIC_EnableIRQ() + (+++) SDMMC interrupts are managed using the macros __HAL_MMC_ENABLE_IT() + and __HAL_MMC_DISABLE_IT() inside the communication process. + (+++) SDMMC interrupts pending bits are managed using the macros __HAL_MMC_GET_IT() + and __HAL_MMC_CLEAR_IT() + (##) NVIC configuration if you need to use interrupt process (HAL_MMC_ReadBlocks_IT() + and HAL_MMC_WriteBlocks_IT() APIs). + (+++) Configure the SDMMC interrupt priorities using function HAL_NVIC_SetPriority(); + (+++) Enable the NVIC SDMMC IRQs using function HAL_NVIC_EnableIRQ() + (+++) SDMMC interrupts are managed using the macros __HAL_MMC_ENABLE_IT() + and __HAL_MMC_DISABLE_IT() inside the communication process. 
+    (+++) SDMMC interrupts pending bits are managed using the macros __HAL_MMC_GET_IT()
+          and __HAL_MMC_CLEAR_IT()
+    (#) At this stage, you can perform MMC read/write/erase operations after MMC card initialization
+
+
+  *** MMC Card Initialization and configuration ***
+  ================================================
+  [..]
+    To initialize the MMC card, use the HAL_MMC_Init() function. It initializes the
+    SDMMC peripheral (STM32 side) and the MMC card, and puts it into Standby state
+    (ready for data transfer). This function performs the following operations:
+
+    (#) Initialize the SDMMC peripheral interface with the default configuration.
+        The initialization process is done at 400 kHz. You can change or adapt
+        this frequency by adjusting the "ClockDiv" field.
+        The MMC card frequency (SDMMC_CK) is computed as follows:
+
+          SDMMC_CK = SDMMCCLK / (ClockDiv + 2)
+
+        In initialization mode and according to the MMC card standard,
+        make sure that the SDMMC_CK frequency doesn't exceed 400 kHz.
+
+        This phase of initialization is done through the SDMMC_Init() and
+        SDMMC_PowerState_ON() SDMMC low-level APIs.
+
+    (#) Initialize the MMC card. The API used is HAL_MMC_InitCard().
+        This phase performs the card initialization and identification,
+        and checks the MMC card type (Standard Capacity or High Capacity).
+        The initialization flow is compatible with the MMC standard.
+
+        This API (HAL_MMC_InitCard()) can also be used to reinitialize the card
+        after it has been unplugged and plugged in again.
+
+    (#) Configure the MMC card data transfer frequency. By default, the card transfer
+        frequency is set to 24 MHz. You can change or adapt this frequency by adjusting
+        the "ClockDiv" field.
+        In transfer mode and according to the MMC card standard, make sure that the
+        SDMMC_CK frequency doesn't exceed 25 MHz (or 50 MHz after switching to
+        High-speed mode).
+        To be able to use a frequency higher than 24 MHz, you should use the SDMMC
+        peripheral in bypass mode. Refer to the corresponding reference manual
+        for more details.
+
+    (#) Select the corresponding MMC card according to the address read in step 2.
+
+    (#) Configure the MMC card in wide bus mode: 4-bit data.
+
+  *** MMC Card Read operation ***
+  ==============================
+  [..]
+    (+) You can read from the MMC card in polling mode by using the function HAL_MMC_ReadBlocks().
+        This function supports only 512-byte block lengths (the block size should be
+        chosen as 512 bytes).
+        You can choose either one block read operation or multiple block read operation
+        by adjusting the "NumberOfBlocks" parameter.
+        After this, you have to ensure that the transfer is done correctly. The check is done
+        through the HAL_MMC_GetCardState() function for the MMC card state.
+
+    (+) You can read from the MMC card in DMA mode by using the function HAL_MMC_ReadBlocks_DMA().
+        This function supports only 512-byte block lengths (the block size should be
+        chosen as 512 bytes).
+        You can choose either one block read operation or multiple block read operation
+        by adjusting the "NumberOfBlocks" parameter.
+        After this, you have to ensure that the transfer is done correctly. The check is done
+        through the HAL_MMC_GetCardState() function for the MMC card state.
+        You could also check the DMA transfer process through the MMC Rx interrupt event.
+
+    (+) You can read from the MMC card in interrupt mode by using the function HAL_MMC_ReadBlocks_IT().
+        This function allows the read of 512-byte blocks.
+        You can choose either one block read operation or multiple block read operation
+        by adjusting the "NumberOfBlocks" parameter.
+        After this, you have to ensure that the transfer is done correctly. The check is done
+        through the HAL_MMC_GetCardState() function for the MMC card state.
+        You could also check the IT transfer process through the MMC Rx interrupt event.
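For illustration, here is a minimal polling-mode read sketch built from the calls described above. The handle name hmmc_example, the 48 MHz SDIOCLK assumed in the ClockDiv comment, and the placeholder error handler are illustrative only and are not part of this driver.

#include "stm32f4xx_hal.h"

MMC_HandleTypeDef hmmc_example;                  /* hypothetical handle name            */
static uint8_t rd_buf[512];                      /* one 512-byte logical block          */

static void Example_Error(void) { for (;;) { } } /* placeholder error handler           */

void MMC_PollingReadExample(void)
{
  hmmc_example.Instance                 = SDIO;
  hmmc_example.Init.ClockEdge           = SDIO_CLOCK_EDGE_RISING;
  hmmc_example.Init.ClockBypass         = SDIO_CLOCK_BYPASS_DISABLE;
  hmmc_example.Init.ClockPowerSave      = SDIO_CLOCK_POWER_SAVE_DISABLE;
  hmmc_example.Init.BusWide             = SDIO_BUS_WIDE_1B;
  hmmc_example.Init.HardwareFlowControl = SDIO_HARDWARE_FLOW_CONTROL_DISABLE;
  hmmc_example.Init.ClockDiv            = 0U;    /* SDMMC_CK = SDIOCLK / (0 + 2) = 24 MHz
                                                    with an assumed 48 MHz SDIOCLK      */

  if (HAL_MMC_Init(&hmmc_example) != HAL_OK)     /* also runs HAL_MMC_MspInit()         */
  {
    Example_Error();
  }

  /* Read one block from block address 0 with a 5 s timeout */
  if (HAL_MMC_ReadBlocks(&hmmc_example, rd_buf, 0U, 1U, 5000U) != HAL_OK)
  {
    Example_Error();
  }

  /* Wait until the card is back in transfer state before the next access */
  while (HAL_MMC_GetCardState(&hmmc_example) != HAL_MMC_CARD_TRANSFER)
  {
  }
}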
+  *** MMC Card Write operation ***
+  ===============================
+  [..]
+    (+) You can write to the MMC card in polling mode by using the function HAL_MMC_WriteBlocks().
+        This function supports only 512-byte block lengths (the block size should be
+        chosen as 512 bytes).
+        You can choose either one block write operation or multiple block write operation
+        by adjusting the "NumberOfBlocks" parameter.
+        After this, you have to ensure that the transfer is done correctly. The check is done
+        through the HAL_MMC_GetCardState() function for the MMC card state.
+
+    (+) You can write to the MMC card in DMA mode by using the function HAL_MMC_WriteBlocks_DMA().
+        This function supports only 512-byte block lengths (the block size should be
+        chosen as 512 bytes).
+        You can choose either one block write operation or multiple block write operation
+        by adjusting the "NumberOfBlocks" parameter.
+        After this, you have to ensure that the transfer is done correctly. The check is done
+        through the HAL_MMC_GetCardState() function for the MMC card state.
+        You could also check the DMA transfer process through the MMC Tx interrupt event.
+
+    (+) You can write to the MMC card in interrupt mode by using the function HAL_MMC_WriteBlocks_IT().
+        This function allows the write of 512-byte blocks.
+        You can choose either one block write operation or multiple block write operation
+        by adjusting the "NumberOfBlocks" parameter.
+        After this, you have to ensure that the transfer is done correctly. The check is done
+        through the HAL_MMC_GetCardState() function for the MMC card state.
+        You could also check the IT transfer process through the MMC Tx interrupt event.
+
+  *** MMC card information ***
+  ===========================
+  [..]
+    (+) To get MMC card information, you can use the function HAL_MMC_GetCardInfo().
+        It returns useful information about the MMC card such as block size, card type,
+        block number ...
+
+  *** MMC card CSD register ***
+  ============================
+  [..]
+    (+) The HAL_MMC_GetCardCSD() API allows to get the parameters of the CSD register.
+        Some of the CSD parameters are useful for card initialization and identification.
+
+  *** MMC card CID register ***
+  ============================
+  [..]
+    (+) The HAL_MMC_GetCardCID() API allows to get the parameters of the CID register.
+        Some of the CID parameters are useful for card initialization and identification.
+
+  *** MMC HAL driver macros list ***
+  ==================================
+  [..]
+    Below is the list of the most used macros in the MMC HAL driver.
+
+    (+) __HAL_MMC_ENABLE : Enable the MMC device
+    (+) __HAL_MMC_DISABLE : Disable the MMC device
+    (+) __HAL_MMC_DMA_ENABLE : Enable the SDMMC DMA transfer
+    (+) __HAL_MMC_DMA_DISABLE : Disable the SDMMC DMA transfer
+    (+) __HAL_MMC_ENABLE_IT : Enable the MMC device interrupt
+    (+) __HAL_MMC_DISABLE_IT : Disable the MMC device interrupt
+    (+) __HAL_MMC_GET_FLAG : Check whether the specified MMC flag is set or not
+    (+) __HAL_MMC_CLEAR_FLAG : Clear the MMC's pending flags
+
+  [..]
+    (@) You can refer to the MMC HAL driver header file for more useful macros
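As a companion to the read sketch above, a polling-mode write followed by a card-information query might look as follows. The handle hmmc_example is assumed to have been initialised already, and error handling is only sketched.

#include "stm32f4xx_hal.h"

extern MMC_HandleTypeDef hmmc_example;           /* assumed: initialised as in the read sketch */

static uint8_t wr_buf[512];

void MMC_PollingWriteExample(void)
{
  HAL_MMC_CardInfoTypeDef info;

  /* Write one 512-byte block to block address 0 with a 5 s timeout */
  if (HAL_MMC_WriteBlocks(&hmmc_example, wr_buf, 0U, 1U, 5000U) != HAL_OK)
  {
    return;                                      /* error handling left to the application */
  }

  /* The card may still be programming internally: poll the card state */
  while (HAL_MMC_GetCardState(&hmmc_example) != HAL_MMC_CARD_TRANSFER)
  {
  }

  /* Block count and block size, as described under "MMC card information" */
  if (HAL_MMC_GetCardInfo(&hmmc_example, &info) == HAL_OK)
  {
    uint32_t size_kbytes = (info.LogBlockNbr / 1024U) * info.LogBlockSize;
    (void)size_kbytes;                           /* e.g. report capacity to the application */
  }
}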
+  *** Callback registration ***
+  =============================================
+  [..]
+    The compilation define USE_HAL_MMC_REGISTER_CALLBACKS, when set to 1,
+    allows the user to configure dynamically the driver callbacks.
+
+    Use the function HAL_MMC_RegisterCallback() to register a user callback.
+    It allows to register the following callbacks:
+      (+) TxCpltCallback : callback when a transmission transfer is completed.
+      (+) RxCpltCallback : callback when a reception transfer is completed.
+      (+) ErrorCallback : callback when an error occurs.
+      (+) AbortCpltCallback : callback when an abort is completed.
+      (+) MspInitCallback : MMC MspInit.
+      (+) MspDeInitCallback : MMC MspDeInit.
+    This function takes as parameters the HAL peripheral handle, the Callback ID
+    and a pointer to the user callback function.
+
+    Use the function HAL_MMC_UnRegisterCallback() to reset a callback to the default
+    weak (surcharged) function. It allows to reset the following callbacks:
+      (+) TxCpltCallback : callback when a transmission transfer is completed.
+      (+) RxCpltCallback : callback when a reception transfer is completed.
+      (+) ErrorCallback : callback when an error occurs.
+      (+) AbortCpltCallback : callback when an abort is completed.
+      (+) MspInitCallback : MMC MspInit.
+      (+) MspDeInitCallback : MMC MspDeInit.
+    This function takes as parameters the HAL peripheral handle and the Callback ID.
+
+    By default, after HAL_MMC_Init() and when the state is HAL_MMC_STATE_RESET,
+    all callbacks are reset to the corresponding legacy weak (surcharged) functions.
+    An exception is made for the MspInit and MspDeInit callbacks, which are respectively
+    reset to the legacy weak (surcharged) functions in HAL_MMC_Init() and
+    HAL_MMC_DeInit() only when these callbacks are null (not registered beforehand).
+    If MspInit or MspDeInit are not null, HAL_MMC_Init() and HAL_MMC_DeInit()
+    keep and use the user MspInit/MspDeInit callbacks (registered beforehand).
+
+    Callbacks can be registered/unregistered in the READY state only.
+    An exception is made for the MspInit/MspDeInit callbacks, which can be
+    registered/unregistered in the READY or RESET state; this way, registered (user)
+    MspInit/MspDeInit callbacks can be used during the Init/DeInit.
+    In that case, first register the MspInit/MspDeInit user callbacks
+    using HAL_MMC_RegisterCallback() before calling HAL_MMC_DeInit()
+    or HAL_MMC_Init().
+
+    When the compilation define USE_HAL_MMC_REGISTER_CALLBACKS is set to 0 or
+    not defined, the callback registration feature is not available
+    and weak (surcharged) callbacks are used.
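When USE_HAL_MMC_REGISTER_CALLBACKS is set to 1, the registration sequence described above could be exercised as in this sketch; the handle and the callback body are placeholders.

#include "stm32f4xx_hal.h"

#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U)
extern MMC_HandleTypeDef hmmc_example;           /* assumed handle, already initialised     */

/* User callback that replaces the weak HAL_MMC_TxCpltCallback() */
static void App_MmcTxDone(MMC_HandleTypeDef *hmmc)
{
  UNUSED(hmmc);
  /* e.g. release an RTOS semaphore here */
}

void MMC_RegisterCallbackExample(void)
{
  /* The handle must be in READY state for this callback ID (i.e. after HAL_MMC_Init()) */
  if (HAL_MMC_RegisterCallback(&hmmc_example, HAL_MMC_TX_CPLT_CB_ID, App_MmcTxDone) != HAL_OK)
  {
    /* registration refused: handle state or callback ID not valid */
  }

  /* Restore the default weak callback when the user callback is no longer wanted */
  (void)HAL_MMC_UnRegisterCallback(&hmmc_example, HAL_MMC_TX_CPLT_CB_ID);
}
#endif /* USE_HAL_MMC_REGISTER_CALLBACKS */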
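The low-level resources mentioned at the beginning of this section are supplied by the application in HAL_MMC_MspInit(). The sketch below assumes the SDIO pinout commonly used on STM32F4 boards (PC8..PC12 plus PD2, alternate function 12) and a fixed NVIC priority; both are board-level assumptions, not requirements of the driver.

#include "stm32f4xx_hal.h"

/* One possible HAL_MMC_MspInit() body; adapt pins, priorities and DMA setup to the board. */
void HAL_MMC_MspInit(MMC_HandleTypeDef *hmmc)
{
  GPIO_InitTypeDef gpio = {0};

  if (hmmc->Instance == SDIO)
  {
    /* Peripheral and GPIO port clocks */
    __HAL_RCC_SDIO_CLK_ENABLE();
    __HAL_RCC_GPIOC_CLK_ENABLE();
    __HAL_RCC_GPIOD_CLK_ENABLE();

    /* D0..D3 (PC8..PC11) and CK (PC12) as alternate function, pull-up */
    gpio.Pin       = GPIO_PIN_8 | GPIO_PIN_9 | GPIO_PIN_10 | GPIO_PIN_11 | GPIO_PIN_12;
    gpio.Mode      = GPIO_MODE_AF_PP;
    gpio.Pull      = GPIO_PULLUP;
    gpio.Speed     = GPIO_SPEED_FREQ_VERY_HIGH;
    gpio.Alternate = GPIO_AF12_SDIO;
    HAL_GPIO_Init(GPIOC, &gpio);

    /* CMD (PD2) */
    gpio.Pin = GPIO_PIN_2;
    HAL_GPIO_Init(GPIOD, &gpio);

    /* SDIO global interrupt, used by the IT and DMA transfer APIs */
    HAL_NVIC_SetPriority(SDIO_IRQn, 5U, 0U);
    HAL_NVIC_EnableIRQ(SDIO_IRQn);

    /* For HAL_MMC_ReadBlocks_DMA()/HAL_MMC_WriteBlocks_DMA(), the DMA streams
       would additionally be initialised here and linked with __HAL_LINKDMA(). */
  }
}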
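A DMA-mode read could be driven as in the following sketch. It assumes the Rx DMA stream (for example DMA2_Stream3, channel 4, which is commonly routed to SDIO on STM32F4) has been initialised and linked to the handle in HAL_MMC_MspInit(), and that the SDIO and DMA interrupt handlers forward to the HAL as shown; the names hmmc_example and hdma_sdio_rx are placeholders.

#include "stm32f4xx_hal.h"

extern MMC_HandleTypeDef hmmc_example;           /* assumed handle                              */
extern DMA_HandleTypeDef hdma_sdio_rx;           /* assumed Rx stream, e.g. DMA2_Stream3/CH4,
                                                    configured and linked in HAL_MMC_MspInit()  */

static volatile uint8_t rx_done;
static uint8_t dma_buf[4U * 512U];               /* four 512-byte blocks                        */

void MMC_DmaReadExample(void)
{
  rx_done = 0U;

  if (HAL_MMC_ReadBlocks_DMA(&hmmc_example, dma_buf, 0U, 4U) != HAL_OK)
  {
    return;                                      /* error handling left to the application     */
  }

  while (rx_done == 0U)
  {
    /* the CPU is free here; completion is reported from the interrupt context */
  }

  while (HAL_MMC_GetCardState(&hmmc_example) != HAL_MMC_CARD_TRANSFER)
  {
  }
}

/* Overrides the weak callback declared by the driver */
void HAL_MMC_RxCpltCallback(MMC_HandleTypeDef *hmmc)
{
  UNUSED(hmmc);
  rx_done = 1U;
}

/* Interrupt handlers (vector names from the CMSIS startup file) forwarding to the HAL */
void SDIO_IRQHandler(void)
{
  HAL_MMC_IRQHandler(&hmmc_example);
}

void DMA2_Stream3_IRQHandler(void)
{
  HAL_DMA_IRQHandler(&hdma_sdio_rx);
}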
+ + @endverbatim + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_hal.h" + +/** @addtogroup STM32F4xx_HAL_Driver + * @{ + */ + +/** @defgroup MMC MMC + * @brief MMC HAL module driver + * @{ + */ + +#ifdef HAL_MMC_MODULE_ENABLED + +#if defined(SDIO) + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup MMC_Private_Defines + * @{ + */ +#if defined (VDD_VALUE) && (VDD_VALUE <= 1950U) +#define MMC_VOLTAGE_RANGE EMMC_LOW_VOLTAGE_RANGE + +#define MMC_EXT_CSD_PWR_CL_26_INDEX 201 +#define MMC_EXT_CSD_PWR_CL_52_INDEX 200 +#define MMC_EXT_CSD_PWR_CL_DDR_52_INDEX 238 + +#define MMC_EXT_CSD_PWR_CL_26_POS 8 +#define MMC_EXT_CSD_PWR_CL_52_POS 0 +#define MMC_EXT_CSD_PWR_CL_DDR_52_POS 16 +#else +#define MMC_VOLTAGE_RANGE EMMC_HIGH_VOLTAGE_RANGE + +#define MMC_EXT_CSD_PWR_CL_26_INDEX 203 +#define MMC_EXT_CSD_PWR_CL_52_INDEX 202 +#define MMC_EXT_CSD_PWR_CL_DDR_52_INDEX 239 + +#define MMC_EXT_CSD_PWR_CL_26_POS 24 +#define MMC_EXT_CSD_PWR_CL_52_POS 16 +#define MMC_EXT_CSD_PWR_CL_DDR_52_POS 24 +#endif + +/* Frequencies used in the driver for clock divider calculation */ +#define MMC_INIT_FREQ 400000U /* Initialization phase : 400 kHz max */ +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** @defgroup MMC_Private_Functions MMC Private Functions + * @{ + */ +static uint32_t MMC_InitCard(MMC_HandleTypeDef *hmmc); +static uint32_t MMC_PowerON(MMC_HandleTypeDef *hmmc); +static uint32_t MMC_SendStatus(MMC_HandleTypeDef *hmmc, uint32_t *pCardStatus); +static uint32_t MMC_ReadExtCSD(MMC_HandleTypeDef *hmmc, uint32_t *pFieldData, uint16_t FieldIndex, uint32_t Timeout); +static void MMC_PowerOFF(MMC_HandleTypeDef *hmmc); +static void MMC_Write_IT(MMC_HandleTypeDef *hmmc); +static void MMC_Read_IT(MMC_HandleTypeDef *hmmc); +static void MMC_DMATransmitCplt(DMA_HandleTypeDef *hdma); +static void MMC_DMAReceiveCplt(DMA_HandleTypeDef *hdma); +static void MMC_DMAError(DMA_HandleTypeDef *hdma); +static void MMC_DMATxAbort(DMA_HandleTypeDef *hdma); +static void MMC_DMARxAbort(DMA_HandleTypeDef *hdma); +static uint32_t MMC_PwrClassUpdate(MMC_HandleTypeDef *hmmc, uint32_t Wide); +/** + * @} + */ +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup MMC_Exported_Functions + * @{ + */ + +/** @addtogroup MMC_Exported_Functions_Group1 + * @brief Initialization and de-initialization functions + * +@verbatim + ============================================================================== + ##### Initialization and de-initialization functions ##### + ============================================================================== + [..] + This section provides functions allowing to initialize/de-initialize the MMC + card device to be ready for use. + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the MMC according to the specified parameters in the + MMC_HandleTypeDef and create the associated handle. 
+ * @param hmmc: Pointer to the MMC handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_Init(MMC_HandleTypeDef *hmmc) +{ + /* Check the MMC handle allocation */ + if(hmmc == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_SDIO_ALL_INSTANCE(hmmc->Instance)); + assert_param(IS_SDIO_CLOCK_EDGE(hmmc->Init.ClockEdge)); + assert_param(IS_SDIO_CLOCK_BYPASS(hmmc->Init.ClockBypass)); + assert_param(IS_SDIO_CLOCK_POWER_SAVE(hmmc->Init.ClockPowerSave)); + assert_param(IS_SDIO_BUS_WIDE(hmmc->Init.BusWide)); + assert_param(IS_SDIO_HARDWARE_FLOW_CONTROL(hmmc->Init.HardwareFlowControl)); + assert_param(IS_SDIO_CLKDIV(hmmc->Init.ClockDiv)); + + if(hmmc->State == HAL_MMC_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hmmc->Lock = HAL_UNLOCKED; +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + /* Reset Callback pointers in HAL_MMC_STATE_RESET only */ + hmmc->TxCpltCallback = HAL_MMC_TxCpltCallback; + hmmc->RxCpltCallback = HAL_MMC_RxCpltCallback; + hmmc->ErrorCallback = HAL_MMC_ErrorCallback; + hmmc->AbortCpltCallback = HAL_MMC_AbortCallback; + + if(hmmc->MspInitCallback == NULL) + { + hmmc->MspInitCallback = HAL_MMC_MspInit; + } + + /* Init the low level hardware */ + hmmc->MspInitCallback(hmmc); +#else + /* Init the low level hardware : GPIO, CLOCK, CORTEX...etc */ + HAL_MMC_MspInit(hmmc); +#endif + } + + hmmc->State = HAL_MMC_STATE_BUSY; + + /* Initialize the Card parameters */ + if(HAL_MMC_InitCard(hmmc) == HAL_ERROR) + { + return HAL_ERROR; + } + + /* Initialize the error code */ + hmmc->ErrorCode = HAL_MMC_ERROR_NONE; + + /* Initialize the MMC operation */ + hmmc->Context = MMC_CONTEXT_NONE; + + /* Initialize the MMC state */ + hmmc->State = HAL_MMC_STATE_READY; + + /* Configure bus width */ + if (hmmc->Init.BusWide != SDIO_BUS_WIDE_1B) + { + if (HAL_MMC_ConfigWideBusOperation(hmmc, hmmc->Init.BusWide) != HAL_OK) + { + return HAL_ERROR; + } + } + + return HAL_OK; +} + +/** + * @brief Initializes the MMC Card. + * @param hmmc: Pointer to MMC handle + * @note This function initializes the MMC card. It could be used when a card + re-initialization is needed. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_InitCard(MMC_HandleTypeDef *hmmc) +{ + uint32_t errorstate; + MMC_InitTypeDef Init; + HAL_StatusTypeDef status; + + /* Default SDIO peripheral configuration for MMC card initialization */ + Init.ClockEdge = SDIO_CLOCK_EDGE_RISING; + Init.ClockBypass = SDIO_CLOCK_BYPASS_DISABLE; + Init.ClockPowerSave = SDIO_CLOCK_POWER_SAVE_DISABLE; + Init.BusWide = SDIO_BUS_WIDE_1B; + Init.HardwareFlowControl = SDIO_HARDWARE_FLOW_CONTROL_DISABLE; + Init.ClockDiv = SDIO_INIT_CLK_DIV; + + /* Initialize SDIO peripheral interface with default configuration */ + status = SDIO_Init(hmmc->Instance, Init); + if(status == HAL_ERROR) + { + return HAL_ERROR; + } + + /* Disable SDIO Clock */ + __HAL_MMC_DISABLE(hmmc); + + /* Set Power State to ON */ + status = SDIO_PowerState_ON(hmmc->Instance); + if(status == HAL_ERROR) + { + return HAL_ERROR; + } + + /* Enable MMC Clock */ + __HAL_MMC_ENABLE(hmmc); + + /* Required power up waiting time before starting the MMC initialization sequence */ + HAL_Delay(2); + + /* Identify card operating voltage */ + errorstate = MMC_PowerON(hmmc); + if(errorstate != HAL_MMC_ERROR_NONE) + { + hmmc->State = HAL_MMC_STATE_READY; + hmmc->ErrorCode |= errorstate; + return HAL_ERROR; + } + + /* Card initialization */ + errorstate = MMC_InitCard(hmmc); + if(errorstate != HAL_MMC_ERROR_NONE) + { + hmmc->State = HAL_MMC_STATE_READY; + hmmc->ErrorCode |= errorstate; + return HAL_ERROR; + } + + /* Set Block Size for Card */ + errorstate = SDMMC_CmdBlockLength(hmmc->Instance, MMC_BLOCKSIZE); + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + return HAL_OK; +} + +/** + * @brief De-Initializes the MMC card. + * @param hmmc: Pointer to MMC handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_DeInit(MMC_HandleTypeDef *hmmc) +{ + /* Check the MMC handle allocation */ + if(hmmc == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_SDIO_ALL_INSTANCE(hmmc->Instance)); + + hmmc->State = HAL_MMC_STATE_BUSY; + + /* Set MMC power state to off */ + MMC_PowerOFF(hmmc); + +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + if(hmmc->MspDeInitCallback == NULL) + { + hmmc->MspDeInitCallback = HAL_MMC_MspDeInit; + } + + /* DeInit the low level hardware */ + hmmc->MspDeInitCallback(hmmc); +#else + /* De-Initialize the MSP layer */ + HAL_MMC_MspDeInit(hmmc); +#endif + + hmmc->ErrorCode = HAL_MMC_ERROR_NONE; + hmmc->State = HAL_MMC_STATE_RESET; + + return HAL_OK; +} + + +/** + * @brief Initializes the MMC MSP. + * @param hmmc: Pointer to MMC handle + * @retval None + */ +__weak void HAL_MMC_MspInit(MMC_HandleTypeDef *hmmc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hmmc); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_MMC_MspInit could be implemented in the user file + */ +} + +/** + * @brief De-Initialize MMC MSP. 
+ * @param hmmc: Pointer to MMC handle + * @retval None + */ +__weak void HAL_MMC_MspDeInit(MMC_HandleTypeDef *hmmc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hmmc); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_MMC_MspDeInit could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @addtogroup MMC_Exported_Functions_Group2 + * @brief Data transfer functions + * +@verbatim + ============================================================================== + ##### IO operation functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to manage the data + transfer from/to MMC card. + +@endverbatim + * @{ + */ + +/** + * @brief Reads block(s) from a specified address in a card. The Data transfer + * is managed by polling mode. + * @note This API should be followed by a check on the card state through + * HAL_MMC_GetCardState(). + * @param hmmc: Pointer to MMC handle + * @param pData: pointer to the buffer that will contain the received data + * @param BlockAdd: Block Address from where data is to be read + * @param NumberOfBlocks: Number of MMC blocks to read + * @param Timeout: Specify timeout value + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_ReadBlocks(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks, uint32_t Timeout) +{ + SDIO_DataInitTypeDef config; + uint32_t errorstate; + uint32_t tickstart = HAL_GetTick(); + uint32_t count, data, dataremaining; + uint32_t add = BlockAdd; + uint8_t *tempbuff = pData; + + if(NULL == pData) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_PARAM; + return HAL_ERROR; + } + + if(hmmc->State == HAL_MMC_STATE_READY) + { + hmmc->ErrorCode = HAL_MMC_ERROR_NONE; + + if((BlockAdd + NumberOfBlocks) > (hmmc->MmcCard.LogBlockNbr)) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_ADDR_OUT_OF_RANGE; + return HAL_ERROR; + } + + hmmc->State = HAL_MMC_STATE_BUSY; + + /* Initialize data control register */ + hmmc->Instance->DCTRL = 0U; + + if ((hmmc->MmcCard.CardType) != MMC_HIGH_CAPACITY_CARD) + { + add *= 512U; + } + + /* Configure the MMC DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = NumberOfBlocks * MMC_BLOCKSIZE; + config.DataBlockSize = SDIO_DATABLOCK_SIZE_512B; + config.TransferDir = SDIO_TRANSFER_DIR_TO_SDIO; + config.TransferMode = SDIO_TRANSFER_MODE_BLOCK; + config.DPSM = SDIO_DPSM_ENABLE; + (void)SDIO_ConfigData(hmmc->Instance, &config); + + /* Read block(s) in polling mode */ + if(NumberOfBlocks > 1U) + { + hmmc->Context = MMC_CONTEXT_READ_MULTIPLE_BLOCK; + + /* Read Multi Block command */ + errorstate = SDMMC_CmdReadMultiBlock(hmmc->Instance, add); + } + else + { + hmmc->Context = MMC_CONTEXT_READ_SINGLE_BLOCK; + + /* Read Single Block command */ + errorstate = SDMMC_CmdReadSingleBlock(hmmc->Instance, add); + } + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + /* Poll on SDIO flags */ + dataremaining = config.DataLength; +#if defined(SDIO_STA_STBITERR) + while(!__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_RXOVERR | SDIO_FLAG_DCRCFAIL | SDIO_FLAG_DTIMEOUT | SDIO_FLAG_DATAEND | SDIO_FLAG_STBITERR)) +#else /* SDIO_STA_STBITERR not defined */ + while(!__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_RXOVERR | SDIO_FLAG_DCRCFAIL | SDIO_FLAG_DTIMEOUT | 
SDIO_FLAG_DATAEND)) +#endif /* SDIO_STA_STBITERR */ + { + if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_RXFIFOHF) && (dataremaining > 0U)) + { + /* Read data from SDIO Rx FIFO */ + for(count = 0U; count < 8U; count++) + { + data = SDIO_ReadFIFO(hmmc->Instance); + *tempbuff = (uint8_t)(data & 0xFFU); + tempbuff++; + dataremaining--; + *tempbuff = (uint8_t)((data >> 8U) & 0xFFU); + tempbuff++; + dataremaining--; + *tempbuff = (uint8_t)((data >> 16U) & 0xFFU); + tempbuff++; + dataremaining--; + *tempbuff = (uint8_t)((data >> 24U) & 0xFFU); + tempbuff++; + dataremaining--; + } + } + + if(((HAL_GetTick()-tickstart) >= Timeout) || (Timeout == 0U)) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_TIMEOUT; + hmmc->State= HAL_MMC_STATE_READY; + return HAL_TIMEOUT; + } + } + + /* Send stop transmission command in case of multiblock read */ + if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_DATAEND) && (NumberOfBlocks > 1U)) + { + /* Send stop transmission command */ + errorstate = SDMMC_CmdStopTransfer(hmmc->Instance); + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + } + + /* Get error state */ + if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_DTIMEOUT)) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_DATA_TIMEOUT; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + else if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_DCRCFAIL)) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_DATA_CRC_FAIL; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + else if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_RXOVERR)) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_RX_OVERRUN; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + else + { + /* Nothing to do */ + } + + /* Empty FIFO if there is still any data */ + while ((__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_RXDAVL)) && (dataremaining > 0U)) + { + data = SDIO_ReadFIFO(hmmc->Instance); + *tempbuff = (uint8_t)(data & 0xFFU); + tempbuff++; + dataremaining--; + *tempbuff = (uint8_t)((data >> 8U) & 0xFFU); + tempbuff++; + dataremaining--; + *tempbuff = (uint8_t)((data >> 16U) & 0xFFU); + tempbuff++; + dataremaining--; + *tempbuff = (uint8_t)((data >> 24U) & 0xFFU); + tempbuff++; + dataremaining--; + + if(((HAL_GetTick()-tickstart) >= Timeout) || (Timeout == 0U)) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_TIMEOUT; + hmmc->State= HAL_MMC_STATE_READY; + return HAL_ERROR; + } + } + + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_DATA_FLAGS); + + hmmc->State = HAL_MMC_STATE_READY; + + return HAL_OK; + } + else + { + hmmc->ErrorCode |= HAL_MMC_ERROR_BUSY; + return HAL_ERROR; + } +} + +/** + * @brief Allows to write block(s) to a specified address in a card. The Data + * transfer is managed by polling mode. + * @note This API should be followed by a check on the card state through + * HAL_MMC_GetCardState(). 
+ * @param hmmc: Pointer to MMC handle + * @param pData: pointer to the buffer that will contain the data to transmit + * @param BlockAdd: Block Address where data will be written + * @param NumberOfBlocks: Number of MMC blocks to write + * @param Timeout: Specify timeout value + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_WriteBlocks(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks, uint32_t Timeout) +{ + SDIO_DataInitTypeDef config; + uint32_t errorstate; + uint32_t tickstart = HAL_GetTick(); + uint32_t count, data, dataremaining; + uint32_t add = BlockAdd; + uint8_t *tempbuff = pData; + + if(NULL == pData) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_PARAM; + return HAL_ERROR; + } + + if(hmmc->State == HAL_MMC_STATE_READY) + { + hmmc->ErrorCode = HAL_MMC_ERROR_NONE; + + if((BlockAdd + NumberOfBlocks) > (hmmc->MmcCard.LogBlockNbr)) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_ADDR_OUT_OF_RANGE; + return HAL_ERROR; + } + + hmmc->State = HAL_MMC_STATE_BUSY; + + /* Initialize data control register */ + hmmc->Instance->DCTRL = 0U; + + if ((hmmc->MmcCard.CardType) != MMC_HIGH_CAPACITY_CARD) + { + add *= 512U; + } + + /* Write Blocks in Polling mode */ + if(NumberOfBlocks > 1U) + { + hmmc->Context = MMC_CONTEXT_WRITE_MULTIPLE_BLOCK; + + /* Write Multi Block command */ + errorstate = SDMMC_CmdWriteMultiBlock(hmmc->Instance, add); + } + else + { + hmmc->Context = MMC_CONTEXT_WRITE_SINGLE_BLOCK; + + /* Write Single Block command */ + errorstate = SDMMC_CmdWriteSingleBlock(hmmc->Instance, add); + } + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + /* Configure the MMC DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = NumberOfBlocks * MMC_BLOCKSIZE; + config.DataBlockSize = SDIO_DATABLOCK_SIZE_512B; + config.TransferDir = SDIO_TRANSFER_DIR_TO_CARD; + config.TransferMode = SDIO_TRANSFER_MODE_BLOCK; + config.DPSM = SDIO_DPSM_ENABLE; + (void)SDIO_ConfigData(hmmc->Instance, &config); + + /* Write block(s) in polling mode */ + dataremaining = config.DataLength; +#if defined(SDIO_STA_STBITERR) + while(!__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_TXUNDERR | SDIO_FLAG_DCRCFAIL | SDIO_FLAG_DTIMEOUT | SDIO_FLAG_DATAEND | SDIO_FLAG_STBITERR)) +#else /* SDIO_STA_STBITERR not defined */ + while(!__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_TXUNDERR | SDIO_FLAG_DCRCFAIL | SDIO_FLAG_DTIMEOUT | SDIO_FLAG_DATAEND)) +#endif /* SDIO_STA_STBITERR */ + { + if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_TXFIFOHE) && (dataremaining > 0U)) + { + /* Write data to SDIO Tx FIFO */ + for(count = 0U; count < 8U; count++) + { + data = (uint32_t)(*tempbuff); + tempbuff++; + dataremaining--; + data |= ((uint32_t)(*tempbuff) << 8U); + tempbuff++; + dataremaining--; + data |= ((uint32_t)(*tempbuff) << 16U); + tempbuff++; + dataremaining--; + data |= ((uint32_t)(*tempbuff) << 24U); + tempbuff++; + dataremaining--; + (void)SDIO_WriteFIFO(hmmc->Instance, &data); + } + } + + if(((HAL_GetTick()-tickstart) >= Timeout) || (Timeout == 0U)) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_TIMEOUT; + } + } + + /* Send stop transmission command in case of multiblock write */ + if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_DATAEND) && (NumberOfBlocks > 1U)) + { + /* Send stop transmission command */ + 
errorstate = SDMMC_CmdStopTransfer(hmmc->Instance); + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + } + + /* Get error state */ + if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_DTIMEOUT)) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_DATA_TIMEOUT; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + else if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_DCRCFAIL)) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_DATA_CRC_FAIL; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + else if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_TXUNDERR)) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_TX_UNDERRUN; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + else + { + /* Nothing to do */ + } + + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_DATA_FLAGS); + + hmmc->State = HAL_MMC_STATE_READY; + + return HAL_OK; + } + else + { + hmmc->ErrorCode |= HAL_MMC_ERROR_BUSY; + return HAL_ERROR; + } +} + +/** + * @brief Reads block(s) from a specified address in a card. The Data transfer + * is managed in interrupt mode. + * @note This API should be followed by a check on the card state through + * HAL_MMC_GetCardState(). + * @note You could also check the IT transfer process through the MMC Rx + * interrupt event. + * @param hmmc: Pointer to MMC handle + * @param pData: Pointer to the buffer that will contain the received data + * @param BlockAdd: Block Address from where data is to be read + * @param NumberOfBlocks: Number of blocks to read. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_ReadBlocks_IT(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks) +{ + SDIO_DataInitTypeDef config; + uint32_t errorstate; + uint32_t add = BlockAdd; + + if(NULL == pData) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_PARAM; + return HAL_ERROR; + } + + if(hmmc->State == HAL_MMC_STATE_READY) + { + hmmc->ErrorCode = HAL_MMC_ERROR_NONE; + + if((BlockAdd + NumberOfBlocks) > (hmmc->MmcCard.LogBlockNbr)) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_ADDR_OUT_OF_RANGE; + return HAL_ERROR; + } + + hmmc->State = HAL_MMC_STATE_BUSY; + + /* Initialize data control register */ + hmmc->Instance->DCTRL = 0U; + + hmmc->pRxBuffPtr = pData; + hmmc->RxXferSize = MMC_BLOCKSIZE * NumberOfBlocks; + +#if defined(SDIO_STA_STBITERR) + __HAL_MMC_ENABLE_IT(hmmc, (SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT | SDIO_IT_RXOVERR | SDIO_IT_DATAEND | SDIO_FLAG_RXFIFOHF | SDIO_IT_STBITERR)); +#else /* SDIO_STA_STBITERR not defined */ + __HAL_MMC_ENABLE_IT(hmmc, (SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT | SDIO_IT_RXOVERR | SDIO_IT_DATAEND | SDIO_FLAG_RXFIFOHF)); +#endif /* SDIO_STA_STBITERR */ + + if ((hmmc->MmcCard.CardType) != MMC_HIGH_CAPACITY_CARD) + { + add *= 512U; + } + + /* Configure the MMC DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = MMC_BLOCKSIZE * NumberOfBlocks; + config.DataBlockSize = SDIO_DATABLOCK_SIZE_512B; + config.TransferDir = SDIO_TRANSFER_DIR_TO_SDIO; + config.TransferMode = SDIO_TRANSFER_MODE_BLOCK; + config.DPSM = SDIO_DPSM_ENABLE; + (void)SDIO_ConfigData(hmmc->Instance, &config); + + /* Read Blocks in IT mode */ + if(NumberOfBlocks > 1U) + { + hmmc->Context = (MMC_CONTEXT_READ_MULTIPLE_BLOCK | MMC_CONTEXT_IT); + + /* Read Multi Block command */ + errorstate = SDMMC_CmdReadMultiBlock(hmmc->Instance, add); + } + else + { + hmmc->Context = (MMC_CONTEXT_READ_SINGLE_BLOCK | MMC_CONTEXT_IT); + + /* Read Single Block command */ + errorstate = SDMMC_CmdReadSingleBlock(hmmc->Instance, add); + } + + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Writes block(s) to a specified address in a card. The Data transfer + * is managed in interrupt mode. + * @note This API should be followed by a check on the card state through + * HAL_MMC_GetCardState(). + * @note You could also check the IT transfer process through the MMC Tx + * interrupt event. 
+ * @param hmmc: Pointer to MMC handle + * @param pData: Pointer to the buffer that will contain the data to transmit + * @param BlockAdd: Block Address where data will be written + * @param NumberOfBlocks: Number of blocks to write + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_WriteBlocks_IT(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks) +{ + SDIO_DataInitTypeDef config; + uint32_t errorstate; + uint32_t add = BlockAdd; + + if(NULL == pData) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_PARAM; + return HAL_ERROR; + } + + if(hmmc->State == HAL_MMC_STATE_READY) + { + hmmc->ErrorCode = HAL_MMC_ERROR_NONE; + + if((BlockAdd + NumberOfBlocks) > (hmmc->MmcCard.LogBlockNbr)) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_ADDR_OUT_OF_RANGE; + return HAL_ERROR; + } + + hmmc->State = HAL_MMC_STATE_BUSY; + + /* Initialize data control register */ + hmmc->Instance->DCTRL = 0U; + + hmmc->pTxBuffPtr = pData; + hmmc->TxXferSize = MMC_BLOCKSIZE * NumberOfBlocks; + + /* Enable transfer interrupts */ +#if defined(SDIO_STA_STBITERR) + __HAL_MMC_ENABLE_IT(hmmc, (SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT | SDIO_IT_TXUNDERR | SDIO_IT_DATAEND | SDIO_FLAG_TXFIFOHE | SDIO_IT_STBITERR)); +#else /* SDIO_STA_STBITERR not defined */ + __HAL_MMC_ENABLE_IT(hmmc, (SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT | SDIO_IT_TXUNDERR | SDIO_IT_DATAEND | SDIO_FLAG_TXFIFOHE)); +#endif /* SDIO_STA_STBITERR */ + + if ((hmmc->MmcCard.CardType) != MMC_HIGH_CAPACITY_CARD) + { + add *= 512U; + } + + /* Write Blocks in Polling mode */ + if(NumberOfBlocks > 1U) + { + hmmc->Context = (MMC_CONTEXT_WRITE_MULTIPLE_BLOCK| MMC_CONTEXT_IT); + + /* Write Multi Block command */ + errorstate = SDMMC_CmdWriteMultiBlock(hmmc->Instance, add); + } + else + { + hmmc->Context = (MMC_CONTEXT_WRITE_SINGLE_BLOCK | MMC_CONTEXT_IT); + + /* Write Single Block command */ + errorstate = SDMMC_CmdWriteSingleBlock(hmmc->Instance, add); + } + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + /* Configure the MMC DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = MMC_BLOCKSIZE * NumberOfBlocks; + config.DataBlockSize = SDIO_DATABLOCK_SIZE_512B; + config.TransferDir = SDIO_TRANSFER_DIR_TO_CARD; + config.TransferMode = SDIO_TRANSFER_MODE_BLOCK; + config.DPSM = SDIO_DPSM_ENABLE; + (void)SDIO_ConfigData(hmmc->Instance, &config); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Reads block(s) from a specified address in a card. The Data transfer + * is managed by DMA mode. + * @note This API should be followed by a check on the card state through + * HAL_MMC_GetCardState(). + * @note You could also check the DMA transfer process through the MMC Rx + * interrupt event. + * @param hmmc: Pointer MMC handle + * @param pData: Pointer to the buffer that will contain the received data + * @param BlockAdd: Block Address from where data is to be read + * @param NumberOfBlocks: Number of blocks to read. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_ReadBlocks_DMA(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks) +{ + SDIO_DataInitTypeDef config; + uint32_t errorstate; + uint32_t add = BlockAdd; + + if(NULL == pData) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_PARAM; + return HAL_ERROR; + } + + if(hmmc->State == HAL_MMC_STATE_READY) + { + hmmc->ErrorCode = HAL_MMC_ERROR_NONE; + + if((BlockAdd + NumberOfBlocks) > (hmmc->MmcCard.LogBlockNbr)) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_ADDR_OUT_OF_RANGE; + return HAL_ERROR; + } + + hmmc->State = HAL_MMC_STATE_BUSY; + + /* Initialize data control register */ + hmmc->Instance->DCTRL = 0U; + +#if defined(SDIO_STA_STBITERR) + __HAL_MMC_ENABLE_IT(hmmc, (SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT | SDIO_IT_RXOVERR | SDIO_IT_DATAEND | SDIO_IT_STBITERR)); +#else /* SDIO_STA_STBITERR not defined */ + __HAL_MMC_ENABLE_IT(hmmc, (SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT | SDIO_IT_RXOVERR | SDIO_IT_DATAEND)); +#endif /* SDIO_STA_STBITERR */ + + /* Set the DMA transfer complete callback */ + hmmc->hdmarx->XferCpltCallback = MMC_DMAReceiveCplt; + + /* Set the DMA error callback */ + hmmc->hdmarx->XferErrorCallback = MMC_DMAError; + + /* Set the DMA Abort callback */ + hmmc->hdmarx->XferAbortCallback = NULL; + + if ((hmmc->MmcCard.CardType) != MMC_HIGH_CAPACITY_CARD) + { + add *= 512U; + } + + /* Force DMA Direction */ + hmmc->hdmarx->Init.Direction = DMA_PERIPH_TO_MEMORY; + MODIFY_REG(hmmc->hdmarx->Instance->CR, DMA_SxCR_DIR, hmmc->hdmarx->Init.Direction); + + /* Enable the DMA Channel */ + if(HAL_DMA_Start_IT(hmmc->hdmarx, (uint32_t)&hmmc->Instance->FIFO, (uint32_t)pData, (uint32_t)(MMC_BLOCKSIZE * NumberOfBlocks)/4) != HAL_OK) + { + __HAL_MMC_DISABLE_IT(hmmc, (SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT | SDIO_IT_RXOVERR | SDIO_IT_DATAEND)); + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode = HAL_MMC_ERROR_DMA; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + else + { + /* Enable MMC DMA transfer */ + __HAL_MMC_DMA_ENABLE(hmmc); + + /* Configure the MMC DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = MMC_BLOCKSIZE * NumberOfBlocks; + config.DataBlockSize = SDIO_DATABLOCK_SIZE_512B; + config.TransferDir = SDIO_TRANSFER_DIR_TO_SDIO; + config.TransferMode = SDIO_TRANSFER_MODE_BLOCK; + config.DPSM = SDIO_DPSM_ENABLE; + (void)SDIO_ConfigData(hmmc->Instance, &config); + + /* Read Blocks in DMA mode */ + if(NumberOfBlocks > 1U) + { + hmmc->Context = (MMC_CONTEXT_READ_MULTIPLE_BLOCK | MMC_CONTEXT_DMA); + + /* Read Multi Block command */ + errorstate = SDMMC_CmdReadMultiBlock(hmmc->Instance, add); + } + else + { + hmmc->Context = (MMC_CONTEXT_READ_SINGLE_BLOCK | MMC_CONTEXT_DMA); + + /* Read Single Block command */ + errorstate = SDMMC_CmdReadSingleBlock(hmmc->Instance, add); + } + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + __HAL_MMC_DISABLE_IT(hmmc, (SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT | SDIO_IT_RXOVERR | SDIO_IT_DATAEND)); + hmmc->ErrorCode = errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + return HAL_OK; + } + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Writes block(s) to a specified address in a card. The Data transfer + * is managed by DMA mode. + * @note This API should be followed by a check on the card state through + * HAL_MMC_GetCardState(). + * @note You could also check the DMA transfer process through the MMC Tx + * interrupt event. 
+ * @param hmmc: Pointer to MMC handle + * @param pData: Pointer to the buffer that will contain the data to transmit + * @param BlockAdd: Block Address where data will be written + * @param NumberOfBlocks: Number of blocks to write + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_WriteBlocks_DMA(MMC_HandleTypeDef *hmmc, uint8_t *pData, uint32_t BlockAdd, uint32_t NumberOfBlocks) +{ + SDIO_DataInitTypeDef config; + uint32_t errorstate; + uint32_t add = BlockAdd; + + if(NULL == pData) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_PARAM; + return HAL_ERROR; + } + + if(hmmc->State == HAL_MMC_STATE_READY) + { + hmmc->ErrorCode = HAL_MMC_ERROR_NONE; + + if((BlockAdd + NumberOfBlocks) > (hmmc->MmcCard.LogBlockNbr)) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_ADDR_OUT_OF_RANGE; + return HAL_ERROR; + } + + hmmc->State = HAL_MMC_STATE_BUSY; + + /* Initialize data control register */ + hmmc->Instance->DCTRL = 0U; + + /* Enable MMC Error interrupts */ +#if defined(SDIO_STA_STBITERR) + __HAL_MMC_ENABLE_IT(hmmc, (SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT | SDIO_IT_TXUNDERR | SDIO_IT_STBITERR)); +#else /* SDIO_STA_STBITERR not defined */ + __HAL_MMC_ENABLE_IT(hmmc, (SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT | SDIO_IT_TXUNDERR)); +#endif /* SDIO_STA_STBITERR */ + + /* Set the DMA transfer complete callback */ + hmmc->hdmatx->XferCpltCallback = MMC_DMATransmitCplt; + + /* Set the DMA error callback */ + hmmc->hdmatx->XferErrorCallback = MMC_DMAError; + + /* Set the DMA Abort callback */ + hmmc->hdmatx->XferAbortCallback = NULL; + + if ((hmmc->MmcCard.CardType) != MMC_HIGH_CAPACITY_CARD) + { + add *= 512U; + } + + + /* Write Blocks in Polling mode */ + if(NumberOfBlocks > 1U) + { + hmmc->Context = (MMC_CONTEXT_WRITE_MULTIPLE_BLOCK | MMC_CONTEXT_DMA); + + /* Write Multi Block command */ + errorstate = SDMMC_CmdWriteMultiBlock(hmmc->Instance, add); + } + else + { + hmmc->Context = (MMC_CONTEXT_WRITE_SINGLE_BLOCK | MMC_CONTEXT_DMA); + + /* Write Single Block command */ + errorstate = SDMMC_CmdWriteSingleBlock(hmmc->Instance, add); + } + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + __HAL_MMC_DISABLE_IT(hmmc, (SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT | SDIO_IT_TXUNDERR | SDIO_IT_DATAEND)); + hmmc->ErrorCode |= errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + /* Enable SDIO DMA transfer */ + __HAL_MMC_DMA_ENABLE(hmmc); + + /* Force DMA Direction */ + hmmc->hdmatx->Init.Direction = DMA_MEMORY_TO_PERIPH; + MODIFY_REG(hmmc->hdmatx->Instance->CR, DMA_SxCR_DIR, hmmc->hdmatx->Init.Direction); + + /* Enable the DMA Channel */ + if(HAL_DMA_Start_IT(hmmc->hdmatx, (uint32_t)pData, (uint32_t)&hmmc->Instance->FIFO, (uint32_t)(MMC_BLOCKSIZE * NumberOfBlocks)/4) != HAL_OK) + { + __HAL_MMC_DISABLE_IT(hmmc, (SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT | SDIO_IT_TXUNDERR | SDIO_IT_DATAEND)); + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_DMA; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + else + { + /* Configure the MMC DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = MMC_BLOCKSIZE * NumberOfBlocks; + config.DataBlockSize = SDIO_DATABLOCK_SIZE_512B; + config.TransferDir = SDIO_TRANSFER_DIR_TO_CARD; + config.TransferMode = SDIO_TRANSFER_MODE_BLOCK; + config.DPSM = SDIO_DPSM_ENABLE; + (void)SDIO_ConfigData(hmmc->Instance, &config); + + return HAL_OK; + } + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Erases the specified memory area of the 
given MMC card. + * @note This API should be followed by a check on the card state through + * HAL_MMC_GetCardState(). + * @param hmmc: Pointer to MMC handle + * @param BlockStartAdd: Start Block address + * @param BlockEndAdd: End Block address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_Erase(MMC_HandleTypeDef *hmmc, uint32_t BlockStartAdd, uint32_t BlockEndAdd) +{ + uint32_t errorstate; + uint32_t start_add = BlockStartAdd; + uint32_t end_add = BlockEndAdd; + + if(hmmc->State == HAL_MMC_STATE_READY) + { + hmmc->ErrorCode = HAL_MMC_ERROR_NONE; + + if(end_add < start_add) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_PARAM; + return HAL_ERROR; + } + + if(end_add > (hmmc->MmcCard.LogBlockNbr)) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_ADDR_OUT_OF_RANGE; + return HAL_ERROR; + } + + hmmc->State = HAL_MMC_STATE_BUSY; + + /* Check if the card command class supports erase command */ + if(((hmmc->MmcCard.Class) & SDIO_CCCC_ERASE) == 0U) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_REQUEST_NOT_APPLICABLE; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + if((SDIO_GetResponse(hmmc->Instance, SDIO_RESP1) & SDMMC_CARD_LOCKED) == SDMMC_CARD_LOCKED) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_LOCK_UNLOCK_FAILED; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + if ((hmmc->MmcCard.CardType) != MMC_HIGH_CAPACITY_CARD) + { + start_add *= 512U; + end_add *= 512U; + } + + /* Send CMD35 MMC_ERASE_GRP_START with argument as addr */ + errorstate = SDMMC_CmdEraseStartAdd(hmmc->Instance, start_add); + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + /* Send CMD36 MMC_ERASE_GRP_END with argument as addr */ + errorstate = SDMMC_CmdEraseEndAdd(hmmc->Instance, end_add); + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + /* Send CMD38 ERASE */ + errorstate = SDMMC_CmdErase(hmmc->Instance); + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + hmmc->State = HAL_MMC_STATE_READY; + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief This function handles MMC card interrupt request. 
+ * @param hmmc: Pointer to MMC handle + * @retval None + */ +void HAL_MMC_IRQHandler(MMC_HandleTypeDef *hmmc) +{ + uint32_t errorstate; + uint32_t context = hmmc->Context; + + /* Check for SDIO interrupt flags */ + if((__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_RXFIFOHF) != RESET) && ((context & MMC_CONTEXT_IT) != 0U)) + { + MMC_Read_IT(hmmc); + } + + else if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_DATAEND) != RESET) + { + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_FLAG_DATAEND); + +#if defined(SDIO_STA_STBITERR) + __HAL_MMC_DISABLE_IT(hmmc, SDIO_IT_DATAEND | SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT|\ + SDIO_IT_TXUNDERR| SDIO_IT_RXOVERR | SDIO_IT_STBITERR); +#else /* SDIO_STA_STBITERR not defined */ + __HAL_MMC_DISABLE_IT(hmmc, SDIO_IT_DATAEND | SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT |\ + SDIO_IT_TXUNDERR | SDIO_IT_RXOVERR | SDIO_IT_TXFIFOHE |\ + SDIO_IT_RXFIFOHF); +#endif /* SDIO_STA_STBITERR */ + + hmmc->Instance->DCTRL &= ~(SDIO_DCTRL_DTEN); + + if((context & MMC_CONTEXT_DMA) != 0U) + { + if((context & MMC_CONTEXT_WRITE_MULTIPLE_BLOCK) != 0U) + { + errorstate = SDMMC_CmdStopTransfer(hmmc->Instance); + if(errorstate != HAL_MMC_ERROR_NONE) + { + hmmc->ErrorCode |= errorstate; +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->ErrorCallback(hmmc); +#else + HAL_MMC_ErrorCallback(hmmc); +#endif + } + } + if(((context & MMC_CONTEXT_READ_SINGLE_BLOCK) == 0U) && ((context & MMC_CONTEXT_READ_MULTIPLE_BLOCK) == 0U)) + { + /* Disable the DMA transfer for transmit request by setting the DMAEN bit + in the MMC DCTRL register */ + hmmc->Instance->DCTRL &= (uint32_t)~((uint32_t)SDIO_DCTRL_DMAEN); + + hmmc->State = HAL_MMC_STATE_READY; + +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->TxCpltCallback(hmmc); +#else + HAL_MMC_TxCpltCallback(hmmc); +#endif + } + } + else if((context & MMC_CONTEXT_IT) != 0U) + { + /* Stop Transfer for Write Multi blocks or Read Multi blocks */ + if(((context & MMC_CONTEXT_READ_MULTIPLE_BLOCK) != 0U) || ((context & MMC_CONTEXT_WRITE_MULTIPLE_BLOCK) != 0U)) + { + errorstate = SDMMC_CmdStopTransfer(hmmc->Instance); + if(errorstate != HAL_MMC_ERROR_NONE) + { + hmmc->ErrorCode |= errorstate; +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->ErrorCallback(hmmc); +#else + HAL_MMC_ErrorCallback(hmmc); +#endif + } + } + + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_DATA_FLAGS); + + hmmc->State = HAL_MMC_STATE_READY; + if(((context & MMC_CONTEXT_READ_SINGLE_BLOCK) != 0U) || ((context & MMC_CONTEXT_READ_MULTIPLE_BLOCK) != 0U)) + { +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->RxCpltCallback(hmmc); +#else + HAL_MMC_RxCpltCallback(hmmc); +#endif + } + else + { +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->TxCpltCallback(hmmc); +#else + HAL_MMC_TxCpltCallback(hmmc); +#endif + } + } + else + { + /* Nothing to do */ + } + } + + else if((__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_TXFIFOHE) != RESET) && ((context & MMC_CONTEXT_IT) != 0U)) + { + MMC_Write_IT(hmmc); + } + +#if defined(SDIO_STA_STBITERR) + else if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_DCRCFAIL | SDIO_FLAG_DTIMEOUT | SDIO_FLAG_RXOVERR | SDIO_FLAG_TXUNDERR | SDIO_FLAG_STBITERR) != RESET) +#else /* SDIO_STA_STBITERR not defined */ + else if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_DCRCFAIL | SDIO_FLAG_DTIMEOUT | SDIO_FLAG_RXOVERR | SDIO_FLAG_TXUNDERR) != RESET) +#endif /* SDIO_STA_STBITERR */ + { + /* Set Error 
code */ + if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_DCRCFAIL) != RESET) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_DATA_CRC_FAIL; + } + if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_DTIMEOUT) != RESET) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_DATA_TIMEOUT; + } + if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_RXOVERR) != RESET) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_RX_OVERRUN; + } + if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_TXUNDERR) != RESET) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_TX_UNDERRUN; + } +#if defined(SDIO_STA_STBITERR) + if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_STBITERR) != RESET) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_DATA_TIMEOUT; + } +#endif /* SDIO_STA_STBITERR */ + +#if defined(SDIO_STA_STBITERR) + /* Clear All flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_DATA_FLAGS | SDIO_FLAG_STBITERR); + + /* Disable all interrupts */ + __HAL_MMC_DISABLE_IT(hmmc, SDIO_IT_DATAEND | SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT|\ + SDIO_IT_TXUNDERR| SDIO_IT_RXOVERR | SDIO_IT_STBITERR); +#else /* SDIO_STA_STBITERR */ + /* Clear All flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_DATA_FLAGS); + + /* Disable all interrupts */ + __HAL_MMC_DISABLE_IT(hmmc, SDIO_IT_DATAEND | SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT|\ + SDIO_IT_TXUNDERR| SDIO_IT_RXOVERR); +#endif /* SDIO_STA_STBITERR */ + + hmmc->ErrorCode |= SDMMC_CmdStopTransfer(hmmc->Instance); + + if((context & MMC_CONTEXT_IT) != 0U) + { + /* Set the MMC state to ready to be able to start again the process */ + hmmc->State = HAL_MMC_STATE_READY; +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->ErrorCallback(hmmc); +#else + HAL_MMC_ErrorCallback(hmmc); +#endif /* USE_HAL_MMC_REGISTER_CALLBACKS */ + } + else if((context & MMC_CONTEXT_DMA) != 0U) + { + /* Abort the MMC DMA Streams */ + if(hmmc->hdmatx != NULL) + { + /* Set the DMA Tx abort callback */ + hmmc->hdmatx->XferAbortCallback = MMC_DMATxAbort; + /* Abort DMA in IT mode */ + if(HAL_DMA_Abort_IT(hmmc->hdmatx) != HAL_OK) + { + MMC_DMATxAbort(hmmc->hdmatx); + } + } + else if(hmmc->hdmarx != NULL) + { + /* Set the DMA Rx abort callback */ + hmmc->hdmarx->XferAbortCallback = MMC_DMARxAbort; + /* Abort DMA in IT mode */ + if(HAL_DMA_Abort_IT(hmmc->hdmarx) != HAL_OK) + { + MMC_DMARxAbort(hmmc->hdmarx); + } + } + else + { + hmmc->ErrorCode = HAL_MMC_ERROR_NONE; + hmmc->State = HAL_MMC_STATE_READY; +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->AbortCpltCallback(hmmc); +#else + HAL_MMC_AbortCallback(hmmc); +#endif + } + } + else + { + /* Nothing to do */ + } + } + + else + { + /* Nothing to do */ + } +} + +/** + * @brief return the MMC state + * @param hmmc: Pointer to mmc handle + * @retval HAL state + */ +HAL_MMC_StateTypeDef HAL_MMC_GetState(MMC_HandleTypeDef *hmmc) +{ + return hmmc->State; +} + +/** +* @brief Return the MMC error code +* @param hmmc : Pointer to a MMC_HandleTypeDef structure that contains + * the configuration information. 
+* @retval MMC Error Code +*/ +uint32_t HAL_MMC_GetError(MMC_HandleTypeDef *hmmc) +{ + return hmmc->ErrorCode; +} + +/** + * @brief Tx Transfer completed callbacks + * @param hmmc: Pointer to MMC handle + * @retval None + */ +__weak void HAL_MMC_TxCpltCallback(MMC_HandleTypeDef *hmmc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hmmc); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_MMC_TxCpltCallback can be implemented in the user file + */ +} + +/** + * @brief Rx Transfer completed callbacks + * @param hmmc: Pointer MMC handle + * @retval None + */ +__weak void HAL_MMC_RxCpltCallback(MMC_HandleTypeDef *hmmc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hmmc); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_MMC_RxCpltCallback can be implemented in the user file + */ +} + +/** + * @brief MMC error callbacks + * @param hmmc: Pointer MMC handle + * @retval None + */ +__weak void HAL_MMC_ErrorCallback(MMC_HandleTypeDef *hmmc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hmmc); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_MMC_ErrorCallback can be implemented in the user file + */ +} + +/** + * @brief MMC Abort callbacks + * @param hmmc: Pointer MMC handle + * @retval None + */ +__weak void HAL_MMC_AbortCallback(MMC_HandleTypeDef *hmmc) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hmmc); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_MMC_AbortCallback can be implemented in the user file + */ +} + +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) +/** + * @brief Register a User MMC Callback + * To be used instead of the weak (surcharged) predefined callback + * @param hmmc : MMC handle + * @param CallbackId : ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_MMC_TX_CPLT_CB_ID MMC Tx Complete Callback ID + * @arg @ref HAL_MMC_RX_CPLT_CB_ID MMC Rx Complete Callback ID + * @arg @ref HAL_MMC_ERROR_CB_ID MMC Error Callback ID + * @arg @ref HAL_MMC_ABORT_CB_ID MMC Abort Callback ID + * @arg @ref HAL_MMC_MSP_INIT_CB_ID MMC MspInit Callback ID + * @arg @ref HAL_MMC_MSP_DEINIT_CB_ID MMC MspDeInit Callback ID + * @param pCallback : pointer to the Callback function + * @retval status + */ +HAL_StatusTypeDef HAL_MMC_RegisterCallback(MMC_HandleTypeDef *hmmc, HAL_MMC_CallbackIDTypeDef CallbackId, pMMC_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if(pCallback == NULL) + { + /* Update the error code */ + hmmc->ErrorCode |= HAL_MMC_ERROR_INVALID_CALLBACK; + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hmmc); + + if(hmmc->State == HAL_MMC_STATE_READY) + { + switch (CallbackId) + { + case HAL_MMC_TX_CPLT_CB_ID : + hmmc->TxCpltCallback = pCallback; + break; + case HAL_MMC_RX_CPLT_CB_ID : + hmmc->RxCpltCallback = pCallback; + break; + case HAL_MMC_ERROR_CB_ID : + hmmc->ErrorCallback = pCallback; + break; + case HAL_MMC_ABORT_CB_ID : + hmmc->AbortCpltCallback = pCallback; + break; + case HAL_MMC_MSP_INIT_CB_ID : + hmmc->MspInitCallback = pCallback; + break; + case HAL_MMC_MSP_DEINIT_CB_ID : + hmmc->MspDeInitCallback = pCallback; + break; + default : + /* Update the error code */ + hmmc->ErrorCode |= HAL_MMC_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else if (hmmc->State == 
HAL_MMC_STATE_RESET) + { + switch (CallbackId) + { + case HAL_MMC_MSP_INIT_CB_ID : + hmmc->MspInitCallback = pCallback; + break; + case HAL_MMC_MSP_DEINIT_CB_ID : + hmmc->MspDeInitCallback = pCallback; + break; + default : + /* Update the error code */ + hmmc->ErrorCode |= HAL_MMC_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hmmc->ErrorCode |= HAL_MMC_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hmmc); + return status; +} + +/** + * @brief Unregister a User MMC Callback + * MMC Callback is redirected to the weak (surcharged) predefined callback + * @param hmmc : MMC handle + * @param CallbackId : ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_MMC_TX_CPLT_CB_ID MMC Tx Complete Callback ID + * @arg @ref HAL_MMC_RX_CPLT_CB_ID MMC Rx Complete Callback ID + * @arg @ref HAL_MMC_ERROR_CB_ID MMC Error Callback ID + * @arg @ref HAL_MMC_ABORT_CB_ID MMC Abort Callback ID + * @arg @ref HAL_MMC_MSP_INIT_CB_ID MMC MspInit Callback ID + * @arg @ref HAL_MMC_MSP_DEINIT_CB_ID MMC MspDeInit Callback ID + * @retval status + */ +HAL_StatusTypeDef HAL_MMC_UnRegisterCallback(MMC_HandleTypeDef *hmmc, HAL_MMC_CallbackIDTypeDef CallbackId) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hmmc); + + if(hmmc->State == HAL_MMC_STATE_READY) + { + switch (CallbackId) + { + case HAL_MMC_TX_CPLT_CB_ID : + hmmc->TxCpltCallback = HAL_MMC_TxCpltCallback; + break; + case HAL_MMC_RX_CPLT_CB_ID : + hmmc->RxCpltCallback = HAL_MMC_RxCpltCallback; + break; + case HAL_MMC_ERROR_CB_ID : + hmmc->ErrorCallback = HAL_MMC_ErrorCallback; + break; + case HAL_MMC_ABORT_CB_ID : + hmmc->AbortCpltCallback = HAL_MMC_AbortCallback; + break; + case HAL_MMC_MSP_INIT_CB_ID : + hmmc->MspInitCallback = HAL_MMC_MspInit; + break; + case HAL_MMC_MSP_DEINIT_CB_ID : + hmmc->MspDeInitCallback = HAL_MMC_MspDeInit; + break; + default : + /* Update the error code */ + hmmc->ErrorCode |= HAL_MMC_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else if (hmmc->State == HAL_MMC_STATE_RESET) + { + switch (CallbackId) + { + case HAL_MMC_MSP_INIT_CB_ID : + hmmc->MspInitCallback = HAL_MMC_MspInit; + break; + case HAL_MMC_MSP_DEINIT_CB_ID : + hmmc->MspDeInitCallback = HAL_MMC_MspDeInit; + break; + default : + /* Update the error code */ + hmmc->ErrorCode |= HAL_MMC_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hmmc->ErrorCode |= HAL_MMC_ERROR_INVALID_CALLBACK; + /* update return status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hmmc); + return status; +} +#endif + +/** + * @} + */ + +/** @addtogroup MMC_Exported_Functions_Group3 + * @brief management functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] + This subsection provides a set of functions allowing to control the MMC card + operations and get the related information + +@endverbatim + * @{ + */ + +/** + * @brief Returns information the information of the card which are stored on + * the CID register. 
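+  * @note   Usage sketch only: the handle name hmmc and the local variables
+  *         below are assumptions for illustration, not part of this driver.
+  * @code
+  *   HAL_MMC_CardCIDTypeDef cid;
+  *   if (HAL_MMC_GetCardCID(&hmmc, &cid) == HAL_OK)
+  *   {
+  *     uint8_t  manufacturer = cid.ManufacturerID;
+  *     uint32_t serial       = cid.ProdSN;
+  *   }
+  * @endcode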
+ * @param hmmc: Pointer to MMC handle + * @param pCID: Pointer to a HAL_MMC_CIDTypedef structure that + * contains all CID register parameters + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_GetCardCID(MMC_HandleTypeDef *hmmc, HAL_MMC_CardCIDTypeDef *pCID) +{ + pCID->ManufacturerID = (uint8_t)((hmmc->CID[0] & 0xFF000000U) >> 24U); + + pCID->OEM_AppliID = (uint16_t)((hmmc->CID[0] & 0x00FFFF00U) >> 8U); + + pCID->ProdName1 = (((hmmc->CID[0] & 0x000000FFU) << 24U) | ((hmmc->CID[1] & 0xFFFFFF00U) >> 8U)); + + pCID->ProdName2 = (uint8_t)(hmmc->CID[1] & 0x000000FFU); + + pCID->ProdRev = (uint8_t)((hmmc->CID[2] & 0xFF000000U) >> 24U); + + pCID->ProdSN = (((hmmc->CID[2] & 0x00FFFFFFU) << 8U) | ((hmmc->CID[3] & 0xFF000000U) >> 24U)); + + pCID->Reserved1 = (uint8_t)((hmmc->CID[3] & 0x00F00000U) >> 20U); + + pCID->ManufactDate = (uint16_t)((hmmc->CID[3] & 0x000FFF00U) >> 8U); + + pCID->CID_CRC = (uint8_t)((hmmc->CID[3] & 0x000000FEU) >> 1U); + + pCID->Reserved2 = 1U; + + return HAL_OK; +} + +/** + * @brief Returns information the information of the card which are stored on + * the CSD register. + * @param hmmc: Pointer to MMC handle + * @param pCSD: Pointer to a HAL_MMC_CardCSDTypeDef structure that + * contains all CSD register parameters + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_GetCardCSD(MMC_HandleTypeDef *hmmc, HAL_MMC_CardCSDTypeDef *pCSD) +{ + uint32_t block_nbr = 0; + + pCSD->CSDStruct = (uint8_t)((hmmc->CSD[0] & 0xC0000000U) >> 30U); + + pCSD->SysSpecVersion = (uint8_t)((hmmc->CSD[0] & 0x3C000000U) >> 26U); + + pCSD->Reserved1 = (uint8_t)((hmmc->CSD[0] & 0x03000000U) >> 24U); + + pCSD->TAAC = (uint8_t)((hmmc->CSD[0] & 0x00FF0000U) >> 16U); + + pCSD->NSAC = (uint8_t)((hmmc->CSD[0] & 0x0000FF00U) >> 8U); + + pCSD->MaxBusClkFrec = (uint8_t)(hmmc->CSD[0] & 0x000000FFU); + + pCSD->CardComdClasses = (uint16_t)((hmmc->CSD[1] & 0xFFF00000U) >> 20U); + + pCSD->RdBlockLen = (uint8_t)((hmmc->CSD[1] & 0x000F0000U) >> 16U); + + pCSD->PartBlockRead = (uint8_t)((hmmc->CSD[1] & 0x00008000U) >> 15U); + + pCSD->WrBlockMisalign = (uint8_t)((hmmc->CSD[1] & 0x00004000U) >> 14U); + + pCSD->RdBlockMisalign = (uint8_t)((hmmc->CSD[1] & 0x00002000U) >> 13U); + + pCSD->DSRImpl = (uint8_t)((hmmc->CSD[1] & 0x00001000U) >> 12U); + + pCSD->Reserved2 = 0U; /*!< Reserved */ + + pCSD->DeviceSize = (((hmmc->CSD[1] & 0x000003FFU) << 2U) | ((hmmc->CSD[2] & 0xC0000000U) >> 30U)); + + pCSD->MaxRdCurrentVDDMin = (uint8_t)((hmmc->CSD[2] & 0x38000000U) >> 27U); + + pCSD->MaxRdCurrentVDDMax = (uint8_t)((hmmc->CSD[2] & 0x07000000U) >> 24U); + + pCSD->MaxWrCurrentVDDMin = (uint8_t)((hmmc->CSD[2] & 0x00E00000U) >> 21U); + + pCSD->MaxWrCurrentVDDMax = (uint8_t)((hmmc->CSD[2] & 0x001C0000U) >> 18U); + + pCSD->DeviceSizeMul = (uint8_t)((hmmc->CSD[2] & 0x00038000U) >> 15U); + + if(MMC_ReadExtCSD(hmmc, &block_nbr, 212, 0x0FFFFFFFU) != HAL_OK) /* Field SEC_COUNT [215:212] */ + { + return HAL_ERROR; + } + + if(hmmc->MmcCard.CardType == MMC_LOW_CAPACITY_CARD) + { + hmmc->MmcCard.BlockNbr = (pCSD->DeviceSize + 1U) ; + hmmc->MmcCard.BlockNbr *= (1UL << ((pCSD->DeviceSizeMul & 0x07U) + 2U)); + hmmc->MmcCard.BlockSize = (1UL << (pCSD->RdBlockLen & 0x0FU)); + hmmc->MmcCard.LogBlockNbr = (hmmc->MmcCard.BlockNbr) * ((hmmc->MmcCard.BlockSize) / 512U); + hmmc->MmcCard.LogBlockSize = 512U; + } + else if(hmmc->MmcCard.CardType == MMC_HIGH_CAPACITY_CARD) + { + hmmc->MmcCard.BlockNbr = block_nbr; + hmmc->MmcCard.LogBlockNbr = hmmc->MmcCard.BlockNbr; + hmmc->MmcCard.BlockSize = 512U; + hmmc->MmcCard.LogBlockSize = 
hmmc->MmcCard.BlockSize; + } + else + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_UNSUPPORTED_FEATURE; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + pCSD->EraseGrSize = (uint8_t)((hmmc->CSD[2] & 0x00004000U) >> 14U); + + pCSD->EraseGrMul = (uint8_t)((hmmc->CSD[2] & 0x00003F80U) >> 7U); + + pCSD->WrProtectGrSize = (uint8_t)(hmmc->CSD[2] & 0x0000007FU); + + pCSD->WrProtectGrEnable = (uint8_t)((hmmc->CSD[3] & 0x80000000U) >> 31U); + + pCSD->ManDeflECC = (uint8_t)((hmmc->CSD[3] & 0x60000000U) >> 29U); + + pCSD->WrSpeedFact = (uint8_t)((hmmc->CSD[3] & 0x1C000000U) >> 26U); + + pCSD->MaxWrBlockLen= (uint8_t)((hmmc->CSD[3] & 0x03C00000U) >> 22U); + + pCSD->WriteBlockPaPartial = (uint8_t)((hmmc->CSD[3] & 0x00200000U) >> 21U); + + pCSD->Reserved3 = 0; + + pCSD->ContentProtectAppli = (uint8_t)((hmmc->CSD[3] & 0x00010000U) >> 16U); + + pCSD->FileFormatGroup = (uint8_t)((hmmc->CSD[3] & 0x00008000U) >> 15U); + + pCSD->CopyFlag = (uint8_t)((hmmc->CSD[3] & 0x00004000U) >> 14U); + + pCSD->PermWrProtect = (uint8_t)((hmmc->CSD[3] & 0x00002000U) >> 13U); + + pCSD->TempWrProtect = (uint8_t)((hmmc->CSD[3] & 0x00001000U) >> 12U); + + pCSD->FileFormat = (uint8_t)((hmmc->CSD[3] & 0x00000C00U) >> 10U); + + pCSD->ECC= (uint8_t)((hmmc->CSD[3] & 0x00000300U) >> 8U); + + pCSD->CSD_CRC = (uint8_t)((hmmc->CSD[3] & 0x000000FEU) >> 1U); + + pCSD->Reserved4 = 1; + + return HAL_OK; +} + +/** + * @brief Gets the MMC card info. + * @param hmmc: Pointer to MMC handle + * @param pCardInfo: Pointer to the HAL_MMC_CardInfoTypeDef structure that + * will contain the MMC card status information + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_GetCardInfo(MMC_HandleTypeDef *hmmc, HAL_MMC_CardInfoTypeDef *pCardInfo) +{ + pCardInfo->CardType = (uint32_t)(hmmc->MmcCard.CardType); + pCardInfo->Class = (uint32_t)(hmmc->MmcCard.Class); + pCardInfo->RelCardAdd = (uint32_t)(hmmc->MmcCard.RelCardAdd); + pCardInfo->BlockNbr = (uint32_t)(hmmc->MmcCard.BlockNbr); + pCardInfo->BlockSize = (uint32_t)(hmmc->MmcCard.BlockSize); + pCardInfo->LogBlockNbr = (uint32_t)(hmmc->MmcCard.LogBlockNbr); + pCardInfo->LogBlockSize = (uint32_t)(hmmc->MmcCard.LogBlockSize); + + return HAL_OK; +} + +/** + * @brief Returns information the information of the card which are stored on + * the Extended CSD register. 
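+  * @note   Illustrative call only; the handle and buffer names are assumptions.
+  *         The Extended CSD is 512 bytes, i.e. 128 32-bit words, so the
+  *         SEC_COUNT field (byte offset 212) lands at word index 212 / 4 = 53.
+  * @code
+  *   uint32_t ext_csd[128];
+  *   if (HAL_MMC_GetCardExtCSD(&hmmc, ext_csd, SDMMC_DATATIMEOUT) == HAL_OK)
+  *   {
+  *     uint32_t sec_count = ext_csd[212U / 4U];
+  *   }
+  * @endcode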
+ * @param hmmc Pointer to MMC handle + * @param pExtCSD Pointer to a memory area (512 bytes) that contains all + * Extended CSD register parameters + * @param Timeout Specify timeout value + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_GetCardExtCSD(MMC_HandleTypeDef *hmmc, uint32_t *pExtCSD, uint32_t Timeout) +{ + SDIO_DataInitTypeDef config; + uint32_t errorstate; + uint32_t tickstart = HAL_GetTick(); + uint32_t count; + uint32_t *tmp_buf; + + if(NULL == pExtCSD) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_PARAM; + return HAL_ERROR; + } + + if(hmmc->State == HAL_MMC_STATE_READY) + { + hmmc->ErrorCode = HAL_MMC_ERROR_NONE; + + hmmc->State = HAL_MMC_STATE_BUSY; + + /* Initialize data control register */ + hmmc->Instance->DCTRL = 0; + + /* Initiaize the destination pointer */ + tmp_buf = pExtCSD; + + /* Configure the MMC DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = 512; + config.DataBlockSize = SDIO_DATABLOCK_SIZE_512B; + config.TransferDir = SDIO_TRANSFER_DIR_TO_SDIO; + config.TransferMode = SDIO_TRANSFER_MODE_BLOCK; + config.DPSM = SDIO_DPSM_ENABLE; + (void)SDIO_ConfigData(hmmc->Instance, &config); + + /* Send ExtCSD Read command to Card */ + errorstate = SDMMC_CmdSendEXTCSD(hmmc->Instance, 0); + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + /* Poll on SDMMC flags */ + while(!__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_RXOVERR | SDIO_FLAG_DCRCFAIL | SDIO_FLAG_DTIMEOUT | SDIO_FLAG_DATAEND)) + { + if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_RXFIFOHF)) + { + /* Read data from SDMMC Rx FIFO */ + for(count = 0U; count < 8U; count++) + { + *tmp_buf = SDIO_ReadFIFO(hmmc->Instance); + tmp_buf++; + } + } + + if(((HAL_GetTick()-tickstart) >= Timeout) || (Timeout == 0U)) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_TIMEOUT; + hmmc->State= HAL_MMC_STATE_READY; + return HAL_TIMEOUT; + } + } + + /* Get error state */ + if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_DTIMEOUT)) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_DATA_TIMEOUT; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + else if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_DCRCFAIL)) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_DATA_CRC_FAIL; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + else if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_RXOVERR)) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_RX_OVERRUN; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + else + { + /* Nothing to do */ + } + + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_DATA_FLAGS); + hmmc->State = HAL_MMC_STATE_READY; + } + + return HAL_OK; +} + +/** + * @brief Enables wide bus operation for the requested card if supported by + * card. 
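+  * @note   Usage sketch under the assumption that the card has already been
+  *         initialized (e.g. with HAL_MMC_Init()); the handle name is
+  *         illustrative only.
+  * @code
+  *   HAL_StatusTypeDef st = HAL_MMC_ConfigWideBusOperation(&hmmc, SDIO_BUS_WIDE_4B);
+  * @endcode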
+ * @param hmmc: Pointer to MMC handle + * @param WideMode: Specifies the MMC card wide bus mode + * This parameter can be one of the following values: + * @arg SDIO_BUS_WIDE_8B: 8-bit data transfer + * @arg SDIO_BUS_WIDE_4B: 4-bit data transfer + * @arg SDIO_BUS_WIDE_1B: 1-bit data transfer + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_ConfigWideBusOperation(MMC_HandleTypeDef *hmmc, uint32_t WideMode) +{ + uint32_t count; + SDIO_InitTypeDef Init; + uint32_t errorstate; + uint32_t response = 0U; + + /* Check the parameters */ + assert_param(IS_SDIO_BUS_WIDE(WideMode)); + + /* Change State */ + hmmc->State = HAL_MMC_STATE_BUSY; + + errorstate = MMC_PwrClassUpdate(hmmc, WideMode); + + if(errorstate == HAL_MMC_ERROR_NONE) + { + if(WideMode == SDIO_BUS_WIDE_8B) + { + errorstate = SDMMC_CmdSwitch(hmmc->Instance, 0x03B70200U); + } + else if(WideMode == SDIO_BUS_WIDE_4B) + { + errorstate = SDMMC_CmdSwitch(hmmc->Instance, 0x03B70100U); + } + else if(WideMode == SDIO_BUS_WIDE_1B) + { + errorstate = SDMMC_CmdSwitch(hmmc->Instance, 0x03B70000U); + } + else + { + /* WideMode is not a valid argument*/ + errorstate = HAL_MMC_ERROR_PARAM; + } + + /* Check for switch error and violation of the trial number of sending CMD 13 */ + if(errorstate == HAL_MMC_ERROR_NONE) + { + /* While card is not ready for data and trial number for sending CMD13 is not exceeded */ + count = SDMMC_MAX_TRIAL; + do + { + errorstate = SDMMC_CmdSendStatus(hmmc->Instance, (uint32_t)(((uint32_t)hmmc->MmcCard.RelCardAdd) << 16U)); + if(errorstate != HAL_MMC_ERROR_NONE) + { + break; + } + + /* Get command response */ + response = SDIO_GetResponse(hmmc->Instance, SDIO_RESP1); + count--; + }while(((response & 0x100U) == 0U) && (count != 0U)); + + /* Check the status after the switch command execution */ + if ((count != 0U) && (errorstate == HAL_MMC_ERROR_NONE)) + { + /* Check the bit SWITCH_ERROR of the device status */ + if ((response & 0x80U) != 0U) + { + errorstate = SDMMC_ERROR_GENERAL_UNKNOWN_ERR; + } + else + { + /* Configure the SDIO peripheral */ + Init = hmmc->Init; + Init.BusWide = WideMode; + (void)SDIO_Init(hmmc->Instance, Init); + } + } + else if (count == 0U) + { + errorstate = SDMMC_ERROR_TIMEOUT; + } + else + { + /* Nothing to do */ + } + } + } + + /* Change State */ + hmmc->State = HAL_MMC_STATE_READY; + + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDMMC_STATIC_FLAGS); + hmmc->ErrorCode |= errorstate; + return HAL_ERROR; + } + + return HAL_OK; +} + +/** + * @brief Gets the current mmc card data state. + * @param hmmc: pointer to MMC handle + * @retval Card state + */ +HAL_MMC_CardStateTypeDef HAL_MMC_GetCardState(MMC_HandleTypeDef *hmmc) +{ + uint32_t cardstate; + uint32_t errorstate; + uint32_t resp1 = 0U; + + errorstate = MMC_SendStatus(hmmc, &resp1); + if(errorstate != HAL_MMC_ERROR_NONE) + { + hmmc->ErrorCode |= errorstate; + } + + cardstate = ((resp1 >> 9U) & 0x0FU); + + return (HAL_MMC_CardStateTypeDef)cardstate; +} + +/** + * @brief Abort the current transfer and disable the MMC. + * @param hmmc: pointer to a MMC_HandleTypeDef structure that contains + * the configuration information for MMC module. 
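+  * @note   Illustration only (handle name assumed); after a failed abort the
+  *         reason can be read back with HAL_MMC_GetError().
+  * @code
+  *   if (HAL_MMC_Abort(&hmmc) != HAL_OK)
+  *   {
+  *     uint32_t err = HAL_MMC_GetError(&hmmc);
+  *   }
+  * @endcode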
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_Abort(MMC_HandleTypeDef *hmmc) +{ + HAL_MMC_CardStateTypeDef CardState; + + /* DIsable All interrupts */ + __HAL_MMC_DISABLE_IT(hmmc, SDIO_IT_DATAEND | SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT|\ + SDIO_IT_TXUNDERR| SDIO_IT_RXOVERR); + + /* Clear All flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_DATA_FLAGS); + + if((hmmc->hdmatx != NULL) || (hmmc->hdmarx != NULL)) + { + /* Disable the MMC DMA request */ + hmmc->Instance->DCTRL &= (uint32_t)~((uint32_t)SDIO_DCTRL_DMAEN); + + /* Abort the MMC DMA Tx Stream */ + if(hmmc->hdmatx != NULL) + { + if(HAL_DMA_Abort(hmmc->hdmatx) != HAL_OK) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_DMA; + } + } + /* Abort the MMC DMA Rx Stream */ + if(hmmc->hdmarx != NULL) + { + if(HAL_DMA_Abort(hmmc->hdmarx) != HAL_OK) + { + hmmc->ErrorCode |= HAL_MMC_ERROR_DMA; + } + } + } + + hmmc->State = HAL_MMC_STATE_READY; + + /* Initialize the MMC operation */ + hmmc->Context = MMC_CONTEXT_NONE; + + CardState = HAL_MMC_GetCardState(hmmc); + if((CardState == HAL_MMC_CARD_RECEIVING) || (CardState == HAL_MMC_CARD_SENDING)) + { + hmmc->ErrorCode = SDMMC_CmdStopTransfer(hmmc->Instance); + } + if(hmmc->ErrorCode != HAL_MMC_ERROR_NONE) + { + return HAL_ERROR; + } + return HAL_OK; +} + +/** + * @brief Abort the current transfer and disable the MMC (IT mode). + * @param hmmc: pointer to a MMC_HandleTypeDef structure that contains + * the configuration information for MMC module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MMC_Abort_IT(MMC_HandleTypeDef *hmmc) +{ + HAL_MMC_CardStateTypeDef CardState; + + /* DIsable All interrupts */ + __HAL_MMC_DISABLE_IT(hmmc, SDIO_IT_DATAEND | SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT|\ + SDIO_IT_TXUNDERR| SDIO_IT_RXOVERR); + + /* Clear All flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_DATA_FLAGS); + + if((hmmc->hdmatx != NULL) || (hmmc->hdmarx != NULL)) + { + /* Disable the MMC DMA request */ + hmmc->Instance->DCTRL &= (uint32_t)~((uint32_t)SDIO_DCTRL_DMAEN); + + /* Abort the MMC DMA Tx Stream */ + if(hmmc->hdmatx != NULL) + { + hmmc->hdmatx->XferAbortCallback = MMC_DMATxAbort; + if(HAL_DMA_Abort_IT(hmmc->hdmatx) != HAL_OK) + { + hmmc->hdmatx = NULL; + } + } + /* Abort the MMC DMA Rx Stream */ + if(hmmc->hdmarx != NULL) + { + hmmc->hdmarx->XferAbortCallback = MMC_DMARxAbort; + if(HAL_DMA_Abort_IT(hmmc->hdmarx) != HAL_OK) + { + hmmc->hdmarx = NULL; + } + } + } + + /* No transfer ongoing on both DMA channels*/ + if((hmmc->hdmatx == NULL) && (hmmc->hdmarx == NULL)) + { + CardState = HAL_MMC_GetCardState(hmmc); + hmmc->State = HAL_MMC_STATE_READY; + + if((CardState == HAL_MMC_CARD_RECEIVING) || (CardState == HAL_MMC_CARD_SENDING)) + { + hmmc->ErrorCode = SDMMC_CmdStopTransfer(hmmc->Instance); + } + if(hmmc->ErrorCode != HAL_MMC_ERROR_NONE) + { + return HAL_ERROR; + } + else + { +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->AbortCpltCallback(hmmc); +#else + HAL_MMC_AbortCallback(hmmc); +#endif + } + } + + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +/* Private function ----------------------------------------------------------*/ +/** @addtogroup MMC_Private_Functions + * @{ + */ + +/** + * @brief DMA MMC transmit process complete callback + * @param hdma: DMA handle + * @retval None + */ +static void MMC_DMATransmitCplt(DMA_HandleTypeDef *hdma) +{ + MMC_HandleTypeDef* hmmc = (MMC_HandleTypeDef* )(hdma->Parent); + + /* Enable DATAEND Interrupt */ + __HAL_MMC_ENABLE_IT(hmmc, (SDIO_IT_DATAEND)); +} + +/** + * @brief DMA MMC receive process 
complete callback + * @param hdma: DMA handle + * @retval None + */ +static void MMC_DMAReceiveCplt(DMA_HandleTypeDef *hdma) +{ + MMC_HandleTypeDef* hmmc = (MMC_HandleTypeDef* )(hdma->Parent); + uint32_t errorstate; + + /* Send stop command in multiblock write */ + if(hmmc->Context == (MMC_CONTEXT_READ_MULTIPLE_BLOCK | MMC_CONTEXT_DMA)) + { + errorstate = SDMMC_CmdStopTransfer(hmmc->Instance); + if(errorstate != HAL_MMC_ERROR_NONE) + { + hmmc->ErrorCode |= errorstate; +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->ErrorCallback(hmmc); +#else + HAL_MMC_ErrorCallback(hmmc); +#endif + } + } + + /* Disable the DMA transfer for transmit request by setting the DMAEN bit + in the MMC DCTRL register */ + hmmc->Instance->DCTRL &= (uint32_t)~((uint32_t)SDIO_DCTRL_DMAEN); + + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_DATA_FLAGS); + + hmmc->State = HAL_MMC_STATE_READY; + +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->RxCpltCallback(hmmc); +#else + HAL_MMC_RxCpltCallback(hmmc); +#endif +} + +/** + * @brief DMA MMC communication error callback + * @param hdma: DMA handle + * @retval None + */ +static void MMC_DMAError(DMA_HandleTypeDef *hdma) +{ + MMC_HandleTypeDef* hmmc = (MMC_HandleTypeDef* )(hdma->Parent); + HAL_MMC_CardStateTypeDef CardState; + uint32_t RxErrorCode, TxErrorCode; + + /* if DMA error is FIFO error ignore it */ + if(HAL_DMA_GetError(hdma) != HAL_DMA_ERROR_FE) + { + RxErrorCode = hmmc->hdmarx->ErrorCode; + TxErrorCode = hmmc->hdmatx->ErrorCode; + if((RxErrorCode == HAL_DMA_ERROR_TE) || (TxErrorCode == HAL_DMA_ERROR_TE)) + { + /* Clear All flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + + /* Disable All interrupts */ + __HAL_MMC_DISABLE_IT(hmmc, SDIO_IT_DATAEND | SDIO_IT_DCRCFAIL | SDIO_IT_DTIMEOUT|\ + SDIO_IT_TXUNDERR| SDIO_IT_RXOVERR); + + hmmc->ErrorCode |= HAL_MMC_ERROR_DMA; + CardState = HAL_MMC_GetCardState(hmmc); + if((CardState == HAL_MMC_CARD_RECEIVING) || (CardState == HAL_MMC_CARD_SENDING)) + { + hmmc->ErrorCode |= SDMMC_CmdStopTransfer(hmmc->Instance); + } + + hmmc->State= HAL_MMC_STATE_READY; + } + +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->ErrorCallback(hmmc); +#else + HAL_MMC_ErrorCallback(hmmc); +#endif + } +} + +/** + * @brief DMA MMC Tx Abort callback + * @param hdma: DMA handle + * @retval None + */ +static void MMC_DMATxAbort(DMA_HandleTypeDef *hdma) +{ + MMC_HandleTypeDef* hmmc = (MMC_HandleTypeDef* )(hdma->Parent); + HAL_MMC_CardStateTypeDef CardState; + + if(hmmc->hdmatx != NULL) + { + hmmc->hdmatx = NULL; + } + + /* All DMA channels are aborted */ + if(hmmc->hdmarx == NULL) + { + CardState = HAL_MMC_GetCardState(hmmc); + hmmc->ErrorCode = HAL_MMC_ERROR_NONE; + hmmc->State = HAL_MMC_STATE_READY; + if((CardState == HAL_MMC_CARD_RECEIVING) || (CardState == HAL_MMC_CARD_SENDING)) + { + hmmc->ErrorCode |= SDMMC_CmdStopTransfer(hmmc->Instance); + + if(hmmc->ErrorCode != HAL_MMC_ERROR_NONE) + { +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->AbortCpltCallback(hmmc); +#else + HAL_MMC_AbortCallback(hmmc); +#endif + } + else + { +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->ErrorCallback(hmmc); +#else + HAL_MMC_ErrorCallback(hmmc); +#endif + } + } + } +} + +/** + * @brief DMA MMC Rx Abort callback + * @param hdma: DMA handle + * @retval None + */ +static void 
MMC_DMARxAbort(DMA_HandleTypeDef *hdma) +{ + MMC_HandleTypeDef* hmmc = (MMC_HandleTypeDef* )(hdma->Parent); + HAL_MMC_CardStateTypeDef CardState; + + if(hmmc->hdmarx != NULL) + { + hmmc->hdmarx = NULL; + } + + /* All DMA channels are aborted */ + if(hmmc->hdmatx == NULL) + { + CardState = HAL_MMC_GetCardState(hmmc); + hmmc->ErrorCode = HAL_MMC_ERROR_NONE; + hmmc->State = HAL_MMC_STATE_READY; + if((CardState == HAL_MMC_CARD_RECEIVING) || (CardState == HAL_MMC_CARD_SENDING)) + { + hmmc->ErrorCode |= SDMMC_CmdStopTransfer(hmmc->Instance); + + if(hmmc->ErrorCode != HAL_MMC_ERROR_NONE) + { +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->AbortCpltCallback(hmmc); +#else + HAL_MMC_AbortCallback(hmmc); +#endif + } + else + { +#if defined (USE_HAL_MMC_REGISTER_CALLBACKS) && (USE_HAL_MMC_REGISTER_CALLBACKS == 1U) + hmmc->ErrorCallback(hmmc); +#else + HAL_MMC_ErrorCallback(hmmc); +#endif + } + } + } +} + +/** + * @brief Initializes the mmc card. + * @param hmmc: Pointer to MMC handle + * @retval MMC Card error state + */ +static uint32_t MMC_InitCard(MMC_HandleTypeDef *hmmc) +{ + HAL_MMC_CardCSDTypeDef CSD; + uint32_t errorstate; + uint16_t mmc_rca = 2U; + MMC_InitTypeDef Init; + + /* Check the power State */ + if(SDIO_GetPowerState(hmmc->Instance) == 0U) + { + /* Power off */ + return HAL_MMC_ERROR_REQUEST_NOT_APPLICABLE; + } + + /* Send CMD2 ALL_SEND_CID */ + errorstate = SDMMC_CmdSendCID(hmmc->Instance); + if(errorstate != HAL_MMC_ERROR_NONE) + { + return errorstate; + } + else + { + /* Get Card identification number data */ + hmmc->CID[0U] = SDIO_GetResponse(hmmc->Instance, SDIO_RESP1); + hmmc->CID[1U] = SDIO_GetResponse(hmmc->Instance, SDIO_RESP2); + hmmc->CID[2U] = SDIO_GetResponse(hmmc->Instance, SDIO_RESP3); + hmmc->CID[3U] = SDIO_GetResponse(hmmc->Instance, SDIO_RESP4); + } + + /* Send CMD3 SET_REL_ADDR with RCA = 2 (should be greater than 1) */ + /* MMC Card publishes its RCA. 
*/ + errorstate = SDMMC_CmdSetRelAddMmc(hmmc->Instance, mmc_rca); + if(errorstate != HAL_MMC_ERROR_NONE) + { + return errorstate; + } + + /* Get the MMC card RCA */ + hmmc->MmcCard.RelCardAdd = mmc_rca; + + /* Send CMD9 SEND_CSD with argument as card's RCA */ + errorstate = SDMMC_CmdSendCSD(hmmc->Instance, (uint32_t)(hmmc->MmcCard.RelCardAdd << 16U)); + if(errorstate != HAL_MMC_ERROR_NONE) + { + return errorstate; + } + else + { + /* Get Card Specific Data */ + hmmc->CSD[0U] = SDIO_GetResponse(hmmc->Instance, SDIO_RESP1); + hmmc->CSD[1U] = SDIO_GetResponse(hmmc->Instance, SDIO_RESP2); + hmmc->CSD[2U] = SDIO_GetResponse(hmmc->Instance, SDIO_RESP3); + hmmc->CSD[3U] = SDIO_GetResponse(hmmc->Instance, SDIO_RESP4); + } + + /* Get the Card Class */ + hmmc->MmcCard.Class = (SDIO_GetResponse(hmmc->Instance, SDIO_RESP2) >> 20U); + + /* Select the Card */ + errorstate = SDMMC_CmdSelDesel(hmmc->Instance, (uint32_t)(((uint32_t)hmmc->MmcCard.RelCardAdd) << 16U)); + if(errorstate != HAL_MMC_ERROR_NONE) + { + return errorstate; + } + + /* Get CSD parameters */ + if (HAL_MMC_GetCardCSD(hmmc, &CSD) != HAL_OK) + { + return hmmc->ErrorCode; + } + + /* While card is not ready for data and trial number for sending CMD13 is not exceeded */ + errorstate = SDMMC_CmdSendStatus(hmmc->Instance, (uint32_t)(((uint32_t)hmmc->MmcCard.RelCardAdd) << 16U)); + if(errorstate != HAL_MMC_ERROR_NONE) + { + hmmc->ErrorCode |= errorstate; + } + + /* Get Extended CSD parameters */ + if (HAL_MMC_GetCardExtCSD(hmmc, hmmc->Ext_CSD, SDMMC_DATATIMEOUT) != HAL_OK) + { + return hmmc->ErrorCode; + } + + /* While card is not ready for data and trial number for sending CMD13 is not exceeded */ + errorstate = SDMMC_CmdSendStatus(hmmc->Instance, (uint32_t)(((uint32_t)hmmc->MmcCard.RelCardAdd) << 16U)); + if(errorstate != HAL_MMC_ERROR_NONE) + { + hmmc->ErrorCode |= errorstate; + } + + /* Configure the SDIO peripheral */ + Init = hmmc->Init; + Init.BusWide = SDIO_BUS_WIDE_1B; + (void)SDIO_Init(hmmc->Instance, Init); + + /* All cards are initialized */ + return HAL_MMC_ERROR_NONE; +} + +/** + * @brief Enquires cards about their operating voltage and configures clock + * controls and stores MMC information that will be needed in future + * in the MMC handle. + * @param hmmc: Pointer to MMC handle + * @retval error state + */ +static uint32_t MMC_PowerON(MMC_HandleTypeDef *hmmc) +{ + __IO uint32_t count = 0U; + uint32_t response = 0U, validvoltage = 0U; + uint32_t errorstate; + + /* CMD0: GO_IDLE_STATE */ + errorstate = SDMMC_CmdGoIdleState(hmmc->Instance); + if(errorstate != HAL_MMC_ERROR_NONE) + { + return errorstate; + } + + while(validvoltage == 0U) + { + if(count++ == SDMMC_MAX_VOLT_TRIAL) + { + return HAL_MMC_ERROR_INVALID_VOLTRANGE; + } + + /* SEND CMD1 APP_CMD with voltage range as argument */ + errorstate = SDMMC_CmdOpCondition(hmmc->Instance, MMC_VOLTAGE_RANGE); + if(errorstate != HAL_MMC_ERROR_NONE) + { + return HAL_MMC_ERROR_UNSUPPORTED_FEATURE; + } + + /* Get command response */ + response = SDIO_GetResponse(hmmc->Instance, SDIO_RESP1); + + /* Get operating voltage*/ + validvoltage = (((response >> 31U) == 1U) ? 1U : 0U); + } + + /* When power routine is finished and command returns valid voltage */ + if (((response & (0xFF000000U)) >> 24U) == 0xC0U) + { + hmmc->MmcCard.CardType = MMC_HIGH_CAPACITY_CARD; + } + else + { + hmmc->MmcCard.CardType = MMC_LOW_CAPACITY_CARD; + } + + return HAL_MMC_ERROR_NONE; +} + +/** + * @brief Turns the SDIO output signals off. 
+ * @param hmmc: Pointer to MMC handle + * @retval None + */ +static void MMC_PowerOFF(MMC_HandleTypeDef *hmmc) +{ + /* Set Power State to OFF */ + (void)SDIO_PowerState_OFF(hmmc->Instance); +} + +/** + * @brief Returns the current card's status. + * @param hmmc: Pointer to MMC handle + * @param pCardStatus: pointer to the buffer that will contain the MMC card + * status (Card Status register) + * @retval error state + */ +static uint32_t MMC_SendStatus(MMC_HandleTypeDef *hmmc, uint32_t *pCardStatus) +{ + uint32_t errorstate; + + if(pCardStatus == NULL) + { + return HAL_MMC_ERROR_PARAM; + } + + /* Send Status command */ + errorstate = SDMMC_CmdSendStatus(hmmc->Instance, (uint32_t)(hmmc->MmcCard.RelCardAdd << 16U)); + if(errorstate != HAL_MMC_ERROR_NONE) + { + return errorstate; + } + + /* Get MMC card status */ + *pCardStatus = SDIO_GetResponse(hmmc->Instance, SDIO_RESP1); + + return HAL_MMC_ERROR_NONE; +} + +/** + * @brief Reads extended CSD register to get the sectors number of the device + * @param hmmc: Pointer to MMC handle + * @param pFieldData: Pointer to the read buffer + * @param FieldIndex: Index of the field to be read + * @param Timeout: Specify timeout value + * @retval HAL status + */ +static uint32_t MMC_ReadExtCSD(MMC_HandleTypeDef *hmmc, uint32_t *pFieldData, uint16_t FieldIndex, uint32_t Timeout) +{ + SDIO_DataInitTypeDef config; + uint32_t errorstate; + uint32_t tickstart = HAL_GetTick(); + uint32_t count; + uint32_t i = 0; + uint32_t tmp_data; + + hmmc->ErrorCode = HAL_MMC_ERROR_NONE; + + /* Initialize data control register */ + hmmc->Instance->DCTRL = 0; + + /* Configure the MMC DPSM (Data Path State Machine) */ + config.DataTimeOut = SDMMC_DATATIMEOUT; + config.DataLength = 512; + config.DataBlockSize = SDIO_DATABLOCK_SIZE_512B; + config.TransferDir = SDIO_TRANSFER_DIR_TO_SDIO; + config.TransferMode = SDIO_TRANSFER_MODE_BLOCK; + config.DPSM = SDIO_DPSM_ENABLE; + (void)SDIO_ConfigData(hmmc->Instance, &config); + + /* Set Block Size for Card */ + errorstate = SDMMC_CmdSendEXTCSD(hmmc->Instance, 0); + if(errorstate != HAL_MMC_ERROR_NONE) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= errorstate; + hmmc->State = HAL_MMC_STATE_READY; + return HAL_ERROR; + } + + /* Poll on SDMMC flags */ + while(!__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_RXOVERR | SDIO_FLAG_DCRCFAIL | SDIO_FLAG_DTIMEOUT | SDIO_FLAG_DATAEND)) + { + if(__HAL_MMC_GET_FLAG(hmmc, SDIO_FLAG_RXFIFOHF)) + { + /* Read data from SDMMC Rx FIFO */ + for(count = 0U; count < 8U; count++) + { + tmp_data = SDIO_ReadFIFO(hmmc->Instance); + /* eg : SEC_COUNT : FieldIndex = 212 => i+count = 53 */ + /* DEVICE_TYPE : FieldIndex = 196 => i+count = 49 */ + if ((i + count) == ((uint32_t)FieldIndex/4U)) + { + *pFieldData = tmp_data; + } + } + i += 8U; + } + + if(((HAL_GetTick()-tickstart) >= Timeout) || (Timeout == 0U)) + { + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_FLAGS); + hmmc->ErrorCode |= HAL_MMC_ERROR_TIMEOUT; + hmmc->State= HAL_MMC_STATE_READY; + return HAL_TIMEOUT; + } + } + + /* While card is not ready for data and trial number for sending CMD13 is not exceeded */ + errorstate = SDMMC_CmdSendStatus(hmmc->Instance, (uint32_t)(((uint32_t)hmmc->MmcCard.RelCardAdd) << 16)); + if(errorstate != HAL_MMC_ERROR_NONE) + { + hmmc->ErrorCode |= errorstate; + } + + /* Clear all the static flags */ + __HAL_MMC_CLEAR_FLAG(hmmc, SDIO_STATIC_DATA_FLAGS); + + hmmc->State = HAL_MMC_STATE_READY; + + return HAL_OK; +} + + +/** + * @brief Wrap up reading in 
non-blocking mode. + * @param hmmc: pointer to a MMC_HandleTypeDef structure that contains + * the configuration information. + * @retval None + */ +static void MMC_Read_IT(MMC_HandleTypeDef *hmmc) +{ + uint32_t count, data, dataremaining; + uint8_t* tmp; + + tmp = hmmc->pRxBuffPtr; + dataremaining = hmmc->RxXferSize; + + if (dataremaining > 0U) + { + /* Read data from SDIO Rx FIFO */ + for(count = 0U; count < 8U; count++) + { + data = SDIO_ReadFIFO(hmmc->Instance); + *tmp = (uint8_t)(data & 0xFFU); + tmp++; + dataremaining--; + *tmp = (uint8_t)((data >> 8U) & 0xFFU); + tmp++; + dataremaining--; + *tmp = (uint8_t)((data >> 16U) & 0xFFU); + tmp++; + dataremaining--; + *tmp = (uint8_t)((data >> 24U) & 0xFFU); + tmp++; + dataremaining--; + } + + hmmc->pRxBuffPtr = tmp; + hmmc->RxXferSize = dataremaining; + } +} + +/** + * @brief Wrap up writing in non-blocking mode. + * @param hmmc: pointer to a MMC_HandleTypeDef structure that contains + * the configuration information. + * @retval None + */ +static void MMC_Write_IT(MMC_HandleTypeDef *hmmc) +{ + uint32_t count, data, dataremaining; + uint8_t* tmp; + + tmp = hmmc->pTxBuffPtr; + dataremaining = hmmc->TxXferSize; + + if (dataremaining > 0U) + { + /* Write data to SDIO Tx FIFO */ + for(count = 0U; count < 8U; count++) + { + data = (uint32_t)(*tmp); + tmp++; + dataremaining--; + data |= ((uint32_t)(*tmp) << 8U); + tmp++; + dataremaining--; + data |= ((uint32_t)(*tmp) << 16U); + tmp++; + dataremaining--; + data |= ((uint32_t)(*tmp) << 24U); + tmp++; + dataremaining--; + (void)SDIO_WriteFIFO(hmmc->Instance, &data); + } + + hmmc->pTxBuffPtr = tmp; + hmmc->TxXferSize = dataremaining; + } +} + +/** + * @brief Update the power class of the device. + * @param hmmc MMC handle + * @param Wide Wide of MMC bus + * @param Speed Speed of the MMC bus + * @retval MMC Card error state + */ +static uint32_t MMC_PwrClassUpdate(MMC_HandleTypeDef *hmmc, uint32_t Wide) +{ + uint32_t count; + uint32_t response = 0U; + uint32_t errorstate = HAL_MMC_ERROR_NONE; + uint32_t power_class, supported_pwr_class; + + if((Wide == SDIO_BUS_WIDE_8B) || (Wide == SDIO_BUS_WIDE_4B)) + { + power_class = 0U; /* Default value after power-on or software reset */ + + /* Read the PowerClass field of the Extended CSD register */ + if(MMC_ReadExtCSD(hmmc, &power_class, 187, SDMMC_DATATIMEOUT) != HAL_OK) /* Field POWER_CLASS [187] */ + { + errorstate = SDMMC_ERROR_GENERAL_UNKNOWN_ERR; + } + else + { + power_class = ((power_class >> 24U) & 0x000000FFU); + } + + /* Get the supported PowerClass field of the Extended CSD register */ + /* Field PWR_CL_26_xxx [201 or 203] */ + supported_pwr_class = ((hmmc->Ext_CSD[(MMC_EXT_CSD_PWR_CL_26_INDEX/4)] >> MMC_EXT_CSD_PWR_CL_26_POS) & 0x000000FFU); + + if(errorstate == HAL_MMC_ERROR_NONE) + { + if(Wide == SDIO_BUS_WIDE_8B) + { + /* Bit [7:4] : power class for 8-bits bus configuration - Bit [3:0] : power class for 4-bits bus configuration */ + supported_pwr_class = (supported_pwr_class >> 4U); + } + + if ((power_class & 0x0FU) != (supported_pwr_class & 0x0FU)) + { + /* Need to change current power class */ + errorstate = SDMMC_CmdSwitch(hmmc->Instance, (0x03BB0000U | ((supported_pwr_class & 0x0FU) << 8U))); + + if(errorstate == HAL_MMC_ERROR_NONE) + { + /* While card is not ready for data and trial number for sending CMD13 is not exceeded */ + count = SDMMC_MAX_TRIAL; + do + { + errorstate = SDMMC_CmdSendStatus(hmmc->Instance, (uint32_t)(((uint32_t)hmmc->MmcCard.RelCardAdd) << 16U)); + if(errorstate != HAL_MMC_ERROR_NONE) + { + break; + } + + /* Get 
command response */ + response = SDIO_GetResponse(hmmc->Instance, SDIO_RESP1); + count--; + }while(((response & 0x100U) == 0U) && (count != 0U)); + + /* Check the status after the switch command execution */ + if ((count != 0U) && (errorstate == HAL_MMC_ERROR_NONE)) + { + /* Check the bit SWITCH_ERROR of the device status */ + if ((response & 0x80U) != 0U) + { + errorstate = SDMMC_ERROR_UNSUPPORTED_FEATURE; + } + } + else if (count == 0U) + { + errorstate = SDMMC_ERROR_TIMEOUT; + } + else + { + /* Nothing to do */ + } + } + } + } + } + + return errorstate; +} + +/** + * @} + */ + +#endif /* SDIO */ + +#endif /* HAL_MMC_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pcd.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pcd.c index 13aace25..d53e0b1c 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pcd.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pcd.c @@ -10,6 +10,17 @@ * + Peripheral Control functions * + Peripheral State functions * + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim ============================================================================== ##### How to use this driver ##### @@ -41,17 +52,6 @@ @endverbatim ****************************************************************************** - * @attention - * - *
© Copyright (c) 2016 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * - ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ @@ -122,7 +122,9 @@ static HAL_StatusTypeDef PCD_EP_OutSetupPacket_int(PCD_HandleTypeDef *hpcd, uint */ HAL_StatusTypeDef HAL_PCD_Init(PCD_HandleTypeDef *hpcd) { - USB_OTG_GlobalTypeDef *USBx; +#if defined (USB_OTG_FS) + const USB_OTG_GlobalTypeDef *USBx; +#endif /* defined (USB_OTG_FS) */ uint8_t i; /* Check the PCD handle allocation */ @@ -134,7 +136,9 @@ HAL_StatusTypeDef HAL_PCD_Init(PCD_HandleTypeDef *hpcd) /* Check the parameters */ assert_param(IS_PCD_ALL_INSTANCE(hpcd->Instance)); +#if defined (USB_OTG_FS) USBx = hpcd->Instance; +#endif /* defined (USB_OTG_FS) */ if (hpcd->State == HAL_PCD_STATE_RESET) { @@ -171,11 +175,13 @@ HAL_StatusTypeDef HAL_PCD_Init(PCD_HandleTypeDef *hpcd) hpcd->State = HAL_PCD_STATE_BUSY; +#if defined (USB_OTG_FS) /* Disable DMA mode for FS instance */ - if ((USBx->CID & (0x1U << 8)) == 0U) + if (USBx == USB_OTG_FS) { hpcd->Init.dma_enable = 0U; } +#endif /* defined (USB_OTG_FS) */ /* Disable the Interrupts */ __HAL_PCD_DISABLE(hpcd); @@ -187,8 +193,12 @@ HAL_StatusTypeDef HAL_PCD_Init(PCD_HandleTypeDef *hpcd) return HAL_ERROR; } - /* Force Device Mode*/ - (void)USB_SetCurrentMode(hpcd->Instance, USB_DEVICE_MODE); + /* Force Device Mode */ + if (USB_SetCurrentMode(hpcd->Instance, USB_DEVICE_MODE) != HAL_OK) + { + hpcd->State = HAL_PCD_STATE_ERROR; + return HAL_ERROR; + } /* Init endpoints structures */ for (i = 0U; i < hpcd->Init.dev_endpoints; i++) @@ -224,13 +234,17 @@ HAL_StatusTypeDef HAL_PCD_Init(PCD_HandleTypeDef *hpcd) hpcd->USB_Address = 0U; hpcd->State = HAL_PCD_STATE_READY; - #if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) \ + || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) \ + || defined(STM32F423xx) /* Activate LPM */ if (hpcd->Init.lpm_enable == 1U) { (void)HAL_PCDEx_ActivateLPM(hpcd); } -#endif /* defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) */ +#endif /* defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || + defined(STM32F423xx) */ (void)USB_DevDisconnect(hpcd->Instance); return HAL_OK; @@ -318,7 +332,7 @@ __weak void HAL_PCD_MspDeInit(PCD_HandleTypeDef *hpcd) * @arg @ref HAL_PCD_SUSPEND_CB_ID USB PCD Suspend callback ID * @arg @ref HAL_PCD_RESUME_CB_ID USB PCD Resume callback ID * @arg @ref HAL_PCD_CONNECT_CB_ID USB PCD Connect callback ID - * @arg @ref HAL_PCD_DISCONNECT_CB_ID OTG PCD Disconnect callback ID + * @arg @ref HAL_PCD_DISCONNECT_CB_ID USB PCD Disconnect callback ID * @arg @ref HAL_PCD_MSPINIT_CB_ID MspDeInit callback ID * @arg @ref HAL_PCD_MSPDEINIT_CB_ID MspDeInit callback ID * @param pCallback pointer to 
the Callback function @@ -422,7 +436,7 @@ HAL_StatusTypeDef HAL_PCD_RegisterCallback(PCD_HandleTypeDef *hpcd, /** * @brief Unregister an USB PCD Callback - * USB PCD callabck is redirected to the weak predefined callback + * USB PCD callback is redirected to the weak predefined callback * @param hpcd USB PCD handle * @param CallbackID ID of the callback to be unregistered * This parameter can be one of the following values: @@ -432,7 +446,7 @@ HAL_StatusTypeDef HAL_PCD_RegisterCallback(PCD_HandleTypeDef *hpcd, * @arg @ref HAL_PCD_SUSPEND_CB_ID USB PCD Suspend callback ID * @arg @ref HAL_PCD_RESUME_CB_ID USB PCD Resume callback ID * @arg @ref HAL_PCD_CONNECT_CB_ID USB PCD Connect callback ID - * @arg @ref HAL_PCD_DISCONNECT_CB_ID OTG PCD Disconnect callback ID + * @arg @ref HAL_PCD_DISCONNECT_CB_ID USB PCD Disconnect callback ID * @arg @ref HAL_PCD_MSPINIT_CB_ID MspDeInit callback ID * @arg @ref HAL_PCD_MSPDEINIT_CB_ID MspDeInit callback ID * @retval HAL status @@ -721,7 +735,8 @@ HAL_StatusTypeDef HAL_PCD_RegisterIsoOutIncpltCallback(PCD_HandleTypeDef *hpcd, /** * @brief Unregister the USB PCD Iso OUT incomplete Callback - * USB PCD Iso OUT incomplete Callback is redirected to the weak HAL_PCD_ISOOUTIncompleteCallback() predefined callback + * USB PCD Iso OUT incomplete Callback is redirected + * to the weak HAL_PCD_ISOOUTIncompleteCallback() predefined callback * @param hpcd PCD handle * @retval HAL status */ @@ -795,7 +810,8 @@ HAL_StatusTypeDef HAL_PCD_RegisterIsoInIncpltCallback(PCD_HandleTypeDef *hpcd, /** * @brief Unregister the USB PCD Iso IN incomplete Callback - * USB PCD Iso IN incomplete Callback is redirected to the weak HAL_PCD_ISOINIncompleteCallback() predefined callback + * USB PCD Iso IN incomplete Callback is redirected + * to the weak HAL_PCD_ISOINIncompleteCallback() predefined callback * @param hpcd PCD handle * @retval HAL status */ @@ -1002,8 +1018,8 @@ HAL_StatusTypeDef HAL_PCD_Start(PCD_HandleTypeDef *hpcd) __HAL_LOCK(hpcd); - if ((hpcd->Init.battery_charging_enable == 1U) && - (hpcd->Init.phy_itface != USB_OTG_ULPI_PHY)) + if (((USBx->GUSBCFG & USB_OTG_GUSBCFG_PHYSEL) != 0U) && + (hpcd->Init.battery_charging_enable == 1U)) { /* Enable USB Transceiver */ USBx->GCCFG |= USB_OTG_GCCFG_PWRDWN; @@ -1031,12 +1047,13 @@ HAL_StatusTypeDef HAL_PCD_Stop(PCD_HandleTypeDef *hpcd) (void)USB_FlushTxFifo(hpcd->Instance, 0x10U); - if ((hpcd->Init.battery_charging_enable == 1U) && - (hpcd->Init.phy_itface != USB_OTG_ULPI_PHY)) + if (((USBx->GUSBCFG & USB_OTG_GUSBCFG_PHYSEL) != 0U) && + (hpcd->Init.battery_charging_enable == 1U)) { /* Disable USB Transceiver */ USBx->GCCFG &= ~(USB_OTG_GCCFG_PWRDWN); } + __HAL_UNLOCK(hpcd); return HAL_OK; @@ -1052,9 +1069,13 @@ void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) { USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; uint32_t USBx_BASE = (uint32_t)USBx; - uint32_t i, ep_intr, epint, epnum; - uint32_t fifoemptymsk, temp; USB_OTG_EPTypeDef *ep; + uint32_t i; + uint32_t ep_intr; + uint32_t epint; + uint32_t epnum; + uint32_t fifoemptymsk; + uint32_t RegVal; /* ensure that we are in device mode */ if (USB_GetMode(hpcd->Instance) == USB_OTG_MODE_DEVICE) @@ -1065,6 +1086,9 @@ void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) return; } + /* store current frame number */ + hpcd->FrameNumber = (USBx_DEVICE->DSTS & USB_OTG_DSTS_FNSOF_Msk) >> USB_OTG_DSTS_FNSOF_Pos; + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_MMIS)) { /* incorrect mode, acknowledge the interrupt */ @@ -1076,30 +1100,31 @@ void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) { 
USB_MASK_INTERRUPT(hpcd->Instance, USB_OTG_GINTSTS_RXFLVL); - temp = USBx->GRXSTSP; + RegVal = USBx->GRXSTSP; - ep = &hpcd->OUT_ep[temp & USB_OTG_GRXSTSP_EPNUM]; + ep = &hpcd->OUT_ep[RegVal & USB_OTG_GRXSTSP_EPNUM]; - if (((temp & USB_OTG_GRXSTSP_PKTSTS) >> 17) == STS_DATA_UPDT) + if (((RegVal & USB_OTG_GRXSTSP_PKTSTS) >> 17) == STS_DATA_UPDT) { - if ((temp & USB_OTG_GRXSTSP_BCNT) != 0U) + if ((RegVal & USB_OTG_GRXSTSP_BCNT) != 0U) { (void)USB_ReadPacket(USBx, ep->xfer_buff, - (uint16_t)((temp & USB_OTG_GRXSTSP_BCNT) >> 4)); + (uint16_t)((RegVal & USB_OTG_GRXSTSP_BCNT) >> 4)); - ep->xfer_buff += (temp & USB_OTG_GRXSTSP_BCNT) >> 4; - ep->xfer_count += (temp & USB_OTG_GRXSTSP_BCNT) >> 4; + ep->xfer_buff += (RegVal & USB_OTG_GRXSTSP_BCNT) >> 4; + ep->xfer_count += (RegVal & USB_OTG_GRXSTSP_BCNT) >> 4; } } - else if (((temp & USB_OTG_GRXSTSP_PKTSTS) >> 17) == STS_SETUP_UPDT) + else if (((RegVal & USB_OTG_GRXSTSP_PKTSTS) >> 17) == STS_SETUP_UPDT) { (void)USB_ReadPacket(USBx, (uint8_t *)hpcd->Setup, 8U); - ep->xfer_count += (temp & USB_OTG_GRXSTSP_BCNT) >> 4; + ep->xfer_count += (RegVal & USB_OTG_GRXSTSP_BCNT) >> 4; } else { /* ... */ } + USB_UNMASK_INTERRUPT(hpcd->Instance, USB_OTG_GINTSTS_RXFLVL); } @@ -1134,6 +1159,30 @@ void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_OTEPDIS); } + /* Clear OUT Endpoint disable interrupt */ + if ((epint & USB_OTG_DOEPINT_EPDISD) == USB_OTG_DOEPINT_EPDISD) + { + if ((USBx->GINTSTS & USB_OTG_GINTSTS_BOUTNAKEFF) == USB_OTG_GINTSTS_BOUTNAKEFF) + { + USBx_DEVICE->DCTL |= USB_OTG_DCTL_CGONAK; + } + + ep = &hpcd->OUT_ep[epnum]; + + if (ep->is_iso_incomplete == 1U) + { + ep->is_iso_incomplete = 0U; + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->ISOOUTIncompleteCallback(hpcd, (uint8_t)epnum); +#else + HAL_PCD_ISOOUTIncompleteCallback(hpcd, (uint8_t)epnum); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_EPDISD); + } + /* Clear Status Phase Received interrupt */ if ((epint & USB_OTG_DOEPINT_OTEPSPR) == USB_OTG_DOEPINT_OTEPSPR) { @@ -1203,6 +1252,21 @@ void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) } if ((epint & USB_OTG_DIEPINT_EPDISD) == USB_OTG_DIEPINT_EPDISD) { + (void)USB_FlushTxFifo(USBx, epnum); + + ep = &hpcd->IN_ep[epnum]; + + if (ep->is_iso_incomplete == 1U) + { + ep->is_iso_incomplete = 0U; + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->ISOINIncompleteCallback(hpcd, (uint8_t)epnum); +#else + HAL_PCD_ISOINIncompleteCallback(hpcd, (uint8_t)epnum); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + CLEAR_IN_EP_INTR(epnum, USB_OTG_DIEPINT_EPDISD); } if ((epint & USB_OTG_DIEPINT_TXFE) == USB_OTG_DIEPINT_TXFE) @@ -1256,7 +1320,9 @@ void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) } __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_USBSUSP); } -#if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) \ + || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) \ + || defined(STM32F423xx) /* Handle LPM Interrupt */ if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_LPMINT)) { @@ -1282,7 +1348,9 @@ void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) #endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ } } -#endif /* defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || 
defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) */ +#endif /* defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || + defined(STM32F423xx) */ /* Handle Reset Interrupt */ if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_USBRST)) { @@ -1293,7 +1361,6 @@ void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) { USBx_INEP(i)->DIEPINT = 0xFB7FU; USBx_INEP(i)->DIEPCTL &= ~USB_OTG_DIEPCTL_STALL; - USBx_INEP(i)->DIEPCTL |= USB_OTG_DIEPCTL_SNAK; USBx_OUTEP(i)->DOEPINT = 0xFB7FU; USBx_OUTEP(i)->DOEPCTL &= ~USB_OTG_DOEPCTL_STALL; USBx_OUTEP(i)->DOEPCTL |= USB_OTG_DOEPCTL_SNAK; @@ -1365,18 +1432,37 @@ void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_SOF); } + /* Handle Global OUT NAK effective Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_BOUTNAKEFF)) + { + USBx->GINTMSK &= ~USB_OTG_GINTMSK_GONAKEFFM; + + for (epnum = 1U; epnum < hpcd->Init.dev_endpoints; epnum++) + { + if (hpcd->OUT_ep[epnum].is_iso_incomplete == 1U) + { + /* Abort current transaction and disable the EP */ + (void)HAL_PCD_EP_Abort(hpcd, (uint8_t)epnum); + } + } + } + /* Handle Incomplete ISO IN Interrupt */ if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_IISOIXFR)) { - /* Keep application checking the corresponding Iso IN endpoint - causing the incomplete Interrupt */ - epnum = 0U; + for (epnum = 1U; epnum < hpcd->Init.dev_endpoints; epnum++) + { + RegVal = USBx_INEP(epnum)->DIEPCTL; -#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) - hpcd->ISOINIncompleteCallback(hpcd, (uint8_t)epnum); -#else - HAL_PCD_ISOINIncompleteCallback(hpcd, (uint8_t)epnum); -#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + if ((hpcd->IN_ep[epnum].type == EP_TYPE_ISOC) && + ((RegVal & USB_OTG_DIEPCTL_EPENA) == USB_OTG_DIEPCTL_EPENA)) + { + hpcd->IN_ep[epnum].is_iso_incomplete = 1U; + + /* Abort current transaction and disable the EP */ + (void)HAL_PCD_EP_Abort(hpcd, (uint8_t)(epnum | 0x80U)); + } + } __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_IISOIXFR); } @@ -1384,15 +1470,25 @@ void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) /* Handle Incomplete ISO OUT Interrupt */ if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_PXFR_INCOMPISOOUT)) { - /* Keep application checking the corresponding Iso OUT endpoint - causing the incomplete Interrupt */ - epnum = 0U; + for (epnum = 1U; epnum < hpcd->Init.dev_endpoints; epnum++) + { + RegVal = USBx_OUTEP(epnum)->DOEPCTL; -#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) - hpcd->ISOOUTIncompleteCallback(hpcd, (uint8_t)epnum); -#else - HAL_PCD_ISOOUTIncompleteCallback(hpcd, (uint8_t)epnum); -#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + if ((hpcd->OUT_ep[epnum].type == EP_TYPE_ISOC) && + ((RegVal & USB_OTG_DOEPCTL_EPENA) == USB_OTG_DOEPCTL_EPENA) && + ((RegVal & (0x1U << 16)) == (hpcd->FrameNumber & 0x1U))) + { + hpcd->OUT_ep[epnum].is_iso_incomplete = 1U; + + USBx->GINTMSK |= USB_OTG_GINTMSK_GONAKEFFM; + + if ((USBx->GINTSTS & USB_OTG_GINTSTS_BOUTNAKEFF) == 0U) + { + USBx_DEVICE->DCTL |= USB_OTG_DCTL_SGONAK; + break; + } + } + } __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_PXFR_INCOMPISOOUT); } @@ -1412,9 +1508,9 @@ void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) /* Handle Disconnection event Interrupt */ if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_OTGINT)) { - temp = hpcd->Instance->GOTGINT; + RegVal = hpcd->Instance->GOTGINT; - if ((temp & USB_OTG_GOTGINT_SEDET) == 
USB_OTG_GOTGINT_SEDET) + if ((RegVal & USB_OTG_GOTGINT_SEDET) == USB_OTG_GOTGINT_SEDET) { #if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) hpcd->DisconnectCallback(hpcd); @@ -1422,7 +1518,7 @@ void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) HAL_PCD_DisconnectCallback(hpcd); #endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ } - hpcd->Instance->GOTGINT |= temp; + hpcd->Instance->GOTGINT |= RegVal; } } } @@ -1435,16 +1531,17 @@ void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) */ void HAL_PCD_WKUP_IRQHandler(PCD_HandleTypeDef *hpcd) { +#if defined (USB_OTG_FS) USB_OTG_GlobalTypeDef *USBx; - USBx = hpcd->Instance; - if ((USBx->CID & (0x1U << 8)) == 0U) + if (USBx == USB_OTG_FS) { /* Clear EXTI pending Bit */ __HAL_USB_OTG_FS_WAKEUP_EXTI_CLEAR_FLAG(); } else +#endif /* defined (USB_OTG_FS) */ { /* Clear EXTI pending Bit */ __HAL_USB_OTG_HS_WAKEUP_EXTI_CLEAR_FLAG(); @@ -1651,19 +1748,16 @@ __weak void HAL_PCD_DisconnectCallback(PCD_HandleTypeDef *hpcd) */ HAL_StatusTypeDef HAL_PCD_DevConnect(PCD_HandleTypeDef *hpcd) { -#if defined (USB_OTG_FS) || defined (USB_OTG_HS) USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; -#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ __HAL_LOCK(hpcd); -#if defined (USB_OTG_FS) || defined (USB_OTG_HS) - if ((hpcd->Init.battery_charging_enable == 1U) && - (hpcd->Init.phy_itface != USB_OTG_ULPI_PHY)) + + if (((USBx->GUSBCFG & USB_OTG_GUSBCFG_PHYSEL) != 0U) && + (hpcd->Init.battery_charging_enable == 1U)) { /* Enable USB Transceiver */ USBx->GCCFG |= USB_OTG_GCCFG_PWRDWN; } -#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ (void)USB_DevConnect(hpcd->Instance); __HAL_UNLOCK(hpcd); @@ -1677,21 +1771,17 @@ HAL_StatusTypeDef HAL_PCD_DevConnect(PCD_HandleTypeDef *hpcd) */ HAL_StatusTypeDef HAL_PCD_DevDisconnect(PCD_HandleTypeDef *hpcd) { -#if defined (USB_OTG_FS) || defined (USB_OTG_HS) USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; -#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ __HAL_LOCK(hpcd); (void)USB_DevDisconnect(hpcd->Instance); -#if defined (USB_OTG_FS) || defined (USB_OTG_HS) - if ((hpcd->Init.battery_charging_enable == 1U) && - (hpcd->Init.phy_itface != USB_OTG_ULPI_PHY)) + if (((USBx->GUSBCFG & USB_OTG_GUSBCFG_PHYSEL) != 0U) && + (hpcd->Init.battery_charging_enable == 1U)) { /* Disable USB Transceiver */ USBx->GCCFG &= ~(USB_OTG_GCCFG_PWRDWN); } -#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ __HAL_UNLOCK(hpcd); @@ -1724,7 +1814,7 @@ HAL_StatusTypeDef HAL_PCD_SetAddress(PCD_HandleTypeDef *hpcd, uint8_t address) HAL_StatusTypeDef HAL_PCD_EP_Open(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, uint16_t ep_mps, uint8_t ep_type) { - HAL_StatusTypeDef ret = HAL_OK; + HAL_StatusTypeDef ret = HAL_OK; PCD_EPTypeDef *ep; if ((ep_addr & 0x80U) == 0x80U) @@ -1739,7 +1829,7 @@ HAL_StatusTypeDef HAL_PCD_EP_Open(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, } ep->num = ep_addr & EP_ADDR_MSK; - ep->maxpacket = ep_mps; + ep->maxpacket = (uint32_t)ep_mps & 0x7FFU; ep->type = ep_type; if (ep->is_in != 0U) @@ -1747,6 +1837,7 @@ HAL_StatusTypeDef HAL_PCD_EP_Open(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, /* Assign a Tx FIFO */ ep->tx_fifo_num = ep->num; } + /* Set initial data PID. 
*/ if (ep_type == EP_TYPE_BULK) { @@ -1780,7 +1871,7 @@ HAL_StatusTypeDef HAL_PCD_EP_Close(PCD_HandleTypeDef *hpcd, uint8_t ep_addr) ep = &hpcd->OUT_ep[ep_addr & EP_ADDR_MSK]; ep->is_in = 0U; } - ep->num = ep_addr & EP_ADDR_MSK; + ep->num = ep_addr & EP_ADDR_MSK; __HAL_LOCK(hpcd); (void)USB_DeactivateEndpoint(hpcd->Instance, ep); @@ -1815,14 +1906,7 @@ HAL_StatusTypeDef HAL_PCD_EP_Receive(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, u ep->dma_addr = (uint32_t)pBuf; } - if ((ep_addr & EP_ADDR_MSK) == 0U) - { - (void)USB_EP0StartXfer(hpcd->Instance, ep, (uint8_t)hpcd->Init.dma_enable); - } - else - { - (void)USB_EPStartXfer(hpcd->Instance, ep, (uint8_t)hpcd->Init.dma_enable); - } + (void)USB_EPStartXfer(hpcd->Instance, ep, (uint8_t)hpcd->Init.dma_enable); return HAL_OK; } @@ -1833,7 +1917,7 @@ HAL_StatusTypeDef HAL_PCD_EP_Receive(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, u * @param ep_addr endpoint address * @retval Data Size */ -uint32_t HAL_PCD_EP_GetRxCount(PCD_HandleTypeDef *hpcd, uint8_t ep_addr) +uint32_t HAL_PCD_EP_GetRxCount(PCD_HandleTypeDef const *hpcd, uint8_t ep_addr) { return hpcd->OUT_ep[ep_addr & EP_ADDR_MSK].xfer_count; } @@ -1863,14 +1947,7 @@ HAL_StatusTypeDef HAL_PCD_EP_Transmit(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, ep->dma_addr = (uint32_t)pBuf; } - if ((ep_addr & EP_ADDR_MSK) == 0U) - { - (void)USB_EP0StartXfer(hpcd->Instance, ep, (uint8_t)hpcd->Init.dma_enable); - } - else - { - (void)USB_EPStartXfer(hpcd->Instance, ep, (uint8_t)hpcd->Init.dma_enable); - } + (void)USB_EPStartXfer(hpcd->Instance, ep, (uint8_t)hpcd->Init.dma_enable); return HAL_OK; } @@ -1954,6 +2031,32 @@ HAL_StatusTypeDef HAL_PCD_EP_ClrStall(PCD_HandleTypeDef *hpcd, uint8_t ep_addr) return HAL_OK; } +/** + * @brief Abort an USB EP transaction. + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_Abort(PCD_HandleTypeDef *hpcd, uint8_t ep_addr) +{ + HAL_StatusTypeDef ret; + PCD_EPTypeDef *ep; + + if ((0x80U & ep_addr) == 0x80U) + { + ep = &hpcd->IN_ep[ep_addr & EP_ADDR_MSK]; + } + else + { + ep = &hpcd->OUT_ep[ep_addr & EP_ADDR_MSK]; + } + + /* Stop Xfer */ + ret = USB_EPStopXfer(hpcd->Instance, ep); + + return ret; +} + /** * @brief Flush an endpoint * @param hpcd PCD handle @@ -2022,11 +2125,40 @@ HAL_StatusTypeDef HAL_PCD_DeActivateRemoteWakeup(PCD_HandleTypeDef *hpcd) * @param hpcd PCD handle * @retval HAL state */ -PCD_StateTypeDef HAL_PCD_GetState(PCD_HandleTypeDef *hpcd) +PCD_StateTypeDef HAL_PCD_GetState(PCD_HandleTypeDef const *hpcd) { return hpcd->State; } +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/** + * @brief Set the USB Device high speed test mode. 
+ * @param hpcd PCD handle + * @param testmode USB Device high speed test mode + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_SetTestMode(const PCD_HandleTypeDef *hpcd, uint8_t testmode) +{ + const USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + uint32_t USBx_BASE = (uint32_t)USBx; + + switch (testmode) + { + case TEST_J: + case TEST_K: + case TEST_SE0_NAK: + case TEST_PACKET: + case TEST_FORCE_EN: + USBx_DEVICE->DCTL |= (uint32_t)testmode << 4; + break; + + default: + break; + } + + return HAL_OK; +} +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ /** * @} */ @@ -2108,9 +2240,10 @@ static HAL_StatusTypeDef PCD_WriteEmptyTxFifo(PCD_HandleTypeDef *hpcd, uint32_t */ static HAL_StatusTypeDef PCD_EP_OutXfrComplete_int(PCD_HandleTypeDef *hpcd, uint32_t epnum) { - USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + USB_OTG_EPTypeDef *ep; + const USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; uint32_t USBx_BASE = (uint32_t)USBx; - uint32_t gSNPSiD = *(__IO uint32_t *)(&USBx->CID + 0x1U); + uint32_t gSNPSiD = *(__IO const uint32_t *)(&USBx->CID + 0x1U); uint32_t DoepintReg = USBx_OUTEP(epnum)->DOEPINT; if (hpcd->Init.dma_enable == 1U) @@ -2138,18 +2271,24 @@ static HAL_StatusTypeDef PCD_EP_OutXfrComplete_int(PCD_HandleTypeDef *hpcd, uint } else { - /* out data packet received over EP0 */ - hpcd->OUT_ep[epnum].xfer_count = - hpcd->OUT_ep[epnum].maxpacket - - (USBx_OUTEP(epnum)->DOEPTSIZ & USB_OTG_DOEPTSIZ_XFRSIZ); + ep = &hpcd->OUT_ep[epnum]; - hpcd->OUT_ep[epnum].xfer_buff += hpcd->OUT_ep[epnum].maxpacket; + /* out data packet received over EP */ + ep->xfer_count = ep->xfer_size - (USBx_OUTEP(epnum)->DOEPTSIZ & USB_OTG_DOEPTSIZ_XFRSIZ); - if ((epnum == 0U) && (hpcd->OUT_ep[epnum].xfer_len == 0U)) + if (epnum == 0U) { - /* this is ZLP, so prepare EP0 for next setup */ - (void)USB_EP0_OutStart(hpcd->Instance, 1U, (uint8_t *)hpcd->Setup); + if (ep->xfer_len == 0U) + { + /* this is ZLP, so prepare EP0 for next setup */ + (void)USB_EP0_OutStart(hpcd->Instance, 1U, (uint8_t *)hpcd->Setup); + } + else + { + ep->xfer_buff += ep->xfer_count; + } } + #if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) hpcd->DataOutStageCallback(hpcd, (uint8_t)epnum); #else @@ -2213,9 +2352,9 @@ static HAL_StatusTypeDef PCD_EP_OutXfrComplete_int(PCD_HandleTypeDef *hpcd, uint */ static HAL_StatusTypeDef PCD_EP_OutSetupPacket_int(PCD_HandleTypeDef *hpcd, uint32_t epnum) { - USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + const USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; uint32_t USBx_BASE = (uint32_t)USBx; - uint32_t gSNPSiD = *(__IO uint32_t *)(&USBx->CID + 0x1U); + uint32_t gSNPSiD = *(__IO const uint32_t *)(&USBx->CID + 0x1U); uint32_t DoepintReg = USBx_OUTEP(epnum)->DOEPINT; if ((gSNPSiD > USB_OTG_CORE_ID_300A) && @@ -2240,13 +2379,11 @@ static HAL_StatusTypeDef PCD_EP_OutSetupPacket_int(PCD_HandleTypeDef *hpcd, uint } #endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ - /** * @} */ #endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ #endif /* HAL_PCD_MODULE_ENABLED */ - /** * @} */ @@ -2254,5 +2391,3 @@ static HAL_StatusTypeDef PCD_EP_OutSetupPacket_int(PCD_HandleTypeDef *hpcd, uint /** * @} */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pcd_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pcd_ex.c index 51562ab8..b66be6ac 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pcd_ex.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pcd_ex.c @@ -10,13 +10,12 @@ 
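/* Illustrative sketch, not part of the driver sources: aborting a pending IN
   transfer on endpoint 0x81 with the newly added HAL_PCD_EP_Abort() helper.
   The handle name hpcd_USB_OTG_FS and Error_Handler() are assumed application
   symbols, not defined by this driver. */
if (HAL_PCD_EP_Abort(&hpcd_USB_OTG_FS, 0x81U) != HAL_OK)
{
  Error_Handler();
}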
****************************************************************************** * @attention * - * © Copyright (c) 2016 STMicroelectronics. - * All rights reserved.
+ * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -116,7 +115,9 @@ HAL_StatusTypeDef HAL_PCDEx_SetRxFiFo(PCD_HandleTypeDef *hpcd, uint16_t size) return HAL_OK; } -#if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) \ + || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) \ + || defined(STM32F423xx) /** * @brief Activate LPM feature. * @param hpcd PCD handle @@ -149,8 +150,11 @@ HAL_StatusTypeDef HAL_PCDEx_DeActivateLPM(PCD_HandleTypeDef *hpcd) return HAL_OK; } -#endif /* defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) */ -#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +#endif /* defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || + defined(STM32F423xx) */ +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) \ + || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) /** * @brief Handle BatteryCharging Process. 
* @param hpcd PCD handle @@ -164,26 +168,10 @@ void HAL_PCDEx_BCD_VBUSDetect(PCD_HandleTypeDef *hpcd) /* Enable DCD : Data Contact Detect */ USBx->GCCFG |= USB_OTG_GCCFG_DCDEN; - /* Wait Detect flag or a timeout is happen*/ - while ((USBx->GCCFG & USB_OTG_GCCFG_DCDET) == 0U) - { - /* Check for the Timeout */ - if ((HAL_GetTick() - tickstart) > 1000U) - { -#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) - hpcd->BCDCallback(hpcd, PCD_BCD_ERROR); -#else - HAL_PCDEx_BCD_Callback(hpcd, PCD_BCD_ERROR); -#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ - - return; - } - } - - /* Right response got */ - HAL_Delay(200U); + /* Wait for Min DCD Timeout */ + HAL_Delay(300U); - /* Check Detect flag*/ + /* Check Detect flag */ if ((USBx->GCCFG & USB_OTG_GCCFG_DCDET) == USB_OTG_GCCFG_DCDET) { #if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) @@ -193,11 +181,11 @@ void HAL_PCDEx_BCD_VBUSDetect(PCD_HandleTypeDef *hpcd) #endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ } - /*Primary detection: checks if connected to Standard Downstream Port + /* Primary detection: checks if connected to Standard Downstream Port (without charging capability) */ - USBx->GCCFG &= ~ USB_OTG_GCCFG_DCDEN; + USBx->GCCFG &= ~USB_OTG_GCCFG_DCDEN; HAL_Delay(50U); - USBx->GCCFG |= USB_OTG_GCCFG_PDEN; + USBx->GCCFG |= USB_OTG_GCCFG_PDEN; HAL_Delay(50U); if ((USBx->GCCFG & USB_OTG_GCCFG_PDET) == 0U) @@ -213,9 +201,9 @@ void HAL_PCDEx_BCD_VBUSDetect(PCD_HandleTypeDef *hpcd) { /* start secondary detection to check connection to Charging Downstream Port or Dedicated Charging Port */ - USBx->GCCFG &= ~ USB_OTG_GCCFG_PDEN; + USBx->GCCFG &= ~(USB_OTG_GCCFG_PDEN); HAL_Delay(50U); - USBx->GCCFG |= USB_OTG_GCCFG_SDEN; + USBx->GCCFG |= USB_OTG_GCCFG_SDEN; HAL_Delay(50U); if ((USBx->GCCFG & USB_OTG_GCCFG_SDET) == USB_OTG_GCCFG_SDET) @@ -229,7 +217,7 @@ void HAL_PCDEx_BCD_VBUSDetect(PCD_HandleTypeDef *hpcd) } else { - /* case Charging Downstream Port */ + /* case Charging Downstream Port */ #if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) hpcd->BCDCallback(hpcd, PCD_BCD_CHARGING_DOWNSTREAM_PORT); #else @@ -241,11 +229,23 @@ void HAL_PCDEx_BCD_VBUSDetect(PCD_HandleTypeDef *hpcd) /* Battery Charging capability discovery finished */ (void)HAL_PCDEx_DeActivateBCD(hpcd); + /* Check for the Timeout, else start USB Device */ + if ((HAL_GetTick() - tickstart) > 1000U) + { #if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) - hpcd->BCDCallback(hpcd, PCD_BCD_DISCOVERY_COMPLETED); + hpcd->BCDCallback(hpcd, PCD_BCD_ERROR); #else - HAL_PCDEx_BCD_Callback(hpcd, PCD_BCD_DISCOVERY_COMPLETED); + HAL_PCDEx_BCD_Callback(hpcd, PCD_BCD_ERROR); #endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + else + { +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->BCDCallback(hpcd, PCD_BCD_DISCOVERY_COMPLETED); +#else + HAL_PCDEx_BCD_Callback(hpcd, PCD_BCD_DISCOVERY_COMPLETED); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } } /** @@ -290,7 +290,8 @@ HAL_StatusTypeDef HAL_PCDEx_DeActivateBCD(PCD_HandleTypeDef *hpcd) return HAL_OK; } -#endif /* defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) */ +#endif /* defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || + defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) */ #endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ /** @@ -344,5 +345,3 @@ __weak void HAL_PCDEx_BCD_Callback(PCD_HandleTypeDef *hpcd, PCD_BCD_MsgTypeDef m /** * @} */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git 
a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr.c index ba2ce59d..5ccde648 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr.c @@ -11,14 +11,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** */ @@ -100,11 +98,19 @@ void HAL_PWR_DeInit(void) * backup data registers and backup SRAM). * @note If the HSE divided by 2, 3, ..31 is used as the RTC clock, the * Backup Domain Access should be kept enabled. + * @note The following sequence is required to bypass the delay between + * DBP bit programming and the effective enabling of the backup domain. + * Please check the Errata Sheet for more details under "Possible delay + * in backup domain protection disabling/enabling after programming the + * DBP bit" section. * @retval None */ void HAL_PWR_EnableBkUpAccess(void) { + __IO uint32_t dummyread; *(__IO uint32_t *) CR_DBP_BB = (uint32_t)ENABLE; + dummyread = PWR->CR; + UNUSED(dummyread); } /** @@ -112,11 +118,19 @@ void HAL_PWR_EnableBkUpAccess(void) * backup data registers and backup SRAM). * @note If the HSE divided by 2, 3, ..31 is used as the RTC clock, the * Backup Domain Access should be kept enabled. + * @note The following sequence is required to bypass the delay between + * DBP bit programming and the effective disabling of the backup domain. + * Please check the Errata Sheet for more details under "Possible delay + * in backup domain protection disabling/enabling after programming the + * DBP bit" section. * @retval None */ void HAL_PWR_DisableBkUpAccess(void) { + __IO uint32_t dummyread; *(__IO uint32_t *) CR_DBP_BB = (uint32_t)DISABLE; + dummyread = PWR->CR; + UNUSED(dummyread); } /** @@ -165,10 +179,12 @@ void HAL_PWR_DisableBkUpAccess(void) ================== [..] (+) Entry: - The Sleep mode is entered by using the HAL_PWR_EnterSLEEPMode(PWR_MAINREGULATOR_ON, PWR_SLEEPENTRY_WFI) + The Sleep mode is entered by using the HAL_PWR_EnterSLEEPMode(Regulator, SLEEPEntry) functions with (++) PWR_SLEEPENTRY_WFI: enter SLEEP mode with WFI instruction (++) PWR_SLEEPENTRY_WFE: enter SLEEP mode with WFE instruction + (++) PWR_SLEEPENTRY_WFE_NO_EVT_CLEAR: Enter SLEEP mode with WFE instruction and + no clear of pending event before. -@@- The Regulator parameter is not used for the STM32F4 family and is kept as parameter just to maintain compatibility with the @@ -190,10 +206,17 @@ void HAL_PWR_DisableBkUpAccess(void) the HAL_PWREx_DisableFlashPowerDown() function. (+) Entry: - The Stop mode is entered using the HAL_PWR_EnterSTOPMode(PWR_MAINREGULATOR_ON) + The Stop mode is entered using the HAL_PWR_EnterSTOPMode(Regulator, STOPEntry) function with: - (++) Main regulator ON. - (++) Low Power regulator ON. + (++) Regulator: + (+++) Main regulator ON. + (+++) Low Power regulator ON. + (++) STOPEntry: + (+++) PWR_STOPENTRY_WFI : Enter STOP mode with WFI instruction. + (+++) PWR_STOPENTRY_WFE : Enter STOP mode with WFE instruction and + clear of pending events before. + (+++) PWR_STOPENTRY_WFE_NO_EVT_CLEAR : Enter STOP mode with WFE instruction and + no clear of pending event before. (+) Exit: Any EXTI Line (Internal or External) configured in Interrupt/Event mode. 
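/* Illustrative sketch, not part of the driver sources: entering Stop mode with
   the low-power regulator and the WFI entry described above. SystemClock_Config()
   is an assumed application function; after wakeup from Stop mode the device runs
   on HSI, so the clock tree has to be restored before resuming normal operation. */
HAL_SuspendTick();                                      /* keep SysTick from waking the core immediately */
HAL_PWR_EnterSTOPMode(PWR_LOWPOWERREGULATOR_ON, PWR_STOPENTRY_WFI);
SystemClock_Config();                                   /* assumed user function re-enabling HSE/PLL */
HAL_ResumeTick();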
@@ -358,12 +381,18 @@ void HAL_PWR_DisableWakeUpPin(uint32_t WakeUpPinx) * just to maintain compatibility with the lower power families. * @param SLEEPEntry Specifies if SLEEP mode in entered with WFI or WFE instruction. * This parameter can be one of the following values: - * @arg PWR_SLEEPENTRY_WFI: enter SLEEP mode with WFI instruction - * @arg PWR_SLEEPENTRY_WFE: enter SLEEP mode with WFE instruction + * @arg PWR_SLEEPENTRY_WFI : Enter SLEEP mode with WFI instruction + * @arg PWR_SLEEPENTRY_WFE : Enter SLEEP mode with WFE instruction and + * clear of pending events before. + * @arg PWR_SLEEPENTRY_WFE_NO_EVT_CLEAR : Enter SLEEP mode with WFE instruction and + * no clear of pending event before. * @retval None */ void HAL_PWR_EnterSLEEPMode(uint32_t Regulator, uint8_t SLEEPEntry) { + /* Prevent unused argument(s) compilation warning */ + UNUSED(Regulator); + /* Check the parameters */ assert_param(IS_PWR_REGULATOR(Regulator)); assert_param(IS_PWR_SLEEP_ENTRY(SLEEPEntry)); @@ -379,9 +408,14 @@ void HAL_PWR_EnterSLEEPMode(uint32_t Regulator, uint8_t SLEEPEntry) } else { + if(SLEEPEntry != PWR_SLEEPENTRY_WFE_NO_EVT_CLEAR) + { + /* Clear all pending event */ + __SEV(); + __WFE(); + } + /* Request Wait For Event */ - __SEV(); - __WFE(); __WFE(); } } @@ -401,8 +435,11 @@ void HAL_PWR_EnterSLEEPMode(uint32_t Regulator, uint8_t SLEEPEntry) * @arg PWR_LOWPOWERREGULATOR_ON: Stop mode with low power regulator ON * @param STOPEntry Specifies if Stop mode in entered with WFI or WFE instruction. * This parameter can be one of the following values: - * @arg PWR_STOPENTRY_WFI: Enter Stop mode with WFI instruction - * @arg PWR_STOPENTRY_WFE: Enter Stop mode with WFE instruction + * @arg PWR_STOPENTRY_WFI : Enter Stop mode with WFI instruction + * @arg PWR_STOPENTRY_WFE : Enter Stop mode with WFE instruction and + * clear of pending events before. + * @arg PWR_STOPENTRY_WFE_NO_EVT_CLEAR : Enter STOP mode with WFE instruction and + * no clear of pending event before. * @retval None */ void HAL_PWR_EnterSTOPMode(uint32_t Regulator, uint8_t STOPEntry) @@ -425,9 +462,13 @@ void HAL_PWR_EnterSTOPMode(uint32_t Regulator, uint8_t STOPEntry) } else { + if(STOPEntry != PWR_STOPENTRY_WFE_NO_EVT_CLEAR) + { + /* Clear all pending event */ + __SEV(); + __WFE(); + } /* Request Wait For Event */ - __SEV(); - __WFE(); __WFE(); } /* Reset SLEEPDEEP bit of Cortex System Control Register */ @@ -555,5 +596,3 @@ void HAL_PWR_DisableSEVOnPend(void) /** * @} */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr_ex.c index 0d26083a..77f9c35b 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr_ex.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_pwr_ex.c @@ -10,14 +10,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** */ @@ -600,5 +598,3 @@ HAL_StatusTypeDef HAL_PWREx_EnterUnderDriveSTOPMode(uint32_t Regulator, uint8_t /** * @} */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc.c index 7640b65f..c04d33a6 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc.c @@ -54,14 +54,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** */ @@ -112,8 +110,8 @@ */ /** @defgroup RCC_Exported_Functions_Group1 Initialization and de-initialization functions - * @brief Initialization and Configuration functions - * + * @brief Initialization and Configuration functions + * @verbatim =============================================================================== ##### Initialization and de-initialization functions ##### @@ -220,10 +218,10 @@ __weak HAL_StatusTypeDef HAL_RCC_DeInit(void) */ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) { - uint32_t tickstart, pll_config; - + uint32_t tickstart; + uint32_t pll_config; /* Check Null pointer */ - if(RCC_OscInitStruct == NULL) + if (RCC_OscInitStruct == NULL) { return HAL_ERROR; } @@ -231,15 +229,15 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc /* Check the parameters */ assert_param(IS_RCC_OSCILLATORTYPE(RCC_OscInitStruct->OscillatorType)); /*------------------------------- HSE Configuration ------------------------*/ - if(((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSE) == RCC_OSCILLATORTYPE_HSE) + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSE) == RCC_OSCILLATORTYPE_HSE) { /* Check the parameters */ assert_param(IS_RCC_HSE(RCC_OscInitStruct->HSEState)); /* When the HSE is used as system clock or clock source for PLL in these cases HSE will not disabled */ - if((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSE) ||\ - ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSE))) + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSE) || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSE))) { - if((__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) && (RCC_OscInitStruct->HSEState == RCC_HSE_OFF)) + if ((__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) && (RCC_OscInitStruct->HSEState == RCC_HSE_OFF)) { return HAL_ERROR; } @@ -250,15 +248,15 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc __HAL_RCC_HSE_CONFIG(RCC_OscInitStruct->HSEState); /* Check the HSE State */ - if((RCC_OscInitStruct->HSEState) != RCC_HSE_OFF) + if ((RCC_OscInitStruct->HSEState) != RCC_HSE_OFF) { /* Get Start Tick */ tickstart = HAL_GetTick(); /* Wait till HSE is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == RESET) { - if((HAL_GetTick() - tickstart ) > HSE_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -270,9 +268,9 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc tickstart = HAL_GetTick(); /* Wait till HSE is bypassed or disabled */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) { - if((HAL_GetTick() - tickstart ) > HSE_TIMEOUT_VALUE) + if ((HAL_GetTick() 
- tickstart) > HSE_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -281,18 +279,18 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc } } /*----------------------------- HSI Configuration --------------------------*/ - if(((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSI) == RCC_OSCILLATORTYPE_HSI) + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSI) == RCC_OSCILLATORTYPE_HSI) { /* Check the parameters */ assert_param(IS_RCC_HSI(RCC_OscInitStruct->HSIState)); assert_param(IS_RCC_CALIBRATION_VALUE(RCC_OscInitStruct->HSICalibrationValue)); /* Check if HSI is used as system clock or as PLL source when PLL is selected as system clock */ - if((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSI) ||\ - ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSI))) + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSI) || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSI))) { /* When HSI is used as system clock it will not disabled */ - if((__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) && (RCC_OscInitStruct->HSIState != RCC_HSI_ON)) + if ((__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) && (RCC_OscInitStruct->HSIState != RCC_HSI_ON)) { return HAL_ERROR; } @@ -306,7 +304,7 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc else { /* Check the HSI State */ - if((RCC_OscInitStruct->HSIState)!= RCC_HSI_OFF) + if ((RCC_OscInitStruct->HSIState) != RCC_HSI_OFF) { /* Enable the Internal High Speed oscillator (HSI). */ __HAL_RCC_HSI_ENABLE(); @@ -315,9 +313,9 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc tickstart = HAL_GetTick(); /* Wait till HSI is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) == RESET) { - if((HAL_GetTick() - tickstart ) > HSI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -335,9 +333,9 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc tickstart = HAL_GetTick(); /* Wait till HSI is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) { - if((HAL_GetTick() - tickstart ) > HSI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -346,13 +344,13 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc } } /*------------------------------ LSI Configuration -------------------------*/ - if(((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSI) == RCC_OSCILLATORTYPE_LSI) + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSI) == RCC_OSCILLATORTYPE_LSI) { /* Check the parameters */ assert_param(IS_RCC_LSI(RCC_OscInitStruct->LSIState)); /* Check the LSI State */ - if((RCC_OscInitStruct->LSIState)!= RCC_LSI_OFF) + if ((RCC_OscInitStruct->LSIState) != RCC_LSI_OFF) { /* Enable the Internal Low Speed oscillator (LSI). 
*/ __HAL_RCC_LSI_ENABLE(); @@ -361,9 +359,9 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc tickstart = HAL_GetTick(); /* Wait till LSI is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) == RESET) { - if((HAL_GetTick() - tickstart ) > LSI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > LSI_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -378,9 +376,9 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc tickstart = HAL_GetTick(); /* Wait till LSI is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) != RESET) { - if((HAL_GetTick() - tickstart ) > LSI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > LSI_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -388,7 +386,7 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc } } /*------------------------------ LSE Configuration -------------------------*/ - if(((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSE) == RCC_OSCILLATORTYPE_LSE) + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSE) == RCC_OSCILLATORTYPE_LSE) { FlagStatus pwrclkchanged = RESET; @@ -397,13 +395,13 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc /* Update LSE configuration in Backup Domain control register */ /* Requires to enable write access to Backup Domain of necessary */ - if(__HAL_RCC_PWR_IS_CLK_DISABLED()) + if (__HAL_RCC_PWR_IS_CLK_DISABLED()) { __HAL_RCC_PWR_CLK_ENABLE(); pwrclkchanged = SET; } - if(HAL_IS_BIT_CLR(PWR->CR, PWR_CR_DBP)) + if (HAL_IS_BIT_CLR(PWR->CR, PWR_CR_DBP)) { /* Enable write access to Backup domain */ SET_BIT(PWR->CR, PWR_CR_DBP); @@ -411,9 +409,9 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc /* Wait for Backup domain Write protection disable */ tickstart = HAL_GetTick(); - while(HAL_IS_BIT_CLR(PWR->CR, PWR_CR_DBP)) + while (HAL_IS_BIT_CLR(PWR->CR, PWR_CR_DBP)) { - if((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -423,15 +421,15 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc /* Set the new LSE configuration -----------------------------------------*/ __HAL_RCC_LSE_CONFIG(RCC_OscInitStruct->LSEState); /* Check the LSE State */ - if((RCC_OscInitStruct->LSEState) != RCC_LSE_OFF) + if ((RCC_OscInitStruct->LSEState) != RCC_LSE_OFF) { /* Get Start Tick*/ tickstart = HAL_GetTick(); /* Wait till LSE is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) { - if((HAL_GetTick() - tickstart ) > RCC_LSE_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -443,9 +441,9 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc tickstart = HAL_GetTick(); /* Wait till LSE is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) != RESET) { - if((HAL_GetTick() - tickstart ) > RCC_LSE_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -453,7 +451,7 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc } /* Restore clock configuration if changed */ - if(pwrclkchanged == SET) + if (pwrclkchanged == SET) { __HAL_RCC_PWR_CLK_DISABLE(); } @@ -464,9 +462,9 @@ __weak 
HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc if ((RCC_OscInitStruct->PLL.PLLState) != RCC_PLL_NONE) { /* Check if the PLL is used as system clock or not */ - if(__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_CFGR_SWS_PLL) + if (__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_CFGR_SWS_PLL) { - if((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_ON) + if ((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_ON) { /* Check the parameters */ assert_param(IS_RCC_PLLSOURCE(RCC_OscInitStruct->PLL.PLLSource)); @@ -481,10 +479,10 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc /* Get Start Tick */ tickstart = HAL_GetTick(); - /* Wait till PLL is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + /* Wait till PLL is disabled */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) { - if((HAL_GetTick() - tickstart ) > PLL_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -503,9 +501,9 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc tickstart = HAL_GetTick(); /* Wait till PLL is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) { - if((HAL_GetTick() - tickstart ) > PLL_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -519,10 +517,10 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc /* Get Start Tick */ tickstart = HAL_GetTick(); - /* Wait till PLL is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + /* Wait till PLL is disabled */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) { - if((HAL_GetTick() - tickstart ) > PLL_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -532,7 +530,7 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc else { /* Check if there is a request to disable the PLL used as System clock source */ - if((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) + if ((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) { return HAL_ERROR; } @@ -540,11 +538,22 @@ __weak HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruc { /* Do not return HAL_ERROR if request repeats the current configuration */ pll_config = RCC->PLLCFGR; - if((READ_BIT(pll_config, RCC_PLLCFGR_PLLSRC) != RCC_OscInitStruct->PLL.PLLSource) || - (READ_BIT(pll_config, RCC_PLLCFGR_PLLM) != RCC_OscInitStruct->PLL.PLLM) || - (READ_BIT(pll_config, RCC_PLLCFGR_PLLN) != RCC_OscInitStruct->PLL.PLLN) || - (READ_BIT(pll_config, RCC_PLLCFGR_PLLP) != RCC_OscInitStruct->PLL.PLLP) || - (READ_BIT(pll_config, RCC_PLLCFGR_PLLQ) != RCC_OscInitStruct->PLL.PLLQ)) +#if defined (RCC_PLLCFGR_PLLR) + if (((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLSRC) != RCC_OscInitStruct->PLL.PLLSource) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLM) != (RCC_OscInitStruct->PLL.PLLM) << RCC_PLLCFGR_PLLM_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLN) != (RCC_OscInitStruct->PLL.PLLN) << RCC_PLLCFGR_PLLN_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLP) != (((RCC_OscInitStruct->PLL.PLLP >> 1U) - 1U)) << RCC_PLLCFGR_PLLP_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLQ) != (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos)) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLR) != (RCC_OscInitStruct->PLL.PLLR << RCC_PLLCFGR_PLLR_Pos))) +#else + if (((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) || + (READ_BIT(pll_config, 
RCC_PLLCFGR_PLLSRC) != RCC_OscInitStruct->PLL.PLLSource) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLM) != (RCC_OscInitStruct->PLL.PLLM) << RCC_PLLCFGR_PLLM_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLN) != (RCC_OscInitStruct->PLL.PLLN) << RCC_PLLCFGR_PLLN_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLP) != (((RCC_OscInitStruct->PLL.PLLP >> 1U) - 1U)) << RCC_PLLCFGR_PLLP_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLQ) != (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos))) +#endif /* RCC_PLLCFGR_PLLR */ { return HAL_ERROR; } @@ -584,7 +593,7 @@ HAL_StatusTypeDef HAL_RCC_ClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, ui uint32_t tickstart; /* Check Null pointer */ - if(RCC_ClkInitStruct == NULL) + if (RCC_ClkInitStruct == NULL) { return HAL_ERROR; } @@ -598,30 +607,30 @@ HAL_StatusTypeDef HAL_RCC_ClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, ui (HCLK) and the supply voltage of the device. */ /* Increasing the number of wait states because of higher CPU frequency */ - if(FLatency > __HAL_FLASH_GET_LATENCY()) + if (FLatency > __HAL_FLASH_GET_LATENCY()) { /* Program the new number of wait states to the LATENCY bits in the FLASH_ACR register */ __HAL_FLASH_SET_LATENCY(FLatency); /* Check that the new number of wait states is taken into account to access the Flash memory by reading the FLASH_ACR register */ - if(__HAL_FLASH_GET_LATENCY() != FLatency) + if (__HAL_FLASH_GET_LATENCY() != FLatency) { return HAL_ERROR; } } /*-------------------------- HCLK Configuration --------------------------*/ - if(((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_HCLK) == RCC_CLOCKTYPE_HCLK) + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_HCLK) == RCC_CLOCKTYPE_HCLK) { /* Set the highest APBx dividers in order to ensure that we do not go through a non-spec phase whatever we decrease or increase HCLK. 
*/ - if(((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK1) == RCC_CLOCKTYPE_PCLK1) + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK1) == RCC_CLOCKTYPE_PCLK1) { MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE1, RCC_HCLK_DIV16); } - if(((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK2) == RCC_CLOCKTYPE_PCLK2) + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK2) == RCC_CLOCKTYPE_PCLK2) { MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE2, (RCC_HCLK_DIV16 << 3)); } @@ -631,25 +640,25 @@ HAL_StatusTypeDef HAL_RCC_ClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, ui } /*------------------------- SYSCLK Configuration ---------------------------*/ - if(((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_SYSCLK) == RCC_CLOCKTYPE_SYSCLK) + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_SYSCLK) == RCC_CLOCKTYPE_SYSCLK) { assert_param(IS_RCC_SYSCLKSOURCE(RCC_ClkInitStruct->SYSCLKSource)); /* HSE is selected as System Clock Source */ - if(RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_HSE) + if (RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_HSE) { /* Check the HSE ready flag */ - if(__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == RESET) + if (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == RESET) { return HAL_ERROR; } } /* PLL is selected as System Clock Source */ - else if((RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_PLLCLK) || - (RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_PLLRCLK)) + else if ((RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_PLLCLK) || + (RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_PLLRCLK)) { /* Check the PLL ready flag */ - if(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) + if (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) { return HAL_ERROR; } @@ -658,7 +667,7 @@ HAL_StatusTypeDef HAL_RCC_ClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, ui else { /* Check the HSI ready flag */ - if(__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) == RESET) + if (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) == RESET) { return HAL_ERROR; } @@ -679,38 +688,38 @@ HAL_StatusTypeDef HAL_RCC_ClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, ui } /* Decreasing the number of wait states because of lower CPU frequency */ - if(FLatency < __HAL_FLASH_GET_LATENCY()) + if (FLatency < __HAL_FLASH_GET_LATENCY()) { - /* Program the new number of wait states to the LATENCY bits in the FLASH_ACR register */ + /* Program the new number of wait states to the LATENCY bits in the FLASH_ACR register */ __HAL_FLASH_SET_LATENCY(FLatency); /* Check that the new number of wait states is taken into account to access the Flash memory by reading the FLASH_ACR register */ - if(__HAL_FLASH_GET_LATENCY() != FLatency) + if (__HAL_FLASH_GET_LATENCY() != FLatency) { return HAL_ERROR; } } /*-------------------------- PCLK1 Configuration ---------------------------*/ - if(((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK1) == RCC_CLOCKTYPE_PCLK1) + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK1) == RCC_CLOCKTYPE_PCLK1) { assert_param(IS_RCC_PCLK(RCC_ClkInitStruct->APB1CLKDivider)); MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE1, RCC_ClkInitStruct->APB1CLKDivider); } /*-------------------------- PCLK2 Configuration ---------------------------*/ - if(((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK2) == RCC_CLOCKTYPE_PCLK2) + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK2) == RCC_CLOCKTYPE_PCLK2) { assert_param(IS_RCC_PCLK(RCC_ClkInitStruct->APB2CLKDivider)); MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE2, ((RCC_ClkInitStruct->APB2CLKDivider) << 3U)); } /* Update the SystemCoreClock global variable */ - SystemCoreClock = 
HAL_RCC_GetSysClockFreq() >> AHBPrescTable[(RCC->CFGR & RCC_CFGR_HPRE)>> RCC_CFGR_HPRE_Pos]; + SystemCoreClock = HAL_RCC_GetSysClockFreq() >> AHBPrescTable[(RCC->CFGR & RCC_CFGR_HPRE) >> RCC_CFGR_HPRE_Pos]; /* Configure the source of time base considering new system clocks settings */ - HAL_InitTick (uwTickPrio); + HAL_InitTick(uwTickPrio); return HAL_OK; } @@ -720,8 +729,8 @@ HAL_StatusTypeDef HAL_RCC_ClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, ui */ /** @defgroup RCC_Exported_Functions_Group2 Peripheral Control functions - * @brief RCC clocks control functions - * + * @brief RCC clocks control functions + * @verbatim =============================================================================== ##### Peripheral Control functions ##### @@ -770,7 +779,7 @@ void HAL_RCC_MCOConfig(uint32_t RCC_MCOx, uint32_t RCC_MCOSource, uint32_t RCC_M assert_param(IS_RCC_MCO(RCC_MCOx)); assert_param(IS_RCC_MCODIV(RCC_MCODiv)); /* RCC_MCO1 */ - if(RCC_MCOx == RCC_MCO1) + if (RCC_MCOx == RCC_MCO1) { assert_param(IS_RCC_MCO1SOURCE(RCC_MCOSource)); @@ -788,7 +797,7 @@ void HAL_RCC_MCOConfig(uint32_t RCC_MCOx, uint32_t RCC_MCOSource, uint32_t RCC_M /* Mask MCO1 and MCO1PRE[2:0] bits then Select MCO1 clock source and prescaler */ MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO1 | RCC_CFGR_MCO1PRE), (RCC_MCOSource | RCC_MCODiv)); - /* This RCC MCO1 enable feature is available only on STM32F410xx devices */ + /* This RCC MCO1 enable feature is available only on STM32F410xx devices */ #if defined(RCC_CFGR_MCO1EN) __HAL_RCC_MCO1_ENABLE(); #endif /* RCC_CFGR_MCO1EN */ @@ -812,7 +821,7 @@ void HAL_RCC_MCOConfig(uint32_t RCC_MCOx, uint32_t RCC_MCOSource, uint32_t RCC_M /* Mask MCO2 and MCO2PRE[2:0] bits then Select MCO2 clock source and prescaler */ MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO2 | RCC_CFGR_MCO2PRE), (RCC_MCOSource | (RCC_MCODiv << 3U))); - /* This RCC MCO2 enable feature is available only on STM32F410Rx devices */ + /* This RCC MCO2 enable feature is available only on STM32F410Rx devices */ #if defined(RCC_CFGR_MCO2EN) __HAL_RCC_MCO2_ENABLE(); #endif /* RCC_CFGR_MCO2EN */ @@ -875,7 +884,9 @@ void HAL_RCC_DisableCSS(void) */ __weak uint32_t HAL_RCC_GetSysClockFreq(void) { - uint32_t pllm = 0U, pllvco = 0U, pllp = 0U; + uint32_t pllm = 0U; + uint32_t pllvco = 0U; + uint32_t pllp = 0U; uint32_t sysclockfreq = 0U; /* Get SYSCLK source -------------------------------------------------------*/ @@ -884,7 +895,7 @@ __weak uint32_t HAL_RCC_GetSysClockFreq(void) case RCC_CFGR_SWS_HSI: /* HSI used as system clock source */ { sysclockfreq = HSI_VALUE; - break; + break; } case RCC_CFGR_SWS_HSE: /* HSE used as system clock source */ { @@ -896,19 +907,19 @@ __weak uint32_t HAL_RCC_GetSysClockFreq(void) /* PLL_VCO = (HSE_VALUE or HSI_VALUE / PLLM) * PLLN SYSCLK = PLL_VCO / PLLP */ pllm = RCC->PLLCFGR & RCC_PLLCFGR_PLLM; - if(__HAL_RCC_GET_PLL_OSCSOURCE() != RCC_PLLSOURCE_HSI) + if (__HAL_RCC_GET_PLL_OSCSOURCE() != RCC_PLLSOURCE_HSI) { /* HSE used as PLL clock source */ - pllvco = (uint32_t) ((((uint64_t) HSE_VALUE * ((uint64_t) ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + pllvco = (uint32_t)((((uint64_t) HSE_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); } else { /* HSI used as PLL clock source */ - pllvco = (uint32_t) ((((uint64_t) HSI_VALUE * ((uint64_t) ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + pllvco = (uint32_t)((((uint64_t) HSI_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 
RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); } - pllp = ((((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) >> RCC_PLLCFGR_PLLP_Pos) + 1U) *2U); + pllp = ((((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) >> RCC_PLLCFGR_PLLP_Pos) + 1U) * 2U); - sysclockfreq = pllvco/pllp; + sysclockfreq = pllvco / pllp; break; } default: @@ -943,7 +954,7 @@ uint32_t HAL_RCC_GetHCLKFreq(void) uint32_t HAL_RCC_GetPCLK1Freq(void) { /* Get HCLK source and Compute PCLK1 frequency ---------------------------*/ - return (HAL_RCC_GetHCLKFreq() >> APBPrescTable[(RCC->CFGR & RCC_CFGR_PPRE1)>> RCC_CFGR_PPRE1_Pos]); + return (HAL_RCC_GetHCLKFreq() >> APBPrescTable[(RCC->CFGR & RCC_CFGR_PPRE1) >> RCC_CFGR_PPRE1_Pos]); } /** @@ -955,7 +966,7 @@ uint32_t HAL_RCC_GetPCLK1Freq(void) uint32_t HAL_RCC_GetPCLK2Freq(void) { /* Get HCLK source and Compute PCLK2 frequency ---------------------------*/ - return (HAL_RCC_GetHCLKFreq()>> APBPrescTable[(RCC->CFGR & RCC_CFGR_PPRE2)>> RCC_CFGR_PPRE2_Pos]); + return (HAL_RCC_GetHCLKFreq() >> APBPrescTable[(RCC->CFGR & RCC_CFGR_PPRE2) >> RCC_CFGR_PPRE2_Pos]); } /** @@ -971,11 +982,11 @@ __weak void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) RCC_OscInitStruct->OscillatorType = RCC_OSCILLATORTYPE_HSE | RCC_OSCILLATORTYPE_HSI | RCC_OSCILLATORTYPE_LSE | RCC_OSCILLATORTYPE_LSI; /* Get the HSE configuration -----------------------------------------------*/ - if((RCC->CR &RCC_CR_HSEBYP) == RCC_CR_HSEBYP) + if ((RCC->CR & RCC_CR_HSEBYP) == RCC_CR_HSEBYP) { RCC_OscInitStruct->HSEState = RCC_HSE_BYPASS; } - else if((RCC->CR &RCC_CR_HSEON) == RCC_CR_HSEON) + else if ((RCC->CR & RCC_CR_HSEON) == RCC_CR_HSEON) { RCC_OscInitStruct->HSEState = RCC_HSE_ON; } @@ -985,7 +996,7 @@ __weak void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) } /* Get the HSI configuration -----------------------------------------------*/ - if((RCC->CR &RCC_CR_HSION) == RCC_CR_HSION) + if ((RCC->CR & RCC_CR_HSION) == RCC_CR_HSION) { RCC_OscInitStruct->HSIState = RCC_HSI_ON; } @@ -994,14 +1005,14 @@ __weak void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) RCC_OscInitStruct->HSIState = RCC_HSI_OFF; } - RCC_OscInitStruct->HSICalibrationValue = (uint32_t)((RCC->CR &RCC_CR_HSITRIM) >> RCC_CR_HSITRIM_Pos); + RCC_OscInitStruct->HSICalibrationValue = (uint32_t)((RCC->CR & RCC_CR_HSITRIM) >> RCC_CR_HSITRIM_Pos); /* Get the LSE configuration -----------------------------------------------*/ - if((RCC->BDCR &RCC_BDCR_LSEBYP) == RCC_BDCR_LSEBYP) + if ((RCC->BDCR & RCC_BDCR_LSEBYP) == RCC_BDCR_LSEBYP) { RCC_OscInitStruct->LSEState = RCC_LSE_BYPASS; } - else if((RCC->BDCR &RCC_BDCR_LSEON) == RCC_BDCR_LSEON) + else if ((RCC->BDCR & RCC_BDCR_LSEON) == RCC_BDCR_LSEON) { RCC_OscInitStruct->LSEState = RCC_LSE_ON; } @@ -1011,7 +1022,7 @@ __weak void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) } /* Get the LSI configuration -----------------------------------------------*/ - if((RCC->CSR &RCC_CSR_LSION) == RCC_CSR_LSION) + if ((RCC->CSR & RCC_CSR_LSION) == RCC_CSR_LSION) { RCC_OscInitStruct->LSIState = RCC_LSI_ON; } @@ -1021,7 +1032,7 @@ __weak void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) } /* Get the PLL configuration -----------------------------------------------*/ - if((RCC->CR &RCC_CR_PLLON) == RCC_CR_PLLON) + if ((RCC->CR & RCC_CR_PLLON) == RCC_CR_PLLON) { RCC_OscInitStruct->PLL.PLLState = RCC_PLL_ON; } @@ -1073,7 +1084,7 @@ void HAL_RCC_GetClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t *pF void HAL_RCC_NMI_IRQHandler(void) { /* Check RCC CSSF flag */ - 
if(__HAL_RCC_GET_IT(RCC_IT_CSS)) + if (__HAL_RCC_GET_IT(RCC_IT_CSS)) { /* RCC Clock Security System interrupt user callback */ HAL_RCC_CSSCallback(); @@ -1111,4 +1122,3 @@ __weak void HAL_RCC_CSSCallback(void) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc_ex.c index 0d3aacc8..7b3b20bb 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc_ex.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rcc_ex.c @@ -10,14 +10,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2017 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** */ @@ -52,8 +50,8 @@ */ /** @defgroup RCCEx_Exported_Functions_Group1 Extended Peripheral Control functions - * @brief Extended Peripheral Control functions - * + * @brief Extended Peripheral Control functions + * @verbatim =============================================================================== ##### Extended Peripheral Control functions ##### @@ -102,7 +100,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); /*------------------------ I2S APB1 configuration --------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB1) == (RCC_PERIPHCLK_I2S_APB1)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB1) == (RCC_PERIPHCLK_I2S_APB1)) { /* Check the parameters */ assert_param(IS_RCC_I2SAPB1CLKSOURCE(PeriphClkInit->I2sApb1ClockSelection)); @@ -110,7 +108,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure I2S Clock source */ __HAL_RCC_I2S_APB1_CONFIG(PeriphClkInit->I2sApb1ClockSelection); /* Enable the PLLI2S when it's used as clock source for I2S */ - if(PeriphClkInit->I2sApb1ClockSelection == RCC_I2SAPB1CLKSOURCE_PLLI2S) + if (PeriphClkInit->I2sApb1ClockSelection == RCC_I2SAPB1CLKSOURCE_PLLI2S) { plli2sused = 1U; } @@ -118,7 +116,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*---------------------------- I2S APB2 configuration ----------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB2) == (RCC_PERIPHCLK_I2S_APB2)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB2) == (RCC_PERIPHCLK_I2S_APB2)) { /* Check the parameters */ assert_param(IS_RCC_I2SAPB2CLKSOURCE(PeriphClkInit->I2sApb2ClockSelection)); @@ -126,7 +124,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure I2S Clock source */ __HAL_RCC_I2S_APB2_CONFIG(PeriphClkInit->I2sApb2ClockSelection); /* Enable the PLLI2S when it's used as clock source for I2S */ - if(PeriphClkInit->I2sApb2ClockSelection == RCC_I2SAPB2CLKSOURCE_PLLI2S) + if (PeriphClkInit->I2sApb2ClockSelection == RCC_I2SAPB2CLKSOURCE_PLLI2S) { plli2sused = 1U; } @@ -134,7 +132,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*--------------------------- SAI1 configuration ---------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == (RCC_PERIPHCLK_SAI1)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == (RCC_PERIPHCLK_SAI1)) { /* Check the parameters */ assert_param(IS_RCC_SAI1CLKSOURCE(PeriphClkInit->Sai1ClockSelection)); @@ -142,12 +140,12 @@ HAL_StatusTypeDef 
HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure SAI1 Clock source */ __HAL_RCC_SAI1_CONFIG(PeriphClkInit->Sai1ClockSelection); /* Enable the PLLI2S when it's used as clock source for SAI */ - if(PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLI2S) + if (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLI2S) { plli2sused = 1U; } /* Enable the PLLSAI when it's used as clock source for SAI */ - if(PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLSAI) + if (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLSAI) { pllsaiused = 1U; } @@ -155,7 +153,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*-------------------------- SAI2 configuration ----------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == (RCC_PERIPHCLK_SAI2)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == (RCC_PERIPHCLK_SAI2)) { /* Check the parameters */ assert_param(IS_RCC_SAI2CLKSOURCE(PeriphClkInit->Sai2ClockSelection)); @@ -164,12 +162,12 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk __HAL_RCC_SAI2_CONFIG(PeriphClkInit->Sai2ClockSelection); /* Enable the PLLI2S when it's used as clock source for SAI */ - if(PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLI2S) + if (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLI2S) { plli2sused = 1U; } /* Enable the PLLSAI when it's used as clock source for SAI */ - if(PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLSAI) + if (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLSAI) { pllsaiused = 1U; } @@ -177,7 +175,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*----------------------------- RTC configuration --------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) { /* Check for RTC Parameters used to output RTCCLK */ assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); @@ -191,16 +189,16 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); - while((PWR->CR & PWR_CR_DBP) == RESET) + while ((PWR->CR & PWR_CR_DBP) == RESET) { - if((HAL_GetTick() - tickstart ) > RCC_DBP_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) { return HAL_TIMEOUT; } } /* Reset the Backup domain only if the RTC Clock source selection is modified from reset value */ tmpreg1 = (RCC->BDCR & RCC_BDCR_RTCSEL); - if((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + if ((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) { /* Store the content of BDCR register before the reset of Backup Domain */ tmpreg1 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); @@ -211,15 +209,15 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk RCC->BDCR = tmpreg1; /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ - if(HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) { /* Get tick */ tickstart = HAL_GetTick(); /* Wait till LSE is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) 
== RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) { - if((HAL_GetTick() - tickstart ) > RCC_LSE_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -231,7 +229,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*---------------------------- TIM configuration ---------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) { /* Configure Timer Prescaler */ __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); @@ -239,7 +237,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*---------------------------- FMPI2C1 Configuration -----------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_FMPI2C1) == RCC_PERIPHCLK_FMPI2C1) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_FMPI2C1) == RCC_PERIPHCLK_FMPI2C1) { /* Check the parameters */ assert_param(IS_RCC_FMPI2C1CLKSOURCE(PeriphClkInit->Fmpi2c1ClockSelection)); @@ -250,7 +248,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*------------------------------ CEC Configuration -------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CEC) == RCC_PERIPHCLK_CEC) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CEC) == RCC_PERIPHCLK_CEC) { /* Check the parameters */ assert_param(IS_RCC_CECCLKSOURCE(PeriphClkInit->CecClockSelection)); @@ -261,7 +259,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*----------------------------- CLK48 Configuration ------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) { /* Check the parameters */ assert_param(IS_RCC_CLK48CLKSOURCE(PeriphClkInit->Clk48ClockSelection)); @@ -270,7 +268,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk __HAL_RCC_CLK48_CONFIG(PeriphClkInit->Clk48ClockSelection); /* Enable the PLLSAI when it's used as clock source for CLK48 */ - if(PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLSAIP) + if (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLSAIP) { pllsaiused = 1U; } @@ -278,7 +276,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*----------------------------- SDIO Configuration -------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDIO) == RCC_PERIPHCLK_SDIO) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDIO) == RCC_PERIPHCLK_SDIO) { /* Check the parameters */ assert_param(IS_RCC_SDIOCLKSOURCE(PeriphClkInit->SdioClockSelection)); @@ -289,7 +287,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*------------------------------ SPDIFRX Configuration ---------------------*/ - 
if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) { /* Check the parameters */ assert_param(IS_RCC_SPDIFRXCLKSOURCE(PeriphClkInit->SpdifClockSelection)); @@ -297,7 +295,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure the SPDIFRX clock source */ __HAL_RCC_SPDIFRX_CONFIG(PeriphClkInit->SpdifClockSelection); /* Enable the PLLI2S when it's used as clock source for SPDIFRX */ - if(PeriphClkInit->SpdifClockSelection == RCC_SPDIFRXCLKSOURCE_PLLI2SP) + if (PeriphClkInit->SpdifClockSelection == RCC_SPDIFRXCLKSOURCE_PLLI2SP) { plli2sused = 1U; } @@ -307,16 +305,16 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*---------------------------- PLLI2S Configuration ------------------------*/ /* PLLI2S is configured when a peripheral will use it as source clock : SAI1, SAI2, I2S on APB1, I2S on APB2 or SPDIFRX */ - if((plli2sused == 1U) || (PeriphClkInit->PeriphClockSelection == RCC_PERIPHCLK_PLLI2S)) + if ((plli2sused == 1U) || (PeriphClkInit->PeriphClockSelection == RCC_PERIPHCLK_PLLI2S)) { /* Disable the PLLI2S */ __HAL_RCC_PLLI2S_DISABLE(); /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLI2S is disabled */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) { - if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -328,8 +326,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk assert_param(IS_RCC_PLLI2SN_VALUE(PeriphClkInit->PLLI2S.PLLI2SN)); /*------ In Case of PLLI2S is selected as source clock for I2S -----------*/ - if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB1) == RCC_PERIPHCLK_I2S_APB1) && (PeriphClkInit->I2sApb1ClockSelection == RCC_I2SAPB1CLKSOURCE_PLLI2S)) || - ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB2) == RCC_PERIPHCLK_I2S_APB2) && (PeriphClkInit->I2sApb2ClockSelection == RCC_I2SAPB2CLKSOURCE_PLLI2S))) + if (((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB1) == RCC_PERIPHCLK_I2S_APB1) + && (PeriphClkInit->I2sApb1ClockSelection == RCC_I2SAPB1CLKSOURCE_PLLI2S)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB2) == RCC_PERIPHCLK_I2S_APB2) && (PeriphClkInit->I2sApb2ClockSelection == RCC_I2SAPB2CLKSOURCE_PLLI2S))) { /* check for Parameters */ assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); @@ -340,12 +339,14 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure the PLLI2S division factors */ /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM) */ /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ - __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN , plli2sp, plli2sq, PeriphClkInit->PLLI2S.PLLI2SR); + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, plli2sp, plli2sq, + PeriphClkInit->PLLI2S.PLLI2SR); } /*------- In Case of PLLI2S is selected as source clock for SAI ----------*/ - if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) && (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLI2S)) || - ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == 
RCC_PERIPHCLK_SAI2) && (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLI2S))) + if (((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) + && (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLI2S)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) && (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLI2S))) { /* Check for PLLI2S Parameters */ assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); @@ -359,14 +360,16 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ - __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN , plli2sp, PeriphClkInit->PLLI2S.PLLI2SQ, plli2sr); + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, plli2sp, + PeriphClkInit->PLLI2S.PLLI2SQ, plli2sr); /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ __HAL_RCC_PLLI2S_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLI2SDivQ); } /*------ In Case of PLLI2S is selected as source clock for SPDIFRX -------*/ - if((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) && (PeriphClkInit->SpdifClockSelection == RCC_SPDIFRXCLKSOURCE_PLLI2SP)) + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) + && (PeriphClkInit->SpdifClockSelection == RCC_SPDIFRXCLKSOURCE_PLLI2SP)) { /* check for Parameters */ assert_param(IS_RCC_PLLI2SP_VALUE(PeriphClkInit->PLLI2S.PLLI2SP)); @@ -376,11 +379,12 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure the PLLI2S division factors */ /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM) */ /* SPDIFRXCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SP */ - __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN , PeriphClkInit->PLLI2S.PLLI2SP, plli2sq, plli2sr); + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SP, + plli2sq, plli2sr); } - /*----------------- In Case of PLLI2S is just selected -----------------*/ - if((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) + /*----------------- In Case of PLLI2S is just selected -----------------*/ + if ((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) { /* Check for Parameters */ assert_param(IS_RCC_PLLI2SP_VALUE(PeriphClkInit->PLLI2S.PLLI2SP)); @@ -389,7 +393,8 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure the PLLI2S division factors */ /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM) */ - __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN , PeriphClkInit->PLLI2S.PLLI2SP, PeriphClkInit->PLLI2S.PLLI2SQ, PeriphClkInit->PLLI2S.PLLI2SR); + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SP, + PeriphClkInit->PLLI2S.PLLI2SQ, PeriphClkInit->PLLI2S.PLLI2SR); } /* Enable the PLLI2S */ @@ -397,9 +402,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLI2S is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + while 
(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) { - if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -410,16 +415,16 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*----------------------------- PLLSAI Configuration -----------------------*/ /* PLLSAI is configured when a peripheral will use it as source clock : SAI1, SAI2, CLK48 or SDIO */ - if(pllsaiused == 1U) + if (pllsaiused == 1U) { /* Disable PLLSAI Clock */ __HAL_RCC_PLLSAI_DISABLE(); /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLSAI is disabled */ - while(__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + while (__HAL_RCC_PLLSAI_GET_FLAG() != RESET) { - if((HAL_GetTick() - tickstart ) > PLLSAI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -431,8 +436,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk assert_param(IS_RCC_PLLSAIN_VALUE(PeriphClkInit->PLLSAI.PLLSAIN)); /*------ In Case of PLLSAI is selected as source clock for SAI -----------*/ - if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) && (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLSAI)) || - ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) && (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLSAI))) + if (((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) + && (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLSAI)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) && (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLSAI))) { /* check for PLLSAIQ Parameter */ assert_param(IS_RCC_PLLSAIQ_VALUE(PeriphClkInit->PLLSAI.PLLSAIQ)); @@ -444,7 +450,8 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ - __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIM, PeriphClkInit->PLLSAI.PLLSAIN , pllsaip, PeriphClkInit->PLLSAI.PLLSAIQ, 0U); + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIM, PeriphClkInit->PLLSAI.PLLSAIN, pllsaip, + PeriphClkInit->PLLSAI.PLLSAIQ, 0U); /* SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ __HAL_RCC_PLLSAI_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLSAIDivQ); @@ -452,7 +459,8 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*------ In Case of PLLSAI is selected as source clock for CLK48 ---------*/ /* In Case of PLLI2S is selected as source clock for CLK48 */ - if((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) && (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLSAIP)) + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) + && (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLSAIP)) { /* check for Parameters */ assert_param(IS_RCC_PLLSAIP_VALUE(PeriphClkInit->PLLSAI.PLLSAIP)); @@ -461,7 +469,8 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure the PLLSAI division factors */ /* PLLSAI_VCO = f(VCO clock) = f(PLLSAI clock input) * (PLLI2SN/PLLSAIM) */ /* 48CLK = f(PLLSAI clock output) = f(VCO clock) / PLLSAIP */ - 
__HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIM, PeriphClkInit->PLLSAI.PLLSAIN , PeriphClkInit->PLLSAI.PLLSAIP, pllsaiq, 0U); + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIM, PeriphClkInit->PLLSAI.PLLSAIN, PeriphClkInit->PLLSAI.PLLSAIP, + pllsaiq, 0U); } /* Enable PLLSAI Clock */ @@ -469,9 +478,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLSAI is ready */ - while(__HAL_RCC_PLLSAI_GET_FLAG() == RESET) + while (__HAL_RCC_PLLSAI_GET_FLAG() == RESET) { - if((HAL_GetTick() - tickstart ) > PLLSAI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -493,11 +502,11 @@ void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) uint32_t tempreg; /* Set all possible values for the extended clock type parameter------------*/ - PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S_APB1 | RCC_PERIPHCLK_I2S_APB2 |\ - RCC_PERIPHCLK_SAI1 | RCC_PERIPHCLK_SAI2 |\ - RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC |\ - RCC_PERIPHCLK_CEC | RCC_PERIPHCLK_FMPI2C1 |\ - RCC_PERIPHCLK_CLK48 | RCC_PERIPHCLK_SDIO |\ + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S_APB1 | RCC_PERIPHCLK_I2S_APB2 | \ + RCC_PERIPHCLK_SAI1 | RCC_PERIPHCLK_SAI2 | \ + RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC | \ + RCC_PERIPHCLK_CEC | RCC_PERIPHCLK_FMPI2C1 | \ + RCC_PERIPHCLK_CLK48 | RCC_PERIPHCLK_SDIO | \ RCC_PERIPHCLK_SPDIFRX; /* Get the PLLI2S Clock configuration --------------------------------------*/ @@ -582,18 +591,18 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) uint32_t vcooutput = 0U; switch (PeriphClk) { - case RCC_PERIPHCLK_SAI1: - case RCC_PERIPHCLK_SAI2: + case RCC_PERIPHCLK_SAI1: + case RCC_PERIPHCLK_SAI2: { saiclocksource = RCC->DCKCFGR; saiclocksource &= (RCC_DCKCFGR_SAI1SRC | RCC_DCKCFGR_SAI2SRC); switch (saiclocksource) { - case 0U: /* PLLSAI is the clock source for SAI*/ + case 0U: /* PLLSAI is the clock source for SAI*/ { /* Configure the PLLSAI division factor */ /* PLLSAI_VCO Input = PLL_SOURCE/PLLSAIM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) { /* In Case the PLL Source is HSI (Internal Clock) */ vcoinput = (HSI_VALUE / (uint32_t)(RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIM)); @@ -606,19 +615,19 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ tmpreg1 = (RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> 24U; - frequency = (vcoinput * ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIN) >> 6U))/(tmpreg1); + frequency = (vcoinput * ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIN) >> 6U)) / (tmpreg1); /* SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ tmpreg1 = (((RCC->DCKCFGR & RCC_DCKCFGR_PLLSAIDIVQ) >> 8U) + 1U); - frequency = frequency/(tmpreg1); + frequency = frequency / (tmpreg1); break; } - case RCC_DCKCFGR_SAI1SRC_0: /* PLLI2S is the clock source for SAI*/ - case RCC_DCKCFGR_SAI2SRC_0: /* PLLI2S is the clock source for SAI*/ + case RCC_DCKCFGR_SAI1SRC_0: /* PLLI2S is the clock source for SAI*/ + case RCC_DCKCFGR_SAI2SRC_0: /* PLLI2S is the clock source for SAI*/ { /* Configure the PLLI2S division factor */ /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) { /* In Case the PLL Source is HSI 
(Internal Clock) */ vcoinput = (HSI_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); @@ -632,19 +641,19 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ tmpreg1 = (RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> 24U; - frequency = (vcoinput * ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U))/(tmpreg1); + frequency = (vcoinput * ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U)) / (tmpreg1); /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ tmpreg1 = ((RCC->DCKCFGR & RCC_DCKCFGR_PLLI2SDIVQ) + 1U); - frequency = frequency/(tmpreg1); + frequency = frequency / (tmpreg1); break; } - case RCC_DCKCFGR_SAI1SRC_1: /* PLLR is the clock source for SAI*/ - case RCC_DCKCFGR_SAI2SRC_1: /* PLLR is the clock source for SAI*/ + case RCC_DCKCFGR_SAI1SRC_1: /* PLLR is the clock source for SAI*/ + case RCC_DCKCFGR_SAI2SRC_1: /* PLLR is the clock source for SAI*/ { /* Configure the PLLI2S division factor */ /* PLL_VCO Input = PLL_SOURCE/PLLM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) { /* In Case the PLL Source is HSI (Internal Clock) */ vcoinput = (HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); @@ -658,17 +667,17 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLL_VCO Output = PLL_VCO Input * PLLN */ /* SAI_CLK_x = PLL_VCO Output/PLLR */ tmpreg1 = (RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U; - frequency = (vcoinput * ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6U))/(tmpreg1); + frequency = (vcoinput * ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6U)) / (tmpreg1); break; } - case RCC_DCKCFGR_SAI1SRC: /* External clock is the clock source for SAI*/ + case RCC_DCKCFGR_SAI1SRC: /* External clock is the clock source for SAI*/ { frequency = EXTERNAL_CLOCK_VALUE; break; } - case RCC_DCKCFGR_SAI2SRC: /* PLLSRC(HSE or HSI) is the clock source for SAI*/ + case RCC_DCKCFGR_SAI2SRC: /* PLLSRC(HSE or HSI) is the clock source for SAI*/ { - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) { /* In Case the PLL Source is HSI (Internal Clock) */ frequency = (uint32_t)(HSI_VALUE); @@ -680,32 +689,32 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) } break; } - default : + default : { break; } } break; } - case RCC_PERIPHCLK_I2S_APB1: + case RCC_PERIPHCLK_I2S_APB1: { /* Get the current I2S source */ srcclk = __HAL_RCC_GET_I2S_APB1_SOURCE(); switch (srcclk) { - /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ - case RCC_I2SAPB1CLKSOURCE_EXT: + /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ + case RCC_I2SAPB1CLKSOURCE_EXT: { /* Set the I2S clock to the external clock value */ frequency = EXTERNAL_CLOCK_VALUE; break; } - /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ - case RCC_I2SAPB1CLKSOURCE_PLLI2S: + /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ + case RCC_I2SAPB1CLKSOURCE_PLLI2S: { /* Configure the PLLI2S division factor */ /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); 
@@ -719,15 +728,15 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ vcooutput = (uint32_t)(vcoinput * (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U) & (RCC_PLLI2SCFGR_PLLI2SN >> 6U))); /* I2S_CLK = PLLI2S_VCO Output/PLLI2SR */ - frequency = (uint32_t)(vcooutput /(((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); + frequency = (uint32_t)(vcooutput / (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); break; } - /* Check if I2S clock selection is PLL VCO Output divided by PLLR used as I2S clock */ - case RCC_I2SAPB1CLKSOURCE_PLLR: + /* Check if I2S clock selection is PLL VCO Output divided by PLLR used as I2S clock */ + case RCC_I2SAPB1CLKSOURCE_PLLR: { /* Configure the PLL division factor R */ /* PLL_VCO Input = PLL_SOURCE/PLLM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); @@ -741,13 +750,13 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLL_VCO Output = PLL_VCO Input * PLLN */ vcooutput = (uint32_t)(vcoinput * (((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6U) & (RCC_PLLCFGR_PLLN >> 6U))); /* I2S_CLK = PLL_VCO Output/PLLR */ - frequency = (uint32_t)(vcooutput /(((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); + frequency = (uint32_t)(vcooutput / (((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); break; } - /* Check if I2S clock selection is HSI or HSE depending from PLL source Clock */ - case RCC_I2SAPB1CLKSOURCE_PLLSRC: + /* Check if I2S clock selection is HSI or HSE depending from PLL source Clock */ + case RCC_I2SAPB1CLKSOURCE_PLLSRC: { - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { frequency = HSE_VALUE; } @@ -758,7 +767,7 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) break; } /* Clock not enabled for I2S*/ - default: + default: { frequency = 0U; break; @@ -766,25 +775,25 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) } break; } - case RCC_PERIPHCLK_I2S_APB2: + case RCC_PERIPHCLK_I2S_APB2: { /* Get the current I2S source */ srcclk = __HAL_RCC_GET_I2S_APB2_SOURCE(); switch (srcclk) { /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ - case RCC_I2SAPB2CLKSOURCE_EXT: + case RCC_I2SAPB2CLKSOURCE_EXT: { /* Set the I2S clock to the external clock value */ frequency = EXTERNAL_CLOCK_VALUE; break; } /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ - case RCC_I2SAPB2CLKSOURCE_PLLI2S: + case RCC_I2SAPB2CLKSOURCE_PLLI2S: { /* Configure the PLLI2S division factor */ /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); @@ -798,15 +807,15 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ vcooutput = (uint32_t)(vcoinput * (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U) & (RCC_PLLI2SCFGR_PLLI2SN >> 6U))); /* I2S_CLK = PLLI2S_VCO Output/PLLI2SR */ - frequency = (uint32_t)(vcooutput 
/(((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); + frequency = (uint32_t)(vcooutput / (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); break; } /* Check if I2S clock selection is PLL VCO Output divided by PLLR used as I2S clock */ - case RCC_I2SAPB2CLKSOURCE_PLLR: + case RCC_I2SAPB2CLKSOURCE_PLLR: { /* Configure the PLL division factor R */ /* PLL_VCO Input = PLL_SOURCE/PLLM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); @@ -820,13 +829,13 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLL_VCO Output = PLL_VCO Input * PLLN */ vcooutput = (uint32_t)(vcoinput * (((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6U) & (RCC_PLLCFGR_PLLN >> 6U))); /* I2S_CLK = PLL_VCO Output/PLLR */ - frequency = (uint32_t)(vcooutput /(((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); + frequency = (uint32_t)(vcooutput / (((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); break; } /* Check if I2S clock selection is HSI or HSE depending from PLL source Clock */ - case RCC_I2SAPB2CLKSOURCE_PLLSRC: + case RCC_I2SAPB2CLKSOURCE_PLLSRC: { - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { frequency = HSE_VALUE; } @@ -837,7 +846,7 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) break; } /* Clock not enabled for I2S*/ - default: + default: { frequency = 0U; break; @@ -845,6 +854,10 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) } break; } + default: + { + break; + } } return frequency; } @@ -877,7 +890,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); /*--------------------------- CLK48 Configuration --------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) { /* Check the parameters */ assert_param(IS_RCC_CLK48CLKSOURCE(PeriphClkInit->Clk48ClockSelection)); @@ -888,7 +901,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*------------------------------ SDIO Configuration ------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDIO) == RCC_PERIPHCLK_SDIO) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDIO) == RCC_PERIPHCLK_SDIO) { /* Check the parameters */ assert_param(IS_RCC_SDIOCLKSOURCE(PeriphClkInit->SdioClockSelection)); @@ -902,9 +915,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*------------------- Common configuration SAI/I2S -------------------------*/ /* In Case of SAI or I2S Clock Configuration through PLLI2S, PLLI2SN division factor is common parameters for both peripherals */ - if((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || - (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLI2S) == RCC_PERIPHCLK_SAI_PLLI2S) || - (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S)) + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == 
RCC_PERIPHCLK_I2S) || + (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLI2S) == RCC_PERIPHCLK_SAI_PLLI2S) || + (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S)) { /* check for Parameters */ assert_param(IS_RCC_PLLI2SN_VALUE(PeriphClkInit->PLLI2S.PLLI2SN)); @@ -914,9 +927,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLI2S is disabled */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) { - if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -926,20 +939,20 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*---------------------- I2S configuration -------------------------------*/ /* In Case of I2S Clock Configuration through PLLI2S, PLLI2SR must be added only for I2S configuration */ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == (RCC_PERIPHCLK_I2S)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == (RCC_PERIPHCLK_I2S)) { /* check for Parameters */ assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); /* Configure the PLLI2S division factors */ /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) x (PLLI2SN/PLLM) */ /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ - __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , PeriphClkInit->PLLI2S.PLLI2SR); + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SR); } /*---------------------------- SAI configuration -------------------------*/ /* In Case of SAI Clock Configuration through PLLI2S, PLLI2SQ and PLLI2S_DIVQ must be added only for SAI configuration */ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLI2S) == (RCC_PERIPHCLK_SAI_PLLI2S)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLI2S) == (RCC_PERIPHCLK_SAI_PLLI2S)) { /* Check the PLLI2S division factors */ assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); @@ -951,20 +964,21 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ - __HAL_RCC_PLLI2S_SAICLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , PeriphClkInit->PLLI2S.PLLI2SQ , tmpreg1); + __HAL_RCC_PLLI2S_SAICLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, tmpreg1); /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ __HAL_RCC_PLLI2S_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLI2SDivQ); } /*----------------- In Case of PLLI2S is just selected -----------------*/ - if((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) + if ((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) { /* Check for Parameters */ assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); /* Configure the PLLI2S multiplication and division factors */ - __HAL_RCC_PLLI2S_SAICLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, PeriphClkInit->PLLI2S.PLLI2SR); + __HAL_RCC_PLLI2S_SAICLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, + PeriphClkInit->PLLI2S.PLLI2SR); } /* Enable 
the PLLI2S */ @@ -972,9 +986,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLI2S is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) { - if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -987,10 +1001,10 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*----------------------- Common configuration SAI/LTDC --------------------*/ /* In Case of SAI, LTDC or CLK48 Clock Configuration through PLLSAI, PLLSAIN division factor is common parameters for these peripherals */ - if((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLSAI) == RCC_PERIPHCLK_SAI_PLLSAI) || - (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == RCC_PERIPHCLK_LTDC) || - ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) && - (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLSAIP))) + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLSAI) == RCC_PERIPHCLK_SAI_PLLSAI) || + (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == RCC_PERIPHCLK_LTDC) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) && + (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLSAIP))) { /* Check the PLLSAI division factors */ assert_param(IS_RCC_PLLSAIN_VALUE(PeriphClkInit->PLLSAI.PLLSAIN)); @@ -1000,9 +1014,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLSAI is disabled */ - while(__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + while (__HAL_RCC_PLLSAI_GET_FLAG() != RESET) { - if((HAL_GetTick() - tickstart ) > PLLSAI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -1012,7 +1026,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*---------------------------- SAI configuration -------------------------*/ /* In Case of SAI Clock Configuration through PLLSAI, PLLSAIQ and PLLSAI_DIVQ must be added only for SAI configuration */ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLSAI) == (RCC_PERIPHCLK_SAI_PLLSAI)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLSAI) == (RCC_PERIPHCLK_SAI_PLLSAI)) { assert_param(IS_RCC_PLLSAIQ_VALUE(PeriphClkInit->PLLSAI.PLLSAIQ)); assert_param(IS_RCC_PLLSAI_DIVQ_VALUE(PeriphClkInit->PLLSAIDivQ)); @@ -1030,7 +1044,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk } /*---------------------------- LTDC configuration ------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == (RCC_PERIPHCLK_LTDC)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == (RCC_PERIPHCLK_LTDC)) { assert_param(IS_RCC_PLLSAIR_VALUE(PeriphClkInit->PLLSAI.PLLSAIR)); assert_param(IS_RCC_PLLSAI_DIVR_VALUE(PeriphClkInit->PLLSAIDivR)); @@ -1049,8 +1063,8 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*---------------------------- CLK48 configuration ------------------------*/ /* Configure the PLLSAI when it is used as clock source for CLK48 */ - if((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == 
(RCC_PERIPHCLK_CLK48)) && - (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLSAIP)) + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == (RCC_PERIPHCLK_CLK48)) && + (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLSAIP)) { assert_param(IS_RCC_PLLSAIP_VALUE(PeriphClkInit->PLLSAI.PLLSAIP)); @@ -1069,9 +1083,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLSAI is ready */ - while(__HAL_RCC_PLLSAI_GET_FLAG() == RESET) + while (__HAL_RCC_PLLSAI_GET_FLAG() == RESET) { - if((HAL_GetTick() - tickstart ) > PLLSAI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -1082,7 +1096,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*---------------------------- RTC configuration ---------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) { /* Check for RTC Parameters used to output RTCCLK */ assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); @@ -1096,16 +1110,16 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); - while((PWR->CR & PWR_CR_DBP) == RESET) + while ((PWR->CR & PWR_CR_DBP) == RESET) { - if((HAL_GetTick() - tickstart ) > RCC_DBP_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) { return HAL_TIMEOUT; } } /* Reset the Backup domain only if the RTC Clock source selection is modified from reset value */ tmpreg1 = (RCC->BDCR & RCC_BDCR_RTCSEL); - if((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + if ((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) { /* Store the content of BDCR register before the reset of Backup Domain */ tmpreg1 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); @@ -1116,15 +1130,15 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk RCC->BDCR = tmpreg1; /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ - if(HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) { /* Get tick */ tickstart = HAL_GetTick(); /* Wait till LSE is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) { - if((HAL_GetTick() - tickstart ) > RCC_LSE_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -1136,7 +1150,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*---------------------------- TIM configuration ---------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) { __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); } @@ -1155,9 +1169,9 @@ void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) uint32_t tempreg; /* Set all possible values for the extended clock type parameter------------*/ - PeriphClkInit->PeriphClockSelection = 
RCC_PERIPHCLK_I2S | RCC_PERIPHCLK_SAI_PLLSAI |\ - RCC_PERIPHCLK_SAI_PLLI2S | RCC_PERIPHCLK_LTDC |\ - RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC |\ + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S | RCC_PERIPHCLK_SAI_PLLSAI | \ + RCC_PERIPHCLK_SAI_PLLI2S | RCC_PERIPHCLK_LTDC | \ + RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC | \ RCC_PERIPHCLK_CLK48 | RCC_PERIPHCLK_SDIO; /* Get the PLLI2S Clock configuration --------------------------------------*/ @@ -1176,7 +1190,7 @@ void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) tempreg = (RCC->CFGR & RCC_CFGR_RTCPRE); PeriphClkInit->RTCClockSelection = (uint32_t)((tempreg) | (RCC->BDCR & RCC_BDCR_RTCSEL)); - /* Get the CLK48 clock configuration -------------------------------------*/ + /* Get the CLK48 clock configuration -------------------------------------*/ PeriphClkInit->Clk48ClockSelection = __HAL_RCC_GET_CLK48_SOURCE(); /* Get the SDIO clock configuration ----------------------------------------*/ @@ -1211,25 +1225,25 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) uint32_t vcooutput = 0U; switch (PeriphClk) { - case RCC_PERIPHCLK_I2S: + case RCC_PERIPHCLK_I2S: { /* Get the current I2S source */ srcclk = __HAL_RCC_GET_I2S_SOURCE(); switch (srcclk) { - /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ - case RCC_I2SCLKSOURCE_EXT: + /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ + case RCC_I2SCLKSOURCE_EXT: { /* Set the I2S clock to the external clock value */ frequency = EXTERNAL_CLOCK_VALUE; break; } - /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ - case RCC_I2SCLKSOURCE_PLLI2S: + /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ + case RCC_I2SCLKSOURCE_PLLI2S: { /* Configure the PLLI2S division factor */ /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); @@ -1243,11 +1257,11 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ vcooutput = (uint32_t)(vcoinput * (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U) & (RCC_PLLI2SCFGR_PLLI2SN >> 6U))); /* I2S_CLK = PLLI2S_VCO Output/PLLI2SR */ - frequency = (uint32_t)(vcooutput /(((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); + frequency = (uint32_t)(vcooutput / (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); break; } /* Clock not enabled for I2S*/ - default: + default: { frequency = 0U; break; @@ -1255,6 +1269,10 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) } break; } + default: + { + break; + } } return frequency; } @@ -1288,7 +1306,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); /*----------------------------------- I2S APB1 configuration ---------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB1) == (RCC_PERIPHCLK_I2S_APB1)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB1) == (RCC_PERIPHCLK_I2S_APB1)) { /* Check the parameters */ assert_param(IS_RCC_I2SAPB1CLKSOURCE(PeriphClkInit->I2sApb1ClockSelection)); @@ -1296,7 
+1314,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure I2S Clock source */ __HAL_RCC_I2S_APB1_CONFIG(PeriphClkInit->I2sApb1ClockSelection); /* Enable the PLLI2S when it's used as clock source for I2S */ - if(PeriphClkInit->I2sApb1ClockSelection == RCC_I2SAPB1CLKSOURCE_PLLI2S) + if (PeriphClkInit->I2sApb1ClockSelection == RCC_I2SAPB1CLKSOURCE_PLLI2S) { plli2sused = 1U; } @@ -1304,7 +1322,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*----------------------------------- I2S APB2 configuration ---------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB2) == (RCC_PERIPHCLK_I2S_APB2)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB2) == (RCC_PERIPHCLK_I2S_APB2)) { /* Check the parameters */ assert_param(IS_RCC_I2SAPB2CLKSOURCE(PeriphClkInit->I2sApb2ClockSelection)); @@ -1312,7 +1330,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure I2S Clock source */ __HAL_RCC_I2S_APB2_CONFIG(PeriphClkInit->I2sApb2ClockSelection); /* Enable the PLLI2S when it's used as clock source for I2S */ - if(PeriphClkInit->I2sApb2ClockSelection == RCC_I2SAPB2CLKSOURCE_PLLI2S) + if (PeriphClkInit->I2sApb2ClockSelection == RCC_I2SAPB2CLKSOURCE_PLLI2S) { plli2sused = 1U; } @@ -1321,7 +1339,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk #if defined(STM32F413xx) || defined(STM32F423xx) /*----------------------- SAI1 Block A configuration -----------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAIA) == (RCC_PERIPHCLK_SAIA)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAIA) == (RCC_PERIPHCLK_SAIA)) { /* Check the parameters */ assert_param(IS_RCC_SAIACLKSOURCE(PeriphClkInit->SaiAClockSelection)); @@ -1329,12 +1347,12 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure SAI1 Clock source */ __HAL_RCC_SAI_BLOCKACLKSOURCE_CONFIG(PeriphClkInit->SaiAClockSelection); /* Enable the PLLI2S when it's used as clock source for SAI */ - if(PeriphClkInit->SaiAClockSelection == RCC_SAIACLKSOURCE_PLLI2SR) + if (PeriphClkInit->SaiAClockSelection == RCC_SAIACLKSOURCE_PLLI2SR) { plli2sused = 1U; } /* Enable the PLLSAI when it's used as clock source for SAI */ - if(PeriphClkInit->SaiAClockSelection == RCC_SAIACLKSOURCE_PLLR) + if (PeriphClkInit->SaiAClockSelection == RCC_SAIACLKSOURCE_PLLR) { /* Check for PLL/DIVR parameters */ assert_param(IS_RCC_PLL_DIVR_VALUE(PeriphClkInit->PLLDivR)); @@ -1346,7 +1364,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*---------------------- SAI1 Block B configuration ------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAIB) == (RCC_PERIPHCLK_SAIB)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAIB) == (RCC_PERIPHCLK_SAIB)) { /* Check the parameters */ assert_param(IS_RCC_SAIBCLKSOURCE(PeriphClkInit->SaiBClockSelection)); @@ -1354,12 +1372,12 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure SAI1 Clock source */ __HAL_RCC_SAI_BLOCKBCLKSOURCE_CONFIG(PeriphClkInit->SaiBClockSelection); /* Enable the PLLI2S when it's used as clock source for SAI */ - if(PeriphClkInit->SaiBClockSelection == RCC_SAIBCLKSOURCE_PLLI2SR) + if 
(PeriphClkInit->SaiBClockSelection == RCC_SAIBCLKSOURCE_PLLI2SR) { plli2sused = 1U; } /* Enable the PLLSAI when it's used as clock source for SAI */ - if(PeriphClkInit->SaiBClockSelection == RCC_SAIBCLKSOURCE_PLLR) + if (PeriphClkInit->SaiBClockSelection == RCC_SAIBCLKSOURCE_PLLR) { /* Check for PLL/DIVR parameters */ assert_param(IS_RCC_PLL_DIVR_VALUE(PeriphClkInit->PLLDivR)); @@ -1372,7 +1390,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk #endif /* STM32F413xx || STM32F423xx */ /*------------------------------------ RTC configuration -------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) { /* Check for RTC Parameters used to output RTCCLK */ assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); @@ -1386,16 +1404,16 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); - while((PWR->CR & PWR_CR_DBP) == RESET) + while ((PWR->CR & PWR_CR_DBP) == RESET) { - if((HAL_GetTick() - tickstart ) > RCC_DBP_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) { return HAL_TIMEOUT; } } /* Reset the Backup domain only if the RTC Clock source selection is modified from reset value */ tmpreg1 = (RCC->BDCR & RCC_BDCR_RTCSEL); - if((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + if ((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) { /* Store the content of BDCR register before the reset of Backup Domain */ tmpreg1 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); @@ -1406,15 +1424,15 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk RCC->BDCR = tmpreg1; /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ - if(HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) { /* Get tick */ tickstart = HAL_GetTick(); /* Wait till LSE is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) { - if((HAL_GetTick() - tickstart ) > RCC_LSE_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -1426,7 +1444,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*------------------------------------ TIM configuration -------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) { /* Configure Timer Prescaler */ __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); @@ -1434,7 +1452,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*------------------------------------- FMPI2C1 Configuration --------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_FMPI2C1) == RCC_PERIPHCLK_FMPI2C1) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_FMPI2C1) == RCC_PERIPHCLK_FMPI2C1) { /* Check the parameters */ assert_param(IS_RCC_FMPI2C1CLKSOURCE(PeriphClkInit->Fmpi2c1ClockSelection)); @@ -1445,7 +1463,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef 
*PeriphClk /*--------------------------------------------------------------------------*/ /*------------------------------------- CLK48 Configuration ----------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) { /* Check the parameters */ assert_param(IS_RCC_CLK48CLKSOURCE(PeriphClkInit->Clk48ClockSelection)); @@ -1454,7 +1472,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk __HAL_RCC_CLK48_CONFIG(PeriphClkInit->Clk48ClockSelection); /* Enable the PLLI2S when it's used as clock source for CLK48 */ - if(PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLI2SQ) + if (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLI2SQ) { plli2sused = 1U; } @@ -1462,7 +1480,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*------------------------------------- SDIO Configuration -----------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDIO) == RCC_PERIPHCLK_SDIO) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDIO) == RCC_PERIPHCLK_SDIO) { /* Check the parameters */ assert_param(IS_RCC_SDIOCLKSOURCE(PeriphClkInit->SdioClockSelection)); @@ -1475,16 +1493,16 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*-------------------------------------- PLLI2S Configuration --------------*/ /* PLLI2S is configured when a peripheral will use it as source clock : I2S on APB1 or I2S on APB2*/ - if((plli2sused == 1U) || (PeriphClkInit->PeriphClockSelection == RCC_PERIPHCLK_PLLI2S)) + if ((plli2sused == 1U) || (PeriphClkInit->PeriphClockSelection == RCC_PERIPHCLK_PLLI2S)) { /* Disable the PLLI2S */ __HAL_RCC_PLLI2S_DISABLE(); /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLI2S is disabled */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) { - if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -1499,10 +1517,11 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk __HAL_RCC_PLL_I2S_CONFIG(PeriphClkInit->PLLI2SSelection); /*------- In Case of PLLI2S is selected as source clock for I2S ----------*/ - if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB1) == RCC_PERIPHCLK_I2S_APB1) && (PeriphClkInit->I2sApb1ClockSelection == RCC_I2SAPB1CLKSOURCE_PLLI2S)) || - ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB2) == RCC_PERIPHCLK_I2S_APB2) && (PeriphClkInit->I2sApb2ClockSelection == RCC_I2SAPB2CLKSOURCE_PLLI2S)) || - ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) && (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLI2SQ)) || - ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDIO) == RCC_PERIPHCLK_SDIO) && (PeriphClkInit->SdioClockSelection == RCC_SDIOCLKSOURCE_CLK48) && (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLI2SQ))) + if (((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB1) == RCC_PERIPHCLK_I2S_APB1) + && (PeriphClkInit->I2sApb1ClockSelection == RCC_I2SAPB1CLKSOURCE_PLLI2S)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S_APB2) == RCC_PERIPHCLK_I2S_APB2) && 
(PeriphClkInit->I2sApb2ClockSelection == RCC_I2SAPB2CLKSOURCE_PLLI2S)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) && (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLI2SQ)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDIO) == RCC_PERIPHCLK_SDIO) && (PeriphClkInit->SdioClockSelection == RCC_SDIOCLKSOURCE_CLK48) && (PeriphClkInit->Clk48ClockSelection == RCC_CLK48CLKSOURCE_PLLI2SQ))) { /* check for Parameters */ assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); @@ -1511,13 +1530,15 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure the PLLI2S division factors */ /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM)*/ /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ - __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN , PeriphClkInit->PLLI2S.PLLI2SQ, PeriphClkInit->PLLI2S.PLLI2SR); + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, + PeriphClkInit->PLLI2S.PLLI2SR); } #if defined(STM32F413xx) || defined(STM32F423xx) /*------- In Case of PLLI2S is selected as source clock for SAI ----------*/ - if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAIA) == RCC_PERIPHCLK_SAIA) && (PeriphClkInit->SaiAClockSelection == RCC_SAIACLKSOURCE_PLLI2SR)) || - ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAIB) == RCC_PERIPHCLK_SAIB) && (PeriphClkInit->SaiBClockSelection == RCC_SAIBCLKSOURCE_PLLI2SR))) + if (((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAIA) == RCC_PERIPHCLK_SAIA) + && (PeriphClkInit->SaiAClockSelection == RCC_SAIACLKSOURCE_PLLI2SR)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAIB) == RCC_PERIPHCLK_SAIB) && (PeriphClkInit->SaiBClockSelection == RCC_SAIBCLKSOURCE_PLLI2SR))) { /* Check for PLLI2S Parameters */ assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); @@ -1530,7 +1551,8 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ - __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, plli2sq, PeriphClkInit->PLLI2S.PLLI2SR); + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, plli2sq, + PeriphClkInit->PLLI2S.PLLI2SR); /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVR */ __HAL_RCC_PLLI2S_PLLSAICLKDIVR_CONFIG(PeriphClkInit->PLLI2SDivR); @@ -1538,7 +1560,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk #endif /* STM32F413xx || STM32F423xx */ /*----------------- In Case of PLLI2S is just selected ------------------*/ - if((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) + if ((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) { /* Check for Parameters */ assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); @@ -1547,7 +1569,8 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure the PLLI2S division factors */ /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM)*/ /* SPDIFRXCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SP */ - __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN , 
PeriphClkInit->PLLI2S.PLLI2SQ, PeriphClkInit->PLLI2S.PLLI2SR); + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, + PeriphClkInit->PLLI2S.PLLI2SR); } /* Enable the PLLI2S */ @@ -1555,9 +1578,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLI2S is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) { - if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -1567,7 +1590,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*-------------------- DFSDM1 clock source configuration -------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM1) == RCC_PERIPHCLK_DFSDM1) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM1) == RCC_PERIPHCLK_DFSDM1) { /* Check the parameters */ assert_param(IS_RCC_DFSDM1CLKSOURCE(PeriphClkInit->Dfsdm1ClockSelection)); @@ -1578,7 +1601,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*-------------------- DFSDM1 Audio clock source configuration -------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM1_AUDIO) == RCC_PERIPHCLK_DFSDM1_AUDIO) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM1_AUDIO) == RCC_PERIPHCLK_DFSDM1_AUDIO) { /* Check the parameters */ assert_param(IS_RCC_DFSDM1AUDIOCLKSOURCE(PeriphClkInit->Dfsdm1AudioClockSelection)); @@ -1590,7 +1613,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk #if defined(STM32F413xx) || defined(STM32F423xx) /*-------------------- DFSDM2 clock source configuration -------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM2) == RCC_PERIPHCLK_DFSDM2) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM2) == RCC_PERIPHCLK_DFSDM2) { /* Check the parameters */ assert_param(IS_RCC_DFSDM2CLKSOURCE(PeriphClkInit->Dfsdm2ClockSelection)); @@ -1601,7 +1624,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*-------------------- DFSDM2 Audio clock source configuration -------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM2_AUDIO) == RCC_PERIPHCLK_DFSDM2_AUDIO) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM2_AUDIO) == RCC_PERIPHCLK_DFSDM2_AUDIO) { /* Check the parameters */ assert_param(IS_RCC_DFSDM2AUDIOCLKSOURCE(PeriphClkInit->Dfsdm2AudioClockSelection)); @@ -1612,7 +1635,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*---------------------------- LPTIM1 Configuration ------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) { /* Check the parameters */ assert_param(IS_RCC_LPTIM1CLKSOURCE(PeriphClkInit->Lptim1ClockSelection)); @@ -1639,18 +1662,18 @@ void 
HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) /* Set all possible values for the extended clock type parameter------------*/ #if defined(STM32F413xx) || defined(STM32F423xx) - PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S_APB1 | RCC_PERIPHCLK_I2S_APB2 |\ - RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC |\ - RCC_PERIPHCLK_FMPI2C1 | RCC_PERIPHCLK_CLK48 |\ - RCC_PERIPHCLK_SDIO | RCC_PERIPHCLK_DFSDM1 |\ - RCC_PERIPHCLK_DFSDM1_AUDIO | RCC_PERIPHCLK_DFSDM2 |\ - RCC_PERIPHCLK_DFSDM2_AUDIO | RCC_PERIPHCLK_LPTIM1 |\ + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S_APB1 | RCC_PERIPHCLK_I2S_APB2 | \ + RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC | \ + RCC_PERIPHCLK_FMPI2C1 | RCC_PERIPHCLK_CLK48 | \ + RCC_PERIPHCLK_SDIO | RCC_PERIPHCLK_DFSDM1 | \ + RCC_PERIPHCLK_DFSDM1_AUDIO | RCC_PERIPHCLK_DFSDM2 | \ + RCC_PERIPHCLK_DFSDM2_AUDIO | RCC_PERIPHCLK_LPTIM1 | \ RCC_PERIPHCLK_SAIA | RCC_PERIPHCLK_SAIB; #else /* STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx */ - PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S_APB1 | RCC_PERIPHCLK_I2S_APB2 |\ - RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC |\ - RCC_PERIPHCLK_FMPI2C1 | RCC_PERIPHCLK_CLK48 |\ - RCC_PERIPHCLK_SDIO | RCC_PERIPHCLK_DFSDM1 |\ + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S_APB1 | RCC_PERIPHCLK_I2S_APB2 | \ + RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC | \ + RCC_PERIPHCLK_FMPI2C1 | RCC_PERIPHCLK_CLK48 | \ + RCC_PERIPHCLK_SDIO | RCC_PERIPHCLK_DFSDM1 | \ RCC_PERIPHCLK_DFSDM1_AUDIO; #endif /* STM32F413xx || STM32F423xx */ @@ -1740,23 +1763,23 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) uint32_t vcooutput = 0U; switch (PeriphClk) { - case RCC_PERIPHCLK_I2S_APB1: + case RCC_PERIPHCLK_I2S_APB1: { /* Get the current I2S source */ srcclk = __HAL_RCC_GET_I2S_APB1_SOURCE(); switch (srcclk) { - /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ - case RCC_I2SAPB1CLKSOURCE_EXT: + /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ + case RCC_I2SAPB1CLKSOURCE_EXT: { /* Set the I2S clock to the external clock value */ frequency = EXTERNAL_CLOCK_VALUE; break; } - /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ - case RCC_I2SAPB1CLKSOURCE_PLLI2S: + /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ + case RCC_I2SAPB1CLKSOURCE_PLLI2S: { - if((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SSRC) == RCC_PLLI2SCFGR_PLLI2SSRC) + if ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SSRC) == RCC_PLLI2SCFGR_PLLI2SSRC) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(EXTERNAL_CLOCK_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); @@ -1765,7 +1788,7 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) { /* Configure the PLLI2S division factor */ /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); @@ -1779,15 +1802,15 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ vcooutput = (uint32_t)(vcoinput * (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U) & (RCC_PLLI2SCFGR_PLLI2SN >> 6U))); /* I2S_CLK = PLLI2S_VCO Output/PLLI2SR */ - frequency = (uint32_t)(vcooutput /(((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) 
& (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); + frequency = (uint32_t)(vcooutput / (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); break; } - /* Check if I2S clock selection is PLL VCO Output divided by PLLR used as I2S clock */ - case RCC_I2SAPB1CLKSOURCE_PLLR: + /* Check if I2S clock selection is PLL VCO Output divided by PLLR used as I2S clock */ + case RCC_I2SAPB1CLKSOURCE_PLLR: { /* Configure the PLL division factor R */ /* PLL_VCO Input = PLL_SOURCE/PLLM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); @@ -1801,13 +1824,13 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLL_VCO Output = PLL_VCO Input * PLLN */ vcooutput = (uint32_t)(vcoinput * (((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6U) & (RCC_PLLCFGR_PLLN >> 6U))); /* I2S_CLK = PLL_VCO Output/PLLR */ - frequency = (uint32_t)(vcooutput /(((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); + frequency = (uint32_t)(vcooutput / (((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); break; } - /* Check if I2S clock selection is HSI or HSE depending from PLL source Clock */ - case RCC_I2SAPB1CLKSOURCE_PLLSRC: + /* Check if I2S clock selection is HSI or HSE depending from PLL source Clock */ + case RCC_I2SAPB1CLKSOURCE_PLLSRC: { - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { frequency = HSE_VALUE; } @@ -1818,7 +1841,7 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) break; } /* Clock not enabled for I2S*/ - default: + default: { frequency = 0U; break; @@ -1826,23 +1849,23 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) } break; } - case RCC_PERIPHCLK_I2S_APB2: + case RCC_PERIPHCLK_I2S_APB2: { /* Get the current I2S source */ srcclk = __HAL_RCC_GET_I2S_APB2_SOURCE(); switch (srcclk) { /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ - case RCC_I2SAPB2CLKSOURCE_EXT: + case RCC_I2SAPB2CLKSOURCE_EXT: { /* Set the I2S clock to the external clock value */ frequency = EXTERNAL_CLOCK_VALUE; break; } /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ - case RCC_I2SAPB2CLKSOURCE_PLLI2S: + case RCC_I2SAPB2CLKSOURCE_PLLI2S: { - if((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SSRC) == RCC_PLLI2SCFGR_PLLI2SSRC) + if ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SSRC) == RCC_PLLI2SCFGR_PLLI2SSRC) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(EXTERNAL_CLOCK_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); @@ -1851,7 +1874,7 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) { /* Configure the PLLI2S division factor */ /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); @@ -1865,15 +1888,15 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ vcooutput = (uint32_t)(vcoinput * (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U) & (RCC_PLLI2SCFGR_PLLI2SN >> 6U))); /* I2S_CLK = PLLI2S_VCO Output/PLLI2SR */ - frequency = (uint32_t)(vcooutput 
/(((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); + frequency = (uint32_t)(vcooutput / (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); break; } /* Check if I2S clock selection is PLL VCO Output divided by PLLR used as I2S clock */ - case RCC_I2SAPB2CLKSOURCE_PLLR: + case RCC_I2SAPB2CLKSOURCE_PLLR: { /* Configure the PLL division factor R */ /* PLL_VCO Input = PLL_SOURCE/PLLM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); @@ -1887,13 +1910,13 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLL_VCO Output = PLL_VCO Input * PLLN */ vcooutput = (uint32_t)(vcoinput * (((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6U) & (RCC_PLLCFGR_PLLN >> 6U))); /* I2S_CLK = PLL_VCO Output/PLLR */ - frequency = (uint32_t)(vcooutput /(((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); + frequency = (uint32_t)(vcooutput / (((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); break; } /* Check if I2S clock selection is HSI or HSE depending from PLL source Clock */ - case RCC_I2SAPB2CLKSOURCE_PLLSRC: + case RCC_I2SAPB2CLKSOURCE_PLLSRC: { - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { frequency = HSE_VALUE; } @@ -1903,8 +1926,8 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) } break; } - /* Clock not enabled for I2S*/ - default: + /* Clock not enabled for I2S*/ + default: { frequency = 0U; break; @@ -1912,6 +1935,10 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) } break; } + default: + { + break; + } } return frequency; } @@ -1939,7 +1966,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); /*---------------------------- RTC configuration ---------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) { /* Check for RTC Parameters used to output RTCCLK */ assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); @@ -1953,16 +1980,16 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); - while((PWR->CR & PWR_CR_DBP) == RESET) + while ((PWR->CR & PWR_CR_DBP) == RESET) { - if((HAL_GetTick() - tickstart ) > RCC_DBP_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) { return HAL_TIMEOUT; } } /* Reset the Backup domain only if the RTC Clock source selection is modified from reset value */ tmpreg1 = (RCC->BDCR & RCC_BDCR_RTCSEL); - if((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + if ((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) { /* Store the content of BDCR register before the reset of Backup Domain */ tmpreg1 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); @@ -1973,15 +2000,15 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk RCC->BDCR = tmpreg1; /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ - if(HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + if (HAL_IS_BIT_SET(RCC->BDCR, 
RCC_BDCR_LSEON)) { /* Get tick */ tickstart = HAL_GetTick(); /* Wait till LSE is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) { - if((HAL_GetTick() - tickstart ) > RCC_LSE_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -1993,14 +2020,14 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*---------------------------- TIM configuration ---------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) { __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); } /*--------------------------------------------------------------------------*/ /*---------------------------- FMPI2C1 Configuration -----------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_FMPI2C1) == RCC_PERIPHCLK_FMPI2C1) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_FMPI2C1) == RCC_PERIPHCLK_FMPI2C1) { /* Check the parameters */ assert_param(IS_RCC_FMPI2C1CLKSOURCE(PeriphClkInit->Fmpi2c1ClockSelection)); @@ -2011,7 +2038,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*---------------------------- LPTIM1 Configuration ------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) { /* Check the parameters */ assert_param(IS_RCC_LPTIM1CLKSOURCE(PeriphClkInit->Lptim1ClockSelection)); @@ -2021,7 +2048,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk } /*---------------------------- I2S Configuration ---------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) { /* Check the parameters */ assert_param(IS_RCC_I2SAPBCLKSOURCE(PeriphClkInit->I2SClockSelection)); @@ -2085,25 +2112,25 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) uint32_t vcooutput = 0U; switch (PeriphClk) { - case RCC_PERIPHCLK_I2S: + case RCC_PERIPHCLK_I2S: { /* Get the current I2S source */ srcclk = __HAL_RCC_GET_I2S_SOURCE(); switch (srcclk) { - /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ - case RCC_I2SAPBCLKSOURCE_EXT: + /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ + case RCC_I2SAPBCLKSOURCE_EXT: { /* Set the I2S clock to the external clock value */ frequency = EXTERNAL_CLOCK_VALUE; break; } - /* Check if I2S clock selection is PLL VCO Output divided by PLLR used as I2S clock */ - case RCC_I2SAPBCLKSOURCE_PLLR: + /* Check if I2S clock selection is PLL VCO Output divided by PLLR used as I2S clock */ + case RCC_I2SAPBCLKSOURCE_PLLR: { /* Configure the PLL division factor R */ /* PLL_VCO Input = PLL_SOURCE/PLLM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); @@ -2117,13 +2144,13 @@ uint32_t 
HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLL_VCO Output = PLL_VCO Input * PLLN */ vcooutput = (uint32_t)(vcoinput * (((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6U) & (RCC_PLLCFGR_PLLN >> 6U))); /* I2S_CLK = PLL_VCO Output/PLLR */ - frequency = (uint32_t)(vcooutput /(((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); + frequency = (uint32_t)(vcooutput / (((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 28U) & (RCC_PLLCFGR_PLLR >> 28U))); break; } - /* Check if I2S clock selection is HSI or HSE depending from PLL source Clock */ - case RCC_I2SAPBCLKSOURCE_PLLSRC: + /* Check if I2S clock selection is HSI or HSE depending from PLL source Clock */ + case RCC_I2SAPBCLKSOURCE_PLLSRC: { - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { frequency = HSE_VALUE; } @@ -2134,7 +2161,7 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) break; } /* Clock not enabled for I2S*/ - default: + default: { frequency = 0U; break; @@ -2142,6 +2169,10 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) } break; } + default: + { + break; + } } return frequency; } @@ -2174,9 +2205,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*----------------------- Common configuration SAI/I2S ---------------------*/ /* In Case of SAI or I2S Clock Configuration through PLLI2S, PLLI2SN division factor is common parameters for both peripherals */ - if((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || - (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLI2S) == RCC_PERIPHCLK_SAI_PLLI2S) || - (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S)) + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || + (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLI2S) == RCC_PERIPHCLK_SAI_PLLI2S) || + (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S)) { /* check for Parameters */ assert_param(IS_RCC_PLLI2SN_VALUE(PeriphClkInit->PLLI2S.PLLI2SN)); @@ -2186,9 +2217,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLI2S is disabled */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) { - if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -2198,20 +2229,20 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*---------------------------- I2S configuration -------------------------*/ /* In Case of I2S Clock Configuration through PLLI2S, PLLI2SR must be added only for I2S configuration */ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == (RCC_PERIPHCLK_I2S)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == (RCC_PERIPHCLK_I2S)) { /* check for Parameters */ assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); /* Configure the PLLI2S division factors */ /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLM) */ /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ - __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , PeriphClkInit->PLLI2S.PLLI2SR); + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SR); } 
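For context on the hunk above: it routes the I2S kernel clock through PLLI2S, where the VCO runs at (PLL source / PLLM) * PLLI2SN and the I2S clock is that VCO output divided by PLLI2SR. Below is a minimal application-side sketch of driving this path through HAL_RCCEx_PeriphCLKConfig(); the numeric values (8 MHz HSE, PLLM = 8, PLLI2SN = 192, PLLI2SR = 2) and the function name app_i2s_clock_config are illustrative assumptions, not taken from this patch.

#include "stm32f4xx_hal.h"

/* Illustrative sketch only, assuming an 8 MHz HSE and PLLM = 8:
   VCO input  = 8 MHz / 8        = 1 MHz
   VCO output = 1 MHz * 192      = 192 MHz
   I2S clock  = 192 MHz / 2      = 96 MHz */
static HAL_StatusTypeDef app_i2s_clock_config(void)
{
  RCC_PeriphCLKInitTypeDef periph_clk = {0};

  periph_clk.PeriphClockSelection = RCC_PERIPHCLK_I2S; /* configure only the I2S kernel clock */
  periph_clk.PLLI2S.PLLI2SN = 192U;                    /* PLLI2S VCO multiplier               */
  periph_clk.PLLI2S.PLLI2SR = 2U;                      /* post divider producing I2SCLK       */

  if (HAL_RCCEx_PeriphCLKConfig(&periph_clk) != HAL_OK)
  {
    return HAL_ERROR;
  }

  /* With the assumed source clock, HAL_RCCEx_GetPeriphCLKFreq(RCC_PERIPHCLK_I2S)
     should now report about 96 MHz. */
  return HAL_OK;
}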
/*---------------------------- SAI configuration -------------------------*/ /* In Case of SAI Clock Configuration through PLLI2S, PLLI2SQ and PLLI2S_DIVQ must be added only for SAI configuration */ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLI2S) == (RCC_PERIPHCLK_SAI_PLLI2S)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLI2S) == (RCC_PERIPHCLK_SAI_PLLI2S)) { /* Check the PLLI2S division factors */ assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); @@ -2223,20 +2254,21 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ - __HAL_RCC_PLLI2S_SAICLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , PeriphClkInit->PLLI2S.PLLI2SQ , tmpreg1); + __HAL_RCC_PLLI2S_SAICLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, tmpreg1); /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ __HAL_RCC_PLLI2S_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLI2SDivQ); } /*----------------- In Case of PLLI2S is just selected -----------------*/ - if((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) + if ((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) { /* Check for Parameters */ assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); /* Configure the PLLI2S multiplication and division factors */ - __HAL_RCC_PLLI2S_SAICLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, PeriphClkInit->PLLI2S.PLLI2SR); + __HAL_RCC_PLLI2S_SAICLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, + PeriphClkInit->PLLI2S.PLLI2SR); } /* Enable the PLLI2S */ @@ -2244,9 +2276,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLI2S is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) { - if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -2259,8 +2291,8 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*----------------------- Common configuration SAI/LTDC --------------------*/ /* In Case of SAI or LTDC Clock Configuration through PLLSAI, PLLSAIN division factor is common parameters for both peripherals */ - if((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLSAI) == RCC_PERIPHCLK_SAI_PLLSAI) || - (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == RCC_PERIPHCLK_LTDC)) + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLSAI) == RCC_PERIPHCLK_SAI_PLLSAI) || + (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == RCC_PERIPHCLK_LTDC)) { /* Check the PLLSAI division factors */ assert_param(IS_RCC_PLLSAIN_VALUE(PeriphClkInit->PLLSAI.PLLSAIN)); @@ -2270,9 +2302,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLSAI is disabled */ - while(__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + while (__HAL_RCC_PLLSAI_GET_FLAG() != RESET) { - if((HAL_GetTick() - tickstart ) > PLLSAI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) { /* return in case 
of Timeout detected */ return HAL_TIMEOUT; @@ -2282,7 +2314,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*---------------------------- SAI configuration -------------------------*/ /* In Case of SAI Clock Configuration through PLLSAI, PLLSAIQ and PLLSAI_DIVQ must be added only for SAI configuration */ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLSAI) == (RCC_PERIPHCLK_SAI_PLLSAI)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI_PLLSAI) == (RCC_PERIPHCLK_SAI_PLLSAI)) { assert_param(IS_RCC_PLLSAIQ_VALUE(PeriphClkInit->PLLSAI.PLLSAIQ)); assert_param(IS_RCC_PLLSAI_DIVQ_VALUE(PeriphClkInit->PLLSAIDivQ)); @@ -2292,13 +2324,13 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ - __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN , PeriphClkInit->PLLSAI.PLLSAIQ, tmpreg1); + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN, PeriphClkInit->PLLSAI.PLLSAIQ, tmpreg1); /* SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ __HAL_RCC_PLLSAI_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLSAIDivQ); } /*---------------------------- LTDC configuration ------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == (RCC_PERIPHCLK_LTDC)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == (RCC_PERIPHCLK_LTDC)) { assert_param(IS_RCC_PLLSAIR_VALUE(PeriphClkInit->PLLSAI.PLLSAIR)); assert_param(IS_RCC_PLLSAI_DIVR_VALUE(PeriphClkInit->PLLSAIDivR)); @@ -2308,7 +2340,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ /* LTDC_CLK(first level) = PLLSAI_VCO Output/PLLSAIR */ - __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN , tmpreg1, PeriphClkInit->PLLSAI.PLLSAIR); + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN, tmpreg1, PeriphClkInit->PLLSAI.PLLSAIR); /* LTDC_CLK = LTDC_CLK(first level)/PLLSAIDIVR */ __HAL_RCC_PLLSAI_PLLSAICLKDIVR_CONFIG(PeriphClkInit->PLLSAIDivR); } @@ -2317,9 +2349,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLSAI is ready */ - while(__HAL_RCC_PLLSAI_GET_FLAG() == RESET) + while (__HAL_RCC_PLLSAI_GET_FLAG() == RESET) { - if((HAL_GetTick() - tickstart ) > PLLSAI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -2329,7 +2361,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*---------------------------- RTC configuration ---------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) { /* Check for RTC Parameters used to output RTCCLK */ assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); @@ -2343,16 +2375,16 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); - while((PWR->CR & PWR_CR_DBP) == RESET) + while ((PWR->CR & PWR_CR_DBP) == RESET) { - if((HAL_GetTick() - tickstart ) > RCC_DBP_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) 
> RCC_DBP_TIMEOUT_VALUE) { return HAL_TIMEOUT; } } /* Reset the Backup domain only if the RTC Clock source selection is modified from reset value */ tmpreg1 = (RCC->BDCR & RCC_BDCR_RTCSEL); - if((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + if ((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) { /* Store the content of BDCR register before the reset of Backup Domain */ tmpreg1 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); @@ -2363,15 +2395,15 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk RCC->BDCR = tmpreg1; /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ - if(HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) { /* Get tick */ tickstart = HAL_GetTick(); /* Wait till LSE is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) { - if((HAL_GetTick() - tickstart ) > RCC_LSE_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -2383,7 +2415,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /*--------------------------------------------------------------------------*/ /*---------------------------- TIM configuration ---------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) { __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); } @@ -2449,25 +2481,25 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) uint32_t vcooutput = 0U; switch (PeriphClk) { - case RCC_PERIPHCLK_I2S: + case RCC_PERIPHCLK_I2S: { /* Get the current I2S source */ srcclk = __HAL_RCC_GET_I2S_SOURCE(); switch (srcclk) { - /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ - case RCC_I2SCLKSOURCE_EXT: + /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ + case RCC_I2SCLKSOURCE_EXT: { /* Set the I2S clock to the external clock value */ frequency = EXTERNAL_CLOCK_VALUE; break; } - /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ - case RCC_I2SCLKSOURCE_PLLI2S: + /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ + case RCC_I2SCLKSOURCE_PLLI2S: { /* Configure the PLLI2S division factor */ /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); @@ -2481,11 +2513,11 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ vcooutput = (uint32_t)(vcoinput * (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U) & (RCC_PLLI2SCFGR_PLLI2SN >> 6U))); /* I2S_CLK = PLLI2S_VCO Output/PLLI2SR */ - frequency = (uint32_t)(vcooutput /(((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); + frequency = (uint32_t)(vcooutput / (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); break; } /* Clock not enabled for I2S*/ - default: + default: { frequency = 0U; break; @@ -2493,6 +2525,10 @@ uint32_t 
HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) } break; } + default: + { + break; + } } return frequency; } @@ -2521,8 +2557,8 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); /*---------------------------- I2S configuration ---------------------------*/ - if((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || - (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S)) + if ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || + (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S)) { /* check for Parameters */ assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); @@ -2535,9 +2571,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLI2S is disabled */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) { - if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -2548,12 +2584,13 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Configure the PLLI2S division factors */ /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLI2SM) */ /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ - __HAL_RCC_PLLI2S_I2SCLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SR); + __HAL_RCC_PLLI2S_I2SCLK_CONFIG(PeriphClkInit->PLLI2S.PLLI2SM, PeriphClkInit->PLLI2S.PLLI2SN, + PeriphClkInit->PLLI2S.PLLI2SR); #else /* Configure the PLLI2S division factors */ /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * (PLLI2SN/PLLM) */ /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ - __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , PeriphClkInit->PLLI2S.PLLI2SR); + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SR); #endif /* STM32F411xE */ /* Enable the PLLI2S */ @@ -2561,9 +2598,9 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); /* Wait till PLLI2S is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) { - if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -2572,7 +2609,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk } /*---------------------------- RTC configuration ---------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) { /* Check for RTC Parameters used to output RTCCLK */ assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); @@ -2586,16 +2623,16 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk /* Get tick */ tickstart = HAL_GetTick(); - while((PWR->CR & PWR_CR_DBP) == RESET) + while ((PWR->CR & PWR_CR_DBP) == RESET) { - if((HAL_GetTick() - tickstart ) > RCC_DBP_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > 
RCC_DBP_TIMEOUT_VALUE) { return HAL_TIMEOUT; } } /* Reset the Backup domain only if the RTC Clock source selection is modified from reset value */ tmpreg1 = (RCC->BDCR & RCC_BDCR_RTCSEL); - if((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + if ((tmpreg1 != 0x00000000U) && ((tmpreg1) != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) { /* Store the content of BDCR register before the reset of Backup Domain */ tmpreg1 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); @@ -2606,15 +2643,15 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk RCC->BDCR = tmpreg1; /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ - if(HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) { /* Get tick */ tickstart = HAL_GetTick(); /* Wait till LSE is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) { - if((HAL_GetTick() - tickstart ) > RCC_LSE_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -2625,7 +2662,7 @@ HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClk } #if defined(STM32F401xC) || defined(STM32F401xE) || defined(STM32F411xE) /*---------------------------- TIM configuration ---------------------------*/ - if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + if (((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) { __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); } @@ -2689,26 +2726,26 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) uint32_t vcooutput = 0U; switch (PeriphClk) { - case RCC_PERIPHCLK_I2S: + case RCC_PERIPHCLK_I2S: { /* Get the current I2S source */ srcclk = __HAL_RCC_GET_I2S_SOURCE(); switch (srcclk) { - /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ - case RCC_I2SCLKSOURCE_EXT: + /* Check if I2S clock selection is External clock mapped on the I2S_CKIN pin used as I2S clock */ + case RCC_I2SCLKSOURCE_EXT: { /* Set the I2S clock to the external clock value */ frequency = EXTERNAL_CLOCK_VALUE; break; } - /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ - case RCC_I2SCLKSOURCE_PLLI2S: + /* Check if I2S clock selection is PLLI2S VCO output clock divided by PLLI2SR used as I2S clock */ + case RCC_I2SCLKSOURCE_PLLI2S: { #if defined(STM32F411xE) /* Configure the PLLI2S division factor */ /* PLLI2S_VCO Input = PLL_SOURCE/PLLI2SM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SM)); @@ -2721,7 +2758,7 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) #else /* Configure the PLLI2S division factor */ /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ - if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) + if ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSE) { /* Get the I2S source clock value */ vcoinput = (uint32_t)(HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); @@ -2735,11 +2772,11 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ vcooutput = (uint32_t)(vcoinput * (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6U) & 
(RCC_PLLI2SCFGR_PLLI2SN >> 6U))); /* I2S_CLK = PLLI2S_VCO Output/PLLI2SR */ - frequency = (uint32_t)(vcooutput /(((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); + frequency = (uint32_t)(vcooutput / (((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> 28U) & (RCC_PLLI2SCFGR_PLLI2SR >> 28U))); break; } /* Clock not enabled for I2S*/ - default: + default: { frequency = 0U; break; @@ -2747,6 +2784,10 @@ uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) } break; } + default: + { + break; + } } return frequency; } @@ -2769,7 +2810,7 @@ void HAL_RCCEx_SelectLSEMode(uint8_t Mode) { /* Check the parameters */ assert_param(IS_RCC_LSE_MODE(Mode)); - if(Mode == RCC_LSE_HIGHDRIVE_MODE) + if (Mode == RCC_LSE_HIGHDRIVE_MODE) { SET_BIT(RCC->BDCR, RCC_BDCR_LSEMOD); } @@ -2782,14 +2823,14 @@ void HAL_RCCEx_SelectLSEMode(uint8_t Mode) #endif /* STM32F410xx || STM32F411xE || STM32F446xx || STM32F469xx || STM32F479xx || STM32F412Zx || STM32F412Vx || STM32F412Rx || STM32F412Cx || STM32F413xx || STM32F423xx */ /** @defgroup RCCEx_Exported_Functions_Group2 Extended Clock management functions - * @brief Extended Clock management functions - * -@verbatim + * @brief Extended Clock management functions + * +@verbatim =============================================================================== ##### Extended clock management functions ##### =============================================================================== [..] - This subsection provides a set of functions allowing to control the + This subsection provides a set of functions allowing to control the activation or deactivation of PLLI2S, PLLSAI. @endverbatim * @{ @@ -2824,9 +2865,9 @@ HAL_StatusTypeDef HAL_RCCEx_EnablePLLI2S(RCC_PLLI2SInitTypeDef *PLLI2SInit) /* Wait till PLLI2S is disabled */ tickstart = HAL_GetTick(); - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) { - if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -2869,16 +2910,16 @@ HAL_StatusTypeDef HAL_RCCEx_EnablePLLI2S(RCC_PLLI2SInitTypeDef *PLLI2SInit) /* Wait till PLLI2S is ready */ tickstart = HAL_GetTick(); - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) { - if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; } } - return HAL_OK; + return HAL_OK; } /** @@ -2894,9 +2935,9 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLI2S(void) /* Wait till PLLI2S is disabled */ tickstart = HAL_GetTick(); - while(READ_BIT(RCC->CR, RCC_CR_PLLI2SRDY) != RESET) + while (READ_BIT(RCC->CR, RCC_CR_PLLI2SRDY) != RESET) { - if((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -2937,9 +2978,9 @@ HAL_StatusTypeDef HAL_RCCEx_EnablePLLSAI(RCC_PLLSAIInitTypeDef *PLLSAIInit) /* Wait till PLLSAI is disabled */ tickstart = HAL_GetTick(); - while(__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + while (__HAL_RCC_PLLSAI_GET_FLAG() != RESET) { - if((HAL_GetTick() - tickstart ) > PLLSAI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -2973,16 +3014,16 @@ HAL_StatusTypeDef 
HAL_RCCEx_EnablePLLSAI(RCC_PLLSAIInitTypeDef *PLLSAIInit) /* Wait till PLLSAI is ready */ tickstart = HAL_GetTick(); - while(__HAL_RCC_PLLSAI_GET_FLAG() == RESET) + while (__HAL_RCC_PLLSAI_GET_FLAG() == RESET) { - if((HAL_GetTick() - tickstart ) > PLLSAI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; } } - return HAL_OK; + return HAL_OK; } /** @@ -2998,9 +3039,9 @@ HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void) /* Wait till PLLSAI is disabled */ tickstart = HAL_GetTick(); - while(__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + while (__HAL_RCC_PLLSAI_GET_FLAG() != RESET) { - if((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) { /* return in case of Timeout detected */ return HAL_TIMEOUT; @@ -3064,7 +3105,7 @@ uint32_t HAL_RCC_GetSysClockFreq(void) case RCC_CFGR_SWS_HSI: /* HSI used as system clock source */ { sysclockfreq = HSI_VALUE; - break; + break; } case RCC_CFGR_SWS_HSE: /* HSE used as system clock source */ { @@ -3076,19 +3117,19 @@ uint32_t HAL_RCC_GetSysClockFreq(void) /* PLL_VCO = (HSE_VALUE or HSI_VALUE / PLLM) * PLLN SYSCLK = PLL_VCO / PLLP */ pllm = RCC->PLLCFGR & RCC_PLLCFGR_PLLM; - if(__HAL_RCC_GET_PLL_OSCSOURCE() != RCC_PLLSOURCE_HSI) + if (__HAL_RCC_GET_PLL_OSCSOURCE() != RCC_PLLSOURCE_HSI) { /* HSE used as PLL clock source */ - pllvco = (uint32_t) ((((uint64_t) HSE_VALUE * ((uint64_t) ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + pllvco = (uint32_t)((((uint64_t) HSE_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); } else { /* HSI used as PLL clock source */ - pllvco = (uint32_t) ((((uint64_t) HSI_VALUE * ((uint64_t) ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + pllvco = (uint32_t)((((uint64_t) HSI_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); } - pllp = ((((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) >> RCC_PLLCFGR_PLLP_Pos) + 1U) *2U); + pllp = ((((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) >> RCC_PLLCFGR_PLLP_Pos) + 1U) * 2U); - sysclockfreq = pllvco/pllp; + sysclockfreq = pllvco / pllp; break; } case RCC_CFGR_SWS_PLLR: /* PLL/PLLR used as system clock source */ @@ -3096,19 +3137,19 @@ uint32_t HAL_RCC_GetSysClockFreq(void) /* PLL_VCO = (HSE_VALUE or HSI_VALUE / PLLM) * PLLN SYSCLK = PLL_VCO / PLLR */ pllm = RCC->PLLCFGR & RCC_PLLCFGR_PLLM; - if(__HAL_RCC_GET_PLL_OSCSOURCE() != RCC_PLLSOURCE_HSI) + if (__HAL_RCC_GET_PLL_OSCSOURCE() != RCC_PLLSOURCE_HSI) { /* HSE used as PLL clock source */ - pllvco = (uint32_t) ((((uint64_t) HSE_VALUE * ((uint64_t) ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + pllvco = (uint32_t)((((uint64_t) HSE_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); } else { /* HSI used as PLL clock source */ - pllvco = (uint32_t) ((((uint64_t) HSI_VALUE * ((uint64_t) ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + pllvco = (uint32_t)((((uint64_t) HSI_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); } pllr = ((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> RCC_PLLCFGR_PLLR_Pos); - sysclockfreq = pllvco/pllr; + sysclockfreq = pllvco / pllr; break; } default: @@ -3284,7 +3325,8 @@ HAL_StatusTypeDef HAL_RCC_DeInit(void) #endif /* RCC_CIR_PLLSAIRDYIE */ /* Clear all interrupt flags */ - 
SET_BIT(RCC->CIR, RCC_CIR_LSIRDYC | RCC_CIR_LSERDYC | RCC_CIR_HSIRDYC | RCC_CIR_HSERDYC | RCC_CIR_PLLRDYC | RCC_CIR_CSSC); + SET_BIT(RCC->CIR, RCC_CIR_LSIRDYC | RCC_CIR_LSERDYC | RCC_CIR_HSIRDYC | RCC_CIR_HSERDYC | RCC_CIR_PLLRDYC | + RCC_CIR_CSSC); #if defined(RCC_CIR_PLLI2SRDYC) SET_BIT(RCC->CIR, RCC_CIR_PLLI2SRDYC); @@ -3304,7 +3346,7 @@ HAL_StatusTypeDef HAL_RCC_DeInit(void) SystemCoreClock = HSI_VALUE; /* Adapt Systick interrupt period */ - if(HAL_InitTick(uwTickPrio) != HAL_OK) + if (HAL_InitTick(uwTickPrio) != HAL_OK) { return HAL_ERROR; } @@ -3334,26 +3376,35 @@ HAL_StatusTypeDef HAL_RCC_DeInit(void) */ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) { - uint32_t tickstart = 0U; + uint32_t tickstart; + uint32_t pll_config; + + /* Check Null pointer */ + if (RCC_OscInitStruct == NULL) + { + return HAL_ERROR; + } /* Check the parameters */ assert_param(IS_RCC_OSCILLATORTYPE(RCC_OscInitStruct->OscillatorType)); /*------------------------------- HSE Configuration ------------------------*/ - if(((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSE) == RCC_OSCILLATORTYPE_HSE) + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSE) == RCC_OSCILLATORTYPE_HSE) { /* Check the parameters */ assert_param(IS_RCC_HSE(RCC_OscInitStruct->HSEState)); /* When the HSE is used as system clock or clock source for PLL in these cases HSE will not disabled */ #if defined(STM32F446xx) - if((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSE) ||\ - ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSE)) ||\ - ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLLR) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSE))) + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSE) + || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSE)) || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLLR) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSE))) #else - if((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSE) ||\ - ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSE))) + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSE) + || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSE))) #endif /* STM32F446xx */ { - if((__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) && (RCC_OscInitStruct->HSEState == RCC_HSE_OFF)) + if ((__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) && (RCC_OscInitStruct->HSEState == RCC_HSE_OFF)) { return HAL_ERROR; } @@ -3364,15 +3415,15 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) __HAL_RCC_HSE_CONFIG(RCC_OscInitStruct->HSEState); /* Check the HSE State */ - if((RCC_OscInitStruct->HSEState) != RCC_HSE_OFF) + if ((RCC_OscInitStruct->HSEState) != RCC_HSE_OFF) { /* Get Start Tick*/ tickstart = HAL_GetTick(); /* Wait till HSE is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == RESET) { - if((HAL_GetTick() - tickstart ) > HSE_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -3384,9 +3435,9 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) tickstart = HAL_GetTick(); /* Wait till HSE is bypassed or disabled */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) + while 
(__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) { - if((HAL_GetTick() - tickstart ) > HSE_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -3395,7 +3446,7 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) } } /*----------------------------- HSI Configuration --------------------------*/ - if(((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSI) == RCC_OSCILLATORTYPE_HSI) + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSI) == RCC_OSCILLATORTYPE_HSI) { /* Check the parameters */ assert_param(IS_RCC_HSI(RCC_OscInitStruct->HSIState)); @@ -3403,16 +3454,18 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) /* Check if HSI is used as system clock or as PLL source when PLL is selected as system clock */ #if defined(STM32F446xx) - if((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSI) ||\ - ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSI)) ||\ - ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLLR) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSI))) + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSI) + || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSI)) || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLLR) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSI))) #else - if((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSI) ||\ - ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSI))) + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSI) + || \ + ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSI))) #endif /* STM32F446xx */ { /* When HSI is used as system clock it will not disabled */ - if((__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) && (RCC_OscInitStruct->HSIState != RCC_HSI_ON)) + if ((__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) && (RCC_OscInitStruct->HSIState != RCC_HSI_ON)) { return HAL_ERROR; } @@ -3426,7 +3479,7 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) else { /* Check the HSI State */ - if((RCC_OscInitStruct->HSIState)!= RCC_HSI_OFF) + if ((RCC_OscInitStruct->HSIState) != RCC_HSI_OFF) { /* Enable the Internal High Speed oscillator (HSI). 
*/ __HAL_RCC_HSI_ENABLE(); @@ -3435,9 +3488,9 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) tickstart = HAL_GetTick(); /* Wait till HSI is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) == RESET) { - if((HAL_GetTick() - tickstart ) > HSI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -3455,9 +3508,9 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) tickstart = HAL_GetTick(); /* Wait till HSI is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) { - if((HAL_GetTick() - tickstart ) > HSI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -3466,13 +3519,13 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) } } /*------------------------------ LSI Configuration -------------------------*/ - if(((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSI) == RCC_OSCILLATORTYPE_LSI) + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSI) == RCC_OSCILLATORTYPE_LSI) { /* Check the parameters */ assert_param(IS_RCC_LSI(RCC_OscInitStruct->LSIState)); /* Check the LSI State */ - if((RCC_OscInitStruct->LSIState)!= RCC_LSI_OFF) + if ((RCC_OscInitStruct->LSIState) != RCC_LSI_OFF) { /* Enable the Internal Low Speed oscillator (LSI). */ __HAL_RCC_LSI_ENABLE(); @@ -3481,9 +3534,9 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) tickstart = HAL_GetTick(); /* Wait till LSI is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) == RESET) { - if((HAL_GetTick() - tickstart ) > LSI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > LSI_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -3498,9 +3551,9 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) tickstart = HAL_GetTick(); /* Wait till LSI is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) != RESET) { - if((HAL_GetTick() - tickstart ) > LSI_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > LSI_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -3508,7 +3561,7 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) } } /*------------------------------ LSE Configuration -------------------------*/ - if(((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSE) == RCC_OSCILLATORTYPE_LSE) + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSE) == RCC_OSCILLATORTYPE_LSE) { FlagStatus pwrclkchanged = RESET; @@ -3517,13 +3570,13 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) /* Update LSE configuration in Backup Domain control register */ /* Requires to enable write access to Backup Domain of necessary */ - if(__HAL_RCC_PWR_IS_CLK_DISABLED()) + if (__HAL_RCC_PWR_IS_CLK_DISABLED()) { __HAL_RCC_PWR_CLK_ENABLE(); pwrclkchanged = SET; } - if(HAL_IS_BIT_CLR(PWR->CR, PWR_CR_DBP)) + if (HAL_IS_BIT_CLR(PWR->CR, PWR_CR_DBP)) { /* Enable write access to Backup domain */ SET_BIT(PWR->CR, PWR_CR_DBP); @@ -3531,9 +3584,9 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) /* Wait for Backup domain Write protection disable */ tickstart = HAL_GetTick(); - while(HAL_IS_BIT_CLR(PWR->CR, PWR_CR_DBP)) + while (HAL_IS_BIT_CLR(PWR->CR, PWR_CR_DBP)) { - if((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + if 
((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -3543,15 +3596,15 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) /* Set the new LSE configuration -----------------------------------------*/ __HAL_RCC_LSE_CONFIG(RCC_OscInitStruct->LSEState); /* Check the LSE State */ - if((RCC_OscInitStruct->LSEState) != RCC_LSE_OFF) + if ((RCC_OscInitStruct->LSEState) != RCC_LSE_OFF) { /* Get Start Tick*/ tickstart = HAL_GetTick(); /* Wait till LSE is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) { - if((HAL_GetTick() - tickstart ) > RCC_LSE_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -3563,9 +3616,9 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) tickstart = HAL_GetTick(); /* Wait till LSE is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) != RESET) { - if((HAL_GetTick() - tickstart ) > RCC_LSE_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -3573,7 +3626,7 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) } /* Restore clock configuration if changed */ - if(pwrclkchanged == SET) + if (pwrclkchanged == SET) { __HAL_RCC_PWR_CLK_DISABLE(); } @@ -3584,9 +3637,9 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) if ((RCC_OscInitStruct->PLL.PLLState) != RCC_PLL_NONE) { /* Check if the PLL is used as system clock or not */ - if(__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_CFGR_SWS_PLL) + if (__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_CFGR_SWS_PLL) { - if((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_ON) + if ((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_ON) { /* Check the parameters */ assert_param(IS_RCC_PLLSOURCE(RCC_OscInitStruct->PLL.PLLSource)); @@ -3603,22 +3656,21 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) tickstart = HAL_GetTick(); /* Wait till PLL is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) { - if((HAL_GetTick() - tickstart ) > PLL_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) { return HAL_TIMEOUT; } } /* Configure the main PLL clock source, multiplication and division factors. */ - __HAL_RCC_PLL_CONFIG(RCC_OscInitStruct->PLL.PLLSource, - RCC_OscInitStruct->PLL.PLLM, - RCC_OscInitStruct->PLL.PLLN, - RCC_OscInitStruct->PLL.PLLP, - RCC_OscInitStruct->PLL.PLLQ, - RCC_OscInitStruct->PLL.PLLR); - + WRITE_REG(RCC->PLLCFGR, (RCC_OscInitStruct->PLL.PLLSource | \ + RCC_OscInitStruct->PLL.PLLM | \ + (RCC_OscInitStruct->PLL.PLLN << RCC_PLLCFGR_PLLN_Pos) | \ + (((RCC_OscInitStruct->PLL.PLLP >> 1U) - 1U) << RCC_PLLCFGR_PLLP_Pos) | \ + (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos) | \ + (RCC_OscInitStruct->PLL.PLLR << RCC_PLLCFGR_PLLR_Pos))); /* Enable the main PLL. 
*/ __HAL_RCC_PLL_ENABLE(); @@ -3626,9 +3678,9 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) tickstart = HAL_GetTick(); /* Wait till PLL is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) { - if((HAL_GetTick() - tickstart ) > PLL_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -3643,9 +3695,9 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) tickstart = HAL_GetTick(); /* Wait till PLL is ready */ - while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) { - if((HAL_GetTick() - tickstart ) > PLL_TIMEOUT_VALUE) + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) { return HAL_TIMEOUT; } @@ -3654,7 +3706,35 @@ HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) } else { - return HAL_ERROR; + /* Check if there is a request to disable the PLL used as System clock source */ + if ((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) + { + return HAL_ERROR; + } + else + { + /* Do not return HAL_ERROR if request repeats the current configuration */ + pll_config = RCC->PLLCFGR; +#if defined (RCC_PLLCFGR_PLLR) + if (((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLSRC) != RCC_OscInitStruct->PLL.PLLSource) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLM) != (RCC_OscInitStruct->PLL.PLLM) << RCC_PLLCFGR_PLLM_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLN) != (RCC_OscInitStruct->PLL.PLLN) << RCC_PLLCFGR_PLLN_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLP) != (((RCC_OscInitStruct->PLL.PLLP >> 1U) - 1U)) << RCC_PLLCFGR_PLLP_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLQ) != (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos)) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLR) != (RCC_OscInitStruct->PLL.PLLR << RCC_PLLCFGR_PLLR_Pos))) +#else + if (((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLSRC) != RCC_OscInitStruct->PLL.PLLSource) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLM) != (RCC_OscInitStruct->PLL.PLLM) << RCC_PLLCFGR_PLLM_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLN) != (RCC_OscInitStruct->PLL.PLLN) << RCC_PLLCFGR_PLLN_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLP) != (((RCC_OscInitStruct->PLL.PLLP >> 1U) - 1U)) << RCC_PLLCFGR_PLLP_Pos) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLQ) != (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos))) +#endif /* RCC_PLLCFGR_PLLR */ + { + return HAL_ERROR; + } + } } } return HAL_OK; @@ -3675,11 +3755,11 @@ void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) RCC_OscInitStruct->OscillatorType = RCC_OSCILLATORTYPE_HSE | RCC_OSCILLATORTYPE_HSI | RCC_OSCILLATORTYPE_LSE | RCC_OSCILLATORTYPE_LSI; /* Get the HSE configuration -----------------------------------------------*/ - if((RCC->CR &RCC_CR_HSEBYP) == RCC_CR_HSEBYP) + if ((RCC->CR & RCC_CR_HSEBYP) == RCC_CR_HSEBYP) { RCC_OscInitStruct->HSEState = RCC_HSE_BYPASS; } - else if((RCC->CR &RCC_CR_HSEON) == RCC_CR_HSEON) + else if ((RCC->CR & RCC_CR_HSEON) == RCC_CR_HSEON) { RCC_OscInitStruct->HSEState = RCC_HSE_ON; } @@ -3689,7 +3769,7 @@ void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) } /* Get the HSI configuration -----------------------------------------------*/ - if((RCC->CR &RCC_CR_HSION) == RCC_CR_HSION) + if ((RCC->CR & RCC_CR_HSION) == RCC_CR_HSION) { RCC_OscInitStruct->HSIState = RCC_HSI_ON; } @@ -3698,14 +3778,14 @@ void 
HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) RCC_OscInitStruct->HSIState = RCC_HSI_OFF; } - RCC_OscInitStruct->HSICalibrationValue = (uint32_t)((RCC->CR &RCC_CR_HSITRIM) >> RCC_CR_HSITRIM_Pos); + RCC_OscInitStruct->HSICalibrationValue = (uint32_t)((RCC->CR & RCC_CR_HSITRIM) >> RCC_CR_HSITRIM_Pos); /* Get the LSE configuration -----------------------------------------------*/ - if((RCC->BDCR &RCC_BDCR_LSEBYP) == RCC_BDCR_LSEBYP) + if ((RCC->BDCR & RCC_BDCR_LSEBYP) == RCC_BDCR_LSEBYP) { RCC_OscInitStruct->LSEState = RCC_LSE_BYPASS; } - else if((RCC->BDCR &RCC_BDCR_LSEON) == RCC_BDCR_LSEON) + else if ((RCC->BDCR & RCC_BDCR_LSEON) == RCC_BDCR_LSEON) { RCC_OscInitStruct->LSEState = RCC_LSE_ON; } @@ -3715,7 +3795,7 @@ void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) } /* Get the LSI configuration -----------------------------------------------*/ - if((RCC->CSR &RCC_CSR_LSION) == RCC_CSR_LSION) + if ((RCC->CSR & RCC_CSR_LSION) == RCC_CSR_LSION) { RCC_OscInitStruct->LSIState = RCC_LSI_ON; } @@ -3725,7 +3805,7 @@ void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) } /* Get the PLL configuration -----------------------------------------------*/ - if((RCC->CR &RCC_CR_PLLON) == RCC_CR_PLLON) + if ((RCC->CR & RCC_CR_PLLON) == RCC_CR_PLLON) { RCC_OscInitStruct->PLL.PLLState = RCC_PLL_ON; } @@ -3751,4 +3831,3 @@ void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rtc.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rtc.c index 2d2be66d..0365accf 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rtc.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rtc.c @@ -14,7 +14,7 @@ ****************************************************************************** * @attention * - * Copyright (c) 2017 STMicroelectronics. + * Copyright (c) 2016 STMicroelectronics. * All rights reserved. 
* * This software is licensed under terms that can be found in the LICENSE file @@ -306,38 +306,50 @@ HAL_StatusTypeDef HAL_RTC_Init(RTC_HandleTypeDef *hrtc) /* Set RTC state */ hrtc->State = HAL_RTC_STATE_BUSY; - /* Disable the write protection for RTC registers */ - __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); + /* Check whether the calendar needs to be initialized */ + if (__HAL_RTC_IS_CALENDAR_INITIALIZED(hrtc) == 0U) + { + /* Disable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_DISABLE(hrtc); - /* Enter Initialization mode */ - status = RTC_EnterInitMode(hrtc); + /* Enter Initialization mode */ + status = RTC_EnterInitMode(hrtc); - if (status == HAL_OK) - { - /* Clear RTC_CR FMT, OSEL and POL Bits */ - hrtc->Instance->CR &= ((uint32_t)~(RTC_CR_FMT | RTC_CR_OSEL | RTC_CR_POL)); - /* Set RTC_CR register */ - hrtc->Instance->CR |= (uint32_t)(hrtc->Init.HourFormat | hrtc->Init.OutPut | hrtc->Init.OutPutPolarity); + if (status == HAL_OK) + { + /* Clear RTC_CR FMT, OSEL and POL Bits */ + hrtc->Instance->CR &= ((uint32_t)~(RTC_CR_FMT | RTC_CR_OSEL | RTC_CR_POL)); + /* Set RTC_CR register */ + hrtc->Instance->CR |= (uint32_t)(hrtc->Init.HourFormat | hrtc->Init.OutPut | hrtc->Init.OutPutPolarity); - /* Configure the RTC PRER */ - hrtc->Instance->PRER = (uint32_t)(hrtc->Init.SynchPrediv); - hrtc->Instance->PRER |= (uint32_t)(hrtc->Init.AsynchPrediv << RTC_PRER_PREDIV_A_Pos); + /* Configure the RTC PRER */ + hrtc->Instance->PRER = (uint32_t)(hrtc->Init.SynchPrediv); + hrtc->Instance->PRER |= (uint32_t)(hrtc->Init.AsynchPrediv << RTC_PRER_PREDIV_A_Pos); - /* Exit Initialization mode */ - status = RTC_ExitInitMode(hrtc); + /* Exit Initialization mode */ + status = RTC_ExitInitMode(hrtc); + } + + if (status == HAL_OK) + { + hrtc->Instance->TAFCR &= (uint32_t)~RTC_OUTPUT_TYPE_PUSHPULL; + hrtc->Instance->TAFCR |= (uint32_t)(hrtc->Init.OutPutType); + } + + /* Enable the write protection for RTC registers */ + __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); + } + else + { + /* The calendar is already initialized */ + status = HAL_OK; } if (status == HAL_OK) { - hrtc->Instance->TAFCR &= (uint32_t)~RTC_OUTPUT_TYPE_PUSHPULL; - hrtc->Instance->TAFCR |= (uint32_t)(hrtc->Init.OutPutType); - hrtc->State = HAL_RTC_STATE_READY; } - /* Enable the write protection for RTC registers */ - __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); - return status; } @@ -522,7 +534,7 @@ HAL_StatusTypeDef HAL_RTC_RegisterCallback(RTC_HandleTypeDef *hrtc, HAL_RTC_Call /** * @brief Unregisters an RTC Callback - * RTC callabck is redirected to the weak predefined callback + * RTC callback is redirected to the weak predefined callback * @param hrtc pointer to a RTC_HandleTypeDef structure that contains * the configuration information for RTC. 
* @param CallbackID ID of the callback to be unregistered @@ -1293,7 +1305,8 @@ HAL_StatusTypeDef HAL_RTC_SetAlarm_IT(RTC_HandleTypeDef *hrtc, RTC_AlarmTypeDef /* Wait till RTC ALRAWF flag is set and if timeout is reached exit */ do { - if (count-- == 0U) + count = count - 1U; + if (count == 0U) { /* Enable the write protection for RTC registers */ __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); @@ -1329,7 +1342,8 @@ HAL_StatusTypeDef HAL_RTC_SetAlarm_IT(RTC_HandleTypeDef *hrtc, RTC_AlarmTypeDef /* Wait till RTC ALRBWF flag is set and if timeout is reached exit */ do { - if (count-- == 0U) + count = count - 1U; + if (count == 0U) { /* Enable the write protection for RTC registers */ __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); @@ -1529,21 +1543,24 @@ HAL_StatusTypeDef HAL_RTC_GetAlarm(RTC_HandleTypeDef *hrtc, RTC_AlarmTypeDef *sA */ void HAL_RTC_AlarmIRQHandler(RTC_HandleTypeDef *hrtc) { + /* Clear the EXTI's line Flag for RTC Alarm */ + __HAL_RTC_ALARM_EXTI_CLEAR_FLAG(); + /* Get the Alarm A interrupt source enable status */ if (__HAL_RTC_ALARM_GET_IT_SOURCE(hrtc, RTC_IT_ALRA) != 0U) { /* Get the pending status of the Alarm A Interrupt */ if (__HAL_RTC_ALARM_GET_FLAG(hrtc, RTC_FLAG_ALRAF) != 0U) { + /* Clear the Alarm A interrupt pending bit */ + __HAL_RTC_ALARM_CLEAR_FLAG(hrtc, RTC_FLAG_ALRAF); + /* Alarm A callback */ #if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) hrtc->AlarmAEventCallback(hrtc); #else HAL_RTC_AlarmAEventCallback(hrtc); #endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ - - /* Clear the Alarm A interrupt pending bit */ - __HAL_RTC_ALARM_CLEAR_FLAG(hrtc, RTC_FLAG_ALRAF); } } @@ -1553,21 +1570,18 @@ void HAL_RTC_AlarmIRQHandler(RTC_HandleTypeDef *hrtc) /* Get the pending status of the Alarm B Interrupt */ if (__HAL_RTC_ALARM_GET_FLAG(hrtc, RTC_FLAG_ALRBF) != 0U) { + /* Clear the Alarm B interrupt pending bit */ + __HAL_RTC_ALARM_CLEAR_FLAG(hrtc, RTC_FLAG_ALRBF); + /* Alarm B callback */ #if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) hrtc->AlarmBEventCallback(hrtc); #else HAL_RTCEx_AlarmBEventCallback(hrtc); #endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ - - /* Clear the Alarm B interrupt pending bit */ - __HAL_RTC_ALARM_CLEAR_FLAG(hrtc, RTC_FLAG_ALRBF); } } - /* Clear the EXTI's line Flag for RTC Alarm */ - __HAL_RTC_ALARM_EXTI_CLEAR_FLAG(); - /* Change RTC state */ hrtc->State = HAL_RTC_STATE_READY; } @@ -1663,8 +1677,8 @@ HAL_StatusTypeDef HAL_RTC_WaitForSynchro(RTC_HandleTypeDef *hrtc) { uint32_t tickstart = 0U; - /* Clear RSF flag */ - hrtc->Instance->ISR &= (uint32_t)RTC_RSF_MASK; + /* Clear RSF flag, keep reserved bits at reset values (setting other flags has no effect) */ + hrtc->Instance->ISR = ((uint32_t)(RTC_RSF_MASK & RTC_ISR_RESERVED_MASK)); /* Get tick */ tickstart = HAL_GetTick(); @@ -1859,7 +1873,7 @@ HAL_StatusTypeDef RTC_ExitInitMode(RTC_HandleTypeDef *hrtc) */ uint8_t RTC_ByteToBcd2(uint8_t number) { - uint8_t bcdhigh = 0U; + uint32_t bcdhigh = 0U; while (number >= 10U) { @@ -1877,9 +1891,9 @@ uint8_t RTC_ByteToBcd2(uint8_t number) */ uint8_t RTC_Bcd2ToByte(uint8_t number) { - uint8_t tmp = 0U; - tmp = ((uint8_t)(number & (uint8_t)0xF0) >> (uint8_t)0x4) * 10; - return (tmp + (number & (uint8_t)0x0F)); + uint32_t tens = 0U; + tens = (((uint32_t)number & 0xF0U) >> 4U) * 10U; + return (uint8_t)(tens + ((uint32_t)number & 0x0FU)); } /** diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rtc_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rtc_ex.c index eb5708fa..f6aec5ea 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rtc_ex.c +++ 
b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_rtc_ex.c @@ -14,7 +14,7 @@ ****************************************************************************** * @attention * - * Copyright (c) 2017 STMicroelectronics. + * Copyright (c) 2016 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file @@ -605,6 +605,9 @@ HAL_StatusTypeDef HAL_RTCEx_DeactivateTamper(RTC_HandleTypeDef *hrtc, uint32_t T */ void HAL_RTCEx_TamperTimeStampIRQHandler(RTC_HandleTypeDef *hrtc) { + /* Clear the EXTI's Flag for RTC Timestamp and Tamper */ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_CLEAR_FLAG(); + /* Get the Timestamp interrupt source enable status */ if (__HAL_RTC_TIMESTAMP_GET_IT_SOURCE(hrtc, RTC_IT_TS) != 0U) { @@ -618,7 +621,8 @@ void HAL_RTCEx_TamperTimeStampIRQHandler(RTC_HandleTypeDef *hrtc) HAL_RTCEx_TimeStampEventCallback(hrtc); #endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ - /* Clear the Timestamp interrupt pending bit */ + /* Clear the Timestamp interrupt pending bit after returning from callback + as RTC_TSTR and RTC_TSDR registers are cleared when TSF bit is reset */ __HAL_RTC_TIMESTAMP_CLEAR_FLAG(hrtc, RTC_FLAG_TSF); } } @@ -629,15 +633,15 @@ void HAL_RTCEx_TamperTimeStampIRQHandler(RTC_HandleTypeDef *hrtc) /* Get the pending status of the Tamper 1 Interrupt */ if (__HAL_RTC_TAMPER_GET_FLAG(hrtc, RTC_FLAG_TAMP1F) != 0U) { + /* Clear the Tamper interrupt pending bit */ + __HAL_RTC_TAMPER_CLEAR_FLAG(hrtc, RTC_FLAG_TAMP1F); + /* Tamper callback */ #if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) hrtc->Tamper1EventCallback(hrtc); #else HAL_RTCEx_Tamper1EventCallback(hrtc); #endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ - - /* Clear the Tamper interrupt pending bit */ - __HAL_RTC_TAMPER_CLEAR_FLAG(hrtc, RTC_FLAG_TAMP1F); } } @@ -648,22 +652,19 @@ void HAL_RTCEx_TamperTimeStampIRQHandler(RTC_HandleTypeDef *hrtc) /* Get the pending status of the Tamper 2 Interrupt */ if (__HAL_RTC_TAMPER_GET_FLAG(hrtc, RTC_FLAG_TAMP2F) != 0U) { + /* Clear the Tamper interrupt pending bit */ + __HAL_RTC_TAMPER_CLEAR_FLAG(hrtc, RTC_FLAG_TAMP2F); + /* Tamper callback */ #if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) hrtc->Tamper2EventCallback(hrtc); #else HAL_RTCEx_Tamper2EventCallback(hrtc); #endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ - - /* Clear the Tamper interrupt pending bit */ - __HAL_RTC_TAMPER_CLEAR_FLAG(hrtc, RTC_FLAG_TAMP2F); } } #endif /* RTC_TAMPER2_SUPPORT */ - /* Clear the EXTI's Flag for RTC Timestamp and Tamper */ - __HAL_RTC_TAMPER_TIMESTAMP_EXTI_CLEAR_FLAG(); - /* Change RTC state */ hrtc->State = HAL_RTC_STATE_READY; } @@ -979,7 +980,8 @@ HAL_StatusTypeDef HAL_RTCEx_SetWakeUpTimer_IT(RTC_HandleTypeDef *hrtc, uint32_t /* Wait till RTC WUTWF flag is reset and if timeout is reached exit */ do { - if (count-- == 0U) + count = count - 1U; + if (count == 0U) { /* Enable the write protection for RTC registers */ __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); @@ -1006,7 +1008,8 @@ HAL_StatusTypeDef HAL_RTCEx_SetWakeUpTimer_IT(RTC_HandleTypeDef *hrtc, uint32_t /* Wait till RTC WUTWF flag is set and if timeout is reached exit */ do { - if (count-- == 0U) + count = count - 1U; + if (count == 0U) { /* Enable the write protection for RTC registers */ __HAL_RTC_WRITEPROTECTION_ENABLE(hrtc); @@ -1130,23 +1133,23 @@ uint32_t HAL_RTCEx_GetWakeUpTimer(RTC_HandleTypeDef *hrtc) */ void HAL_RTCEx_WakeUpTimerIRQHandler(RTC_HandleTypeDef *hrtc) { + /* Clear the EXTI's line Flag for RTC WakeUpTimer */ + __HAL_RTC_WAKEUPTIMER_EXTI_CLEAR_FLAG(); + /* Get the pending status of the Wakeup timer 
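The tamper/timestamp handler above now clears the EXTI line flag first and clears each tamper pending bit before its callback runs; the timestamp flag is deliberately still cleared only after the callback, because resetting TSF also wipes RTC_TSTR/RTC_TSDR. A hedged sketch of a user callback that relies on that ordering (the override pattern is standard, the body is illustrative only):

#include "stm32f4xx_hal.h"

void HAL_RTCEx_TimeStampEventCallback(RTC_HandleTypeDef *hrtc)
{
  RTC_TimeTypeDef ts_time;
  RTC_DateTypeDef ts_date;

  /* Safe here: the driver clears RTC_FLAG_TSF only after this callback
     returns, so the captured time/date registers are still valid. */
  if (HAL_RTCEx_GetTimeStamp(hrtc, &ts_time, &ts_date, RTC_FORMAT_BIN) == HAL_OK)
  {
    /* use ts_time / ts_date */
  }
}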
Interrupt */ if (__HAL_RTC_WAKEUPTIMER_GET_FLAG(hrtc, RTC_FLAG_WUTF) != 0U) { + /* Clear the Wakeup timer interrupt pending bit */ + __HAL_RTC_WAKEUPTIMER_CLEAR_FLAG(hrtc, RTC_FLAG_WUTF); + /* Wakeup timer callback */ #if (USE_HAL_RTC_REGISTER_CALLBACKS == 1) hrtc->WakeUpTimerEventCallback(hrtc); #else HAL_RTCEx_WakeUpTimerEventCallback(hrtc); #endif /* USE_HAL_RTC_REGISTER_CALLBACKS */ - - /* Clear the Wakeup timer interrupt pending bit */ - __HAL_RTC_WAKEUPTIMER_CLEAR_FLAG(hrtc, RTC_FLAG_WUTF); } - /* Clear the EXTI's line Flag for RTC WakeUpTimer */ - __HAL_RTC_WAKEUPTIMER_EXTI_CLEAR_FLAG(); - /* Change RTC state */ hrtc->State = HAL_RTC_STATE_READY; } diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_sd.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_sd.c index f4145e9e..fd50841f 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_sd.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_sd.c @@ -696,7 +696,11 @@ HAL_StatusTypeDef HAL_SD_ReadBlocks(SD_HandleTypeDef *hsd, uint8_t *pData, uint3 } /* Get error state */ +#if defined(SDIO_STA_STBITERR) + if(__HAL_SD_GET_FLAG(hsd, SDIO_FLAG_DTIMEOUT) || (__HAL_SD_GET_FLAG(hsd, SDIO_FLAG_STBITERR))) +#else /* SDIO_STA_STBITERR not defined */ if(__HAL_SD_GET_FLAG(hsd, SDIO_FLAG_DTIMEOUT)) +#endif /* SDIO_STA_STBITERR */ { /* Clear all the static flags */ __HAL_SD_CLEAR_FLAG(hsd, SDIO_STATIC_FLAGS); @@ -911,7 +915,11 @@ HAL_StatusTypeDef HAL_SD_WriteBlocks(SD_HandleTypeDef *hsd, uint8_t *pData, uint } /* Get error state */ +#if defined(SDIO_STA_STBITERR) + if(__HAL_SD_GET_FLAG(hsd, SDIO_FLAG_DTIMEOUT) || (__HAL_SD_GET_FLAG(hsd, SDIO_FLAG_STBITERR))) +#else /* SDIO_STA_STBITERR not defined */ if(__HAL_SD_GET_FLAG(hsd, SDIO_FLAG_DTIMEOUT)) +#endif /* SDIO_STA_STBITERR */ { /* Clear all the static flags */ __HAL_SD_CLEAR_FLAG(hsd, SDIO_STATIC_FLAGS); @@ -2921,13 +2929,17 @@ static uint32_t SD_SendSDStatus(SD_HandleTypeDef *hsd, uint32_t *pSDstatus) } } - if((HAL_GetTick() - tickstart) >= SDMMC_DATATIMEOUT) + if((HAL_GetTick() - tickstart) >= SDMMC_SWDATATIMEOUT) { return HAL_SD_ERROR_TIMEOUT; } } +#if defined(SDIO_STA_STBITERR) + if(__HAL_SD_GET_FLAG(hsd, SDIO_FLAG_DTIMEOUT) || (__HAL_SD_GET_FLAG(hsd, SDIO_FLAG_STBITERR))) +#else /* SDIO_STA_STBITERR not defined */ if(__HAL_SD_GET_FLAG(hsd, SDIO_FLAG_DTIMEOUT)) +#endif /* SDIO_STA_STBITERR */ { return HAL_SD_ERROR_DATA_TIMEOUT; } @@ -2949,7 +2961,7 @@ static uint32_t SD_SendSDStatus(SD_HandleTypeDef *hsd, uint32_t *pSDstatus) *pData = SDIO_ReadFIFO(hsd->Instance); pData++; - if((HAL_GetTick() - tickstart) >= SDMMC_DATATIMEOUT) + if((HAL_GetTick() - tickstart) >= SDMMC_SWDATATIMEOUT) { return HAL_SD_ERROR_TIMEOUT; } @@ -3141,13 +3153,17 @@ static uint32_t SD_FindSCR(SD_HandleTypeDef *hsd, uint32_t *pSCR) break; } - if((HAL_GetTick() - tickstart) >= SDMMC_DATATIMEOUT) + if((HAL_GetTick() - tickstart) >= SDMMC_SWDATATIMEOUT) { return HAL_SD_ERROR_TIMEOUT; } } +#if defined(SDIO_STA_STBITERR) + if(__HAL_SD_GET_FLAG(hsd, SDIO_FLAG_DTIMEOUT) || (__HAL_SD_GET_FLAG(hsd, SDIO_FLAG_STBITERR))) +#else /* SDIO_STA_STBITERR not defined */ if(__HAL_SD_GET_FLAG(hsd, SDIO_FLAG_DTIMEOUT)) +#endif /* SDIO_STA_STBITERR */ { __HAL_SD_CLEAR_FLAG(hsd, SDIO_FLAG_DTIMEOUT); diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_spi.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_spi.c index 99531f71..99b5549c 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_spi.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_spi.c @@ -9,7 +9,17 @@ * + IO operation functions * + Peripheral 
Control functions * + Peripheral State functions + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim ============================================================================== ##### How to use this driver ##### @@ -131,7 +141,7 @@ DataSize = SPI_DATASIZE_8BIT: +----------------------------------------------------------------------------------------------+ | | | 2Lines Fullduplex | 2Lines RxOnly | 1Line | - | Process | Tranfert mode |---------------------|----------------------|----------------------| + | Process | Transfer mode |---------------------|----------------------|----------------------| | | | Master | Slave | Master | Slave | Master | Slave | |==============================================================================================| | T | Polling | Fpclk/2 | Fpclk/2 | NA | NA | NA | NA | @@ -156,7 +166,7 @@ DataSize = SPI_DATASIZE_16BIT: +----------------------------------------------------------------------------------------------+ | | | 2Lines Fullduplex | 2Lines RxOnly | 1Line | - | Process | Tranfert mode |---------------------|----------------------|----------------------| + | Process | Transfer mode |---------------------|----------------------|----------------------| | | | Master | Slave | Master | Slave | Master | Slave | |==============================================================================================| | T | Polling | Fpclk/2 | Fpclk/2 | NA | NA | NA | NA | @@ -184,18 +194,6 @@ (#) RX processes are HAL_SPI_Receive(), HAL_SPI_Receive_IT() and HAL_SPI_Receive_DMA() (#) TX processes are HAL_SPI_Transmit(), HAL_SPI_Transmit_IT() and HAL_SPI_Transmit_DMA() - ****************************************************************************** - * @attention - * - *
© Copyright (c) 2016 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * - ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ @@ -331,6 +329,24 @@ HAL_StatusTypeDef HAL_SPI_Init(SPI_HandleTypeDef *hspi) { assert_param(IS_SPI_CPOL(hspi->Init.CLKPolarity)); assert_param(IS_SPI_CPHA(hspi->Init.CLKPhase)); + + if (hspi->Init.Mode == SPI_MODE_MASTER) + { + assert_param(IS_SPI_BAUDRATE_PRESCALER(hspi->Init.BaudRatePrescaler)); + } + else + { + /* Baudrate prescaler not use in Motoraola Slave mode. force to default value */ + hspi->Init.BaudRatePrescaler = SPI_BAUDRATEPRESCALER_2; + } + } + else + { + assert_param(IS_SPI_BAUDRATE_PRESCALER(hspi->Init.BaudRatePrescaler)); + + /* Force polarity and phase to TI protocaol requirements */ + hspi->Init.CLKPolarity = SPI_POLARITY_LOW; + hspi->Init.CLKPhase = SPI_PHASE_1EDGE; } #if (USE_SPI_CRC != 0U) assert_param(IS_SPI_CRC_CALCULATION(hspi->Init.CRCCalculation)); @@ -379,19 +395,25 @@ HAL_StatusTypeDef HAL_SPI_Init(SPI_HandleTypeDef *hspi) /*----------------------- SPIx CR1 & CR2 Configuration ---------------------*/ /* Configure : SPI Mode, Communication Mode, Data size, Clock polarity and phase, NSS management, Communication speed, First bit and CRC calculation state */ - WRITE_REG(hspi->Instance->CR1, (hspi->Init.Mode | hspi->Init.Direction | hspi->Init.DataSize | - hspi->Init.CLKPolarity | hspi->Init.CLKPhase | (hspi->Init.NSS & SPI_CR1_SSM) | - hspi->Init.BaudRatePrescaler | hspi->Init.FirstBit | hspi->Init.CRCCalculation)); + WRITE_REG(hspi->Instance->CR1, ((hspi->Init.Mode & (SPI_CR1_MSTR | SPI_CR1_SSI)) | + (hspi->Init.Direction & (SPI_CR1_RXONLY | SPI_CR1_BIDIMODE)) | + (hspi->Init.DataSize & SPI_CR1_DFF) | + (hspi->Init.CLKPolarity & SPI_CR1_CPOL) | + (hspi->Init.CLKPhase & SPI_CR1_CPHA) | + (hspi->Init.NSS & SPI_CR1_SSM) | + (hspi->Init.BaudRatePrescaler & SPI_CR1_BR_Msk) | + (hspi->Init.FirstBit & SPI_CR1_LSBFIRST) | + (hspi->Init.CRCCalculation & SPI_CR1_CRCEN))); /* Configure : NSS management, TI Mode */ - WRITE_REG(hspi->Instance->CR2, (((hspi->Init.NSS >> 16U) & SPI_CR2_SSOE) | hspi->Init.TIMode)); + WRITE_REG(hspi->Instance->CR2, (((hspi->Init.NSS >> 16U) & SPI_CR2_SSOE) | (hspi->Init.TIMode & SPI_CR2_FRF))); #if (USE_SPI_CRC != 0U) /*---------------------------- SPIx CRCPOLY Configuration ------------------*/ /* Configure : CRC Polynomial */ if (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE) { - WRITE_REG(hspi->Instance->CRCPR, hspi->Init.CRCPolynomial); + WRITE_REG(hspi->Instance->CRCPR, (hspi->Init.CRCPolynomial & SPI_CRCPR_CRCPOLY_Msk)); } #endif /* USE_SPI_CRC */ @@ -744,38 +766,35 @@ HAL_StatusTypeDef HAL_SPI_UnRegisterCallback(SPI_HandleTypeDef *hspi, HAL_SPI_Ca * @param Timeout Timeout duration * @retval HAL status */ -HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size, uint32_t Timeout) +HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, const uint8_t *pData, uint16_t Size, uint32_t Timeout) { uint32_t tickstart; - HAL_StatusTypeDef errorcode = HAL_OK; uint16_t initial_TxXferCount; /* Check Direction parameter */ assert_param(IS_SPI_DIRECTION_2LINES_OR_1LINE(hspi->Init.Direction)); - /* Process Locked */ - __HAL_LOCK(hspi); - /* Init tickstart for timeout management*/ tickstart = 
HAL_GetTick(); initial_TxXferCount = Size; if (hspi->State != HAL_SPI_STATE_READY) { - errorcode = HAL_BUSY; - goto error; + return HAL_BUSY; } if ((pData == NULL) || (Size == 0U)) { - errorcode = HAL_ERROR; - goto error; + return HAL_ERROR; } + /* Process Locked */ + __HAL_LOCK(hspi); + /* Set the transaction information */ hspi->State = HAL_SPI_STATE_BUSY_TX; hspi->ErrorCode = HAL_SPI_ERROR_NONE; - hspi->pTxBuffPtr = (uint8_t *)pData; + hspi->pTxBuffPtr = (const uint8_t *)pData; hspi->TxXferSize = Size; hspi->TxXferCount = Size; @@ -789,6 +808,8 @@ HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, uint8_t *pData, uint /* Configure communication direction : 1Line */ if (hspi->Init.Direction == SPI_DIRECTION_1LINE) { + /* Disable SPI Peripheral before set 1Line direction (BIDIOE bit) */ + __HAL_SPI_DISABLE(hspi); SPI_1LINE_TX(hspi); } @@ -812,7 +833,7 @@ HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, uint8_t *pData, uint { if ((hspi->Init.Mode == SPI_MODE_SLAVE) || (initial_TxXferCount == 0x01U)) { - hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->Instance->DR = *((const uint16_t *)hspi->pTxBuffPtr); hspi->pTxBuffPtr += sizeof(uint16_t); hspi->TxXferCount--; } @@ -822,7 +843,7 @@ HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, uint8_t *pData, uint /* Wait until TXE flag is set to send data */ if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_TXE)) { - hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->Instance->DR = *((const uint16_t *)hspi->pTxBuffPtr); hspi->pTxBuffPtr += sizeof(uint16_t); hspi->TxXferCount--; } @@ -831,8 +852,9 @@ HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, uint8_t *pData, uint /* Timeout management */ if ((((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) || (Timeout == 0U)) { - errorcode = HAL_TIMEOUT; - goto error; + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; } } } @@ -842,7 +864,7 @@ HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, uint8_t *pData, uint { if ((hspi->Init.Mode == SPI_MODE_SLAVE) || (initial_TxXferCount == 0x01U)) { - *((__IO uint8_t *)&hspi->Instance->DR) = (*hspi->pTxBuffPtr); + *((__IO uint8_t *)&hspi->Instance->DR) = *((const uint8_t *)hspi->pTxBuffPtr); hspi->pTxBuffPtr += sizeof(uint8_t); hspi->TxXferCount--; } @@ -851,7 +873,7 @@ HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, uint8_t *pData, uint /* Wait until TXE flag is set to send data */ if (__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_TXE)) { - *((__IO uint8_t *)&hspi->Instance->DR) = (*hspi->pTxBuffPtr); + *((__IO uint8_t *)&hspi->Instance->DR) = *((const uint8_t *)hspi->pTxBuffPtr); hspi->pTxBuffPtr += sizeof(uint8_t); hspi->TxXferCount--; } @@ -860,8 +882,9 @@ HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, uint8_t *pData, uint /* Timeout management */ if ((((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) || (Timeout == 0U)) { - errorcode = HAL_TIMEOUT; - goto error; + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; } } } @@ -886,16 +909,18 @@ HAL_StatusTypeDef HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, uint8_t *pData, uint __HAL_SPI_CLEAR_OVRFLAG(hspi); } - if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) - { - errorcode = HAL_ERROR; - } - -error: hspi->State = HAL_SPI_STATE_READY; /* Process Unlocked */ __HAL_UNLOCK(hspi); - return errorcode; + + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + return HAL_ERROR; + } + else + { + return HAL_OK; + } } /** @@ -909,8 +934,15 @@ HAL_StatusTypeDef 
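With the rework above, HAL_SPI_Transmit rejects a busy handle before taking the lock and restores HAL_SPI_STATE_READY and releases the lock itself on timeout, so callers only need to act on the return code. A minimal caller sketch (hspi1, the frame helper, and the 100 ms timeout are hypothetical):

#include "stm32f4xx_hal.h"

extern SPI_HandleTypeDef hspi1;         /* assumed to be initialized elsewhere */

HAL_StatusTypeDef spi_send_frame(const uint8_t *buf, uint16_t len)
{
  /* The const-qualified buffer matches the updated prototype above. */
  HAL_StatusTypeDef st = HAL_SPI_Transmit(&hspi1, buf, len, 100U);

  if (st == HAL_TIMEOUT)
  {
    /* State and lock were already released by the driver, so a plain
       retry (or an error report) is all that is needed here. */
  }
  return st;
}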
HAL_SPI_Transmit(SPI_HandleTypeDef *hspi, uint8_t *pData, uint */ HAL_StatusTypeDef HAL_SPI_Receive(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size, uint32_t Timeout) { +#if (USE_SPI_CRC != 0U) + __IO uint32_t tmpreg = 0U; +#endif /* USE_SPI_CRC */ uint32_t tickstart; - HAL_StatusTypeDef errorcode = HAL_OK; + + if (hspi->State != HAL_SPI_STATE_READY) + { + return HAL_BUSY; + } if ((hspi->Init.Mode == SPI_MODE_MASTER) && (hspi->Init.Direction == SPI_DIRECTION_2LINES)) { @@ -919,24 +951,17 @@ HAL_StatusTypeDef HAL_SPI_Receive(SPI_HandleTypeDef *hspi, uint8_t *pData, uint1 return HAL_SPI_TransmitReceive(hspi, pData, pData, Size, Timeout); } - /* Process Locked */ - __HAL_LOCK(hspi); - /* Init tickstart for timeout management*/ tickstart = HAL_GetTick(); - if (hspi->State != HAL_SPI_STATE_READY) - { - errorcode = HAL_BUSY; - goto error; - } - if ((pData == NULL) || (Size == 0U)) { - errorcode = HAL_ERROR; - goto error; + return HAL_ERROR; } + /* Process Locked */ + __HAL_LOCK(hspi); + /* Set the transaction information */ hspi->State = HAL_SPI_STATE_BUSY_RX; hspi->ErrorCode = HAL_SPI_ERROR_NONE; @@ -964,6 +989,8 @@ HAL_StatusTypeDef HAL_SPI_Receive(SPI_HandleTypeDef *hspi, uint8_t *pData, uint1 /* Configure communication direction: 1Line */ if (hspi->Init.Direction == SPI_DIRECTION_1LINE) { + /* Disable SPI Peripheral before set 1Line direction (BIDIOE bit) */ + __HAL_SPI_DISABLE(hspi); SPI_1LINE_RX(hspi); } @@ -993,8 +1020,9 @@ HAL_StatusTypeDef HAL_SPI_Receive(SPI_HandleTypeDef *hspi, uint8_t *pData, uint1 /* Timeout management */ if ((((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) || (Timeout == 0U)) { - errorcode = HAL_TIMEOUT; - goto error; + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; } } } @@ -1016,8 +1044,9 @@ HAL_StatusTypeDef HAL_SPI_Receive(SPI_HandleTypeDef *hspi, uint8_t *pData, uint1 /* Timeout management */ if ((((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) || (Timeout == 0U)) { - errorcode = HAL_TIMEOUT; - goto error; + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; } } } @@ -1034,8 +1063,8 @@ HAL_StatusTypeDef HAL_SPI_Receive(SPI_HandleTypeDef *hspi, uint8_t *pData, uint1 if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, SET, Timeout, tickstart) != HAL_OK) { /* the latest data has not been received */ - errorcode = HAL_TIMEOUT; - goto error; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; } /* Receive last data in 16 Bit mode */ @@ -1053,12 +1082,15 @@ HAL_StatusTypeDef HAL_SPI_Receive(SPI_HandleTypeDef *hspi, uint8_t *pData, uint1 if (SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_RXNE, SET, Timeout, tickstart) != HAL_OK) { SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); - errorcode = HAL_TIMEOUT; - goto error; + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; } /* Read CRC to Flush DR and RXNE flag */ - READ_REG(hspi->Instance->DR); + tmpreg = READ_REG(hspi->Instance->DR); + /* To avoid GCC warning */ + UNUSED(tmpreg); } #endif /* USE_SPI_CRC */ @@ -1077,15 +1109,17 @@ HAL_StatusTypeDef HAL_SPI_Receive(SPI_HandleTypeDef *hspi, uint8_t *pData, uint1 } #endif /* USE_SPI_CRC */ + hspi->State = HAL_SPI_STATE_READY; + /* Unlock the process */ + __HAL_UNLOCK(hspi); if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) { - errorcode = HAL_ERROR; + return HAL_ERROR; + } + else + { + return HAL_OK; } - -error : - hspi->State = HAL_SPI_STATE_READY; - __HAL_UNLOCK(hspi); - return errorcode; } /** @@ -1098,24 +1132,23 @@ error : * @param Timeout 
Timeout duration * @retval HAL status */ -HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, uint8_t *pTxData, uint8_t *pRxData, uint16_t Size, - uint32_t Timeout) +HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, const uint8_t *pTxData, uint8_t *pRxData, + uint16_t Size, uint32_t Timeout) { uint16_t initial_TxXferCount; uint32_t tmp_mode; HAL_SPI_StateTypeDef tmp_state; uint32_t tickstart; +#if (USE_SPI_CRC != 0U) + __IO uint32_t tmpreg = 0U; +#endif /* USE_SPI_CRC */ /* Variable used to alternate Rx and Tx during transfer */ uint32_t txallowed = 1U; - HAL_StatusTypeDef errorcode = HAL_OK; /* Check Direction parameter */ assert_param(IS_SPI_DIRECTION_2LINES(hspi->Init.Direction)); - /* Process Locked */ - __HAL_LOCK(hspi); - /* Init tickstart for timeout management*/ tickstart = HAL_GetTick(); @@ -1127,16 +1160,17 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, uint8_t *pTxD if (!((tmp_state == HAL_SPI_STATE_READY) || \ ((tmp_mode == SPI_MODE_MASTER) && (hspi->Init.Direction == SPI_DIRECTION_2LINES) && (tmp_state == HAL_SPI_STATE_BUSY_RX)))) { - errorcode = HAL_BUSY; - goto error; + return HAL_BUSY; } if ((pTxData == NULL) || (pRxData == NULL) || (Size == 0U)) { - errorcode = HAL_ERROR; - goto error; + return HAL_ERROR; } + /* Process Locked */ + __HAL_LOCK(hspi); + /* Don't overwrite in case of HAL_SPI_STATE_BUSY_RX */ if (hspi->State != HAL_SPI_STATE_BUSY_RX) { @@ -1148,7 +1182,7 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, uint8_t *pTxD hspi->pRxBuffPtr = (uint8_t *)pRxData; hspi->RxXferCount = Size; hspi->RxXferSize = Size; - hspi->pTxBuffPtr = (uint8_t *)pTxData; + hspi->pTxBuffPtr = (const uint8_t *)pTxData; hspi->TxXferCount = Size; hspi->TxXferSize = Size; @@ -1176,16 +1210,25 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, uint8_t *pTxD { if ((hspi->Init.Mode == SPI_MODE_SLAVE) || (initial_TxXferCount == 0x01U)) { - hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->Instance->DR = *((const uint16_t *)hspi->pTxBuffPtr); hspi->pTxBuffPtr += sizeof(uint16_t); hspi->TxXferCount--; + +#if (USE_SPI_CRC != 0U) + /* Enable CRC Transmission */ + if ((hspi->TxXferCount == 0U) && (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE)) + { + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ + } while ((hspi->TxXferCount > 0U) || (hspi->RxXferCount > 0U)) { /* Check TXE flag */ if ((__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_TXE)) && (hspi->TxXferCount > 0U) && (txallowed == 1U)) { - hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->Instance->DR = *((const uint16_t *)hspi->pTxBuffPtr); hspi->pTxBuffPtr += sizeof(uint16_t); hspi->TxXferCount--; /* Next Data is a reception (Rx). 
Tx not allowed */ @@ -1211,8 +1254,9 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, uint8_t *pTxD } if (((HAL_GetTick() - tickstart) >= Timeout) && (Timeout != HAL_MAX_DELAY)) { - errorcode = HAL_TIMEOUT; - goto error; + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; } } } @@ -1221,16 +1265,24 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, uint8_t *pTxD { if ((hspi->Init.Mode == SPI_MODE_SLAVE) || (initial_TxXferCount == 0x01U)) { - *((__IO uint8_t *)&hspi->Instance->DR) = (*hspi->pTxBuffPtr); + *((__IO uint8_t *)&hspi->Instance->DR) = *((const uint8_t *)hspi->pTxBuffPtr); hspi->pTxBuffPtr += sizeof(uint8_t); hspi->TxXferCount--; + +#if (USE_SPI_CRC != 0U) + /* Enable CRC Transmission */ + if ((hspi->TxXferCount == 0U) && (hspi->Init.CRCCalculation == SPI_CRCCALCULATION_ENABLE)) + { + SET_BIT(hspi->Instance->CR1, SPI_CR1_CRCNEXT); + } +#endif /* USE_SPI_CRC */ } while ((hspi->TxXferCount > 0U) || (hspi->RxXferCount > 0U)) { /* Check TXE flag */ if ((__HAL_SPI_GET_FLAG(hspi, SPI_FLAG_TXE)) && (hspi->TxXferCount > 0U) && (txallowed == 1U)) { - *(__IO uint8_t *)&hspi->Instance->DR = (*hspi->pTxBuffPtr); + *(__IO uint8_t *)&hspi->Instance->DR = *((const uint8_t *)hspi->pTxBuffPtr); hspi->pTxBuffPtr++; hspi->TxXferCount--; /* Next Data is a reception (Rx). Tx not allowed */ @@ -1256,8 +1308,9 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, uint8_t *pTxD } if ((((HAL_GetTick() - tickstart) >= Timeout) && ((Timeout != HAL_MAX_DELAY))) || (Timeout == 0U)) { - errorcode = HAL_TIMEOUT; - goto error; + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; } } } @@ -1271,11 +1324,14 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, uint8_t *pTxD { /* Error on the CRC reception */ SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); - errorcode = HAL_TIMEOUT; - goto error; + hspi->State = HAL_SPI_STATE_READY; + __HAL_UNLOCK(hspi); + return HAL_TIMEOUT; } /* Read CRC */ - READ_REG(hspi->Instance->DR); + tmpreg = READ_REG(hspi->Instance->DR); + /* To avoid GCC warning */ + UNUSED(tmpreg); } /* Check if CRC error occurred */ @@ -1284,17 +1340,17 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, uint8_t *pTxD SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); /* Clear CRC Flag */ __HAL_SPI_CLEAR_CRCERRFLAG(hspi); - - errorcode = HAL_ERROR; + __HAL_UNLOCK(hspi); + return HAL_ERROR; } #endif /* USE_SPI_CRC */ /* Check the end of the transaction */ if (SPI_EndRxTxTransaction(hspi, Timeout, tickstart) != HAL_OK) { - errorcode = HAL_ERROR; hspi->ErrorCode = HAL_SPI_ERROR_FLAG; - goto error; + __HAL_UNLOCK(hspi); + return HAL_ERROR; } /* Clear overrun flag in 2 Lines communication mode because received is not read */ @@ -1303,10 +1359,19 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive(SPI_HandleTypeDef *hspi, uint8_t *pTxD __HAL_SPI_CLEAR_OVRFLAG(hspi); } -error : + hspi->State = HAL_SPI_STATE_READY; + /* Unlock the process */ __HAL_UNLOCK(hspi); - return errorcode; + + if (hspi->ErrorCode != HAL_SPI_ERROR_NONE) + { + return HAL_ERROR; + } + else + { + return HAL_OK; + } } /** @@ -1317,32 +1382,30 @@ error : * @param Size amount of data to be sent * @retval HAL status */ -HAL_StatusTypeDef HAL_SPI_Transmit_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size) +HAL_StatusTypeDef HAL_SPI_Transmit_IT(SPI_HandleTypeDef *hspi, const uint8_t *pData, uint16_t Size) { - HAL_StatusTypeDef errorcode = HAL_OK; /* Check Direction parameter */ 
assert_param(IS_SPI_DIRECTION_2LINES_OR_1LINE(hspi->Init.Direction)); - /* Process Locked */ - __HAL_LOCK(hspi); if ((pData == NULL) || (Size == 0U)) { - errorcode = HAL_ERROR; - goto error; + return HAL_ERROR; } if (hspi->State != HAL_SPI_STATE_READY) { - errorcode = HAL_BUSY; - goto error; + return HAL_BUSY; } + /* Process Locked */ + __HAL_LOCK(hspi); + /* Set the transaction information */ hspi->State = HAL_SPI_STATE_BUSY_TX; hspi->ErrorCode = HAL_SPI_ERROR_NONE; - hspi->pTxBuffPtr = (uint8_t *)pData; + hspi->pTxBuffPtr = (const uint8_t *)pData; hspi->TxXferSize = Size; hspi->TxXferCount = Size; @@ -1365,6 +1428,8 @@ HAL_StatusTypeDef HAL_SPI_Transmit_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, u /* Configure communication direction : 1Line */ if (hspi->Init.Direction == SPI_DIRECTION_1LINE) { + /* Disable SPI Peripheral before set 1Line direction (BIDIOE bit) */ + __HAL_SPI_DISABLE(hspi); SPI_1LINE_TX(hspi); } @@ -1376,10 +1441,6 @@ HAL_StatusTypeDef HAL_SPI_Transmit_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, u } #endif /* USE_SPI_CRC */ - /* Enable TXE and ERR interrupt */ - __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_TXE | SPI_IT_ERR)); - - /* Check if the SPI is already enabled */ if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) { @@ -1387,9 +1448,12 @@ HAL_StatusTypeDef HAL_SPI_Transmit_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, u __HAL_SPI_ENABLE(hspi); } -error : + /* Process Unlocked */ __HAL_UNLOCK(hspi); - return errorcode; + /* Enable TXE and ERR interrupt */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_TXE | SPI_IT_ERR)); + + return HAL_OK; } /** @@ -1402,7 +1466,11 @@ error : */ HAL_StatusTypeDef HAL_SPI_Receive_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size) { - HAL_StatusTypeDef errorcode = HAL_OK; + + if (hspi->State != HAL_SPI_STATE_READY) + { + return HAL_BUSY; + } if ((hspi->Init.Direction == SPI_DIRECTION_2LINES) && (hspi->Init.Mode == SPI_MODE_MASTER)) { @@ -1411,21 +1479,15 @@ HAL_StatusTypeDef HAL_SPI_Receive_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, ui return HAL_SPI_TransmitReceive_IT(hspi, pData, pData, Size); } - /* Process Locked */ - __HAL_LOCK(hspi); - - if (hspi->State != HAL_SPI_STATE_READY) - { - errorcode = HAL_BUSY; - goto error; - } if ((pData == NULL) || (Size == 0U)) { - errorcode = HAL_ERROR; - goto error; + return HAL_ERROR; } + /* Process Locked */ + __HAL_LOCK(hspi); + /* Set the transaction information */ hspi->State = HAL_SPI_STATE_BUSY_RX; hspi->ErrorCode = HAL_SPI_ERROR_NONE; @@ -1452,6 +1514,8 @@ HAL_StatusTypeDef HAL_SPI_Receive_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, ui /* Configure communication direction : 1Line */ if (hspi->Init.Direction == SPI_DIRECTION_1LINE) { + /* Disable SPI Peripheral before set 1Line direction (BIDIOE bit) */ + __HAL_SPI_DISABLE(hspi); SPI_1LINE_RX(hspi); } @@ -1463,9 +1527,6 @@ HAL_StatusTypeDef HAL_SPI_Receive_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, ui } #endif /* USE_SPI_CRC */ - /* Enable TXE and ERR interrupt */ - __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_RXNE | SPI_IT_ERR)); - /* Note : The SPI must be enabled after unlocking current process to avoid the risk of SPI interrupt handle execution before current process unlock */ @@ -1477,10 +1538,12 @@ HAL_StatusTypeDef HAL_SPI_Receive_IT(SPI_HandleTypeDef *hspi, uint8_t *pData, ui __HAL_SPI_ENABLE(hspi); } -error : /* Process Unlocked */ __HAL_UNLOCK(hspi); - return errorcode; + /* Enable RXNE and ERR interrupt */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_RXNE | SPI_IT_ERR)); + + return HAL_OK; } /** @@ -1492,18 +1555,15 @@ error : * @param Size amount of data 
to be sent and received * @retval HAL status */ -HAL_StatusTypeDef HAL_SPI_TransmitReceive_IT(SPI_HandleTypeDef *hspi, uint8_t *pTxData, uint8_t *pRxData, uint16_t Size) +HAL_StatusTypeDef HAL_SPI_TransmitReceive_IT(SPI_HandleTypeDef *hspi, const uint8_t *pTxData, uint8_t *pRxData, + uint16_t Size) { uint32_t tmp_mode; HAL_SPI_StateTypeDef tmp_state; - HAL_StatusTypeDef errorcode = HAL_OK; /* Check Direction parameter */ assert_param(IS_SPI_DIRECTION_2LINES(hspi->Init.Direction)); - /* Process locked */ - __HAL_LOCK(hspi); - /* Init temporary variables */ tmp_state = hspi->State; tmp_mode = hspi->Init.Mode; @@ -1511,16 +1571,17 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive_IT(SPI_HandleTypeDef *hspi, uint8_t *p if (!((tmp_state == HAL_SPI_STATE_READY) || \ ((tmp_mode == SPI_MODE_MASTER) && (hspi->Init.Direction == SPI_DIRECTION_2LINES) && (tmp_state == HAL_SPI_STATE_BUSY_RX)))) { - errorcode = HAL_BUSY; - goto error; + return HAL_BUSY; } if ((pTxData == NULL) || (pRxData == NULL) || (Size == 0U)) { - errorcode = HAL_ERROR; - goto error; + return HAL_ERROR; } + /* Process locked */ + __HAL_LOCK(hspi); + /* Don't overwrite in case of HAL_SPI_STATE_BUSY_RX */ if (hspi->State != HAL_SPI_STATE_BUSY_RX) { @@ -1529,7 +1590,7 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive_IT(SPI_HandleTypeDef *hspi, uint8_t *p /* Set the transaction information */ hspi->ErrorCode = HAL_SPI_ERROR_NONE; - hspi->pTxBuffPtr = (uint8_t *)pTxData; + hspi->pTxBuffPtr = (const uint8_t *)pTxData; hspi->TxXferSize = Size; hspi->TxXferCount = Size; hspi->pRxBuffPtr = (uint8_t *)pRxData; @@ -1556,8 +1617,6 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive_IT(SPI_HandleTypeDef *hspi, uint8_t *p } #endif /* USE_SPI_CRC */ - /* Enable TXE, RXNE and ERR interrupt */ - __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_TXE | SPI_IT_RXNE | SPI_IT_ERR)); /* Check if the SPI is already enabled */ if ((hspi->Instance->CR1 & SPI_CR1_SPE) != SPI_CR1_SPE) @@ -1566,10 +1625,12 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive_IT(SPI_HandleTypeDef *hspi, uint8_t *p __HAL_SPI_ENABLE(hspi); } -error : /* Process Unlocked */ __HAL_UNLOCK(hspi); - return errorcode; + /* Enable TXE, RXNE and ERR interrupt */ + __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_TXE | SPI_IT_RXNE | SPI_IT_ERR)); + + return HAL_OK; } /** @@ -1580,9 +1641,8 @@ error : * @param Size amount of data to be sent * @retval HAL status */ -HAL_StatusTypeDef HAL_SPI_Transmit_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size) +HAL_StatusTypeDef HAL_SPI_Transmit_DMA(SPI_HandleTypeDef *hspi, const uint8_t *pData, uint16_t Size) { - HAL_StatusTypeDef errorcode = HAL_OK; /* Check tx dma handle */ assert_param(IS_SPI_DMA_HANDLE(hspi->hdmatx)); @@ -1590,25 +1650,23 @@ HAL_StatusTypeDef HAL_SPI_Transmit_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, /* Check Direction parameter */ assert_param(IS_SPI_DIRECTION_2LINES_OR_1LINE(hspi->Init.Direction)); - /* Process Locked */ - __HAL_LOCK(hspi); - if (hspi->State != HAL_SPI_STATE_READY) { - errorcode = HAL_BUSY; - goto error; + return HAL_BUSY; } if ((pData == NULL) || (Size == 0U)) { - errorcode = HAL_ERROR; - goto error; + return HAL_ERROR; } + /* Process Locked */ + __HAL_LOCK(hspi); + /* Set the transaction information */ hspi->State = HAL_SPI_STATE_BUSY_TX; hspi->ErrorCode = HAL_SPI_ERROR_NONE; - hspi->pTxBuffPtr = (uint8_t *)pData; + hspi->pTxBuffPtr = (const uint8_t *)pData; hspi->TxXferSize = Size; hspi->TxXferCount = Size; @@ -1622,6 +1680,8 @@ HAL_StatusTypeDef HAL_SPI_Transmit_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, /* Configure communication direction : 
1Line */ if (hspi->Init.Direction == SPI_DIRECTION_1LINE) { + /* Disable SPI Peripheral before set 1Line direction (BIDIOE bit) */ + __HAL_SPI_DISABLE(hspi); SPI_1LINE_TX(hspi); } @@ -1651,10 +1711,9 @@ HAL_StatusTypeDef HAL_SPI_Transmit_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, { /* Update SPI error code */ SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); - errorcode = HAL_ERROR; - - hspi->State = HAL_SPI_STATE_READY; - goto error; + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + return HAL_ERROR; } /* Check if the SPI is already enabled */ @@ -1664,16 +1723,16 @@ HAL_StatusTypeDef HAL_SPI_Transmit_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, __HAL_SPI_ENABLE(hspi); } + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + /* Enable the SPI Error Interrupt Bit */ __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_ERR)); /* Enable Tx DMA Request */ SET_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN); -error : - /* Process Unlocked */ - __HAL_UNLOCK(hspi); - return errorcode; + return HAL_OK; } /** @@ -1688,11 +1747,14 @@ error : */ HAL_StatusTypeDef HAL_SPI_Receive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, uint16_t Size) { - HAL_StatusTypeDef errorcode = HAL_OK; - /* Check rx dma handle */ assert_param(IS_SPI_DMA_HANDLE(hspi->hdmarx)); + if (hspi->State != HAL_SPI_STATE_READY) + { + return HAL_BUSY; + } + if ((hspi->Init.Direction == SPI_DIRECTION_2LINES) && (hspi->Init.Mode == SPI_MODE_MASTER)) { hspi->State = HAL_SPI_STATE_BUSY_RX; @@ -1704,21 +1766,14 @@ HAL_StatusTypeDef HAL_SPI_Receive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, u return HAL_SPI_TransmitReceive_DMA(hspi, pData, pData, Size); } - /* Process Locked */ - __HAL_LOCK(hspi); - - if (hspi->State != HAL_SPI_STATE_READY) - { - errorcode = HAL_BUSY; - goto error; - } - if ((pData == NULL) || (Size == 0U)) { - errorcode = HAL_ERROR; - goto error; + return HAL_ERROR; } + /* Process Locked */ + __HAL_LOCK(hspi); + /* Set the transaction information */ hspi->State = HAL_SPI_STATE_BUSY_RX; hspi->ErrorCode = HAL_SPI_ERROR_NONE; @@ -1735,6 +1790,8 @@ HAL_StatusTypeDef HAL_SPI_Receive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, u /* Configure communication direction : 1Line */ if (hspi->Init.Direction == SPI_DIRECTION_1LINE) { + /* Disable SPI Peripheral before set 1Line direction (BIDIOE bit) */ + __HAL_SPI_DISABLE(hspi); SPI_1LINE_RX(hspi); } @@ -1764,10 +1821,9 @@ HAL_StatusTypeDef HAL_SPI_Receive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, u { /* Update SPI error code */ SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); - errorcode = HAL_ERROR; - - hspi->State = HAL_SPI_STATE_READY; - goto error; + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + return HAL_ERROR; } /* Check if the SPI is already enabled */ @@ -1777,16 +1833,16 @@ HAL_StatusTypeDef HAL_SPI_Receive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, u __HAL_SPI_ENABLE(hspi); } + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + /* Enable the SPI Error Interrupt Bit */ __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_ERR)); /* Enable Rx DMA Request */ SET_BIT(hspi->Instance->CR2, SPI_CR2_RXDMAEN); -error: - /* Process Unlocked */ - __HAL_UNLOCK(hspi); - return errorcode; + return HAL_OK; } /** @@ -1799,12 +1855,11 @@ HAL_StatusTypeDef HAL_SPI_Receive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pData, u * @param Size amount of data to be sent * @retval HAL status */ -HAL_StatusTypeDef HAL_SPI_TransmitReceive_DMA(SPI_HandleTypeDef *hspi, uint8_t *pTxData, uint8_t *pRxData, +HAL_StatusTypeDef HAL_SPI_TransmitReceive_DMA(SPI_HandleTypeDef *hspi, const uint8_t *pTxData, uint8_t *pRxData, uint16_t Size) { uint32_t tmp_mode; 
HAL_SPI_StateTypeDef tmp_state; - HAL_StatusTypeDef errorcode = HAL_OK; /* Check rx & tx dma handles */ assert_param(IS_SPI_DMA_HANDLE(hspi->hdmarx)); @@ -1813,9 +1868,6 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive_DMA(SPI_HandleTypeDef *hspi, uint8_t * /* Check Direction parameter */ assert_param(IS_SPI_DIRECTION_2LINES(hspi->Init.Direction)); - /* Process locked */ - __HAL_LOCK(hspi); - /* Init temporary variables */ tmp_state = hspi->State; tmp_mode = hspi->Init.Mode; @@ -1823,16 +1875,17 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive_DMA(SPI_HandleTypeDef *hspi, uint8_t * if (!((tmp_state == HAL_SPI_STATE_READY) || ((tmp_mode == SPI_MODE_MASTER) && (hspi->Init.Direction == SPI_DIRECTION_2LINES) && (tmp_state == HAL_SPI_STATE_BUSY_RX)))) { - errorcode = HAL_BUSY; - goto error; + return HAL_BUSY; } if ((pTxData == NULL) || (pRxData == NULL) || (Size == 0U)) { - errorcode = HAL_ERROR; - goto error; + return HAL_ERROR; } + /* Process locked */ + __HAL_LOCK(hspi); + /* Don't overwrite in case of HAL_SPI_STATE_BUSY_RX */ if (hspi->State != HAL_SPI_STATE_BUSY_RX) { @@ -1841,7 +1894,7 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive_DMA(SPI_HandleTypeDef *hspi, uint8_t * /* Set the transaction information */ hspi->ErrorCode = HAL_SPI_ERROR_NONE; - hspi->pTxBuffPtr = (uint8_t *)pTxData; + hspi->pTxBuffPtr = (const uint8_t *)pTxData; hspi->TxXferSize = Size; hspi->TxXferCount = Size; hspi->pRxBuffPtr = (uint8_t *)pRxData; @@ -1886,10 +1939,9 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive_DMA(SPI_HandleTypeDef *hspi, uint8_t * { /* Update SPI error code */ SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); - errorcode = HAL_ERROR; - - hspi->State = HAL_SPI_STATE_READY; - goto error; + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + return HAL_ERROR; } /* Enable Rx DMA Request */ @@ -1908,10 +1960,9 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive_DMA(SPI_HandleTypeDef *hspi, uint8_t * { /* Update SPI error code */ SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_DMA); - errorcode = HAL_ERROR; - - hspi->State = HAL_SPI_STATE_READY; - goto error; + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + return HAL_ERROR; } /* Check if the SPI is already enabled */ @@ -1920,16 +1971,17 @@ HAL_StatusTypeDef HAL_SPI_TransmitReceive_DMA(SPI_HandleTypeDef *hspi, uint8_t * /* Enable SPI peripheral */ __HAL_SPI_ENABLE(hspi); } + + /* Process Unlocked */ + __HAL_UNLOCK(hspi); + /* Enable the SPI Error Interrupt Bit */ __HAL_SPI_ENABLE_IT(hspi, (SPI_IT_ERR)); /* Enable Tx DMA Request */ SET_BIT(hspi->Instance->CR2, SPI_CR2_TXDMAEN); -error : - /* Process Unlocked */ - __HAL_UNLOCK(hspi); - return errorcode; + return HAL_OK; } /** @@ -2587,7 +2639,7 @@ __weak void HAL_SPI_AbortCpltCallback(SPI_HandleTypeDef *hspi) * the configuration information for SPI module. * @retval SPI state */ -HAL_SPI_StateTypeDef HAL_SPI_GetState(SPI_HandleTypeDef *hspi) +HAL_SPI_StateTypeDef HAL_SPI_GetState(const SPI_HandleTypeDef *hspi) { /* Return SPI handle state */ return hspi->State; @@ -2599,7 +2651,7 @@ HAL_SPI_StateTypeDef HAL_SPI_GetState(SPI_HandleTypeDef *hspi) * the configuration information for SPI module. 
* @retval SPI error code in bitmap format */ -uint32_t HAL_SPI_GetError(SPI_HandleTypeDef *hspi) +uint32_t HAL_SPI_GetError(const SPI_HandleTypeDef *hspi) { /* Return SPI ErrorCode */ return hspi->ErrorCode; @@ -2685,6 +2737,9 @@ static void SPI_DMAReceiveCplt(DMA_HandleTypeDef *hdma) { SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ uint32_t tickstart; +#if (USE_SPI_CRC != 0U) + __IO uint32_t tmpreg = 0U; +#endif /* USE_SPI_CRC */ /* Init tickstart for timeout management*/ tickstart = HAL_GetTick(); @@ -2706,7 +2761,9 @@ static void SPI_DMAReceiveCplt(DMA_HandleTypeDef *hdma) SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); } /* Read CRC */ - READ_REG(hspi->Instance->DR); + tmpreg = READ_REG(hspi->Instance->DR); + /* To avoid GCC warning */ + UNUSED(tmpreg); } #endif /* USE_SPI_CRC */ @@ -2769,6 +2826,9 @@ static void SPI_DMATransmitReceiveCplt(DMA_HandleTypeDef *hdma) { SPI_HandleTypeDef *hspi = (SPI_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); /* Derogation MISRAC2012-Rule-11.5 */ uint32_t tickstart; +#if (USE_SPI_CRC != 0U) + __IO uint32_t tmpreg = 0U; +#endif /* USE_SPI_CRC */ /* Init tickstart for timeout management*/ tickstart = HAL_GetTick(); @@ -2789,7 +2849,9 @@ static void SPI_DMATransmitReceiveCplt(DMA_HandleTypeDef *hdma) SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_CRC); } /* Read CRC to Flush DR and RXNE flag */ - READ_REG(hspi->Instance->DR); + tmpreg = READ_REG(hspi->Instance->DR); + /* To avoid GCC warning */ + UNUSED(tmpreg); } #endif /* USE_SPI_CRC */ @@ -3100,8 +3162,15 @@ static void SPI_2linesRxISR_8BIT(struct __SPI_HandleTypeDef *hspi) */ static void SPI_2linesRxISR_8BITCRC(struct __SPI_HandleTypeDef *hspi) { - /* Read 8bit CRC to flush Data Regsiter */ - READ_REG(*(__IO uint8_t *)&hspi->Instance->DR); + __IO uint8_t *ptmpreg8; + __IO uint8_t tmpreg8 = 0; + + /* Initialize the 8bit temporary pointer */ + ptmpreg8 = (__IO uint8_t *)&hspi->Instance->DR; + /* Read 8bit CRC to flush Data Register */ + tmpreg8 = *ptmpreg8; + /* To avoid GCC warning */ + UNUSED(tmpreg8); /* Disable RXNE and ERR interrupt */ __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_RXNE | SPI_IT_ERR)); @@ -3121,7 +3190,7 @@ static void SPI_2linesRxISR_8BITCRC(struct __SPI_HandleTypeDef *hspi) */ static void SPI_2linesTxISR_8BIT(struct __SPI_HandleTypeDef *hspi) { - *(__IO uint8_t *)&hspi->Instance->DR = (*hspi->pTxBuffPtr); + *(__IO uint8_t *)&hspi->Instance->DR = *((const uint8_t *)hspi->pTxBuffPtr); hspi->pTxBuffPtr++; hspi->TxXferCount--; @@ -3191,8 +3260,12 @@ static void SPI_2linesRxISR_16BIT(struct __SPI_HandleTypeDef *hspi) */ static void SPI_2linesRxISR_16BITCRC(struct __SPI_HandleTypeDef *hspi) { - /* Read 16bit CRC to flush Data Regsiter */ - READ_REG(hspi->Instance->DR); + __IO uint32_t tmpreg = 0U; + + /* Read 16bit CRC to flush Data Register */ + tmpreg = READ_REG(hspi->Instance->DR); + /* To avoid GCC warning */ + UNUSED(tmpreg); /* Disable RXNE interrupt */ __HAL_SPI_DISABLE_IT(hspi, SPI_IT_RXNE); @@ -3210,7 +3283,7 @@ static void SPI_2linesRxISR_16BITCRC(struct __SPI_HandleTypeDef *hspi) static void SPI_2linesTxISR_16BIT(struct __SPI_HandleTypeDef *hspi) { /* Transmit data in 16 Bit mode */ - hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->Instance->DR = *((const uint16_t *)hspi->pTxBuffPtr); hspi->pTxBuffPtr += sizeof(uint16_t); hspi->TxXferCount--; @@ -3247,8 +3320,15 @@ static void SPI_2linesTxISR_16BIT(struct __SPI_HandleTypeDef *hspi) */ static void SPI_RxISR_8BITCRC(struct __SPI_HandleTypeDef *hspi) { 
+ __IO uint8_t *ptmpreg8; + __IO uint8_t tmpreg8 = 0; + + /* Initialize the 8bit temporary pointer */ + ptmpreg8 = (__IO uint8_t *)&hspi->Instance->DR; /* Read 8bit CRC to flush Data Register */ - READ_REG(*(__IO uint8_t *)&hspi->Instance->DR); + tmpreg8 = *ptmpreg8; + /* To avoid GCC warning */ + UNUSED(tmpreg8); SPI_CloseRx_ISR(hspi); } @@ -3296,8 +3376,12 @@ static void SPI_RxISR_8BIT(struct __SPI_HandleTypeDef *hspi) */ static void SPI_RxISR_16BITCRC(struct __SPI_HandleTypeDef *hspi) { + __IO uint32_t tmpreg = 0U; + /* Read 16bit CRC to flush Data Register */ - READ_REG(hspi->Instance->DR); + tmpreg = READ_REG(hspi->Instance->DR); + /* To avoid GCC warning */ + UNUSED(tmpreg); /* Disable RXNE and ERR interrupt */ __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_RXNE | SPI_IT_ERR)); @@ -3347,7 +3431,7 @@ static void SPI_RxISR_16BIT(struct __SPI_HandleTypeDef *hspi) */ static void SPI_TxISR_8BIT(struct __SPI_HandleTypeDef *hspi) { - *(__IO uint8_t *)&hspi->Instance->DR = (*hspi->pTxBuffPtr); + *(__IO uint8_t *)&hspi->Instance->DR = *((const uint8_t *)hspi->pTxBuffPtr); hspi->pTxBuffPtr++; hspi->TxXferCount--; @@ -3373,7 +3457,7 @@ static void SPI_TxISR_8BIT(struct __SPI_HandleTypeDef *hspi) static void SPI_TxISR_16BIT(struct __SPI_HandleTypeDef *hspi) { /* Transmit data in 16 Bit mode */ - hspi->Instance->DR = *((uint16_t *)hspi->pTxBuffPtr); + hspi->Instance->DR = *((const uint16_t *)hspi->pTxBuffPtr); hspi->pTxBuffPtr += sizeof(uint16_t); hspi->TxXferCount--; @@ -3403,15 +3487,26 @@ static void SPI_TxISR_16BIT(struct __SPI_HandleTypeDef *hspi) static HAL_StatusTypeDef SPI_WaitFlagStateUntilTimeout(SPI_HandleTypeDef *hspi, uint32_t Flag, FlagStatus State, uint32_t Timeout, uint32_t Tickstart) { + __IO uint32_t count; + uint32_t tmp_timeout; + uint32_t tmp_tickstart; + + /* Adjust Timeout value in case of end of transfer */ + tmp_timeout = Timeout - (HAL_GetTick() - Tickstart); + tmp_tickstart = HAL_GetTick(); + + /* Calculate Timeout based on a software loop to avoid blocking issue if Systick is disabled */ + count = tmp_timeout * ((SystemCoreClock * 32U) >> 20U); + while ((__HAL_SPI_GET_FLAG(hspi, Flag) ? 
SET : RESET) != State) { if (Timeout != HAL_MAX_DELAY) { - if (((HAL_GetTick() - Tickstart) >= Timeout) || (Timeout == 0U)) + if (((HAL_GetTick() - tmp_tickstart) >= tmp_timeout) || (tmp_timeout == 0U)) { /* Disable the SPI and reset the CRC: the CRC value should be cleared - on both master and slave sides in order to resynchronize the master - and slave for their respective CRC calculation */ + on both master and slave sides in order to resynchronize the master + and slave for their respective CRC calculation */ /* Disable TXE, RXNE and ERR interrupts for the interrupt process */ __HAL_SPI_DISABLE_IT(hspi, (SPI_IT_TXE | SPI_IT_RXNE | SPI_IT_ERR)); @@ -3436,6 +3531,12 @@ static HAL_StatusTypeDef SPI_WaitFlagStateUntilTimeout(SPI_HandleTypeDef *hspi, return HAL_TIMEOUT; } + /* If Systick is disabled or not incremented, deactivate timeout to go in disable loop procedure */ + if (count == 0U) + { + tmp_timeout = 0U; + } + count--; } } @@ -3502,6 +3603,13 @@ static HAL_StatusTypeDef SPI_EndRxTransaction(SPI_HandleTypeDef *hspi, uint32_t */ static HAL_StatusTypeDef SPI_EndRxTxTransaction(SPI_HandleTypeDef *hspi, uint32_t Timeout, uint32_t Tickstart) { + /* Wait until TXE flag */ + if(SPI_WaitFlagStateUntilTimeout(hspi, SPI_FLAG_TXE, SET, Timeout, Tickstart) != HAL_OK) + { + SET_BIT(hspi->ErrorCode, HAL_SPI_ERROR_FLAG); + return HAL_TIMEOUT; + } + /* Timeout in µs */ __IO uint32_t count = SPI_BSY_FLAG_WORKAROUND_TIMEOUT * (SystemCoreClock / 24U / 1000000U); /* Erratasheet: BSY bit may stay high at the end of a data transfer in Slave mode */ @@ -3545,7 +3653,7 @@ static void SPI_CloseRxTx_ISR(SPI_HandleTypeDef *hspi) uint32_t tickstart; __IO uint32_t count = SPI_DEFAULT_TIMEOUT * (SystemCoreClock / 24U / 1000U); - /* Init tickstart for timeout managment*/ + /* Init tickstart for timeout management */ tickstart = HAL_GetTick(); /* Disable ERR interrupt */ @@ -3761,6 +3869,7 @@ static void SPI_CloseTx_ISR(SPI_HandleTypeDef *hspi) */ static void SPI_AbortRx_ISR(SPI_HandleTypeDef *hspi) { + __IO uint32_t tmpreg = 0U; __IO uint32_t count = SPI_DEFAULT_TIMEOUT * (SystemCoreClock / 24U / 1000U); /* Wait until TXE flag is set */ @@ -3780,8 +3889,10 @@ static void SPI_AbortRx_ISR(SPI_HandleTypeDef *hspi) /* Disable TXEIE, RXNEIE and ERRIE(mode fault event, overrun error, TI frame error) interrupts */ CLEAR_BIT(hspi->Instance->CR2, (SPI_CR2_TXEIE | SPI_CR2_RXNEIE | SPI_CR2_ERRIE)); - /* Read CRC to flush Data Register */ - READ_REG(hspi->Instance->DR); + /* Flush Data Register by a blank read */ + tmpreg = READ_REG(hspi->Instance->DR); + /* To avoid GCC warning */ + UNUSED(tmpreg); hspi->State = HAL_SPI_STATE_ABORT; } @@ -3817,4 +3928,3 @@ static void SPI_AbortTx_ISR(SPI_HandleTypeDef *hspi) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim.c index eb98d572..d5978cca 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim.c @@ -29,6 +29,17 @@ * + Commutation Event configuration with Interruption and DMA * + TIM OCRef clear configuration * + TIM External Clock configuration + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. 
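The software loop budget added to SPI_WaitFlagStateUntilTimeout above works out to count = Timeout_ms * ((SystemCoreClock * 32) >> 20), roughly SystemCoreClock / 32768 iterations per millisecond, i.e. it assumes on the order of 32 CPU cycles per polling iteration. A small self-contained check of that arithmetic (84 MHz and 100 ms are assumed example values, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  uint32_t core_clock = 84000000U;   /* assumed SystemCoreClock */
  uint32_t timeout_ms = 100U;

  /* (84e6 * 32) >> 20 = 2563 iterations budgeted per millisecond,
     which is close to 84e6 / 32768 = 2563.5. */
  uint32_t count = timeout_ms * ((core_clock * 32U) >> 20U);

  printf("budget = %lu iterations\n", (unsigned long)count);  /* 256300 */
  return 0;
}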
+ * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim ============================================================================== ##### TIMER Generic features ##### @@ -103,14 +114,14 @@ allows the user to configure dynamically the driver callbacks. [..] - Use Function @ref HAL_TIM_RegisterCallback() to register a callback. - @ref HAL_TIM_RegisterCallback() takes as parameters the HAL peripheral handle, + Use Function HAL_TIM_RegisterCallback() to register a callback. + HAL_TIM_RegisterCallback() takes as parameters the HAL peripheral handle, the Callback ID and a pointer to the user callback function. [..] - Use function @ref HAL_TIM_UnRegisterCallback() to reset a callback to the default + Use function HAL_TIM_UnRegisterCallback() to reset a callback to the default weak function. - @ref HAL_TIM_UnRegisterCallback takes as parameters the HAL peripheral handle, + HAL_TIM_UnRegisterCallback takes as parameters the HAL peripheral handle, and the Callback ID. [..] @@ -146,7 +157,7 @@ [..] By default, after the Init and when the state is HAL_TIM_STATE_RESET all interrupt callbacks are set to the corresponding weak functions: - examples @ref HAL_TIM_TriggerCallback(), @ref HAL_TIM_ErrorCallback(). + examples HAL_TIM_TriggerCallback(), HAL_TIM_ErrorCallback(). [..] Exception done for MspInit and MspDeInit functions that are reset to the legacy weak @@ -160,7 +171,7 @@ all interrupt callbacks are set to the corresponding weak functions: in HAL_TIM_STATE_READY or HAL_TIM_STATE_RESET state, thus registered(user) MspInit / DeInit callbacks can be used during the Init / DeInit. In that case first register the MspInit/MspDeInit user callbacks - using @ref HAL_TIM_RegisterCallback() before calling DeInit or Init function. + using HAL_TIM_RegisterCallback() before calling DeInit or Init function. [..] When The compilation define USE_HAL_TIM_REGISTER_CALLBACKS is set to 0 or @@ -169,17 +180,6 @@ all interrupt callbacks are set to the corresponding weak functions: @endverbatim ****************************************************************************** - * @attention - * - *
© Copyright (c) 2016 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * - ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ @@ -198,15 +198,15 @@ all interrupt callbacks are set to the corresponding weak functions: /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ -/* Private macro -------------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /* Private function prototypes -----------------------------------------------*/ /** @addtogroup TIM_Private_Functions * @{ */ -static void TIM_OC1_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); -static void TIM_OC3_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); -static void TIM_OC4_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); +static void TIM_OC1_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config); +static void TIM_OC3_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config); +static void TIM_OC4_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config); static void TIM_TI1_ConfigInputStage(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICFilter); static void TIM_TI2_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, uint32_t TIM_ICFilter); @@ -218,10 +218,11 @@ static void TIM_TI4_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32 static void TIM_ITRx_SetConfig(TIM_TypeDef *TIMx, uint32_t InputTriggerSource); static void TIM_DMAPeriodElapsedCplt(DMA_HandleTypeDef *hdma); static void TIM_DMAPeriodElapsedHalfCplt(DMA_HandleTypeDef *hdma); +static void TIM_DMADelayPulseCplt(DMA_HandleTypeDef *hdma); static void TIM_DMATriggerCplt(DMA_HandleTypeDef *hdma); static void TIM_DMATriggerHalfCplt(DMA_HandleTypeDef *hdma); static HAL_StatusTypeDef TIM_SlaveTimer_SetConfig(TIM_HandleTypeDef *htim, - TIM_SlaveConfigTypeDef *sSlaveConfig); + const TIM_SlaveConfigTypeDef *sSlaveConfig); /** * @} */ @@ -274,6 +275,7 @@ HAL_StatusTypeDef HAL_TIM_Base_Init(TIM_HandleTypeDef *htim) assert_param(IS_TIM_INSTANCE(htim->Instance)); assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_PERIOD(htim, htim->Init.Period)); assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); if (htim->State == HAL_TIM_STATE_RESET) @@ -303,6 +305,13 @@ HAL_StatusTypeDef HAL_TIM_Base_Init(TIM_HandleTypeDef *htim) /* Set the Time Base configuration */ TIM_Base_SetConfig(htim->Instance, &htim->Init); + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + /* Initialize the TIM state*/ htim->State = HAL_TIM_STATE_READY; @@ -336,6 +345,13 @@ HAL_StatusTypeDef HAL_TIM_Base_DeInit(TIM_HandleTypeDef *htim) HAL_TIM_Base_MspDeInit(htim); #endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + /* Change the 
DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + /* Change TIM state */ htim->State = HAL_TIM_STATE_RESET; @@ -388,19 +404,29 @@ HAL_StatusTypeDef HAL_TIM_Base_Start(TIM_HandleTypeDef *htim) /* Check the parameters */ assert_param(IS_TIM_INSTANCE(htim->Instance)); + /* Check the TIM state */ + if (htim->State != HAL_TIM_STATE_READY) + { + return HAL_ERROR; + } + /* Set the TIM state */ htim->State = HAL_TIM_STATE_BUSY; /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else { __HAL_TIM_ENABLE(htim); } - /* Change the TIM state*/ - htim->State = HAL_TIM_STATE_READY; - /* Return function status */ return HAL_OK; } @@ -415,13 +441,10 @@ HAL_StatusTypeDef HAL_TIM_Base_Stop(TIM_HandleTypeDef *htim) /* Check the parameters */ assert_param(IS_TIM_INSTANCE(htim->Instance)); - /* Set the TIM state */ - htim->State = HAL_TIM_STATE_BUSY; - /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); - /* Change the TIM state*/ + /* Set the TIM state */ htim->State = HAL_TIM_STATE_READY; /* Return function status */ @@ -440,12 +463,28 @@ HAL_StatusTypeDef HAL_TIM_Base_Start_IT(TIM_HandleTypeDef *htim) /* Check the parameters */ assert_param(IS_TIM_INSTANCE(htim->Instance)); + /* Check the TIM state */ + if (htim->State != HAL_TIM_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + /* Enable the TIM Update interrupt */ __HAL_TIM_ENABLE_IT(htim, TIM_IT_UPDATE); /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else { __HAL_TIM_ENABLE(htim); } @@ -463,12 +502,16 @@ HAL_StatusTypeDef HAL_TIM_Base_Stop_IT(TIM_HandleTypeDef *htim) { /* Check the parameters */ assert_param(IS_TIM_INSTANCE(htim->Instance)); + /* Disable the TIM Update interrupt */ __HAL_TIM_DISABLE_IT(htim, TIM_IT_UPDATE); /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_READY; + /* Return function status */ return HAL_OK; } @@ -480,20 +523,21 @@ HAL_StatusTypeDef HAL_TIM_Base_Stop_IT(TIM_HandleTypeDef *htim) * @param Length The length of data to be transferred from memory to peripheral. 
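The time-base Start functions above now check htim->State and return HAL_ERROR when the timer is already running, instead of blindly re-enabling it. A short caller sketch under that assumption (htim3 is a hypothetical handle configured elsewhere):

#include "stm32f4xx_hal.h"

extern TIM_HandleTypeDef htim3;         /* assumed to be set up with HAL_TIM_Base_Init() */

HAL_StatusTypeDef start_timebase_once(void)
{
  HAL_StatusTypeDef st = HAL_TIM_Base_Start_IT(&htim3);

  if (st != HAL_OK)
  {
    /* The timer was not in HAL_TIM_STATE_READY, i.e. it is already running;
       with the change above this is reported to the caller instead of the
       update interrupt being re-enabled silently. */
  }
  return st;
}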
* @retval HAL status */ -HAL_StatusTypeDef HAL_TIM_Base_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pData, uint16_t Length) +HAL_StatusTypeDef HAL_TIM_Base_Start_DMA(TIM_HandleTypeDef *htim, const uint32_t *pData, uint16_t Length) { uint32_t tmpsmcr; /* Check the parameters */ assert_param(IS_TIM_DMA_INSTANCE(htim->Instance)); + /* Set the TIM state */ if (htim->State == HAL_TIM_STATE_BUSY) { return HAL_BUSY; } else if (htim->State == HAL_TIM_STATE_READY) { - if ((pData == NULL) && (Length > 0U)) + if ((pData == NULL) || (Length == 0U)) { return HAL_ERROR; } @@ -504,7 +548,7 @@ HAL_StatusTypeDef HAL_TIM_Base_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pDat } else { - /* nothing to do */ + return HAL_ERROR; } /* Set the DMA Period elapsed callbacks */ @@ -515,8 +559,10 @@ HAL_StatusTypeDef HAL_TIM_Base_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pDat htim->hdma[TIM_DMA_ID_UPDATE]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)pData, (uint32_t)&htim->Instance->ARR, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)pData, (uint32_t)&htim->Instance->ARR, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } @@ -524,8 +570,15 @@ HAL_StatusTypeDef HAL_TIM_Base_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pDat __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_UPDATE); /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else { __HAL_TIM_ENABLE(htim); } @@ -552,7 +605,7 @@ HAL_StatusTypeDef HAL_TIM_Base_Stop_DMA(TIM_HandleTypeDef *htim) /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); - /* Change the htim state */ + /* Set the TIM state */ htim->State = HAL_TIM_STATE_READY; /* Return function status */ @@ -606,6 +659,7 @@ HAL_StatusTypeDef HAL_TIM_OC_Init(TIM_HandleTypeDef *htim) assert_param(IS_TIM_INSTANCE(htim->Instance)); assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_PERIOD(htim, htim->Init.Period)); assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); if (htim->State == HAL_TIM_STATE_RESET) @@ -635,6 +689,13 @@ HAL_StatusTypeDef HAL_TIM_OC_Init(TIM_HandleTypeDef *htim) /* Init the base time for the Output Compare */ TIM_Base_SetConfig(htim->Instance, &htim->Init); + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + /* Initialize the TIM state*/ htim->State = HAL_TIM_STATE_READY; @@ -668,6 +729,13 @@ HAL_StatusTypeDef HAL_TIM_OC_DeInit(TIM_HandleTypeDef *htim) HAL_TIM_OC_MspDeInit(htim); #endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + /* Change TIM state */ htim->State = HAL_TIM_STATE_RESET; @@ -725,6 +793,15 @@ HAL_StatusTypeDef 
HAL_TIM_OC_Start(TIM_HandleTypeDef *htim, uint32_t Channel) /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + /* Check the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + /* Enable the Output compare channel */ TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); @@ -735,8 +812,15 @@ HAL_StatusTypeDef HAL_TIM_OC_Start(TIM_HandleTypeDef *htim, uint32_t Channel) } /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else { __HAL_TIM_ENABLE(htim); } @@ -773,6 +857,9 @@ HAL_StatusTypeDef HAL_TIM_OC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + /* Return function status */ return HAL_OK; } @@ -790,11 +877,21 @@ HAL_StatusTypeDef HAL_TIM_OC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) */ HAL_StatusTypeDef HAL_TIM_OC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpsmcr; /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + /* Check the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + switch (Channel) { case TIM_CHANNEL_1: @@ -826,27 +923,38 @@ HAL_StatusTypeDef HAL_TIM_OC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) } default: + status = HAL_ERROR; break; } - /* Enable the Output compare channel */ - TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); - - if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + if (status == HAL_OK) { - /* Enable the main output */ - __HAL_TIM_MOE_ENABLE(htim); - } + /* Enable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); - /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) - { - __HAL_TIM_ENABLE(htim); + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } } /* Return function status */ - return HAL_OK; + return status; } /** @@ -862,6 +970,8 @@ HAL_StatusTypeDef HAL_TIM_OC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) */ HAL_StatusTypeDef HAL_TIM_OC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); @@ -896,23 +1006,30 @@ HAL_StatusTypeDef 
HAL_TIM_OC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) } default: + status = HAL_ERROR; break; } - /* Disable the Output compare channel */ - TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); - - if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + if (status == HAL_OK) { - /* Disable the Main Output */ - __HAL_TIM_MOE_DISABLE(htim); - } + /* Disable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); - /* Disable the Peripheral */ - __HAL_TIM_DISABLE(htim); + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } /* Return function status */ - return HAL_OK; + return status; } /** @@ -928,31 +1045,34 @@ HAL_StatusTypeDef HAL_TIM_OC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) * @param Length The length of data to be transferred from memory to TIM peripheral * @retval HAL status */ -HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpsmcr; /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); - if (htim->State == HAL_TIM_STATE_BUSY) + /* Set the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_BUSY) { return HAL_BUSY; } - else if (htim->State == HAL_TIM_STATE_READY) + else if (TIM_CHANNEL_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_READY) { - if ((pData == NULL) && (Length > 0U)) + if ((pData == NULL) || (Length == 0U)) { return HAL_ERROR; } else { - htim->State = HAL_TIM_STATE_BUSY; + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); } } else { - /* nothing to do */ + return HAL_ERROR; } switch (Channel) @@ -967,8 +1087,10 @@ HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } @@ -987,8 +1109,10 @@ HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } @@ -1007,8 +1131,10 @@ HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM 
Capture/Compare 3 DMA request */ @@ -1026,8 +1152,10 @@ HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)pData, (uint32_t)&htim->Instance->CCR4, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)pData, (uint32_t)&htim->Instance->CCR4, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Capture/Compare 4 DMA request */ @@ -1036,27 +1164,38 @@ HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel } default: + status = HAL_ERROR; break; } - /* Enable the Output compare channel */ - TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); - - if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + if (status == HAL_OK) { - /* Enable the main output */ - __HAL_TIM_MOE_ENABLE(htim); - } + /* Enable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); - /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) - { - __HAL_TIM_ENABLE(htim); + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } } /* Return function status */ - return HAL_OK; + return status; } /** @@ -1072,6 +1211,8 @@ HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel */ HAL_StatusTypeDef HAL_TIM_OC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); @@ -1110,26 +1251,30 @@ HAL_StatusTypeDef HAL_TIM_OC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) } default: + status = HAL_ERROR; break; } - /* Disable the Output compare channel */ - TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); - - if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + if (status == HAL_OK) { - /* Disable the Main Output */ - __HAL_TIM_MOE_DISABLE(htim); - } + /* Disable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); - /* Disable the Peripheral */ - __HAL_TIM_DISABLE(htim); + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } - /* Change the htim state */ - htim->State = HAL_TIM_STATE_READY; + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } /* Return function status */ - return HAL_OK; + return status; } /** @@ -1179,6 +1324,7 @@ HAL_StatusTypeDef HAL_TIM_PWM_Init(TIM_HandleTypeDef *htim) assert_param(IS_TIM_INSTANCE(htim->Instance)); assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_PERIOD(htim, htim->Init.Period)); assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); if (htim->State == 
HAL_TIM_STATE_RESET) @@ -1208,6 +1354,13 @@ HAL_StatusTypeDef HAL_TIM_PWM_Init(TIM_HandleTypeDef *htim) /* Init the base time for the PWM */ TIM_Base_SetConfig(htim->Instance, &htim->Init); + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + /* Initialize the TIM state*/ htim->State = HAL_TIM_STATE_READY; @@ -1241,6 +1394,13 @@ HAL_StatusTypeDef HAL_TIM_PWM_DeInit(TIM_HandleTypeDef *htim) HAL_TIM_PWM_MspDeInit(htim); #endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + /* Change TIM state */ htim->State = HAL_TIM_STATE_RESET; @@ -1298,6 +1458,15 @@ HAL_StatusTypeDef HAL_TIM_PWM_Start(TIM_HandleTypeDef *htim, uint32_t Channel) /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + /* Check the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + /* Enable the Capture compare channel */ TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); @@ -1308,8 +1477,15 @@ HAL_StatusTypeDef HAL_TIM_PWM_Start(TIM_HandleTypeDef *htim, uint32_t Channel) } /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else { __HAL_TIM_ENABLE(htim); } @@ -1346,8 +1522,8 @@ HAL_StatusTypeDef HAL_TIM_PWM_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); - /* Change the htim state */ - htim->State = HAL_TIM_STATE_READY; + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); /* Return function status */ return HAL_OK; @@ -1366,10 +1542,21 @@ HAL_StatusTypeDef HAL_TIM_PWM_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) */ HAL_StatusTypeDef HAL_TIM_PWM_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpsmcr; + /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + /* Check the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + switch (Channel) { case TIM_CHANNEL_1: @@ -1401,27 +1588,38 @@ HAL_StatusTypeDef HAL_TIM_PWM_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel } default: + status = HAL_ERROR; break; } - /* Enable the Capture compare channel */ - TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); - - if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + if (status == HAL_OK) { - /* Enable the main output */ - __HAL_TIM_MOE_ENABLE(htim); - } + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, 
TIM_CCx_ENABLE); - /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) - { - __HAL_TIM_ENABLE(htim); + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } } /* Return function status */ - return HAL_OK; + return status; } /** @@ -1437,6 +1635,8 @@ HAL_StatusTypeDef HAL_TIM_PWM_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel */ HAL_StatusTypeDef HAL_TIM_PWM_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); @@ -1471,23 +1671,30 @@ HAL_StatusTypeDef HAL_TIM_PWM_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) } default: + status = HAL_ERROR; break; } - /* Disable the Capture compare channel */ - TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); - - if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + if (status == HAL_OK) { - /* Disable the Main Output */ - __HAL_TIM_MOE_DISABLE(htim); - } + /* Disable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); - /* Disable the Peripheral */ - __HAL_TIM_DISABLE(htim); + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } /* Return function status */ - return HAL_OK; + return status; } /** @@ -1503,31 +1710,34 @@ HAL_StatusTypeDef HAL_TIM_PWM_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) * @param Length The length of data to be transferred from memory to TIM peripheral * @retval HAL status */ -HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpsmcr; /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); - if (htim->State == HAL_TIM_STATE_BUSY) + /* Set the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_BUSY) { return HAL_BUSY; } - else if (htim->State == HAL_TIM_STATE_READY) + else if (TIM_CHANNEL_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_READY) { - if ((pData == NULL) && (Length > 0U)) + if ((pData == NULL) || (Length == 0U)) { return HAL_ERROR; } else { - htim->State = HAL_TIM_STATE_BUSY; + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); } } else { - /* nothing to do */ + return HAL_ERROR; } switch (Channel) @@ -1542,8 +1752,10 @@ HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channe htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, Length) != HAL_OK) + if 
(HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } @@ -1562,8 +1774,10 @@ HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channe htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Capture/Compare 2 DMA request */ @@ -1581,8 +1795,10 @@ HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channe htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Output Capture/Compare 3 request */ @@ -1600,8 +1816,10 @@ HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channe htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)pData, (uint32_t)&htim->Instance->CCR4, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)pData, (uint32_t)&htim->Instance->CCR4, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Capture/Compare 4 DMA request */ @@ -1610,27 +1828,38 @@ HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channe } default: + status = HAL_ERROR; break; } - /* Enable the Capture compare channel */ - TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); - - if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + if (status == HAL_OK) { - /* Enable the main output */ - __HAL_TIM_MOE_ENABLE(htim); - } + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); - /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) - { - __HAL_TIM_ENABLE(htim); + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } } /* Return function status */ - return HAL_OK; + return status; } /** @@ -1646,6 +1875,8 @@ HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channe */ HAL_StatusTypeDef HAL_TIM_PWM_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); @@ -1684,26 +1915,30 @@ HAL_StatusTypeDef HAL_TIM_PWM_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel } default: + status = HAL_ERROR; break; } - /* Disable the Capture compare channel */ - 
TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); - - if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + if (status == HAL_OK) { - /* Disable the Main Output */ - __HAL_TIM_MOE_DISABLE(htim); - } + /* Disable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); - /* Disable the Peripheral */ - __HAL_TIM_DISABLE(htim); + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } - /* Change the htim state */ - htim->State = HAL_TIM_STATE_READY; + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } /* Return function status */ - return HAL_OK; + return status; } /** @@ -1753,6 +1988,7 @@ HAL_StatusTypeDef HAL_TIM_IC_Init(TIM_HandleTypeDef *htim) assert_param(IS_TIM_INSTANCE(htim->Instance)); assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_PERIOD(htim, htim->Init.Period)); assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); if (htim->State == HAL_TIM_STATE_RESET) @@ -1782,6 +2018,13 @@ HAL_StatusTypeDef HAL_TIM_IC_Init(TIM_HandleTypeDef *htim) /* Init the base time for the input capture */ TIM_Base_SetConfig(htim->Instance, &htim->Init); + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + /* Initialize the TIM state*/ htim->State = HAL_TIM_STATE_READY; @@ -1815,6 +2058,13 @@ HAL_StatusTypeDef HAL_TIM_IC_DeInit(TIM_HandleTypeDef *htim) HAL_TIM_IC_MspDeInit(htim); #endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + /* Change TIM state */ htim->State = HAL_TIM_STATE_RESET; @@ -1868,16 +2118,36 @@ __weak void HAL_TIM_IC_MspDeInit(TIM_HandleTypeDef *htim) HAL_StatusTypeDef HAL_TIM_IC_Start(TIM_HandleTypeDef *htim, uint32_t Channel) { uint32_t tmpsmcr; + HAL_TIM_ChannelStateTypeDef channel_state = TIM_CHANNEL_STATE_GET(htim, Channel); + HAL_TIM_ChannelStateTypeDef complementary_channel_state = TIM_CHANNEL_N_STATE_GET(htim, Channel); /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + /* Check the TIM channel state */ + if ((channel_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + /* Enable the Input Capture channel */ TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else { __HAL_TIM_ENABLE(htim); } 
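With the channel-state bookkeeping introduced in the hunks above, the Start/Stop APIs become stateful: a Start call on a channel that is not in the READY state now returns HAL_ERROR instead of silently re-enabling the output, and the DMA variants additionally reject a NULL buffer or a zero length. A minimal usage sketch, not part of the patch (the handle htim3, its input-capture MSP/init code, and Error_Handler() are assumed from a typical Cube project):

extern TIM_HandleTypeDef htim3;   /* assumed: configured elsewhere via HAL_TIM_IC_Init() */

void ic_start_example(void)
{
  /* First start succeeds and moves channel 1 to the BUSY state */
  if (HAL_TIM_IC_Start(&htim3, TIM_CHANNEL_1) != HAL_OK)
  {
    Error_Handler();   /* channel was not READY, or the instance is invalid */
  }

  /* A second start without an intervening stop now fails */
  if (HAL_TIM_IC_Start(&htim3, TIM_CHANNEL_1) == HAL_ERROR)
  {
    /* expected with this patch: stop first to return the channel to READY */
    (void)HAL_TIM_IC_Stop(&htim3, TIM_CHANNEL_1);
  }
}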
@@ -1908,6 +2178,10 @@ HAL_StatusTypeDef HAL_TIM_IC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + /* Return function status */ return HAL_OK; } @@ -1925,11 +2199,26 @@ HAL_StatusTypeDef HAL_TIM_IC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) */ HAL_StatusTypeDef HAL_TIM_IC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpsmcr; + HAL_TIM_ChannelStateTypeDef channel_state = TIM_CHANNEL_STATE_GET(htim, Channel); + HAL_TIM_ChannelStateTypeDef complementary_channel_state = TIM_CHANNEL_N_STATE_GET(htim, Channel); + /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + /* Check the TIM channel state */ + if ((channel_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + switch (Channel) { case TIM_CHANNEL_1: @@ -1961,20 +2250,32 @@ HAL_StatusTypeDef HAL_TIM_IC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) } default: + status = HAL_ERROR; break; } - /* Enable the Input Capture channel */ - TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); - /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + if (status == HAL_OK) { - __HAL_TIM_ENABLE(htim); + /* Enable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } } /* Return function status */ - return HAL_OK; + return status; } /** @@ -1990,6 +2291,8 @@ HAL_StatusTypeDef HAL_TIM_IC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) */ HAL_StatusTypeDef HAL_TIM_IC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); @@ -2024,17 +2327,25 @@ HAL_StatusTypeDef HAL_TIM_IC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) } default: + status = HAL_ERROR; break; } - /* Disable the Input Capture channel */ - TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + if (status == HAL_OK) + { + /* Disable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); - /* Disable the Peripheral */ - __HAL_TIM_DISABLE(htim); + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } /* Return function status */ - return HAL_OK; + return status; } /** @@ -2052,32 +2363,43 @@ HAL_StatusTypeDef HAL_TIM_IC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) */ HAL_StatusTypeDef HAL_TIM_IC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, 
uint32_t *pData, uint16_t Length) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpsmcr; + HAL_TIM_ChannelStateTypeDef channel_state = TIM_CHANNEL_STATE_GET(htim, Channel); + HAL_TIM_ChannelStateTypeDef complementary_channel_state = TIM_CHANNEL_N_STATE_GET(htim, Channel); + /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); assert_param(IS_TIM_DMA_CC_INSTANCE(htim->Instance)); - if (htim->State == HAL_TIM_STATE_BUSY) + /* Set the TIM channel state */ + if ((channel_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_state == HAL_TIM_CHANNEL_STATE_BUSY)) { return HAL_BUSY; } - else if (htim->State == HAL_TIM_STATE_READY) + else if ((channel_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_state == HAL_TIM_CHANNEL_STATE_READY)) { - if ((pData == NULL) && (Length > 0U)) + if ((pData == NULL) || (Length == 0U)) { return HAL_ERROR; } else { - htim->State = HAL_TIM_STATE_BUSY; + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); } } else { - /* nothing to do */ + return HAL_ERROR; } + /* Enable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + switch (Channel) { case TIM_CHANNEL_1: @@ -2090,8 +2412,10 @@ HAL_StatusTypeDef HAL_TIM_IC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Capture/Compare 1 DMA request */ @@ -2109,8 +2433,10 @@ HAL_StatusTypeDef HAL_TIM_IC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->CCR2, (uint32_t)pData, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->CCR2, (uint32_t)pData, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Capture/Compare 2 DMA request */ @@ -2128,8 +2454,10 @@ HAL_StatusTypeDef HAL_TIM_IC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)&htim->Instance->CCR3, (uint32_t)pData, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)&htim->Instance->CCR3, (uint32_t)pData, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Capture/Compare 3 DMA request */ @@ -2147,8 +2475,10 @@ HAL_StatusTypeDef HAL_TIM_IC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)&htim->Instance->CCR4, (uint32_t)pData, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)&htim->Instance->CCR4, (uint32_t)pData, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Capture/Compare 4 DMA request */ @@ -2157,21 +2487,26 @@ HAL_StatusTypeDef HAL_TIM_IC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel } 
default: + status = HAL_ERROR; break; } - /* Enable the Input Capture channel */ - TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); - /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else { __HAL_TIM_ENABLE(htim); } /* Return function status */ - return HAL_OK; + return status; } /** @@ -2187,10 +2522,15 @@ HAL_StatusTypeDef HAL_TIM_IC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel */ HAL_StatusTypeDef HAL_TIM_IC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); assert_param(IS_TIM_DMA_CC_INSTANCE(htim->Instance)); + /* Disable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + switch (Channel) { case TIM_CHANNEL_1: @@ -2226,20 +2566,22 @@ HAL_StatusTypeDef HAL_TIM_IC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) } default: + status = HAL_ERROR; break; } - /* Disable the Input Capture channel */ - TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); - - /* Disable the Peripheral */ - __HAL_TIM_DISABLE(htim); + if (status == HAL_OK) + { + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); - /* Change the htim state */ - htim->State = HAL_TIM_STATE_READY; + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } /* Return function status */ - return HAL_OK; + return status; } /** * @} @@ -2273,6 +2615,9 @@ HAL_StatusTypeDef HAL_TIM_IC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) * requires a timer reset to avoid unexpected direction * due to DIR bit readonly in center aligned mode. * Ex: call @ref HAL_TIM_OnePulse_DeInit() before HAL_TIM_OnePulse_Init() + * @note When the timer instance is initialized in One Pulse mode, timer + * channels 1 and channel 2 are reserved and cannot be used for other + * purpose. * @param htim TIM One Pulse handle * @param OnePulseMode Select the One pulse mode. 
* This parameter can be one of the following values: @@ -2293,6 +2638,7 @@ HAL_StatusTypeDef HAL_TIM_OnePulse_Init(TIM_HandleTypeDef *htim, uint32_t OnePul assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); assert_param(IS_TIM_OPM_MODE(OnePulseMode)); + assert_param(IS_TIM_PERIOD(htim, htim->Init.Period)); assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); if (htim->State == HAL_TIM_STATE_RESET) @@ -2328,6 +2674,15 @@ HAL_StatusTypeDef HAL_TIM_OnePulse_Init(TIM_HandleTypeDef *htim, uint32_t OnePul /* Configure the OPM Mode */ htim->Instance->CR1 |= OnePulseMode; + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + /* Initialize the TIM state*/ htim->State = HAL_TIM_STATE_READY; @@ -2361,6 +2716,15 @@ HAL_StatusTypeDef HAL_TIM_OnePulse_DeInit(TIM_HandleTypeDef *htim) HAL_TIM_OnePulse_MspDeInit(htim); #endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + /* Change TIM state */ htim->State = HAL_TIM_STATE_RESET; @@ -2402,23 +2766,44 @@ __weak void HAL_TIM_OnePulse_MspDeInit(TIM_HandleTypeDef *htim) /** * @brief Starts the TIM One Pulse signal generation. + * @note Though OutputChannel parameter is deprecated and ignored by the function + * it has been kept to avoid HAL_TIM API compatibility break. + * @note The pulse output channel is determined when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). 
* @param htim TIM One Pulse handle - * @param OutputChannel TIM Channels to be enabled - * This parameter can be one of the following values: - * @arg TIM_CHANNEL_1: TIM Channel 1 selected - * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @param OutputChannel See note above * @retval HAL status */ HAL_StatusTypeDef HAL_TIM_OnePulse_Start(TIM_HandleTypeDef *htim, uint32_t OutputChannel) { + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + /* Prevent unused argument(s) compilation warning */ UNUSED(OutputChannel); + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + /* Enable the Capture compare and the Input Capture channels (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output - in all combinations, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be enabled together + whatever the combination, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be enabled together No need to enable the counter, it's enabled automatically by hardware (the counter starts in response to a stimulus and generate a pulse */ @@ -2438,11 +2823,12 @@ HAL_StatusTypeDef HAL_TIM_OnePulse_Start(TIM_HandleTypeDef *htim, uint32_t Outpu /** * @brief Stops the TIM One Pulse signal generation. + * @note Though OutputChannel parameter is deprecated and ignored by the function + * it has been kept to avoid HAL_TIM API compatibility break. + * @note The pulse output channel is determined when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). 
* @param htim TIM One Pulse handle - * @param OutputChannel TIM Channels to be disable - * This parameter can be one of the following values: - * @arg TIM_CHANNEL_1: TIM Channel 1 selected - * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @param OutputChannel See note above * @retval HAL status */ HAL_StatusTypeDef HAL_TIM_OnePulse_Stop(TIM_HandleTypeDef *htim, uint32_t OutputChannel) @@ -2454,7 +2840,7 @@ HAL_StatusTypeDef HAL_TIM_OnePulse_Stop(TIM_HandleTypeDef *htim, uint32_t Output (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output - in all combinations, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be disabled together */ + whatever the combination, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be disabled together */ TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); @@ -2468,29 +2854,56 @@ HAL_StatusTypeDef HAL_TIM_OnePulse_Stop(TIM_HandleTypeDef *htim, uint32_t Output /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + /* Return function status */ return HAL_OK; } /** * @brief Starts the TIM One Pulse signal generation in interrupt mode. + * @note Though OutputChannel parameter is deprecated and ignored by the function + * it has been kept to avoid HAL_TIM API compatibility break. + * @note The pulse output channel is determined when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). 
* @param htim TIM One Pulse handle - * @param OutputChannel TIM Channels to be enabled - * This parameter can be one of the following values: - * @arg TIM_CHANNEL_1: TIM Channel 1 selected - * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @param OutputChannel See note above * @retval HAL status */ HAL_StatusTypeDef HAL_TIM_OnePulse_Start_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) { + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + /* Prevent unused argument(s) compilation warning */ UNUSED(OutputChannel); + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + /* Enable the Capture compare and the Input Capture channels (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output - in all combinations, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be enabled together + whatever the combination, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be enabled together No need to enable the counter, it's enabled automatically by hardware (the counter starts in response to a stimulus and generate a pulse */ @@ -2516,11 +2929,12 @@ HAL_StatusTypeDef HAL_TIM_OnePulse_Start_IT(TIM_HandleTypeDef *htim, uint32_t Ou /** * @brief Stops the TIM One Pulse signal generation in interrupt mode. + * @note Though OutputChannel parameter is deprecated and ignored by the function + * it has been kept to avoid HAL_TIM API compatibility break. + * @note The pulse output channel is determined when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). 
* @param htim TIM One Pulse handle - * @param OutputChannel TIM Channels to be enabled - * This parameter can be one of the following values: - * @arg TIM_CHANNEL_1: TIM Channel 1 selected - * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @param OutputChannel See note above * @retval HAL status */ HAL_StatusTypeDef HAL_TIM_OnePulse_Stop_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) @@ -2538,7 +2952,7 @@ HAL_StatusTypeDef HAL_TIM_OnePulse_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Out (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output - in all combinations, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be disabled together */ + whatever the combination, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be disabled together */ TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); @@ -2551,6 +2965,12 @@ HAL_StatusTypeDef HAL_TIM_OnePulse_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Out /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + /* Return function status */ return HAL_OK; } @@ -2589,11 +3009,14 @@ HAL_StatusTypeDef HAL_TIM_OnePulse_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Out * @note Encoder mode and External clock mode 2 are not compatible and must not be selected together * Ex: A call for @ref HAL_TIM_Encoder_Init will erase the settings of @ref HAL_TIM_ConfigClockSource * using TIM_CLOCKSOURCE_ETRMODE2 and vice versa + * @note When the timer instance is initialized in Encoder mode, timer + * channels 1 and channel 2 are reserved and cannot be used for other + * purpose. 
* @param htim TIM Encoder Interface handle * @param sConfig TIM Encoder Interface configuration structure * @retval HAL status */ -HAL_StatusTypeDef HAL_TIM_Encoder_Init(TIM_HandleTypeDef *htim, TIM_Encoder_InitTypeDef *sConfig) +HAL_StatusTypeDef HAL_TIM_Encoder_Init(TIM_HandleTypeDef *htim, const TIM_Encoder_InitTypeDef *sConfig) { uint32_t tmpsmcr; uint32_t tmpccmr1; @@ -2606,10 +3029,10 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Init(TIM_HandleTypeDef *htim, TIM_Encoder_Ini } /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); - assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); assert_param(IS_TIM_ENCODER_MODE(sConfig->EncoderMode)); assert_param(IS_TIM_IC_SELECTION(sConfig->IC1Selection)); assert_param(IS_TIM_IC_SELECTION(sConfig->IC2Selection)); @@ -2619,6 +3042,7 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Init(TIM_HandleTypeDef *htim, TIM_Encoder_Ini assert_param(IS_TIM_IC_PRESCALER(sConfig->IC2Prescaler)); assert_param(IS_TIM_IC_FILTER(sConfig->IC1Filter)); assert_param(IS_TIM_IC_FILTER(sConfig->IC2Filter)); + assert_param(IS_TIM_PERIOD(htim, htim->Init.Period)); if (htim->State == HAL_TIM_STATE_RESET) { @@ -2686,6 +3110,15 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Init(TIM_HandleTypeDef *htim, TIM_Encoder_Ini /* Write to TIMx CCER */ htim->Instance->CCER = tmpccer; + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + /* Initialize the TIM state*/ htim->State = HAL_TIM_STATE_READY; @@ -2720,6 +3153,15 @@ HAL_StatusTypeDef HAL_TIM_Encoder_DeInit(TIM_HandleTypeDef *htim) HAL_TIM_Encoder_MspDeInit(htim); #endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + /* Change TIM state */ htim->State = HAL_TIM_STATE_RESET; @@ -2771,8 +3213,58 @@ __weak void HAL_TIM_Encoder_MspDeInit(TIM_HandleTypeDef *htim) */ HAL_StatusTypeDef HAL_TIM_Encoder_Start(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + /* Check the parameters */ - assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Set the TIM channel(s) state */ + if (Channel == TIM_CHANNEL_1) + { + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || 
(complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else if (Channel == TIM_CHANNEL_2) + { + if ((channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } /* Enable the encoder interface channels */ switch (Channel) @@ -2816,7 +3308,7 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Start(TIM_HandleTypeDef *htim, uint32_t Channe HAL_StatusTypeDef HAL_TIM_Encoder_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) { /* Check the parameters */ - assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); /* Disable the Input Capture channels 1 and 2 (in the EncoderInterface the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) */ @@ -2845,6 +3337,20 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Stop(TIM_HandleTypeDef *htim, uint32_t Channel /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); + /* Set the TIM channel(s) state */ + if ((Channel == TIM_CHANNEL_1) || (Channel == TIM_CHANNEL_2)) + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + /* Return function status */ return HAL_OK; } @@ -2861,8 +3367,58 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Stop(TIM_HandleTypeDef *htim, uint32_t Channel */ HAL_StatusTypeDef HAL_TIM_Encoder_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + /* Check the parameters */ - assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Set the TIM channel(s) state */ + if (Channel == TIM_CHANNEL_1) + { + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + 
TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else if (Channel == TIM_CHANNEL_2) + { + if ((channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } /* Enable the encoder interface channels */ /* Enable the capture compare Interrupts 1 and/or 2 */ @@ -2912,7 +3468,7 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Start_IT(TIM_HandleTypeDef *htim, uint32_t Cha HAL_StatusTypeDef HAL_TIM_Encoder_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) { /* Check the parameters */ - assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); /* Disable the Input Capture channels 1 and 2 (in the EncoderInterface the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) */ @@ -2943,8 +3499,19 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Chan /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); - /* Change the htim state */ - htim->State = HAL_TIM_STATE_READY; + /* Set the TIM channel(s) state */ + if ((Channel == TIM_CHANNEL_1) || (Channel == TIM_CHANNEL_2)) + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } /* Return function status */ return HAL_OK; @@ -2966,27 +3533,95 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Chan HAL_StatusTypeDef HAL_TIM_Encoder_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData1, uint32_t *pData2, uint16_t Length) { + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + /* Check the parameters */ - assert_param(IS_TIM_DMA_CC_INSTANCE(htim->Instance)); + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); - if (htim->State == HAL_TIM_STATE_BUSY) + /* Set the TIM channel(s) state */ + if (Channel == TIM_CHANNEL_1) { - return HAL_BUSY; + if ((channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY) + || 
(complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY)) + { + return HAL_BUSY; + } + else if ((channel_1_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_READY)) + { + if ((pData1 == NULL) || (Length == 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } } - else if (htim->State == HAL_TIM_STATE_READY) + else if (Channel == TIM_CHANNEL_2) { - if ((((pData1 == NULL) || (pData2 == NULL))) && (Length > 0U)) + if ((channel_2_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_2_state == HAL_TIM_CHANNEL_STATE_BUSY)) { - return HAL_ERROR; + return HAL_BUSY; + } + else if ((channel_2_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_2_state == HAL_TIM_CHANNEL_STATE_READY)) + { + if ((pData2 == NULL) || (Length == 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } } else { - htim->State = HAL_TIM_STATE_BUSY; + return HAL_ERROR; } } else { - /* nothing to do */ + if ((channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (channel_2_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_2_state == HAL_TIM_CHANNEL_STATE_BUSY)) + { + return HAL_BUSY; + } + else if ((channel_1_state == HAL_TIM_CHANNEL_STATE_READY) + && (channel_2_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_2_state == HAL_TIM_CHANNEL_STATE_READY)) + { + if ((((pData1 == NULL) || (pData2 == NULL))) || (Length == 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } } switch (Channel) @@ -3001,18 +3636,21 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Ch htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData1, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData1, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Input Capture DMA request */ __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + /* Enable the Peripheral */ __HAL_TIM_ENABLE(htim); - /* Enable the Capture compare channel */ - TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); break; } @@ -3025,22 +3663,25 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Ch /* Set the DMA error callback */ htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->CCR2, (uint32_t)pData2, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], 
(uint32_t)&htim->Instance->CCR2, (uint32_t)pData2, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Input Capture DMA request */ __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + /* Enable the Peripheral */ __HAL_TIM_ENABLE(htim); - /* Enable the Capture compare channel */ - TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); break; } - case TIM_CHANNEL_ALL: + default: { /* Set the DMA capture callbacks */ htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; @@ -3050,8 +3691,10 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Ch htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData1, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData1, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } @@ -3063,27 +3706,29 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Ch htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->CCR2, (uint32_t)pData2, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->CCR2, (uint32_t)pData2, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } - /* Enable the Peripheral */ - __HAL_TIM_ENABLE(htim); - - /* Enable the Capture compare channel */ - TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); - TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); /* Enable the TIM Input Capture DMA request */ __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); /* Enable the TIM Input Capture DMA request */ __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); - break; - } - default: + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + break; + } } + /* Return function status */ return HAL_OK; } @@ -3101,7 +3746,7 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Ch HAL_StatusTypeDef HAL_TIM_Encoder_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) { /* Check the parameters */ - assert_param(IS_TIM_DMA_CC_INSTANCE(htim->Instance)); + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); /* Disable the Input Capture channels 1 and 2 (in the EncoderInterface the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) */ @@ -3136,8 +3781,19 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Cha /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); - /* Change the htim state */ - htim->State = HAL_TIM_STATE_READY; + /* Set the TIM channel(s) state */ + if ((Channel == TIM_CHANNEL_1) || (Channel == TIM_CHANNEL_2)) + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, 
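/*
 * Illustrative usage sketch, not part of the patch: HAL_TIM_Encoder_Start_DMA()
 * now validates only the buffer(s) of the channel(s) actually being started and
 * tracks BUSY/READY per channel. Handle name, buffer names and the sample count
 * are assumptions.
 */
extern TIM_HandleTypeDef htim3;          /* assumed encoder timer handle */
extern void Error_Handler(void);         /* assumed application error hook */
static uint32_t cc1_samples[32];         /* filled from CCR1 by DMA */
static uint32_t cc2_samples[32];         /* filled from CCR2 by DMA */

void encoder_start_dma_example(void)
{
  if (HAL_TIM_Encoder_Start_DMA(&htim3, TIM_CHANNEL_ALL,
                                cc1_samples, cc2_samples, 32U) != HAL_OK)
  {
    Error_Handler();  /* HAL_BUSY if a channel is running, HAL_ERROR otherwise */
  }
}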
HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } /* Return function status */ return HAL_OK; @@ -3166,13 +3822,16 @@ HAL_StatusTypeDef HAL_TIM_Encoder_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Cha */ void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim) { + uint32_t itsource = htim->Instance->DIER; + uint32_t itflag = htim->Instance->SR; + /* Capture compare 1 event */ - if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_CC1) != RESET) + if ((itflag & (TIM_FLAG_CC1)) == (TIM_FLAG_CC1)) { - if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_CC1) != RESET) + if ((itsource & (TIM_IT_CC1)) == (TIM_IT_CC1)) { { - __HAL_TIM_CLEAR_IT(htim, TIM_IT_CC1); + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_CC1); htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; /* Input capture event */ @@ -3200,11 +3859,11 @@ void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim) } } /* Capture compare 2 event */ - if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_CC2) != RESET) + if ((itflag & (TIM_FLAG_CC2)) == (TIM_FLAG_CC2)) { - if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_CC2) != RESET) + if ((itsource & (TIM_IT_CC2)) == (TIM_IT_CC2)) { - __HAL_TIM_CLEAR_IT(htim, TIM_IT_CC2); + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_CC2); htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; /* Input capture event */ if ((htim->Instance->CCMR1 & TIM_CCMR1_CC2S) != 0x00U) @@ -3230,11 +3889,11 @@ void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim) } } /* Capture compare 3 event */ - if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_CC3) != RESET) + if ((itflag & (TIM_FLAG_CC3)) == (TIM_FLAG_CC3)) { - if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_CC3) != RESET) + if ((itsource & (TIM_IT_CC3)) == (TIM_IT_CC3)) { - __HAL_TIM_CLEAR_IT(htim, TIM_IT_CC3); + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_CC3); htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; /* Input capture event */ if ((htim->Instance->CCMR2 & TIM_CCMR2_CC3S) != 0x00U) @@ -3260,11 +3919,11 @@ void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim) } } /* Capture compare 4 event */ - if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_CC4) != RESET) + if ((itflag & (TIM_FLAG_CC4)) == (TIM_FLAG_CC4)) { - if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_CC4) != RESET) + if ((itsource & (TIM_IT_CC4)) == (TIM_IT_CC4)) { - __HAL_TIM_CLEAR_IT(htim, TIM_IT_CC4); + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_CC4); htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; /* Input capture event */ if ((htim->Instance->CCMR2 & TIM_CCMR2_CC4S) != 0x00U) @@ -3290,11 +3949,11 @@ void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim) } } /* TIM Update event */ - if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_UPDATE) != RESET) + if ((itflag & (TIM_FLAG_UPDATE)) == (TIM_FLAG_UPDATE)) { - if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_UPDATE) != RESET) + if ((itsource & (TIM_IT_UPDATE)) == (TIM_IT_UPDATE)) { - __HAL_TIM_CLEAR_IT(htim, TIM_IT_UPDATE); + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_UPDATE); #if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) htim->PeriodElapsedCallback(htim); #else @@ -3303,11 +3962,11 @@ void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim) } } /* TIM Break input event */ - if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_BREAK) != RESET) + if ((itflag & (TIM_FLAG_BREAK)) == (TIM_FLAG_BREAK)) { - if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_BREAK) != RESET) + if ((itsource & (TIM_IT_BREAK)) == (TIM_IT_BREAK)) { - __HAL_TIM_CLEAR_IT(htim, TIM_IT_BREAK); + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_BREAK); #if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) htim->BreakCallback(htim); #else @@ -3316,11 +3975,11 @@ void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim) } } /* TIM Trigger detection event */ - if (__HAL_TIM_GET_FLAG(htim, 
TIM_FLAG_TRIGGER) != RESET) + if ((itflag & (TIM_FLAG_TRIGGER)) == (TIM_FLAG_TRIGGER)) { - if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_TRIGGER) != RESET) + if ((itsource & (TIM_IT_TRIGGER)) == (TIM_IT_TRIGGER)) { - __HAL_TIM_CLEAR_IT(htim, TIM_IT_TRIGGER); + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_TRIGGER); #if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) htim->TriggerCallback(htim); #else @@ -3329,11 +3988,11 @@ void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim) } } /* TIM commutation event */ - if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_COM) != RESET) + if ((itflag & (TIM_FLAG_COM)) == (TIM_FLAG_COM)) { - if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_COM) != RESET) + if ((itsource & (TIM_IT_COM)) == (TIM_IT_COM)) { - __HAL_TIM_CLEAR_IT(htim, TIM_FLAG_COM); + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_COM); #if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) htim->CommutationCallback(htim); #else @@ -3380,9 +4039,11 @@ void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim) * @retval HAL status */ HAL_StatusTypeDef HAL_TIM_OC_ConfigChannel(TIM_HandleTypeDef *htim, - TIM_OC_InitTypeDef *sConfig, + const TIM_OC_InitTypeDef *sConfig, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_CHANNELS(Channel)); assert_param(IS_TIM_OC_MODE(sConfig->OCMode)); @@ -3391,8 +4052,6 @@ HAL_StatusTypeDef HAL_TIM_OC_ConfigChannel(TIM_HandleTypeDef *htim, /* Process Locked */ __HAL_LOCK(htim); - htim->State = HAL_TIM_STATE_BUSY; - switch (Channel) { case TIM_CHANNEL_1: @@ -3436,14 +4095,13 @@ HAL_StatusTypeDef HAL_TIM_OC_ConfigChannel(TIM_HandleTypeDef *htim, } default: + status = HAL_ERROR; break; } - htim->State = HAL_TIM_STATE_READY; - __HAL_UNLOCK(htim); - return HAL_OK; + return status; } /** @@ -3459,8 +4117,10 @@ HAL_StatusTypeDef HAL_TIM_OC_ConfigChannel(TIM_HandleTypeDef *htim, * @arg TIM_CHANNEL_4: TIM Channel 4 selected * @retval HAL status */ -HAL_StatusTypeDef HAL_TIM_IC_ConfigChannel(TIM_HandleTypeDef *htim, TIM_IC_InitTypeDef *sConfig, uint32_t Channel) +HAL_StatusTypeDef HAL_TIM_IC_ConfigChannel(TIM_HandleTypeDef *htim, const TIM_IC_InitTypeDef *sConfig, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); assert_param(IS_TIM_IC_POLARITY(sConfig->ICPolarity)); @@ -3471,8 +4131,6 @@ HAL_StatusTypeDef HAL_TIM_IC_ConfigChannel(TIM_HandleTypeDef *htim, TIM_IC_InitT /* Process Locked */ __HAL_LOCK(htim); - htim->State = HAL_TIM_STATE_BUSY; - if (Channel == TIM_CHANNEL_1) { /* TI1 Configuration */ @@ -3519,7 +4177,7 @@ HAL_StatusTypeDef HAL_TIM_IC_ConfigChannel(TIM_HandleTypeDef *htim, TIM_IC_InitT /* Set the IC3PSC value */ htim->Instance->CCMR2 |= sConfig->ICPrescaler; } - else + else if (Channel == TIM_CHANNEL_4) { /* TI4 Configuration */ assert_param(IS_TIM_CC4_INSTANCE(htim->Instance)); @@ -3535,12 +4193,14 @@ HAL_StatusTypeDef HAL_TIM_IC_ConfigChannel(TIM_HandleTypeDef *htim, TIM_IC_InitT /* Set the IC4PSC value */ htim->Instance->CCMR2 |= (sConfig->ICPrescaler << 8U); } - - htim->State = HAL_TIM_STATE_READY; + else + { + status = HAL_ERROR; + } __HAL_UNLOCK(htim); - return HAL_OK; + return status; } /** @@ -3557,9 +4217,11 @@ HAL_StatusTypeDef HAL_TIM_IC_ConfigChannel(TIM_HandleTypeDef *htim, TIM_IC_InitT * @retval HAL status */ HAL_StatusTypeDef HAL_TIM_PWM_ConfigChannel(TIM_HandleTypeDef *htim, - TIM_OC_InitTypeDef *sConfig, + const TIM_OC_InitTypeDef *sConfig, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_CHANNELS(Channel)); 
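/*
 * Illustrative usage sketch, not part of the patch: the reworked
 * HAL_TIM_IRQHandler() above samples DIER and SR once and then dispatches to
 * the user callbacks. A typical vector handler and capture callback (default,
 * non-registered callback scheme) could look as follows; TIM3 is assumed.
 */
extern TIM_HandleTypeDef htim3;          /* assumed timer handle */

void TIM3_IRQHandler(void)
{
  HAL_TIM_IRQHandler(&htim3);            /* clears the flag, then calls back */
}

void HAL_TIM_IC_CaptureCallback(TIM_HandleTypeDef *htim)
{
  if (htim->Channel == HAL_TIM_ACTIVE_CHANNEL_1)
  {
    uint32_t capture = HAL_TIM_ReadCapturedValue(htim, TIM_CHANNEL_1);
    (void)capture;                       /* application-specific use */
  }
}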
assert_param(IS_TIM_PWM_MODE(sConfig->OCMode)); @@ -3569,8 +4231,6 @@ HAL_StatusTypeDef HAL_TIM_PWM_ConfigChannel(TIM_HandleTypeDef *htim, /* Process Locked */ __HAL_LOCK(htim); - htim->State = HAL_TIM_STATE_BUSY; - switch (Channel) { case TIM_CHANNEL_1: @@ -3642,14 +4302,13 @@ HAL_StatusTypeDef HAL_TIM_PWM_ConfigChannel(TIM_HandleTypeDef *htim, } default: + status = HAL_ERROR; break; } - htim->State = HAL_TIM_STATE_READY; - __HAL_UNLOCK(htim); - return HAL_OK; + return status; } /** @@ -3674,6 +4333,7 @@ HAL_StatusTypeDef HAL_TIM_PWM_ConfigChannel(TIM_HandleTypeDef *htim, HAL_StatusTypeDef HAL_TIM_OnePulse_ConfigChannel(TIM_HandleTypeDef *htim, TIM_OnePulse_InitTypeDef *sConfig, uint32_t OutputChannel, uint32_t InputChannel) { + HAL_StatusTypeDef status = HAL_OK; TIM_OC_InitTypeDef temp1; /* Check the parameters */ @@ -3704,6 +4364,7 @@ HAL_StatusTypeDef HAL_TIM_OnePulse_ConfigChannel(TIM_HandleTypeDef *htim, TIM_O TIM_OC1_SetConfig(htim->Instance, &temp1); break; } + case TIM_CHANNEL_2: { assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); @@ -3711,60 +4372,67 @@ HAL_StatusTypeDef HAL_TIM_OnePulse_ConfigChannel(TIM_HandleTypeDef *htim, TIM_O TIM_OC2_SetConfig(htim->Instance, &temp1); break; } + default: + status = HAL_ERROR; break; } - switch (InputChannel) + if (status == HAL_OK) { - case TIM_CHANNEL_1: + switch (InputChannel) { - assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + case TIM_CHANNEL_1: + { + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); - TIM_TI1_SetConfig(htim->Instance, sConfig->ICPolarity, - sConfig->ICSelection, sConfig->ICFilter); + TIM_TI1_SetConfig(htim->Instance, sConfig->ICPolarity, + sConfig->ICSelection, sConfig->ICFilter); - /* Reset the IC1PSC Bits */ - htim->Instance->CCMR1 &= ~TIM_CCMR1_IC1PSC; + /* Reset the IC1PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC1PSC; - /* Select the Trigger source */ - htim->Instance->SMCR &= ~TIM_SMCR_TS; - htim->Instance->SMCR |= TIM_TS_TI1FP1; + /* Select the Trigger source */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= TIM_TS_TI1FP1; - /* Select the Slave Mode */ - htim->Instance->SMCR &= ~TIM_SMCR_SMS; - htim->Instance->SMCR |= TIM_SLAVEMODE_TRIGGER; - break; - } - case TIM_CHANNEL_2: - { - assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + /* Select the Slave Mode */ + htim->Instance->SMCR &= ~TIM_SMCR_SMS; + htim->Instance->SMCR |= TIM_SLAVEMODE_TRIGGER; + break; + } - TIM_TI2_SetConfig(htim->Instance, sConfig->ICPolarity, - sConfig->ICSelection, sConfig->ICFilter); + case TIM_CHANNEL_2: + { + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); - /* Reset the IC2PSC Bits */ - htim->Instance->CCMR1 &= ~TIM_CCMR1_IC2PSC; + TIM_TI2_SetConfig(htim->Instance, sConfig->ICPolarity, + sConfig->ICSelection, sConfig->ICFilter); - /* Select the Trigger source */ - htim->Instance->SMCR &= ~TIM_SMCR_TS; - htim->Instance->SMCR |= TIM_TS_TI2FP2; + /* Reset the IC2PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC2PSC; - /* Select the Slave Mode */ - htim->Instance->SMCR &= ~TIM_SMCR_SMS; - htim->Instance->SMCR |= TIM_SLAVEMODE_TRIGGER; - break; - } + /* Select the Trigger source */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= TIM_TS_TI2FP2; - default: - break; + /* Select the Slave Mode */ + htim->Instance->SMCR &= ~TIM_SMCR_SMS; + htim->Instance->SMCR |= TIM_SLAVEMODE_TRIGGER; + break; + } + + default: + status = HAL_ERROR; + break; + } } htim->State = HAL_TIM_STATE_READY; __HAL_UNLOCK(htim); - return HAL_OK; + return status; } else { @@ -3810,20 +4478,77 @@ HAL_StatusTypeDef 
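/*
 * Illustrative usage sketch, not part of the patch: the channel configuration
 * structures are now taken as const, so a read-only initializer can be passed
 * directly, and an invalid channel is reported with HAL_ERROR instead of being
 * silently ignored. Field values and names below are assumptions.
 */
extern void Error_Handler(void);         /* assumed application error hook */

void pwm_config_example(TIM_HandleTypeDef *htim)
{
  static const TIM_OC_InitTypeDef pwm_cfg =
  {
    .OCMode     = TIM_OCMODE_PWM1,
    .Pulse      = 500U,                  /* compare value, assumed */
    .OCPolarity = TIM_OCPOLARITY_HIGH,
    .OCFastMode = TIM_OCFAST_DISABLE
  };

  if (HAL_TIM_PWM_ConfigChannel(htim, &pwm_cfg, TIM_CHANNEL_1) != HAL_OK)
  {
    Error_Handler();
  }
}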
HAL_TIM_OnePulse_ConfigChannel(TIM_HandleTypeDef *htim, TIM_O * @note This function should be used only when BurstLength is equal to DMA data transfer length. * @retval HAL status */ -HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, uint32_t BurstRequestSrc, - uint32_t *BurstBuffer, uint32_t BurstLength) +HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, const uint32_t *BurstBuffer, + uint32_t BurstLength) { + HAL_StatusTypeDef status; + + status = HAL_TIM_DMABurst_MultiWriteStart(htim, BurstBaseAddress, BurstRequestSrc, BurstBuffer, BurstLength, + ((BurstLength) >> 8U) + 1U); + + + + return status; +} + +/** + * @brief Configure the DMA Burst to transfer multiple Data from the memory to the TIM peripheral + * @param htim TIM handle + * @param BurstBaseAddress TIM Base address from where the DMA will start the Data write + * This parameter can be one of the following values: + * @arg TIM_DMABASE_CR1 + * @arg TIM_DMABASE_CR2 + * @arg TIM_DMABASE_SMCR + * @arg TIM_DMABASE_DIER + * @arg TIM_DMABASE_SR + * @arg TIM_DMABASE_EGR + * @arg TIM_DMABASE_CCMR1 + * @arg TIM_DMABASE_CCMR2 + * @arg TIM_DMABASE_CCER + * @arg TIM_DMABASE_CNT + * @arg TIM_DMABASE_PSC + * @arg TIM_DMABASE_ARR + * @arg TIM_DMABASE_RCR + * @arg TIM_DMABASE_CCR1 + * @arg TIM_DMABASE_CCR2 + * @arg TIM_DMABASE_CCR3 + * @arg TIM_DMABASE_CCR4 + * @arg TIM_DMABASE_BDTR + * @param BurstRequestSrc TIM DMA Request sources + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: TIM update Interrupt source + * @arg TIM_DMA_CC1: TIM Capture Compare 1 DMA source + * @arg TIM_DMA_CC2: TIM Capture Compare 2 DMA source + * @arg TIM_DMA_CC3: TIM Capture Compare 3 DMA source + * @arg TIM_DMA_CC4: TIM Capture Compare 4 DMA source + * @arg TIM_DMA_COM: TIM Commutation DMA source + * @arg TIM_DMA_TRIGGER: TIM Trigger DMA source + * @param BurstBuffer The Buffer address. + * @param BurstLength DMA Burst length. This parameter can be one value + * between: TIM_DMABURSTLENGTH_1TRANSFER and TIM_DMABURSTLENGTH_18TRANSFERS. + * @param DataLength Data length. This parameter can be one value + * between 1 and 0xFFFF. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_MultiWriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, const uint32_t *BurstBuffer, + uint32_t BurstLength, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_DMABURST_INSTANCE(htim->Instance)); assert_param(IS_TIM_DMA_BASE(BurstBaseAddress)); assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); assert_param(IS_TIM_DMA_LENGTH(BurstLength)); + assert_param(IS_TIM_DMA_DATA_LENGTH(DataLength)); - if (htim->State == HAL_TIM_STATE_BUSY) + if (htim->DMABurstState == HAL_DMA_BURST_STATE_BUSY) { return HAL_BUSY; } - else if (htim->State == HAL_TIM_STATE_READY) + else if (htim->DMABurstState == HAL_DMA_BURST_STATE_READY) { if ((BurstBuffer == NULL) && (BurstLength > 0U)) { @@ -3831,13 +4556,14 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t } else { - htim->State = HAL_TIM_STATE_BUSY; + htim->DMABurstState = HAL_DMA_BURST_STATE_BUSY; } } else { /* nothing to do */ } + switch (BurstRequestSrc) { case TIM_DMA_UPDATE: @@ -3850,8 +4576,10 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t htim->hdma[TIM_DMA_ID_UPDATE]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)BurstBuffer, (uint32_t)&htim->Instance->DMAR, ((BurstLength) >> 8U) + 1U) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } break; @@ -3867,8 +4595,9 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t /* Enable the DMA stream */ if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)BurstBuffer, - (uint32_t)&htim->Instance->DMAR, ((BurstLength) >> 8U) + 1U) != HAL_OK) + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } break; @@ -3884,8 +4613,9 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t /* Enable the DMA stream */ if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)BurstBuffer, - (uint32_t)&htim->Instance->DMAR, ((BurstLength) >> 8U) + 1U) != HAL_OK) + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } break; @@ -3901,8 +4631,9 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t /* Enable the DMA stream */ if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)BurstBuffer, - (uint32_t)&htim->Instance->DMAR, ((BurstLength) >> 8U) + 1U) != HAL_OK) + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } break; @@ -3918,8 +4649,9 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t /* Enable the DMA stream */ if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)BurstBuffer, - (uint32_t)&htim->Instance->DMAR, ((BurstLength) >> 8U) + 1U) != HAL_OK) + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } break; @@ -3935,8 +4667,9 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t /* Enable the DMA stream */ if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_COMMUTATION], (uint32_t)BurstBuffer, - (uint32_t)&htim->Instance->DMAR, ((BurstLength) >> 8U) + 1U) != HAL_OK) + (uint32_t)&htim->Instance->DMAR, DataLength) != 
HAL_OK) { + /* Return error status */ return HAL_ERROR; } break; @@ -3952,25 +4685,28 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t /* Enable the DMA stream */ if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_TRIGGER], (uint32_t)BurstBuffer, - (uint32_t)&htim->Instance->DMAR, ((BurstLength) >> 8U) + 1U) != HAL_OK) + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } break; } default: + status = HAL_ERROR; break; } - /* configure the DMA Burst Mode */ - htim->Instance->DCR = (BurstBaseAddress | BurstLength); - - /* Enable the TIM DMA Request */ - __HAL_TIM_ENABLE_DMA(htim, BurstRequestSrc); - htim->State = HAL_TIM_STATE_READY; + if (status == HAL_OK) + { + /* Configure the DMA Burst Mode */ + htim->Instance->DCR = (BurstBaseAddress | BurstLength); + /* Enable the TIM DMA Request */ + __HAL_TIM_ENABLE_DMA(htim, BurstRequestSrc); + } /* Return function status */ - return HAL_OK; + return status; } /** @@ -3982,6 +4718,7 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc) { HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); @@ -3990,50 +4727,104 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStop(TIM_HandleTypeDef *htim, uint32_t B { case TIM_DMA_UPDATE: { - status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_UPDATE]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_UPDATE]); break; } case TIM_DMA_CC1: { - status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); break; } case TIM_DMA_CC2: { - status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); break; } case TIM_DMA_CC3: { - status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); break; } case TIM_DMA_CC4: { - status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); break; } case TIM_DMA_COM: { - status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_COMMUTATION]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_COMMUTATION]); break; } case TIM_DMA_TRIGGER: { - status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_TRIGGER]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_TRIGGER]); break; } default: + status = HAL_ERROR; break; } - if (HAL_OK == status) - { - /* Disable the TIM Update DMA request */ - __HAL_TIM_DISABLE_DMA(htim, BurstRequestSrc); - } + if (status == HAL_OK) + { + /* Disable the TIM Update DMA request */ + __HAL_TIM_DISABLE_DMA(htim, BurstRequestSrc); + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + } + + /* Return function status */ + return status; +} + +/** + * @brief Configure the DMA Burst to transfer Data from the TIM peripheral to the memory + * @param htim TIM handle + * @param BurstBaseAddress TIM Base address from where the DMA will start the Data read + * This parameter can be one of the following values: + * @arg TIM_DMABASE_CR1 + * @arg TIM_DMABASE_CR2 + * @arg TIM_DMABASE_SMCR + * @arg TIM_DMABASE_DIER + * @arg TIM_DMABASE_SR + * @arg TIM_DMABASE_EGR + * @arg TIM_DMABASE_CCMR1 + * @arg TIM_DMABASE_CCMR2 + * @arg TIM_DMABASE_CCER + * @arg TIM_DMABASE_CNT + * @arg TIM_DMABASE_PSC + * @arg TIM_DMABASE_ARR + * @arg TIM_DMABASE_RCR + * @arg TIM_DMABASE_CCR1 + * @arg TIM_DMABASE_CCR2 + * @arg TIM_DMABASE_CCR3 + * 
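/*
 * Illustrative usage sketch, not part of the patch:
 * HAL_TIM_DMABurst_MultiWriteStart() decouples the register-burst length from
 * the total DMA transfer count, so several update events can each consume one
 * burst. The handle name, the buffer contents (ARR, RCR, CCR1 per period) and
 * starting the counter elsewhere are all assumptions.
 */
extern TIM_HandleTypeDef htim1;          /* assumed advanced-control timer */
extern void Error_Handler(void);         /* assumed application error hook */

void burst_multi_write_example(void)
{
  /* Two update periods, three registers written per update: ARR, RCR, CCR1 */
  static const uint32_t burst[6] = { 999U,  0U, 500U,     /* period 1 */
                                     1999U, 0U, 1000U };  /* period 2 */

  if (HAL_TIM_DMABurst_MultiWriteStart(&htim1, TIM_DMABASE_ARR, TIM_DMA_UPDATE,
                                       burst, TIM_DMABURSTLENGTH_3TRANSFERS,
                                       6U) != HAL_OK)
  {
    Error_Handler();
  }
  /* The counter itself is started separately, e.g. with HAL_TIM_PWM_Start(). */
}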
@arg TIM_DMABASE_CCR4 + * @arg TIM_DMABASE_BDTR + * @param BurstRequestSrc TIM DMA Request sources + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: TIM update Interrupt source + * @arg TIM_DMA_CC1: TIM Capture Compare 1 DMA source + * @arg TIM_DMA_CC2: TIM Capture Compare 2 DMA source + * @arg TIM_DMA_CC3: TIM Capture Compare 3 DMA source + * @arg TIM_DMA_CC4: TIM Capture Compare 4 DMA source + * @arg TIM_DMA_COM: TIM Commutation DMA source + * @arg TIM_DMA_TRIGGER: TIM Trigger DMA source + * @param BurstBuffer The Buffer address. + * @param BurstLength DMA Burst length. This parameter can be one value + * between: TIM_DMABURSTLENGTH_1TRANSFER and TIM_DMABURSTLENGTH_18TRANSFERS. + * @note This function should be used only when BurstLength is equal to DMA data transfer length. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, uint32_t BurstLength) +{ + HAL_StatusTypeDef status; + + status = HAL_TIM_DMABurst_MultiReadStart(htim, BurstBaseAddress, BurstRequestSrc, BurstBuffer, BurstLength, + ((BurstLength) >> 8U) + 1U); + - /* Return function status */ return status; } @@ -4072,23 +4863,28 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStop(TIM_HandleTypeDef *htim, uint32_t B * @param BurstBuffer The Buffer address. * @param BurstLength DMA Burst length. This parameter can be one value * between: TIM_DMABURSTLENGTH_1TRANSFER and TIM_DMABURSTLENGTH_18TRANSFERS. - * @note This function should be used only when BurstLength is equal to DMA data transfer length. + * @param DataLength Data length. This parameter can be one value + * between 1 and 0xFFFF. * @retval HAL status */ -HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, - uint32_t BurstRequestSrc, uint32_t *BurstBuffer, uint32_t BurstLength) +HAL_StatusTypeDef HAL_TIM_DMABurst_MultiReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, + uint32_t BurstLength, uint32_t DataLength) { + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_DMABURST_INSTANCE(htim->Instance)); assert_param(IS_TIM_DMA_BASE(BurstBaseAddress)); assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); assert_param(IS_TIM_DMA_LENGTH(BurstLength)); + assert_param(IS_TIM_DMA_DATA_LENGTH(DataLength)); - if (htim->State == HAL_TIM_STATE_BUSY) + if (htim->DMABurstState == HAL_DMA_BURST_STATE_BUSY) { return HAL_BUSY; } - else if (htim->State == HAL_TIM_STATE_READY) + else if (htim->DMABurstState == HAL_DMA_BURST_STATE_READY) { if ((BurstBuffer == NULL) && (BurstLength > 0U)) { @@ -4096,7 +4892,7 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t B } else { - htim->State = HAL_TIM_STATE_BUSY; + htim->DMABurstState = HAL_DMA_BURST_STATE_BUSY; } } else @@ -4115,8 +4911,10 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t B htim->hdma[TIM_DMA_ID_UPDATE]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, ((BurstLength) >> 8U) + 1U) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } break; @@ -4131,15 +4929,17 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef 
*htim, uint32_t B htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, ((BurstLength) >> 8U) + 1U) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } break; } case TIM_DMA_CC2: { - /* Set the DMA capture/compare callbacks */ + /* Set the DMA capture callbacks */ htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMACaptureCplt; htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; @@ -4147,8 +4947,10 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t B htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, ((BurstLength) >> 8U) + 1U) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } break; @@ -4163,8 +4965,10 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t B htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, ((BurstLength) >> 8U) + 1U) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } break; @@ -4179,8 +4983,10 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t B htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, ((BurstLength) >> 8U) + 1U) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } break; @@ -4195,8 +5001,10 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t B htim->hdma[TIM_DMA_ID_COMMUTATION]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_COMMUTATION], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, ((BurstLength) >> 8U) + 1U) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_COMMUTATION], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } break; @@ -4211,26 +5019,30 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t B htim->hdma[TIM_DMA_ID_TRIGGER]->XferErrorCallback = TIM_DMAError ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_TRIGGER], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, ((BurstLength) >> 8U) + 1U) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_TRIGGER], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } break; } default: + status = HAL_ERROR; break; } - /* configure the DMA Burst Mode */ - htim->Instance->DCR = (BurstBaseAddress | BurstLength); - - /* Enable the TIM DMA Request */ - 
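/*
 * Illustrative usage sketch, not part of the patch: the read counterpart
 * mirrors the write path; here CCR1..CCR3 are copied to memory on every
 * capture/compare 1 event. Handle, buffer name and sizes are assumptions.
 */
extern TIM_HandleTypeDef htim2;          /* assumed general-purpose timer */
extern void Error_Handler(void);         /* assumed application error hook */
static uint32_t ccr_snapshot[3];

void burst_multi_read_example(void)
{
  if (HAL_TIM_DMABurst_MultiReadStart(&htim2, TIM_DMABASE_CCR1, TIM_DMA_CC1,
                                      ccr_snapshot, TIM_DMABURSTLENGTH_3TRANSFERS,
                                      3U) != HAL_OK)
  {
    Error_Handler();
  }
}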
__HAL_TIM_ENABLE_DMA(htim, BurstRequestSrc); + if (status == HAL_OK) + { + /* Configure the DMA Burst Mode */ + htim->Instance->DCR = (BurstBaseAddress | BurstLength); - htim->State = HAL_TIM_STATE_READY; + /* Enable the TIM DMA Request */ + __HAL_TIM_ENABLE_DMA(htim, BurstRequestSrc); + } /* Return function status */ - return HAL_OK; + return status; } /** @@ -4242,6 +5054,7 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t B HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc) { HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); @@ -4250,47 +5063,51 @@ HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStop(TIM_HandleTypeDef *htim, uint32_t Bu { case TIM_DMA_UPDATE: { - status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_UPDATE]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_UPDATE]); break; } case TIM_DMA_CC1: { - status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); break; } case TIM_DMA_CC2: { - status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); break; } case TIM_DMA_CC3: { - status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); break; } case TIM_DMA_CC4: { - status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); break; } case TIM_DMA_COM: { - status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_COMMUTATION]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_COMMUTATION]); break; } case TIM_DMA_TRIGGER: { - status = HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_TRIGGER]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_TRIGGER]); break; } default: + status = HAL_ERROR; break; } - if (HAL_OK == status) + if (status == HAL_OK) { /* Disable the TIM Update DMA request */ __HAL_TIM_DISABLE_DMA(htim, BurstRequestSrc); + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; } /* Return function status */ @@ -4355,9 +5172,11 @@ HAL_StatusTypeDef HAL_TIM_GenerateEvent(TIM_HandleTypeDef *htim, uint32_t EventS * @retval HAL status */ HAL_StatusTypeDef HAL_TIM_ConfigOCrefClear(TIM_HandleTypeDef *htim, - TIM_ClearInputConfigTypeDef *sClearInputConfig, + const TIM_ClearInputConfigTypeDef *sClearInputConfig, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_OCXREF_CLEAR_INSTANCE(htim->Instance)); assert_param(IS_TIM_CLEARINPUT_SOURCE(sClearInputConfig->ClearInputSource)); @@ -4399,76 +5218,80 @@ HAL_StatusTypeDef HAL_TIM_ConfigOCrefClear(TIM_HandleTypeDef *htim, } default: + status = HAL_ERROR; break; } - switch (Channel) + if (status == HAL_OK) { - case TIM_CHANNEL_1: - { - if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) - { - /* Enable the OCREF clear feature for Channel 1 */ - SET_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC1CE); - } - else - { - /* Disable the OCREF clear feature for Channel 1 */ - CLEAR_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC1CE); - } - break; - } - case TIM_CHANNEL_2: - { - if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) - { - /* Enable the OCREF clear feature for Channel 2 */ - SET_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC2CE); - } - else - { - /* Disable the OCREF clear feature for Channel 2 */ - CLEAR_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC2CE); - } - break; - } - case TIM_CHANNEL_3: + switch (Channel) { - if 
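/*
 * Illustrative usage sketch, not part of the patch: enabling OCxREF clearing
 * from the ETR input for channel 1; an unsupported clear-input source or
 * channel now returns HAL_ERROR. Prescaler, polarity and filter values are
 * assumptions.
 */
extern void Error_Handler(void);         /* assumed application error hook */

void ocref_clear_example(TIM_HandleTypeDef *htim)
{
  TIM_ClearInputConfigTypeDef clear_cfg;

  clear_cfg.ClearInputState     = ENABLE;
  clear_cfg.ClearInputSource    = TIM_CLEARINPUTSOURCE_ETR;
  clear_cfg.ClearInputPrescaler = TIM_CLEARINPUTPRESCALER_DIV1;
  clear_cfg.ClearInputPolarity  = TIM_CLEARINPUTPOLARITY_NONINVERTED;
  clear_cfg.ClearInputFilter    = 0U;

  if (HAL_TIM_ConfigOCrefClear(htim, &clear_cfg, TIM_CHANNEL_1) != HAL_OK)
  {
    Error_Handler();
  }
}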
(sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + case TIM_CHANNEL_1: { - /* Enable the OCREF clear feature for Channel 3 */ - SET_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC3CE); + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 1 */ + SET_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC1CE); + } + else + { + /* Disable the OCREF clear feature for Channel 1 */ + CLEAR_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC1CE); + } + break; } - else + case TIM_CHANNEL_2: { - /* Disable the OCREF clear feature for Channel 3 */ - CLEAR_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC3CE); + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 2 */ + SET_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC2CE); + } + else + { + /* Disable the OCREF clear feature for Channel 2 */ + CLEAR_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC2CE); + } + break; } - break; - } - case TIM_CHANNEL_4: - { - if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + case TIM_CHANNEL_3: { - /* Enable the OCREF clear feature for Channel 4 */ - SET_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC4CE); + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 3 */ + SET_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC3CE); + } + else + { + /* Disable the OCREF clear feature for Channel 3 */ + CLEAR_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC3CE); + } + break; } - else + case TIM_CHANNEL_4: { - /* Disable the OCREF clear feature for Channel 4 */ - CLEAR_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC4CE); + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 4 */ + SET_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC4CE); + } + else + { + /* Disable the OCREF clear feature for Channel 4 */ + CLEAR_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC4CE); + } + break; } - break; + default: + break; } - default: - break; } htim->State = HAL_TIM_STATE_READY; __HAL_UNLOCK(htim); - return HAL_OK; + return status; } /** @@ -4478,8 +5301,9 @@ HAL_StatusTypeDef HAL_TIM_ConfigOCrefClear(TIM_HandleTypeDef *htim, * contains the clock source information for the TIM peripheral. * @retval HAL status */ -HAL_StatusTypeDef HAL_TIM_ConfigClockSource(TIM_HandleTypeDef *htim, TIM_ClockConfigTypeDef *sClockSourceConfig) +HAL_StatusTypeDef HAL_TIM_ConfigClockSource(TIM_HandleTypeDef *htim, const TIM_ClockConfigTypeDef *sClockSourceConfig) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpsmcr; /* Process Locked */ @@ -4609,13 +5433,14 @@ HAL_StatusTypeDef HAL_TIM_ConfigClockSource(TIM_HandleTypeDef *htim, TIM_ClockCo } default: + status = HAL_ERROR; break; } htim->State = HAL_TIM_STATE_READY; __HAL_UNLOCK(htim); - return HAL_OK; + return status; } /** @@ -4662,7 +5487,7 @@ HAL_StatusTypeDef HAL_TIM_ConfigTI1Input(TIM_HandleTypeDef *htim, uint32_t TI1_S * (Disable, Reset, Gated, Trigger, External clock mode 1). 
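/*
 * Illustrative usage sketch, not part of the patch: selecting external clock
 * mode 1 on TI2 with the now-const configuration structure; an unknown
 * ClockSource is reported with HAL_ERROR. The filter value is an assumption.
 */
extern void Error_Handler(void);         /* assumed application error hook */

void clock_source_example(TIM_HandleTypeDef *htim)
{
  static const TIM_ClockConfigTypeDef clk_cfg =
  {
    .ClockSource    = TIM_CLOCKSOURCE_TI2,
    .ClockPolarity  = TIM_CLOCKPOLARITY_RISING,
    .ClockPrescaler = TIM_CLOCKPRESCALER_DIV1,
    .ClockFilter    = 0U
  };

  if (HAL_TIM_ConfigClockSource(htim, &clk_cfg) != HAL_OK)
  {
    Error_Handler();
  }
}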
* @retval HAL status */ -HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro(TIM_HandleTypeDef *htim, TIM_SlaveConfigTypeDef *sSlaveConfig) +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro(TIM_HandleTypeDef *htim, const TIM_SlaveConfigTypeDef *sSlaveConfig) { /* Check the parameters */ assert_param(IS_TIM_SLAVE_INSTANCE(htim->Instance)); @@ -4703,7 +5528,7 @@ HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro(TIM_HandleTypeDef *htim, TIM_SlaveC * @retval HAL status */ HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro_IT(TIM_HandleTypeDef *htim, - TIM_SlaveConfigTypeDef *sSlaveConfig) + const TIM_SlaveConfigTypeDef *sSlaveConfig) { /* Check the parameters */ assert_param(IS_TIM_SLAVE_INSTANCE(htim->Instance)); @@ -4745,7 +5570,7 @@ HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro_IT(TIM_HandleTypeDef *htim, * @arg TIM_CHANNEL_4: TIM Channel 4 selected * @retval Captured value */ -uint32_t HAL_TIM_ReadCapturedValue(TIM_HandleTypeDef *htim, uint32_t Channel) +uint32_t HAL_TIM_ReadCapturedValue(const TIM_HandleTypeDef *htim, uint32_t Channel) { uint32_t tmpreg = 0U; @@ -5019,8 +5844,6 @@ HAL_StatusTypeDef HAL_TIM_RegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_Call { return HAL_ERROR; } - /* Process locked */ - __HAL_LOCK(htim); if (htim->State == HAL_TIM_STATE_READY) { @@ -5136,7 +5959,7 @@ HAL_StatusTypeDef HAL_TIM_RegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_Call default : /* Return error status */ - status = HAL_ERROR; + status = HAL_ERROR; break; } } @@ -5202,19 +6025,16 @@ HAL_StatusTypeDef HAL_TIM_RegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_Call default : /* Return error status */ - status = HAL_ERROR; + status = HAL_ERROR; break; } } else { /* Return error status */ - status = HAL_ERROR; + status = HAL_ERROR; } - /* Release Lock */ - __HAL_UNLOCK(htim); - return status; } @@ -5257,124 +6077,148 @@ HAL_StatusTypeDef HAL_TIM_UnRegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_Ca { HAL_StatusTypeDef status = HAL_OK; - /* Process locked */ - __HAL_LOCK(htim); - if (htim->State == HAL_TIM_STATE_READY) { switch (CallbackID) { case HAL_TIM_BASE_MSPINIT_CB_ID : - htim->Base_MspInitCallback = HAL_TIM_Base_MspInit; /* Legacy weak Base MspInit Callback */ + /* Legacy weak Base MspInit Callback */ + htim->Base_MspInitCallback = HAL_TIM_Base_MspInit; break; case HAL_TIM_BASE_MSPDEINIT_CB_ID : - htim->Base_MspDeInitCallback = HAL_TIM_Base_MspDeInit; /* Legacy weak Base Msp DeInit Callback */ + /* Legacy weak Base Msp DeInit Callback */ + htim->Base_MspDeInitCallback = HAL_TIM_Base_MspDeInit; break; case HAL_TIM_IC_MSPINIT_CB_ID : - htim->IC_MspInitCallback = HAL_TIM_IC_MspInit; /* Legacy weak IC Msp Init Callback */ + /* Legacy weak IC Msp Init Callback */ + htim->IC_MspInitCallback = HAL_TIM_IC_MspInit; break; case HAL_TIM_IC_MSPDEINIT_CB_ID : - htim->IC_MspDeInitCallback = HAL_TIM_IC_MspDeInit; /* Legacy weak IC Msp DeInit Callback */ + /* Legacy weak IC Msp DeInit Callback */ + htim->IC_MspDeInitCallback = HAL_TIM_IC_MspDeInit; break; case HAL_TIM_OC_MSPINIT_CB_ID : - htim->OC_MspInitCallback = HAL_TIM_OC_MspInit; /* Legacy weak OC Msp Init Callback */ + /* Legacy weak OC Msp Init Callback */ + htim->OC_MspInitCallback = HAL_TIM_OC_MspInit; break; case HAL_TIM_OC_MSPDEINIT_CB_ID : - htim->OC_MspDeInitCallback = HAL_TIM_OC_MspDeInit; /* Legacy weak OC Msp DeInit Callback */ + /* Legacy weak OC Msp DeInit Callback */ + htim->OC_MspDeInitCallback = HAL_TIM_OC_MspDeInit; break; case HAL_TIM_PWM_MSPINIT_CB_ID : - htim->PWM_MspInitCallback = HAL_TIM_PWM_MspInit; /* Legacy weak PWM Msp Init Callback */ + /* Legacy 
weak PWM Msp Init Callback */ + htim->PWM_MspInitCallback = HAL_TIM_PWM_MspInit; break; case HAL_TIM_PWM_MSPDEINIT_CB_ID : - htim->PWM_MspDeInitCallback = HAL_TIM_PWM_MspDeInit; /* Legacy weak PWM Msp DeInit Callback */ + /* Legacy weak PWM Msp DeInit Callback */ + htim->PWM_MspDeInitCallback = HAL_TIM_PWM_MspDeInit; break; case HAL_TIM_ONE_PULSE_MSPINIT_CB_ID : - htim->OnePulse_MspInitCallback = HAL_TIM_OnePulse_MspInit; /* Legacy weak One Pulse Msp Init Callback */ + /* Legacy weak One Pulse Msp Init Callback */ + htim->OnePulse_MspInitCallback = HAL_TIM_OnePulse_MspInit; break; case HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID : - htim->OnePulse_MspDeInitCallback = HAL_TIM_OnePulse_MspDeInit; /* Legacy weak One Pulse Msp DeInit Callback */ + /* Legacy weak One Pulse Msp DeInit Callback */ + htim->OnePulse_MspDeInitCallback = HAL_TIM_OnePulse_MspDeInit; break; case HAL_TIM_ENCODER_MSPINIT_CB_ID : - htim->Encoder_MspInitCallback = HAL_TIM_Encoder_MspInit; /* Legacy weak Encoder Msp Init Callback */ + /* Legacy weak Encoder Msp Init Callback */ + htim->Encoder_MspInitCallback = HAL_TIM_Encoder_MspInit; break; case HAL_TIM_ENCODER_MSPDEINIT_CB_ID : - htim->Encoder_MspDeInitCallback = HAL_TIM_Encoder_MspDeInit; /* Legacy weak Encoder Msp DeInit Callback */ + /* Legacy weak Encoder Msp DeInit Callback */ + htim->Encoder_MspDeInitCallback = HAL_TIM_Encoder_MspDeInit; break; case HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID : - htim->HallSensor_MspInitCallback = HAL_TIMEx_HallSensor_MspInit; /* Legacy weak Hall Sensor Msp Init Callback */ + /* Legacy weak Hall Sensor Msp Init Callback */ + htim->HallSensor_MspInitCallback = HAL_TIMEx_HallSensor_MspInit; break; case HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID : - htim->HallSensor_MspDeInitCallback = HAL_TIMEx_HallSensor_MspDeInit; /* Legacy weak Hall Sensor Msp DeInit Callback */ + /* Legacy weak Hall Sensor Msp DeInit Callback */ + htim->HallSensor_MspDeInitCallback = HAL_TIMEx_HallSensor_MspDeInit; break; case HAL_TIM_PERIOD_ELAPSED_CB_ID : - htim->PeriodElapsedCallback = HAL_TIM_PeriodElapsedCallback; /* Legacy weak Period Elapsed Callback */ + /* Legacy weak Period Elapsed Callback */ + htim->PeriodElapsedCallback = HAL_TIM_PeriodElapsedCallback; break; case HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID : - htim->PeriodElapsedHalfCpltCallback = HAL_TIM_PeriodElapsedHalfCpltCallback; /* Legacy weak Period Elapsed half complete Callback */ + /* Legacy weak Period Elapsed half complete Callback */ + htim->PeriodElapsedHalfCpltCallback = HAL_TIM_PeriodElapsedHalfCpltCallback; break; case HAL_TIM_TRIGGER_CB_ID : - htim->TriggerCallback = HAL_TIM_TriggerCallback; /* Legacy weak Trigger Callback */ + /* Legacy weak Trigger Callback */ + htim->TriggerCallback = HAL_TIM_TriggerCallback; break; case HAL_TIM_TRIGGER_HALF_CB_ID : - htim->TriggerHalfCpltCallback = HAL_TIM_TriggerHalfCpltCallback; /* Legacy weak Trigger half complete Callback */ + /* Legacy weak Trigger half complete Callback */ + htim->TriggerHalfCpltCallback = HAL_TIM_TriggerHalfCpltCallback; break; case HAL_TIM_IC_CAPTURE_CB_ID : - htim->IC_CaptureCallback = HAL_TIM_IC_CaptureCallback; /* Legacy weak IC Capture Callback */ + /* Legacy weak IC Capture Callback */ + htim->IC_CaptureCallback = HAL_TIM_IC_CaptureCallback; break; case HAL_TIM_IC_CAPTURE_HALF_CB_ID : - htim->IC_CaptureHalfCpltCallback = HAL_TIM_IC_CaptureHalfCpltCallback; /* Legacy weak IC Capture half complete Callback */ + /* Legacy weak IC Capture half complete Callback */ + htim->IC_CaptureHalfCpltCallback = HAL_TIM_IC_CaptureHalfCpltCallback; break; case 
HAL_TIM_OC_DELAY_ELAPSED_CB_ID : - htim->OC_DelayElapsedCallback = HAL_TIM_OC_DelayElapsedCallback; /* Legacy weak OC Delay Elapsed Callback */ + /* Legacy weak OC Delay Elapsed Callback */ + htim->OC_DelayElapsedCallback = HAL_TIM_OC_DelayElapsedCallback; break; case HAL_TIM_PWM_PULSE_FINISHED_CB_ID : - htim->PWM_PulseFinishedCallback = HAL_TIM_PWM_PulseFinishedCallback; /* Legacy weak PWM Pulse Finished Callback */ + /* Legacy weak PWM Pulse Finished Callback */ + htim->PWM_PulseFinishedCallback = HAL_TIM_PWM_PulseFinishedCallback; break; case HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID : - htim->PWM_PulseFinishedHalfCpltCallback = HAL_TIM_PWM_PulseFinishedHalfCpltCallback; /* Legacy weak PWM Pulse Finished half complete Callback */ + /* Legacy weak PWM Pulse Finished half complete Callback */ + htim->PWM_PulseFinishedHalfCpltCallback = HAL_TIM_PWM_PulseFinishedHalfCpltCallback; break; case HAL_TIM_ERROR_CB_ID : - htim->ErrorCallback = HAL_TIM_ErrorCallback; /* Legacy weak Error Callback */ + /* Legacy weak Error Callback */ + htim->ErrorCallback = HAL_TIM_ErrorCallback; break; case HAL_TIM_COMMUTATION_CB_ID : - htim->CommutationCallback = HAL_TIMEx_CommutCallback; /* Legacy weak Commutation Callback */ + /* Legacy weak Commutation Callback */ + htim->CommutationCallback = HAL_TIMEx_CommutCallback; break; case HAL_TIM_COMMUTATION_HALF_CB_ID : - htim->CommutationHalfCpltCallback = HAL_TIMEx_CommutHalfCpltCallback; /* Legacy weak Commutation half complete Callback */ + /* Legacy weak Commutation half complete Callback */ + htim->CommutationHalfCpltCallback = HAL_TIMEx_CommutHalfCpltCallback; break; case HAL_TIM_BREAK_CB_ID : - htim->BreakCallback = HAL_TIMEx_BreakCallback; /* Legacy weak Break Callback */ + /* Legacy weak Break Callback */ + htim->BreakCallback = HAL_TIMEx_BreakCallback; break; default : /* Return error status */ - status = HAL_ERROR; + status = HAL_ERROR; break; } } @@ -5383,76 +6227,87 @@ HAL_StatusTypeDef HAL_TIM_UnRegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_Ca switch (CallbackID) { case HAL_TIM_BASE_MSPINIT_CB_ID : - htim->Base_MspInitCallback = HAL_TIM_Base_MspInit; /* Legacy weak Base MspInit Callback */ + /* Legacy weak Base MspInit Callback */ + htim->Base_MspInitCallback = HAL_TIM_Base_MspInit; break; case HAL_TIM_BASE_MSPDEINIT_CB_ID : - htim->Base_MspDeInitCallback = HAL_TIM_Base_MspDeInit; /* Legacy weak Base Msp DeInit Callback */ + /* Legacy weak Base Msp DeInit Callback */ + htim->Base_MspDeInitCallback = HAL_TIM_Base_MspDeInit; break; case HAL_TIM_IC_MSPINIT_CB_ID : - htim->IC_MspInitCallback = HAL_TIM_IC_MspInit; /* Legacy weak IC Msp Init Callback */ + /* Legacy weak IC Msp Init Callback */ + htim->IC_MspInitCallback = HAL_TIM_IC_MspInit; break; case HAL_TIM_IC_MSPDEINIT_CB_ID : - htim->IC_MspDeInitCallback = HAL_TIM_IC_MspDeInit; /* Legacy weak IC Msp DeInit Callback */ + /* Legacy weak IC Msp DeInit Callback */ + htim->IC_MspDeInitCallback = HAL_TIM_IC_MspDeInit; break; case HAL_TIM_OC_MSPINIT_CB_ID : - htim->OC_MspInitCallback = HAL_TIM_OC_MspInit; /* Legacy weak OC Msp Init Callback */ + /* Legacy weak OC Msp Init Callback */ + htim->OC_MspInitCallback = HAL_TIM_OC_MspInit; break; case HAL_TIM_OC_MSPDEINIT_CB_ID : - htim->OC_MspDeInitCallback = HAL_TIM_OC_MspDeInit; /* Legacy weak OC Msp DeInit Callback */ + /* Legacy weak OC Msp DeInit Callback */ + htim->OC_MspDeInitCallback = HAL_TIM_OC_MspDeInit; break; case HAL_TIM_PWM_MSPINIT_CB_ID : - htim->PWM_MspInitCallback = HAL_TIM_PWM_MspInit; /* Legacy weak PWM Msp Init Callback */ + /* Legacy weak PWM 
Msp Init Callback */ + htim->PWM_MspInitCallback = HAL_TIM_PWM_MspInit; break; case HAL_TIM_PWM_MSPDEINIT_CB_ID : - htim->PWM_MspDeInitCallback = HAL_TIM_PWM_MspDeInit; /* Legacy weak PWM Msp DeInit Callback */ + /* Legacy weak PWM Msp DeInit Callback */ + htim->PWM_MspDeInitCallback = HAL_TIM_PWM_MspDeInit; break; case HAL_TIM_ONE_PULSE_MSPINIT_CB_ID : - htim->OnePulse_MspInitCallback = HAL_TIM_OnePulse_MspInit; /* Legacy weak One Pulse Msp Init Callback */ + /* Legacy weak One Pulse Msp Init Callback */ + htim->OnePulse_MspInitCallback = HAL_TIM_OnePulse_MspInit; break; case HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID : - htim->OnePulse_MspDeInitCallback = HAL_TIM_OnePulse_MspDeInit; /* Legacy weak One Pulse Msp DeInit Callback */ + /* Legacy weak One Pulse Msp DeInit Callback */ + htim->OnePulse_MspDeInitCallback = HAL_TIM_OnePulse_MspDeInit; break; case HAL_TIM_ENCODER_MSPINIT_CB_ID : - htim->Encoder_MspInitCallback = HAL_TIM_Encoder_MspInit; /* Legacy weak Encoder Msp Init Callback */ + /* Legacy weak Encoder Msp Init Callback */ + htim->Encoder_MspInitCallback = HAL_TIM_Encoder_MspInit; break; case HAL_TIM_ENCODER_MSPDEINIT_CB_ID : - htim->Encoder_MspDeInitCallback = HAL_TIM_Encoder_MspDeInit; /* Legacy weak Encoder Msp DeInit Callback */ + /* Legacy weak Encoder Msp DeInit Callback */ + htim->Encoder_MspDeInitCallback = HAL_TIM_Encoder_MspDeInit; break; case HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID : - htim->HallSensor_MspInitCallback = HAL_TIMEx_HallSensor_MspInit; /* Legacy weak Hall Sensor Msp Init Callback */ + /* Legacy weak Hall Sensor Msp Init Callback */ + htim->HallSensor_MspInitCallback = HAL_TIMEx_HallSensor_MspInit; break; case HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID : - htim->HallSensor_MspDeInitCallback = HAL_TIMEx_HallSensor_MspDeInit; /* Legacy weak Hall Sensor Msp DeInit Callback */ + /* Legacy weak Hall Sensor Msp DeInit Callback */ + htim->HallSensor_MspDeInitCallback = HAL_TIMEx_HallSensor_MspDeInit; break; default : /* Return error status */ - status = HAL_ERROR; + status = HAL_ERROR; break; } } else { /* Return error status */ - status = HAL_ERROR; + status = HAL_ERROR; } - /* Release Lock */ - __HAL_UNLOCK(htim); - return status; } #endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ @@ -5481,7 +6336,7 @@ HAL_StatusTypeDef HAL_TIM_UnRegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_Ca * @param htim TIM Base handle * @retval HAL state */ -HAL_TIM_StateTypeDef HAL_TIM_Base_GetState(TIM_HandleTypeDef *htim) +HAL_TIM_StateTypeDef HAL_TIM_Base_GetState(const TIM_HandleTypeDef *htim) { return htim->State; } @@ -5491,7 +6346,7 @@ HAL_TIM_StateTypeDef HAL_TIM_Base_GetState(TIM_HandleTypeDef *htim) * @param htim TIM Output Compare handle * @retval HAL state */ -HAL_TIM_StateTypeDef HAL_TIM_OC_GetState(TIM_HandleTypeDef *htim) +HAL_TIM_StateTypeDef HAL_TIM_OC_GetState(const TIM_HandleTypeDef *htim) { return htim->State; } @@ -5501,7 +6356,7 @@ HAL_TIM_StateTypeDef HAL_TIM_OC_GetState(TIM_HandleTypeDef *htim) * @param htim TIM handle * @retval HAL state */ -HAL_TIM_StateTypeDef HAL_TIM_PWM_GetState(TIM_HandleTypeDef *htim) +HAL_TIM_StateTypeDef HAL_TIM_PWM_GetState(const TIM_HandleTypeDef *htim) { return htim->State; } @@ -5511,7 +6366,7 @@ HAL_TIM_StateTypeDef HAL_TIM_PWM_GetState(TIM_HandleTypeDef *htim) * @param htim TIM IC handle * @retval HAL state */ -HAL_TIM_StateTypeDef HAL_TIM_IC_GetState(TIM_HandleTypeDef *htim) +HAL_TIM_StateTypeDef HAL_TIM_IC_GetState(const TIM_HandleTypeDef *htim) { return htim->State; } @@ -5521,7 +6376,7 @@ HAL_TIM_StateTypeDef HAL_TIM_IC_GetState(TIM_HandleTypeDef 
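/*
 * Illustrative usage sketch, not part of the patch: with
 * USE_HAL_TIM_REGISTER_CALLBACKS set to 1 the register/unregister functions
 * above no longer take the handle lock; a callback is registered while the
 * handle state is READY. Callback and function names are assumptions.
 */
#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1)
extern void Error_Handler(void);         /* assumed application error hook */

static void app_period_elapsed(TIM_HandleTypeDef *htim)
{
  (void)htim;                            /* application-specific work */
}

void register_callback_example(TIM_HandleTypeDef *htim)
{
  if (HAL_TIM_RegisterCallback(htim, HAL_TIM_PERIOD_ELAPSED_CB_ID,
                               app_period_elapsed) != HAL_OK)
  {
    Error_Handler();
  }
}
#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */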
*htim) * @param htim TIM OPM handle * @retval HAL state */ -HAL_TIM_StateTypeDef HAL_TIM_OnePulse_GetState(TIM_HandleTypeDef *htim) +HAL_TIM_StateTypeDef HAL_TIM_OnePulse_GetState(const TIM_HandleTypeDef *htim) { return htim->State; } @@ -5531,11 +6386,59 @@ HAL_TIM_StateTypeDef HAL_TIM_OnePulse_GetState(TIM_HandleTypeDef *htim) * @param htim TIM Encoder Interface handle * @retval HAL state */ -HAL_TIM_StateTypeDef HAL_TIM_Encoder_GetState(TIM_HandleTypeDef *htim) +HAL_TIM_StateTypeDef HAL_TIM_Encoder_GetState(const TIM_HandleTypeDef *htim) { return htim->State; } +/** + * @brief Return the TIM Encoder Mode handle state. + * @param htim TIM handle + * @retval Active channel + */ +HAL_TIM_ActiveChannel HAL_TIM_GetActiveChannel(const TIM_HandleTypeDef *htim) +{ + return htim->Channel; +} + +/** + * @brief Return actual state of the TIM channel. + * @param htim TIM handle + * @param Channel TIM Channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @arg TIM_CHANNEL_4: TIM Channel 4 + * @arg TIM_CHANNEL_5: TIM Channel 5 + * @arg TIM_CHANNEL_6: TIM Channel 6 + * @retval TIM Channel state + */ +HAL_TIM_ChannelStateTypeDef HAL_TIM_GetChannelState(const TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_TIM_ChannelStateTypeDef channel_state; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + channel_state = TIM_CHANNEL_STATE_GET(htim, Channel); + + return channel_state; +} + +/** + * @brief Return actual state of a DMA burst operation. + * @param htim TIM handle + * @retval DMA burst state + */ +HAL_TIM_DMABurstStateTypeDef HAL_TIM_DMABurstState(const TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_DMABURST_INSTANCE(htim->Instance)); + + return htim->DMABurstState; +} + /** * @} */ @@ -5557,13 +6460,38 @@ void TIM_DMAError(DMA_HandleTypeDef *hdma) { TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; - htim->State = HAL_TIM_STATE_READY; + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_4, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + htim->State = HAL_TIM_STATE_READY; + } #if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) htim->ErrorCallback(htim); #else HAL_TIM_ErrorCallback(htim); #endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; } /** @@ -5571,27 +6499,45 @@ void TIM_DMAError(DMA_HandleTypeDef *hdma) * @param hdma pointer to DMA handle. 
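/*
 * Illustrative usage sketch, not part of the patch: the new getters expose the
 * per-channel and DMA-burst states so application code no longer has to rely
 * on the global handle state before (re)starting an operation.
 */
void state_poll_example(const TIM_HandleTypeDef *htim)
{
  if ((HAL_TIM_GetChannelState(htim, TIM_CHANNEL_1) == HAL_TIM_CHANNEL_STATE_READY)
      && (HAL_TIM_DMABurstState(htim) == HAL_DMA_BURST_STATE_READY))
  {
    /* safe to start channel 1 or a new DMA burst on this timer */
  }
}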
* @retval None */ -void TIM_DMADelayPulseCplt(DMA_HandleTypeDef *hdma) +static void TIM_DMADelayPulseCplt(DMA_HandleTypeDef *hdma) { TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; - htim->State = HAL_TIM_STATE_READY; - if (hdma == htim->hdma[TIM_DMA_ID_CC1]) { htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } } else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) { htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } } else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) { htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } } else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) { htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_4, HAL_TIM_CHANNEL_STATE_READY); + } } else { @@ -5616,8 +6562,6 @@ void TIM_DMADelayPulseHalfCplt(DMA_HandleTypeDef *hdma) { TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; - htim->State = HAL_TIM_STATE_READY; - if (hdma == htim->hdma[TIM_DMA_ID_CC1]) { htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; @@ -5657,23 +6601,45 @@ void TIM_DMACaptureCplt(DMA_HandleTypeDef *hdma) { TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; - htim->State = HAL_TIM_STATE_READY; - if (hdma == htim->hdma[TIM_DMA_ID_CC1]) { htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } } else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) { htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } } else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) { htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } } else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) { htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_4, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_4, HAL_TIM_CHANNEL_STATE_READY); + } } else { @@ -5698,8 +6664,6 @@ void TIM_DMACaptureHalfCplt(DMA_HandleTypeDef *hdma) { TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; - htim->State = HAL_TIM_STATE_READY; - if (hdma == htim->hdma[TIM_DMA_ID_CC1]) { htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; @@ -5739,7 +6703,10 @@ static void TIM_DMAPeriodElapsedCplt(DMA_HandleTypeDef *hdma) { TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; - htim->State = HAL_TIM_STATE_READY; + if (htim->hdma[TIM_DMA_ID_UPDATE]->Init.Mode == DMA_NORMAL) + { + htim->State = HAL_TIM_STATE_READY; + } #if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) htim->PeriodElapsedCallback(htim); @@ -5757,8 +6724,6 @@ static void TIM_DMAPeriodElapsedHalfCplt(DMA_HandleTypeDef *hdma) { TIM_HandleTypeDef *htim = 
(TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; - htim->State = HAL_TIM_STATE_READY; - #if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) htim->PeriodElapsedHalfCpltCallback(htim); #else @@ -5775,7 +6740,10 @@ static void TIM_DMATriggerCplt(DMA_HandleTypeDef *hdma) { TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; - htim->State = HAL_TIM_STATE_READY; + if (htim->hdma[TIM_DMA_ID_TRIGGER]->Init.Mode == DMA_NORMAL) + { + htim->State = HAL_TIM_STATE_READY; + } #if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) htim->TriggerCallback(htim); @@ -5793,8 +6761,6 @@ static void TIM_DMATriggerHalfCplt(DMA_HandleTypeDef *hdma) { TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; - htim->State = HAL_TIM_STATE_READY; - #if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) htim->TriggerHalfCpltCallback(htim); #else @@ -5808,7 +6774,7 @@ static void TIM_DMATriggerHalfCplt(DMA_HandleTypeDef *hdma) * @param Structure TIM Base configuration structure * @retval None */ -void TIM_Base_SetConfig(TIM_TypeDef *TIMx, TIM_Base_InitTypeDef *Structure) +void TIM_Base_SetConfig(TIM_TypeDef *TIMx, const TIM_Base_InitTypeDef *Structure) { uint32_t tmpcr1; tmpcr1 = TIMx->CR1; @@ -5848,25 +6814,33 @@ void TIM_Base_SetConfig(TIM_TypeDef *TIMx, TIM_Base_InitTypeDef *Structure) /* Generate an update event to reload the Prescaler and the repetition counter (only for advanced timer) value immediately */ TIMx->EGR = TIM_EGR_UG; + + /* Check if the update flag is set after the Update Generation, if so clear the UIF flag */ + if (HAL_IS_BIT_SET(TIMx->SR, TIM_FLAG_UPDATE)) + { + /* Clear the update flag */ + CLEAR_BIT(TIMx->SR, TIM_FLAG_UPDATE); + } } /** * @brief Timer Output Compare 1 configuration * @param TIMx to select the TIM peripheral - * @param OC_Config The ouput configuration structure + * @param OC_Config The output configuration structure * @retval None */ -static void TIM_OC1_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) +static void TIM_OC1_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config) { uint32_t tmpccmrx; uint32_t tmpccer; uint32_t tmpcr2; + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Disable the Channel 1: Reset the CC1E Bit */ TIMx->CCER &= ~TIM_CCER_CC1E; - /* Get the TIMx CCER register value */ - tmpccer = TIMx->CCER; /* Get the TIMx CR2 register value */ tmpcr2 = TIMx->CR2; @@ -5928,20 +6902,21 @@ static void TIM_OC1_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) /** * @brief Timer Output Compare 2 configuration * @param TIMx to select the TIM peripheral - * @param OC_Config The ouput configuration structure + * @param OC_Config The output configuration structure * @retval None */ -void TIM_OC2_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) +void TIM_OC2_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config) { uint32_t tmpccmrx; uint32_t tmpccer; uint32_t tmpcr2; + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Disable the Channel 2: Reset the CC2E Bit */ TIMx->CCER &= ~TIM_CCER_CC2E; - /* Get the TIMx CCER register value */ - tmpccer = TIMx->CCER; /* Get the TIMx CR2 register value */ tmpcr2 = TIMx->CR2; @@ -5970,7 +6945,6 @@ void TIM_OC2_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) tmpccer |= (OC_Config->OCNPolarity << 4U); /* Reset the Output N State */ tmpccer &= ~TIM_CCER_CC2NE; - } if (IS_TIM_BREAK_INSTANCE(TIMx)) @@ -6004,20 +6978,21 @@ void TIM_OC2_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) /** * @brief Timer 
Output Compare 3 configuration * @param TIMx to select the TIM peripheral - * @param OC_Config The ouput configuration structure + * @param OC_Config The output configuration structure * @retval None */ -static void TIM_OC3_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) +static void TIM_OC3_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config) { uint32_t tmpccmrx; uint32_t tmpccer; uint32_t tmpcr2; + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Disable the Channel 3: Reset the CC2E Bit */ TIMx->CCER &= ~TIM_CCER_CC3E; - /* Get the TIMx CCER register value */ - tmpccer = TIMx->CCER; /* Get the TIMx CR2 register value */ tmpcr2 = TIMx->CR2; @@ -6078,20 +7053,21 @@ static void TIM_OC3_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) /** * @brief Timer Output Compare 4 configuration * @param TIMx to select the TIM peripheral - * @param OC_Config The ouput configuration structure + * @param OC_Config The output configuration structure * @retval None */ -static void TIM_OC4_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) +static void TIM_OC4_SetConfig(TIM_TypeDef *TIMx, const TIM_OC_InitTypeDef *OC_Config) { uint32_t tmpccmrx; uint32_t tmpccer; uint32_t tmpcr2; + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Disable the Channel 4: Reset the CC4E Bit */ TIMx->CCER &= ~TIM_CCER_CC4E; - /* Get the TIMx CCER register value */ - tmpccer = TIMx->CCER; /* Get the TIMx CR2 register value */ tmpcr2 = TIMx->CR2; @@ -6142,8 +7118,9 @@ static void TIM_OC4_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) * @retval None */ static HAL_StatusTypeDef TIM_SlaveTimer_SetConfig(TIM_HandleTypeDef *htim, - TIM_SlaveConfigTypeDef *sSlaveConfig) + const TIM_SlaveConfigTypeDef *sSlaveConfig) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpsmcr; uint32_t tmpccmr1; uint32_t tmpccer; @@ -6188,7 +7165,7 @@ static HAL_StatusTypeDef TIM_SlaveTimer_SetConfig(TIM_HandleTypeDef *htim, assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); assert_param(IS_TIM_TRIGGERFILTER(sSlaveConfig->TriggerFilter)); - if(sSlaveConfig->SlaveMode == TIM_SLAVEMODE_GATED) + if (sSlaveConfig->SlaveMode == TIM_SLAVEMODE_GATED) { return HAL_ERROR; } @@ -6247,9 +7224,11 @@ static HAL_StatusTypeDef TIM_SlaveTimer_SetConfig(TIM_HandleTypeDef *htim, } default: + status = HAL_ERROR; break; } - return HAL_OK; + + return status; } /** @@ -6279,9 +7258,9 @@ void TIM_TI1_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ uint32_t tmpccer; /* Disable the Channel 1: Reset the CC1E Bit */ + tmpccer = TIMx->CCER; TIMx->CCER &= ~TIM_CCER_CC1E; tmpccmr1 = TIMx->CCMR1; - tmpccer = TIMx->CCER; /* Select the Input */ if (IS_TIM_CC2_INSTANCE(TIMx) != RESET) @@ -6369,9 +7348,9 @@ static void TIM_TI2_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32 uint32_t tmpccer; /* Disable the Channel 2: Reset the CC2E Bit */ + tmpccer = TIMx->CCER; TIMx->CCER &= ~TIM_CCER_CC2E; tmpccmr1 = TIMx->CCMR1; - tmpccer = TIMx->CCER; /* Select the Input */ tmpccmr1 &= ~TIM_CCMR1_CC2S; @@ -6408,9 +7387,9 @@ static void TIM_TI2_ConfigInputStage(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t tmpccer; /* Disable the Channel 2: Reset the CC2E Bit */ + tmpccer = TIMx->CCER; TIMx->CCER &= ~TIM_CCER_CC2E; tmpccmr1 = TIMx->CCMR1; - tmpccer = TIMx->CCER; /* Set the filter */ tmpccmr1 &= ~TIM_CCMR1_IC2F; @@ -6452,9 +7431,9 @@ static void TIM_TI3_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32 uint32_t tmpccer; /* Disable the Channel 3: Reset the CC3E 
Bit */ + tmpccer = TIMx->CCER; TIMx->CCER &= ~TIM_CCER_CC3E; tmpccmr2 = TIMx->CCMR2; - tmpccer = TIMx->CCER; /* Select the Input */ tmpccmr2 &= ~TIM_CCMR2_CC3S; @@ -6500,9 +7479,9 @@ static void TIM_TI4_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32 uint32_t tmpccer; /* Disable the Channel 4: Reset the CC4E Bit */ + tmpccer = TIMx->CCER; TIMx->CCER &= ~TIM_CCER_CC4E; tmpccmr2 = TIMx->CCMR2; - tmpccer = TIMx->CCER; /* Select the Input */ tmpccmr2 &= ~TIM_CCMR2_CC4S; @@ -6623,19 +7602,19 @@ void TIM_CCxChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ChannelStat void TIM_ResetCallback(TIM_HandleTypeDef *htim) { /* Reset the TIM callback to the legacy weak callbacks */ - htim->PeriodElapsedCallback = HAL_TIM_PeriodElapsedCallback; /* Legacy weak PeriodElapsedCallback */ - htim->PeriodElapsedHalfCpltCallback = HAL_TIM_PeriodElapsedHalfCpltCallback; /* Legacy weak PeriodElapsedHalfCpltCallback */ - htim->TriggerCallback = HAL_TIM_TriggerCallback; /* Legacy weak TriggerCallback */ - htim->TriggerHalfCpltCallback = HAL_TIM_TriggerHalfCpltCallback; /* Legacy weak TriggerHalfCpltCallback */ - htim->IC_CaptureCallback = HAL_TIM_IC_CaptureCallback; /* Legacy weak IC_CaptureCallback */ - htim->IC_CaptureHalfCpltCallback = HAL_TIM_IC_CaptureHalfCpltCallback; /* Legacy weak IC_CaptureHalfCpltCallback */ - htim->OC_DelayElapsedCallback = HAL_TIM_OC_DelayElapsedCallback; /* Legacy weak OC_DelayElapsedCallback */ - htim->PWM_PulseFinishedCallback = HAL_TIM_PWM_PulseFinishedCallback; /* Legacy weak PWM_PulseFinishedCallback */ - htim->PWM_PulseFinishedHalfCpltCallback = HAL_TIM_PWM_PulseFinishedHalfCpltCallback; /* Legacy weak PWM_PulseFinishedHalfCpltCallback */ - htim->ErrorCallback = HAL_TIM_ErrorCallback; /* Legacy weak ErrorCallback */ - htim->CommutationCallback = HAL_TIMEx_CommutCallback; /* Legacy weak CommutationCallback */ - htim->CommutationHalfCpltCallback = HAL_TIMEx_CommutHalfCpltCallback; /* Legacy weak CommutationHalfCpltCallback */ - htim->BreakCallback = HAL_TIMEx_BreakCallback; /* Legacy weak BreakCallback */ + htim->PeriodElapsedCallback = HAL_TIM_PeriodElapsedCallback; + htim->PeriodElapsedHalfCpltCallback = HAL_TIM_PeriodElapsedHalfCpltCallback; + htim->TriggerCallback = HAL_TIM_TriggerCallback; + htim->TriggerHalfCpltCallback = HAL_TIM_TriggerHalfCpltCallback; + htim->IC_CaptureCallback = HAL_TIM_IC_CaptureCallback; + htim->IC_CaptureHalfCpltCallback = HAL_TIM_IC_CaptureHalfCpltCallback; + htim->OC_DelayElapsedCallback = HAL_TIM_OC_DelayElapsedCallback; + htim->PWM_PulseFinishedCallback = HAL_TIM_PWM_PulseFinishedCallback; + htim->PWM_PulseFinishedHalfCpltCallback = HAL_TIM_PWM_PulseFinishedHalfCpltCallback; + htim->ErrorCallback = HAL_TIM_ErrorCallback; + htim->CommutationCallback = HAL_TIMEx_CommutCallback; + htim->CommutationHalfCpltCallback = HAL_TIMEx_CommutHalfCpltCallback; + htim->BreakCallback = HAL_TIMEx_BreakCallback; } #endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ @@ -6651,4 +7630,3 @@ void TIM_ResetCallback(TIM_HandleTypeDef *htim) /** * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim_ex.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim_ex.c index 99a13c0f..889f8fb9 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim_ex.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_tim_ex.c @@ -10,6 +10,17 @@ * + Time Complementary signal break and dead time configuration * + Time Master and Slave synchronization configuration * + Timer 
remapping capabilities configuration + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim ============================================================================== ##### TIMER Extended features ##### @@ -54,24 +65,16 @@ the commutation event). (#) Activate the TIM peripheral using one of the start functions: - (++) Complementary Output Compare : HAL_TIMEx_OCN_Start(), HAL_TIMEx_OCN_Start_DMA(), HAL_TIMEx_OC_Start_IT() - (++) Complementary PWM generation : HAL_TIMEx_PWMN_Start(), HAL_TIMEx_PWMN_Start_DMA(), HAL_TIMEx_PWMN_Start_IT() + (++) Complementary Output Compare : HAL_TIMEx_OCN_Start(), HAL_TIMEx_OCN_Start_DMA(), + HAL_TIMEx_OCN_Start_IT() + (++) Complementary PWM generation : HAL_TIMEx_PWMN_Start(), HAL_TIMEx_PWMN_Start_DMA(), + HAL_TIMEx_PWMN_Start_IT() (++) Complementary One-pulse mode output : HAL_TIMEx_OnePulseN_Start(), HAL_TIMEx_OnePulseN_Start_IT() - (++) Hall Sensor output : HAL_TIMEx_HallSensor_Start(), HAL_TIMEx_HallSensor_Start_DMA(), HAL_TIMEx_HallSensor_Start_IT(). + (++) Hall Sensor output : HAL_TIMEx_HallSensor_Start(), HAL_TIMEx_HallSensor_Start_DMA(), + HAL_TIMEx_HallSensor_Start_IT(). @endverbatim ****************************************************************************** - * @attention - * - *

© Copyright (c) 2016 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * - ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ @@ -90,9 +93,11 @@ /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ -/* Private macro -------------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /* Private function prototypes -----------------------------------------------*/ +static void TIM_DMADelayPulseNCplt(DMA_HandleTypeDef *hdma); +static void TIM_DMAErrorCCxN(DMA_HandleTypeDef *hdma); static void TIM_CCxNChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ChannelNState); /* Exported functions --------------------------------------------------------*/ @@ -123,11 +128,14 @@ static void TIM_CCxNChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t Cha */ /** * @brief Initializes the TIM Hall Sensor Interface and initialize the associated handle. + * @note When the timer instance is initialized in Hall Sensor Interface mode, + * timer channels 1 and channel 2 are reserved and cannot be used for + * other purpose. * @param htim TIM Hall Sensor Interface handle * @param sConfig TIM Hall Sensor configuration structure * @retval HAL status */ -HAL_StatusTypeDef HAL_TIMEx_HallSensor_Init(TIM_HandleTypeDef *htim, TIM_HallSensor_InitTypeDef *sConfig) +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Init(TIM_HandleTypeDef *htim, const TIM_HallSensor_InitTypeDef *sConfig) { TIM_OC_InitTypeDef OC_Config; @@ -143,6 +151,7 @@ HAL_StatusTypeDef HAL_TIMEx_HallSensor_Init(TIM_HandleTypeDef *htim, TIM_HallSen assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); assert_param(IS_TIM_IC_POLARITY(sConfig->IC1Polarity)); + assert_param(IS_TIM_PERIOD(htim, htim->Init.Period)); assert_param(IS_TIM_IC_PRESCALER(sConfig->IC1Prescaler)); assert_param(IS_TIM_IC_FILTER(sConfig->IC1Filter)); @@ -208,6 +217,15 @@ HAL_StatusTypeDef HAL_TIMEx_HallSensor_Init(TIM_HandleTypeDef *htim, TIM_HallSen htim->Instance->CR2 &= ~TIM_CR2_MMS; htim->Instance->CR2 |= TIM_TRGO_OC2REF; + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + /* Initialize the TIM state*/ htim->State = HAL_TIM_STATE_READY; @@ -241,6 +259,15 @@ HAL_StatusTypeDef HAL_TIMEx_HallSensor_DeInit(TIM_HandleTypeDef *htim) HAL_TIMEx_HallSensor_MspDeInit(htim); #endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, 
HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + /* Change TIM state */ htim->State = HAL_TIM_STATE_RESET; @@ -288,17 +315,44 @@ __weak void HAL_TIMEx_HallSensor_MspDeInit(TIM_HandleTypeDef *htim) HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start(TIM_HandleTypeDef *htim) { uint32_t tmpsmcr; + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); /* Check the parameters */ assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + /* Enable the Input Capture channel 1 - (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else { __HAL_TIM_ENABLE(htim); } @@ -318,12 +372,19 @@ HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop(TIM_HandleTypeDef *htim) assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); /* Disable the Input Capture channels 1, 2 and 3 - (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + /* Return function status */ return HAL_OK; } @@ -336,20 +397,47 @@ HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop(TIM_HandleTypeDef *htim) HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_IT(TIM_HandleTypeDef *htim) { uint32_t tmpsmcr; + 
HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); /* Check the parameters */ assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + /* Enable the capture compare Interrupts 1 event */ __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); /* Enable the Input Capture channel 1 - (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else { __HAL_TIM_ENABLE(htim); } @@ -369,7 +457,8 @@ HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_IT(TIM_HandleTypeDef *htim) assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); /* Disable the Input Capture channel 1 - (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); /* Disable the capture compare Interrupts event */ @@ -378,6 +467,12 @@ HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_IT(TIM_HandleTypeDef *htim) /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + /* Return function status */ return HAL_OK; } @@ -392,31 +487,39 @@ HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_IT(TIM_HandleTypeDef *htim) HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pData, uint16_t Length) { uint32_t tmpsmcr; + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, 
TIM_CHANNEL_1); /* Check the parameters */ assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); - if (htim->State == HAL_TIM_STATE_BUSY) + /* Set the TIM channel state */ + if ((channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY)) { return HAL_BUSY; } - else if (htim->State == HAL_TIM_STATE_READY) + else if ((channel_1_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_READY)) { - if (((uint32_t)pData == 0U) && (Length > 0U)) + if ((pData == NULL) || (Length == 0U)) { return HAL_ERROR; } else { - htim->State = HAL_TIM_STATE_BUSY; + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); } } else { - /* nothing to do */ + return HAL_ERROR; } + /* Enable the Input Capture channel 1 - (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); /* Set the DMA Input Capture 1 Callbacks */ @@ -428,14 +531,22 @@ HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_DMA(TIM_HandleTypeDef *htim, uint32 /* Enable the DMA stream for Capture 1*/ if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData, Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the capture compare 1 Interrupt */ __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else { __HAL_TIM_ENABLE(htim); } @@ -455,7 +566,8 @@ HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_DMA(TIM_HandleTypeDef *htim) assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); /* Disable the Input Capture channel 1 - (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); @@ -463,9 +575,14 @@ HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_DMA(TIM_HandleTypeDef *htim) __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + /* Return function status */ return HAL_OK; } @@ -512,6 +629,15 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Start(TIM_HandleTypeDef *htim, uint32_t Channel) /* Check the parameters */ assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + /* Check the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, 
HAL_TIM_CHANNEL_STATE_BUSY); + /* Enable the Capture compare channel N */ TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); @@ -519,8 +645,15 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Start(TIM_HandleTypeDef *htim, uint32_t Channel) __HAL_TIM_MOE_ENABLE(htim); /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else { __HAL_TIM_ENABLE(htim); } @@ -554,6 +687,9 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + /* Return function status */ return HAL_OK; } @@ -571,11 +707,21 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) */ HAL_StatusTypeDef HAL_TIMEx_OCN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpsmcr; /* Check the parameters */ assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + /* Check the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + switch (Channel) { case TIM_CHANNEL_1: @@ -601,27 +747,38 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Chann default: + status = HAL_ERROR; break; } - /* Enable the TIM Break interrupt */ - __HAL_TIM_ENABLE_IT(htim, TIM_IT_BREAK); + if (status == HAL_OK) + { + /* Enable the TIM Break interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_BREAK); - /* Enable the Capture compare channel N */ - TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + /* Enable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); - /* Enable the Main Output */ - __HAL_TIM_MOE_ENABLE(htim); + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); - /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) - { - __HAL_TIM_ENABLE(htim); + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } } /* Return function status */ - return HAL_OK; + return status; } /** @@ -637,7 +794,9 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Chann */ HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpccer; + /* Check the parameters */ assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); @@ -665,27 +824,34 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channe } default: + status = HAL_ERROR; break; } - /* Disable the Capture compare channel N */ - TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); - 
- /* Disable the TIM Break interrupt (only if no more channel is active) */ - tmpccer = htim->Instance->CCER; - if ((tmpccer & (TIM_CCER_CC1NE | TIM_CCER_CC2NE | TIM_CCER_CC3NE)) == (uint32_t)RESET) + if (status == HAL_OK) { - __HAL_TIM_DISABLE_IT(htim, TIM_IT_BREAK); - } + /* Disable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); - /* Disable the Main Output */ - __HAL_TIM_MOE_DISABLE(htim); + /* Disable the TIM Break interrupt (only if no more channel is active) */ + tmpccer = htim->Instance->CCER; + if ((tmpccer & TIM_CCER_CCxNE_MASK) == (uint32_t)RESET) + { + __HAL_TIM_DISABLE_IT(htim, TIM_IT_BREAK); + } - /* Disable the Peripheral */ - __HAL_TIM_DISABLE(htim); + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } /* Return function status */ - return HAL_OK; + return status; } /** @@ -701,31 +867,34 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channe * @param Length The length of data to be transferred from memory to TIM peripheral * @retval HAL status */ -HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpsmcr; /* Check the parameters */ assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); - if (htim->State == HAL_TIM_STATE_BUSY) + /* Set the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_BUSY) { return HAL_BUSY; } - else if (htim->State == HAL_TIM_STATE_READY) + else if (TIM_CHANNEL_N_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_READY) { - if (((uint32_t)pData == 0U) && (Length > 0U)) + if ((pData == NULL) || (Length == 0U)) { return HAL_ERROR; } else { - htim->State = HAL_TIM_STATE_BUSY; + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); } } else { - /* nothing to do */ + return HAL_ERROR; } switch (Channel) @@ -733,15 +902,17 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Chan case TIM_CHANNEL_1: { /* Set the DMA compare callbacks */ - htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseNCplt; htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; /* Set the DMA error callback */ - htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAErrorCCxN ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Output Compare DMA request */ @@ -752,15 +923,17 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Chan case TIM_CHANNEL_2: { /* Set the DMA compare callbacks */ - htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseNCplt; htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = 
TIM_DMADelayPulseHalfCplt; /* Set the DMA error callback */ - htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAErrorCCxN ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Output Compare DMA request */ @@ -771,15 +944,17 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Chan case TIM_CHANNEL_3: { /* Set the DMA compare callbacks */ - htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseNCplt; htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; /* Set the DMA error callback */ - htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAErrorCCxN ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Output Compare DMA request */ @@ -788,24 +963,35 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Chan } default: + status = HAL_ERROR; break; } - /* Enable the Capture compare channel N */ - TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + if (status == HAL_OK) + { + /* Enable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); - /* Enable the Main Output */ - __HAL_TIM_MOE_ENABLE(htim); + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); - /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) - { - __HAL_TIM_ENABLE(htim); + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } } /* Return function status */ - return HAL_OK; + return status; } /** @@ -821,6 +1007,8 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Chan */ HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); @@ -851,23 +1039,27 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Chann } default: + status = HAL_ERROR; break; } - /* Disable the Capture compare channel N */ - TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + if (status == HAL_OK) + { + /* Disable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); - /* Disable the Main Output */ - __HAL_TIM_MOE_DISABLE(htim); + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); - /* Disable the Peripheral */ - __HAL_TIM_DISABLE(htim); + /* Disable the Peripheral */ 
+ __HAL_TIM_DISABLE(htim); - /* Change the htim state */ - htim->State = HAL_TIM_STATE_READY; + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } /* Return function status */ - return HAL_OK; + return status; } /** @@ -889,17 +1081,6 @@ HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Chann (+) Stop the Complementary PWM and disable interrupts. (+) Start the Complementary PWM and enable DMA transfers. (+) Stop the Complementary PWM and disable DMA transfers. - (+) Start the Complementary Input Capture measurement. - (+) Stop the Complementary Input Capture. - (+) Start the Complementary Input Capture and enable interrupts. - (+) Stop the Complementary Input Capture and disable interrupts. - (+) Start the Complementary Input Capture and enable DMA transfers. - (+) Stop the Complementary Input Capture and disable DMA transfers. - (+) Start the Complementary One Pulse generation. - (+) Stop the Complementary One Pulse. - (+) Start the Complementary One Pulse and enable interrupts. - (+) Stop the Complementary One Pulse and disable interrupts. - @endverbatim * @{ */ @@ -921,6 +1102,15 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Start(TIM_HandleTypeDef *htim, uint32_t Channel /* Check the parameters */ assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + /* Check the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + /* Enable the complementary PWM output */ TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); @@ -928,8 +1118,15 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Start(TIM_HandleTypeDef *htim, uint32_t Channel __HAL_TIM_MOE_ENABLE(htim); /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else { __HAL_TIM_ENABLE(htim); } @@ -962,6 +1159,9 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + /* Return function status */ return HAL_OK; } @@ -979,11 +1179,21 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) */ HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpsmcr; /* Check the parameters */ assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + /* Check the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + switch (Channel) { case TIM_CHANNEL_1: @@ -1008,27 +1218,38 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Chan } default: + status = HAL_ERROR; break; } - /* Enable the TIM Break interrupt */ - __HAL_TIM_ENABLE_IT(htim, TIM_IT_BREAK); + if (status == HAL_OK) + { + /* Enable the TIM 
Break interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_BREAK); - /* Enable the complementary PWM output */ - TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + /* Enable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); - /* Enable the Main Output */ - __HAL_TIM_MOE_ENABLE(htim); + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); - /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) - { - __HAL_TIM_ENABLE(htim); + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } } /* Return function status */ - return HAL_OK; + return status; } /** @@ -1044,6 +1265,7 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Chan */ HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpccer; /* Check the parameters */ @@ -1073,27 +1295,34 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Chann } default: + status = HAL_ERROR; break; } - /* Disable the complementary PWM output */ - TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); - - /* Disable the TIM Break interrupt (only if no more channel is active) */ - tmpccer = htim->Instance->CCER; - if ((tmpccer & (TIM_CCER_CC1NE | TIM_CCER_CC2NE | TIM_CCER_CC3NE)) == (uint32_t)RESET) + if (status == HAL_OK) { - __HAL_TIM_DISABLE_IT(htim, TIM_IT_BREAK); - } + /* Disable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); - /* Disable the Main Output */ - __HAL_TIM_MOE_DISABLE(htim); + /* Disable the TIM Break interrupt (only if no more channel is active) */ + tmpccer = htim->Instance->CCER; + if ((tmpccer & TIM_CCER_CCxNE_MASK) == (uint32_t)RESET) + { + __HAL_TIM_DISABLE_IT(htim, TIM_IT_BREAK); + } - /* Disable the Peripheral */ - __HAL_TIM_DISABLE(htim); + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } /* Return function status */ - return HAL_OK; + return status; } /** @@ -1109,46 +1338,52 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Chann * @param Length The length of data to be transferred from memory to TIM peripheral * @retval HAL status */ -HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, const uint32_t *pData, + uint16_t Length) { + HAL_StatusTypeDef status = HAL_OK; uint32_t tmpsmcr; /* Check the parameters */ assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); - if (htim->State == HAL_TIM_STATE_BUSY) + /* Set the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_BUSY) { return HAL_BUSY; } - else if (htim->State == HAL_TIM_STATE_READY) + else if (TIM_CHANNEL_N_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_READY) { - if (((uint32_t)pData == 
0U) && (Length > 0U)) + if ((pData == NULL) || (Length == 0U)) { return HAL_ERROR; } else { - htim->State = HAL_TIM_STATE_BUSY; + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); } } else { - /* nothing to do */ + return HAL_ERROR; } + switch (Channel) { case TIM_CHANNEL_1: { /* Set the DMA compare callbacks */ - htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseNCplt; htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; /* Set the DMA error callback */ - htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAErrorCCxN ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Capture/Compare 1 DMA request */ @@ -1159,15 +1394,17 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Cha case TIM_CHANNEL_2: { /* Set the DMA compare callbacks */ - htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseNCplt; htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; /* Set the DMA error callback */ - htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAErrorCCxN ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Capture/Compare 2 DMA request */ @@ -1178,15 +1415,17 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Cha case TIM_CHANNEL_3: { /* Set the DMA compare callbacks */ - htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseNCplt; htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; /* Set the DMA error callback */ - htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAErrorCCxN ; /* Enable the DMA stream */ - if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, Length) != HAL_OK) + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, + Length) != HAL_OK) { + /* Return error status */ return HAL_ERROR; } /* Enable the TIM Capture/Compare 3 DMA request */ @@ -1195,24 +1434,35 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Cha } default: + status = HAL_ERROR; break; } - /* Enable the complementary PWM output */ - TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + if (status == HAL_OK) + { + /* Enable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); - /* Enable the Main Output */ - __HAL_TIM_MOE_ENABLE(htim); + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); - /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger 
*/ - tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; - if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) - { - __HAL_TIM_ENABLE(htim); + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } } /* Return function status */ - return HAL_OK; + return status; } /** @@ -1228,6 +1478,8 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Cha */ HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) { + HAL_StatusTypeDef status = HAL_OK; + /* Check the parameters */ assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); @@ -1258,23 +1510,27 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Chan } default: + status = HAL_ERROR; break; } - /* Disable the complementary PWM output */ - TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + if (status == HAL_OK) + { + /* Disable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); - /* Disable the Main Output */ - __HAL_TIM_MOE_DISABLE(htim); + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); - /* Disable the Peripheral */ - __HAL_TIM_DISABLE(htim); + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); - /* Change the htim state */ - htim->State = HAL_TIM_STATE_READY; + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } /* Return function status */ - return HAL_OK; + return status; } /** @@ -1302,8 +1558,10 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Chan /** * @brief Starts the TIM One Pulse signal generation on the complementary * output. + * @note OutputChannel must match the pulse output channel chosen when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). * @param htim TIM One Pulse handle - * @param OutputChannel TIM Channel to be enabled + * @param OutputChannel pulse output channel to enable * This parameter can be one of the following values: * @arg TIM_CHANNEL_1: TIM Channel 1 selected * @arg TIM_CHANNEL_2: TIM Channel 2 selected @@ -1311,11 +1569,33 @@ HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Chan */ HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start(TIM_HandleTypeDef *htim, uint32_t OutputChannel) { + uint32_t input_channel = (OutputChannel == TIM_CHANNEL_1) ? 
TIM_CHANNEL_2 : TIM_CHANNEL_1; + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + /* Check the parameters */ assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); - /* Enable the complementary One Pulse output */ + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the complementary One Pulse output channel and the Input Capture channel */ TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_ENABLE); + TIM_CCxChannelCmd(htim->Instance, input_channel, TIM_CCx_ENABLE); /* Enable the Main Output */ __HAL_TIM_MOE_ENABLE(htim); @@ -1327,8 +1607,10 @@ HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start(TIM_HandleTypeDef *htim, uint32_t Ou /** * @brief Stops the TIM One Pulse signal generation on the complementary * output. + * @note OutputChannel must match the pulse output channel chosen when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). * @param htim TIM One Pulse handle - * @param OutputChannel TIM Channel to be disabled + * @param OutputChannel pulse output channel to disable * This parameter can be one of the following values: * @arg TIM_CHANNEL_1: TIM Channel 1 selected * @arg TIM_CHANNEL_2: TIM Channel 2 selected @@ -1336,12 +1618,14 @@ HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start(TIM_HandleTypeDef *htim, uint32_t Ou */ HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop(TIM_HandleTypeDef *htim, uint32_t OutputChannel) { + uint32_t input_channel = (OutputChannel == TIM_CHANNEL_1) ? 
TIM_CHANNEL_2 : TIM_CHANNEL_1; /* Check the parameters */ assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); - /* Disable the complementary One Pulse output */ + /* Disable the complementary One Pulse output channel and the Input Capture channel */ TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_DISABLE); + TIM_CCxChannelCmd(htim->Instance, input_channel, TIM_CCx_DISABLE); /* Disable the Main Output */ __HAL_TIM_MOE_DISABLE(htim); @@ -1349,6 +1633,12 @@ HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop(TIM_HandleTypeDef *htim, uint32_t Out /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + /* Return function status */ return HAL_OK; } @@ -1356,8 +1646,10 @@ HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop(TIM_HandleTypeDef *htim, uint32_t Out /** * @brief Starts the TIM One Pulse signal generation in interrupt mode on the * complementary channel. + * @note OutputChannel must match the pulse output channel chosen when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). * @param htim TIM One Pulse handle - * @param OutputChannel TIM Channel to be enabled + * @param OutputChannel pulse output channel to enable * This parameter can be one of the following values: * @arg TIM_CHANNEL_1: TIM Channel 1 selected * @arg TIM_CHANNEL_2: TIM Channel 2 selected @@ -1365,17 +1657,39 @@ HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop(TIM_HandleTypeDef *htim, uint32_t Out */ HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) { + uint32_t input_channel = (OutputChannel == TIM_CHANNEL_1) ? 
TIM_CHANNEL_2 : TIM_CHANNEL_1; + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + /* Check the parameters */ assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + /* Enable the TIM Capture/Compare 1 interrupt */ __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); /* Enable the TIM Capture/Compare 2 interrupt */ __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); - /* Enable the complementary One Pulse output */ + /* Enable the complementary One Pulse output channel and the Input Capture channel */ TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_ENABLE); + TIM_CCxChannelCmd(htim->Instance, input_channel, TIM_CCx_ENABLE); /* Enable the Main Output */ __HAL_TIM_MOE_ENABLE(htim); @@ -1387,8 +1701,10 @@ HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start_IT(TIM_HandleTypeDef *htim, uint32_t /** * @brief Stops the TIM One Pulse signal generation in interrupt mode on the * complementary channel. + * @note OutputChannel must match the pulse output channel chosen when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). * @param htim TIM One Pulse handle - * @param OutputChannel TIM Channel to be disabled + * @param OutputChannel pulse output channel to disable * This parameter can be one of the following values: * @arg TIM_CHANNEL_1: TIM Channel 1 selected * @arg TIM_CHANNEL_2: TIM Channel 2 selected @@ -1396,6 +1712,8 @@ HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start_IT(TIM_HandleTypeDef *htim, uint32_t */ HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) { + uint32_t input_channel = (OutputChannel == TIM_CHANNEL_1) ? 
TIM_CHANNEL_2 : TIM_CHANNEL_1; + /* Check the parameters */ assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); @@ -1405,8 +1723,9 @@ HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t /* Disable the TIM Capture/Compare 2 interrupt */ __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); - /* Disable the complementary One Pulse output */ + /* Disable the complementary One Pulse output channel and the Input Capture channel */ TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_DISABLE); + TIM_CCxChannelCmd(htim->Instance, input_channel, TIM_CCx_DISABLE); /* Disable the Main Output */ __HAL_TIM_MOE_DISABLE(htim); @@ -1414,6 +1733,12 @@ HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t /* Disable the Peripheral */ __HAL_TIM_DISABLE(htim); + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + /* Return function status */ return HAL_OK; } @@ -1627,7 +1952,7 @@ HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_DMA(TIM_HandleTypeDef *htim, uint3 * @retval HAL status */ HAL_StatusTypeDef HAL_TIMEx_MasterConfigSynchronization(TIM_HandleTypeDef *htim, - TIM_MasterConfigTypeDef *sMasterConfig) + const TIM_MasterConfigTypeDef *sMasterConfig) { uint32_t tmpcr2; uint32_t tmpsmcr; @@ -1688,7 +2013,7 @@ HAL_StatusTypeDef HAL_TIMEx_MasterConfigSynchronization(TIM_HandleTypeDef *htim, * @retval HAL status */ HAL_StatusTypeDef HAL_TIMEx_ConfigBreakDeadTime(TIM_HandleTypeDef *htim, - TIM_BreakDeadTimeConfigTypeDef *sBreakDeadTimeConfig) + const TIM_BreakDeadTimeConfigTypeDef *sBreakDeadTimeConfig) { /* Keep this variable initialized to 0 as it is used to configure BDTR register */ uint32_t tmpbdtr = 0U; @@ -1765,12 +2090,12 @@ HAL_StatusTypeDef HAL_TIMEx_ConfigBreakDeadTime(TIM_HandleTypeDef *htim, */ HAL_StatusTypeDef HAL_TIMEx_RemapConfig(TIM_HandleTypeDef *htim, uint32_t Remap) { - __HAL_LOCK(htim); - /* Check parameters */ assert_param(IS_TIM_REMAP(htim->Instance, Remap)); -#if defined(LPTIM_OR_TIM1_ITR2_RMP) && defined(LPTIM_OR_TIM5_ITR1_RMP) && defined(LPTIM_OR_TIM5_ITR1_RMP) + __HAL_LOCK(htim); + +#if defined(LPTIM_OR_TIM1_ITR2_RMP) && defined(LPTIM_OR_TIM5_ITR1_RMP) && defined(LPTIM_OR_TIM9_ITR1_RMP) if ((Remap & LPTIM_REMAP_MASK) == LPTIM_REMAP_MASK) { /* Connect TIMx internal trigger to LPTIM1 output */ @@ -1787,7 +2112,7 @@ HAL_StatusTypeDef HAL_TIMEx_RemapConfig(TIM_HandleTypeDef *htim, uint32_t Remap) #else /* Set the Timer remapping configuration */ WRITE_REG(htim->Instance->OR, Remap); -#endif /* LPTIM_OR_TIM1_ITR2_RMP && LPTIM_OR_TIM5_ITR1_RMP && LPTIM_OR_TIM5_ITR1_RMP */ +#endif /* LPTIM_OR_TIM1_ITR2_RMP && LPTIM_OR_TIM5_ITR1_RMP && LPTIM_OR_TIM9_ITR1_RMP */ __HAL_UNLOCK(htim); @@ -1815,7 +2140,7 @@ HAL_StatusTypeDef HAL_TIMEx_RemapConfig(TIM_HandleTypeDef *htim, uint32_t Remap) */ /** - * @brief Hall commutation changed callback in non-blocking mode + * @brief Commutation callback in non-blocking mode * @param htim TIM handle * @retval None */ @@ -1829,7 +2154,7 @@ __weak void HAL_TIMEx_CommutCallback(TIM_HandleTypeDef *htim) */ } /** - * @brief Hall commutation changed half complete callback in non-blocking mode + * @brief Commutation half complete callback in non-blocking mode * @param htim TIM handle * @retval None */ @@ -1844,7 +2169,7 @@ 
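A minimal usage sketch of the complementary one-pulse API above, assuming a TIM1 handle htim1 already initialized with HAL_TIM_OnePulse_Init() and a channel pair configured through HAL_TIM_OnePulse_ConfigChannel(); the handle name and channel choice are illustrative only:

#include "stm32f4xx_hal.h"

extern TIM_HandleTypeDef htim1;   /* assumed: one-pulse mode configured elsewhere */

void OnePulseN_Example(void)
{
  /* TIM_CHANNEL_1 is the pulse output here, so TIM_CHANNEL_2 acts as the trigger
     input; OutputChannel must match the one passed to HAL_TIM_OnePulse_ConfigChannel() */
  if (HAL_TIMEx_OnePulseN_Start(&htim1, TIM_CHANNEL_1) != HAL_OK)
  {
    /* one of the four channel states was not READY */
    return;
  }

  /* ... wait for the trigger and for the pulse to complete ... */

  (void)HAL_TIMEx_OnePulseN_Stop(&htim1, TIM_CHANNEL_1);
}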
__weak void HAL_TIMEx_CommutHalfCpltCallback(TIM_HandleTypeDef *htim) } /** - * @brief Hall Break detection callback in non-blocking mode + * @brief Break detection callback in non-blocking mode * @param htim TIM handle * @retval None */ @@ -1881,11 +2206,32 @@ __weak void HAL_TIMEx_BreakCallback(TIM_HandleTypeDef *htim) * @param htim TIM Hall Sensor handle * @retval HAL state */ -HAL_TIM_StateTypeDef HAL_TIMEx_HallSensor_GetState(TIM_HandleTypeDef *htim) +HAL_TIM_StateTypeDef HAL_TIMEx_HallSensor_GetState(const TIM_HandleTypeDef *htim) { return htim->State; } +/** + * @brief Return actual state of the TIM complementary channel. + * @param htim TIM handle + * @param ChannelN TIM Complementary channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @retval TIM Complementary channel state + */ +HAL_TIM_ChannelStateTypeDef HAL_TIMEx_GetChannelNState(const TIM_HandleTypeDef *htim, uint32_t ChannelN) +{ + HAL_TIM_ChannelStateTypeDef channel_state; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, ChannelN)); + + channel_state = TIM_CHANNEL_N_STATE_GET(htim, ChannelN); + + return channel_state; +} /** * @} */ @@ -1895,7 +2241,7 @@ HAL_TIM_StateTypeDef HAL_TIMEx_HallSensor_GetState(TIM_HandleTypeDef *htim) */ /* Private functions ---------------------------------------------------------*/ -/** @defgroup TIMEx_Private_Functions TIMEx Private Functions +/** @defgroup TIMEx_Private_Functions TIM Extended Private Functions * @{ */ @@ -1938,6 +2284,94 @@ void TIMEx_DMACommutationHalfCplt(DMA_HandleTypeDef *hdma) } +/** + * @brief TIM DMA Delay Pulse complete callback (complementary channel). + * @param hdma pointer to DMA handle. + * @retval None + */ +static void TIM_DMADelayPulseNCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA error callback (complementary channel) + * @param hdma pointer to DMA handle. 
+ * @retval None + */ +static void TIM_DMAErrorCCxN(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->ErrorCallback(htim); +#else + HAL_TIM_ErrorCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + /** * @brief Enables or disables the TIM Capture Compare Channel xN. * @param TIMx to select the TIM peripheral @@ -1954,13 +2388,13 @@ static void TIM_CCxNChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t Cha { uint32_t tmp; - tmp = TIM_CCER_CC1NE << (Channel & 0x1FU); /* 0x1FU = 31 bits max shift */ + tmp = TIM_CCER_CC1NE << (Channel & 0xFU); /* 0xFU = 15 bits max shift */ /* Reset the CCxNE Bit */ TIMx->CCER &= ~tmp; /* Set or reset the CCxNE Bit */ - TIMx->CCER |= (uint32_t)(ChannelNState << (Channel & 0x1FU)); /* 0x1FU = 31 bits max shift */ + TIMx->CCER |= (uint32_t)(ChannelNState << (Channel & 0xFU)); /* 0xFU = 15 bits max shift */ } /** * @} @@ -1974,5 +2408,3 @@ static void TIM_CCxNChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t Cha /** * @} */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_uart.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_uart.c index eaf41144..33a5f002 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_uart.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_hal_uart.c @@ -9,6 +9,18 @@ * + IO operation functions * + Peripheral Control functions * + Peripheral State and Errors functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim ============================================================================== ##### How to use this driver ##### @@ -21,7 +33,7 @@ (##) Enable the USARTx interface clock. (##) UART pins configuration: (+++) Enable the clock for the UART GPIOs. - (+++) Configure these UART pins (TX as alternate function pull-up, RX as alternate function Input). + (+++) Configure the UART TX/RX pins as alternate function pull-up. (##) NVIC configuration if you need to use interrupt process (HAL_UART_Transmit_IT() and HAL_UART_Receive_IT() APIs): (+++) Configure the USARTx interrupt priority. @@ -72,8 +84,8 @@ allows the user to configure dynamically the driver callbacks. [..] - Use Function @ref HAL_UART_RegisterCallback() to register a user callback. 
- Function @ref HAL_UART_RegisterCallback() allows to register following callbacks: + Use Function HAL_UART_RegisterCallback() to register a user callback. + Function HAL_UART_RegisterCallback() allows to register following callbacks: (+) TxHalfCpltCallback : Tx Half Complete Callback. (+) TxCpltCallback : Tx Complete Callback. (+) RxHalfCpltCallback : Rx Half Complete Callback. @@ -88,9 +100,9 @@ and a pointer to the user callback function. [..] - Use function @ref HAL_UART_UnRegisterCallback() to reset a callback to the default + Use function HAL_UART_UnRegisterCallback() to reset a callback to the default weak (surcharged) function. - @ref HAL_UART_UnRegisterCallback() takes as parameters the HAL peripheral handle, + HAL_UART_UnRegisterCallback() takes as parameters the HAL peripheral handle, and the Callback ID. This function allows to reset following callbacks: (+) TxHalfCpltCallback : Tx Half Complete Callback. @@ -105,13 +117,17 @@ (+) MspDeInitCallback : UART MspDeInit. [..] - By default, after the @ref HAL_UART_Init() and when the state is HAL_UART_STATE_RESET + For specific callback RxEventCallback, use dedicated registration/reset functions: + respectively HAL_UART_RegisterRxEventCallback() , HAL_UART_UnRegisterRxEventCallback(). + + [..] + By default, after the HAL_UART_Init() and when the state is HAL_UART_STATE_RESET all callbacks are set to the corresponding weak (surcharged) functions: - examples @ref HAL_UART_TxCpltCallback(), @ref HAL_UART_RxHalfCpltCallback(). + examples HAL_UART_TxCpltCallback(), HAL_UART_RxHalfCpltCallback(). Exception done for MspInit and MspDeInit functions that are respectively - reset to the legacy weak (surcharged) functions in the @ref HAL_UART_Init() - and @ref HAL_UART_DeInit() only when these callbacks are null (not registered beforehand). - If not, MspInit or MspDeInit are not null, the @ref HAL_UART_Init() and @ref HAL_UART_DeInit() + reset to the legacy weak (surcharged) functions in the HAL_UART_Init() + and HAL_UART_DeInit() only when these callbacks are null (not registered beforehand). + If not, MspInit or MspDeInit are not null, the HAL_UART_Init() and HAL_UART_DeInit() keep and use the user MspInit/MspDeInit callbacks (registered beforehand). [..] @@ -120,8 +136,8 @@ in HAL_UART_STATE_READY or HAL_UART_STATE_RESET state, thus registered (user) MspInit/DeInit callbacks can be used during the Init/DeInit. In that case first register the MspInit/MspDeInit user callbacks - using @ref HAL_UART_RegisterCallback() before calling @ref HAL_UART_DeInit() - or @ref HAL_UART_Init() function. + using HAL_UART_RegisterCallback() before calling HAL_UART_DeInit() + or HAL_UART_Init() function. [..] When The compilation define USE_HAL_UART_REGISTER_CALLBACKS is set to 0 or @@ -168,6 +184,40 @@ (+) Resume the DMA Transfer using HAL_UART_DMAResume() (+) Stop the DMA Transfer using HAL_UART_DMAStop() + + [..] This subsection also provides a set of additional functions providing enhanced reception + services to user. (For example, these functions allow application to handle use cases + where number of data to be received is unknown). + + (#) Compared to standard reception services which only consider number of received + data elements as reception completion criteria, these functions also consider additional events + as triggers for updating reception status to caller : + (+) Detection of inactivity period (RX line has not been active for a given period). + (++) RX inactivity detected by IDLE event, i.e. 
RX line has been in idle state (normally high state) + for 1 frame time, after last received byte. + + (#) There are two mode of transfer: + (+) Blocking mode: The reception is performed in polling mode, until either expected number of data is received, + or till IDLE event occurs. Reception is handled only during function execution. + When function exits, no data reception could occur. HAL status and number of actually received data elements, + are returned by function after finishing transfer. + (+) Non-Blocking mode: The reception is performed using Interrupts or DMA. + These API's return the HAL status. + The end of the data processing will be indicated through the + dedicated UART IRQ when using Interrupt mode or the DMA IRQ when using DMA mode. + The HAL_UARTEx_RxEventCallback() user callback will be executed during Receive process + The HAL_UART_ErrorCallback()user callback will be executed when a reception error is detected. + + (#) Blocking mode API: + (+) HAL_UARTEx_ReceiveToIdle() + + (#) Non-Blocking mode API with Interrupt: + (+) HAL_UARTEx_ReceiveToIdle_IT() + + (#) Non-Blocking mode API with DMA: + (+) HAL_UARTEx_ReceiveToIdle_DMA() + + *** UART HAL driver macros list *** ============================================= [..] @@ -186,7 +236,7 @@ @endverbatim [..] - (@) Additionnal remark: If the parity is enabled, then the MSB bit of the data written + (@) Additional remark: If the parity is enabled, then the MSB bit of the data written in the data register is transmitted but is changed by the parity bit. Depending on the frame length defined by the M bit (8-bits or 9-bits), the possible UART frame formats are as listed in the following table: @@ -202,17 +252,6 @@ | 1 | 1 | | SB | 8 bit data | PB | STB | | +-------------------------------------------------------------+ ****************************************************************************** - * @attention - * - *
- * © Copyright (c) 2016 STMicroelectronics.
- * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * - ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ @@ -261,7 +300,8 @@ static void UART_DMARxOnlyAbortCallback(DMA_HandleTypeDef *hdma); static HAL_StatusTypeDef UART_Transmit_IT(UART_HandleTypeDef *huart); static HAL_StatusTypeDef UART_EndTransmit_IT(UART_HandleTypeDef *huart); static HAL_StatusTypeDef UART_Receive_IT(UART_HandleTypeDef *huart); -static HAL_StatusTypeDef UART_WaitOnFlagUntilTimeout(UART_HandleTypeDef *huart, uint32_t Flag, FlagStatus Status, uint32_t Tickstart, uint32_t Timeout); +static HAL_StatusTypeDef UART_WaitOnFlagUntilTimeout(UART_HandleTypeDef *huart, uint32_t Flag, FlagStatus Status, + uint32_t Tickstart, uint32_t Timeout); static void UART_SetConfig(UART_HandleTypeDef *huart); /** @@ -380,6 +420,7 @@ HAL_StatusTypeDef HAL_UART_Init(UART_HandleTypeDef *huart) huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState = HAL_UART_STATE_READY; huart->RxState = HAL_UART_STATE_READY; + huart->RxEventType = HAL_UART_RXEVENT_TC; return HAL_OK; } @@ -449,6 +490,7 @@ HAL_StatusTypeDef HAL_HalfDuplex_Init(UART_HandleTypeDef *huart) huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState = HAL_UART_STATE_READY; huart->RxState = HAL_UART_STATE_READY; + huart->RxEventType = HAL_UART_RXEVENT_TC; return HAL_OK; } @@ -529,6 +571,7 @@ HAL_StatusTypeDef HAL_LIN_Init(UART_HandleTypeDef *huart, uint32_t BreakDetectLe huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState = HAL_UART_STATE_READY; huart->RxState = HAL_UART_STATE_READY; + huart->RxEventType = HAL_UART_RXEVENT_TC; return HAL_OK; } @@ -612,6 +655,7 @@ HAL_StatusTypeDef HAL_MultiProcessor_Init(UART_HandleTypeDef *huart, uint8_t Add huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState = HAL_UART_STATE_READY; huart->RxState = HAL_UART_STATE_READY; + huart->RxEventType = HAL_UART_RXEVENT_TC; return HAL_OK; } @@ -653,6 +697,8 @@ HAL_StatusTypeDef HAL_UART_DeInit(UART_HandleTypeDef *huart) huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState = HAL_UART_STATE_RESET; huart->RxState = HAL_UART_STATE_RESET; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + huart->RxEventType = HAL_UART_RXEVENT_TC; /* Process Unlock */ __HAL_UNLOCK(huart); @@ -694,6 +740,8 @@ __weak void HAL_UART_MspDeInit(UART_HandleTypeDef *huart) /** * @brief Register a User UART Callback * To be used instead of the weak predefined callback + * @note The HAL_UART_RegisterCallback() may be called before HAL_UART_Init(), HAL_HalfDuplex_Init(), HAL_LIN_Init(), + * HAL_MultiProcessor_Init() to register callbacks for HAL_UART_MSPINIT_CB_ID and HAL_UART_MSPDEINIT_CB_ID * @param huart uart handle * @param CallbackID ID of the callback to be registered * This parameter can be one of the following values: @@ -710,7 +758,8 @@ __weak void HAL_UART_MspDeInit(UART_HandleTypeDef *huart) * @param pCallback pointer to the Callback function * @retval HAL status */ -HAL_StatusTypeDef HAL_UART_RegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID, pUART_CallbackTypeDef pCallback) +HAL_StatusTypeDef HAL_UART_RegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID, + pUART_CallbackTypeDef pCallback) { HAL_StatusTypeDef status = HAL_OK; @@ -721,8 +770,6 @@ 
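A short sketch of the registration order implied by the MSP callback note above; it only applies when USE_HAL_UART_REGISTER_CALLBACKS is set to 1, and the huart2 handle and MyUartMspInit function are placeholder names:

#include "stm32f4xx_hal.h"

extern UART_HandleTypeDef huart2;   /* assumed: Instance and Init fields filled by the application */

static void MyUartMspInit(UART_HandleTypeDef *huart)
{
  /* application-specific clock, GPIO and NVIC setup for this UART instance */
  (void)huart;
}

void Uart_Init_Example(void)
{
  /* The MspInit callback may be registered while the handle is still in RESET state,
     i.e. before HAL_UART_Init() is called */
  (void)HAL_UART_RegisterCallback(&huart2, HAL_UART_MSPINIT_CB_ID, MyUartMspInit);

  if (HAL_UART_Init(&huart2) != HAL_OK)
  {
    /* initialization error */
  }
}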
HAL_StatusTypeDef HAL_UART_RegisterCallback(UART_HandleTypeDef *huart, HAL_UART_ return HAL_ERROR; } - /* Process locked */ - __HAL_LOCK(huart); if (huart->gState == HAL_UART_STATE_READY) { @@ -807,15 +854,15 @@ HAL_StatusTypeDef HAL_UART_RegisterCallback(UART_HandleTypeDef *huart, HAL_UART_ status = HAL_ERROR; } - /* Release Lock */ - __HAL_UNLOCK(huart); - return status; } /** * @brief Unregister an UART Callback * UART callaback is redirected to the weak predefined callback + * @note The HAL_UART_UnRegisterCallback() may be called before HAL_UART_Init(), HAL_HalfDuplex_Init(), + * HAL_LIN_Init(), HAL_MultiProcessor_Init() to un-register callbacks for HAL_UART_MSPINIT_CB_ID + * and HAL_UART_MSPDEINIT_CB_ID * @param huart uart handle * @param CallbackID ID of the callback to be unregistered * This parameter can be one of the following values: @@ -835,9 +882,6 @@ HAL_StatusTypeDef HAL_UART_UnRegisterCallback(UART_HandleTypeDef *huart, HAL_UAR { HAL_StatusTypeDef status = HAL_OK; - /* Process locked */ - __HAL_LOCK(huart); - if (HAL_UART_STATE_READY == huart->gState) { switch (CallbackID) @@ -921,11 +965,75 @@ HAL_StatusTypeDef HAL_UART_UnRegisterCallback(UART_HandleTypeDef *huart, HAL_UAR status = HAL_ERROR; } + return status; +} + +/** + * @brief Register a User UART Rx Event Callback + * To be used instead of the weak predefined callback + * @param huart Uart handle + * @param pCallback Pointer to the Rx Event Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_RegisterRxEventCallback(UART_HandleTypeDef *huart, pUART_RxEventCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(huart); + + if (huart->gState == HAL_UART_STATE_READY) + { + huart->RxEventCallback = pCallback; + } + else + { + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + } + /* Release Lock */ __HAL_UNLOCK(huart); return status; } + +/** + * @brief UnRegister the UART Rx Event Callback + * UART Rx Event Callback is redirected to the weak HAL_UARTEx_RxEventCallback() predefined callback + * @param huart Uart handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_UnRegisterRxEventCallback(UART_HandleTypeDef *huart) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(huart); + + if (huart->gState == HAL_UART_STATE_READY) + { + huart->RxEventCallback = HAL_UARTEx_RxEventCallback; /* Legacy weak UART Rx Event Callback */ + } + else + { + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(huart); + return status; +} #endif /* USE_HAL_UART_REGISTER_CALLBACKS */ /** @@ -991,6 +1099,9 @@ HAL_StatusTypeDef HAL_UART_UnRegisterCallback(UART_HandleTypeDef *huart, HAL_UAR (+) HAL_UART_AbortTransmitCpltCallback() (+) HAL_UART_AbortReceiveCpltCallback() + (#) A Rx Event Reception Callback (Rx event notification) is available for Non_Blocking modes of enhanced reception services: + (+) HAL_UARTEx_RxEventCallback() + (#) In Non-Blocking mode transfers, possible errors are split into 2 categories. 
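As a rough illustration of the new Rx event callback registration pair (requires USE_HAL_UART_REGISTER_CALLBACKS == 1; the handle and function names are assumptions, not part of the driver):

#include "stm32f4xx_hal.h"

extern UART_HandleTypeDef huart2;   /* assumed UART handle */

static void MyRxEventCallback(UART_HandleTypeDef *huart, uint16_t Size)
{
  /* Size = number of data elements available in the reception buffer for this event */
  (void)huart;
  (void)Size;
}

void RxEventCallback_Registration_Example(void)
{
  /* Replace the weak HAL_UARTEx_RxEventCallback() by a user callback ... */
  (void)HAL_UART_RegisterRxEventCallback(&huart2, MyRxEventCallback);

  /* ... and restore the weak one when it is no longer needed */
  (void)HAL_UART_UnRegisterRxEventCallback(&huart2);
}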
Errors are handled as follows : (+) Error is considered as Recoverable and non blocking : Transfer could go till end, but error severity is @@ -1021,9 +1132,10 @@ HAL_StatusTypeDef HAL_UART_UnRegisterCallback(UART_HandleTypeDef *huart, HAL_UAR * @param Timeout Timeout duration * @retval HAL status */ -HAL_StatusTypeDef HAL_UART_Transmit(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint32_t Timeout) +HAL_StatusTypeDef HAL_UART_Transmit(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size, uint32_t Timeout) { - uint16_t *tmp; + const uint8_t *pdata8bits; + const uint16_t *pdata16bits; uint32_t tickstart = 0U; /* Check that a Tx process is not already ongoing */ @@ -1034,53 +1146,52 @@ HAL_StatusTypeDef HAL_UART_Transmit(UART_HandleTypeDef *huart, uint8_t *pData, u return HAL_ERROR; } - /* Process Locked */ - __HAL_LOCK(huart); - huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState = HAL_UART_STATE_BUSY_TX; - /* Init tickstart for timeout managment */ + /* Init tickstart for timeout management */ tickstart = HAL_GetTick(); huart->TxXferSize = Size; huart->TxXferCount = Size; - /* Process Unlocked */ - __HAL_UNLOCK(huart); + /* In case of 9bits/No Parity transfer, pData needs to be handled as a uint16_t pointer */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + pdata8bits = NULL; + pdata16bits = (const uint16_t *) pData; + } + else + { + pdata8bits = pData; + pdata16bits = NULL; + } while (huart->TxXferCount > 0U) { - huart->TxXferCount--; - if (huart->Init.WordLength == UART_WORDLENGTH_9B) + if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_TXE, RESET, tickstart, Timeout) != HAL_OK) { - if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_TXE, RESET, tickstart, Timeout) != HAL_OK) - { - return HAL_TIMEOUT; - } - tmp = (uint16_t *) pData; - huart->Instance->DR = (*tmp & (uint16_t)0x01FF); - if (huart->Init.Parity == UART_PARITY_NONE) - { - pData += 2U; - } - else - { - pData += 1U; - } + huart->gState = HAL_UART_STATE_READY; + + return HAL_TIMEOUT; + } + if (pdata8bits == NULL) + { + huart->Instance->DR = (uint16_t)(*pdata16bits & 0x01FFU); + pdata16bits++; } else { - if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_TXE, RESET, tickstart, Timeout) != HAL_OK) - { - return HAL_TIMEOUT; - } - huart->Instance->DR = (*pData++ & (uint8_t)0xFF); + huart->Instance->DR = (uint8_t)(*pdata8bits & 0xFFU); + pdata8bits++; } + huart->TxXferCount--; } if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_TC, RESET, tickstart, Timeout) != HAL_OK) { + huart->gState = HAL_UART_STATE_READY; + return HAL_TIMEOUT; } @@ -1109,7 +1220,8 @@ HAL_StatusTypeDef HAL_UART_Transmit(UART_HandleTypeDef *huart, uint8_t *pData, u */ HAL_StatusTypeDef HAL_UART_Receive(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint32_t Timeout) { - uint16_t *tmp; + uint8_t *pdata8bits; + uint16_t *pdata16bits; uint32_t tickstart = 0U; /* Check that a Rx process is not already ongoing */ @@ -1120,60 +1232,55 @@ HAL_StatusTypeDef HAL_UART_Receive(UART_HandleTypeDef *huart, uint8_t *pData, ui return HAL_ERROR; } - /* Process Locked */ - __HAL_LOCK(huart); - huart->ErrorCode = HAL_UART_ERROR_NONE; huart->RxState = HAL_UART_STATE_BUSY_RX; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; - /* Init tickstart for timeout managment */ + /* Init tickstart for timeout management */ tickstart = HAL_GetTick(); huart->RxXferSize = Size; huart->RxXferCount = Size; - /* Process Unlocked */ - __HAL_UNLOCK(huart); + /* In case of 9bits/No Parity transfer, pRxData needs to 
be handled as a uint16_t pointer */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + pdata8bits = NULL; + pdata16bits = (uint16_t *) pData; + } + else + { + pdata8bits = pData; + pdata16bits = NULL; + } /* Check the remain data to be received */ while (huart->RxXferCount > 0U) { - huart->RxXferCount--; - if (huart->Init.WordLength == UART_WORDLENGTH_9B) + if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_RXNE, RESET, tickstart, Timeout) != HAL_OK) { - if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_RXNE, RESET, tickstart, Timeout) != HAL_OK) - { - return HAL_TIMEOUT; - } - tmp = (uint16_t *) pData; - if (huart->Init.Parity == UART_PARITY_NONE) - { - *tmp = (uint16_t)(huart->Instance->DR & (uint16_t)0x01FF); - pData += 2U; - } - else - { - *tmp = (uint16_t)(huart->Instance->DR & (uint16_t)0x00FF); - pData += 1U; - } + huart->RxState = HAL_UART_STATE_READY; + return HAL_TIMEOUT; + } + if (pdata8bits == NULL) + { + *pdata16bits = (uint16_t)(huart->Instance->DR & 0x01FF); + pdata16bits++; } else { - if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_RXNE, RESET, tickstart, Timeout) != HAL_OK) + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) || ((huart->Init.WordLength == UART_WORDLENGTH_8B) && (huart->Init.Parity == UART_PARITY_NONE))) { - return HAL_TIMEOUT; - } - if (huart->Init.Parity == UART_PARITY_NONE) - { - *pData++ = (uint8_t)(huart->Instance->DR & (uint8_t)0x00FF); + *pdata8bits = (uint8_t)(huart->Instance->DR & (uint8_t)0x00FF); } else { - *pData++ = (uint8_t)(huart->Instance->DR & (uint8_t)0x007F); + *pdata8bits = (uint8_t)(huart->Instance->DR & (uint8_t)0x007F); } - + pdata8bits++; } + huart->RxXferCount--; } /* At end of Rx process, restore huart->RxState to Ready */ @@ -1198,7 +1305,7 @@ HAL_StatusTypeDef HAL_UART_Receive(UART_HandleTypeDef *huart, uint8_t *pData, ui * @param Size Amount of data elements (u8 or u16) to be sent * @retval HAL status */ -HAL_StatusTypeDef HAL_UART_Transmit_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +HAL_StatusTypeDef HAL_UART_Transmit_IT(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size) { /* Check that a Tx process is not already ongoing */ if (huart->gState == HAL_UART_STATE_READY) @@ -1208,9 +1315,6 @@ HAL_StatusTypeDef HAL_UART_Transmit_IT(UART_HandleTypeDef *huart, uint8_t *pData return HAL_ERROR; } - /* Process Locked */ - __HAL_LOCK(huart); - huart->pTxBuffPtr = pData; huart->TxXferSize = Size; huart->TxXferCount = Size; @@ -1218,9 +1322,6 @@ HAL_StatusTypeDef HAL_UART_Transmit_IT(UART_HandleTypeDef *huart, uint8_t *pData huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState = HAL_UART_STATE_BUSY_TX; - /* Process Unlocked */ - __HAL_UNLOCK(huart); - /* Enable the UART Transmit data register empty Interrupt */ __HAL_UART_ENABLE_IT(huart, UART_IT_TXE); @@ -1253,29 +1354,10 @@ HAL_StatusTypeDef HAL_UART_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, return HAL_ERROR; } - /* Process Locked */ - __HAL_LOCK(huart); - - huart->pRxBuffPtr = pData; - huart->RxXferSize = Size; - huart->RxXferCount = Size; - - huart->ErrorCode = HAL_UART_ERROR_NONE; - huart->RxState = HAL_UART_STATE_BUSY_RX; - - /* Process Unlocked */ - __HAL_UNLOCK(huart); + /* Set Reception type to Standard reception */ + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; - /* Enable the UART Parity Error Interrupt */ - __HAL_UART_ENABLE_IT(huart, UART_IT_PE); - - /* Enable the UART Error Interrupt: (Frame error, noise error, overrun error) */ - __HAL_UART_ENABLE_IT(huart, UART_IT_ERR); - - 
/* Enable the UART Data Register not empty Interrupt */ - __HAL_UART_ENABLE_IT(huart, UART_IT_RXNE); - - return HAL_OK; + return (UART_Start_Receive_IT(huart, pData, Size)); } else { @@ -1294,9 +1376,9 @@ HAL_StatusTypeDef HAL_UART_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, * @param Size Amount of data elements (u8 or u16) to be sent * @retval HAL status */ -HAL_StatusTypeDef HAL_UART_Transmit_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +HAL_StatusTypeDef HAL_UART_Transmit_DMA(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size) { - uint32_t *tmp; + const uint32_t *tmp; /* Check that a Tx process is not already ongoing */ if (huart->gState == HAL_UART_STATE_READY) @@ -1306,9 +1388,6 @@ HAL_StatusTypeDef HAL_UART_Transmit_DMA(UART_HandleTypeDef *huart, uint8_t *pDat return HAL_ERROR; } - /* Process Locked */ - __HAL_LOCK(huart); - huart->pTxBuffPtr = pData; huart->TxXferSize = Size; huart->TxXferCount = Size; @@ -1329,18 +1408,15 @@ HAL_StatusTypeDef HAL_UART_Transmit_DMA(UART_HandleTypeDef *huart, uint8_t *pDat huart->hdmatx->XferAbortCallback = NULL; /* Enable the UART transmit DMA stream */ - tmp = (uint32_t *)&pData; - HAL_DMA_Start_IT(huart->hdmatx, *(uint32_t *)tmp, (uint32_t)&huart->Instance->DR, Size); + tmp = (const uint32_t *)&pData; + HAL_DMA_Start_IT(huart->hdmatx, *(const uint32_t *)tmp, (uint32_t)&huart->Instance->DR, Size); /* Clear the TC flag in the SR register by writing 0 to it */ __HAL_UART_CLEAR_FLAG(huart, UART_FLAG_TC); - /* Process Unlocked */ - __HAL_UNLOCK(huart); - /* Enable the DMA transfer for transmit request by setting the DMAT bit in the UART CR3 register */ - SET_BIT(huart->Instance->CR3, USART_CR3_DMAT); + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_DMAT); return HAL_OK; } @@ -1364,8 +1440,6 @@ HAL_StatusTypeDef HAL_UART_Transmit_DMA(UART_HandleTypeDef *huart, uint8_t *pDat */ HAL_StatusTypeDef HAL_UART_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) { - uint32_t *tmp; - /* Check that a Rx process is not already ongoing */ if (huart->RxState == HAL_UART_STATE_READY) { @@ -1374,48 +1448,10 @@ HAL_StatusTypeDef HAL_UART_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData return HAL_ERROR; } - /* Process Locked */ - __HAL_LOCK(huart); - - huart->pRxBuffPtr = pData; - huart->RxXferSize = Size; - - huart->ErrorCode = HAL_UART_ERROR_NONE; - huart->RxState = HAL_UART_STATE_BUSY_RX; - - /* Set the UART DMA transfer complete callback */ - huart->hdmarx->XferCpltCallback = UART_DMAReceiveCplt; - - /* Set the UART DMA Half transfer complete callback */ - huart->hdmarx->XferHalfCpltCallback = UART_DMARxHalfCplt; + /* Set Reception type to Standard reception */ + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; - /* Set the DMA error callback */ - huart->hdmarx->XferErrorCallback = UART_DMAError; - - /* Set the DMA abort callback */ - huart->hdmarx->XferAbortCallback = NULL; - - /* Enable the DMA stream */ - tmp = (uint32_t *)&pData; - HAL_DMA_Start_IT(huart->hdmarx, (uint32_t)&huart->Instance->DR, *(uint32_t *)tmp, Size); - - /* Clear the Overrun flag just before enabling the DMA Rx request: can be mandatory for the second transfer */ - __HAL_UART_CLEAR_OREFLAG(huart); - - /* Process Unlocked */ - __HAL_UNLOCK(huart); - - /* Enable the UART Parity Error Interrupt */ - SET_BIT(huart->Instance->CR1, USART_CR1_PEIE); - - /* Enable the UART Error Interrupt: (Frame error, noise error, overrun error) */ - SET_BIT(huart->Instance->CR3, USART_CR3_EIE); - - /* Enable the DMA transfer for the receiver request 
by setting the DMAR bit - in the UART CR3 register */ - SET_BIT(huart->Instance->CR3, USART_CR3_DMAR); - - return HAL_OK; + return (UART_Start_Receive_DMA(huart, pData, Size)); } else { @@ -1433,30 +1469,24 @@ HAL_StatusTypeDef HAL_UART_DMAPause(UART_HandleTypeDef *huart) { uint32_t dmarequest = 0x00U; - /* Process Locked */ - __HAL_LOCK(huart); - dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT); if ((huart->gState == HAL_UART_STATE_BUSY_TX) && dmarequest) { /* Disable the UART DMA Tx request */ - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); } dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR); if ((huart->RxState == HAL_UART_STATE_BUSY_RX) && dmarequest) { /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ - CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); - CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); /* Disable the UART DMA Rx request */ - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); } - /* Process Unlocked */ - __HAL_UNLOCK(huart); - return HAL_OK; } @@ -1468,13 +1498,11 @@ HAL_StatusTypeDef HAL_UART_DMAPause(UART_HandleTypeDef *huart) */ HAL_StatusTypeDef HAL_UART_DMAResume(UART_HandleTypeDef *huart) { - /* Process Locked */ - __HAL_LOCK(huart); if (huart->gState == HAL_UART_STATE_BUSY_TX) { /* Enable the UART DMA Tx request */ - SET_BIT(huart->Instance->CR3, USART_CR3_DMAT); + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_DMAT); } if (huart->RxState == HAL_UART_STATE_BUSY_RX) @@ -1482,64 +1510,338 @@ HAL_StatusTypeDef HAL_UART_DMAResume(UART_HandleTypeDef *huart) /* Clear the Overrun flag before resuming the Rx transfer*/ __HAL_UART_CLEAR_OREFLAG(huart); - /* Reenable PE and ERR (Frame error, noise error, overrun error) interrupts */ - SET_BIT(huart->Instance->CR1, USART_CR1_PEIE); - SET_BIT(huart->Instance->CR3, USART_CR3_EIE); + /* Re-enable PE and ERR (Frame error, noise error, overrun error) interrupts */ + if (huart->Init.Parity != UART_PARITY_NONE) + { + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_PEIE); + } + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_EIE); /* Enable the UART DMA Rx request */ - SET_BIT(huart->Instance->CR3, USART_CR3_DMAR); + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_DMAR); } - /* Process Unlocked */ - __HAL_UNLOCK(huart); + return HAL_OK; +} + +/** + * @brief Stops the DMA Transfer. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. 
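For orientation, a hedged sketch of the pause/resume flow that the atomic bit accesses above protect; huart2 and tx_buf are placeholder names, and a Tx DMA stream is assumed to be linked to the handle:

#include "stm32f4xx_hal.h"

extern UART_HandleTypeDef huart2;          /* assumed: hdmatx configured and linked */
static const uint8_t tx_buf[] = "hello";

void Uart_Dma_PauseResume_Example(void)
{
  (void)HAL_UART_Transmit_DMA(&huart2, tx_buf, (uint16_t)(sizeof(tx_buf) - 1U));

  /* Gate the DMA request without aborting the stream ... */
  (void)HAL_UART_DMAPause(&huart2);

  /* ... then let the transfer continue */
  (void)HAL_UART_DMAResume(&huart2);
}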
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DMAStop(UART_HandleTypeDef *huart) +{ + uint32_t dmarequest = 0x00U; + /* The Lock is not implemented on this API to allow the user application + to call the HAL UART API under callbacks HAL_UART_TxCpltCallback() / HAL_UART_RxCpltCallback(): + when calling HAL_DMA_Abort() API the DMA TX/RX Transfer complete interrupt is generated + and the correspond call back is executed HAL_UART_TxCpltCallback() / HAL_UART_RxCpltCallback() + */ + + /* Stop UART DMA Tx request if ongoing */ + dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT); + if ((huart->gState == HAL_UART_STATE_BUSY_TX) && dmarequest) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx stream */ + if (huart->hdmatx != NULL) + { + HAL_DMA_Abort(huart->hdmatx); + } + UART_EndTxTransfer(huart); + } + + /* Stop UART DMA Rx request if ongoing */ + dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR); + if ((huart->RxState == HAL_UART_STATE_BUSY_RX) && dmarequest) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx stream */ + if (huart->hdmarx != NULL) + { + HAL_DMA_Abort(huart->hdmarx); + } + UART_EndRxTransfer(huart); + } + + return HAL_OK; +} + +/** + * @brief Receive an amount of data in blocking mode till either the expected number of data is received or an IDLE event occurs. + * @note HAL_OK is returned if reception is completed (expected number of data has been received) + * or if reception is stopped after IDLE event (less than the expected number of data has been received) + * In this case, RxLen output parameter indicates number of data available in reception buffer. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M = 01), + * the received data is handled as a set of uint16_t. In this case, Size must indicate the number + * of uint16_t available through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (uint8_t or uint16_t data elements). + * @param Size Amount of data elements (uint8_t or uint16_t) to be received. + * @param RxLen Number of data elements finally received (could be lower than Size, in case reception ends on IDLE event) + * @param Timeout Timeout duration expressed in ms (covers the whole reception sequence). 
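A minimal blocking-mode sketch of this API; the huart2 handle, buffer size and timeout are illustrative values for an 8-bit, no-parity configuration:

#include "stm32f4xx_hal.h"

extern UART_HandleTypeDef huart2;   /* assumed UART handle */

void ReceiveToIdle_Polling_Example(void)
{
  uint8_t  rx_buf[128];
  uint16_t rx_len = 0U;

  /* Returns after 128 bytes, after an IDLE gap following at least one received byte,
     or after 1000 ms, whichever comes first */
  if (HAL_UARTEx_ReceiveToIdle(&huart2, rx_buf, (uint16_t)sizeof(rx_buf), &rx_len, 1000U) == HAL_OK)
  {
    /* rx_len holds the number of bytes actually placed in rx_buf */
  }
}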
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint16_t *RxLen, + uint32_t Timeout) +{ + uint8_t *pdata8bits; + uint16_t *pdata16bits; + uint32_t tickstart; + + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->RxState = HAL_UART_STATE_BUSY_RX; + huart->ReceptionType = HAL_UART_RECEPTION_TOIDLE; + huart->RxEventType = HAL_UART_RXEVENT_TC; + + /* Init tickstart for timeout management */ + tickstart = HAL_GetTick(); + + huart->RxXferSize = Size; + huart->RxXferCount = Size; + + /* In case of 9bits/No Parity transfer, pRxData needs to be handled as a uint16_t pointer */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + pdata8bits = NULL; + pdata16bits = (uint16_t *) pData; + } + else + { + pdata8bits = pData; + pdata16bits = NULL; + } + + /* Initialize output number of received elements */ + *RxLen = 0U; + + /* as long as data have to be received */ + while (huart->RxXferCount > 0U) + { + /* Check if IDLE flag is set */ + if (__HAL_UART_GET_FLAG(huart, UART_FLAG_IDLE)) + { + /* Clear IDLE flag in ISR */ + __HAL_UART_CLEAR_IDLEFLAG(huart); + + /* If Set, but no data ever received, clear flag without exiting loop */ + /* If Set, and data has already been received, this means Idle Event is valid : End reception */ + if (*RxLen > 0U) + { + huart->RxEventType = HAL_UART_RXEVENT_IDLE; + huart->RxState = HAL_UART_STATE_READY; + + return HAL_OK; + } + } + + /* Check if RXNE flag is set */ + if (__HAL_UART_GET_FLAG(huart, UART_FLAG_RXNE)) + { + if (pdata8bits == NULL) + { + *pdata16bits = (uint16_t)(huart->Instance->DR & (uint16_t)0x01FF); + pdata16bits++; + } + else + { + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) || ((huart->Init.WordLength == UART_WORDLENGTH_8B) && (huart->Init.Parity == UART_PARITY_NONE))) + { + *pdata8bits = (uint8_t)(huart->Instance->DR & (uint8_t)0x00FF); + } + else + { + *pdata8bits = (uint8_t)(huart->Instance->DR & (uint8_t)0x007F); + } + + pdata8bits++; + } + /* Increment number of received elements */ + *RxLen += 1U; + huart->RxXferCount--; + } + + /* Check for the Timeout */ + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - tickstart) > Timeout) || (Timeout == 0U)) + { + huart->RxState = HAL_UART_STATE_READY; + + return HAL_TIMEOUT; + } + } + } + + /* Set number of received elements in output parameter : RxLen */ + *RxLen = huart->RxXferSize - huart->RxXferCount; + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive an amount of data in interrupt mode till either the expected number of data is received or an IDLE event occurs. + * @note Reception is initiated by this function call. Further progress of reception is achieved thanks + * to UART interrupts raised by RXNE and IDLE events. Callback is called at end of reception indicating + * number of received data elements. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M = 01), + * the received data is handled as a set of uint16_t. In this case, Size must indicate the number + * of uint16_t available through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (uint8_t or uint16_t data elements). 
+ * @param Size Amount of data elements (uint8_t or uint16_t) to be received. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + HAL_StatusTypeDef status; + + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + /* Set Reception type to reception till IDLE Event*/ + huart->ReceptionType = HAL_UART_RECEPTION_TOIDLE; + huart->RxEventType = HAL_UART_RXEVENT_TC; + + status = UART_Start_Receive_IT(huart, pData, Size); + + /* Check Rx process has been successfully started */ + if (status == HAL_OK) + { + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + __HAL_UART_CLEAR_IDLEFLAG(huart); + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + } + else + { + /* In case of errors already pending when reception is started, + Interrupts may have already been raised and lead to reception abortion. + (Overrun error for instance). + In such case Reception Type has been reset to HAL_UART_RECEPTION_STANDARD. */ + status = HAL_ERROR; + } + } - return HAL_OK; + return status; + } + else + { + return HAL_BUSY; + } } /** - * @brief Stops the DMA Transfer. - * @param huart Pointer to a UART_HandleTypeDef structure that contains - * the configuration information for the specified UART module. + * @brief Receive an amount of data in DMA mode till either the expected number of data is received or an IDLE event occurs. + * @note Reception is initiated by this function call. Further progress of reception is achieved thanks + * to DMA services, transferring automatically received data elements in user reception buffer and + * calling registered callbacks at half/end of reception. UART IDLE events are also used to consider + * reception phase as ended. In all cases, callback execution will indicate number of received data elements. + * @note When the UART parity is enabled (PCE = 1), the received data contain + * the parity bit (MSB position). + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M = 01), + * the received data is handled as a set of uint16_t. In this case, Size must indicate the number + * of uint16_t available through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (uint8_t or uint16_t data elements). + * @param Size Amount of data elements (uint8_t or uint16_t) to be received. 
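An interrupt-mode sketch built on the same reception-to-idle service; the handle and buffer are assumptions, and the weak HAL_UARTEx_RxEventCallback() is overridden here rather than registered:

#include "stm32f4xx_hal.h"

extern UART_HandleTypeDef huart2;          /* assumed UART handle */
static uint8_t rx_buf[64];
static volatile uint16_t last_rx_len;

/* Executed on IDLE detection or once the whole buffer has been received */
void HAL_UARTEx_RxEventCallback(UART_HandleTypeDef *huart, uint16_t Size)
{
  last_rx_len = Size;   /* number of bytes available in rx_buf */

  /* re-arm the reception for the next frame */
  (void)HAL_UARTEx_ReceiveToIdle_IT(huart, rx_buf, (uint16_t)sizeof(rx_buf));
}

void ReceiveToIdle_IT_Example(void)
{
  (void)HAL_UARTEx_ReceiveToIdle_IT(&huart2, rx_buf, (uint16_t)sizeof(rx_buf));
}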
* @retval HAL status */ -HAL_StatusTypeDef HAL_UART_DMAStop(UART_HandleTypeDef *huart) +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) { - uint32_t dmarequest = 0x00U; - /* The Lock is not implemented on this API to allow the user application - to call the HAL UART API under callbacks HAL_UART_TxCpltCallback() / HAL_UART_RxCpltCallback(): - when calling HAL_DMA_Abort() API the DMA TX/RX Transfer complete interrupt is generated - and the correspond call back is executed HAL_UART_TxCpltCallback() / HAL_UART_RxCpltCallback() - */ + HAL_StatusTypeDef status; - /* Stop UART DMA Tx request if ongoing */ - dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT); - if ((huart->gState == HAL_UART_STATE_BUSY_TX) && dmarequest) + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) { - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); - - /* Abort the UART DMA Tx stream */ - if (huart->hdmatx != NULL) + if ((pData == NULL) || (Size == 0U)) { - HAL_DMA_Abort(huart->hdmatx); + return HAL_ERROR; } - UART_EndTxTransfer(huart); - } - /* Stop UART DMA Rx request if ongoing */ - dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR); - if ((huart->RxState == HAL_UART_STATE_BUSY_RX) && dmarequest) - { - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + /* Set Reception type to reception till IDLE Event*/ + huart->ReceptionType = HAL_UART_RECEPTION_TOIDLE; + huart->RxEventType = HAL_UART_RXEVENT_TC; - /* Abort the UART DMA Rx stream */ - if (huart->hdmarx != NULL) + status = UART_Start_Receive_DMA(huart, pData, Size); + + /* Check Rx process has been successfully started */ + if (status == HAL_OK) { - HAL_DMA_Abort(huart->hdmarx); + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + __HAL_UART_CLEAR_IDLEFLAG(huart); + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + } + else + { + /* In case of errors already pending when reception is started, + Interrupts may have already been raised and lead to reception abortion. + (Overrun error for instance). + In such case Reception Type has been reset to HAL_UART_RECEPTION_STANDARD. */ + status = HAL_ERROR; + } } - UART_EndRxTransfer(huart); + + return status; + } + else + { + return HAL_BUSY; } +} - return HAL_OK; +/** + * @brief Provide Rx Event type that has lead to RxEvent callback execution. + * @note When HAL_UARTEx_ReceiveToIdle_IT() or HAL_UARTEx_ReceiveToIdle_DMA() API are called, progress + * of reception process is provided to application through calls of Rx Event callback (either default one + * HAL_UARTEx_RxEventCallback() or user registered one). As several types of events could occur (IDLE event, + * Half Transfer, or Transfer Complete), this function allows to retrieve the Rx Event type that has lead + * to Rx Event callback execution. 
+ * @note This function is expected to be called within the user implementation of Rx Event Callback, + * in order to provide the accurate value : + * In Interrupt Mode : + * - HAL_UART_RXEVENT_TC : when Reception has been completed (expected nb of data has been received) + * - HAL_UART_RXEVENT_IDLE : when Idle event occurred prior reception has been completed (nb of + * received data is lower than expected one) + * In DMA Mode : + * - HAL_UART_RXEVENT_TC : when Reception has been completed (expected nb of data has been received) + * - HAL_UART_RXEVENT_HT : when half of expected nb of data has been received + * - HAL_UART_RXEVENT_IDLE : when Idle event occurred prior reception has been completed (nb of + * received data is lower than expected one). + * In DMA mode, RxEvent callback could be called several times; + * When DMA is configured in Normal Mode, HT event does not stop Reception process; + * When DMA is configured in Circular Mode, HT, TC or IDLE events don't stop Reception process; + * @param huart UART handle. + * @retval Rx Event Type (returned value will be a value of @ref UART_RxEvent_Type_Values) + */ +HAL_UART_RxEventTypeTypeDef HAL_UARTEx_GetRxEventType(UART_HandleTypeDef *huart) +{ + /* Return Rx Event type value, as stored in UART handle */ + return(huart->RxEventType); } /** @@ -1553,17 +1855,23 @@ HAL_StatusTypeDef HAL_UART_DMAStop(UART_HandleTypeDef *huart) * - Set handle State to READY * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. * @retval HAL status -*/ + */ HAL_StatusTypeDef HAL_UART_Abort(UART_HandleTypeDef *huart) { /* Disable TXEIE, TCIE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ - CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE | USART_CR1_TCIE)); - CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE | USART_CR1_TCIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* If Reception till IDLE event was ongoing, disable IDLEIE interrupt */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_IDLEIE)); + } /* Disable the UART DMA Tx request if enabled */ if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) { - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); /* Abort the UART DMA Tx stream: use blocking DMA Abort API (no callback) */ if (huart->hdmatx != NULL) @@ -1588,7 +1896,7 @@ HAL_StatusTypeDef HAL_UART_Abort(UART_HandleTypeDef *huart) /* Disable the UART DMA Rx request if enabled */ if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) { - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); /* Abort the UART DMA Rx stream: use blocking DMA Abort API (no callback) */ if (huart->hdmarx != NULL) @@ -1620,6 +1928,7 @@ HAL_StatusTypeDef HAL_UART_Abort(UART_HandleTypeDef *huart) /* Restore huart->RxState and huart->gState to Ready */ huart->RxState = HAL_UART_STATE_READY; huart->gState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; return HAL_OK; } @@ -1635,16 +1944,16 @@ HAL_StatusTypeDef HAL_UART_Abort(UART_HandleTypeDef *huart) * - Set handle State to READY * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. 
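To make the event types above concrete, a sketch of a callback that distinguishes them when reception runs over a circular DMA stream; handle, buffer and function names are assumptions, and registration again requires USE_HAL_UART_REGISTER_CALLBACKS == 1:

#include "stm32f4xx_hal.h"

extern UART_HandleTypeDef huart2;   /* assumed: hdmarx configured in DMA_CIRCULAR mode */
static uint8_t dma_rx_buf[256];

static void DmaRxEventHandler(UART_HandleTypeDef *huart, uint16_t Size)
{
  switch (HAL_UARTEx_GetRxEventType(huart))
  {
    case HAL_UART_RXEVENT_HT:
      /* first half of dma_rx_buf is full: Size == sizeof(dma_rx_buf) / 2 */
      break;
    case HAL_UART_RXEVENT_TC:
      /* whole buffer is full: Size == sizeof(dma_rx_buf) */
      break;
    case HAL_UART_RXEVENT_IDLE:
    default:
      /* line went idle: Size is the current write index inside dma_rx_buf */
      break;
  }
}

void ReceiveToIdle_DMA_Example(void)
{
  (void)HAL_UART_RegisterRxEventCallback(&huart2, DmaRxEventHandler);
  (void)HAL_UARTEx_ReceiveToIdle_DMA(&huart2, dma_rx_buf, (uint16_t)sizeof(dma_rx_buf));
}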
* @retval HAL status -*/ + */ HAL_StatusTypeDef HAL_UART_AbortTransmit(UART_HandleTypeDef *huart) { /* Disable TXEIE and TCIE interrupts */ - CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); /* Disable the UART DMA Tx request if enabled */ if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) { - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); /* Abort the UART DMA Tx stream : use blocking DMA Abort API (no callback) */ if (huart->hdmatx != NULL) @@ -1686,17 +1995,23 @@ HAL_StatusTypeDef HAL_UART_AbortTransmit(UART_HandleTypeDef *huart) * - Set handle State to READY * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. * @retval HAL status -*/ + */ HAL_StatusTypeDef HAL_UART_AbortReceive(UART_HandleTypeDef *huart) { /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ - CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); - CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* If Reception till IDLE event was ongoing, disable IDLEIE interrupt */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_IDLEIE)); + } /* Disable the UART DMA Rx request if enabled */ if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) { - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); /* Abort the UART DMA Rx stream : use blocking DMA Abort API (no callback) */ if (huart->hdmarx != NULL) @@ -1723,6 +2038,7 @@ HAL_StatusTypeDef HAL_UART_AbortReceive(UART_HandleTypeDef *huart) /* Restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; return HAL_OK; } @@ -1740,14 +2056,20 @@ HAL_StatusTypeDef HAL_UART_AbortReceive(UART_HandleTypeDef *huart) * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be * considered as completed only when user abort complete callback is executed (not when exiting function). 
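A hedged sketch of aborting an ongoing reception-to-idle transfer with the blocking abort API; the handle, buffer and the condition that triggers the abort are application assumptions:

#include "stm32f4xx_hal.h"

extern UART_HandleTypeDef huart2;   /* assumed UART handle */
static uint8_t rx_buf[64];

void Abort_Ongoing_Reception_Example(void)
{
  (void)HAL_UARTEx_ReceiveToIdle_IT(&huart2, rx_buf, (uint16_t)sizeof(rx_buf));

  /* ... later, e.g. on an application-level protocol timeout ... */

  /* Blocking abort: RXNE/PE/ERR and, for reception-to-idle, IDLE interrupts are disabled,
     any Rx DMA request is cleared, RxState returns to READY and ReceptionType falls back
     to HAL_UART_RECEPTION_STANDARD */
  (void)HAL_UART_AbortReceive(&huart2);
}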
* @retval HAL status -*/ + */ HAL_StatusTypeDef HAL_UART_Abort_IT(UART_HandleTypeDef *huart) { uint32_t AbortCplt = 0x01U; /* Disable TXEIE, TCIE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ - CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE | USART_CR1_TCIE)); - CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE | USART_CR1_TCIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* If Reception till IDLE event was ongoing, disable IDLEIE interrupt */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_IDLEIE)); + } /* If DMA Tx and/or DMA Rx Handles are associated to UART Handle, DMA Abort complete callbacks should be initialised before any call to DMA Abort functions */ @@ -1784,7 +2106,7 @@ HAL_StatusTypeDef HAL_UART_Abort_IT(UART_HandleTypeDef *huart) if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) { /* Disable DMA Tx at UART level */ - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); /* Abort the UART DMA Tx stream : use non blocking DMA Abort API (callback) */ if (huart->hdmatx != NULL) @@ -1807,7 +2129,7 @@ HAL_StatusTypeDef HAL_UART_Abort_IT(UART_HandleTypeDef *huart) /* Disable the UART DMA Rx request if enabled */ if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) { - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); /* Abort the UART DMA Rx stream : use non blocking DMA Abort API (callback) */ if (huart->hdmarx != NULL) @@ -1841,6 +2163,7 @@ HAL_StatusTypeDef HAL_UART_Abort_IT(UART_HandleTypeDef *huart) /* Restore huart->gState and huart->RxState to Ready */ huart->gState = HAL_UART_STATE_READY; huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; /* As no DMA to be aborted, call directly user Abort complete callback */ #if (USE_HAL_UART_REGISTER_CALLBACKS == 1) @@ -1868,16 +2191,16 @@ HAL_StatusTypeDef HAL_UART_Abort_IT(UART_HandleTypeDef *huart) * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be * considered as completed only when user abort complete callback is executed (not when exiting function). * @retval HAL status -*/ + */ HAL_StatusTypeDef HAL_UART_AbortTransmit_IT(UART_HandleTypeDef *huart) { /* Disable TXEIE and TCIE interrupts */ - CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); /* Disable the UART DMA Tx request if enabled */ if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) { - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); /* Abort the UART DMA Tx stream : use blocking DMA Abort API (no callback) */ if (huart->hdmatx != NULL) @@ -1945,17 +2268,23 @@ HAL_StatusTypeDef HAL_UART_AbortTransmit_IT(UART_HandleTypeDef *huart) * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be * considered as completed only when user abort complete callback is executed (not when exiting function). 
* @retval HAL status -*/ + */ HAL_StatusTypeDef HAL_UART_AbortReceive_IT(UART_HandleTypeDef *huart) { /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ - CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); - CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* If Reception till IDLE event was ongoing, disable IDLEIE interrupt */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_IDLEIE)); + } /* Disable the UART DMA Rx request if enabled */ if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) { - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); /* Abort the UART DMA Rx stream : use blocking DMA Abort API (no callback) */ if (huart->hdmarx != NULL) @@ -1978,6 +2307,7 @@ HAL_StatusTypeDef HAL_UART_AbortReceive_IT(UART_HandleTypeDef *huart) /* Restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; /* As no DMA to be aborted, call directly user Abort complete callback */ #if (USE_HAL_UART_REGISTER_CALLBACKS == 1) @@ -1996,6 +2326,7 @@ HAL_StatusTypeDef HAL_UART_AbortReceive_IT(UART_HandleTypeDef *huart) /* Restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; /* As no DMA to be aborted, call directly user Abort complete callback */ #if (USE_HAL_UART_REGISTER_CALLBACKS == 1) @@ -2037,7 +2368,8 @@ void HAL_UART_IRQHandler(UART_HandleTypeDef *huart) } /* If some errors occur */ - if ((errorflags != RESET) && (((cr3its & USART_CR3_EIE) != RESET) || ((cr1its & (USART_CR1_RXNEIE | USART_CR1_PEIE)) != RESET))) + if ((errorflags != RESET) && (((cr3its & USART_CR3_EIE) != RESET) + || ((cr1its & (USART_CR1_RXNEIE | USART_CR1_PEIE)) != RESET))) { /* UART parity error interrupt occurred ----------------------------------*/ if (((isrflags & USART_SR_PE) != RESET) && ((cr1its & USART_CR1_PEIE) != RESET)) @@ -2058,7 +2390,8 @@ void HAL_UART_IRQHandler(UART_HandleTypeDef *huart) } /* UART Over-Run interrupt occurred --------------------------------------*/ - if (((isrflags & USART_SR_ORE) != RESET) && (((cr1its & USART_CR1_RXNEIE) != RESET) || ((cr3its & USART_CR3_EIE) != RESET))) + if (((isrflags & USART_SR_ORE) != RESET) && (((cr1its & USART_CR1_RXNEIE) != RESET) + || ((cr3its & USART_CR3_EIE) != RESET))) { huart->ErrorCode |= HAL_UART_ERROR_ORE; } @@ -2085,7 +2418,7 @@ void HAL_UART_IRQHandler(UART_HandleTypeDef *huart) /* Disable the UART DMA Rx request if enabled */ if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) { - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); /* Abort the UART DMA Rx stream */ if (huart->hdmarx != NULL) @@ -2141,6 +2474,100 @@ void HAL_UART_IRQHandler(UART_HandleTypeDef *huart) return; } /* End if some error occurs */ + /* Check current reception Mode : + If Reception till IDLE event has been selected : */ + if ((huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + && ((isrflags & USART_SR_IDLE) != 0U) + && ((cr1its & USART_SR_IDLE) != 0U)) + { + __HAL_UART_CLEAR_IDLEFLAG(huart); + + /* Check if DMA mode is enabled in UART */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + /* DMA mode enabled */ + /* Check received length : If all expected data 
are received, do nothing, + (DMA cplt callback will be called). + Otherwise, if at least one data has already been received, IDLE event is to be notified to user */ + uint16_t nb_remaining_rx_data = (uint16_t) __HAL_DMA_GET_COUNTER(huart->hdmarx); + if ((nb_remaining_rx_data > 0U) + && (nb_remaining_rx_data < huart->RxXferSize)) + { + /* Reception is not complete */ + huart->RxXferCount = nb_remaining_rx_data; + + /* In Normal mode, end DMA xfer and HAL UART Rx process*/ + if (huart->hdmarx->Init.Mode != DMA_CIRCULAR) + { + /* Disable PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Disable the DMA transfer for the receiver request by resetting the DMAR bit + in the UART CR3 register */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + + /* Last bytes received, so no need as the abort is immediate */ + (void)HAL_DMA_Abort(huart->hdmarx); + } + + /* Initialize type of RxEvent that correspond to RxEvent callback execution; + In this case, Rx Event type is Idle Event */ + huart->RxEventType = HAL_UART_RXEVENT_IDLE; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Event callback*/ + huart->RxEventCallback(huart, (huart->RxXferSize - huart->RxXferCount)); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, (huart->RxXferSize - huart->RxXferCount)); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + return; + } + else + { + /* DMA mode not enabled */ + /* Check received length : If all expected data are received, do nothing. + Otherwise, if at least one data has already been received, IDLE event is to be notified to user */ + uint16_t nb_rx_data = huart->RxXferSize - huart->RxXferCount; + if ((huart->RxXferCount > 0U) + && (nb_rx_data > 0U)) + { + /* Disable the UART Parity Error Interrupt and RXNE interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + + /* Disable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Rx process is completed, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + + /* Initialize type of RxEvent that correspond to RxEvent callback execution; + In this case, Rx Event type is Idle Event */ + huart->RxEventType = HAL_UART_RXEVENT_IDLE; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx complete callback*/ + huart->RxEventCallback(huart, nb_rx_data); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, nb_rx_data); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + return; + } + } + /* UART in mode Transmitter ------------------------------------------------*/ if (((isrflags & USART_SR_TXE) != RESET) && ((cr1its & USART_CR1_TXEIE) != RESET)) { @@ -2276,6 +2703,24 @@ __weak void HAL_UART_AbortReceiveCpltCallback(UART_HandleTypeDef *huart) */ } +/** + * @brief Reception Event Callback (Rx event notification called after use of advanced reception service). 
+ * @param huart UART handle + * @param Size Number of data available in application reception buffer (indicates a position in + * reception buffer until which, data are available) + * @retval None + */ +__weak void HAL_UARTEx_RxEventCallback(UART_HandleTypeDef *huart, uint16_t Size) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + UNUSED(Size); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UARTEx_RxEventCallback can be implemented in the user file. + */ +} + /** * @} */ @@ -2316,7 +2761,7 @@ HAL_StatusTypeDef HAL_LIN_SendBreak(UART_HandleTypeDef *huart) huart->gState = HAL_UART_STATE_BUSY; /* Send break characters */ - SET_BIT(huart->Instance->CR1, USART_CR1_SBK); + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_SBK); huart->gState = HAL_UART_STATE_READY; @@ -2343,9 +2788,10 @@ HAL_StatusTypeDef HAL_MultiProcessor_EnterMuteMode(UART_HandleTypeDef *huart) huart->gState = HAL_UART_STATE_BUSY; /* Enable the USART mute mode by setting the RWU bit in the CR1 register */ - SET_BIT(huart->Instance->CR1, USART_CR1_RWU); + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_RWU); huart->gState = HAL_UART_STATE_READY; + huart->RxEventType = HAL_UART_RXEVENT_TC; /* Process Unlocked */ __HAL_UNLOCK(huart); @@ -2370,9 +2816,10 @@ HAL_StatusTypeDef HAL_MultiProcessor_ExitMuteMode(UART_HandleTypeDef *huart) huart->gState = HAL_UART_STATE_BUSY; /* Disable the USART mute mode by clearing the RWU bit in the CR1 register */ - CLEAR_BIT(huart->Instance->CR1, USART_CR1_RWU); + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_RWU); huart->gState = HAL_UART_STATE_READY; + huart->RxEventType = HAL_UART_RXEVENT_TC; /* Process Unlocked */ __HAL_UNLOCK(huart); @@ -2478,7 +2925,7 @@ HAL_StatusTypeDef HAL_HalfDuplex_EnableReceiver(UART_HandleTypeDef *huart) * the configuration information for the specified UART module. * @retval HAL state */ -HAL_UART_StateTypeDef HAL_UART_GetState(UART_HandleTypeDef *huart) +HAL_UART_StateTypeDef HAL_UART_GetState(const UART_HandleTypeDef *huart) { uint32_t temp1 = 0x00U, temp2 = 0x00U; temp1 = huart->gState; @@ -2493,7 +2940,7 @@ HAL_UART_StateTypeDef HAL_UART_GetState(UART_HandleTypeDef *huart) * the configuration information for the specified UART. 
* @retval UART Error Code */ -uint32_t HAL_UART_GetError(UART_HandleTypeDef *huart) +uint32_t HAL_UART_GetError(const UART_HandleTypeDef *huart) { return huart->ErrorCode; } @@ -2527,6 +2974,7 @@ void UART_InitCallbacksToDefault(UART_HandleTypeDef *huart) huart->AbortCpltCallback = HAL_UART_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ huart->AbortTransmitCpltCallback = HAL_UART_AbortTransmitCpltCallback; /* Legacy weak AbortTransmitCpltCallback */ huart->AbortReceiveCpltCallback = HAL_UART_AbortReceiveCpltCallback; /* Legacy weak AbortReceiveCpltCallback */ + huart->RxEventCallback = HAL_UARTEx_RxEventCallback; /* Legacy weak RxEventCallback */ } #endif /* USE_HAL_UART_REGISTER_CALLBACKS */ @@ -2547,10 +2995,10 @@ static void UART_DMATransmitCplt(DMA_HandleTypeDef *hdma) /* Disable the DMA transfer for transmit request by setting the DMAT bit in the UART CR3 register */ - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); /* Enable the UART Transmit Complete Interrupt */ - SET_BIT(huart->Instance->CR1, USART_CR1_TCIE); + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_TCIE); } /* DMA Circular mode */ @@ -2594,29 +3042,57 @@ static void UART_DMATxHalfCplt(DMA_HandleTypeDef *hdma) static void UART_DMAReceiveCplt(DMA_HandleTypeDef *hdma) { UART_HandleTypeDef *huart = (UART_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + /* DMA Normal mode*/ if ((hdma->Instance->CR & DMA_SxCR_CIRC) == 0U) { huart->RxXferCount = 0U; /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ - CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); - CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); /* Disable the DMA transfer for the receiver request by setting the DMAR bit in the UART CR3 register */ - CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); /* At end of Rx process, restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; + + /* If Reception till IDLE event has been selected, Disable IDLE Interrupt */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + } + } + + /* Initialize type of RxEvent that correspond to RxEvent callback execution; + In this case, Rx Event type is Transfer Complete */ + huart->RxEventType = HAL_UART_RXEVENT_TC; + + /* Check current reception Mode : + If Reception till IDLE event has been selected : use Rx Event callback */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Event callback*/ + huart->RxEventCallback(huart, huart->RxXferSize); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, huart->RxXferSize); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ } + else + { + /* In other cases : use Rx Complete callback */ #if (USE_HAL_UART_REGISTER_CALLBACKS == 1) - /*Call registered Rx complete callback*/ - huart->RxCpltCallback(huart); + /*Call registered Rx complete callback*/ + huart->RxCpltCallback(huart); #else - /*Call legacy weak Rx complete callback*/ - HAL_UART_RxCpltCallback(huart); + /*Call legacy weak Rx complete callback*/ + HAL_UART_RxCpltCallback(huart); #endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } } /** @@ -2629,13 +3105,33 @@ static void UART_DMARxHalfCplt(DMA_HandleTypeDef *hdma) { UART_HandleTypeDef *huart = 
(UART_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + /* Initialize type of RxEvent that correspond to RxEvent callback execution; + In this case, Rx Event type is Half Transfer */ + huart->RxEventType = HAL_UART_RXEVENT_HT; + + /* Check current reception Mode : + If Reception till IDLE event has been selected : use Rx Event callback */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Event callback*/ + huart->RxEventCallback(huart, huart->RxXferSize / 2U); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, huart->RxXferSize / 2U); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + else + { + /* In other cases : use Rx Half Complete callback */ #if (USE_HAL_UART_REGISTER_CALLBACKS == 1) - /*Call registered Rx Half complete callback*/ - huart->RxHalfCpltCallback(huart); + /*Call registered Rx Half complete callback*/ + huart->RxHalfCpltCallback(huart); #else - /*Call legacy weak Rx Half complete callback*/ - HAL_UART_RxHalfCpltCallback(huart); + /*Call legacy weak Rx Half complete callback*/ + HAL_UART_RxHalfCpltCallback(huart); #endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } } /** @@ -2676,16 +3172,18 @@ static void UART_DMAError(DMA_HandleTypeDef *hdma) } /** - * @brief This function handles UART Communication Timeout. + * @brief This function handles UART Communication Timeout. It waits + * until a flag is no longer in the specified status. * @param huart Pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @param Flag specifies the UART flag to check. - * @param Status The new Flag status (SET or RESET). + * @param Status The actual Flag status (SET or RESET). * @param Tickstart Tick start value * @param Timeout Timeout duration * @retval HAL status */ -static HAL_StatusTypeDef UART_WaitOnFlagUntilTimeout(UART_HandleTypeDef *huart, uint32_t Flag, FlagStatus Status, uint32_t Tickstart, uint32_t Timeout) +static HAL_StatusTypeDef UART_WaitOnFlagUntilTimeout(UART_HandleTypeDef *huart, uint32_t Flag, FlagStatus Status, + uint32_t Tickstart, uint32_t Timeout) { /* Wait until flag is set */ while ((__HAL_UART_GET_FLAG(huart, Flag) ? 
SET : RESET) == Status) @@ -2693,25 +3191,128 @@ static HAL_StatusTypeDef UART_WaitOnFlagUntilTimeout(UART_HandleTypeDef *huart, /* Check for the Timeout */ if (Timeout != HAL_MAX_DELAY) { - if ((Timeout == 0U) || ((HAL_GetTick() - Tickstart) > Timeout)) + if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) { - /* Disable TXE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts for the interrupt process */ - CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE)); - CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); - huart->gState = HAL_UART_STATE_READY; - huart->RxState = HAL_UART_STATE_READY; + return HAL_TIMEOUT; + } - /* Process Unlocked */ - __HAL_UNLOCK(huart); + if ((READ_BIT(huart->Instance->CR1, USART_CR1_RE) != 0U) && (Flag != UART_FLAG_TXE) && (Flag != UART_FLAG_TC)) + { + if (__HAL_UART_GET_FLAG(huart, UART_FLAG_ORE) == SET) + { + /* Clear Overrun Error flag*/ + __HAL_UART_CLEAR_OREFLAG(huart); - return HAL_TIMEOUT; + /* Blocking error : transfer is aborted + Set the UART state ready to be able to start again the process, + Disable Rx Interrupts if ongoing */ + UART_EndRxTransfer(huart); + + huart->ErrorCode = HAL_UART_ERROR_ORE; + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_ERROR; + } } } } return HAL_OK; } +/** + * @brief Start Receive operation in interrupt mode. + * @note This function could be called by all HAL UART API providing reception in Interrupt mode. + * @note When calling this function, parameters validity is considered as already checked, + * i.e. Rx State, buffer address, ... + * UART Handle is assumed as Locked. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be received. + * @retval HAL status + */ +HAL_StatusTypeDef UART_Start_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + huart->pRxBuffPtr = pData; + huart->RxXferSize = Size; + huart->RxXferCount = Size; + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->RxState = HAL_UART_STATE_BUSY_RX; + + if (huart->Init.Parity != UART_PARITY_NONE) + { + /* Enable the UART Parity Error Interrupt */ + __HAL_UART_ENABLE_IT(huart, UART_IT_PE); + } + + /* Enable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + __HAL_UART_ENABLE_IT(huart, UART_IT_ERR); + + /* Enable the UART Data Register not empty Interrupt */ + __HAL_UART_ENABLE_IT(huart, UART_IT_RXNE); + + return HAL_OK; +} + +/** + * @brief Start Receive operation in DMA mode. + * @note This function could be called by all HAL UART API providing reception in DMA mode. + * @note When calling this function, parameters validity is considered as already checked, + * i.e. Rx State, buffer address, ... + * UART Handle is assumed as Locked. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be received. 
+ * @retval HAL status + */ +HAL_StatusTypeDef UART_Start_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + uint32_t *tmp; + + huart->pRxBuffPtr = pData; + huart->RxXferSize = Size; + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->RxState = HAL_UART_STATE_BUSY_RX; + + /* Set the UART DMA transfer complete callback */ + huart->hdmarx->XferCpltCallback = UART_DMAReceiveCplt; + + /* Set the UART DMA Half transfer complete callback */ + huart->hdmarx->XferHalfCpltCallback = UART_DMARxHalfCplt; + + /* Set the DMA error callback */ + huart->hdmarx->XferErrorCallback = UART_DMAError; + + /* Set the DMA abort callback */ + huart->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + tmp = (uint32_t *)&pData; + HAL_DMA_Start_IT(huart->hdmarx, (uint32_t)&huart->Instance->DR, *(uint32_t *)tmp, Size); + + /* Clear the Overrun flag just before enabling the DMA Rx request: can be mandatory for the second transfer */ + __HAL_UART_CLEAR_OREFLAG(huart); + + if (huart->Init.Parity != UART_PARITY_NONE) + { + /* Enable the UART Parity Error Interrupt */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_PEIE); + } + + /* Enable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Enable the DMA transfer for the receiver request by setting the DMAR bit + in the UART CR3 register */ + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + return HAL_OK; +} + /** * @brief End ongoing Tx transfer on UART peripheral (following error detection or Transmit completion). * @param huart UART handle. @@ -2720,7 +3321,7 @@ static HAL_StatusTypeDef UART_WaitOnFlagUntilTimeout(UART_HandleTypeDef *huart, static void UART_EndTxTransfer(UART_HandleTypeDef *huart) { /* Disable TXEIE and TCIE interrupts */ - CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); /* At end of Tx process, restore huart->gState to Ready */ huart->gState = HAL_UART_STATE_READY; @@ -2734,11 +3335,18 @@ static void UART_EndTxTransfer(UART_HandleTypeDef *huart) static void UART_EndRxTransfer(UART_HandleTypeDef *huart) { /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ - CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); - CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* In case of reception waiting for IDLE event, disable also the IDLE IE interrupt source */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + } /* At end of Rx process, restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; } /** @@ -2797,6 +3405,7 @@ static void UART_DMATxAbortCallback(DMA_HandleTypeDef *hdma) /* Restore huart->gState and huart->RxState to Ready */ huart->gState = HAL_UART_STATE_READY; huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; /* Call user Abort complete callback */ #if (USE_HAL_UART_REGISTER_CALLBACKS == 1) @@ -2842,6 +3451,7 @@ static void UART_DMARxAbortCallback(DMA_HandleTypeDef *hdma) /* Restore huart->gState and huart->RxState to Ready */ huart->gState = HAL_UART_STATE_READY; huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; 
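/* --- Illustrative usage sketch (editorial addition, not part of this patch) ---
   The hunks above wire a new "reception to IDLE" service into the UART driver:
   huart->ReceptionType selects the mode, huart->RxEventType records why the
   event fired (HAL_UART_RXEVENT_HT / _TC / _IDLE), and progress is reported to
   the application through HAL_UARTEx_RxEventCallback(). A typical consumer is
   sketched below. HAL_UARTEx_ReceiveToIdle_DMA() is assumed to be the companion
   start API added elsewhere in this patch; huart2, rx_buf, RX_BUF_LEN and
   App_ProcessFrame() are hypothetical application-side names. */

#define RX_BUF_LEN  128U
static uint8_t rx_buf[RX_BUF_LEN];
extern UART_HandleTypeDef huart2;                              /* hypothetical handle   */
extern void App_ProcessFrame(const uint8_t *p, uint16_t len);  /* hypothetical consumer */

void App_StartVariableLengthReception(void)
{
  /* Arm a DMA reception that ends either on a full buffer or on an IDLE line */
  (void)HAL_UARTEx_ReceiveToIdle_DMA(&huart2, rx_buf, RX_BUF_LEN);
}

void HAL_UARTEx_RxEventCallback(UART_HandleTypeDef *huart, uint16_t Size)
{
  /* Size counts bytes from the start of rx_buf; huart->RxEventType says whether
     this is a half-transfer, transfer-complete or IDLE notification. A real
     handler would remember the previously processed position; this sketch just
     hands the whole received span to the consumer. */
  if (huart->Instance == USART2)
  {
    App_ProcessFrame(rx_buf, Size);

    /* Re-arm only when the driver has finished the previous transfer: after an
       IDLE or transfer-complete event RxState is back to READY, whereas after a
       half-transfer event the DMA stream is still running. */
    if (huart->RxEventType != HAL_UART_RXEVENT_HT)
    {
      (void)HAL_UARTEx_ReceiveToIdle_DMA(huart, rx_buf, RX_BUF_LEN);
    }
  }
}
/* --- end of illustrative sketch --- */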
/* Call user Abort complete callback */ #if (USE_HAL_UART_REGISTER_CALLBACKS == 1) @@ -2898,6 +3508,7 @@ static void UART_DMARxOnlyAbortCallback(DMA_HandleTypeDef *hdma) /* Restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; /* Call user Abort complete callback */ #if (USE_HAL_UART_REGISTER_CALLBACKS == 1) @@ -2917,23 +3528,16 @@ static void UART_DMARxOnlyAbortCallback(DMA_HandleTypeDef *hdma) */ static HAL_StatusTypeDef UART_Transmit_IT(UART_HandleTypeDef *huart) { - uint16_t *tmp; + const uint16_t *tmp; /* Check that a Tx process is ongoing */ if (huart->gState == HAL_UART_STATE_BUSY_TX) { - if (huart->Init.WordLength == UART_WORDLENGTH_9B) + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) { - tmp = (uint16_t *) huart->pTxBuffPtr; + tmp = (const uint16_t *) huart->pTxBuffPtr; huart->Instance->DR = (uint16_t)(*tmp & (uint16_t)0x01FF); - if (huart->Init.Parity == UART_PARITY_NONE) - { - huart->pTxBuffPtr += 2U; - } - else - { - huart->pTxBuffPtr += 1U; - } + huart->pTxBuffPtr += 2U; } else { @@ -2942,7 +3546,7 @@ static HAL_StatusTypeDef UART_Transmit_IT(UART_HandleTypeDef *huart) if (--huart->TxXferCount == 0U) { - /* Disable the UART Transmit Complete Interrupt */ + /* Disable the UART Transmit Data Register Empty Interrupt */ __HAL_UART_DISABLE_IT(huart, UART_IT_TXE); /* Enable the UART Transmit Complete Interrupt */ @@ -2989,35 +3593,33 @@ static HAL_StatusTypeDef UART_EndTransmit_IT(UART_HandleTypeDef *huart) */ static HAL_StatusTypeDef UART_Receive_IT(UART_HandleTypeDef *huart) { - uint16_t *tmp; + uint8_t *pdata8bits; + uint16_t *pdata16bits; /* Check that a Rx process is ongoing */ if (huart->RxState == HAL_UART_STATE_BUSY_RX) { - if (huart->Init.WordLength == UART_WORDLENGTH_9B) + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) { - tmp = (uint16_t *) huart->pRxBuffPtr; - if (huart->Init.Parity == UART_PARITY_NONE) - { - *tmp = (uint16_t)(huart->Instance->DR & (uint16_t)0x01FF); - huart->pRxBuffPtr += 2U; - } - else - { - *tmp = (uint16_t)(huart->Instance->DR & (uint16_t)0x00FF); - huart->pRxBuffPtr += 1U; - } + pdata8bits = NULL; + pdata16bits = (uint16_t *) huart->pRxBuffPtr; + *pdata16bits = (uint16_t)(huart->Instance->DR & (uint16_t)0x01FF); + huart->pRxBuffPtr += 2U; } else { - if (huart->Init.Parity == UART_PARITY_NONE) + pdata8bits = (uint8_t *) huart->pRxBuffPtr; + pdata16bits = NULL; + + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) || ((huart->Init.WordLength == UART_WORDLENGTH_8B) && (huart->Init.Parity == UART_PARITY_NONE))) { - *huart->pRxBuffPtr++ = (uint8_t)(huart->Instance->DR & (uint8_t)0x00FF); + *pdata8bits = (uint8_t)(huart->Instance->DR & (uint8_t)0x00FF); } else { - *huart->pRxBuffPtr++ = (uint8_t)(huart->Instance->DR & (uint8_t)0x007F); + *pdata8bits = (uint8_t)(huart->Instance->DR & (uint8_t)0x007F); } + huart->pRxBuffPtr += 1U; } if (--huart->RxXferCount == 0U) @@ -3034,13 +3636,45 @@ static HAL_StatusTypeDef UART_Receive_IT(UART_HandleTypeDef *huart) /* Rx process is completed, restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; + /* Initialize type of RxEvent to Transfer Complete */ + huart->RxEventType = HAL_UART_RXEVENT_TC; + + /* Check current reception Mode : + If Reception till IDLE event has been selected : */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + /* Set reception type to Standard */ + huart->ReceptionType = 
HAL_UART_RECEPTION_STANDARD; + + /* Disable IDLE interrupt */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + + /* Check if IDLE flag is set */ + if (__HAL_UART_GET_FLAG(huart, UART_FLAG_IDLE)) + { + /* Clear IDLE flag in ISR */ + __HAL_UART_CLEAR_IDLEFLAG(huart); + } + #if (USE_HAL_UART_REGISTER_CALLBACKS == 1) - /*Call registered Rx complete callback*/ - huart->RxCpltCallback(huart); + /*Call registered Rx Event callback*/ + huart->RxEventCallback(huart, huart->RxXferSize); #else - /*Call legacy weak Rx complete callback*/ - HAL_UART_RxCpltCallback(huart); + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, huart->RxXferSize); #endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + else + { + /* Standard reception API called */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx complete callback*/ + huart->RxCpltCallback(huart); +#else + /*Call legacy weak Rx complete callback*/ + HAL_UART_RxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } return HAL_OK; } @@ -3090,62 +3724,35 @@ static void UART_SetConfig(UART_HandleTypeDef *huart) /* Configure the UART HFC: Set CTSE and RTSE bits according to huart->Init.HwFlowCtl value */ MODIFY_REG(huart->Instance->CR3, (USART_CR3_RTSE | USART_CR3_CTSE), huart->Init.HwFlowCtl); - /* Check the Over Sampling */ - if (huart->Init.OverSampling == UART_OVERSAMPLING_8) - { - /*-------------------------- USART BRR Configuration ---------------------*/ + #if defined(USART6) && defined(UART9) && defined(UART10) if ((huart->Instance == USART1) || (huart->Instance == USART6) || (huart->Instance == UART9) || (huart->Instance == UART10)) { pclk = HAL_RCC_GetPCLK2Freq(); - huart->Instance->BRR = UART_BRR_SAMPLING8(pclk, huart->Init.BaudRate); } #elif defined(USART6) if ((huart->Instance == USART1) || (huart->Instance == USART6)) { pclk = HAL_RCC_GetPCLK2Freq(); - huart->Instance->BRR = UART_BRR_SAMPLING8(pclk, huart->Init.BaudRate); } #else if (huart->Instance == USART1) { pclk = HAL_RCC_GetPCLK2Freq(); - huart->Instance->BRR = UART_BRR_SAMPLING8(pclk, huart->Init.BaudRate); } #endif /* USART6 */ else { pclk = HAL_RCC_GetPCLK1Freq(); - huart->Instance->BRR = UART_BRR_SAMPLING8(pclk, huart->Init.BaudRate); } + /*-------------------------- USART BRR Configuration ---------------------*/ + if (huart->Init.OverSampling == UART_OVERSAMPLING_8) + { + huart->Instance->BRR = UART_BRR_SAMPLING8(pclk, huart->Init.BaudRate); } else { - /*-------------------------- USART BRR Configuration ---------------------*/ -#if defined(USART6) && defined(UART9) && defined(UART10) - if ((huart->Instance == USART1) || (huart->Instance == USART6) || (huart->Instance == UART9) || (huart->Instance == UART10)) - { - pclk = HAL_RCC_GetPCLK2Freq(); - huart->Instance->BRR = UART_BRR_SAMPLING16(pclk, huart->Init.BaudRate); - } -#elif defined(USART6) - if ((huart->Instance == USART1) || (huart->Instance == USART6)) - { - pclk = HAL_RCC_GetPCLK2Freq(); - huart->Instance->BRR = UART_BRR_SAMPLING16(pclk, huart->Init.BaudRate); - } -#else - if (huart->Instance == USART1) - { - pclk = HAL_RCC_GetPCLK2Freq(); - huart->Instance->BRR = UART_BRR_SAMPLING16(pclk, huart->Init.BaudRate); - } -#endif /* USART6 */ - else - { - pclk = HAL_RCC_GetPCLK1Freq(); - huart->Instance->BRR = UART_BRR_SAMPLING16(pclk, huart->Init.BaudRate); - } + huart->Instance->BRR = UART_BRR_SAMPLING16(pclk, huart->Init.BaudRate); } } @@ -3162,4 +3769,3 @@ static void UART_SetConfig(UART_HandleTypeDef *huart) * @} */ -/************************ (C) COPYRIGHT 
STMicroelectronics *****END OF FILE****/ diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_ll_adc.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_ll_adc.c new file mode 100644 index 00000000..cdb750fe --- /dev/null +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_ll_adc.c @@ -0,0 +1,922 @@ +/** + ****************************************************************************** + * @file stm32f4xx_ll_adc.c + * @author MCD Application Team + * @brief ADC LL module driver + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ +#if defined(USE_FULL_LL_DRIVER) + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f4xx_ll_adc.h" +#include "stm32f4xx_ll_bus.h" + +#ifdef USE_FULL_ASSERT +#include "stm32_assert.h" +#else +#define assert_param(expr) ((void)0U) +#endif + +/** @addtogroup STM32F4xx_LL_Driver + * @{ + */ + +#if defined (ADC1) || defined (ADC2) || defined (ADC3) + +/** @addtogroup ADC_LL ADC + * @{ + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ + +/** @addtogroup ADC_LL_Private_Macros + * @{ + */ + +/* Check of parameters for configuration of ADC hierarchical scope: */ +/* common to several ADC instances. */ +#define IS_LL_ADC_COMMON_CLOCK(__CLOCK__) \ + ( ((__CLOCK__) == LL_ADC_CLOCK_SYNC_PCLK_DIV2) \ + || ((__CLOCK__) == LL_ADC_CLOCK_SYNC_PCLK_DIV4) \ + || ((__CLOCK__) == LL_ADC_CLOCK_SYNC_PCLK_DIV6) \ + || ((__CLOCK__) == LL_ADC_CLOCK_SYNC_PCLK_DIV8) \ + ) + +/* Check of parameters for configuration of ADC hierarchical scope: */ +/* ADC instance. 
*/ +#define IS_LL_ADC_RESOLUTION(__RESOLUTION__) \ + ( ((__RESOLUTION__) == LL_ADC_RESOLUTION_12B) \ + || ((__RESOLUTION__) == LL_ADC_RESOLUTION_10B) \ + || ((__RESOLUTION__) == LL_ADC_RESOLUTION_8B) \ + || ((__RESOLUTION__) == LL_ADC_RESOLUTION_6B) \ + ) + +#define IS_LL_ADC_DATA_ALIGN(__DATA_ALIGN__) \ + ( ((__DATA_ALIGN__) == LL_ADC_DATA_ALIGN_RIGHT) \ + || ((__DATA_ALIGN__) == LL_ADC_DATA_ALIGN_LEFT) \ + ) + +#define IS_LL_ADC_SCAN_SELECTION(__SCAN_SELECTION__) \ + ( ((__SCAN_SELECTION__) == LL_ADC_SEQ_SCAN_DISABLE) \ + || ((__SCAN_SELECTION__) == LL_ADC_SEQ_SCAN_ENABLE) \ + ) + +#define IS_LL_ADC_SEQ_SCAN_MODE(__SEQ_SCAN_MODE__) \ + ( ((__SCAN_MODE__) == LL_ADC_SEQ_SCAN_DISABLE) \ + || ((__SCAN_MODE__) == LL_ADC_SEQ_SCAN_ENABLE) \ + ) + +/* Check of parameters for configuration of ADC hierarchical scope: */ +/* ADC group regular */ +#define IS_LL_ADC_REG_TRIG_SOURCE(__REG_TRIG_SOURCE__) \ + ( ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_SOFTWARE) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM1_CH1) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM1_CH2) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM1_CH3) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM2_CH2) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM2_CH3) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM2_CH4) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM2_TRGO) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM3_CH1) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM3_TRGO) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM4_CH4) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM5_CH1) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM5_CH2) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM5_CH3) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM8_CH1) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_TIM8_TRGO) \ + || ((__REG_TRIG_SOURCE__) == LL_ADC_REG_TRIG_EXT_EXTI_LINE11) \ + ) +#define IS_LL_ADC_REG_CONTINUOUS_MODE(__REG_CONTINUOUS_MODE__) \ + ( ((__REG_CONTINUOUS_MODE__) == LL_ADC_REG_CONV_SINGLE) \ + || ((__REG_CONTINUOUS_MODE__) == LL_ADC_REG_CONV_CONTINUOUS) \ + ) + +#define IS_LL_ADC_REG_DMA_TRANSFER(__REG_DMA_TRANSFER__) \ + ( ((__REG_DMA_TRANSFER__) == LL_ADC_REG_DMA_TRANSFER_NONE) \ + || ((__REG_DMA_TRANSFER__) == LL_ADC_REG_DMA_TRANSFER_LIMITED) \ + || ((__REG_DMA_TRANSFER__) == LL_ADC_REG_DMA_TRANSFER_UNLIMITED) \ + ) + +#define IS_LL_ADC_REG_FLAG_EOC_SELECTION(__REG_FLAG_EOC_SELECTION__) \ + ( ((__REG_FLAG_EOC_SELECTION__) == LL_ADC_REG_FLAG_EOC_SEQUENCE_CONV) \ + || ((__REG_FLAG_EOC_SELECTION__) == LL_ADC_REG_FLAG_EOC_UNITARY_CONV) \ + ) + +#define IS_LL_ADC_REG_SEQ_SCAN_LENGTH(__REG_SEQ_SCAN_LENGTH__) \ + ( ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_DISABLE) \ + || ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_2RANKS) \ + || ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_3RANKS) \ + || ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_4RANKS) \ + || ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_5RANKS) \ + || ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_6RANKS) \ + || ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_7RANKS) \ + || ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_8RANKS) \ + || ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_9RANKS) \ + || ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_10RANKS) \ + || ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_11RANKS) \ + || 
((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_12RANKS) \ + || ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_13RANKS) \ + || ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_14RANKS) \ + || ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_15RANKS) \ + || ((__REG_SEQ_SCAN_LENGTH__) == LL_ADC_REG_SEQ_SCAN_ENABLE_16RANKS) \ + ) + +#define IS_LL_ADC_REG_SEQ_SCAN_DISCONT_MODE(__REG_SEQ_DISCONT_MODE__) \ + ( ((__REG_SEQ_DISCONT_MODE__) == LL_ADC_REG_SEQ_DISCONT_DISABLE) \ + || ((__REG_SEQ_DISCONT_MODE__) == LL_ADC_REG_SEQ_DISCONT_1RANK) \ + || ((__REG_SEQ_DISCONT_MODE__) == LL_ADC_REG_SEQ_DISCONT_2RANKS) \ + || ((__REG_SEQ_DISCONT_MODE__) == LL_ADC_REG_SEQ_DISCONT_3RANKS) \ + || ((__REG_SEQ_DISCONT_MODE__) == LL_ADC_REG_SEQ_DISCONT_4RANKS) \ + || ((__REG_SEQ_DISCONT_MODE__) == LL_ADC_REG_SEQ_DISCONT_5RANKS) \ + || ((__REG_SEQ_DISCONT_MODE__) == LL_ADC_REG_SEQ_DISCONT_6RANKS) \ + || ((__REG_SEQ_DISCONT_MODE__) == LL_ADC_REG_SEQ_DISCONT_7RANKS) \ + || ((__REG_SEQ_DISCONT_MODE__) == LL_ADC_REG_SEQ_DISCONT_8RANKS) \ + ) + +/* Check of parameters for configuration of ADC hierarchical scope: */ +/* ADC group injected */ +#define IS_LL_ADC_INJ_TRIG_SOURCE(__INJ_TRIG_SOURCE__) \ + ( ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_SOFTWARE) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM1_CH4) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM1_TRGO) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM2_CH1) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM2_TRGO) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM3_CH2) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM3_CH4) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM4_CH1) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM4_CH2) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM4_CH3) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM4_TRGO) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM5_CH4) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM5_TRGO) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM8_CH2) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM8_CH3) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_TIM8_CH4) \ + || ((__INJ_TRIG_SOURCE__) == LL_ADC_INJ_TRIG_EXT_EXTI_LINE15) \ + ) + +#define IS_LL_ADC_INJ_TRIG_EXT_EDGE(__INJ_TRIG_EXT_EDGE__) \ + ( ((__INJ_TRIG_EXT_EDGE__) == LL_ADC_INJ_TRIG_EXT_RISING) \ + || ((__INJ_TRIG_EXT_EDGE__) == LL_ADC_INJ_TRIG_EXT_FALLING) \ + || ((__INJ_TRIG_EXT_EDGE__) == LL_ADC_INJ_TRIG_EXT_RISINGFALLING) \ + ) + +#define IS_LL_ADC_INJ_TRIG_AUTO(__INJ_TRIG_AUTO__) \ + ( ((__INJ_TRIG_AUTO__) == LL_ADC_INJ_TRIG_INDEPENDENT) \ + || ((__INJ_TRIG_AUTO__) == LL_ADC_INJ_TRIG_FROM_GRP_REGULAR) \ + ) + +#define IS_LL_ADC_INJ_SEQ_SCAN_LENGTH(__INJ_SEQ_SCAN_LENGTH__) \ + ( ((__INJ_SEQ_SCAN_LENGTH__) == LL_ADC_INJ_SEQ_SCAN_DISABLE) \ + || ((__INJ_SEQ_SCAN_LENGTH__) == LL_ADC_INJ_SEQ_SCAN_ENABLE_2RANKS) \ + || ((__INJ_SEQ_SCAN_LENGTH__) == LL_ADC_INJ_SEQ_SCAN_ENABLE_3RANKS) \ + || ((__INJ_SEQ_SCAN_LENGTH__) == LL_ADC_INJ_SEQ_SCAN_ENABLE_4RANKS) \ + ) + +#define IS_LL_ADC_INJ_SEQ_SCAN_DISCONT_MODE(__INJ_SEQ_DISCONT_MODE__) \ + ( ((__INJ_SEQ_DISCONT_MODE__) == LL_ADC_INJ_SEQ_DISCONT_DISABLE) \ + || ((__INJ_SEQ_DISCONT_MODE__) == LL_ADC_INJ_SEQ_DISCONT_1RANK) \ + ) + +#if defined(ADC_MULTIMODE_SUPPORT) +/* Check of parameters for configuration of ADC hierarchical scope: */ +/* multimode. 
*/ +#if defined(ADC3) +#define IS_LL_ADC_MULTI_MODE(__MULTI_MODE__) \ + ( ((__MULTI_MODE__) == LL_ADC_MULTI_INDEPENDENT) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_DUAL_REG_SIMULT) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_DUAL_REG_INTERL) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_DUAL_INJ_SIMULT) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_DUAL_INJ_ALTERN) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_DUAL_REG_SIM_INJ_SIM) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_DUAL_REG_SIM_INJ_ALT) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_DUAL_REG_INT_INJ_SIM) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_TRIPLE_REG_SIM_INJ_SIM) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_TRIPLE_REG_SIM_INJ_ALT) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_TRIPLE_INJ_SIMULT) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_TRIPLE_REG_SIMULT) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_TRIPLE_REG_INTERL) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_TRIPLE_INJ_ALTERN) \ + ) +#else +#define IS_LL_ADC_MULTI_MODE(__MULTI_MODE__) \ + ( ((__MULTI_MODE__) == LL_ADC_MULTI_INDEPENDENT) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_DUAL_REG_SIMULT) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_DUAL_REG_INTERL) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_DUAL_INJ_SIMULT) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_DUAL_INJ_ALTERN) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_DUAL_REG_SIM_INJ_SIM) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_DUAL_REG_SIM_INJ_ALT) \ + || ((__MULTI_MODE__) == LL_ADC_MULTI_DUAL_REG_INT_INJ_SIM) \ + ) +#endif + +#define IS_LL_ADC_MULTI_DMA_TRANSFER(__MULTI_DMA_TRANSFER__) \ + ( ((__MULTI_DMA_TRANSFER__) == LL_ADC_MULTI_REG_DMA_EACH_ADC) \ + || ((__MULTI_DMA_TRANSFER__) == LL_ADC_MULTI_REG_DMA_LIMIT_1) \ + || ((__MULTI_DMA_TRANSFER__) == LL_ADC_MULTI_REG_DMA_LIMIT_2) \ + || ((__MULTI_DMA_TRANSFER__) == LL_ADC_MULTI_REG_DMA_LIMIT_3) \ + || ((__MULTI_DMA_TRANSFER__) == LL_ADC_MULTI_REG_DMA_UNLMT_1) \ + || ((__MULTI_DMA_TRANSFER__) == LL_ADC_MULTI_REG_DMA_UNLMT_2) \ + || ((__MULTI_DMA_TRANSFER__) == LL_ADC_MULTI_REG_DMA_UNLMT_3) \ + ) + +#define IS_LL_ADC_MULTI_TWOSMP_DELAY(__MULTI_TWOSMP_DELAY__) \ + ( ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_5CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_6CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_7CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_8CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_9CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_10CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_11CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_12CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_13CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_14CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_15CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_16CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_17CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_18CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_19CYCLES) \ + || ((__MULTI_TWOSMP_DELAY__) == LL_ADC_MULTI_TWOSMP_DELAY_20CYCLES) \ + ) + +#define IS_LL_ADC_MULTI_MASTER_SLAVE(__MULTI_MASTER_SLAVE__) \ + ( ((__MULTI_MASTER_SLAVE__) == LL_ADC_MULTI_MASTER) \ + || ((__MULTI_MASTER_SLAVE__) == LL_ADC_MULTI_SLAVE) \ + || ((__MULTI_MASTER_SLAVE__) == LL_ADC_MULTI_MASTER_SLAVE) \ + ) + +#endif /* ADC_MULTIMODE_SUPPORT */ +/** + * @} + */ + + +/* Private function prototypes 
-----------------------------------------------*/ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup ADC_LL_Exported_Functions + * @{ + */ + +/** @addtogroup ADC_LL_EF_Init + * @{ + */ + +/** + * @brief De-initialize registers of all ADC instances belonging to + * the same ADC common instance to their default reset values. + * @param ADCxy_COMMON ADC common instance + * (can be set directly from CMSIS definition or by using helper macro @ref __LL_ADC_COMMON_INSTANCE() ) + * @retval An ErrorStatus enumeration value: + * - SUCCESS: ADC common registers are de-initialized + * - ERROR: not applicable + */ +ErrorStatus LL_ADC_CommonDeInit(ADC_Common_TypeDef *ADCxy_COMMON) +{ + /* Check the parameters */ + assert_param(IS_ADC_COMMON_INSTANCE(ADCxy_COMMON)); + + + /* Force reset of ADC clock (core clock) */ + LL_APB2_GRP1_ForceReset(LL_APB2_GRP1_PERIPH_ADC); + + /* Release reset of ADC clock (core clock) */ + LL_APB2_GRP1_ReleaseReset(LL_APB2_GRP1_PERIPH_ADC); + + return SUCCESS; +} + +/** + * @brief Initialize some features of ADC common parameters + * (all ADC instances belonging to the same ADC common instance) + * and multimode (for devices with several ADC instances available). + * @note The setting of ADC common parameters is conditioned to + * ADC instances state: + * All ADC instances belonging to the same ADC common instance + * must be disabled. + * @param ADCxy_COMMON ADC common instance + * (can be set directly from CMSIS definition or by using helper macro @ref __LL_ADC_COMMON_INSTANCE() ) + * @param ADC_CommonInitStruct Pointer to a @ref LL_ADC_CommonInitTypeDef structure + * @retval An ErrorStatus enumeration value: + * - SUCCESS: ADC common registers are initialized + * - ERROR: ADC common registers are not initialized + */ +ErrorStatus LL_ADC_CommonInit(ADC_Common_TypeDef *ADCxy_COMMON, LL_ADC_CommonInitTypeDef *ADC_CommonInitStruct) +{ + ErrorStatus status = SUCCESS; + + /* Check the parameters */ + assert_param(IS_ADC_COMMON_INSTANCE(ADCxy_COMMON)); + assert_param(IS_LL_ADC_COMMON_CLOCK(ADC_CommonInitStruct->CommonClock)); + +#if defined(ADC_MULTIMODE_SUPPORT) + assert_param(IS_LL_ADC_MULTI_MODE(ADC_CommonInitStruct->Multimode)); + if (ADC_CommonInitStruct->Multimode != LL_ADC_MULTI_INDEPENDENT) + { + assert_param(IS_LL_ADC_MULTI_DMA_TRANSFER(ADC_CommonInitStruct->MultiDMATransfer)); + assert_param(IS_LL_ADC_MULTI_TWOSMP_DELAY(ADC_CommonInitStruct->MultiTwoSamplingDelay)); + } +#endif /* ADC_MULTIMODE_SUPPORT */ + + /* Note: Hardware constraint (refer to description of functions */ + /* "LL_ADC_SetCommonXXX()" and "LL_ADC_SetMultiXXX()"): */ + /* On this STM32 series, setting of these features is conditioned to */ + /* ADC state: */ + /* All ADC instances of the ADC common group must be disabled. 
*/ + if (__LL_ADC_IS_ENABLED_ALL_COMMON_INSTANCE(ADCxy_COMMON) == 0UL) + { + /* Configuration of ADC hierarchical scope: */ + /* - common to several ADC */ + /* (all ADC instances belonging to the same ADC common instance) */ + /* - Set ADC clock (conversion clock) */ + /* - multimode (if several ADC instances available on the */ + /* selected device) */ + /* - Set ADC multimode configuration */ + /* - Set ADC multimode DMA transfer */ + /* - Set ADC multimode: delay between 2 sampling phases */ +#if defined(ADC_MULTIMODE_SUPPORT) + if (ADC_CommonInitStruct->Multimode != LL_ADC_MULTI_INDEPENDENT) + { + MODIFY_REG(ADCxy_COMMON->CCR, + ADC_CCR_ADCPRE + | ADC_CCR_MULTI + | ADC_CCR_DMA + | ADC_CCR_DDS + | ADC_CCR_DELAY + , + ADC_CommonInitStruct->CommonClock + | ADC_CommonInitStruct->Multimode + | ADC_CommonInitStruct->MultiDMATransfer + | ADC_CommonInitStruct->MultiTwoSamplingDelay + ); + } + else + { + MODIFY_REG(ADCxy_COMMON->CCR, + ADC_CCR_ADCPRE + | ADC_CCR_MULTI + | ADC_CCR_DMA + | ADC_CCR_DDS + | ADC_CCR_DELAY + , + ADC_CommonInitStruct->CommonClock + | LL_ADC_MULTI_INDEPENDENT + ); + } +#else + LL_ADC_SetCommonClock(ADCxy_COMMON, ADC_CommonInitStruct->CommonClock); +#endif + } + else + { + /* Initialization error: One or several ADC instances belonging to */ + /* the same ADC common instance are not disabled. */ + status = ERROR; + } + + return status; +} + +/** + * @brief Set each @ref LL_ADC_CommonInitTypeDef field to default value. + * @param ADC_CommonInitStruct Pointer to a @ref LL_ADC_CommonInitTypeDef structure + * whose fields will be set to default values. + * @retval None + */ +void LL_ADC_CommonStructInit(LL_ADC_CommonInitTypeDef *ADC_CommonInitStruct) +{ + /* Set ADC_CommonInitStruct fields to default values */ + /* Set fields of ADC common */ + /* (all ADC instances belonging to the same ADC common instance) */ + ADC_CommonInitStruct->CommonClock = LL_ADC_CLOCK_SYNC_PCLK_DIV2; + +#if defined(ADC_MULTIMODE_SUPPORT) + /* Set fields of ADC multimode */ + ADC_CommonInitStruct->Multimode = LL_ADC_MULTI_INDEPENDENT; + ADC_CommonInitStruct->MultiDMATransfer = LL_ADC_MULTI_REG_DMA_EACH_ADC; + ADC_CommonInitStruct->MultiTwoSamplingDelay = LL_ADC_MULTI_TWOSMP_DELAY_5CYCLES; +#endif /* ADC_MULTIMODE_SUPPORT */ +} + +/** + * @brief De-initialize registers of the selected ADC instance + * to their default reset values. + * @note To reset all ADC instances quickly (perform a hard reset), + * use function @ref LL_ADC_CommonDeInit(). + * @param ADCx ADC instance + * @retval An ErrorStatus enumeration value: + * - SUCCESS: ADC registers are de-initialized + * - ERROR: ADC registers are not de-initialized + */ +ErrorStatus LL_ADC_DeInit(ADC_TypeDef *ADCx) +{ + ErrorStatus status = SUCCESS; + + /* Check the parameters */ + assert_param(IS_ADC_ALL_INSTANCE(ADCx)); + + /* Disable ADC instance if not already disabled. */ + if (LL_ADC_IsEnabled(ADCx) == 1UL) + { + /* Set ADC group regular trigger source to SW start to ensure to not */ + /* have an external trigger event occurring during the conversion stop */ + /* ADC disable process. */ + LL_ADC_REG_SetTriggerSource(ADCx, LL_ADC_REG_TRIG_SOFTWARE); + + /* Set ADC group injected trigger source to SW start to ensure to not */ + /* have an external trigger event occurring during the conversion stop */ + /* ADC disable process. 
*/ + LL_ADC_INJ_SetTriggerSource(ADCx, LL_ADC_INJ_TRIG_SOFTWARE); + + /* Disable the ADC instance */ + LL_ADC_Disable(ADCx); + } + + /* Check whether ADC state is compliant with expected state */ + /* (hardware requirements of bits state to reset registers below) */ + if (READ_BIT(ADCx->CR2, ADC_CR2_ADON) == 0UL) + { + /* ========== Reset ADC registers ========== */ + /* Reset register SR */ + CLEAR_BIT(ADCx->SR, + (LL_ADC_FLAG_STRT + | LL_ADC_FLAG_JSTRT + | LL_ADC_FLAG_EOCS + | LL_ADC_FLAG_OVR + | LL_ADC_FLAG_JEOS + | LL_ADC_FLAG_AWD1) + ); + + /* Reset register CR1 */ + CLEAR_BIT(ADCx->CR1, + (ADC_CR1_OVRIE | ADC_CR1_RES | ADC_CR1_AWDEN + | ADC_CR1_JAWDEN + | ADC_CR1_DISCNUM | ADC_CR1_JDISCEN | ADC_CR1_DISCEN + | ADC_CR1_JAUTO | ADC_CR1_AWDSGL | ADC_CR1_SCAN + | ADC_CR1_JEOCIE | ADC_CR1_AWDIE | ADC_CR1_EOCIE + | ADC_CR1_AWDCH) + ); + + /* Reset register CR2 */ + CLEAR_BIT(ADCx->CR2, + (ADC_CR2_SWSTART | ADC_CR2_EXTEN | ADC_CR2_EXTSEL + | ADC_CR2_JSWSTART | ADC_CR2_JEXTEN | ADC_CR2_JEXTSEL + | ADC_CR2_ALIGN | ADC_CR2_EOCS + | ADC_CR2_DDS | ADC_CR2_DMA + | ADC_CR2_CONT | ADC_CR2_ADON) + ); + + /* Reset register SMPR1 */ + CLEAR_BIT(ADCx->SMPR1, + (ADC_SMPR1_SMP18 | ADC_SMPR1_SMP17 | ADC_SMPR1_SMP16 + | ADC_SMPR1_SMP15 | ADC_SMPR1_SMP14 | ADC_SMPR1_SMP13 + | ADC_SMPR1_SMP12 | ADC_SMPR1_SMP11 | ADC_SMPR1_SMP10) + ); + + /* Reset register SMPR2 */ + CLEAR_BIT(ADCx->SMPR2, + (ADC_SMPR2_SMP9 + | ADC_SMPR2_SMP8 | ADC_SMPR2_SMP7 | ADC_SMPR2_SMP6 + | ADC_SMPR2_SMP5 | ADC_SMPR2_SMP4 | ADC_SMPR2_SMP3 + | ADC_SMPR2_SMP2 | ADC_SMPR2_SMP1 | ADC_SMPR2_SMP0) + ); + + /* Reset register JOFR1 */ + CLEAR_BIT(ADCx->JOFR1, ADC_JOFR1_JOFFSET1); + /* Reset register JOFR2 */ + CLEAR_BIT(ADCx->JOFR2, ADC_JOFR2_JOFFSET2); + /* Reset register JOFR3 */ + CLEAR_BIT(ADCx->JOFR3, ADC_JOFR3_JOFFSET3); + /* Reset register JOFR4 */ + CLEAR_BIT(ADCx->JOFR4, ADC_JOFR4_JOFFSET4); + + /* Reset register HTR */ + SET_BIT(ADCx->HTR, ADC_HTR_HT); + /* Reset register LTR */ + CLEAR_BIT(ADCx->LTR, ADC_LTR_LT); + + /* Reset register SQR1 */ + CLEAR_BIT(ADCx->SQR1, + (ADC_SQR1_L + | ADC_SQR1_SQ16 + | ADC_SQR1_SQ15 | ADC_SQR1_SQ14 | ADC_SQR1_SQ13) + ); + + /* Reset register SQR2 */ + CLEAR_BIT(ADCx->SQR2, + (ADC_SQR2_SQ12 | ADC_SQR2_SQ11 | ADC_SQR2_SQ10 + | ADC_SQR2_SQ9 | ADC_SQR2_SQ8 | ADC_SQR2_SQ7) + ); + + /* Reset register SQR3 */ + CLEAR_BIT(ADCx->SQR3, + (ADC_SQR3_SQ6 | ADC_SQR3_SQ5 | ADC_SQR3_SQ4 + | ADC_SQR3_SQ3 | ADC_SQR3_SQ2 | ADC_SQR3_SQ1) + ); + + /* Reset register JSQR */ + CLEAR_BIT(ADCx->JSQR, + (ADC_JSQR_JL + | ADC_JSQR_JSQ4 | ADC_JSQR_JSQ3 + | ADC_JSQR_JSQ2 | ADC_JSQR_JSQ1) + ); + + /* Reset register DR */ + /* bits in access mode read only, no direct reset applicable */ + + /* Reset registers JDR1, JDR2, JDR3, JDR4 */ + /* bits in access mode read only, no direct reset applicable */ + + /* Reset register CCR */ + CLEAR_BIT(ADC->CCR, ADC_CCR_TSVREFE | ADC_CCR_ADCPRE); + } + + return status; +} + +/** + * @brief Initialize some features of ADC instance. + * @note These parameters have an impact on ADC scope: ADC instance. + * Affects both group regular and group injected (availability + * of ADC group injected depends on STM32 families). + * Refer to corresponding unitary functions into + * @ref ADC_LL_EF_Configuration_ADC_Instance . + * @note The setting of these parameters by function @ref LL_ADC_Init() + * is conditioned to ADC state: + * ADC instance must be disabled. + * This condition is applied to all ADC features, for efficiency + * and compatibility over all STM32 families. 
However, the different + * features can be set under different ADC state conditions + * (setting possible with ADC enabled without conversion on going, + * ADC enabled with conversion on going, ...) + * Each feature can be updated afterwards with a unitary function + * and potentially with ADC in a different state than disabled, + * refer to description of each function for setting + * conditioned to ADC state. + * @note After using this function, some other features must be configured + * using LL unitary functions. + * The minimum configuration remaining to be done is: + * - Set ADC group regular or group injected sequencer: + * map channel on the selected sequencer rank. + * Refer to function @ref LL_ADC_REG_SetSequencerRanks(). + * - Set ADC channel sampling time + * Refer to function LL_ADC_SetChannelSamplingTime(); + * @param ADCx ADC instance + * @param ADC_InitStruct Pointer to a @ref LL_ADC_REG_InitTypeDef structure + * @retval An ErrorStatus enumeration value: + * - SUCCESS: ADC registers are initialized + * - ERROR: ADC registers are not initialized + */ +ErrorStatus LL_ADC_Init(ADC_TypeDef *ADCx, LL_ADC_InitTypeDef *ADC_InitStruct) +{ + ErrorStatus status = SUCCESS; + + /* Check the parameters */ + assert_param(IS_ADC_ALL_INSTANCE(ADCx)); + + assert_param(IS_LL_ADC_RESOLUTION(ADC_InitStruct->Resolution)); + assert_param(IS_LL_ADC_DATA_ALIGN(ADC_InitStruct->DataAlignment)); + assert_param(IS_LL_ADC_SCAN_SELECTION(ADC_InitStruct->SequencersScanMode)); + + /* Note: Hardware constraint (refer to description of this function): */ + /* ADC instance must be disabled. */ + if (LL_ADC_IsEnabled(ADCx) == 0UL) + { + /* Configuration of ADC hierarchical scope: */ + /* - ADC instance */ + /* - Set ADC data resolution */ + /* - Set ADC conversion data alignment */ + MODIFY_REG(ADCx->CR1, + ADC_CR1_RES + | ADC_CR1_SCAN + , + ADC_InitStruct->Resolution + | ADC_InitStruct->SequencersScanMode + ); + + MODIFY_REG(ADCx->CR2, + ADC_CR2_ALIGN + , + ADC_InitStruct->DataAlignment + ); + + } + else + { + /* Initialization error: ADC instance is not disabled. */ + status = ERROR; + } + return status; +} + +/** + * @brief Set each @ref LL_ADC_InitTypeDef field to default value. + * @param ADC_InitStruct Pointer to a @ref LL_ADC_InitTypeDef structure + * whose fields will be set to default values. + * @retval None + */ +void LL_ADC_StructInit(LL_ADC_InitTypeDef *ADC_InitStruct) +{ + /* Set ADC_InitStruct fields to default values */ + /* Set fields of ADC instance */ + ADC_InitStruct->Resolution = LL_ADC_RESOLUTION_12B; + ADC_InitStruct->DataAlignment = LL_ADC_DATA_ALIGN_RIGHT; + + /* Enable scan mode to have a generic behavior with ADC of other */ + /* STM32 families, without this setting available: */ + /* ADC group regular sequencer and ADC group injected sequencer depend */ + /* only of their own configuration. */ + ADC_InitStruct->SequencersScanMode = LL_ADC_SEQ_SCAN_ENABLE; + +} + +/** + * @brief Initialize some features of ADC group regular. + * @note These parameters have an impact on ADC scope: ADC group regular. + * Refer to corresponding unitary functions into + * @ref ADC_LL_EF_Configuration_ADC_Group_Regular + * (functions with prefix "REG"). + * @note The setting of these parameters by function @ref LL_ADC_Init() + * is conditioned to ADC state: + * ADC instance must be disabled. + * This condition is applied to all ADC features, for efficiency + * and compatibility over all STM32 families. 
However, the different + * features can be set under different ADC state conditions + * (setting possible with ADC enabled without conversion on going, + * ADC enabled with conversion on going, ...) + * Each feature can be updated afterwards with a unitary function + * and potentially with ADC in a different state than disabled, + * refer to description of each function for setting + * conditioned to ADC state. + * @note After using this function, other features must be configured + * using LL unitary functions. + * The minimum configuration remaining to be done is: + * - Set ADC group regular or group injected sequencer: + * map channel on the selected sequencer rank. + * Refer to function @ref LL_ADC_REG_SetSequencerRanks(). + * - Set ADC channel sampling time + * Refer to function LL_ADC_SetChannelSamplingTime(); + * @param ADCx ADC instance + * @param ADC_REG_InitStruct Pointer to a @ref LL_ADC_REG_InitTypeDef structure + * @retval An ErrorStatus enumeration value: + * - SUCCESS: ADC registers are initialized + * - ERROR: ADC registers are not initialized + */ +ErrorStatus LL_ADC_REG_Init(ADC_TypeDef *ADCx, LL_ADC_REG_InitTypeDef *ADC_REG_InitStruct) +{ + ErrorStatus status = SUCCESS; + + /* Check the parameters */ + assert_param(IS_ADC_ALL_INSTANCE(ADCx)); + assert_param(IS_LL_ADC_REG_TRIG_SOURCE(ADC_REG_InitStruct->TriggerSource)); + assert_param(IS_LL_ADC_REG_SEQ_SCAN_LENGTH(ADC_REG_InitStruct->SequencerLength)); + if (ADC_REG_InitStruct->SequencerLength != LL_ADC_REG_SEQ_SCAN_DISABLE) + { + assert_param(IS_LL_ADC_REG_SEQ_SCAN_DISCONT_MODE(ADC_REG_InitStruct->SequencerDiscont)); + } + assert_param(IS_LL_ADC_REG_CONTINUOUS_MODE(ADC_REG_InitStruct->ContinuousMode)); + assert_param(IS_LL_ADC_REG_DMA_TRANSFER(ADC_REG_InitStruct->DMATransfer)); + + /* ADC group regular continuous mode and discontinuous mode */ + /* can not be enabled simultenaeously */ + assert_param((ADC_REG_InitStruct->ContinuousMode == LL_ADC_REG_CONV_SINGLE) + || (ADC_REG_InitStruct->SequencerDiscont == LL_ADC_REG_SEQ_DISCONT_DISABLE)); + + /* Note: Hardware constraint (refer to description of this function): */ + /* ADC instance must be disabled. */ + if (LL_ADC_IsEnabled(ADCx) == 0UL) + { + /* Configuration of ADC hierarchical scope: */ + /* - ADC group regular */ + /* - Set ADC group regular trigger source */ + /* - Set ADC group regular sequencer length */ + /* - Set ADC group regular sequencer discontinuous mode */ + /* - Set ADC group regular continuous mode */ + /* - Set ADC group regular conversion data transfer: no transfer or */ + /* transfer by DMA, and DMA requests mode */ + /* Note: On this STM32 series, ADC trigger edge is set when starting */ + /* ADC conversion. */ + /* Refer to function @ref LL_ADC_REG_StartConversionExtTrig(). 
*/ + if (ADC_REG_InitStruct->SequencerLength != LL_ADC_REG_SEQ_SCAN_DISABLE) + { + MODIFY_REG(ADCx->CR1, + ADC_CR1_DISCEN + | ADC_CR1_DISCNUM + , + ADC_REG_InitStruct->SequencerDiscont + ); + } + else + { + MODIFY_REG(ADCx->CR1, + ADC_CR1_DISCEN + | ADC_CR1_DISCNUM + , + LL_ADC_REG_SEQ_DISCONT_DISABLE + ); + } + + MODIFY_REG(ADCx->CR2, + ADC_CR2_EXTSEL + | ADC_CR2_EXTEN + | ADC_CR2_CONT + | ADC_CR2_DMA + | ADC_CR2_DDS + , + (ADC_REG_InitStruct->TriggerSource & ADC_CR2_EXTSEL) + | ADC_REG_InitStruct->ContinuousMode + | ADC_REG_InitStruct->DMATransfer + ); + + /* Set ADC group regular sequencer length and scan direction */ + /* Note: Hardware constraint (refer to description of this function): */ + /* Note: If ADC instance feature scan mode is disabled */ + /* (refer to ADC instance initialization structure */ + /* parameter @ref SequencersScanMode */ + /* or function @ref LL_ADC_SetSequencersScanMode() ), */ + /* this parameter is discarded. */ + LL_ADC_REG_SetSequencerLength(ADCx, ADC_REG_InitStruct->SequencerLength); + } + else + { + /* Initialization error: ADC instance is not disabled. */ + status = ERROR; + } + return status; +} + +/** + * @brief Set each @ref LL_ADC_REG_InitTypeDef field to default value. + * @param ADC_REG_InitStruct Pointer to a @ref LL_ADC_REG_InitTypeDef structure + * whose fields will be set to default values. + * @retval None + */ +void LL_ADC_REG_StructInit(LL_ADC_REG_InitTypeDef *ADC_REG_InitStruct) +{ + /* Set ADC_REG_InitStruct fields to default values */ + /* Set fields of ADC group regular */ + /* Note: On this STM32 series, ADC trigger edge is set when starting */ + /* ADC conversion. */ + /* Refer to function @ref LL_ADC_REG_StartConversionExtTrig(). */ + ADC_REG_InitStruct->TriggerSource = LL_ADC_REG_TRIG_SOFTWARE; + ADC_REG_InitStruct->SequencerLength = LL_ADC_REG_SEQ_SCAN_DISABLE; + ADC_REG_InitStruct->SequencerDiscont = LL_ADC_REG_SEQ_DISCONT_DISABLE; + ADC_REG_InitStruct->ContinuousMode = LL_ADC_REG_CONV_SINGLE; + ADC_REG_InitStruct->DMATransfer = LL_ADC_REG_DMA_TRANSFER_NONE; +} + +/** + * @brief Initialize some features of ADC group injected. + * @note These parameters have an impact on ADC scope: ADC group injected. + * Refer to corresponding unitary functions into + * @ref ADC_LL_EF_Configuration_ADC_Group_Regular + * (functions with prefix "INJ"). + * @note The setting of these parameters by function @ref LL_ADC_Init() + * is conditioned to ADC state: + * ADC instance must be disabled. + * This condition is applied to all ADC features, for efficiency + * and compatibility over all STM32 families. However, the different + * features can be set under different ADC state conditions + * (setting possible with ADC enabled without conversion on going, + * ADC enabled with conversion on going, ...) + * Each feature can be updated afterwards with a unitary function + * and potentially with ADC in a different state than disabled, + * refer to description of each function for setting + * conditioned to ADC state. + * @note After using this function, other features must be configured + * using LL unitary functions. + * The minimum configuration remaining to be done is: + * - Set ADC group injected sequencer: + * map channel on the selected sequencer rank. + * Refer to function @ref LL_ADC_INJ_SetSequencerRanks(). 
+ * - Set ADC channel sampling time + * Refer to function LL_ADC_SetChannelSamplingTime(); + * @param ADCx ADC instance + * @param ADC_INJ_InitStruct Pointer to a @ref LL_ADC_INJ_InitTypeDef structure + * @retval An ErrorStatus enumeration value: + * - SUCCESS: ADC registers are initialized + * - ERROR: ADC registers are not initialized + */ +ErrorStatus LL_ADC_INJ_Init(ADC_TypeDef *ADCx, LL_ADC_INJ_InitTypeDef *ADC_INJ_InitStruct) +{ + ErrorStatus status = SUCCESS; + + /* Check the parameters */ + assert_param(IS_ADC_ALL_INSTANCE(ADCx)); + assert_param(IS_LL_ADC_INJ_TRIG_SOURCE(ADC_INJ_InitStruct->TriggerSource)); + assert_param(IS_LL_ADC_INJ_SEQ_SCAN_LENGTH(ADC_INJ_InitStruct->SequencerLength)); + if (ADC_INJ_InitStruct->SequencerLength != LL_ADC_INJ_SEQ_SCAN_DISABLE) + { + assert_param(IS_LL_ADC_INJ_SEQ_SCAN_DISCONT_MODE(ADC_INJ_InitStruct->SequencerDiscont)); + } + assert_param(IS_LL_ADC_INJ_TRIG_AUTO(ADC_INJ_InitStruct->TrigAuto)); + + /* Note: Hardware constraint (refer to description of this function): */ + /* ADC instance must be disabled. */ + if (LL_ADC_IsEnabled(ADCx) == 0UL) + { + /* Configuration of ADC hierarchical scope: */ + /* - ADC group injected */ + /* - Set ADC group injected trigger source */ + /* - Set ADC group injected sequencer length */ + /* - Set ADC group injected sequencer discontinuous mode */ + /* - Set ADC group injected conversion trigger: independent or */ + /* from ADC group regular */ + /* Note: On this STM32 series, ADC trigger edge is set when starting */ + /* ADC conversion. */ + /* Refer to function @ref LL_ADC_INJ_StartConversionExtTrig(). */ + if (ADC_INJ_InitStruct->SequencerLength != LL_ADC_REG_SEQ_SCAN_DISABLE) + { + MODIFY_REG(ADCx->CR1, + ADC_CR1_JDISCEN + | ADC_CR1_JAUTO + , + ADC_INJ_InitStruct->SequencerDiscont + | ADC_INJ_InitStruct->TrigAuto + ); + } + else + { + MODIFY_REG(ADCx->CR1, + ADC_CR1_JDISCEN + | ADC_CR1_JAUTO + , + LL_ADC_REG_SEQ_DISCONT_DISABLE + | ADC_INJ_InitStruct->TrigAuto + ); + } + + MODIFY_REG(ADCx->CR2, + ADC_CR2_JEXTSEL + | ADC_CR2_JEXTEN + , + (ADC_INJ_InitStruct->TriggerSource & ADC_CR2_JEXTSEL) + ); + + /* Note: Hardware constraint (refer to description of this function): */ + /* Note: If ADC instance feature scan mode is disabled */ + /* (refer to ADC instance initialization structure */ + /* parameter @ref SequencersScanMode */ + /* or function @ref LL_ADC_SetSequencersScanMode() ), */ + /* this parameter is discarded. */ + LL_ADC_INJ_SetSequencerLength(ADCx, ADC_INJ_InitStruct->SequencerLength); + } + else + { + /* Initialization error: ADC instance is not disabled. */ + status = ERROR; + } + return status; +} + +/** + * @brief Set each @ref LL_ADC_INJ_InitTypeDef field to default value. + * @param ADC_INJ_InitStruct Pointer to a @ref LL_ADC_INJ_InitTypeDef structure + * whose fields will be set to default values. 
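
A hedged sketch (not part of the changeset) of the "minimum configuration remaining" called out in the notes above, combining the group init functions with the unitary sequencer and sampling-time setters; the channel and sampling-time choices are placeholders:

    LL_ADC_REG_InitTypeDef reg_init;
    LL_ADC_INJ_InitTypeDef inj_init;

    LL_ADC_REG_StructInit(&reg_init);   /* software trigger, single conversion, no DMA */
    LL_ADC_INJ_StructInit(&inj_init);   /* software trigger, independent of group regular */

    if ((LL_ADC_REG_Init(ADC1, &reg_init) == SUCCESS) &&
        (LL_ADC_INJ_Init(ADC1, &inj_init) == SUCCESS))
    {
        /* Map one channel per group and give each a sampling time */
        LL_ADC_REG_SetSequencerRanks(ADC1, LL_ADC_REG_RANK_1, LL_ADC_CHANNEL_0);
        LL_ADC_INJ_SetSequencerRanks(ADC1, LL_ADC_INJ_RANK_1, LL_ADC_CHANNEL_1);
        LL_ADC_SetChannelSamplingTime(ADC1, LL_ADC_CHANNEL_0, LL_ADC_SAMPLINGTIME_56CYCLES);
        LL_ADC_SetChannelSamplingTime(ADC1, LL_ADC_CHANNEL_1, LL_ADC_SAMPLINGTIME_56CYCLES);
    }
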
+ * @retval None + */ +void LL_ADC_INJ_StructInit(LL_ADC_INJ_InitTypeDef *ADC_INJ_InitStruct) +{ + /* Set ADC_INJ_InitStruct fields to default values */ + /* Set fields of ADC group injected */ + ADC_INJ_InitStruct->TriggerSource = LL_ADC_INJ_TRIG_SOFTWARE; + ADC_INJ_InitStruct->SequencerLength = LL_ADC_INJ_SEQ_SCAN_DISABLE; + ADC_INJ_InitStruct->SequencerDiscont = LL_ADC_INJ_SEQ_DISCONT_DISABLE; + ADC_INJ_InitStruct->TrigAuto = LL_ADC_INJ_TRIG_INDEPENDENT; +} + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#endif /* ADC1 || ADC2 || ADC3 */ + +/** + * @} + */ + +#endif /* USE_FULL_LL_DRIVER */ + diff --git a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_ll_usb.c b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_ll_usb.c index da8da4a6..40b6d83a 100644 --- a/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_ll_usb.c +++ b/Drivers/STM32F4xx_HAL_Driver/Src/stm32f4xx_ll_usb.c @@ -11,29 +11,30 @@ * + Peripheral Control functions * + Peripheral State functions * + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** @verbatim ============================================================================== ##### How to use this driver ##### ============================================================================== [..] - (#) Fill parameters of Init structure in USB_OTG_CfgTypeDef structure. + (#) Fill parameters of Init structure in USB_CfgTypeDef structure. (#) Call USB_CoreInit() API to initialize the USB Core peripheral. (#) The upper HAL HCD/PCD driver will call the right routines for its internal processes. @endverbatim - ****************************************************************************** - * @attention - * - *
© Copyright (c) 2016 STMicroelectronics. - * All rights reserved.
- * - * This software component is licensed by ST under BSD 3-Clause license, - * the "License"; You may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * opensource.org/licenses/BSD-3-Clause - * + ****************************************************************************** */ @@ -82,7 +83,6 @@ static HAL_StatusTypeDef USB_CoreReset(USB_OTG_GlobalTypeDef *USBx); HAL_StatusTypeDef USB_CoreInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg) { HAL_StatusTypeDef ret; - if (cfg.phy_itface == USB_OTG_ULPI_PHY) { USBx->GCCFG &= ~(USB_OTG_GCCFG_PWRDWN); @@ -96,7 +96,8 @@ HAL_StatusTypeDef USB_CoreInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef c { USBx->GUSBCFG |= USB_OTG_GUSBCFG_ULPIEVBUSD; } - /* Reset after a PHY select */ + + /* Reset after a PHY select */ ret = USB_CoreReset(USBx); } else /* FS interface (embedded Phy) */ @@ -247,21 +248,39 @@ HAL_StatusTypeDef USB_DisableGlobalInt(USB_OTG_GlobalTypeDef *USBx) */ HAL_StatusTypeDef USB_SetCurrentMode(USB_OTG_GlobalTypeDef *USBx, USB_OTG_ModeTypeDef mode) { + uint32_t ms = 0U; + USBx->GUSBCFG &= ~(USB_OTG_GUSBCFG_FHMOD | USB_OTG_GUSBCFG_FDMOD); if (mode == USB_HOST_MODE) { USBx->GUSBCFG |= USB_OTG_GUSBCFG_FHMOD; + + do + { + HAL_Delay(10U); + ms += 10U; + } while ((USB_GetMode(USBx) != (uint32_t)USB_HOST_MODE) && (ms < HAL_USB_CURRENT_MODE_MAX_DELAY_MS)); } else if (mode == USB_DEVICE_MODE) { USBx->GUSBCFG |= USB_OTG_GUSBCFG_FDMOD; + + do + { + HAL_Delay(10U); + ms += 10U; + } while ((USB_GetMode(USBx) != (uint32_t)USB_DEVICE_MODE) && (ms < HAL_USB_CURRENT_MODE_MAX_DELAY_MS)); } else { return HAL_ERROR; } - HAL_Delay(50U); + + if (ms == HAL_USB_CURRENT_MODE_MAX_DELAY_MS) + { + return HAL_ERROR; + } return HAL_OK; } @@ -285,7 +304,9 @@ HAL_StatusTypeDef USB_DevInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cf USBx->DIEPTXF[i] = 0U; } -#if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) \ + || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) \ + || defined(STM32F423xx) /* VBUS Sensing setup */ if (cfg.vbus_sensing_enable == 0U) { @@ -322,14 +343,13 @@ HAL_StatusTypeDef USB_DevInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cf USBx->GCCFG &= ~USB_OTG_GCCFG_NOVBUSSENS; USBx->GCCFG |= USB_OTG_GCCFG_VBUSBSEN; } -#endif /* defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) */ +#endif /* defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || + defined(STM32F423xx) */ /* Restart the Phy Clock */ USBx_PCGCCTL = 0U; - /* Device mode configuration */ - USBx_DEVICE->DCFG |= DCFG_FRAME_INTERVAL_80; - if (cfg.phy_itface == USB_OTG_ULPI_PHY) { if (cfg.speed == USBD_HS_SPEED) @@ -443,7 +463,7 @@ HAL_StatusTypeDef USB_DevInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cf } /** - * @brief USB_OTG_FlushTxFifo : Flush a Tx FIFO + * @brief USB_FlushTxFifo Flush a Tx FIFO * @param USBx Selected device * @param num FIFO number * This parameter can be a value from 1 to 15 @@ -452,13 
+472,28 @@ HAL_StatusTypeDef USB_DevInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cf */ HAL_StatusTypeDef USB_FlushTxFifo(USB_OTG_GlobalTypeDef *USBx, uint32_t num) { - uint32_t count = 0U; + __IO uint32_t count = 0U; + /* Wait for AHB master IDLE state. */ + do + { + count++; + + if (count > HAL_USB_TIMEOUT) + { + return HAL_TIMEOUT; + } + } while ((USBx->GRSTCTL & USB_OTG_GRSTCTL_AHBIDL) == 0U); + + /* Flush TX Fifo */ + count = 0U; USBx->GRSTCTL = (USB_OTG_GRSTCTL_TXFFLSH | (num << 6)); do { - if (++count > 200000U) + count++; + + if (count > HAL_USB_TIMEOUT) { return HAL_TIMEOUT; } @@ -468,19 +503,34 @@ HAL_StatusTypeDef USB_FlushTxFifo(USB_OTG_GlobalTypeDef *USBx, uint32_t num) } /** - * @brief USB_FlushRxFifo : Flush Rx FIFO + * @brief USB_FlushRxFifo Flush Rx FIFO * @param USBx Selected device * @retval HAL status */ HAL_StatusTypeDef USB_FlushRxFifo(USB_OTG_GlobalTypeDef *USBx) { - uint32_t count = 0; + __IO uint32_t count = 0U; + + /* Wait for AHB master IDLE state. */ + do + { + count++; + + if (count > HAL_USB_TIMEOUT) + { + return HAL_TIMEOUT; + } + } while ((USBx->GRSTCTL & USB_OTG_GRSTCTL_AHBIDL) == 0U); + /* Flush RX Fifo */ + count = 0U; USBx->GRSTCTL = USB_OTG_GRSTCTL_RXFFLSH; do { - if (++count > 200000U) + count++; + + if (count > HAL_USB_TIMEOUT) { return HAL_TIMEOUT; } @@ -500,7 +550,7 @@ HAL_StatusTypeDef USB_FlushRxFifo(USB_OTG_GlobalTypeDef *USBx) * @arg USB_OTG_SPEED_FULL: Full speed mode * @retval Hal status */ -HAL_StatusTypeDef USB_SetDevSpeed(USB_OTG_GlobalTypeDef *USBx, uint8_t speed) +HAL_StatusTypeDef USB_SetDevSpeed(const USB_OTG_GlobalTypeDef *USBx, uint8_t speed) { uint32_t USBx_BASE = (uint32_t)USBx; @@ -513,10 +563,10 @@ HAL_StatusTypeDef USB_SetDevSpeed(USB_OTG_GlobalTypeDef *USBx, uint8_t speed) * @param USBx Selected device * @retval speed device speed * This parameter can be one of these values: - * @arg PCD_SPEED_HIGH: High speed mode - * @arg PCD_SPEED_FULL: Full speed mode + * @arg USBD_HS_SPEED: High speed mode + * @arg USBD_FS_SPEED: Full speed mode */ -uint8_t USB_GetDevSpeed(USB_OTG_GlobalTypeDef *USBx) +uint8_t USB_GetDevSpeed(const USB_OTG_GlobalTypeDef *USBx) { uint32_t USBx_BASE = (uint32_t)USBx; uint8_t speed; @@ -545,7 +595,7 @@ uint8_t USB_GetDevSpeed(USB_OTG_GlobalTypeDef *USBx) * @param ep pointer to endpoint structure * @retval HAL status */ -HAL_StatusTypeDef USB_ActivateEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +HAL_StatusTypeDef USB_ActivateEndpoint(const USB_OTG_GlobalTypeDef *USBx, const USB_OTG_EPTypeDef *ep) { uint32_t USBx_BASE = (uint32_t)USBx; uint32_t epnum = (uint32_t)ep->num; @@ -583,7 +633,7 @@ HAL_StatusTypeDef USB_ActivateEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTy * @param ep pointer to endpoint structure * @retval HAL status */ -HAL_StatusTypeDef USB_ActivateDedicatedEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +HAL_StatusTypeDef USB_ActivateDedicatedEndpoint(const USB_OTG_GlobalTypeDef *USBx, const USB_OTG_EPTypeDef *ep) { uint32_t USBx_BASE = (uint32_t)USBx; uint32_t epnum = (uint32_t)ep->num; @@ -622,7 +672,7 @@ HAL_StatusTypeDef USB_ActivateDedicatedEndpoint(USB_OTG_GlobalTypeDef *USBx, USB * @param ep pointer to endpoint structure * @retval HAL status */ -HAL_StatusTypeDef USB_DeactivateEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +HAL_StatusTypeDef USB_DeactivateEndpoint(const USB_OTG_GlobalTypeDef *USBx, const USB_OTG_EPTypeDef *ep) { uint32_t USBx_BASE = (uint32_t)USBx; uint32_t epnum = (uint32_t)ep->num; @@ -669,7 +719,7 @@ HAL_StatusTypeDef 
USB_DeactivateEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EP * @param ep pointer to endpoint structure * @retval HAL status */ -HAL_StatusTypeDef USB_DeactivateDedicatedEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +HAL_StatusTypeDef USB_DeactivateDedicatedEndpoint(const USB_OTG_GlobalTypeDef *USBx, const USB_OTG_EPTypeDef *ep) { uint32_t USBx_BASE = (uint32_t)USBx; uint32_t epnum = (uint32_t)ep->num; @@ -736,14 +786,29 @@ HAL_StatusTypeDef USB_EPStartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef */ USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_XFRSIZ); USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_PKTCNT); - USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_PKTCNT & (((ep->xfer_len + ep->maxpacket - 1U) / ep->maxpacket) << 19)); - USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_XFRSIZ & ep->xfer_len); - if (ep->type == EP_TYPE_ISOC) + if (epnum == 0U) { - USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_MULCNT); - USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_MULCNT & (1U << 29)); + if (ep->xfer_len > ep->maxpacket) + { + ep->xfer_len = ep->maxpacket; + } + + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_PKTCNT & (1U << 19)); } + else + { + pktcnt = (uint16_t)((ep->xfer_len + ep->maxpacket - 1U) / ep->maxpacket); + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_PKTCNT & (pktcnt << 19)); + + if (ep->type == EP_TYPE_ISOC) + { + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_MULCNT); + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_MULCNT & (pktcnt << 29)); + } + } + + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_XFRSIZ & ep->xfer_len); } if (dma == 1U) @@ -805,16 +870,34 @@ HAL_StatusTypeDef USB_EPStartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef USBx_OUTEP(epnum)->DOEPTSIZ &= ~(USB_OTG_DOEPTSIZ_XFRSIZ); USBx_OUTEP(epnum)->DOEPTSIZ &= ~(USB_OTG_DOEPTSIZ_PKTCNT); - if (ep->xfer_len == 0U) + if (epnum == 0U) { - USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_XFRSIZ & ep->maxpacket); + if (ep->xfer_len > 0U) + { + ep->xfer_len = ep->maxpacket; + } + + /* Store transfer size, for EP0 this is equal to endpoint max packet size */ + ep->xfer_size = ep->maxpacket; + + USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_XFRSIZ & ep->xfer_size); USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_PKTCNT & (1U << 19)); } else { - pktcnt = (uint16_t)((ep->xfer_len + ep->maxpacket - 1U) / ep->maxpacket); - USBx_OUTEP(epnum)->DOEPTSIZ |= USB_OTG_DOEPTSIZ_PKTCNT & ((uint32_t)pktcnt << 19); - USBx_OUTEP(epnum)->DOEPTSIZ |= USB_OTG_DOEPTSIZ_XFRSIZ & (ep->maxpacket * pktcnt); + if (ep->xfer_len == 0U) + { + USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_XFRSIZ & ep->maxpacket); + USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_PKTCNT & (1U << 19)); + } + else + { + pktcnt = (uint16_t)((ep->xfer_len + ep->maxpacket - 1U) / ep->maxpacket); + ep->xfer_size = ep->maxpacket * pktcnt; + + USBx_OUTEP(epnum)->DOEPTSIZ |= USB_OTG_DOEPTSIZ_PKTCNT & ((uint32_t)pktcnt << 19); + USBx_OUTEP(epnum)->DOEPTSIZ |= USB_OTG_DOEPTSIZ_XFRSIZ & ep->xfer_size; + } } if (dma == 1U) @@ -843,103 +926,64 @@ HAL_StatusTypeDef USB_EPStartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef return HAL_OK; } + /** - * @brief USB_EP0StartXfer : setup and starts a transfer over the EP 0 - * @param USBx Selected device - * @param ep pointer to endpoint structure - * @param dma USB dma enabled or disabled - * This parameter can be one of these values: - * 0 : DMA feature not used - * 1 : DMA feature used - * @retval HAL status - */ -HAL_StatusTypeDef USB_EP0StartXfer(USB_OTG_GlobalTypeDef *USBx, 
USB_OTG_EPTypeDef *ep, uint8_t dma) + * @brief USB_EPStoptXfer Stop transfer on an EP + * @param USBx usb device instance + * @param ep pointer to endpoint structure + * @retval HAL status + */ +HAL_StatusTypeDef USB_EPStopXfer(const USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) { + __IO uint32_t count = 0U; + HAL_StatusTypeDef ret = HAL_OK; uint32_t USBx_BASE = (uint32_t)USBx; - uint32_t epnum = (uint32_t)ep->num; /* IN endpoint */ if (ep->is_in == 1U) { - /* Zero Length Packet? */ - if (ep->xfer_len == 0U) + /* EP enable, IN data in FIFO */ + if (((USBx_INEP(ep->num)->DIEPCTL) & USB_OTG_DIEPCTL_EPENA) == USB_OTG_DIEPCTL_EPENA) { - USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_PKTCNT); - USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_PKTCNT & (1U << 19)); - USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_XFRSIZ); - } - else - { - /* Program the transfer size and packet count - * as follows: xfersize = N * maxpacket + - * short_packet pktcnt = N + (short_packet - * exist ? 1 : 0) - */ - USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_XFRSIZ); - USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_PKTCNT); - - if (ep->xfer_len > ep->maxpacket) - { - ep->xfer_len = ep->maxpacket; - } - USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_PKTCNT & (1U << 19)); - USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_XFRSIZ & ep->xfer_len); - } + USBx_INEP(ep->num)->DIEPCTL |= (USB_OTG_DIEPCTL_SNAK); + USBx_INEP(ep->num)->DIEPCTL |= (USB_OTG_DIEPCTL_EPDIS); - if (dma == 1U) - { - if ((uint32_t)ep->dma_addr != 0U) + do { - USBx_INEP(epnum)->DIEPDMA = (uint32_t)(ep->dma_addr); - } + count++; - /* EP enable, IN data in FIFO */ - USBx_INEP(epnum)->DIEPCTL |= (USB_OTG_DIEPCTL_CNAK | USB_OTG_DIEPCTL_EPENA); - } - else - { - /* EP enable, IN data in FIFO */ - USBx_INEP(epnum)->DIEPCTL |= (USB_OTG_DIEPCTL_CNAK | USB_OTG_DIEPCTL_EPENA); - - /* Enable the Tx FIFO Empty Interrupt for this EP */ - if (ep->xfer_len > 0U) - { - USBx_DEVICE->DIEPEMPMSK |= 1UL << (ep->num & EP_ADDR_MSK); - } + if (count > 10000U) + { + ret = HAL_ERROR; + break; + } + } while (((USBx_INEP(ep->num)->DIEPCTL) & USB_OTG_DIEPCTL_EPENA) == USB_OTG_DIEPCTL_EPENA); } } else /* OUT endpoint */ { - /* Program the transfer size and packet count as follows: - * pktcnt = N - * xfersize = N * maxpacket - */ - USBx_OUTEP(epnum)->DOEPTSIZ &= ~(USB_OTG_DOEPTSIZ_XFRSIZ); - USBx_OUTEP(epnum)->DOEPTSIZ &= ~(USB_OTG_DOEPTSIZ_PKTCNT); - - if (ep->xfer_len > 0U) + if (((USBx_OUTEP(ep->num)->DOEPCTL) & USB_OTG_DOEPCTL_EPENA) == USB_OTG_DOEPCTL_EPENA) { - ep->xfer_len = ep->maxpacket; - } + USBx_OUTEP(ep->num)->DOEPCTL |= (USB_OTG_DOEPCTL_SNAK); + USBx_OUTEP(ep->num)->DOEPCTL |= (USB_OTG_DOEPCTL_EPDIS); - USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_PKTCNT & (1U << 19)); - USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_XFRSIZ & (ep->maxpacket)); - - if (dma == 1U) - { - if ((uint32_t)ep->xfer_buff != 0U) + do { - USBx_OUTEP(epnum)->DOEPDMA = (uint32_t)(ep->xfer_buff); - } - } + count++; - /* EP enable */ - USBx_OUTEP(epnum)->DOEPCTL |= (USB_OTG_DOEPCTL_CNAK | USB_OTG_DOEPCTL_EPENA); + if (count > 10000U) + { + ret = HAL_ERROR; + break; + } + } while (((USBx_OUTEP(ep->num)->DOEPCTL) & USB_OTG_DOEPCTL_EPENA) == USB_OTG_DOEPCTL_EPENA); + } } - return HAL_OK; + return ret; } + /** * @brief USB_WritePacket : Writes a packet into the Tx FIFO associated * with the EP/channel @@ -953,12 +997,13 @@ HAL_StatusTypeDef USB_EP0StartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDe * 1 : DMA feature used * @retval HAL status */ -HAL_StatusTypeDef 
USB_WritePacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *src, +HAL_StatusTypeDef USB_WritePacket(const USB_OTG_GlobalTypeDef *USBx, uint8_t *src, uint8_t ch_ep_num, uint16_t len, uint8_t dma) { uint32_t USBx_BASE = (uint32_t)USBx; - uint32_t *pSrc = (uint32_t *)src; - uint32_t count32b, i; + uint8_t *pSrc = src; + uint32_t count32b; + uint32_t i; if (dma == 0U) { @@ -967,6 +1012,9 @@ HAL_StatusTypeDef USB_WritePacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *src, { USBx_DFIFO((uint32_t)ch_ep_num) = __UNALIGNED_UINT32_READ(pSrc); pSrc++; + pSrc++; + pSrc++; + pSrc++; } } @@ -980,17 +1028,37 @@ HAL_StatusTypeDef USB_WritePacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *src, * @param len Number of bytes to read * @retval pointer to destination buffer */ -void *USB_ReadPacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *dest, uint16_t len) +void *USB_ReadPacket(const USB_OTG_GlobalTypeDef *USBx, uint8_t *dest, uint16_t len) { uint32_t USBx_BASE = (uint32_t)USBx; - uint32_t *pDest = (uint32_t *)dest; + uint8_t *pDest = dest; + uint32_t pData; uint32_t i; - uint32_t count32b = ((uint32_t)len + 3U) / 4U; + uint32_t count32b = (uint32_t)len >> 2U; + uint16_t remaining_bytes = len % 4U; for (i = 0U; i < count32b; i++) { __UNALIGNED_UINT32_WRITE(pDest, USBx_DFIFO(0U)); pDest++; + pDest++; + pDest++; + pDest++; + } + + /* When Number of data is not word aligned, read the remaining byte */ + if (remaining_bytes != 0U) + { + i = 0U; + __UNALIGNED_UINT32_WRITE(&pData, USBx_DFIFO(0U)); + + do + { + *(uint8_t *)pDest = (uint8_t)(pData >> (8U * (uint8_t)(i))); + i++; + pDest++; + remaining_bytes--; + } while (remaining_bytes != 0U); } return ((void *)pDest); @@ -1002,7 +1070,7 @@ void *USB_ReadPacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *dest, uint16_t len) * @param ep pointer to endpoint structure * @retval HAL status */ -HAL_StatusTypeDef USB_EPSetStall(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +HAL_StatusTypeDef USB_EPSetStall(const USB_OTG_GlobalTypeDef *USBx, const USB_OTG_EPTypeDef *ep) { uint32_t USBx_BASE = (uint32_t)USBx; uint32_t epnum = (uint32_t)ep->num; @@ -1033,7 +1101,7 @@ HAL_StatusTypeDef USB_EPSetStall(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef * @param ep pointer to endpoint structure * @retval HAL status */ -HAL_StatusTypeDef USB_EPClearStall(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +HAL_StatusTypeDef USB_EPClearStall(const USB_OTG_GlobalTypeDef *USBx, const USB_OTG_EPTypeDef *ep) { uint32_t USBx_BASE = (uint32_t)USBx; uint32_t epnum = (uint32_t)ep->num; @@ -1103,7 +1171,7 @@ HAL_StatusTypeDef USB_StopDevice(USB_OTG_GlobalTypeDef *USBx) * This parameter can be a value from 0 to 255 * @retval HAL status */ -HAL_StatusTypeDef USB_SetDevAddress(USB_OTG_GlobalTypeDef *USBx, uint8_t address) +HAL_StatusTypeDef USB_SetDevAddress(const USB_OTG_GlobalTypeDef *USBx, uint8_t address) { uint32_t USBx_BASE = (uint32_t)USBx; @@ -1118,7 +1186,7 @@ HAL_StatusTypeDef USB_SetDevAddress(USB_OTG_GlobalTypeDef *USBx, uint8_t addres * @param USBx Selected device * @retval HAL status */ -HAL_StatusTypeDef USB_DevConnect(USB_OTG_GlobalTypeDef *USBx) +HAL_StatusTypeDef USB_DevConnect(const USB_OTG_GlobalTypeDef *USBx) { uint32_t USBx_BASE = (uint32_t)USBx; @@ -1135,7 +1203,7 @@ HAL_StatusTypeDef USB_DevConnect(USB_OTG_GlobalTypeDef *USBx) * @param USBx Selected device * @retval HAL status */ -HAL_StatusTypeDef USB_DevDisconnect(USB_OTG_GlobalTypeDef *USBx) +HAL_StatusTypeDef USB_DevDisconnect(const USB_OTG_GlobalTypeDef *USBx) { uint32_t USBx_BASE = (uint32_t)USBx; @@ -1150,9 +1218,9 @@ 
HAL_StatusTypeDef USB_DevDisconnect(USB_OTG_GlobalTypeDef *USBx) /** * @brief USB_ReadInterrupts: return the global USB interrupt status * @param USBx Selected device - * @retval HAL status + * @retval USB Global Interrupt status */ -uint32_t USB_ReadInterrupts(USB_OTG_GlobalTypeDef *USBx) +uint32_t USB_ReadInterrupts(USB_OTG_GlobalTypeDef const *USBx) { uint32_t tmpreg; @@ -1162,12 +1230,29 @@ uint32_t USB_ReadInterrupts(USB_OTG_GlobalTypeDef *USBx) return tmpreg; } +/** + * @brief USB_ReadChInterrupts: return USB channel interrupt status + * @param USBx Selected device + * @param chnum Channel number + * @retval USB Channel Interrupt status + */ +uint32_t USB_ReadChInterrupts(const USB_OTG_GlobalTypeDef *USBx, uint8_t chnum) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t tmpreg; + + tmpreg = USBx_HC(chnum)->HCINT; + tmpreg &= USBx_HC(chnum)->HCINTMSK; + + return tmpreg; +} + /** * @brief USB_ReadDevAllOutEpInterrupt: return the USB device OUT endpoints interrupt status * @param USBx Selected device - * @retval HAL status + * @retval USB Device OUT EP interrupt status */ -uint32_t USB_ReadDevAllOutEpInterrupt(USB_OTG_GlobalTypeDef *USBx) +uint32_t USB_ReadDevAllOutEpInterrupt(const USB_OTG_GlobalTypeDef *USBx) { uint32_t USBx_BASE = (uint32_t)USBx; uint32_t tmpreg; @@ -1181,9 +1266,9 @@ uint32_t USB_ReadDevAllOutEpInterrupt(USB_OTG_GlobalTypeDef *USBx) /** * @brief USB_ReadDevAllInEpInterrupt: return the USB device IN endpoints interrupt status * @param USBx Selected device - * @retval HAL status + * @retval USB Device IN EP interrupt status */ -uint32_t USB_ReadDevAllInEpInterrupt(USB_OTG_GlobalTypeDef *USBx) +uint32_t USB_ReadDevAllInEpInterrupt(const USB_OTG_GlobalTypeDef *USBx) { uint32_t USBx_BASE = (uint32_t)USBx; uint32_t tmpreg; @@ -1201,7 +1286,7 @@ uint32_t USB_ReadDevAllInEpInterrupt(USB_OTG_GlobalTypeDef *USBx) * This parameter can be a value from 0 to 15 * @retval Device OUT EP Interrupt register */ -uint32_t USB_ReadDevOutEPInterrupt(USB_OTG_GlobalTypeDef *USBx, uint8_t epnum) +uint32_t USB_ReadDevOutEPInterrupt(const USB_OTG_GlobalTypeDef *USBx, uint8_t epnum) { uint32_t USBx_BASE = (uint32_t)USBx; uint32_t tmpreg; @@ -1219,10 +1304,12 @@ uint32_t USB_ReadDevOutEPInterrupt(USB_OTG_GlobalTypeDef *USBx, uint8_t epnum) * This parameter can be a value from 0 to 15 * @retval Device IN EP Interrupt register */ -uint32_t USB_ReadDevInEPInterrupt(USB_OTG_GlobalTypeDef *USBx, uint8_t epnum) +uint32_t USB_ReadDevInEPInterrupt(const USB_OTG_GlobalTypeDef *USBx, uint8_t epnum) { uint32_t USBx_BASE = (uint32_t)USBx; - uint32_t tmpreg, msk, emp; + uint32_t tmpreg; + uint32_t msk; + uint32_t emp; msk = USBx_DEVICE->DIEPMSK; emp = USBx_DEVICE->DIEPEMPMSK; @@ -1240,7 +1327,7 @@ uint32_t USB_ReadDevInEPInterrupt(USB_OTG_GlobalTypeDef *USBx, uint8_t epnum) */ void USB_ClearInterrupts(USB_OTG_GlobalTypeDef *USBx, uint32_t interrupt) { - USBx->GINTSTS |= interrupt; + USBx->GINTSTS &= interrupt; } /** @@ -1251,7 +1338,7 @@ void USB_ClearInterrupts(USB_OTG_GlobalTypeDef *USBx, uint32_t interrupt) * 0 : Host * 1 : Device */ -uint32_t USB_GetMode(USB_OTG_GlobalTypeDef *USBx) +uint32_t USB_GetMode(const USB_OTG_GlobalTypeDef *USBx) { return ((USBx->GINTSTS) & 0x1U); } @@ -1261,7 +1348,7 @@ uint32_t USB_GetMode(USB_OTG_GlobalTypeDef *USBx) * @param USBx Selected device * @retval HAL status */ -HAL_StatusTypeDef USB_ActivateSetup(USB_OTG_GlobalTypeDef *USBx) +HAL_StatusTypeDef USB_ActivateSetup(const USB_OTG_GlobalTypeDef *USBx) { uint32_t USBx_BASE = (uint32_t)USBx; @@ -1283,10 +1370,10 @@ 
HAL_StatusTypeDef USB_ActivateSetup(USB_OTG_GlobalTypeDef *USBx) * @param psetup pointer to setup packet * @retval HAL status */ -HAL_StatusTypeDef USB_EP0_OutStart(USB_OTG_GlobalTypeDef *USBx, uint8_t dma, uint8_t *psetup) +HAL_StatusTypeDef USB_EP0_OutStart(const USB_OTG_GlobalTypeDef *USBx, uint8_t dma, const uint8_t *psetup) { uint32_t USBx_BASE = (uint32_t)USBx; - uint32_t gSNPSiD = *(__IO uint32_t *)(&USBx->CID + 0x1U); + uint32_t gSNPSiD = *(__IO const uint32_t *)(&USBx->CID + 0x1U); if (gSNPSiD > USB_OTG_CORE_ID_300A) { @@ -1318,12 +1405,14 @@ HAL_StatusTypeDef USB_EP0_OutStart(USB_OTG_GlobalTypeDef *USBx, uint8_t dma, uin */ static HAL_StatusTypeDef USB_CoreReset(USB_OTG_GlobalTypeDef *USBx) { - uint32_t count = 0U; + __IO uint32_t count = 0U; /* Wait for AHB master IDLE state. */ do { - if (++count > 200000U) + count++; + + if (count > HAL_USB_TIMEOUT) { return HAL_TIMEOUT; } @@ -1335,7 +1424,9 @@ static HAL_StatusTypeDef USB_CoreReset(USB_OTG_GlobalTypeDef *USBx) do { - if (++count > 200000U) + count++; + + if (count > HAL_USB_TIMEOUT) { return HAL_TIMEOUT; } @@ -1354,13 +1445,16 @@ static HAL_StatusTypeDef USB_CoreReset(USB_OTG_GlobalTypeDef *USBx) */ HAL_StatusTypeDef USB_HostInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg) { + HAL_StatusTypeDef ret = HAL_OK; uint32_t USBx_BASE = (uint32_t)USBx; uint32_t i; /* Restart the Phy Clock */ USBx_PCGCCTL = 0U; -#if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +#if defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) \ + || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) \ + || defined(STM32F423xx) /* Disable HW VBUS sensing */ USBx->GCCFG &= ~(USB_OTG_GCCFG_VBDEN); #else @@ -1371,13 +1465,17 @@ HAL_StatusTypeDef USB_HostInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef c USBx->GCCFG |= USB_OTG_GCCFG_NOVBUSSENS; USBx->GCCFG &= ~USB_OTG_GCCFG_VBUSBSEN; USBx->GCCFG &= ~USB_OTG_GCCFG_VBUSASEN; -#endif /* defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) */ -#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) +#endif /* defined(STM32F446xx) || defined(STM32F469xx) || defined(STM32F479xx) || defined(STM32F412Zx) || + defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || + defined(STM32F423xx) */ +#if defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) \ + || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) /* Disable Battery chargin detector */ USBx->GCCFG &= ~(USB_OTG_GCCFG_BCDEN); -#endif /* defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) */ +#endif /* defined(STM32F412Zx) || defined(STM32F412Vx) || defined(STM32F412Rx) || + defined(STM32F412Cx) || defined(STM32F413xx) || defined(STM32F423xx) */ - if ((USBx->CID & (0x1U << 8)) != 0U) + if ((USBx->GUSBCFG & USB_OTG_GUSBCFG_PHYSEL) == 0U) { if (cfg.speed == USBH_FSLS_SPEED) { @@ -1397,28 +1495,30 @@ HAL_StatusTypeDef USB_HostInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef c } /* Make sure the FIFOs are 
flushed. */ - (void)USB_FlushTxFifo(USBx, 0x10U); /* all Tx FIFOs */ - (void)USB_FlushRxFifo(USBx); + if (USB_FlushTxFifo(USBx, 0x10U) != HAL_OK) /* all Tx FIFOs */ + { + ret = HAL_ERROR; + } + + if (USB_FlushRxFifo(USBx) != HAL_OK) + { + ret = HAL_ERROR; + } /* Clear all pending HC Interrupts */ for (i = 0U; i < cfg.Host_channels; i++) { - USBx_HC(i)->HCINT = 0xFFFFFFFFU; + USBx_HC(i)->HCINT = CLEAR_INTERRUPT_MASK; USBx_HC(i)->HCINTMSK = 0U; } - /* Enable VBUS driving */ - (void)USB_DriveVbus(USBx, 1U); - - HAL_Delay(200U); - /* Disable all interrupts. */ USBx->GINTMSK = 0U; /* Clear any pending interrupts */ - USBx->GINTSTS = 0xFFFFFFFFU; - - if ((USBx->CID & (0x1U << 8)) != 0U) + USBx->GINTSTS = CLEAR_INTERRUPT_MASK; +#if defined (USB_OTG_HS) + if (USBx == USB_OTG_HS) { /* set Rx FIFO size */ USBx->GRXFSIZ = 0x200U; @@ -1426,6 +1526,7 @@ HAL_StatusTypeDef USB_HostInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef c USBx->HPTXFSIZ = (uint32_t)(((0xE0U << 16) & USB_OTG_HPTXFSIZ_PTXFD) | 0x300U); } else +#endif /* defined (USB_OTG_HS) */ { /* set Rx FIFO size */ USBx->GRXFSIZ = 0x80U; @@ -1444,7 +1545,7 @@ HAL_StatusTypeDef USB_HostInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef c USB_OTG_GINTMSK_SOFM | USB_OTG_GINTSTS_DISCINT | \ USB_OTG_GINTMSK_PXFRM_IISOOXFRM | USB_OTG_GINTMSK_WUIM); - return HAL_OK; + return ret; } /** @@ -1457,7 +1558,7 @@ HAL_StatusTypeDef USB_HostInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef c * HCFG_6_MHZ : Low Speed 6 MHz Clock * @retval HAL status */ -HAL_StatusTypeDef USB_InitFSLSPClkSel(USB_OTG_GlobalTypeDef *USBx, uint8_t freq) +HAL_StatusTypeDef USB_InitFSLSPClkSel(const USB_OTG_GlobalTypeDef *USBx, uint8_t freq) { uint32_t USBx_BASE = (uint32_t)USBx; @@ -1466,15 +1567,15 @@ HAL_StatusTypeDef USB_InitFSLSPClkSel(USB_OTG_GlobalTypeDef *USBx, uint8_t freq) if (freq == HCFG_48_MHZ) { - USBx_HOST->HFIR = 48000U; + USBx_HOST->HFIR = HFIR_48_MHZ; } else if (freq == HCFG_6_MHZ) { - USBx_HOST->HFIR = 6000U; + USBx_HOST->HFIR = HFIR_6_MHZ; } else { - /* ... */ + return HAL_ERROR; } return HAL_OK; @@ -1487,7 +1588,7 @@ HAL_StatusTypeDef USB_InitFSLSPClkSel(USB_OTG_GlobalTypeDef *USBx, uint8_t freq) * @note (1)The application must wait at least 10 ms * before clearing the reset bit. 
*/ -HAL_StatusTypeDef USB_ResetPort(USB_OTG_GlobalTypeDef *USBx) +HAL_StatusTypeDef USB_ResetPort(const USB_OTG_GlobalTypeDef *USBx) { uint32_t USBx_BASE = (uint32_t)USBx; @@ -1514,7 +1615,7 @@ HAL_StatusTypeDef USB_ResetPort(USB_OTG_GlobalTypeDef *USBx) * 1 : Activate VBUS * @retval HAL status */ -HAL_StatusTypeDef USB_DriveVbus(USB_OTG_GlobalTypeDef *USBx, uint8_t state) +HAL_StatusTypeDef USB_DriveVbus(const USB_OTG_GlobalTypeDef *USBx, uint8_t state) { uint32_t USBx_BASE = (uint32_t)USBx; __IO uint32_t hprt0 = 0U; @@ -1544,7 +1645,7 @@ HAL_StatusTypeDef USB_DriveVbus(USB_OTG_GlobalTypeDef *USBx, uint8_t state) * @arg HCD_SPEED_FULL: Full speed mode * @arg HCD_SPEED_LOW: Low speed mode */ -uint32_t USB_GetHostSpeed(USB_OTG_GlobalTypeDef *USBx) +uint32_t USB_GetHostSpeed(USB_OTG_GlobalTypeDef const *USBx) { uint32_t USBx_BASE = (uint32_t)USBx; __IO uint32_t hprt0 = 0U; @@ -1558,7 +1659,7 @@ uint32_t USB_GetHostSpeed(USB_OTG_GlobalTypeDef *USBx) * @param USBx Selected device * @retval current frame number */ -uint32_t USB_GetCurrentFrame(USB_OTG_GlobalTypeDef *USBx) +uint32_t USB_GetCurrentFrame(USB_OTG_GlobalTypeDef const *USBx) { uint32_t USBx_BASE = (uint32_t)USBx; @@ -1586,7 +1687,7 @@ uint32_t USB_GetCurrentFrame(USB_OTG_GlobalTypeDef *USBx) * @arg EP_TYPE_BULK: Bulk type * @arg EP_TYPE_INTR: Interrupt type * @param mps Max Packet Size - * This parameter can be a value from 0 to32K + * This parameter can be a value from 0 to 32K * @retval HAL state */ HAL_StatusTypeDef USB_HC_Init(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num, @@ -1595,10 +1696,12 @@ HAL_StatusTypeDef USB_HC_Init(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num, { HAL_StatusTypeDef ret = HAL_OK; uint32_t USBx_BASE = (uint32_t)USBx; - uint32_t HCcharEpDir, HCcharLowSpeed; + uint32_t HCcharEpDir; + uint32_t HCcharLowSpeed; + uint32_t HostCoreSpeed; /* Clear old interrupt conditions for this host channel. */ - USBx_HC((uint32_t)ch_num)->HCINT = 0xFFFFFFFFU; + USBx_HC((uint32_t)ch_num)->HCINT = CLEAR_INTERRUPT_MASK; /* Enable channel interrupts required for this transfer. */ switch (ep_type) @@ -1618,10 +1721,13 @@ HAL_StatusTypeDef USB_HC_Init(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num, } else { - if ((USBx->CID & (0x1U << 8)) != 0U) +#if defined (USB_OTG_HS) + if (USBx == USB_OTG_HS) { - USBx_HC((uint32_t)ch_num)->HCINTMSK |= (USB_OTG_HCINTMSK_NYET | USB_OTG_HCINTMSK_ACKM); + USBx_HC((uint32_t)ch_num)->HCINTMSK |= USB_OTG_HCINTMSK_NYET | + USB_OTG_HCINTMSK_ACKM; } +#endif /* defined (USB_OTG_HS) */ } break; @@ -1658,6 +1764,12 @@ HAL_StatusTypeDef USB_HC_Init(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num, break; } + /* Clear Hub Start Split transaction */ + USBx_HC((uint32_t)ch_num)->HCSPLT = 0U; + + /* Enable host channel Halt interrupt */ + USBx_HC((uint32_t)ch_num)->HCINTMSK |= USB_OTG_HCINTMSK_CHHM; + /* Enable the top level host channel interrupt. 
*/ USBx_HOST->HAINTMSK |= 1UL << (ch_num & 0xFU); @@ -1674,7 +1786,10 @@ HAL_StatusTypeDef USB_HC_Init(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num, HCcharEpDir = 0U; } - if (speed == HPRT0_PRTSPD_LOW_SPEED) + HostCoreSpeed = USB_GetHostSpeed(USBx); + + /* LS device plugged to HUB */ + if ((speed == HPRT0_PRTSPD_LOW_SPEED) && (HostCoreSpeed != HPRT0_PRTSPD_LOW_SPEED)) { HCcharLowSpeed = (0x1U << 17) & USB_OTG_HCCHAR_LSDEV; } @@ -1686,11 +1801,12 @@ HAL_StatusTypeDef USB_HC_Init(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num, USBx_HC((uint32_t)ch_num)->HCCHAR = (((uint32_t)dev_address << 22) & USB_OTG_HCCHAR_DAD) | ((((uint32_t)epnum & 0x7FU) << 11) & USB_OTG_HCCHAR_EPNUM) | (((uint32_t)ep_type << 18) & USB_OTG_HCCHAR_EPTYP) | - ((uint32_t)mps & USB_OTG_HCCHAR_MPSIZ) | HCcharEpDir | HCcharLowSpeed; + ((uint32_t)mps & USB_OTG_HCCHAR_MPSIZ) | + USB_OTG_HCCHAR_MC_0 | HCcharEpDir | HCcharLowSpeed; - if (ep_type == EP_TYPE_INTR) + if ((ep_type == EP_TYPE_INTR) || (ep_type == EP_TYPE_ISOC)) { - USBx_HC((uint32_t)ch_num)->HCCHAR |= USB_OTG_HCCHAR_ODDFRM ; + USBx_HC((uint32_t)ch_num)->HCCHAR |= USB_OTG_HCCHAR_ODDFRM; } return ret; @@ -1710,52 +1826,126 @@ HAL_StatusTypeDef USB_HC_StartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_HCTypeDe { uint32_t USBx_BASE = (uint32_t)USBx; uint32_t ch_num = (uint32_t)hc->ch_num; - static __IO uint32_t tmpreg = 0U; + __IO uint32_t tmpreg; uint8_t is_oddframe; uint16_t len_words; uint16_t num_packets; - uint16_t max_hc_pkt_count = 256U; + uint16_t max_hc_pkt_count = HC_MAX_PKT_CNT; - if (((USBx->CID & (0x1U << 8)) != 0U) && (hc->speed == USBH_HS_SPEED)) +#if defined (USB_OTG_HS) + if (USBx == USB_OTG_HS) { - if ((dma == 0U) && (hc->do_ping == 1U)) - { - (void)USB_DoPing(USBx, hc->ch_num); - return HAL_OK; - } - else if (dma == 1U) + /* in DMA mode host Core automatically issues ping in case of NYET/NAK */ + if (dma == 1U) { - USBx_HC(ch_num)->HCINTMSK &= ~(USB_OTG_HCINTMSK_NYET | USB_OTG_HCINTMSK_ACKM); - hc->do_ping = 0U; + if (((hc->ep_type == EP_TYPE_CTRL) || (hc->ep_type == EP_TYPE_BULK)) && (hc->do_ssplit == 0U)) + { + + USBx_HC((uint32_t)ch_num)->HCINTMSK &= ~(USB_OTG_HCINTMSK_NYET | + USB_OTG_HCINTMSK_ACKM | + USB_OTG_HCINTMSK_NAKM); + } } else { - /* ... 
*/ + if ((hc->speed == USBH_HS_SPEED) && (hc->do_ping == 1U)) + { + (void)USB_DoPing(USBx, hc->ch_num); + return HAL_OK; + } } } +#endif /* defined (USB_OTG_HS) */ - /* Compute the expected number of packets associated to the transfer */ - if (hc->xfer_len > 0U) + if (hc->do_ssplit == 1U) { - num_packets = (uint16_t)((hc->xfer_len + hc->max_packet - 1U) / hc->max_packet); + /* Set number of packet to 1 for Split transaction */ + num_packets = 1U; - if (num_packets > max_hc_pkt_count) + if (hc->ep_is_in != 0U) + { + hc->XferSize = (uint32_t)num_packets * hc->max_packet; + } + else { - num_packets = max_hc_pkt_count; - hc->xfer_len = (uint32_t)num_packets * hc->max_packet; + if (hc->ep_type == EP_TYPE_ISOC) + { + if (hc->xfer_len > ISO_SPLT_MPS) + { + /* Isochrone Max Packet Size for Split mode */ + hc->XferSize = hc->max_packet; + hc->xfer_len = hc->XferSize; + + if ((hc->iso_splt_xactPos == HCSPLT_BEGIN) || (hc->iso_splt_xactPos == HCSPLT_MIDDLE)) + { + hc->iso_splt_xactPos = HCSPLT_MIDDLE; + } + else + { + hc->iso_splt_xactPos = HCSPLT_BEGIN; + } + } + else + { + hc->XferSize = hc->xfer_len; + + if ((hc->iso_splt_xactPos != HCSPLT_BEGIN) && (hc->iso_splt_xactPos != HCSPLT_MIDDLE)) + { + hc->iso_splt_xactPos = HCSPLT_FULL; + } + else + { + hc->iso_splt_xactPos = HCSPLT_END; + } + } + } + else + { + if ((dma == 1U) && (hc->xfer_len > hc->max_packet)) + { + hc->XferSize = (uint32_t)num_packets * hc->max_packet; + } + else + { + hc->XferSize = hc->xfer_len; + } + } } } else { - num_packets = 1U; - } - if (hc->ep_is_in != 0U) - { - hc->xfer_len = (uint32_t)num_packets * hc->max_packet; + /* Compute the expected number of packets associated to the transfer */ + if (hc->xfer_len > 0U) + { + num_packets = (uint16_t)((hc->xfer_len + hc->max_packet - 1U) / hc->max_packet); + + if (num_packets > max_hc_pkt_count) + { + num_packets = max_hc_pkt_count; + hc->XferSize = (uint32_t)num_packets * hc->max_packet; + } + } + else + { + num_packets = 1U; + } + + /* + * For IN channel HCTSIZ.XferSize is expected to be an integer multiple of + * max_packet size. 
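
To make the packet-count rounding described above concrete (a worked illustration, not driver code): for a non-split IN channel with max_packet = 64 and xfer_len = 100, the driver programs two packets and rounds the transfer size up to a whole number of packets, while an OUT channel keeps the requested length:

    uint32_t max_packet  = 64U;
    uint32_t xfer_len    = 100U;
    uint32_t num_packets = (xfer_len + max_packet - 1U) / max_packet;  /* = 2 */
    uint32_t in_size     = num_packets * max_packet;                   /* = 128, IN rounds up */
    uint32_t out_size    = xfer_len;                                   /* = 100 */
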
+ */ + if (hc->ep_is_in != 0U) + { + hc->XferSize = (uint32_t)num_packets * hc->max_packet; + } + else + { + hc->XferSize = hc->xfer_len; + } } /* Initialize the HCTSIZn register */ - USBx_HC(ch_num)->HCTSIZ = (hc->xfer_len & USB_OTG_HCTSIZ_XFRSIZ) | + USBx_HC(ch_num)->HCTSIZ = (hc->XferSize & USB_OTG_HCTSIZ_XFRSIZ) | (((uint32_t)num_packets << 19) & USB_OTG_HCTSIZ_PKTCNT) | (((uint32_t)hc->data_pid << 29) & USB_OTG_HCTSIZ_DPID); @@ -1769,6 +1959,65 @@ HAL_StatusTypeDef USB_HC_StartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_HCTypeDe USBx_HC(ch_num)->HCCHAR &= ~USB_OTG_HCCHAR_ODDFRM; USBx_HC(ch_num)->HCCHAR |= (uint32_t)is_oddframe << 29; + if (hc->do_ssplit == 1U) + { + /* Set Hub start Split transaction */ + USBx_HC((uint32_t)ch_num)->HCSPLT = ((uint32_t)hc->hub_addr << USB_OTG_HCSPLT_HUBADDR_Pos) | + (uint32_t)hc->hub_port_nbr | USB_OTG_HCSPLT_SPLITEN; + + /* unmask ack & nyet for IN/OUT transactions */ + USBx_HC((uint32_t)ch_num)->HCINTMSK |= (USB_OTG_HCINTMSK_ACKM | + USB_OTG_HCINTMSK_NYET); + + if ((hc->do_csplit == 1U) && (hc->ep_is_in == 0U)) + { + USBx_HC((uint32_t)ch_num)->HCSPLT |= USB_OTG_HCSPLT_COMPLSPLT; + USBx_HC((uint32_t)ch_num)->HCINTMSK |= USB_OTG_HCINTMSK_NYET; + } + + if (((hc->ep_type == EP_TYPE_ISOC) || (hc->ep_type == EP_TYPE_INTR)) && + (hc->do_csplit == 1U) && (hc->ep_is_in == 1U)) + { + USBx_HC((uint32_t)ch_num)->HCSPLT |= USB_OTG_HCSPLT_COMPLSPLT; + } + + /* Position management for iso out transaction on split mode */ + if ((hc->ep_type == EP_TYPE_ISOC) && (hc->ep_is_in == 0U)) + { + /* Set data payload position */ + switch (hc->iso_splt_xactPos) + { + case HCSPLT_BEGIN: + /* First data payload for OUT Transaction */ + USBx_HC((uint32_t)ch_num)->HCSPLT |= USB_OTG_HCSPLT_XACTPOS_1; + break; + + case HCSPLT_MIDDLE: + /* Middle data payload for OUT Transaction */ + USBx_HC((uint32_t)ch_num)->HCSPLT |= USB_OTG_HCSPLT_XACTPOS_Pos; + break; + + case HCSPLT_END: + /* End data payload for OUT Transaction */ + USBx_HC((uint32_t)ch_num)->HCSPLT |= USB_OTG_HCSPLT_XACTPOS_0; + break; + + case HCSPLT_FULL: + /* Entire data payload for OUT Transaction */ + USBx_HC((uint32_t)ch_num)->HCSPLT |= USB_OTG_HCSPLT_XACTPOS; + break; + + default: + break; + } + } + } + else + { + /* Clear Hub Start Split transaction */ + USBx_HC((uint32_t)ch_num)->HCSPLT = 0U; + } + /* Set host channel enable */ tmpreg = USBx_HC(ch_num)->HCCHAR; tmpreg &= ~USB_OTG_HCCHAR_CHDIS; @@ -1790,7 +2039,7 @@ HAL_StatusTypeDef USB_HC_StartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_HCTypeDe return HAL_OK; } - if ((hc->ep_is_in == 0U) && (hc->xfer_len > 0U)) + if ((hc->ep_is_in == 0U) && (hc->xfer_len > 0U) && (hc->do_csplit == 0U)) { switch (hc->ep_type) { @@ -1836,7 +2085,7 @@ HAL_StatusTypeDef USB_HC_StartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_HCTypeDe * @param USBx Selected device * @retval HAL state */ -uint32_t USB_HC_ReadInterrupt(USB_OTG_GlobalTypeDef *USBx) +uint32_t USB_HC_ReadInterrupt(const USB_OTG_GlobalTypeDef *USBx) { uint32_t USBx_BASE = (uint32_t)USBx; @@ -1850,30 +2099,50 @@ uint32_t USB_HC_ReadInterrupt(USB_OTG_GlobalTypeDef *USBx) * This parameter can be a value from 1 to 15 * @retval HAL state */ -HAL_StatusTypeDef USB_HC_Halt(USB_OTG_GlobalTypeDef *USBx, uint8_t hc_num) +HAL_StatusTypeDef USB_HC_Halt(const USB_OTG_GlobalTypeDef *USBx, uint8_t hc_num) { uint32_t USBx_BASE = (uint32_t)USBx; uint32_t hcnum = (uint32_t)hc_num; - uint32_t count = 0U; + __IO uint32_t count = 0U; uint32_t HcEpType = (USBx_HC(hcnum)->HCCHAR & USB_OTG_HCCHAR_EPTYP) >> 18; + uint32_t ChannelEna = 
(USBx_HC(hcnum)->HCCHAR & USB_OTG_HCCHAR_CHENA) >> 31; + uint32_t SplitEna = (USBx_HC(hcnum)->HCSPLT & USB_OTG_HCSPLT_SPLITEN) >> 31; + + /* In buffer DMA, Channel disable must not be programmed for non-split periodic channels. + At the end of the next uframe/frame (in the worst case), the core generates a channel halted + and disables the channel automatically. */ + + if ((((USBx->GAHBCFG & USB_OTG_GAHBCFG_DMAEN) == USB_OTG_GAHBCFG_DMAEN) && (SplitEna == 0U)) && + ((ChannelEna == 0U) || (((HcEpType == HCCHAR_ISOC) || (HcEpType == HCCHAR_INTR))))) + { + return HAL_OK; + } /* Check for space in the request queue to issue the halt. */ if ((HcEpType == HCCHAR_CTRL) || (HcEpType == HCCHAR_BULK)) { USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHDIS; - if ((USBx->HNPTXSTS & (0xFFU << 16)) == 0U) + if ((USBx->GAHBCFG & USB_OTG_GAHBCFG_DMAEN) == 0U) { - USBx_HC(hcnum)->HCCHAR &= ~USB_OTG_HCCHAR_CHENA; - USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHENA; - USBx_HC(hcnum)->HCCHAR &= ~USB_OTG_HCCHAR_EPDIR; - do + if ((USBx->HNPTXSTS & (0xFFU << 16)) == 0U) { - if (++count > 1000U) + USBx_HC(hcnum)->HCCHAR &= ~USB_OTG_HCCHAR_CHENA; + USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHENA; + do { - break; - } - } while ((USBx_HC(hcnum)->HCCHAR & USB_OTG_HCCHAR_CHENA) == USB_OTG_HCCHAR_CHENA); + count++; + + if (count > 1000U) + { + break; + } + } while ((USBx_HC(hcnum)->HCCHAR & USB_OTG_HCCHAR_CHENA) == USB_OTG_HCCHAR_CHENA); + } + else + { + USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHENA; + } } else { @@ -1888,10 +2157,11 @@ HAL_StatusTypeDef USB_HC_Halt(USB_OTG_GlobalTypeDef *USBx, uint8_t hc_num) { USBx_HC(hcnum)->HCCHAR &= ~USB_OTG_HCCHAR_CHENA; USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHENA; - USBx_HC(hcnum)->HCCHAR &= ~USB_OTG_HCCHAR_EPDIR; do { - if (++count > 1000U) + count++; + + if (count > 1000U) { break; } @@ -1913,7 +2183,7 @@ HAL_StatusTypeDef USB_HC_Halt(USB_OTG_GlobalTypeDef *USBx, uint8_t hc_num) * This parameter can be a value from 1 to 15 * @retval HAL state */ -HAL_StatusTypeDef USB_DoPing(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num) +HAL_StatusTypeDef USB_DoPing(const USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num) { uint32_t USBx_BASE = (uint32_t)USBx; uint32_t chnum = (uint32_t)ch_num; @@ -1939,16 +2209,24 @@ HAL_StatusTypeDef USB_DoPing(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num) */ HAL_StatusTypeDef USB_StopHost(USB_OTG_GlobalTypeDef *USBx) { + HAL_StatusTypeDef ret = HAL_OK; uint32_t USBx_BASE = (uint32_t)USBx; - uint32_t count = 0U; + __IO uint32_t count = 0U; uint32_t value; uint32_t i; (void)USB_DisableGlobalInt(USBx); - /* Flush FIFO */ - (void)USB_FlushTxFifo(USBx, 0x10U); - (void)USB_FlushRxFifo(USBx); + /* Flush USB FIFO */ + if (USB_FlushTxFifo(USBx, 0x10U) != HAL_OK) /* all Tx FIFOs */ + { + ret = HAL_ERROR; + } + + if (USB_FlushRxFifo(USBx) != HAL_OK) + { + ret = HAL_ERROR; + } /* Flush out any leftover queued requests. 
*/ for (i = 0U; i <= 15U; i++) @@ -1971,7 +2249,9 @@ HAL_StatusTypeDef USB_StopHost(USB_OTG_GlobalTypeDef *USBx) do { - if (++count > 1000U) + count++; + + if (count > 1000U) { break; } @@ -1979,12 +2259,12 @@ HAL_StatusTypeDef USB_StopHost(USB_OTG_GlobalTypeDef *USBx) } /* Clear any pending Host interrupts */ - USBx_HOST->HAINT = 0xFFFFFFFFU; - USBx->GINTSTS = 0xFFFFFFFFU; + USBx_HOST->HAINT = CLEAR_INTERRUPT_MASK; + USBx->GINTSTS = CLEAR_INTERRUPT_MASK; (void)USB_EnableGlobalInt(USBx); - return HAL_OK; + return ret; } /** @@ -1992,7 +2272,7 @@ HAL_StatusTypeDef USB_StopHost(USB_OTG_GlobalTypeDef *USBx) * @param USBx Selected device * @retval HAL status */ -HAL_StatusTypeDef USB_ActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx) +HAL_StatusTypeDef USB_ActivateRemoteWakeup(const USB_OTG_GlobalTypeDef *USBx) { uint32_t USBx_BASE = (uint32_t)USBx; @@ -2010,7 +2290,7 @@ HAL_StatusTypeDef USB_ActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx) * @param USBx Selected device * @retval HAL status */ -HAL_StatusTypeDef USB_DeActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx) +HAL_StatusTypeDef USB_DeActivateRemoteWakeup(const USB_OTG_GlobalTypeDef *USBx) { uint32_t USBx_BASE = (uint32_t)USBx; @@ -2021,7 +2301,6 @@ HAL_StatusTypeDef USB_DeActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx) } #endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ - /** * @} */ @@ -2035,5 +2314,3 @@ HAL_StatusTypeDef USB_DeActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx) /** * @} */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/GRBL Driver STM32F401 Debug F407 25MHz ETH.launch b/GRBL Driver STM32F401 Debug F407 25MHz ETH.launch new file mode 100644 index 00000000..8b03fd80 --- /dev/null +++ b/GRBL Driver STM32F401 Debug F407 25MHz ETH.launch @@ -0,0 +1,99 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/GRBL Driver STM32F401 Debug F446 Nucleo144.launch b/GRBL Driver STM32F401 Debug F446 Nucleo144.launch index afd06c39..eb632df8 100644 --- a/GRBL Driver STM32F401 Debug F446 Nucleo144.launch +++ b/GRBL Driver STM32F401 Debug F446 Nucleo144.launch @@ -80,4 +80,5 @@ + diff --git a/Inc/driver.h b/Inc/driver.h index c38c9f2e..b99342d8 100644 --- a/Inc/driver.h +++ b/Inc/driver.h @@ -190,7 +190,7 @@ #error "Nucleo64 based boards does not support USB CDC communication!" #endif -#if ETHERNET_ENABLE && !defined(SPI_IRQ_PIN) +#if ETHERNET_ENABLE && defined(_WIZCHIP_) && !defined(SPI_IRQ_PIN) #error "Board does not support ethernet!" #endif diff --git a/Inc/enet.h b/Inc/enet.h new file mode 100644 index 00000000..37cb819e --- /dev/null +++ b/Inc/enet.h @@ -0,0 +1,30 @@ +/* + enet.h - lwIP driver glue code for STM32F4xx processors + + Part of grblHAL + + Copyright (c) 2021-2024 Terje Io + + grblHAL is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + grblHAL is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with grblHAL. If not, see . 
+*/ + +#ifndef __ENET_H__ +#define __ENET_H__ + +#include "driver.h" + +bool enet_init (void); +bool enet_start (void); + +#endif diff --git a/Inc/my_machine.h b/Inc/my_machine.h index cbaeb2fd..af1c412c 100644 --- a/Inc/my_machine.h +++ b/Inc/my_machine.h @@ -78,6 +78,7 @@ //#define SPINDLE_OFFSET 1 // Uncomment to enable settings for laser spindle XY-offset. // ********************** //#define MODBUS_ENABLE 1 // Set to 1 for auto direction, 2 for direction signal on auxiliary output pin. +//#define ETHERNET_ENABLE 1 // Ethernet streaming. Uses networking plugin. //#define _WIZCHIP_ 5500 // Enables ethernet via WIZnet breakout connected via SPI. Set to 5500 for W5500 chip, 5105 for W5100S. //#define WEBUI_ENABLE 3 // Enable ESP3D-WEBUI plugin along with networking and SD card plugins. // NOTE: Only fully compatible with F412 and F429 MCUs. @@ -159,7 +160,7 @@ //#define MDNS_ENABLE 1 // mDNS daemon. //#define SSDP_ENABLE 1 // SSDP daemon - requires HTTP enabled. //#define MQTT_ENABLE 1 // MQTT client API, only enable if needed by plugin code. -#if SDCARD_ENABLE || WEBUI_ENABLE +#if SDCARD_ENABLE || WEBUI_ENABLE //#define FTP_ENABLE 1 // Ftp daemon - requires SD card enabled. //#define HTTP_ENABLE 1 // http daemon - requires SD card enabled. //#define WEBDAV_ENABLE 1 // webdav protocol - requires http daemon and SD card enabled. diff --git a/Inc/stm32f4xx_hal_conf.h b/Inc/stm32f4xx_hal_conf.h index b9d37e78..71629235 100644 --- a/Inc/stm32f4xx_hal_conf.h +++ b/Inc/stm32f4xx_hal_conf.h @@ -44,7 +44,7 @@ #define HAL_DAC_MODULE_ENABLED /* #define HAL_DCMI_MODULE_ENABLED */ /* #define HAL_DMA2D_MODULE_ENABLED */ -/* #define HAL_ETH_MODULE_ENABLED */ +#define HAL_ETH_MODULE_ENABLED /* #define HAL_NAND_MODULE_ENABLED */ /* #define HAL_NOR_MODULE_ENABLED */ /* #define HAL_PCCARD_MODULE_ENABLED */ diff --git a/Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Inc/usbd_cdc.h b/Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Inc/usbd_cdc.h index b484218e..aeac6bf5 100644 --- a/Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Inc/usbd_cdc.h +++ b/Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Inc/usbd_cdc.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
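
To tie the configuration pieces above together (an illustrative excerpt, not part of the changeset): with the relaxed guard in driver.h, the "Board does not support ethernet!" error now fires only for the WIZnet/SPI path, so a board with the on-chip MAC can enable streaming from my_machine.h alone, with HAL_ETH_MODULE_ENABLED switched on as shown above:

    /* my_machine.h excerpt (sketch): stream over the on-chip Ethernet MAC */
    #define ETHERNET_ENABLE 1        /* networking plugin over the native MAC */
    /* #define _WIZCHIP_ 5500 */     /* leave commented out unless a WIZnet breakout on SPI is used */

Network services such as FTP_ENABLE or HTTP_ENABLE, shown further up in my_machine.h, are then enabled as needed (both still require the SD card option).
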
© Copyright (c) 2015 STMicroelectronics. - * All rights reserved.
+ * Copyright (c) 2015 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -41,9 +40,15 @@ extern "C" { /** @defgroup usbd_cdc_Exported_Defines * @{ */ +#ifndef CDC_IN_EP #define CDC_IN_EP 0x81U /* EP1 for data IN */ +#endif /* CDC_IN_EP */ +#ifndef CDC_OUT_EP #define CDC_OUT_EP 0x01U /* EP1 for data OUT */ +#endif /* CDC_OUT_EP */ +#ifndef CDC_CMD_EP #define CDC_CMD_EP 0x82U /* EP2 for CDC commands */ +#endif /* CDC_CMD_EP */ #ifndef CDC_HS_BINTERVAL #define CDC_HS_BINTERVAL 0x10U @@ -65,6 +70,7 @@ extern "C" { #define CDC_DATA_FS_IN_PACKET_SIZE CDC_DATA_FS_MAX_PACKET_SIZE #define CDC_DATA_FS_OUT_PACKET_SIZE CDC_DATA_FS_MAX_PACKET_SIZE +#define CDC_REQ_MAX_DATA_SIZE 0x7U /*---------------------------------------------------------------------*/ /* CDC definitions */ /*---------------------------------------------------------------------*/ @@ -110,7 +116,7 @@ typedef struct _USBD_CDC_Itf typedef struct { - uint32_t data[CDC_DATA_HS_MAX_PACKET_SIZE / 4U]; /* Force 32bits alignment */ + uint32_t data[CDC_DATA_HS_MAX_PACKET_SIZE / 4U]; /* Force 32-bit alignment */ uint8_t CmdOpCode; uint8_t CmdLength; uint8_t *RxBuffer; @@ -148,12 +154,17 @@ extern USBD_ClassTypeDef USBD_CDC; uint8_t USBD_CDC_RegisterInterface(USBD_HandleTypeDef *pdev, USBD_CDC_ItfTypeDef *fops); +#ifdef USE_USBD_COMPOSITE +uint8_t USBD_CDC_SetTxBuffer(USBD_HandleTypeDef *pdev, uint8_t *pbuff, + uint32_t length, uint8_t ClassId); +uint8_t USBD_CDC_TransmitPacket(USBD_HandleTypeDef *pdev, uint8_t ClassId); +#else uint8_t USBD_CDC_SetTxBuffer(USBD_HandleTypeDef *pdev, uint8_t *pbuff, uint32_t length); - +uint8_t USBD_CDC_TransmitPacket(USBD_HandleTypeDef *pdev); +#endif /* USE_USBD_COMPOSITE */ uint8_t USBD_CDC_SetRxBuffer(USBD_HandleTypeDef *pdev, uint8_t *pbuff); uint8_t USBD_CDC_ReceivePacket(USBD_HandleTypeDef *pdev); -uint8_t USBD_CDC_TransmitPacket(USBD_HandleTypeDef *pdev); /** * @} */ @@ -171,4 +182,3 @@ uint8_t USBD_CDC_TransmitPacket(USBD_HandleTypeDef *pdev); * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Src/usbd_cdc.c b/Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Src/usbd_cdc.c index 52080abe..ad8da254 100644 --- a/Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Src/usbd_cdc.c +++ b/Middlewares/ST/STM32_USB_Device_Library/Class/CDC/Src/usbd_cdc.c @@ -10,6 +10,17 @@ * - Command IN transfer (class requests management) * - Error management * + ****************************************************************************** + * @attention + * + * Copyright (c) 2015 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
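
The new #ifndef guards around CDC_IN_EP, CDC_OUT_EP and CDC_CMD_EP let an application override the endpoint addresses at build time instead of patching the class driver, for example when the CDC function is combined with other classes in a USE_USBD_COMPOSITE build (where USBD_CDC_SetTxBuffer() and USBD_CDC_TransmitPacket() also gain a ClassId argument, as declared above). A minimal sketch, with hypothetical endpoint values:

    /* Defined before usbd_cdc.h is pulled in (or passed as -D compiler flags); values are illustrative */
    #define CDC_IN_EP   0x83U   /* EP3 IN  for CDC data */
    #define CDC_OUT_EP  0x03U   /* EP3 OUT for CDC data */
    #define CDC_CMD_EP  0x84U   /* EP4 IN  for CDC commands */
    #include "usbd_cdc.h"
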
+ * + ****************************************************************************** * @verbatim * * =================================================================== @@ -37,17 +48,6 @@ * @endverbatim * ****************************************************************************** - * @attention - * - *
© Copyright (c) 2015 STMicroelectronics.
- * All rights reserved.
- * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 - * - ****************************************************************************** */ /* BSPDependencies @@ -105,13 +105,14 @@ static uint8_t USBD_CDC_Setup(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *re static uint8_t USBD_CDC_DataIn(USBD_HandleTypeDef *pdev, uint8_t epnum); static uint8_t USBD_CDC_DataOut(USBD_HandleTypeDef *pdev, uint8_t epnum); static uint8_t USBD_CDC_EP0_RxReady(USBD_HandleTypeDef *pdev); - +#ifndef USE_USBD_COMPOSITE static uint8_t *USBD_CDC_GetFSCfgDesc(uint16_t *length); static uint8_t *USBD_CDC_GetHSCfgDesc(uint16_t *length); static uint8_t *USBD_CDC_GetOtherSpeedCfgDesc(uint16_t *length); -static uint8_t *USBD_CDC_GetOtherSpeedCfgDesc(uint16_t *length); uint8_t *USBD_CDC_GetDeviceQualifierDescriptor(uint16_t *length); +#endif /* USE_USBD_COMPOSITE */ +#ifndef USE_USBD_COMPOSITE /* USB Standard Device Descriptor */ __ALIGN_BEGIN static uint8_t USBD_CDC_DeviceQualifierDesc[USB_LEN_DEV_QUALIFIER_DESC] __ALIGN_END = { @@ -126,7 +127,7 @@ __ALIGN_BEGIN static uint8_t USBD_CDC_DeviceQualifierDesc[USB_LEN_DEV_QUALIFIER_ 0x01, 0x00, }; - +#endif /* USE_USBD_COMPOSITE */ /** * @} */ @@ -142,128 +143,45 @@ USBD_ClassTypeDef USBD_CDC = USBD_CDC_Init, USBD_CDC_DeInit, USBD_CDC_Setup, - NULL, /* EP0_TxSent, */ + NULL, /* EP0_TxSent */ USBD_CDC_EP0_RxReady, USBD_CDC_DataIn, USBD_CDC_DataOut, NULL, NULL, NULL, +#ifdef USE_USBD_COMPOSITE + NULL, + NULL, + NULL, + NULL, +#else USBD_CDC_GetHSCfgDesc, USBD_CDC_GetFSCfgDesc, USBD_CDC_GetOtherSpeedCfgDesc, USBD_CDC_GetDeviceQualifierDescriptor, +#endif /* USE_USBD_COMPOSITE */ }; +#ifndef USE_USBD_COMPOSITE /* USB CDC device Configuration Descriptor */ -__ALIGN_BEGIN static uint8_t USBD_CDC_CfgHSDesc[USB_CDC_CONFIG_DESC_SIZ] __ALIGN_END = +__ALIGN_BEGIN static uint8_t USBD_CDC_CfgDesc[USB_CDC_CONFIG_DESC_SIZ] __ALIGN_END = { /* Configuration Descriptor */ 0x09, /* bLength: Configuration Descriptor size */ USB_DESC_TYPE_CONFIGURATION, /* bDescriptorType: Configuration */ - USB_CDC_CONFIG_DESC_SIZ, /* wTotalLength:no of returned bytes */ + USB_CDC_CONFIG_DESC_SIZ, /* wTotalLength */ 0x00, - 0x02, /* bNumInterfaces: 2 interface */ - 0x01, /* bConfigurationValue: Configuration value */ - 0x00, /* iConfiguration: Index of string descriptor describing the configuration */ - 0xC0, /* bmAttributes: self powered */ - 0x32, /* MaxPower 0 mA */ - - /*---------------------------------------------------------------------------*/ - - /* Interface Descriptor */ - 0x09, /* bLength: Interface Descriptor size */ - USB_DESC_TYPE_INTERFACE, /* bDescriptorType: Interface */ - /* Interface descriptor type */ - 0x00, /* bInterfaceNumber: Number of Interface */ - 0x00, /* bAlternateSetting: Alternate setting */ - 0x01, /* bNumEndpoints: One endpoints used */ - 0x02, /* bInterfaceClass: Communication Interface Class */ - 0x02, /* bInterfaceSubClass: Abstract Control Model */ - 0x01, /* bInterfaceProtocol: Common AT commands */ - 0x00, /* iInterface: */ - - /* Header Functional Descriptor */ - 0x05, /* bLength: Endpoint Descriptor size */ - 0x24, /* bDescriptorType: CS_INTERFACE */ - 0x00, /* bDescriptorSubtype: Header Func Desc */ - 0x10, /* bcdCDC: spec release number */ - 0x01, - - /* Call Management Functional Descriptor */ - 0x05, /* bFunctionLength */ - 0x24, /* bDescriptorType: CS_INTERFACE */ - 0x01, /* 
bDescriptorSubtype: Call Management Func Desc */ - 0x00, /* bmCapabilities: D0+D1 */ - 0x01, /* bDataInterface: 1 */ - - /* ACM Functional Descriptor */ - 0x04, /* bFunctionLength */ - 0x24, /* bDescriptorType: CS_INTERFACE */ - 0x02, /* bDescriptorSubtype: Abstract Control Management desc */ - 0x02, /* bmCapabilities */ - - /* Union Functional Descriptor */ - 0x05, /* bFunctionLength */ - 0x24, /* bDescriptorType: CS_INTERFACE */ - 0x06, /* bDescriptorSubtype: Union func desc */ - 0x00, /* bMasterInterface: Communication class interface */ - 0x01, /* bSlaveInterface0: Data Class Interface */ - - /* Endpoint 2 Descriptor */ - 0x07, /* bLength: Endpoint Descriptor size */ - USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ - CDC_CMD_EP, /* bEndpointAddress */ - 0x03, /* bmAttributes: Interrupt */ - LOBYTE(CDC_CMD_PACKET_SIZE), /* wMaxPacketSize: */ - HIBYTE(CDC_CMD_PACKET_SIZE), - CDC_HS_BINTERVAL, /* bInterval: */ - /*---------------------------------------------------------------------------*/ - - /* Data class interface descriptor */ - 0x09, /* bLength: Endpoint Descriptor size */ - USB_DESC_TYPE_INTERFACE, /* bDescriptorType: */ - 0x01, /* bInterfaceNumber: Number of Interface */ - 0x00, /* bAlternateSetting: Alternate setting */ - 0x02, /* bNumEndpoints: Two endpoints used */ - 0x0A, /* bInterfaceClass: CDC */ - 0x00, /* bInterfaceSubClass: */ - 0x00, /* bInterfaceProtocol: */ - 0x00, /* iInterface: */ - - /* Endpoint OUT Descriptor */ - 0x07, /* bLength: Endpoint Descriptor size */ - USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ - CDC_OUT_EP, /* bEndpointAddress */ - 0x02, /* bmAttributes: Bulk */ - LOBYTE(CDC_DATA_HS_MAX_PACKET_SIZE), /* wMaxPacketSize: */ - HIBYTE(CDC_DATA_HS_MAX_PACKET_SIZE), - 0x00, /* bInterval: ignore for Bulk transfer */ - - /* Endpoint IN Descriptor */ - 0x07, /* bLength: Endpoint Descriptor size */ - USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ - CDC_IN_EP, /* bEndpointAddress */ - 0x02, /* bmAttributes: Bulk */ - LOBYTE(CDC_DATA_HS_MAX_PACKET_SIZE), /* wMaxPacketSize: */ - HIBYTE(CDC_DATA_HS_MAX_PACKET_SIZE), - 0x00 /* bInterval: ignore for Bulk transfer */ -}; - - -/* USB CDC device Configuration Descriptor */ -__ALIGN_BEGIN static uint8_t USBD_CDC_CfgFSDesc[USB_CDC_CONFIG_DESC_SIZ] __ALIGN_END = -{ - /* Configuration Descriptor */ - 0x09, /* bLength: Configuration Descriptor size */ - USB_DESC_TYPE_CONFIGURATION, /* bDescriptorType: Configuration */ - USB_CDC_CONFIG_DESC_SIZ, /* wTotalLength:no of returned bytes */ - 0x00, - 0x02, /* bNumInterfaces: 2 interface */ + 0x02, /* bNumInterfaces: 2 interfaces */ 0x01, /* bConfigurationValue: Configuration value */ - 0x00, /* iConfiguration: Index of string descriptor describing the configuration */ - 0xC0, /* bmAttributes: self powered */ - 0x32, /* MaxPower 0 mA */ + 0x00, /* iConfiguration: Index of string descriptor + describing the configuration */ +#if (USBD_SELF_POWERED == 1U) + 0xC0, /* bmAttributes: Bus Powered according to user configuration */ +#else + 0x80, /* bmAttributes: Bus Powered according to user configuration */ +#endif /* USBD_SELF_POWERED */ + USBD_MAX_POWER, /* MaxPower (mA) */ /*---------------------------------------------------------------------------*/ @@ -273,11 +191,11 @@ __ALIGN_BEGIN static uint8_t USBD_CDC_CfgFSDesc[USB_CDC_CONFIG_DESC_SIZ] __ALIGN /* Interface descriptor type */ 0x00, /* bInterfaceNumber: Number of Interface */ 0x00, /* bAlternateSetting: Alternate setting */ - 0x01, /* bNumEndpoints: One endpoints used */ + 0x01, /* bNumEndpoints: One 
endpoint used */ 0x02, /* bInterfaceClass: Communication Interface Class */ 0x02, /* bInterfaceSubClass: Abstract Control Model */ 0x01, /* bInterfaceProtocol: Common AT commands */ - 0x00, /* iInterface: */ + 0x00, /* iInterface */ /* Header Functional Descriptor */ 0x05, /* bLength: Endpoint Descriptor size */ @@ -291,7 +209,7 @@ __ALIGN_BEGIN static uint8_t USBD_CDC_CfgFSDesc[USB_CDC_CONFIG_DESC_SIZ] __ALIGN 0x24, /* bDescriptorType: CS_INTERFACE */ 0x01, /* bDescriptorSubtype: Call Management Func Desc */ 0x00, /* bmCapabilities: D0+D1 */ - 0x01, /* bDataInterface: 1 */ + 0x01, /* bDataInterface */ /* ACM Functional Descriptor */ 0x04, /* bFunctionLength */ @@ -311,9 +229,9 @@ __ALIGN_BEGIN static uint8_t USBD_CDC_CfgFSDesc[USB_CDC_CONFIG_DESC_SIZ] __ALIGN USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ CDC_CMD_EP, /* bEndpointAddress */ 0x03, /* bmAttributes: Interrupt */ - LOBYTE(CDC_CMD_PACKET_SIZE), /* wMaxPacketSize: */ + LOBYTE(CDC_CMD_PACKET_SIZE), /* wMaxPacketSize */ HIBYTE(CDC_CMD_PACKET_SIZE), - CDC_FS_BINTERVAL, /* bInterval: */ + CDC_FS_BINTERVAL, /* bInterval */ /*---------------------------------------------------------------------------*/ /* Data class interface descriptor */ @@ -323,120 +241,33 @@ __ALIGN_BEGIN static uint8_t USBD_CDC_CfgFSDesc[USB_CDC_CONFIG_DESC_SIZ] __ALIGN 0x00, /* bAlternateSetting: Alternate setting */ 0x02, /* bNumEndpoints: Two endpoints used */ 0x0A, /* bInterfaceClass: CDC */ - 0x00, /* bInterfaceSubClass: */ - 0x00, /* bInterfaceProtocol: */ - 0x00, /* iInterface: */ + 0x00, /* bInterfaceSubClass */ + 0x00, /* bInterfaceProtocol */ + 0x00, /* iInterface */ /* Endpoint OUT Descriptor */ 0x07, /* bLength: Endpoint Descriptor size */ USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ CDC_OUT_EP, /* bEndpointAddress */ 0x02, /* bmAttributes: Bulk */ - LOBYTE(CDC_DATA_FS_MAX_PACKET_SIZE), /* wMaxPacketSize: */ + LOBYTE(CDC_DATA_FS_MAX_PACKET_SIZE), /* wMaxPacketSize */ HIBYTE(CDC_DATA_FS_MAX_PACKET_SIZE), - 0x00, /* bInterval: ignore for Bulk transfer */ + 0x00, /* bInterval */ /* Endpoint IN Descriptor */ 0x07, /* bLength: Endpoint Descriptor size */ USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ CDC_IN_EP, /* bEndpointAddress */ 0x02, /* bmAttributes: Bulk */ - LOBYTE(CDC_DATA_FS_MAX_PACKET_SIZE), /* wMaxPacketSize: */ + LOBYTE(CDC_DATA_FS_MAX_PACKET_SIZE), /* wMaxPacketSize */ HIBYTE(CDC_DATA_FS_MAX_PACKET_SIZE), - 0x00 /* bInterval: ignore for Bulk transfer */ -}; - -__ALIGN_BEGIN static uint8_t USBD_CDC_OtherSpeedCfgDesc[USB_CDC_CONFIG_DESC_SIZ] __ALIGN_END = -{ - 0x09, /* bLength: Configuation Descriptor size */ - USB_DESC_TYPE_OTHER_SPEED_CONFIGURATION, - USB_CDC_CONFIG_DESC_SIZ, - 0x00, - 0x02, /* bNumInterfaces: 2 interfaces */ - 0x01, /* bConfigurationValue: */ - 0x04, /* iConfiguration: */ - 0xC0, /* bmAttributes: */ - 0x32, /* MaxPower 100 mA */ - - /*Interface Descriptor */ - 0x09, /* bLength: Interface Descriptor size */ - USB_DESC_TYPE_INTERFACE, /* bDescriptorType: Interface */ - /* Interface descriptor type */ - 0x00, /* bInterfaceNumber: Number of Interface */ - 0x00, /* bAlternateSetting: Alternate setting */ - 0x01, /* bNumEndpoints: One endpoints used */ - 0x02, /* bInterfaceClass: Communication Interface Class */ - 0x02, /* bInterfaceSubClass: Abstract Control Model */ - 0x01, /* bInterfaceProtocol: Common AT commands */ - 0x00, /* iInterface: */ - - /* Header Functional Descriptor */ - 0x05, /* bLength: Endpoint Descriptor size */ - 0x24, /* bDescriptorType: CS_INTERFACE */ - 0x00, /* 
bDescriptorSubtype: Header Func Desc */ - 0x10, /* bcdCDC: spec release number */ - 0x01, - - /*Call Management Functional Descriptor*/ - 0x05, /* bFunctionLength */ - 0x24, /* bDescriptorType: CS_INTERFACE */ - 0x01, /* bDescriptorSubtype: Call Management Func Desc */ - 0x00, /* bmCapabilities: D0+D1 */ - 0x01, /* bDataInterface: 1 */ - - /*ACM Functional Descriptor*/ - 0x04, /* bFunctionLength */ - 0x24, /* bDescriptorType: CS_INTERFACE */ - 0x02, /* bDescriptorSubtype: Abstract Control Management desc */ - 0x02, /* bmCapabilities */ - - /*Union Functional Descriptor*/ - 0x05, /* bFunctionLength */ - 0x24, /* bDescriptorType: CS_INTERFACE */ - 0x06, /* bDescriptorSubtype: Union func desc */ - 0x00, /* bMasterInterface: Communication class interface */ - 0x01, /* bSlaveInterface0: Data Class Interface */ - - /*Endpoint 2 Descriptor*/ - 0x07, /* bLength: Endpoint Descriptor size */ - USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ - CDC_CMD_EP, /* bEndpointAddress */ - 0x03, /* bmAttributes: Interrupt */ - LOBYTE(CDC_CMD_PACKET_SIZE), /* wMaxPacketSize: */ - HIBYTE(CDC_CMD_PACKET_SIZE), - CDC_FS_BINTERVAL, /* bInterval: */ - - /*---------------------------------------------------------------------------*/ - - /*Data class interface descriptor*/ - 0x09, /* bLength: Endpoint Descriptor size */ - USB_DESC_TYPE_INTERFACE, /* bDescriptorType: */ - 0x01, /* bInterfaceNumber: Number of Interface */ - 0x00, /* bAlternateSetting: Alternate setting */ - 0x02, /* bNumEndpoints: Two endpoints used */ - 0x0A, /* bInterfaceClass: CDC */ - 0x00, /* bInterfaceSubClass: */ - 0x00, /* bInterfaceProtocol: */ - 0x00, /* iInterface: */ - - /*Endpoint OUT Descriptor*/ - 0x07, /* bLength: Endpoint Descriptor size */ - USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ - CDC_OUT_EP, /* bEndpointAddress */ - 0x02, /* bmAttributes: Bulk */ - 0x40, /* wMaxPacketSize: */ - 0x00, - 0x00, /* bInterval: ignore for Bulk transfer */ - - /*Endpoint IN Descriptor*/ - 0x07, /* bLength: Endpoint Descriptor size */ - USB_DESC_TYPE_ENDPOINT, /* bDescriptorType: Endpoint */ - CDC_IN_EP, /* bEndpointAddress */ - 0x02, /* bmAttributes: Bulk */ - 0x40, /* wMaxPacketSize: */ - 0x00, 0x00 /* bInterval */ }; +#endif /* USE_USBD_COMPOSITE */ + +static uint8_t CDCInEpAdd = CDC_IN_EP; +static uint8_t CDCOutEpAdd = CDC_OUT_EP; +static uint8_t CDCCmdEpAdd = CDC_CMD_EP; /** * @} @@ -458,73 +289,89 @@ static uint8_t USBD_CDC_Init(USBD_HandleTypeDef *pdev, uint8_t cfgidx) UNUSED(cfgidx); USBD_CDC_HandleTypeDef *hcdc; - hcdc = USBD_malloc(sizeof(USBD_CDC_HandleTypeDef)); + hcdc = (USBD_CDC_HandleTypeDef *)USBD_malloc(sizeof(USBD_CDC_HandleTypeDef)); if (hcdc == NULL) { - pdev->pClassData = NULL; + pdev->pClassDataCmsit[pdev->classId] = NULL; return (uint8_t)USBD_EMEM; } - pdev->pClassData = (void *)hcdc; - memset(pdev->pClassData,0,sizeof(USBD_CDC_HandleTypeDef)); // THIS LINE WAS ADDED + (void)USBD_memset(hcdc, 0, sizeof(USBD_CDC_HandleTypeDef)); + + pdev->pClassDataCmsit[pdev->classId] = (void *)hcdc; + pdev->pClassData = pdev->pClassDataCmsit[pdev->classId]; + +#ifdef USE_USBD_COMPOSITE + /* Get the Endpoints addresses allocated for this class instance */ + CDCInEpAdd = USBD_CoreGetEPAdd(pdev, USBD_EP_IN, USBD_EP_TYPE_BULK, (uint8_t)pdev->classId); + CDCOutEpAdd = USBD_CoreGetEPAdd(pdev, USBD_EP_OUT, USBD_EP_TYPE_BULK, (uint8_t)pdev->classId); + CDCCmdEpAdd = USBD_CoreGetEPAdd(pdev, USBD_EP_IN, USBD_EP_TYPE_INTR, (uint8_t)pdev->classId); +#endif /* USE_USBD_COMPOSITE */ if (pdev->dev_speed == USBD_SPEED_HIGH) { /* Open EP IN */ 
- (void)USBD_LL_OpenEP(pdev, CDC_IN_EP, USBD_EP_TYPE_BULK, + (void)USBD_LL_OpenEP(pdev, CDCInEpAdd, USBD_EP_TYPE_BULK, CDC_DATA_HS_IN_PACKET_SIZE); - pdev->ep_in[CDC_IN_EP & 0xFU].is_used = 1U; + pdev->ep_in[CDCInEpAdd & 0xFU].is_used = 1U; - /* Open EP OUT */ - (void)USBD_LL_OpenEP(pdev, CDC_OUT_EP, USBD_EP_TYPE_BULK, - CDC_DATA_HS_OUT_PACKET_SIZE); + /* Open EP OUT */ + (void)USBD_LL_OpenEP(pdev, CDCOutEpAdd, USBD_EP_TYPE_BULK, + CDC_DATA_HS_OUT_PACKET_SIZE); - pdev->ep_out[CDC_OUT_EP & 0xFU].is_used = 1U; + pdev->ep_out[CDCOutEpAdd & 0xFU].is_used = 1U; - /* Set bInterval for CDC CMD Endpoint */ - pdev->ep_in[CDC_CMD_EP & 0xFU].bInterval = CDC_HS_BINTERVAL; + /* Set bInterval for CDC CMD Endpoint */ + pdev->ep_in[CDCCmdEpAdd & 0xFU].bInterval = CDC_HS_BINTERVAL; } else { /* Open EP IN */ - (void)USBD_LL_OpenEP(pdev, CDC_IN_EP, USBD_EP_TYPE_BULK, + (void)USBD_LL_OpenEP(pdev, CDCInEpAdd, USBD_EP_TYPE_BULK, CDC_DATA_FS_IN_PACKET_SIZE); - pdev->ep_in[CDC_IN_EP & 0xFU].is_used = 1U; + pdev->ep_in[CDCInEpAdd & 0xFU].is_used = 1U; - /* Open EP OUT */ - (void)USBD_LL_OpenEP(pdev, CDC_OUT_EP, USBD_EP_TYPE_BULK, - CDC_DATA_FS_OUT_PACKET_SIZE); + /* Open EP OUT */ + (void)USBD_LL_OpenEP(pdev, CDCOutEpAdd, USBD_EP_TYPE_BULK, + CDC_DATA_FS_OUT_PACKET_SIZE); - pdev->ep_out[CDC_OUT_EP & 0xFU].is_used = 1U; + pdev->ep_out[CDCOutEpAdd & 0xFU].is_used = 1U; - /* Set bInterval for CMD Endpoint */ - pdev->ep_in[CDC_CMD_EP & 0xFU].bInterval = CDC_FS_BINTERVAL; + /* Set bInterval for CMD Endpoint */ + pdev->ep_in[CDCCmdEpAdd & 0xFU].bInterval = CDC_FS_BINTERVAL; } /* Open Command IN EP */ - (void)USBD_LL_OpenEP(pdev, CDC_CMD_EP, USBD_EP_TYPE_INTR, CDC_CMD_PACKET_SIZE); - pdev->ep_in[CDC_CMD_EP & 0xFU].is_used = 1U; + (void)USBD_LL_OpenEP(pdev, CDCCmdEpAdd, USBD_EP_TYPE_INTR, CDC_CMD_PACKET_SIZE); + pdev->ep_in[CDCCmdEpAdd & 0xFU].is_used = 1U; + + hcdc->RxBuffer = NULL; /* Init physical Interface components */ - ((USBD_CDC_ItfTypeDef *)pdev->pUserData)->Init(); + ((USBD_CDC_ItfTypeDef *)pdev->pUserData[pdev->classId])->Init(); /* Init Xfer states */ hcdc->TxState = 0U; hcdc->RxState = 0U; + if (hcdc->RxBuffer == NULL) + { + return (uint8_t)USBD_EMEM; + } + if (pdev->dev_speed == USBD_SPEED_HIGH) { /* Prepare Out endpoint to receive next packet */ - (void)USBD_LL_PrepareReceive(pdev, CDC_OUT_EP, hcdc->RxBuffer, + (void)USBD_LL_PrepareReceive(pdev, CDCOutEpAdd, hcdc->RxBuffer, CDC_DATA_HS_OUT_PACKET_SIZE); } else { /* Prepare Out endpoint to receive next packet */ - (void)USBD_LL_PrepareReceive(pdev, CDC_OUT_EP, hcdc->RxBuffer, + (void)USBD_LL_PrepareReceive(pdev, CDCOutEpAdd, hcdc->RxBuffer, CDC_DATA_FS_OUT_PACKET_SIZE); } @@ -541,30 +388,38 @@ static uint8_t USBD_CDC_Init(USBD_HandleTypeDef *pdev, uint8_t cfgidx) static uint8_t USBD_CDC_DeInit(USBD_HandleTypeDef *pdev, uint8_t cfgidx) { UNUSED(cfgidx); - uint8_t ret = 0U; + + +#ifdef USE_USBD_COMPOSITE + /* Get the Endpoints addresses allocated for this CDC class instance */ + CDCInEpAdd = USBD_CoreGetEPAdd(pdev, USBD_EP_IN, USBD_EP_TYPE_BULK, (uint8_t)pdev->classId); + CDCOutEpAdd = USBD_CoreGetEPAdd(pdev, USBD_EP_OUT, USBD_EP_TYPE_BULK, (uint8_t)pdev->classId); + CDCCmdEpAdd = USBD_CoreGetEPAdd(pdev, USBD_EP_IN, USBD_EP_TYPE_INTR, (uint8_t)pdev->classId); +#endif /* USE_USBD_COMPOSITE */ /* Close EP IN */ - (void)USBD_LL_CloseEP(pdev, CDC_IN_EP); - pdev->ep_in[CDC_IN_EP & 0xFU].is_used = 0U; + (void)USBD_LL_CloseEP(pdev, CDCInEpAdd); + pdev->ep_in[CDCInEpAdd & 0xFU].is_used = 0U; /* Close EP OUT */ - (void)USBD_LL_CloseEP(pdev, CDC_OUT_EP); - 
pdev->ep_out[CDC_OUT_EP & 0xFU].is_used = 0U; + (void)USBD_LL_CloseEP(pdev, CDCOutEpAdd); + pdev->ep_out[CDCOutEpAdd & 0xFU].is_used = 0U; /* Close Command IN EP */ - (void)USBD_LL_CloseEP(pdev, CDC_CMD_EP); - pdev->ep_in[CDC_CMD_EP & 0xFU].is_used = 0U; - pdev->ep_in[CDC_CMD_EP & 0xFU].bInterval = 0U; + (void)USBD_LL_CloseEP(pdev, CDCCmdEpAdd); + pdev->ep_in[CDCCmdEpAdd & 0xFU].is_used = 0U; + pdev->ep_in[CDCCmdEpAdd & 0xFU].bInterval = 0U; /* DeInit physical Interface components */ - if (pdev->pClassData != NULL) + if (pdev->pClassDataCmsit[pdev->classId] != NULL) { - ((USBD_CDC_ItfTypeDef *)pdev->pUserData)->DeInit(); - (void)USBD_free(pdev->pClassData); + ((USBD_CDC_ItfTypeDef *)pdev->pUserData[pdev->classId])->DeInit(); + (void)USBD_free(pdev->pClassDataCmsit[pdev->classId]); + pdev->pClassDataCmsit[pdev->classId] = NULL; pdev->pClassData = NULL; } - return ret; + return (uint8_t)USBD_OK; } /** @@ -577,88 +432,95 @@ static uint8_t USBD_CDC_DeInit(USBD_HandleTypeDef *pdev, uint8_t cfgidx) static uint8_t USBD_CDC_Setup(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) { - USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassDataCmsit[pdev->classId]; + uint16_t len; uint8_t ifalt = 0U; uint16_t status_info = 0U; USBD_StatusTypeDef ret = USBD_OK; - switch (req->bmRequest & USB_REQ_TYPE_MASK) + if (hcdc == NULL) { - case USB_REQ_TYPE_CLASS: - if (req->wLength != 0U) - { - if ((req->bmRequest & 0x80U) != 0U) - { - ((USBD_CDC_ItfTypeDef *)pdev->pUserData)->Control(req->bRequest, - (uint8_t *)hcdc->data, - req->wLength); - - (void)USBD_CtlSendData(pdev, (uint8_t *)hcdc->data, req->wLength); - } - else - { - hcdc->CmdOpCode = req->bRequest; - hcdc->CmdLength = (uint8_t)req->wLength; - - (void)USBD_CtlPrepareRx(pdev, (uint8_t *)hcdc->data, req->wLength); - } - } - else - { - ((USBD_CDC_ItfTypeDef *)pdev->pUserData)->Control(req->bRequest, - (uint8_t *)req, 0U); - } - break; - - case USB_REQ_TYPE_STANDARD: - switch (req->bRequest) - { - case USB_REQ_GET_STATUS: - if (pdev->dev_state == USBD_STATE_CONFIGURED) - { - (void)USBD_CtlSendData(pdev, (uint8_t *)&status_info, 2U); - } - else - { - USBD_CtlError(pdev, req); - ret = USBD_FAIL; - } - break; + return (uint8_t)USBD_FAIL; + } - case USB_REQ_GET_INTERFACE: - if (pdev->dev_state == USBD_STATE_CONFIGURED) + switch (req->bmRequest & USB_REQ_TYPE_MASK) + { + case USB_REQ_TYPE_CLASS: + if (req->wLength != 0U) { - (void)USBD_CtlSendData(pdev, &ifalt, 1U); + if ((req->bmRequest & 0x80U) != 0U) + { + ((USBD_CDC_ItfTypeDef *)pdev->pUserData[pdev->classId])->Control(req->bRequest, + (uint8_t *)hcdc->data, + req->wLength); + + len = MIN(CDC_REQ_MAX_DATA_SIZE, req->wLength); + (void)USBD_CtlSendData(pdev, (uint8_t *)hcdc->data, len); + } + else + { + hcdc->CmdOpCode = req->bRequest; + hcdc->CmdLength = (uint8_t)MIN(req->wLength, USB_MAX_EP0_SIZE); + + (void)USBD_CtlPrepareRx(pdev, (uint8_t *)hcdc->data, hcdc->CmdLength); + } } else { - USBD_CtlError(pdev, req); - ret = USBD_FAIL; + ((USBD_CDC_ItfTypeDef *)pdev->pUserData[pdev->classId])->Control(req->bRequest, + (uint8_t *)req, 0U); } break; - case USB_REQ_SET_INTERFACE: - if (pdev->dev_state != USBD_STATE_CONFIGURED) + case USB_REQ_TYPE_STANDARD: + switch (req->bRequest) { - USBD_CtlError(pdev, req); - ret = USBD_FAIL; + case USB_REQ_GET_STATUS: + if (pdev->dev_state == USBD_STATE_CONFIGURED) + { + (void)USBD_CtlSendData(pdev, (uint8_t *)&status_info, 2U); + } + else + { + USBD_CtlError(pdev, req); + ret = 
USBD_FAIL; + } + break; + + case USB_REQ_GET_INTERFACE: + if (pdev->dev_state == USBD_STATE_CONFIGURED) + { + (void)USBD_CtlSendData(pdev, &ifalt, 1U); + } + else + { + USBD_CtlError(pdev, req); + ret = USBD_FAIL; + } + break; + + case USB_REQ_SET_INTERFACE: + if (pdev->dev_state != USBD_STATE_CONFIGURED) + { + USBD_CtlError(pdev, req); + ret = USBD_FAIL; + } + break; + + case USB_REQ_CLEAR_FEATURE: + break; + + default: + USBD_CtlError(pdev, req); + ret = USBD_FAIL; + break; } break; - case USB_REQ_CLEAR_FEATURE: - break; - default: USBD_CtlError(pdev, req); ret = USBD_FAIL; break; - } - break; - - default: - USBD_CtlError(pdev, req); - ret = USBD_FAIL; - break; } return (uint8_t)ret; @@ -674,20 +536,20 @@ static uint8_t USBD_CDC_Setup(USBD_HandleTypeDef *pdev, static uint8_t USBD_CDC_DataIn(USBD_HandleTypeDef *pdev, uint8_t epnum) { USBD_CDC_HandleTypeDef *hcdc; - PCD_HandleTypeDef *hpcd = pdev->pData; + PCD_HandleTypeDef *hpcd = (PCD_HandleTypeDef *)pdev->pData; - if (pdev->pClassData == NULL) + if (pdev->pClassDataCmsit[pdev->classId] == NULL) { return (uint8_t)USBD_FAIL; } - hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassDataCmsit[pdev->classId]; - if ((pdev->ep_in[epnum].total_length > 0U) && - ((pdev->ep_in[epnum].total_length % hpcd->IN_ep[epnum].maxpacket) == 0U)) + if ((pdev->ep_in[epnum & 0xFU].total_length > 0U) && + ((pdev->ep_in[epnum & 0xFU].total_length % hpcd->IN_ep[epnum & 0xFU].maxpacket) == 0U)) { /* Update the packet total length */ - pdev->ep_in[epnum].total_length = 0U; + pdev->ep_in[epnum & 0xFU].total_length = 0U; /* Send ZLP */ (void)USBD_LL_Transmit(pdev, epnum, NULL, 0U); @@ -695,7 +557,11 @@ static uint8_t USBD_CDC_DataIn(USBD_HandleTypeDef *pdev, uint8_t epnum) else { hcdc->TxState = 0U; - ((USBD_CDC_ItfTypeDef *)pdev->pUserData)->TransmitCplt(hcdc->TxBuffer, &hcdc->TxLength, epnum); + + if (((USBD_CDC_ItfTypeDef *)pdev->pUserData[pdev->classId])->TransmitCplt != NULL) + { + ((USBD_CDC_ItfTypeDef *)pdev->pUserData[pdev->classId])->TransmitCplt(hcdc->TxBuffer, &hcdc->TxLength, epnum); + } } return (uint8_t)USBD_OK; @@ -710,9 +576,9 @@ static uint8_t USBD_CDC_DataIn(USBD_HandleTypeDef *pdev, uint8_t epnum) */ static uint8_t USBD_CDC_DataOut(USBD_HandleTypeDef *pdev, uint8_t epnum) { - USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassDataCmsit[pdev->classId]; - if (pdev->pClassData == NULL) + if (pdev->pClassDataCmsit[pdev->classId] == NULL) { return (uint8_t)USBD_FAIL; } @@ -723,7 +589,7 @@ static uint8_t USBD_CDC_DataOut(USBD_HandleTypeDef *pdev, uint8_t epnum) /* USB data will be immediately processed, this allow next USB traffic being NAKed till the end of the application Xfer */ - ((USBD_CDC_ItfTypeDef *)pdev->pUserData)->Receive(hcdc->RxBuffer, &hcdc->RxLength); + ((USBD_CDC_ItfTypeDef *)pdev->pUserData[pdev->classId])->Receive(hcdc->RxBuffer, &hcdc->RxLength); return (uint8_t)USBD_OK; } @@ -736,77 +602,132 @@ static uint8_t USBD_CDC_DataOut(USBD_HandleTypeDef *pdev, uint8_t epnum) */ static uint8_t USBD_CDC_EP0_RxReady(USBD_HandleTypeDef *pdev) { - USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassDataCmsit[pdev->classId]; - if ((pdev->pUserData != NULL) && (hcdc->CmdOpCode != 0xFFU)) + if (hcdc == NULL) { - ((USBD_CDC_ItfTypeDef *)pdev->pUserData)->Control(hcdc->CmdOpCode, - (uint8_t *)hcdc->data, - 
(uint16_t)hcdc->CmdLength); - hcdc->CmdOpCode = 0xFFU; + return (uint8_t)USBD_FAIL; + } + if ((pdev->pUserData[pdev->classId] != NULL) && (hcdc->CmdOpCode != 0xFFU)) + { + ((USBD_CDC_ItfTypeDef *)pdev->pUserData[pdev->classId])->Control(hcdc->CmdOpCode, + (uint8_t *)hcdc->data, + (uint16_t)hcdc->CmdLength); + hcdc->CmdOpCode = 0xFFU; } return (uint8_t)USBD_OK; } - +#ifndef USE_USBD_COMPOSITE /** * @brief USBD_CDC_GetFSCfgDesc * Return configuration descriptor - * @param speed : current device speed * @param length : pointer data length * @retval pointer to descriptor buffer */ static uint8_t *USBD_CDC_GetFSCfgDesc(uint16_t *length) { - *length = (uint16_t)sizeof(USBD_CDC_CfgFSDesc); + USBD_EpDescTypeDef *pEpCmdDesc = USBD_GetEpDesc(USBD_CDC_CfgDesc, CDC_CMD_EP); + USBD_EpDescTypeDef *pEpOutDesc = USBD_GetEpDesc(USBD_CDC_CfgDesc, CDC_OUT_EP); + USBD_EpDescTypeDef *pEpInDesc = USBD_GetEpDesc(USBD_CDC_CfgDesc, CDC_IN_EP); - return USBD_CDC_CfgFSDesc; + if (pEpCmdDesc != NULL) + { + pEpCmdDesc->bInterval = CDC_FS_BINTERVAL; + } + + if (pEpOutDesc != NULL) + { + pEpOutDesc->wMaxPacketSize = CDC_DATA_FS_MAX_PACKET_SIZE; + } + + if (pEpInDesc != NULL) + { + pEpInDesc->wMaxPacketSize = CDC_DATA_FS_MAX_PACKET_SIZE; + } + + *length = (uint16_t)sizeof(USBD_CDC_CfgDesc); + return USBD_CDC_CfgDesc; } /** * @brief USBD_CDC_GetHSCfgDesc * Return configuration descriptor - * @param speed : current device speed * @param length : pointer data length * @retval pointer to descriptor buffer */ static uint8_t *USBD_CDC_GetHSCfgDesc(uint16_t *length) { - *length = (uint16_t)sizeof(USBD_CDC_CfgHSDesc); + USBD_EpDescTypeDef *pEpCmdDesc = USBD_GetEpDesc(USBD_CDC_CfgDesc, CDC_CMD_EP); + USBD_EpDescTypeDef *pEpOutDesc = USBD_GetEpDesc(USBD_CDC_CfgDesc, CDC_OUT_EP); + USBD_EpDescTypeDef *pEpInDesc = USBD_GetEpDesc(USBD_CDC_CfgDesc, CDC_IN_EP); + + if (pEpCmdDesc != NULL) + { + pEpCmdDesc->bInterval = CDC_HS_BINTERVAL; + } - return USBD_CDC_CfgHSDesc; + if (pEpOutDesc != NULL) + { + pEpOutDesc->wMaxPacketSize = CDC_DATA_HS_MAX_PACKET_SIZE; + } + + if (pEpInDesc != NULL) + { + pEpInDesc->wMaxPacketSize = CDC_DATA_HS_MAX_PACKET_SIZE; + } + + *length = (uint16_t)sizeof(USBD_CDC_CfgDesc); + return USBD_CDC_CfgDesc; } /** - * @brief USBD_CDC_GetCfgDesc + * @brief USBD_CDC_GetOtherSpeedCfgDesc * Return configuration descriptor - * @param speed : current device speed * @param length : pointer data length * @retval pointer to descriptor buffer */ static uint8_t *USBD_CDC_GetOtherSpeedCfgDesc(uint16_t *length) { - *length = (uint16_t)sizeof(USBD_CDC_OtherSpeedCfgDesc); + USBD_EpDescTypeDef *pEpCmdDesc = USBD_GetEpDesc(USBD_CDC_CfgDesc, CDC_CMD_EP); + USBD_EpDescTypeDef *pEpOutDesc = USBD_GetEpDesc(USBD_CDC_CfgDesc, CDC_OUT_EP); + USBD_EpDescTypeDef *pEpInDesc = USBD_GetEpDesc(USBD_CDC_CfgDesc, CDC_IN_EP); - return USBD_CDC_OtherSpeedCfgDesc; + if (pEpCmdDesc != NULL) + { + pEpCmdDesc->bInterval = CDC_FS_BINTERVAL; + } + + if (pEpOutDesc != NULL) + { + pEpOutDesc->wMaxPacketSize = CDC_DATA_FS_MAX_PACKET_SIZE; + } + + if (pEpInDesc != NULL) + { + pEpInDesc->wMaxPacketSize = CDC_DATA_FS_MAX_PACKET_SIZE; + } + + *length = (uint16_t)sizeof(USBD_CDC_CfgDesc); + return USBD_CDC_CfgDesc; } /** -* @brief DeviceQualifierDescriptor -* return Device Qualifier descriptor -* @param length : pointer data length -* @retval pointer to descriptor buffer -*/ + * @brief USBD_CDC_GetDeviceQualifierDescriptor + * return Device Qualifier descriptor + * @param length : pointer data length + * @retval pointer to descriptor buffer + */ uint8_t 
*USBD_CDC_GetDeviceQualifierDescriptor(uint16_t *length) { *length = (uint16_t)sizeof(USBD_CDC_DeviceQualifierDesc); return USBD_CDC_DeviceQualifierDesc; } - +#endif /* USE_USBD_COMPOSITE */ /** -* @brief USBD_CDC_RegisterInterface + * @brief USBD_CDC_RegisterInterface * @param pdev: device instance * @param fops: CD Interface callback * @retval status @@ -819,21 +740,36 @@ uint8_t USBD_CDC_RegisterInterface(USBD_HandleTypeDef *pdev, return (uint8_t)USBD_FAIL; } - pdev->pUserData = fops; + pdev->pUserData[pdev->classId] = fops; return (uint8_t)USBD_OK; } + /** * @brief USBD_CDC_SetTxBuffer * @param pdev: device instance * @param pbuff: Tx Buffer + * @param length: length of data to be sent + * @param ClassId: The Class ID * @retval status */ +#ifdef USE_USBD_COMPOSITE +uint8_t USBD_CDC_SetTxBuffer(USBD_HandleTypeDef *pdev, + uint8_t *pbuff, uint32_t length, uint8_t ClassId) +{ + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassDataCmsit[ClassId]; +#else uint8_t USBD_CDC_SetTxBuffer(USBD_HandleTypeDef *pdev, uint8_t *pbuff, uint32_t length) { - USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassDataCmsit[pdev->classId]; +#endif /* USE_USBD_COMPOSITE */ + + if (hcdc == NULL) + { + return (uint8_t)USBD_FAIL; + } hcdc->TxBuffer = pbuff; hcdc->TxLength = length; @@ -841,7 +777,6 @@ uint8_t USBD_CDC_SetTxBuffer(USBD_HandleTypeDef *pdev, return (uint8_t)USBD_OK; } - /** * @brief USBD_CDC_SetRxBuffer * @param pdev: device instance @@ -850,25 +785,44 @@ uint8_t USBD_CDC_SetTxBuffer(USBD_HandleTypeDef *pdev, */ uint8_t USBD_CDC_SetRxBuffer(USBD_HandleTypeDef *pdev, uint8_t *pbuff) { - USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassDataCmsit[pdev->classId]; + + if (hcdc == NULL) + { + return (uint8_t)USBD_FAIL; + } hcdc->RxBuffer = pbuff; return (uint8_t)USBD_OK; } + /** * @brief USBD_CDC_TransmitPacket * Transmit packet on IN endpoint * @param pdev: device instance + * @param ClassId: The Class ID * @retval status */ +#ifdef USE_USBD_COMPOSITE +uint8_t USBD_CDC_TransmitPacket(USBD_HandleTypeDef *pdev, uint8_t ClassId) +{ + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassDataCmsit[ClassId]; +#else uint8_t USBD_CDC_TransmitPacket(USBD_HandleTypeDef *pdev) { - USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassDataCmsit[pdev->classId]; +#endif /* USE_USBD_COMPOSITE */ + USBD_StatusTypeDef ret = USBD_BUSY; - if (pdev->pClassData == NULL) +#ifdef USE_USBD_COMPOSITE + /* Get the Endpoints addresses allocated for this class instance */ + CDCInEpAdd = USBD_CoreGetEPAdd(pdev, USBD_EP_IN, USBD_EP_TYPE_BULK, ClassId); +#endif /* USE_USBD_COMPOSITE */ + + if (hcdc == NULL) { return (uint8_t)USBD_FAIL; } @@ -879,10 +833,10 @@ uint8_t USBD_CDC_TransmitPacket(USBD_HandleTypeDef *pdev) hcdc->TxState = 1U; /* Update the packet total length */ - pdev->ep_in[CDC_IN_EP & 0xFU].total_length = hcdc->TxLength; + pdev->ep_in[CDCInEpAdd & 0xFU].total_length = hcdc->TxLength; /* Transmit next packet */ - (void)USBD_LL_Transmit(pdev, CDC_IN_EP, hcdc->TxBuffer, hcdc->TxLength); + (void)USBD_LL_Transmit(pdev, CDCInEpAdd, hcdc->TxBuffer, hcdc->TxLength); ret = USBD_OK; } @@ -890,7 +844,6 @@ uint8_t USBD_CDC_TransmitPacket(USBD_HandleTypeDef *pdev) return (uint8_t)ret; } - /** * @brief 
USBD_CDC_ReceivePacket * prepare OUT Endpoint for reception @@ -899,9 +852,14 @@ uint8_t USBD_CDC_TransmitPacket(USBD_HandleTypeDef *pdev) */ uint8_t USBD_CDC_ReceivePacket(USBD_HandleTypeDef *pdev) { - USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassData; + USBD_CDC_HandleTypeDef *hcdc = (USBD_CDC_HandleTypeDef *)pdev->pClassDataCmsit[pdev->classId]; + +#ifdef USE_USBD_COMPOSITE + /* Get the Endpoints addresses allocated for this class instance */ + CDCOutEpAdd = USBD_CoreGetEPAdd(pdev, USBD_EP_OUT, USBD_EP_TYPE_BULK, (uint8_t)pdev->classId); +#endif /* USE_USBD_COMPOSITE */ - if (pdev->pClassData == NULL) + if (pdev->pClassDataCmsit[pdev->classId] == NULL) { return (uint8_t)USBD_FAIL; } @@ -909,13 +867,13 @@ uint8_t USBD_CDC_ReceivePacket(USBD_HandleTypeDef *pdev) if (pdev->dev_speed == USBD_SPEED_HIGH) { /* Prepare Out endpoint to receive next packet */ - (void)USBD_LL_PrepareReceive(pdev, CDC_OUT_EP, hcdc->RxBuffer, + (void)USBD_LL_PrepareReceive(pdev, CDCOutEpAdd, hcdc->RxBuffer, CDC_DATA_HS_OUT_PACKET_SIZE); } else { /* Prepare Out endpoint to receive next packet */ - (void)USBD_LL_PrepareReceive(pdev, CDC_OUT_EP, hcdc->RxBuffer, + (void)USBD_LL_PrepareReceive(pdev, CDCOutEpAdd, hcdc->RxBuffer, CDC_DATA_FS_OUT_PACKET_SIZE); } @@ -933,4 +891,3 @@ uint8_t USBD_CDC_ReceivePacket(USBD_HandleTypeDef *pdev) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_core.h b/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_core.h index c7d2ba39..4672921b 100644 --- a/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_core.h +++ b/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_core.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2015 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2015 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -87,6 +86,20 @@ USBD_StatusTypeDef USBD_DeInit(USBD_HandleTypeDef *pdev); USBD_StatusTypeDef USBD_Start(USBD_HandleTypeDef *pdev); USBD_StatusTypeDef USBD_Stop(USBD_HandleTypeDef *pdev); USBD_StatusTypeDef USBD_RegisterClass(USBD_HandleTypeDef *pdev, USBD_ClassTypeDef *pclass); +#if (USBD_USER_REGISTER_CALLBACK == 1U) +USBD_StatusTypeDef USBD_RegisterDevStateCallback(USBD_HandleTypeDef *pdev, USBD_DevStateCallbackTypeDef pUserCallback); +#endif /* USBD_USER_REGISTER_CALLBACK */ + +#ifdef USE_USBD_COMPOSITE +USBD_StatusTypeDef USBD_RegisterClassComposite(USBD_HandleTypeDef *pdev, USBD_ClassTypeDef *pclass, + USBD_CompositeClassTypeDef classtype, uint8_t *EpAddr); + +USBD_StatusTypeDef USBD_UnRegisterClassComposite(USBD_HandleTypeDef *pdev); +uint8_t USBD_CoreGetEPAdd(USBD_HandleTypeDef *pdev, uint8_t ep_dir, uint8_t ep_type, uint8_t ClassId); +#endif /* USE_USBD_COMPOSITE */ + +uint8_t USBD_CoreFindIF(USBD_HandleTypeDef *pdev, uint8_t index); +uint8_t USBD_CoreFindEP(USBD_HandleTypeDef *pdev, uint8_t index); USBD_StatusTypeDef USBD_RunTestMode(USBD_HandleTypeDef *pdev); USBD_StatusTypeDef USBD_SetClassConfig(USBD_HandleTypeDef *pdev, uint8_t cfgidx); @@ -129,11 +142,18 @@ USBD_StatusTypeDef USBD_LL_Transmit(USBD_HandleTypeDef *pdev, uint8_t ep_addr, USBD_StatusTypeDef USBD_LL_PrepareReceive(USBD_HandleTypeDef *pdev, uint8_t ep_addr, uint8_t *pbuf, uint32_t size); +#ifdef USBD_HS_TESTMODE_ENABLE +USBD_StatusTypeDef USBD_LL_SetTestMode(USBD_HandleTypeDef *pdev, uint8_t testmode); +#endif /* USBD_HS_TESTMODE_ENABLE */ + uint8_t USBD_LL_IsStallEP(USBD_HandleTypeDef *pdev, uint8_t ep_addr); uint32_t USBD_LL_GetRxDataSize(USBD_HandleTypeDef *pdev, uint8_t ep_addr); void USBD_LL_Delay(uint32_t Delay); +void *USBD_GetEpDesc(uint8_t *pConfDesc, uint8_t EpAddr); +USBD_DescHeaderTypeDef *USBD_GetNextDesc(uint8_t *pbuf, uint16_t *ptr); + /** * @} */ @@ -149,10 +169,7 @@ void USBD_LL_Delay(uint32_t Delay); */ /** -* @} -*/ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ - + * @} + */ diff --git a/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ctlreq.h b/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ctlreq.h index f973a8b1..6c45d6ce 100644 --- a/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ctlreq.h +++ b/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ctlreq.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2015 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2015 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -96,8 +95,7 @@ void USBD_GetString(uint8_t *desc, uint8_t *unicode, uint16_t *len); */ /** -* @} -*/ + * @} + */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_def.h b/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_def.h index 0463e2f3..2a295d78 100644 --- a/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_def.h +++ b/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_def.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2015 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2015 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -53,6 +52,24 @@ extern "C" { #define USBD_MAX_NUM_CONFIGURATION 1U #endif /* USBD_MAX_NUM_CONFIGURATION */ +#ifdef USE_USBD_COMPOSITE +#ifndef USBD_MAX_SUPPORTED_CLASS +#define USBD_MAX_SUPPORTED_CLASS 4U +#endif /* USBD_MAX_SUPPORTED_CLASS */ +#else +#ifndef USBD_MAX_SUPPORTED_CLASS +#define USBD_MAX_SUPPORTED_CLASS 1U +#endif /* USBD_MAX_SUPPORTED_CLASS */ +#endif /* USE_USBD_COMPOSITE */ + +#ifndef USBD_MAX_CLASS_ENDPOINTS +#define USBD_MAX_CLASS_ENDPOINTS 5U +#endif /* USBD_MAX_CLASS_ENDPOINTS */ + +#ifndef USBD_MAX_CLASS_INTERFACES +#define USBD_MAX_CLASS_INTERFACES 5U +#endif /* USBD_MAX_CLASS_INTERFACES */ + #ifndef USBD_LPM_ENABLED #define USBD_LPM_ENABLED 0U #endif /* USBD_LPM_ENABLED */ @@ -61,6 +78,10 @@ extern "C" { #define USBD_SELF_POWERED 1U #endif /*USBD_SELF_POWERED */ +#ifndef USBD_MAX_POWER +#define USBD_MAX_POWER 0x32U /* 100 mA */ +#endif /* USBD_MAX_POWER */ + #ifndef USBD_SUPPORT_USER_STRING_DESC #define USBD_SUPPORT_USER_STRING_DESC 0U #endif /* USBD_SUPPORT_USER_STRING_DESC */ @@ -114,6 +135,7 @@ extern "C" { #define USB_DESC_TYPE_ENDPOINT 0x05U #define USB_DESC_TYPE_DEVICE_QUALIFIER 0x06U #define USB_DESC_TYPE_OTHER_SPEED_CONFIGURATION 0x07U +#define USB_DESC_TYPE_IAD 0x0BU #define USB_DESC_TYPE_BOS 0x0FU #define USB_CONFIG_REMOTE_WAKEUP 0x02U @@ -125,6 +147,11 @@ extern "C" { #define USB_DEVICE_CAPABITY_TYPE 0x10U +#define USB_CONF_DESC_SIZE 0x09U +#define USB_IF_DESC_SIZE 0x09U +#define USB_EP_DESC_SIZE 0x07U +#define USB_IAD_DESC_SIZE 0x08U + #define USB_HS_MAX_PACKET_SIZE 512U #define USB_FS_MAX_PACKET_SIZE 64U #define USB_MAX_EP0_SIZE 64U @@ -150,7 +177,14 @@ extern "C" { #define USBD_EP_TYPE_BULK 0x02U #define USBD_EP_TYPE_INTR 0x03U - +#ifdef USE_USBD_COMPOSITE +#define USBD_EP_IN 0x80U +#define USBD_EP_OUT 0x00U +#define USBD_FUNC_DESCRIPTOR_TYPE 0x24U +#define USBD_DESC_SUBTYPE_ACM 0x0FU +#define USBD_DESC_ECM_BCD_LOW 0x00U +#define USBD_DESC_ECM_BCD_HIGH 0x10U +#endif /* USE_USBD_COMPOSITE */ /** * @} */ @@ -173,14 +207,13 @@ typedef struct { uint8_t bLength; uint8_t bDescriptorType; - uint8_t wDescriptorLengthLow; - uint8_t wDescriptorLengthHigh; + uint16_t wTotalLength; uint8_t bNumInterfaces; uint8_t bConfigurationValue; uint8_t iConfiguration; uint8_t bmAttributes; uint8_t bMaxPower; -} USBD_ConfigDescTypedef; +} __PACKED USBD_ConfigDescTypeDef; typedef struct { @@ -188,8 +221,24 @@ typedef struct uint8_t bDescriptorType; uint16_t wTotalLength; uint8_t bNumDeviceCaps; -} USBD_BosDescTypedef; +} USBD_BosDescTypeDef; + +typedef struct +{ + uint8_t bLength; + uint8_t bDescriptorType; + uint8_t bEndpointAddress; + uint8_t bmAttributes; + uint16_t wMaxPacketSize; + uint8_t bInterval; +} __PACKED USBD_EpDescTypeDef; +typedef struct +{ + uint8_t bLength; + uint8_t bDescriptorType; + uint8_t bDescriptorSubType; +} USBD_DescHeaderTypeDef; struct _USBD_HandleTypeDef; @@ -214,7 +263,7 @@ typedef struct _Device_cb uint8_t *(*GetDeviceQualifierDescriptor)(uint16_t *length); 
#if (USBD_SUPPORT_USER_STRING_DESC == 1U) uint8_t *(*GetUsrStrDescriptor)(struct _USBD_HandleTypeDef *pdev, uint8_t index, uint16_t *length); -#endif +#endif /* USBD_SUPPORT_USER_STRING_DESC */ } USBD_ClassTypeDef; @@ -247,10 +296,10 @@ typedef struct uint8_t *(*GetInterfaceStrDescriptor)(USBD_SpeedTypeDef speed, uint16_t *length); #if (USBD_CLASS_USER_STRING_DESC == 1) uint8_t *(*GetUserStrDescriptor)(USBD_SpeedTypeDef speed, uint8_t idx, uint16_t *length); -#endif +#endif /* USBD_CLASS_USER_STRING_DESC */ #if ((USBD_LPM_ENABLED == 1U) || (USBD_CLASS_BOS_ENABLED == 1)) uint8_t *(*GetBOSDescriptor)(USBD_SpeedTypeDef speed, uint16_t *length); -#endif +#endif /* (USBD_LPM_ENABLED == 1U) || (USBD_CLASS_BOS_ENABLED == 1) */ } USBD_DescriptorsTypeDef; /* USB Device handle structure */ @@ -264,6 +313,49 @@ typedef struct uint16_t bInterval; } USBD_EndpointTypeDef; +#ifdef USE_USBD_COMPOSITE +typedef enum +{ + CLASS_TYPE_NONE = 0, + CLASS_TYPE_HID = 1, + CLASS_TYPE_CDC = 2, + CLASS_TYPE_MSC = 3, + CLASS_TYPE_DFU = 4, + CLASS_TYPE_CHID = 5, + CLASS_TYPE_AUDIO = 6, + CLASS_TYPE_ECM = 7, + CLASS_TYPE_RNDIS = 8, + CLASS_TYPE_MTP = 9, + CLASS_TYPE_VIDEO = 10, + CLASS_TYPE_PRINTER = 11, + CLASS_TYPE_CCID = 12, +} USBD_CompositeClassTypeDef; + + +/* USB Device handle structure */ +typedef struct +{ + uint8_t add; + uint8_t type; + uint8_t size; + uint8_t is_used; +} USBD_EPTypeDef; + +/* USB Device handle structure */ +typedef struct +{ + USBD_CompositeClassTypeDef ClassType; + uint32_t ClassId; + uint32_t Active; + uint32_t NumEps; + USBD_EPTypeDef Eps[USBD_MAX_CLASS_ENDPOINTS]; + uint8_t *EpAdd; + uint32_t NumIf; + uint8_t Ifs[USBD_MAX_CLASS_INTERFACES]; + uint32_t CurrPcktSze; +} USBD_CompositeElementTypeDef; +#endif /* USE_USBD_COMPOSITE */ + /* USB Device handle structure */ typedef struct _USBD_HandleTypeDef { @@ -274,10 +366,10 @@ typedef struct _USBD_HandleTypeDef USBD_SpeedTypeDef dev_speed; USBD_EndpointTypeDef ep_in[16]; USBD_EndpointTypeDef ep_out[16]; - uint32_t ep0_state; + __IO uint32_t ep0_state; uint32_t ep0_data_len; - uint8_t dev_state; - uint8_t dev_old_state; + __IO uint8_t dev_state; + __IO uint8_t dev_old_state; uint8_t dev_address; uint8_t dev_connection_status; uint8_t dev_test_mode; @@ -286,14 +378,40 @@ typedef struct _USBD_HandleTypeDef USBD_SetupReqTypedef request; USBD_DescriptorsTypeDef *pDesc; - USBD_ClassTypeDef *pClass; + USBD_ClassTypeDef *pClass[USBD_MAX_SUPPORTED_CLASS]; void *pClassData; - void *pUserData; + void *pClassDataCmsit[USBD_MAX_SUPPORTED_CLASS]; + void *pUserData[USBD_MAX_SUPPORTED_CLASS]; void *pData; void *pBosDesc; void *pConfDesc; + uint32_t classId; + uint32_t NumClasses; +#ifdef USE_USBD_COMPOSITE + USBD_CompositeElementTypeDef tclasslist[USBD_MAX_SUPPORTED_CLASS]; +#endif /* USE_USBD_COMPOSITE */ +#if (USBD_USER_REGISTER_CALLBACK == 1U) + void (* DevStateCallback)(uint8_t dev_state, uint8_t cfgidx); /*!< User Notification callback */ +#endif /* USBD_USER_REGISTER_CALLBACK */ } USBD_HandleTypeDef; +#if (USBD_USER_REGISTER_CALLBACK == 1U) +typedef void (*USBD_DevStateCallbackTypeDef)(uint8_t dev_state, uint8_t cfgidx); /*!< pointer to User callback function */ +#endif /* USBD_USER_REGISTER_CALLBACK */ + +/* USB Device endpoint direction */ +typedef enum +{ + OUT = 0x00, + IN = 0x80, +} USBD_EPDirectionTypeDef; + +typedef enum +{ + NETWORK_CONNECTION = 0x00, + RESPONSE_AVAILABLE = 0x01, + CONNECTION_SPEED_CHANGE = 0x2A +} USBD_CDC_NotifCodeTypeDef; /** * @} */ @@ -305,7 +423,9 @@ typedef struct _USBD_HandleTypeDef */ __STATIC_INLINE uint16_t 
SWAPBYTE(uint8_t *addr) { - uint16_t _SwapVal, _Byte1, _Byte2; + uint16_t _SwapVal; + uint16_t _Byte1; + uint16_t _Byte2; uint8_t *_pbuff = addr; _Byte1 = *(uint8_t *)_pbuff; @@ -317,11 +437,21 @@ __STATIC_INLINE uint16_t SWAPBYTE(uint8_t *addr) return _SwapVal; } +#ifndef LOBYTE #define LOBYTE(x) ((uint8_t)((x) & 0x00FFU)) +#endif /* LOBYTE */ + +#ifndef HIBYTE #define HIBYTE(x) ((uint8_t)(((x) & 0xFF00U) >> 8U)) +#endif /* HIBYTE */ + +#ifndef MIN #define MIN(a, b) (((a) < (b)) ? (a) : (b)) -#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif /* MIN */ +#ifndef MAX +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif /* MAX */ #if defined ( __GNUC__ ) #ifndef __weak @@ -388,6 +518,6 @@ __STATIC_INLINE uint16_t SWAPBYTE(uint8_t *addr) */ /** -* @} -*/ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + * @} + */ + diff --git a/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ioreq.h b/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ioreq.h index b7159d53..15197b92 100644 --- a/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ioreq.h +++ b/Middlewares/ST/STM32_USB_Device_Library/Core/Inc/usbd_ioreq.h @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2015 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2015 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -109,6 +108,6 @@ uint32_t USBD_GetRxCount(USBD_HandleTypeDef *pdev, uint8_t ep_addr); */ /** -* @} -*/ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + * @} + */ + diff --git a/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_core.c b/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_core.c index 3faed352..0576c877 100644 --- a/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_core.c +++ b/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_core.c @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2015 STMicroelectronics.
- * All rights reserved.
+ * Copyright (c) 2015 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -20,72 +19,76 @@ /* Includes ------------------------------------------------------------------*/ #include "usbd_core.h" +#ifdef USE_USBD_COMPOSITE +#include "usbd_composite_builder.h" +#endif /* USE_USBD_COMPOSITE */ + /** @addtogroup STM32_USBD_DEVICE_LIBRARY -* @{ -*/ + * @{ + */ /** @defgroup USBD_CORE -* @brief usbd core module -* @{ -*/ + * @brief usbd core module + * @{ + */ /** @defgroup USBD_CORE_Private_TypesDefinitions -* @{ -*/ + * @{ + */ /** -* @} -*/ + * @} + */ /** @defgroup USBD_CORE_Private_Defines -* @{ -*/ + * @{ + */ /** -* @} -*/ + * @} + */ /** @defgroup USBD_CORE_Private_Macros -* @{ -*/ + * @{ + */ /** -* @} -*/ + * @} + */ /** @defgroup USBD_CORE_Private_FunctionPrototypes -* @{ -*/ + * @{ + */ /** -* @} -*/ + * @} + */ /** @defgroup USBD_CORE_Private_Variables -* @{ -*/ + * @{ + */ /** -* @} -*/ + * @} + */ /** @defgroup USBD_CORE_Private_Functions -* @{ -*/ + * @{ + */ /** -* @brief USBD_Init -* Initializes the device stack and load the class driver -* @param pdev: device instance -* @param pdesc: Descriptor structure address -* @param id: Low level core index -* @retval None -*/ + * @brief USBD_Init + * Initialize the device stack and load the class driver + * @param pdev: device instance + * @param pdesc: Descriptor structure address + * @param id: Low level core index + * @retval status: USBD Status + */ USBD_StatusTypeDef USBD_Init(USBD_HandleTypeDef *pdev, USBD_DescriptorsTypeDef *pdesc, uint8_t id) { @@ -96,20 +99,30 @@ USBD_StatusTypeDef USBD_Init(USBD_HandleTypeDef *pdev, { #if (USBD_DEBUG_LEVEL > 1U) USBD_ErrLog("Invalid Device handle"); -#endif +#endif /* (USBD_DEBUG_LEVEL > 1U) */ return USBD_FAIL; } - /* Unlink previous class */ - if (pdev->pClass != NULL) +#ifdef USE_USBD_COMPOSITE + /* Parse the table of classes in use */ + for (uint32_t i = 0; i < USBD_MAX_SUPPORTED_CLASS; i++) { - pdev->pClass = NULL; + /* Unlink previous class*/ + pdev->pClass[i] = NULL; + pdev->pUserData[i] = NULL; + + /* Set class as inactive */ + pdev->tclasslist[i].Active = 0; + pdev->NumClasses = 0; + pdev->classId = 0; } +#else + /* Unlink previous class*/ + pdev->pClass[0] = NULL; + pdev->pUserData[0] = NULL; +#endif /* USE_USBD_COMPOSITE */ - if (pdev->pConfDesc != NULL) - { - pdev->pConfDesc = NULL; - } + pdev->pConfDesc = NULL; /* Assign USBD Descriptors */ if (pdesc != NULL) @@ -128,38 +141,52 @@ USBD_StatusTypeDef USBD_Init(USBD_HandleTypeDef *pdev, } /** -* @brief USBD_DeInit -* Re-Initialize th device library -* @param pdev: device instance -* @retval status: status -*/ + * @brief USBD_DeInit + * De-Initialize the device library + * @param pdev: device instance + * @retval status: USBD Status + */ USBD_StatusTypeDef USBD_DeInit(USBD_HandleTypeDef *pdev) { USBD_StatusTypeDef ret; + /* Disconnect the USB Device */ + (void)USBD_LL_Stop(pdev); + /* Set Default State */ pdev->dev_state = USBD_STATE_DEFAULT; - /* Free Class Resources */ - if (pdev->pClass != NULL) +#ifdef 
USE_USBD_COMPOSITE + /* Parse the table of classes in use */ + for (uint32_t i = 0; i < USBD_MAX_SUPPORTED_CLASS; i++) { - pdev->pClass->DeInit(pdev, (uint8_t)pdev->dev_config); + /* Check if current class is in use */ + if ((pdev->tclasslist[i].Active) == 1U) + { + if (pdev->pClass[i] != NULL) + { + pdev->classId = i; + /* Free Class Resources */ + pdev->pClass[i]->DeInit(pdev, (uint8_t)pdev->dev_config); + } + } } - - if (pdev->pConfDesc != NULL) +#else + /* Free Class Resources */ + if (pdev->pClass[0] != NULL) { - pdev->pConfDesc = NULL; + pdev->pClass[0]->DeInit(pdev, (uint8_t)pdev->dev_config); } - /* Stop the low level driver */ - ret = USBD_LL_Stop(pdev); + pdev->pUserData[0] = NULL; - if (ret != USBD_OK) - { - return ret; - } +#endif /* USE_USBD_COMPOSITE */ - /* Initialize low level driver */ + /* Free Device descriptors resources */ + pdev->pDesc = NULL; + pdev->pConfDesc = NULL; + + /* DeInitialize low level driver */ ret = USBD_LL_DeInit(pdev); return ret; @@ -168,7 +195,7 @@ USBD_StatusTypeDef USBD_DeInit(USBD_HandleTypeDef *pdev) /** * @brief USBD_RegisterClass * Link class driver to Device Core. - * @param pDevice : Device Handle + * @param pdev: Device Handle * @param pclass: Class handle * @retval USBD Status */ @@ -180,23 +207,173 @@ USBD_StatusTypeDef USBD_RegisterClass(USBD_HandleTypeDef *pdev, USBD_ClassTypeDe { #if (USBD_DEBUG_LEVEL > 1U) USBD_ErrLog("Invalid Class handle"); -#endif +#endif /* (USBD_DEBUG_LEVEL > 1U) */ return USBD_FAIL; } /* link the class to the USB Device handle */ - pdev->pClass = pclass; + pdev->pClass[0] = pclass; /* Get Device Configuration Descriptor */ -#ifdef USE_USB_FS - pdev->pConfDesc = (void *)pdev->pClass->GetFSConfigDescriptor(&len); -#else /* USE_USB_HS */ - pdev->pConfDesc = (void *)pdev->pClass->GetHSConfigDescriptor(&len); +#ifdef USE_USB_HS + if (pdev->pClass[pdev->classId]->GetHSConfigDescriptor != NULL) + { + pdev->pConfDesc = (void *)pdev->pClass[pdev->classId]->GetHSConfigDescriptor(&len); + } +#else /* Default USE_USB_FS */ + if (pdev->pClass[pdev->classId]->GetFSConfigDescriptor != NULL) + { + pdev->pConfDesc = (void *)pdev->pClass[pdev->classId]->GetFSConfigDescriptor(&len); + } +#endif /* USE_USB_FS */ + + /* Increment the NumClasses */ + pdev->NumClasses++; + + return USBD_OK; +} + +#ifdef USE_USBD_COMPOSITE +/** + * @brief USBD_RegisterClassComposite + * Link class driver to Device Core. 
+ * @param pdev : Device Handle + * @param pclass: Class handle + * @param classtype: Class type + * @param EpAddr: Endpoint Address handle + * @retval USBD Status + */ +USBD_StatusTypeDef USBD_RegisterClassComposite(USBD_HandleTypeDef *pdev, USBD_ClassTypeDef *pclass, + USBD_CompositeClassTypeDef classtype, uint8_t *EpAddr) +{ + USBD_StatusTypeDef ret = USBD_OK; + uint16_t len = 0U; + + if ((pdev->classId < USBD_MAX_SUPPORTED_CLASS) && (pdev->NumClasses < USBD_MAX_SUPPORTED_CLASS)) + { + if ((uint32_t)pclass != 0U) + { + /* Link the class to the USB Device handle */ + pdev->pClass[pdev->classId] = pclass; + ret = USBD_OK; + + pdev->tclasslist[pdev->classId].EpAdd = EpAddr; + + /* Call the composite class builder */ + (void)USBD_CMPSIT_AddClass(pdev, pclass, classtype, 0); + + /* Increment the ClassId for the next occurrence */ + pdev->classId ++; + pdev->NumClasses ++; + } + else + { +#if (USBD_DEBUG_LEVEL > 1U) + USBD_ErrLog("Invalid Class handle"); +#endif /* (USBD_DEBUG_LEVEL > 1U) */ + ret = USBD_FAIL; + } + } + + if (ret == USBD_OK) + { + /* Get Device Configuration Descriptor */ +#ifdef USE_USB_HS + pdev->pConfDesc = USBD_CMPSIT.GetHSConfigDescriptor(&len); +#else /* Default USE_USB_FS */ + pdev->pConfDesc = USBD_CMPSIT.GetFSConfigDescriptor(&len); #endif /* USE_USB_FS */ + } + + return ret; +} + +/** + * @brief USBD_UnRegisterClassComposite + * UnLink all composite class drivers from Device Core. + * @param pdev: Device Handle + * @retval USBD Status + */ +USBD_StatusTypeDef USBD_UnRegisterClassComposite(USBD_HandleTypeDef *pdev) +{ + USBD_StatusTypeDef ret = USBD_FAIL; + uint8_t idx1; + uint8_t idx2; + + /* Unroll all activated classes */ + for (idx1 = 0; idx1 < pdev->NumClasses; idx1++) + { + /* Check if the class correspond to the requested type and if it is active */ + if (pdev->tclasslist[idx1].Active == 1U) + { + /* Set the new class ID */ + pdev->classId = idx1; + + /* Free resources used by the selected class */ + if (pdev->pClass[pdev->classId] != NULL) + { + /* Free Class Resources */ + if (pdev->pClass[pdev->classId]->DeInit(pdev, (uint8_t)pdev->dev_config) != 0U) + { +#if (USBD_DEBUG_LEVEL > 1U) + USBD_ErrLog("Class DeInit didn't succeed!, can't unregister selected class"); +#endif /* (USBD_DEBUG_LEVEL > 1U) */ + + ret = USBD_FAIL; + } + } + + /* Free the class pointer */ + pdev->pClass[pdev->classId] = NULL; + + /* Free the class location in classes table and reset its parameters to zero */ + pdev->tclasslist[pdev->classId].ClassType = CLASS_TYPE_NONE; + pdev->tclasslist[pdev->classId].ClassId = 0U; + pdev->tclasslist[pdev->classId].Active = 0U; + pdev->tclasslist[pdev->classId].NumEps = 0U; + pdev->tclasslist[pdev->classId].NumIf = 0U; + pdev->tclasslist[pdev->classId].CurrPcktSze = 0U; + for (idx2 = 0U; idx2 < USBD_MAX_CLASS_ENDPOINTS; idx2++) + { + pdev->tclasslist[pdev->classId].Eps[idx2].add = 0U; + pdev->tclasslist[pdev->classId].Eps[idx2].type = 0U; + pdev->tclasslist[pdev->classId].Eps[idx2].size = 0U; + pdev->tclasslist[pdev->classId].Eps[idx2].is_used = 0U; + } + + for (idx2 = 0U; idx2 < USBD_MAX_CLASS_INTERFACES; idx2++) + { + pdev->tclasslist[pdev->classId].Ifs[idx2] = 0U; + } + } + } + + /* Reset the configuration descriptor */ + (void)USBD_CMPST_ClearConfDesc(pdev); + + /* Reset the class ID and number of classes */ + pdev->classId = 0U; + pdev->NumClasses = 0U; + + return ret; +} +#endif /* USE_USBD_COMPOSITE */ + +#if (USBD_USER_REGISTER_CALLBACK == 1U) +/** + * @brief USBD_RegisterDevStateCallback + * @param pdev : Device Handle + * @param 
pUserCallback: User Callback + * @retval USBD Status + */ +USBD_StatusTypeDef USBD_RegisterDevStateCallback(USBD_HandleTypeDef *pdev, USBD_DevStateCallbackTypeDef pUserCallback) +{ + pdev->DevStateCallback = pUserCallback; return USBD_OK; } +#endif /* USBD_USER_REGISTER_CALLBACK */ /** * @brief USBD_Start @@ -206,6 +383,10 @@ USBD_StatusTypeDef USBD_RegisterClass(USBD_HandleTypeDef *pdev, USBD_ClassTypeDe */ USBD_StatusTypeDef USBD_Start(USBD_HandleTypeDef *pdev) { +#ifdef USE_USBD_COMPOSITE + pdev->classId = 0U; +#endif /* USE_USBD_COMPOSITE */ + /* Start the low level driver */ return USBD_LL_Start(pdev); } @@ -218,85 +399,150 @@ USBD_StatusTypeDef USBD_Start(USBD_HandleTypeDef *pdev) */ USBD_StatusTypeDef USBD_Stop(USBD_HandleTypeDef *pdev) { - USBD_StatusTypeDef ret; + /* Disconnect USB Device */ + (void)USBD_LL_Stop(pdev); /* Free Class Resources */ - if (pdev->pClass != NULL) +#ifdef USE_USBD_COMPOSITE + /* Parse the table of classes in use */ + for (uint32_t i = 0U; i < USBD_MAX_SUPPORTED_CLASS; i++) { - pdev->pClass->DeInit(pdev, (uint8_t)pdev->dev_config); + /* Check if current class is in use */ + if ((pdev->tclasslist[i].Active) == 1U) + { + if (pdev->pClass[i] != NULL) + { + pdev->classId = i; + /* Free Class Resources */ + (void)pdev->pClass[i]->DeInit(pdev, (uint8_t)pdev->dev_config); + } + } } - if (pdev->pConfDesc != NULL) + /* Reset the class ID */ + pdev->classId = 0U; +#else + if (pdev->pClass[0] != NULL) { - pdev->pConfDesc = NULL; + (void)pdev->pClass[0]->DeInit(pdev, (uint8_t)pdev->dev_config); } +#endif /* USE_USBD_COMPOSITE */ - /* Stop the low level driver */ - ret = USBD_LL_Stop(pdev); - - return ret; + return USBD_OK; } /** -* @brief USBD_RunTestMode -* Launch test mode process -* @param pdev: device instance -* @retval status -*/ -USBD_StatusTypeDef USBD_RunTestMode(USBD_HandleTypeDef *pdev) + * @brief USBD_RunTestMode + * Launch test mode process + * @param pdev: device instance + * @retval status + */ +USBD_StatusTypeDef USBD_RunTestMode(USBD_HandleTypeDef *pdev) { +#ifdef USBD_HS_TESTMODE_ENABLE + USBD_StatusTypeDef ret; + + /* Run USB HS test mode */ + ret = USBD_LL_SetTestMode(pdev, pdev->dev_test_mode); + + return ret; +#else /* Prevent unused argument compilation warning */ UNUSED(pdev); return USBD_OK; +#endif /* USBD_HS_TESTMODE_ENABLE */ } /** -* @brief USBD_SetClassConfig -* Configure device and start the interface -* @param pdev: device instance -* @param cfgidx: configuration index -* @retval status -*/ + * @brief USBD_SetClassConfig + * Configure device and start the interface + * @param pdev: device instance + * @param cfgidx: configuration index + * @retval status + */ USBD_StatusTypeDef USBD_SetClassConfig(USBD_HandleTypeDef *pdev, uint8_t cfgidx) { - USBD_StatusTypeDef ret = USBD_FAIL; + USBD_StatusTypeDef ret = USBD_OK; - if (pdev->pClass != NULL) +#ifdef USE_USBD_COMPOSITE + /* Parse the table of classes in use */ + for (uint32_t i = 0U; i < USBD_MAX_SUPPORTED_CLASS; i++) + { + /* Check if current class is in use */ + if ((pdev->tclasslist[i].Active) == 1U) + { + if (pdev->pClass[i] != NULL) + { + pdev->classId = i; + /* Set configuration and Start the Class*/ + if (pdev->pClass[i]->Init(pdev, cfgidx) != 0U) + { + ret = USBD_FAIL; + } + } + } + } +#else + if (pdev->pClass[0] != NULL) { /* Set configuration and Start the Class */ - ret = (USBD_StatusTypeDef)pdev->pClass->Init(pdev, cfgidx); + ret = (USBD_StatusTypeDef)pdev->pClass[0]->Init(pdev, cfgidx); } +#endif /* USE_USBD_COMPOSITE */ return ret; } /** -* @brief USBD_ClrClassConfig -* 
Clear current configuration -* @param pdev: device instance -* @param cfgidx: configuration index -* @retval status: USBD_StatusTypeDef -*/ + * @brief USBD_ClrClassConfig + * Clear current configuration + * @param pdev: device instance + * @param cfgidx: configuration index + * @retval status + */ USBD_StatusTypeDef USBD_ClrClassConfig(USBD_HandleTypeDef *pdev, uint8_t cfgidx) { - /* Clear configuration and De-initialize the Class process */ - if (pdev->pClass != NULL) + USBD_StatusTypeDef ret = USBD_OK; + +#ifdef USE_USBD_COMPOSITE + /* Parse the table of classes in use */ + for (uint32_t i = 0U; i < USBD_MAX_SUPPORTED_CLASS; i++) { - pdev->pClass->DeInit(pdev, cfgidx); + /* Check if current class is in use */ + if ((pdev->tclasslist[i].Active) == 1U) + { + if (pdev->pClass[i] != NULL) + { + pdev->classId = i; + /* Clear configuration and De-initialize the Class process */ + if (pdev->pClass[i]->DeInit(pdev, cfgidx) != 0U) + { + ret = USBD_FAIL; + } + } + } } +#else + /* Clear configuration and De-initialize the Class process */ + if (pdev->pClass[0]->DeInit(pdev, cfgidx) != 0U) + { + ret = USBD_FAIL; + } +#endif /* USE_USBD_COMPOSITE */ - return USBD_OK; + return ret; } /** -* @brief USBD_SetupStage -* Handle the setup stage -* @param pdev: device instance -* @retval status -*/ + * @brief USBD_LL_SetupStage + * Handle the setup stage + * @param pdev: device instance + * @param psetup: setup packet buffer pointer + * @retval status + */ USBD_StatusTypeDef USBD_LL_SetupStage(USBD_HandleTypeDef *pdev, uint8_t *psetup) { USBD_StatusTypeDef ret; @@ -330,17 +576,19 @@ USBD_StatusTypeDef USBD_LL_SetupStage(USBD_HandleTypeDef *pdev, uint8_t *psetup) } /** -* @brief USBD_DataOutStage -* Handle data OUT stage -* @param pdev: device instance -* @param epnum: endpoint index -* @retval status -*/ + * @brief USBD_LL_DataOutStage + * Handle data OUT stage + * @param pdev: device instance + * @param epnum: endpoint index + * @param pdata: data pointer + * @retval status + */ USBD_StatusTypeDef USBD_LL_DataOutStage(USBD_HandleTypeDef *pdev, uint8_t epnum, uint8_t *pdata) { USBD_EndpointTypeDef *pep; - USBD_StatusTypeDef ret; + USBD_StatusTypeDef ret = USBD_OK; + uint8_t idx; if (epnum == 0U) { @@ -356,59 +604,86 @@ USBD_StatusTypeDef USBD_LL_DataOutStage(USBD_HandleTypeDef *pdev, } else { - if ((pdev->pClass->EP0_RxReady != NULL) && - (pdev->dev_state == USBD_STATE_CONFIGURED)) + /* Find the class ID relative to the current request */ + switch (pdev->request.bmRequest & 0x1FU) + { + case USB_REQ_RECIPIENT_DEVICE: + /* Device requests must be managed by the first instantiated class + (or duplicated by all classes for simplicity) */ + idx = 0U; + break; + + case USB_REQ_RECIPIENT_INTERFACE: + idx = USBD_CoreFindIF(pdev, LOBYTE(pdev->request.wIndex)); + break; + + case USB_REQ_RECIPIENT_ENDPOINT: + idx = USBD_CoreFindEP(pdev, LOBYTE(pdev->request.wIndex)); + break; + + default: + /* Back to the first class in case of doubt */ + idx = 0U; + break; + } + + if (idx < USBD_MAX_SUPPORTED_CLASS) { - pdev->pClass->EP0_RxReady(pdev); + /* Setup the class ID and route the request to the relative class function */ + if (pdev->dev_state == USBD_STATE_CONFIGURED) + { + if (pdev->pClass[idx]->EP0_RxReady != NULL) + { + pdev->classId = idx; + pdev->pClass[idx]->EP0_RxReady(pdev); + } + } } + (void)USBD_CtlSendStatus(pdev); } } - else - { -#if 0 - if (pdev->ep0_state == USBD_EP0_STATUS_OUT) - { - /* - * STATUS PHASE completed, update ep0_state to idle - */ - pdev->ep0_state = USBD_EP0_IDLE; - (void)USBD_LL_StallEP(pdev, 
0U); - } -#endif - } } - else if ((pdev->pClass->DataOut != NULL) && - (pdev->dev_state == USBD_STATE_CONFIGURED)) + else { - ret = (USBD_StatusTypeDef)pdev->pClass->DataOut(pdev, epnum); + /* Get the class index relative to this interface */ + idx = USBD_CoreFindEP(pdev, (epnum & 0x7FU)); - if (ret != USBD_OK) + if (((uint16_t)idx != 0xFFU) && (idx < USBD_MAX_SUPPORTED_CLASS)) { - return ret; + /* Call the class data out function to manage the request */ + if (pdev->dev_state == USBD_STATE_CONFIGURED) + { + if (pdev->pClass[idx]->DataOut != NULL) + { + pdev->classId = idx; + ret = (USBD_StatusTypeDef)pdev->pClass[idx]->DataOut(pdev, epnum); + } + } + if (ret != USBD_OK) + { + return ret; + } } } - else - { - /* should never be in this condition */ - return USBD_FAIL; - } return USBD_OK; } /** -* @brief USBD_DataInStage -* Handle data in stage -* @param pdev: device instance -* @param epnum: endpoint index -* @retval status -*/ + * @brief USBD_LL_DataInStage + * Handle data in stage + * @param pdev: device instance + * @param epnum: endpoint index + * @param pdata: data pointer + * @retval status + */ USBD_StatusTypeDef USBD_LL_DataInStage(USBD_HandleTypeDef *pdev, uint8_t epnum, uint8_t *pdata) { USBD_EndpointTypeDef *pep; USBD_StatusTypeDef ret; + uint8_t idx; if (epnum == 0U) { @@ -423,7 +698,7 @@ USBD_StatusTypeDef USBD_LL_DataInStage(USBD_HandleTypeDef *pdev, (void)USBD_CtlContinueSendData(pdev, pdata, pep->rem_length); /* Prepare endpoint for premature end of transfer */ - (void)USBD_LL_PrepareReceive(pdev, 0U, NULL, 0U); + (void)USBD_LL_PrepareReceive(pdev, 0U, NULL, 0U); } else { @@ -440,73 +715,107 @@ USBD_StatusTypeDef USBD_LL_DataInStage(USBD_HandleTypeDef *pdev, } else { - if ((pdev->pClass->EP0_TxSent != NULL) && - (pdev->dev_state == USBD_STATE_CONFIGURED)) + if (pdev->dev_state == USBD_STATE_CONFIGURED) { - pdev->pClass->EP0_TxSent(pdev); + if (pdev->pClass[0]->EP0_TxSent != NULL) + { + pdev->classId = 0U; + pdev->pClass[0]->EP0_TxSent(pdev); + } } (void)USBD_LL_StallEP(pdev, 0x80U); (void)USBD_CtlReceiveStatus(pdev); } } } - else - { -#if 0 - if ((pdev->ep0_state == USBD_EP0_STATUS_IN) || - (pdev->ep0_state == USBD_EP0_IDLE)) - { - (void)USBD_LL_StallEP(pdev, 0x80U); - } -#endif - } - if (pdev->dev_test_mode == 1U) + if (pdev->dev_test_mode != 0U) { (void)USBD_RunTestMode(pdev); pdev->dev_test_mode = 0U; } } - else if ((pdev->pClass->DataIn != NULL) && - (pdev->dev_state == USBD_STATE_CONFIGURED)) + else { - ret = (USBD_StatusTypeDef)pdev->pClass->DataIn(pdev, epnum); + /* Get the class index relative to this interface */ + idx = USBD_CoreFindEP(pdev, ((uint8_t)epnum | 0x80U)); - if (ret != USBD_OK) + if (((uint16_t)idx != 0xFFU) && (idx < USBD_MAX_SUPPORTED_CLASS)) { - return ret; + /* Call the class data out function to manage the request */ + if (pdev->dev_state == USBD_STATE_CONFIGURED) + { + if (pdev->pClass[idx]->DataIn != NULL) + { + pdev->classId = idx; + ret = (USBD_StatusTypeDef)pdev->pClass[idx]->DataIn(pdev, epnum); + + if (ret != USBD_OK) + { + return ret; + } + } + } } } - else - { - /* should never be in this condition */ - return USBD_FAIL; - } return USBD_OK; } /** -* @brief USBD_LL_Reset -* Handle Reset event -* @param pdev: device instance -* @retval status -*/ - + * @brief USBD_LL_Reset + * Handle Reset event + * @param pdev: device instance + * @retval status + */ USBD_StatusTypeDef USBD_LL_Reset(USBD_HandleTypeDef *pdev) { + USBD_StatusTypeDef ret = USBD_OK; + /* Upon Reset call user call back */ pdev->dev_state = USBD_STATE_DEFAULT; pdev->ep0_state = 
USBD_EP0_IDLE; pdev->dev_config = 0U; pdev->dev_remote_wakeup = 0U; + pdev->dev_test_mode = 0U; + +#ifdef USE_USBD_COMPOSITE + /* Parse the table of classes in use */ + for (uint32_t i = 0U; i < USBD_MAX_SUPPORTED_CLASS; i++) + { + /* Check if current class is in use */ + if ((pdev->tclasslist[i].Active) == 1U) + { + if (pdev->pClass[i] != NULL) + { + pdev->classId = i; + /* Clear configuration and De-initialize the Class process*/ + + if (pdev->pClass[i]->DeInit != NULL) + { + if (pdev->pClass[i]->DeInit(pdev, (uint8_t)pdev->dev_config) != USBD_OK) + { + ret = USBD_FAIL; + } + } + } + } + } +#else - if (pdev->pClassData != NULL) + if (pdev->pClass[0] != NULL) { - pdev->pClass->DeInit(pdev, (uint8_t)pdev->dev_config); + if (pdev->pClass[0]->DeInit != NULL) + { + if (pdev->pClass[0]->DeInit(pdev, (uint8_t)pdev->dev_config) != USBD_OK) + { + ret = USBD_FAIL; + } + } } +#endif /* USE_USBD_COMPOSITE */ - /* Open EP0 OUT */ + /* Open EP0 OUT */ (void)USBD_LL_OpenEP(pdev, 0x00U, USBD_EP_TYPE_CTRL, USB_MAX_EP0_SIZE); pdev->ep_out[0x00U & 0xFU].is_used = 1U; @@ -518,15 +827,15 @@ USBD_StatusTypeDef USBD_LL_Reset(USBD_HandleTypeDef *pdev) pdev->ep_in[0].maxpacket = USB_MAX_EP0_SIZE; - return USBD_OK; + return ret; } /** -* @brief USBD_LL_Reset -* Handle Reset event -* @param pdev: device instance -* @retval status -*/ + * @brief USBD_LL_SetSpeed + * Handle Reset event + * @param pdev: device instance + * @retval status + */ USBD_StatusTypeDef USBD_LL_SetSpeed(USBD_HandleTypeDef *pdev, USBD_SpeedTypeDef speed) { @@ -536,27 +845,29 @@ USBD_StatusTypeDef USBD_LL_SetSpeed(USBD_HandleTypeDef *pdev, } /** -* @brief USBD_Suspend -* Handle Suspend event -* @param pdev: device instance -* @retval status -*/ - + * @brief USBD_LL_Suspend + * Handle Suspend event + * @param pdev: device instance + * @retval status + */ USBD_StatusTypeDef USBD_LL_Suspend(USBD_HandleTypeDef *pdev) { - pdev->dev_old_state = pdev->dev_state; + if (pdev->dev_state != USBD_STATE_SUSPENDED) + { + pdev->dev_old_state = pdev->dev_state; + } + pdev->dev_state = USBD_STATE_SUSPENDED; return USBD_OK; } /** -* @brief USBD_Resume -* Handle Resume event -* @param pdev: device instance -* @retval status -*/ - + * @brief USBD_LL_Resume + * Handle Resume event + * @param pdev: device instance + * @retval status + */ USBD_StatusTypeDef USBD_LL_Resume(USBD_HandleTypeDef *pdev) { if (pdev->dev_state == USBD_STATE_SUSPENDED) @@ -568,63 +879,105 @@ USBD_StatusTypeDef USBD_LL_Resume(USBD_HandleTypeDef *pdev) } /** -* @brief USBD_SOF -* Handle SOF event -* @param pdev: device instance -* @retval status -*/ - + * @brief USBD_LL_SOF + * Handle SOF event + * @param pdev: device instance + * @retval status + */ USBD_StatusTypeDef USBD_LL_SOF(USBD_HandleTypeDef *pdev) { + /* The SOF event can be distributed for all classes that support it */ if (pdev->dev_state == USBD_STATE_CONFIGURED) { - if (pdev->pClass->SOF != NULL) +#ifdef USE_USBD_COMPOSITE + /* Parse the table of classes in use */ + for (uint32_t i = 0; i < USBD_MAX_SUPPORTED_CLASS; i++) { - pdev->pClass->SOF(pdev); + /* Check if current class is in use */ + if ((pdev->tclasslist[i].Active) == 1U) + { + if (pdev->pClass[i] != NULL) + { + if (pdev->pClass[i]->SOF != NULL) + { + pdev->classId = i; + (void)pdev->pClass[i]->SOF(pdev); + } + } + } } +#else + if (pdev->pClass[0] != NULL) + { + if (pdev->pClass[0]->SOF != NULL) + { + (void)pdev->pClass[0]->SOF(pdev); + } + } +#endif /* USE_USBD_COMPOSITE */ } return USBD_OK; } /** -* @brief USBD_IsoINIncomplete -* Handle iso in incomplete event -* @param 
pdev: device instance -* @retval status -*/ + * @brief USBD_LL_IsoINIncomplete + * Handle iso in incomplete event + * @param pdev: device instance + * @param epnum: Endpoint number + * @retval status + */ USBD_StatusTypeDef USBD_LL_IsoINIncomplete(USBD_HandleTypeDef *pdev, uint8_t epnum) { - /* Prevent unused arguments compilation warning */ - UNUSED(pdev); - UNUSED(epnum); + if (pdev->pClass[pdev->classId] == NULL) + { + return USBD_FAIL; + } + + if (pdev->dev_state == USBD_STATE_CONFIGURED) + { + if (pdev->pClass[pdev->classId]->IsoINIncomplete != NULL) + { + (void)pdev->pClass[pdev->classId]->IsoINIncomplete(pdev, epnum); + } + } return USBD_OK; } /** -* @brief USBD_IsoOUTIncomplete -* Handle iso out incomplete event -* @param pdev: device instance -* @retval status -*/ + * @brief USBD_LL_IsoOUTIncomplete + * Handle iso out incomplete event + * @param pdev: device instance + * @param epnum: Endpoint number + * @retval status + */ USBD_StatusTypeDef USBD_LL_IsoOUTIncomplete(USBD_HandleTypeDef *pdev, uint8_t epnum) { - /* Prevent unused arguments compilation warning */ - UNUSED(pdev); - UNUSED(epnum); + if (pdev->pClass[pdev->classId] == NULL) + { + return USBD_FAIL; + } + + if (pdev->dev_state == USBD_STATE_CONFIGURED) + { + if (pdev->pClass[pdev->classId]->IsoOUTIncomplete != NULL) + { + (void)pdev->pClass[pdev->classId]->IsoOUTIncomplete(pdev, epnum); + } + } return USBD_OK; } /** -* @brief USBD_DevConnected -* Handle device connection event -* @param pdev: device instance -* @retval status -*/ + * @brief USBD_LL_DevConnected + * Handle device connection event + * @param pdev: device instance + * @retval status + */ USBD_StatusTypeDef USBD_LL_DevConnected(USBD_HandleTypeDef *pdev) { /* Prevent unused argument compilation warning */ @@ -634,36 +987,229 @@ USBD_StatusTypeDef USBD_LL_DevConnected(USBD_HandleTypeDef *pdev) } /** -* @brief USBD_DevDisconnected -* Handle device disconnection event -* @param pdev: device instance -* @retval status -*/ + * @brief USBD_LL_DevDisconnected + * Handle device disconnection event + * @param pdev: device instance + * @retval status + */ USBD_StatusTypeDef USBD_LL_DevDisconnected(USBD_HandleTypeDef *pdev) { + USBD_StatusTypeDef ret = USBD_OK; + /* Free Class Resources */ pdev->dev_state = USBD_STATE_DEFAULT; - if (pdev->pClass != NULL) +#ifdef USE_USBD_COMPOSITE + /* Parse the table of classes in use */ + for (uint32_t i = 0; i < USBD_MAX_SUPPORTED_CLASS; i++) { - pdev->pClass->DeInit(pdev, (uint8_t)pdev->dev_config); + /* Check if current class is in use */ + if ((pdev->tclasslist[i].Active) == 1U) + { + if (pdev->pClass[i] != NULL) + { + pdev->classId = i; + /* Clear configuration and De-initialize the Class process*/ + if (pdev->pClass[i]->DeInit(pdev, (uint8_t)pdev->dev_config) != 0U) + { + ret = USBD_FAIL; + } + } + } } +#else + if (pdev->pClass[0] != NULL) + { + if (pdev->pClass[0]->DeInit(pdev, (uint8_t)pdev->dev_config) != 0U) + { + ret = USBD_FAIL; + } + } +#endif /* USE_USBD_COMPOSITE */ - return USBD_OK; + return ret; } + /** -* @} -*/ + * @brief USBD_CoreFindIF + * return the class index relative to the selected interface + * @param pdev: device instance + * @param index : selected interface number + * @retval index of the class using the selected interface number. OxFF if no class found. 
+ */ +uint8_t USBD_CoreFindIF(USBD_HandleTypeDef *pdev, uint8_t index) +{ +#ifdef USE_USBD_COMPOSITE + /* Parse the table of classes in use */ + for (uint32_t i = 0U; i < USBD_MAX_SUPPORTED_CLASS; i++) + { + /* Check if current class is in use */ + if ((pdev->tclasslist[i].Active) == 1U) + { + /* Parse all interfaces listed in the current class */ + for (uint32_t j = 0U; j < pdev->tclasslist[i].NumIf; j++) + { + /* Check if requested Interface matches the current class interface */ + if (pdev->tclasslist[i].Ifs[j] == index) + { + if (pdev->pClass[i]->Setup != NULL) + { + return (uint8_t)i; + } + } + } + } + } + return 0xFFU; +#else + UNUSED(pdev); + UNUSED(index); + + return 0x00U; +#endif /* USE_USBD_COMPOSITE */ +} /** -* @} -*/ + * @brief USBD_CoreFindEP + * return the class index relative to the selected endpoint + * @param pdev: device instance + * @param index : selected endpoint number + * @retval index of the class using the selected endpoint number. 0xFF if no class found. + */ +uint8_t USBD_CoreFindEP(USBD_HandleTypeDef *pdev, uint8_t index) +{ +#ifdef USE_USBD_COMPOSITE + /* Parse the table of classes in use */ + for (uint32_t i = 0U; i < USBD_MAX_SUPPORTED_CLASS; i++) + { + /* Check if current class is in use */ + if ((pdev->tclasslist[i].Active) == 1U) + { + /* Parse all endpoints listed in the current class */ + for (uint32_t j = 0U; j < pdev->tclasslist[i].NumEps; j++) + { + /* Check if requested endpoint matches the current class endpoint */ + if (pdev->tclasslist[i].Eps[j].add == index) + { + if (pdev->pClass[i]->Setup != NULL) + { + return (uint8_t)i; + } + } + } + } + } + return 0xFFU; +#else + UNUSED(pdev); + UNUSED(index); + return 0x00U; +#endif /* USE_USBD_COMPOSITE */ +} + +#ifdef USE_USBD_COMPOSITE /** -* @} -*/ + * @brief USBD_CoreGetEPAdd + * Get the endpoint address relative to a selected class + * @param pdev: device instance + * @param ep_dir: USBD_EP_IN or USBD_EP_OUT + * @param ep_type: USBD_EP_TYPE_CTRL, USBD_EP_TYPE_ISOC, USBD_EP_TYPE_BULK or USBD_EP_TYPE_INTR + * @param ClassId: The Class ID + * @retval Address of the selected endpoint or 0xFFU if no endpoint found. 
+ */ +uint8_t USBD_CoreGetEPAdd(USBD_HandleTypeDef *pdev, uint8_t ep_dir, uint8_t ep_type, uint8_t ClassId) +{ + uint8_t idx; -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + /* Find the EP address in the selected class table */ + for (idx = 0; idx < pdev->tclasslist[ClassId].NumEps; idx++) + { + if (((pdev->tclasslist[ClassId].Eps[idx].add & USBD_EP_IN) == ep_dir) && \ + (pdev->tclasslist[ClassId].Eps[idx].type == ep_type) && \ + (pdev->tclasslist[ClassId].Eps[idx].is_used != 0U)) + { + return (pdev->tclasslist[ClassId].Eps[idx].add); + } + } + + /* If reaching this point, then no endpoint was found */ + return 0xFFU; +} +#endif /* USE_USBD_COMPOSITE */ + +/** + * @brief USBD_GetEpDesc + * This function return the Endpoint descriptor + * @param pdev: device instance + * @param pConfDesc: pointer to Bos descriptor + * @param EpAddr: endpoint address + * @retval pointer to video endpoint descriptor + */ +void *USBD_GetEpDesc(uint8_t *pConfDesc, uint8_t EpAddr) +{ + USBD_DescHeaderTypeDef *pdesc = (USBD_DescHeaderTypeDef *)(void *)pConfDesc; + USBD_ConfigDescTypeDef *desc = (USBD_ConfigDescTypeDef *)(void *)pConfDesc; + USBD_EpDescTypeDef *pEpDesc = NULL; + uint16_t ptr; + + if (desc->wTotalLength > desc->bLength) + { + ptr = desc->bLength; + + while (ptr < desc->wTotalLength) + { + pdesc = USBD_GetNextDesc((uint8_t *)pdesc, &ptr); + + if (pdesc->bDescriptorType == USB_DESC_TYPE_ENDPOINT) + { + pEpDesc = (USBD_EpDescTypeDef *)(void *)pdesc; + + if (pEpDesc->bEndpointAddress == EpAddr) + { + break; + } + else + { + pEpDesc = NULL; + } + } + } + } + + return (void *)pEpDesc; +} + +/** + * @brief USBD_GetNextDesc + * This function return the next descriptor header + * @param buf: Buffer where the descriptor is available + * @param ptr: data pointer inside the descriptor + * @retval next header + */ +USBD_DescHeaderTypeDef *USBD_GetNextDesc(uint8_t *pbuf, uint16_t *ptr) +{ + USBD_DescHeaderTypeDef *pnext = (USBD_DescHeaderTypeDef *)(void *)pbuf; + + *ptr += pnext->bLength; + pnext = (USBD_DescHeaderTypeDef *)(void *)(pbuf + pnext->bLength); + + return (pnext); +} + +/** + * @} + */ + + +/** + * @} + */ + + +/** + * @} + */ diff --git a/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ctlreq.c b/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ctlreq.c index c31d40e0..814b810c 100644 --- a/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ctlreq.c +++ b/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ctlreq.c @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2015 STMicroelectronics. - * All rights reserved.
+ * Copyright (c) 2015 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -21,6 +20,9 @@ #include "usbd_ctlreq.h" #include "usbd_ioreq.h" +#ifdef USE_USBD_COMPOSITE +#include "usbd_composite_builder.h" +#endif /* USE_USBD_COMPOSITE */ /** @addtogroup STM32_USBD_STATE_DEVICE_LIBRARY * @{ @@ -44,7 +46,9 @@ /** @defgroup USBD_REQ_Private_Defines * @{ */ - +#ifndef USBD_MAX_STR_DESC_SIZ +#define USBD_MAX_STR_DESC_SIZ 64U +#endif /* USBD_MAX_STR_DESC_SIZ */ /** * @} */ @@ -91,279 +95,323 @@ static uint8_t USBD_GetLen(uint8_t *buf); /** -* @brief USBD_StdDevReq -* Handle standard usb device requests -* @param pdev: device instance -* @param req: usb request -* @retval status -*/ + * @brief USBD_StdDevReq + * Handle standard usb device requests + * @param pdev: device instance + * @param req: usb request + * @retval status + */ USBD_StatusTypeDef USBD_StdDevReq(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) { USBD_StatusTypeDef ret = USBD_OK; switch (req->bmRequest & USB_REQ_TYPE_MASK) { - case USB_REQ_TYPE_CLASS: - case USB_REQ_TYPE_VENDOR: - ret = (USBD_StatusTypeDef)pdev->pClass->Setup(pdev, req); - break; - - case USB_REQ_TYPE_STANDARD: - switch (req->bRequest) - { - case USB_REQ_GET_DESCRIPTOR: - USBD_GetDescriptor(pdev, req); + case USB_REQ_TYPE_CLASS: + case USB_REQ_TYPE_VENDOR: + ret = (USBD_StatusTypeDef)pdev->pClass[pdev->classId]->Setup(pdev, req); break; - case USB_REQ_SET_ADDRESS: - USBD_SetAddress(pdev, req); - break; + case USB_REQ_TYPE_STANDARD: + switch (req->bRequest) + { + case USB_REQ_GET_DESCRIPTOR: + USBD_GetDescriptor(pdev, req); + break; - case USB_REQ_SET_CONFIGURATION: - ret = USBD_SetConfig(pdev, req); - break; + case USB_REQ_SET_ADDRESS: + USBD_SetAddress(pdev, req); + break; - case USB_REQ_GET_CONFIGURATION: - USBD_GetConfig(pdev, req); - break; + case USB_REQ_SET_CONFIGURATION: + ret = USBD_SetConfig(pdev, req); + break; - case USB_REQ_GET_STATUS: - USBD_GetStatus(pdev, req); - break; + case USB_REQ_GET_CONFIGURATION: + USBD_GetConfig(pdev, req); + break; - case USB_REQ_SET_FEATURE: - USBD_SetFeature(pdev, req); - break; + case USB_REQ_GET_STATUS: + USBD_GetStatus(pdev, req); + break; + + case USB_REQ_SET_FEATURE: + USBD_SetFeature(pdev, req); + break; - case USB_REQ_CLEAR_FEATURE: - USBD_ClrFeature(pdev, req); + case USB_REQ_CLEAR_FEATURE: + USBD_ClrFeature(pdev, req); + break; + + default: + USBD_CtlError(pdev, req); + break; + } break; default: USBD_CtlError(pdev, req); break; - } - break; - - default: - USBD_CtlError(pdev, req); - break; } return ret; } /** -* @brief USBD_StdItfReq -* Handle standard usb interface requests -* @param pdev: device instance -* @param req: usb request -* @retval status -*/ + * @brief USBD_StdItfReq + * Handle standard usb interface requests + * @param pdev: device instance + * @param req: usb request + * @retval status + */ USBD_StatusTypeDef USBD_StdItfReq(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) { USBD_StatusTypeDef ret = USBD_OK; + uint8_t idx; switch (req->bmRequest & USB_REQ_TYPE_MASK) { - case 
USB_REQ_TYPE_CLASS: - case USB_REQ_TYPE_VENDOR: - case USB_REQ_TYPE_STANDARD: - switch (pdev->dev_state) - { - case USBD_STATE_DEFAULT: - case USBD_STATE_ADDRESSED: - case USBD_STATE_CONFIGURED: - - if (LOBYTE(req->wIndex) <= USBD_MAX_NUM_INTERFACES) + case USB_REQ_TYPE_CLASS: + case USB_REQ_TYPE_VENDOR: + case USB_REQ_TYPE_STANDARD: + switch (pdev->dev_state) { - ret = (USBD_StatusTypeDef)pdev->pClass->Setup(pdev, req); + case USBD_STATE_DEFAULT: + case USBD_STATE_ADDRESSED: + case USBD_STATE_CONFIGURED: - if ((req->wLength == 0U) && (ret == USBD_OK)) - { - (void)USBD_CtlSendStatus(pdev); - } - } - else - { - USBD_CtlError(pdev, req); + if (LOBYTE(req->wIndex) <= USBD_MAX_NUM_INTERFACES) + { + /* Get the class index relative to this interface */ + idx = USBD_CoreFindIF(pdev, LOBYTE(req->wIndex)); + if (((uint8_t)idx != 0xFFU) && (idx < USBD_MAX_SUPPORTED_CLASS)) + { + /* Call the class data out function to manage the request */ + if (pdev->pClass[idx]->Setup != NULL) + { + pdev->classId = idx; + ret = (USBD_StatusTypeDef)(pdev->pClass[idx]->Setup(pdev, req)); + } + else + { + /* should never reach this condition */ + ret = USBD_FAIL; + } + } + else + { + /* No relative interface found */ + ret = USBD_FAIL; + } + + if ((req->wLength == 0U) && (ret == USBD_OK)) + { + (void)USBD_CtlSendStatus(pdev); + } + } + else + { + USBD_CtlError(pdev, req); + } + break; + + default: + USBD_CtlError(pdev, req); + break; } break; default: USBD_CtlError(pdev, req); break; - } - break; - - default: - USBD_CtlError(pdev, req); - break; } return ret; } /** -* @brief USBD_StdEPReq -* Handle standard usb endpoint requests -* @param pdev: device instance -* @param req: usb request -* @retval status -*/ + * @brief USBD_StdEPReq + * Handle standard usb endpoint requests + * @param pdev: device instance + * @param req: usb request + * @retval status + */ USBD_StatusTypeDef USBD_StdEPReq(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) { USBD_EndpointTypeDef *pep; uint8_t ep_addr; + uint8_t idx; USBD_StatusTypeDef ret = USBD_OK; + ep_addr = LOBYTE(req->wIndex); switch (req->bmRequest & USB_REQ_TYPE_MASK) { - case USB_REQ_TYPE_CLASS: - case USB_REQ_TYPE_VENDOR: - ret = (USBD_StatusTypeDef)pdev->pClass->Setup(pdev, req); - break; - - case USB_REQ_TYPE_STANDARD: - switch (req->bRequest) - { - case USB_REQ_SET_FEATURE: - switch (pdev->dev_state) + case USB_REQ_TYPE_CLASS: + case USB_REQ_TYPE_VENDOR: + /* Get the class index relative to this endpoint */ + idx = USBD_CoreFindEP(pdev, ep_addr); + if (((uint8_t)idx != 0xFFU) && (idx < USBD_MAX_SUPPORTED_CLASS)) { - case USBD_STATE_ADDRESSED: - if ((ep_addr != 0x00U) && (ep_addr != 0x80U)) - { - (void)USBD_LL_StallEP(pdev, ep_addr); - (void)USBD_LL_StallEP(pdev, 0x80U); - } - else - { - USBD_CtlError(pdev, req); - } - break; - - case USBD_STATE_CONFIGURED: - if (req->wValue == USB_FEATURE_EP_HALT) + pdev->classId = idx; + /* Call the class data out function to manage the request */ + if (pdev->pClass[idx]->Setup != NULL) { - if ((ep_addr != 0x00U) && (ep_addr != 0x80U) && (req->wLength == 0x00U)) - { - (void)USBD_LL_StallEP(pdev, ep_addr); - } + ret = (USBD_StatusTypeDef)pdev->pClass[idx]->Setup(pdev, req); } - (void)USBD_CtlSendStatus(pdev); - - break; - - default: - USBD_CtlError(pdev, req); - break; } break; - case USB_REQ_CLEAR_FEATURE: - - switch (pdev->dev_state) + case USB_REQ_TYPE_STANDARD: + switch (req->bRequest) { - case USBD_STATE_ADDRESSED: - if ((ep_addr != 0x00U) && (ep_addr != 0x80U)) - { - (void)USBD_LL_StallEP(pdev, ep_addr); - 
(void)USBD_LL_StallEP(pdev, 0x80U); - } - else - { - USBD_CtlError(pdev, req); - } - break; - - case USBD_STATE_CONFIGURED: - if (req->wValue == USB_FEATURE_EP_HALT) - { - if ((ep_addr & 0x7FU) != 0x00U) + case USB_REQ_SET_FEATURE: + switch (pdev->dev_state) { - (void)USBD_LL_ClearStallEP(pdev, ep_addr); + case USBD_STATE_ADDRESSED: + if ((ep_addr != 0x00U) && (ep_addr != 0x80U)) + { + (void)USBD_LL_StallEP(pdev, ep_addr); + (void)USBD_LL_StallEP(pdev, 0x80U); + } + else + { + USBD_CtlError(pdev, req); + } + break; + + case USBD_STATE_CONFIGURED: + if (req->wValue == USB_FEATURE_EP_HALT) + { + if ((ep_addr != 0x00U) && (ep_addr != 0x80U) && (req->wLength == 0x00U)) + { + (void)USBD_LL_StallEP(pdev, ep_addr); + } + } + (void)USBD_CtlSendStatus(pdev); + + break; + + default: + USBD_CtlError(pdev, req); + break; } - (void)USBD_CtlSendStatus(pdev); - (USBD_StatusTypeDef)pdev->pClass->Setup(pdev, req); - } - break; - - default: - USBD_CtlError(pdev, req); - break; - } - break; - - case USB_REQ_GET_STATUS: - switch (pdev->dev_state) - { - case USBD_STATE_ADDRESSED: - if ((ep_addr != 0x00U) && (ep_addr != 0x80U)) - { - USBD_CtlError(pdev, req); break; - } - pep = ((ep_addr & 0x80U) == 0x80U) ? &pdev->ep_in[ep_addr & 0x7FU] : \ - &pdev->ep_out[ep_addr & 0x7FU]; - pep->status = 0x0000U; - - (void)USBD_CtlSendData(pdev, (uint8_t *)&pep->status, 2U); - break; + case USB_REQ_CLEAR_FEATURE: - case USBD_STATE_CONFIGURED: - if ((ep_addr & 0x80U) == 0x80U) - { - if (pdev->ep_in[ep_addr & 0xFU].is_used == 0U) + switch (pdev->dev_state) { - USBD_CtlError(pdev, req); - break; - } - } - else - { - if (pdev->ep_out[ep_addr & 0xFU].is_used == 0U) - { - USBD_CtlError(pdev, req); - break; + case USBD_STATE_ADDRESSED: + if ((ep_addr != 0x00U) && (ep_addr != 0x80U)) + { + (void)USBD_LL_StallEP(pdev, ep_addr); + (void)USBD_LL_StallEP(pdev, 0x80U); + } + else + { + USBD_CtlError(pdev, req); + } + break; + + case USBD_STATE_CONFIGURED: + if (req->wValue == USB_FEATURE_EP_HALT) + { + if ((ep_addr & 0x7FU) != 0x00U) + { + (void)USBD_LL_ClearStallEP(pdev, ep_addr); + } + (void)USBD_CtlSendStatus(pdev); + + /* Get the class index relative to this interface */ + idx = USBD_CoreFindEP(pdev, ep_addr); + if (((uint8_t)idx != 0xFFU) && (idx < USBD_MAX_SUPPORTED_CLASS)) + { + pdev->classId = idx; + /* Call the class data out function to manage the request */ + if (pdev->pClass[idx]->Setup != NULL) + { + ret = (USBD_StatusTypeDef)(pdev->pClass[idx]->Setup(pdev, req)); + } + } + } + break; + + default: + USBD_CtlError(pdev, req); + break; } - } - - pep = ((ep_addr & 0x80U) == 0x80U) ? &pdev->ep_in[ep_addr & 0x7FU] : \ - &pdev->ep_out[ep_addr & 0x7FU]; + break; - if ((ep_addr == 0x00U) || (ep_addr == 0x80U)) + case USB_REQ_GET_STATUS: + switch (pdev->dev_state) { - pep->status = 0x0000U; + case USBD_STATE_ADDRESSED: + if ((ep_addr != 0x00U) && (ep_addr != 0x80U)) + { + USBD_CtlError(pdev, req); + break; + } + pep = ((ep_addr & 0x80U) == 0x80U) ? &pdev->ep_in[ep_addr & 0x7FU] : \ + &pdev->ep_out[ep_addr & 0x7FU]; + + pep->status = 0x0000U; + + (void)USBD_CtlSendData(pdev, (uint8_t *)&pep->status, 2U); + break; + + case USBD_STATE_CONFIGURED: + if ((ep_addr & 0x80U) == 0x80U) + { + if (pdev->ep_in[ep_addr & 0xFU].is_used == 0U) + { + USBD_CtlError(pdev, req); + break; + } + } + else + { + if (pdev->ep_out[ep_addr & 0xFU].is_used == 0U) + { + USBD_CtlError(pdev, req); + break; + } + } + + pep = ((ep_addr & 0x80U) == 0x80U) ? 
&pdev->ep_in[ep_addr & 0x7FU] : \ + &pdev->ep_out[ep_addr & 0x7FU]; + + if ((ep_addr == 0x00U) || (ep_addr == 0x80U)) + { + pep->status = 0x0000U; + } + else if (USBD_LL_IsStallEP(pdev, ep_addr) != 0U) + { + pep->status = 0x0001U; + } + else + { + pep->status = 0x0000U; + } + + (void)USBD_CtlSendData(pdev, (uint8_t *)&pep->status, 2U); + break; + + default: + USBD_CtlError(pdev, req); + break; } - else if (USBD_LL_IsStallEP(pdev, ep_addr) != 0U) - { - pep->status = 0x0001U; - } - else - { - pep->status = 0x0000U; - } - - (void)USBD_CtlSendData(pdev, (uint8_t *)&pep->status, 2U); break; - default: - USBD_CtlError(pdev, req); - break; + default: + USBD_CtlError(pdev, req); + break; } break; default: USBD_CtlError(pdev, req); break; - } - break; - - default: - USBD_CtlError(pdev, req); - break; } return ret; @@ -371,12 +419,12 @@ USBD_StatusTypeDef USBD_StdEPReq(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef /** -* @brief USBD_GetDescriptor -* Handle Get Descriptor requests -* @param pdev: device instance -* @param req: usb request -* @retval status -*/ + * @brief USBD_GetDescriptor + * Handle Get Descriptor requests + * @param pdev: device instance + * @param req: usb request + * @retval None + */ static void USBD_GetDescriptor(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) { uint16_t len = 0U; @@ -386,42 +434,10 @@ static void USBD_GetDescriptor(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *r switch (req->wValue >> 8) { #if ((USBD_LPM_ENABLED == 1U) || (USBD_CLASS_BOS_ENABLED == 1U)) - case USB_DESC_TYPE_BOS: - if (pdev->pDesc->GetBOSDescriptor != NULL) - { - pbuf = pdev->pDesc->GetBOSDescriptor(pdev->dev_speed, &len); - } - else - { - USBD_CtlError(pdev, req); - err++; - } - break; -#endif - case USB_DESC_TYPE_DEVICE: - pbuf = pdev->pDesc->GetDeviceDescriptor(pdev->dev_speed, &len); - break; - - case USB_DESC_TYPE_CONFIGURATION: - if (pdev->dev_speed == USBD_SPEED_HIGH) - { - pbuf = pdev->pClass->GetHSConfigDescriptor(&len); - pbuf[1] = USB_DESC_TYPE_CONFIGURATION; - } - else - { - pbuf = pdev->pClass->GetFSConfigDescriptor(&len); - pbuf[1] = USB_DESC_TYPE_CONFIGURATION; - } - break; - - case USB_DESC_TYPE_STRING: - switch ((uint8_t)(req->wValue)) - { - case USBD_IDX_LANGID_STR: - if (pdev->pDesc->GetLangIDStrDescriptor != NULL) + case USB_DESC_TYPE_BOS: + if (pdev->pDesc->GetBOSDescriptor != NULL) { - pbuf = pdev->pDesc->GetLangIDStrDescriptor(pdev->dev_speed, &len); + pbuf = pdev->pDesc->GetBOSDescriptor(pdev->dev_speed, &len); } else { @@ -429,47 +445,173 @@ static void USBD_GetDescriptor(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *r err++; } break; - - case USBD_IDX_MFC_STR: - if (pdev->pDesc->GetManufacturerStrDescriptor != NULL) - { - pbuf = pdev->pDesc->GetManufacturerStrDescriptor(pdev->dev_speed, &len); - } - else - { - USBD_CtlError(pdev, req); - err++; - } +#endif /* (USBD_LPM_ENABLED == 1U) || (USBD_CLASS_BOS_ENABLED == 1U) */ + case USB_DESC_TYPE_DEVICE: + pbuf = pdev->pDesc->GetDeviceDescriptor(pdev->dev_speed, &len); break; - case USBD_IDX_PRODUCT_STR: - if (pdev->pDesc->GetProductStrDescriptor != NULL) + case USB_DESC_TYPE_CONFIGURATION: + if (pdev->dev_speed == USBD_SPEED_HIGH) { - pbuf = pdev->pDesc->GetProductStrDescriptor(pdev->dev_speed, &len); +#ifdef USE_USBD_COMPOSITE + if ((uint8_t)(pdev->NumClasses) > 0U) + { + pbuf = (uint8_t *)USBD_CMPSIT.GetHSConfigDescriptor(&len); + } + else +#endif /* USE_USBD_COMPOSITE */ + { + pbuf = (uint8_t *)pdev->pClass[0]->GetHSConfigDescriptor(&len); + } + pbuf[1] = USB_DESC_TYPE_CONFIGURATION; } else { - 
USBD_CtlError(pdev, req); - err++; +#ifdef USE_USBD_COMPOSITE + if ((uint8_t)(pdev->NumClasses) > 0U) + { + pbuf = (uint8_t *)USBD_CMPSIT.GetFSConfigDescriptor(&len); + } + else +#endif /* USE_USBD_COMPOSITE */ + { + pbuf = (uint8_t *)pdev->pClass[0]->GetFSConfigDescriptor(&len); + } + pbuf[1] = USB_DESC_TYPE_CONFIGURATION; } break; - case USBD_IDX_SERIAL_STR: - if (pdev->pDesc->GetSerialStrDescriptor != NULL) - { - pbuf = pdev->pDesc->GetSerialStrDescriptor(pdev->dev_speed, &len); - } - else + case USB_DESC_TYPE_STRING: + switch ((uint8_t)(req->wValue)) { - USBD_CtlError(pdev, req); - err++; + case USBD_IDX_LANGID_STR: + if (pdev->pDesc->GetLangIDStrDescriptor != NULL) + { + pbuf = pdev->pDesc->GetLangIDStrDescriptor(pdev->dev_speed, &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; + + case USBD_IDX_MFC_STR: + if (pdev->pDesc->GetManufacturerStrDescriptor != NULL) + { + pbuf = pdev->pDesc->GetManufacturerStrDescriptor(pdev->dev_speed, &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; + + case USBD_IDX_PRODUCT_STR: + if (pdev->pDesc->GetProductStrDescriptor != NULL) + { + pbuf = pdev->pDesc->GetProductStrDescriptor(pdev->dev_speed, &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; + + case USBD_IDX_SERIAL_STR: + if (pdev->pDesc->GetSerialStrDescriptor != NULL) + { + pbuf = pdev->pDesc->GetSerialStrDescriptor(pdev->dev_speed, &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; + + case USBD_IDX_CONFIG_STR: + if (pdev->pDesc->GetConfigurationStrDescriptor != NULL) + { + pbuf = pdev->pDesc->GetConfigurationStrDescriptor(pdev->dev_speed, &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; + + case USBD_IDX_INTERFACE_STR: + if (pdev->pDesc->GetInterfaceStrDescriptor != NULL) + { + pbuf = pdev->pDesc->GetInterfaceStrDescriptor(pdev->dev_speed, &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } + break; + + default: +#if (USBD_SUPPORT_USER_STRING_DESC == 1U) + pbuf = NULL; + + for (uint32_t idx = 0U; (idx < pdev->NumClasses); idx++) + { + if (pdev->pClass[idx]->GetUsrStrDescriptor != NULL) + { + pdev->classId = idx; + pbuf = pdev->pClass[idx]->GetUsrStrDescriptor(pdev, LOBYTE(req->wValue), &len); + + if (pbuf == NULL) /* This means that no class recognized the string index */ + { + continue; + } + else + { + break; + } + } + } +#endif /* USBD_SUPPORT_USER_STRING_DESC */ + +#if (USBD_CLASS_USER_STRING_DESC == 1U) + if (pdev->pDesc->GetUserStrDescriptor != NULL) + { + pbuf = pdev->pDesc->GetUserStrDescriptor(pdev->dev_speed, LOBYTE(req->wValue), &len); + } + else + { + USBD_CtlError(pdev, req); + err++; + } +#endif /* USBD_SUPPORT_USER_STRING_DESC */ + +#if ((USBD_CLASS_USER_STRING_DESC == 0U) && (USBD_SUPPORT_USER_STRING_DESC == 0U)) + USBD_CtlError(pdev, req); + err++; +#endif /* (USBD_CLASS_USER_STRING_DESC == 0U) && (USBD_SUPPORT_USER_STRING_DESC == 0U) */ + break; } break; - case USBD_IDX_CONFIG_STR: - if (pdev->pDesc->GetConfigurationStrDescriptor != NULL) + case USB_DESC_TYPE_DEVICE_QUALIFIER: + if (pdev->dev_speed == USBD_SPEED_HIGH) { - pbuf = pdev->pDesc->GetConfigurationStrDescriptor(pdev->dev_speed, &len); +#ifdef USE_USBD_COMPOSITE + if ((uint8_t)(pdev->NumClasses) > 0U) + { + pbuf = (uint8_t *)USBD_CMPSIT.GetDeviceQualifierDescriptor(&len); + } + else +#endif /* USE_USBD_COMPOSITE */ + { + pbuf = (uint8_t *)pdev->pClass[0]->GetDeviceQualifierDescriptor(&len); + } } else { @@ -478,10 +620,20 @@ static void USBD_GetDescriptor(USBD_HandleTypeDef *pdev, 
USBD_SetupReqTypedef *r } break; - case USBD_IDX_INTERFACE_STR: - if (pdev->pDesc->GetInterfaceStrDescriptor != NULL) + case USB_DESC_TYPE_OTHER_SPEED_CONFIGURATION: + if (pdev->dev_speed == USBD_SPEED_HIGH) { - pbuf = pdev->pDesc->GetInterfaceStrDescriptor(pdev->dev_speed, &len); +#ifdef USE_USBD_COMPOSITE + if ((uint8_t)(pdev->NumClasses) > 0U) + { + pbuf = (uint8_t *)USBD_CMPSIT.GetOtherSpeedConfigDescriptor(&len); + } + else +#endif /* USE_USBD_COMPOSITE */ + { + pbuf = (uint8_t *)pdev->pClass[0]->GetOtherSpeedConfigDescriptor(&len); + } + pbuf[1] = USB_DESC_TYPE_OTHER_SPEED_CONFIGURATION; } else { @@ -491,97 +643,42 @@ static void USBD_GetDescriptor(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *r break; default: -#if (USBD_SUPPORT_USER_STRING_DESC == 1U) - if (pdev->pClass->GetUsrStrDescriptor != NULL) - { - pbuf = pdev->pClass->GetUsrStrDescriptor(pdev, (req->wValue), &len); - } - else - { - USBD_CtlError(pdev, req); - err++; - } -#elif (USBD_CLASS_USER_STRING_DESC == 1U) - if (pdev->pDesc->GetUserStrDescriptor != NULL) - { - pbuf = pdev->pDesc->GetUserStrDescriptor(pdev->dev_speed, (req->wValue), &len); - } - else - { - USBD_CtlError(pdev, req); - err++; - } -#else USBD_CtlError(pdev, req); err++; -#endif break; - } - break; - - case USB_DESC_TYPE_DEVICE_QUALIFIER: - if (pdev->dev_speed == USBD_SPEED_HIGH) - { - pbuf = pdev->pClass->GetDeviceQualifierDescriptor(&len); - } - else - { - USBD_CtlError(pdev, req); - err++; - } - break; - - case USB_DESC_TYPE_OTHER_SPEED_CONFIGURATION: - if (pdev->dev_speed == USBD_SPEED_HIGH) - { - pbuf = pdev->pClass->GetOtherSpeedConfigDescriptor(&len); - pbuf[1] = USB_DESC_TYPE_OTHER_SPEED_CONFIGURATION; - } - else - { - USBD_CtlError(pdev, req); - err++; - } - break; - - default: - USBD_CtlError(pdev, req); - err++; - break; } if (err != 0U) { return; } - else + + if (req->wLength != 0U) { - if (req->wLength != 0U) + if (len != 0U) { - if (len != 0U) - { - len = MIN(len, req->wLength); - (void)USBD_CtlSendData(pdev, pbuf, len); - } - else - { - USBD_CtlError(pdev, req); - } + len = MIN(len, req->wLength); + (void)USBD_CtlSendData(pdev, pbuf, len); } else { - (void)USBD_CtlSendStatus(pdev); + USBD_CtlError(pdev, req); } } + else + { + (void)USBD_CtlSendStatus(pdev); + } } + /** -* @brief USBD_SetAddress -* Set device address -* @param pdev: device instance -* @param req: usb request -* @retval status -*/ + * @brief USBD_SetAddress + * Set device address + * @param pdev: device instance + * @param req: usb request + * @retval None + */ static void USBD_SetAddress(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) { uint8_t dev_addr; @@ -617,12 +714,12 @@ static void USBD_SetAddress(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) } /** -* @brief USBD_SetConfig -* Handle Set device configuration request -* @param pdev: device instance -* @param req: usb request -* @retval status -*/ + * @brief USBD_SetConfig + * Handle Set device configuration request + * @param pdev: device instance + * @param req: usb request + * @retval status + */ static USBD_StatusTypeDef USBD_SetConfig(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) { USBD_StatusTypeDef ret = USBD_OK; @@ -638,81 +735,89 @@ static USBD_StatusTypeDef USBD_SetConfig(USBD_HandleTypeDef *pdev, USBD_SetupReq switch (pdev->dev_state) { - case USBD_STATE_ADDRESSED: - if (cfgidx != 0U) - { - pdev->dev_config = cfgidx; + case USBD_STATE_ADDRESSED: + if (cfgidx != 0U) + { + pdev->dev_config = cfgidx; - ret = USBD_SetClassConfig(pdev, cfgidx); + ret = USBD_SetClassConfig(pdev, cfgidx); - if (ret 
!= USBD_OK) - { - USBD_CtlError(pdev, req); + if (ret != USBD_OK) + { + USBD_CtlError(pdev, req); + pdev->dev_state = USBD_STATE_ADDRESSED; + } + else + { + (void)USBD_CtlSendStatus(pdev); + pdev->dev_state = USBD_STATE_CONFIGURED; + +#if (USBD_USER_REGISTER_CALLBACK == 1U) + if (pdev->DevStateCallback != NULL) + { + pdev->DevStateCallback(USBD_STATE_CONFIGURED, cfgidx); + } +#endif /* USBD_USER_REGISTER_CALLBACK */ + } } else { (void)USBD_CtlSendStatus(pdev); - pdev->dev_state = USBD_STATE_CONFIGURED; } - } - else - { - (void)USBD_CtlSendStatus(pdev); - } - break; + break; - case USBD_STATE_CONFIGURED: - if (cfgidx == 0U) - { - pdev->dev_state = USBD_STATE_ADDRESSED; - pdev->dev_config = cfgidx; - (void)USBD_ClrClassConfig(pdev, cfgidx); - (void)USBD_CtlSendStatus(pdev); - } - else if (cfgidx != pdev->dev_config) - { - /* Clear old configuration */ - (void)USBD_ClrClassConfig(pdev, (uint8_t)pdev->dev_config); + case USBD_STATE_CONFIGURED: + if (cfgidx == 0U) + { + pdev->dev_state = USBD_STATE_ADDRESSED; + pdev->dev_config = cfgidx; + (void)USBD_ClrClassConfig(pdev, cfgidx); + (void)USBD_CtlSendStatus(pdev); + } + else if (cfgidx != pdev->dev_config) + { + /* Clear old configuration */ + (void)USBD_ClrClassConfig(pdev, (uint8_t)pdev->dev_config); - /* set new configuration */ - pdev->dev_config = cfgidx; + /* set new configuration */ + pdev->dev_config = cfgidx; - ret = USBD_SetClassConfig(pdev, cfgidx); + ret = USBD_SetClassConfig(pdev, cfgidx); - if (ret != USBD_OK) - { - USBD_CtlError(pdev, req); - (void)USBD_ClrClassConfig(pdev, (uint8_t)pdev->dev_config); - pdev->dev_state = USBD_STATE_ADDRESSED; + if (ret != USBD_OK) + { + USBD_CtlError(pdev, req); + (void)USBD_ClrClassConfig(pdev, (uint8_t)pdev->dev_config); + pdev->dev_state = USBD_STATE_ADDRESSED; + } + else + { + (void)USBD_CtlSendStatus(pdev); + } } else { (void)USBD_CtlSendStatus(pdev); } - } - else - { - (void)USBD_CtlSendStatus(pdev); - } - break; + break; - default: - USBD_CtlError(pdev, req); - (void)USBD_ClrClassConfig(pdev, cfgidx); - ret = USBD_FAIL; - break; + default: + USBD_CtlError(pdev, req); + (void)USBD_ClrClassConfig(pdev, cfgidx); + ret = USBD_FAIL; + break; } return ret; } /** -* @brief USBD_GetConfig -* Handle Get device configuration request -* @param pdev: device instance -* @param req: usb request -* @retval status -*/ + * @brief USBD_GetConfig + * Handle Get device configuration request + * @param pdev: device instance + * @param req: usb request + * @retval None + */ static void USBD_GetConfig(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) { if (req->wLength != 1U) @@ -723,71 +828,71 @@ static void USBD_GetConfig(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) { switch (pdev->dev_state) { - case USBD_STATE_DEFAULT: - case USBD_STATE_ADDRESSED: - pdev->dev_default_config = 0U; - (void)USBD_CtlSendData(pdev, (uint8_t *)&pdev->dev_default_config, 1U); - break; + case USBD_STATE_DEFAULT: + case USBD_STATE_ADDRESSED: + pdev->dev_default_config = 0U; + (void)USBD_CtlSendData(pdev, (uint8_t *)&pdev->dev_default_config, 1U); + break; - case USBD_STATE_CONFIGURED: - (void)USBD_CtlSendData(pdev, (uint8_t *)&pdev->dev_config, 1U); - break; + case USBD_STATE_CONFIGURED: + (void)USBD_CtlSendData(pdev, (uint8_t *)&pdev->dev_config, 1U); + break; - default: - USBD_CtlError(pdev, req); - break; + default: + USBD_CtlError(pdev, req); + break; } } } /** -* @brief USBD_GetStatus -* Handle Get Status request -* @param pdev: device instance -* @param req: usb request -* @retval status -*/ + * @brief USBD_GetStatus 
+ * Handle Get Status request + * @param pdev: device instance + * @param req: usb request + * @retval None + */ static void USBD_GetStatus(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) { switch (pdev->dev_state) { - case USBD_STATE_DEFAULT: - case USBD_STATE_ADDRESSED: - case USBD_STATE_CONFIGURED: - if (req->wLength != 0x2U) - { - USBD_CtlError(pdev, req); - break; - } + case USBD_STATE_DEFAULT: + case USBD_STATE_ADDRESSED: + case USBD_STATE_CONFIGURED: + if (req->wLength != 0x2U) + { + USBD_CtlError(pdev, req); + break; + } #if (USBD_SELF_POWERED == 1U) - pdev->dev_config_status = USB_CONFIG_SELF_POWERED; + pdev->dev_config_status = USB_CONFIG_SELF_POWERED; #else - pdev->dev_config_status = 0U; -#endif + pdev->dev_config_status = 0U; +#endif /* USBD_SELF_POWERED */ - if (pdev->dev_remote_wakeup != 0U) - { - pdev->dev_config_status |= USB_CONFIG_REMOTE_WAKEUP; - } + if (pdev->dev_remote_wakeup != 0U) + { + pdev->dev_config_status |= USB_CONFIG_REMOTE_WAKEUP; + } - (void)USBD_CtlSendData(pdev, (uint8_t *)&pdev->dev_config_status, 2U); - break; + (void)USBD_CtlSendData(pdev, (uint8_t *)&pdev->dev_config_status, 2U); + break; - default: - USBD_CtlError(pdev, req); - break; + default: + USBD_CtlError(pdev, req); + break; } } /** -* @brief USBD_SetFeature -* Handle Set device feature request -* @param pdev: device instance -* @param req: usb request -* @retval status -*/ + * @brief USBD_SetFeature + * Handle Set device feature request + * @param pdev: device instance + * @param req: usb request + * @retval None + */ static void USBD_SetFeature(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) { if (req->wValue == USB_FEATURE_REMOTE_WAKEUP) @@ -795,16 +900,25 @@ static void USBD_SetFeature(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) pdev->dev_remote_wakeup = 1U; (void)USBD_CtlSendStatus(pdev); } + else if (req->wValue == USB_FEATURE_TEST_MODE) + { + pdev->dev_test_mode = (uint8_t)(req->wIndex >> 8); + (void)USBD_CtlSendStatus(pdev); + } + else + { + USBD_CtlError(pdev, req); + } } /** -* @brief USBD_ClrFeature -* Handle clear device feature request -* @param pdev: device instance -* @param req: usb request -* @retval status -*/ + * @brief USBD_ClrFeature + * Handle clear device feature request + * @param pdev: device instance + * @param req: usb request + * @retval None + */ static void USBD_ClrFeature(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) { switch (pdev->dev_state) @@ -825,14 +939,14 @@ static void USBD_ClrFeature(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) } } -/** -* @brief USBD_ParseSetupRequest -* Copy buffer into setup structure -* @param pdev: device instance -* @param req: usb request -* @retval None -*/ +/** + * @brief USBD_ParseSetupRequest + * Copy buffer into setup structure + * @param req: usb request + * @param pdata: setup data pointer + * @retval None + */ void USBD_ParseSetupRequest(USBD_SetupReqTypedef *req, uint8_t *pdata) { uint8_t *pbuff = pdata; @@ -854,14 +968,14 @@ void USBD_ParseSetupRequest(USBD_SetupReqTypedef *req, uint8_t *pdata) req->wLength = SWAPBYTE(pbuff); } -/** -* @brief USBD_CtlError -* Handle USB low level Error -* @param pdev: device instance -* @param req: usb request -* @retval None -*/ +/** + * @brief USBD_CtlError + * Handle USB low level Error + * @param pdev: device instance + * @param req: usb request + * @retval None + */ void USBD_CtlError(USBD_HandleTypeDef *pdev, USBD_SetupReqTypedef *req) { UNUSED(req); @@ -890,7 +1004,7 @@ void USBD_GetString(uint8_t *desc, uint8_t *unicode, uint16_t *len) } pdesc = 
desc; - *len = ((uint16_t)USBD_GetLen(pdesc) * 2U) + 2U; + *len = MIN(USBD_MAX_STR_DESC_SIZ, ((uint16_t)USBD_GetLen(pdesc) * 2U) + 2U); unicode[idx] = *(uint8_t *)len; idx++; @@ -908,6 +1022,7 @@ void USBD_GetString(uint8_t *desc, uint8_t *unicode, uint16_t *len) } } + /** * @brief USBD_GetLen * return the string length @@ -941,4 +1056,3 @@ static uint8_t USBD_GetLen(uint8_t *buf) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ioreq.c b/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ioreq.c index 8ac5491f..7c8004ad 100644 --- a/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ioreq.c +++ b/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ioreq.c @@ -6,13 +6,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2015 STMicroelectronics. - * All rights reserved.
+ * Copyright (c) 2015 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -77,20 +76,25 @@ */ /** -* @brief USBD_CtlSendData -* send data on the ctl pipe -* @param pdev: device instance -* @param buff: pointer to data buffer -* @param len: length of data to be sent -* @retval status -*/ + * @brief USBD_CtlSendData + * send data on the ctl pipe + * @param pdev: device instance + * @param buff: pointer to data buffer + * @param len: length of data to be sent + * @retval status + */ USBD_StatusTypeDef USBD_CtlSendData(USBD_HandleTypeDef *pdev, uint8_t *pbuf, uint32_t len) { /* Set EP0 State */ pdev->ep0_state = USBD_EP0_DATA_IN; pdev->ep_in[0].total_length = len; + +#ifdef USBD_AVOID_PACKET_SPLIT_MPS + pdev->ep_in[0].rem_length = 0U; +#else pdev->ep_in[0].rem_length = len; +#endif /* USBD_AVOID_PACKET_SPLIT_MPS */ /* Start the transfer */ (void)USBD_LL_Transmit(pdev, 0x00U, pbuf, len); @@ -99,13 +103,13 @@ USBD_StatusTypeDef USBD_CtlSendData(USBD_HandleTypeDef *pdev, } /** -* @brief USBD_CtlContinueSendData -* continue sending data on the ctl pipe -* @param pdev: device instance -* @param buff: pointer to data buffer -* @param len: length of data to be sent -* @retval status -*/ + * @brief USBD_CtlContinueSendData + * continue sending data on the ctl pipe + * @param pdev: device instance + * @param buff: pointer to data buffer + * @param len: length of data to be sent + * @retval status + */ USBD_StatusTypeDef USBD_CtlContinueSendData(USBD_HandleTypeDef *pdev, uint8_t *pbuf, uint32_t len) { @@ -116,20 +120,25 @@ USBD_StatusTypeDef USBD_CtlContinueSendData(USBD_HandleTypeDef *pdev, } /** -* @brief USBD_CtlPrepareRx -* receive data on the ctl pipe -* @param pdev: device instance -* @param buff: pointer to data buffer -* @param len: length of data to be received -* @retval status -*/ + * @brief USBD_CtlPrepareRx + * receive data on the ctl pipe + * @param pdev: device instance + * @param buff: pointer to data buffer + * @param len: length of data to be received + * @retval status + */ USBD_StatusTypeDef USBD_CtlPrepareRx(USBD_HandleTypeDef *pdev, uint8_t *pbuf, uint32_t len) { /* Set EP0 State */ pdev->ep0_state = USBD_EP0_DATA_OUT; pdev->ep_out[0].total_length = len; + +#ifdef USBD_AVOID_PACKET_SPLIT_MPS + pdev->ep_out[0].rem_length = 0U; +#else pdev->ep_out[0].rem_length = len; +#endif /* USBD_AVOID_PACKET_SPLIT_MPS */ /* Start the transfer */ (void)USBD_LL_PrepareReceive(pdev, 0U, pbuf, len); @@ -138,13 +147,13 @@ USBD_StatusTypeDef USBD_CtlPrepareRx(USBD_HandleTypeDef *pdev, } /** -* @brief USBD_CtlContinueRx -* continue receive data on the ctl pipe -* @param pdev: device instance -* @param buff: pointer to data buffer -* @param len: length of data to be received -* @retval status -*/ + * @brief USBD_CtlContinueRx + * continue receive data on the ctl pipe + * @param pdev: device instance + * @param buff: pointer to data buffer + * @param len: length of data to be received + * @retval status + */ USBD_StatusTypeDef USBD_CtlContinueRx(USBD_HandleTypeDef *pdev, uint8_t *pbuf, 
uint32_t len) { @@ -154,11 +163,11 @@ USBD_StatusTypeDef USBD_CtlContinueRx(USBD_HandleTypeDef *pdev, } /** -* @brief USBD_CtlSendStatus -* send zero lzngth packet on the ctl pipe -* @param pdev: device instance -* @retval status -*/ + * @brief USBD_CtlSendStatus + * send zero lzngth packet on the ctl pipe + * @param pdev: device instance + * @retval status + */ USBD_StatusTypeDef USBD_CtlSendStatus(USBD_HandleTypeDef *pdev) { /* Set EP0 State */ @@ -171,11 +180,11 @@ USBD_StatusTypeDef USBD_CtlSendStatus(USBD_HandleTypeDef *pdev) } /** -* @brief USBD_CtlReceiveStatus -* receive zero lzngth packet on the ctl pipe -* @param pdev: device instance -* @retval status -*/ + * @brief USBD_CtlReceiveStatus + * receive zero lzngth packet on the ctl pipe + * @param pdev: device instance + * @retval status + */ USBD_StatusTypeDef USBD_CtlReceiveStatus(USBD_HandleTypeDef *pdev) { /* Set EP0 State */ @@ -188,12 +197,12 @@ USBD_StatusTypeDef USBD_CtlReceiveStatus(USBD_HandleTypeDef *pdev) } /** -* @brief USBD_GetRxCount -* returns the received data length -* @param pdev: device instance -* @param ep_addr: endpoint address -* @retval Rx Data blength -*/ + * @brief USBD_GetRxCount + * returns the received data length + * @param pdev: device instance + * @param ep_addr: endpoint address + * @retval Rx Data blength + */ uint32_t USBD_GetRxCount(USBD_HandleTypeDef *pdev, uint8_t ep_addr) { return USBD_LL_GetRxDataSize(pdev, ep_addr); @@ -213,4 +222,3 @@ uint32_t USBD_GetRxCount(USBD_HandleTypeDef *pdev, uint8_t ep_addr) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/Middlewares/ST/STM32_USB_Device_Library/LICENSE.txt b/Middlewares/ST/STM32_USB_Device_Library/LICENSE.txt new file mode 100644 index 00000000..e66295c5 --- /dev/null +++ b/Middlewares/ST/STM32_USB_Device_Library/LICENSE.txt @@ -0,0 +1,86 @@ +This software component is provided to you as part of a software package and +applicable license terms are in the Package_license file. If you received this +software component outside of a package or without applicable license terms, +the terms of the SLA0044 license shall apply and are fully reproduced below: + +SLA0044 Rev5/February 2018 + +Software license agreement + +ULTIMATE LIBERTY SOFTWARE LICENSE AGREEMENT + +BY INSTALLING, COPYING, DOWNLOADING, ACCESSING OR OTHERWISE USING THIS SOFTWARE +OR ANY PART THEREOF (AND THE RELATED DOCUMENTATION) FROM STMICROELECTRONICS +INTERNATIONAL N.V, SWISS BRANCH AND/OR ITS AFFILIATED COMPANIES +(STMICROELECTRONICS), THE RECIPIENT, ON BEHALF OF HIMSELF OR HERSELF, OR ON +BEHALF OF ANY ENTITY BY WHICH SUCH RECIPIENT IS EMPLOYED AND/OR ENGAGED AGREES +TO BE BOUND BY THIS SOFTWARE LICENSE AGREEMENT. + +Under STMicroelectronics’ intellectual property rights, the redistribution, +reproduction and use in source and binary forms of the software or any part +thereof, with or without modification, are permitted provided that the following +conditions are met: + +1. Redistribution of source code (modified or not) must retain any copyright +notice, this list of conditions and the disclaimer set forth below as items 10 +and 11. + +2. 
Redistributions in binary form, except as embedded into microcontroller or +microprocessor device manufactured by or for STMicroelectronics or a software +update for such device, must reproduce any copyright notice provided with the +binary code, this list of conditions, and the disclaimer set forth below as +items 10 and 11, in documentation and/or other materials provided with the +distribution. + +3. Neither the name of STMicroelectronics nor the names of other contributors to +this software may be used to endorse or promote products derived from this +software or part thereof without specific written permission. + +4. This software or any part thereof, including modifications and/or derivative +works of this software, must be used and execute solely and exclusively on or in +combination with a microcontroller or microprocessor device manufactured by or +for STMicroelectronics. + +5. No use, reproduction or redistribution of this software partially or totally +may be done in any manner that would subject this software to any Open Source +Terms. “Open Source Terms” shall mean any open source license which requires as +part of distribution of software that the source code of such software is +distributed therewith or otherwise made available, or open source license that +substantially complies with the Open Source definition specified at +www.opensource.org and any other comparable open source license such as for +example GNU General Public License (GPL), Eclipse Public License (EPL), Apache +Software License, BSD license or MIT license. + +6. STMicroelectronics has no obligation to provide any maintenance, support or +updates for the software. + +7. The software is and will remain the exclusive property of STMicroelectronics +and its licensors. The recipient will not take any action that jeopardizes +STMicroelectronics and its licensors' proprietary rights or acquire any rights +in the software, except the limited rights specified hereunder. + +8. The recipient shall comply with all applicable laws and regulations affecting +the use of the software or any part thereof including any applicable export +control law or regulation. + +9. Redistribution and use of this software or any part thereof other than as +permitted under this license is void and will automatically terminate your +rights under this license. + +10. THIS SOFTWARE IS PROVIDED BY STMICROELECTRONICS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS, IMPLIED OR STATUTORY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NON-INFRINGEMENT OF THIRD PARTY INTELLECTUAL PROPERTY RIGHTS, WHICH ARE +DISCLAIMED TO THE FULLEST EXTENT PERMITTED BY LAW. IN NO EVENT SHALL +STMICROELECTRONICS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +11. EXCEPT AS EXPRESSLY PERMITTED HEREUNDER, NO LICENSE OR OTHER RIGHTS, WHETHER +EXPRESS OR IMPLIED, ARE GRANTED UNDER ANY PATENT OR OTHER INTELLECTUAL PROPERTY +RIGHTS OF STMICROELECTRONICS OR ANY THIRD PARTY. 
+ diff --git a/Middlewares/Third_Party/LwIP/src/api/api_lib.c b/Middlewares/Third_Party/LwIP/src/api/api_lib.c new file mode 100644 index 00000000..e03b8b74 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/api_lib.c @@ -0,0 +1,1367 @@ +/** + * @file + * Sequential API External module + * + * @defgroup netconn Netconn API + * @ingroup sequential_api + * Thread-safe, to be called from non-TCPIP threads only. + * TX/RX handling based on @ref netbuf (containing @ref pbuf) + * to avoid copying data around. + * + * @defgroup netconn_common Common functions + * @ingroup netconn + * For use with TCP and UDP + * + * @defgroup netconn_tcp TCP only + * @ingroup netconn + * TCP only functions + * + * @defgroup netconn_udp UDP only + * @ingroup netconn + * UDP only functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + */ + +/* This is the part of the API that is linked with + the application */ + +#include "lwip/opt.h" + +#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/api.h" +#include "lwip/memp.h" + +#include "lwip/ip.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/priv/api_msg.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/priv/tcpip_priv.h" + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#include <string.h> + +#define API_MSG_VAR_REF(name) API_VAR_REF(name) +#define API_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct api_msg, name) +#define API_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name, ERR_MEM) +#define API_MSG_VAR_ALLOC_RETURN_NULL(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name, NULL) +#define API_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_API_MSG, name) + +#if TCP_LISTEN_BACKLOG +/* need to allocate API message for accept so empty message pool does not result in event loss + * see bug #47512: MPU_COMPATIBLE may fail on empty pool */ +#define API_MSG_VAR_ALLOC_ACCEPT(msg) API_MSG_VAR_ALLOC(msg) +#define API_MSG_VAR_FREE_ACCEPT(msg) API_MSG_VAR_FREE(msg) +#else /* TCP_LISTEN_BACKLOG */ +#define API_MSG_VAR_ALLOC_ACCEPT(msg) +#define API_MSG_VAR_FREE_ACCEPT(msg) +#endif /* TCP_LISTEN_BACKLOG */ + +#if LWIP_NETCONN_FULLDUPLEX +#define NETCONN_RECVMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->recvmbox) && (((conn)->flags & NETCONN_FLAG_MBOXINVALID) == 0)) +#define NETCONN_ACCEPTMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->acceptmbox) && (((conn)->flags & (NETCONN_FLAG_MBOXCLOSED|NETCONN_FLAG_MBOXINVALID)) == 0)) +#define NETCONN_MBOX_WAITING_INC(conn) SYS_ARCH_INC(conn->mbox_threads_waiting, 1) +#define NETCONN_MBOX_WAITING_DEC(conn) SYS_ARCH_DEC(conn->mbox_threads_waiting, 1) +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define NETCONN_RECVMBOX_WAITABLE(conn) sys_mbox_valid(&(conn)->recvmbox) +#define NETCONN_ACCEPTMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->acceptmbox) && (((conn)->flags & NETCONN_FLAG_MBOXCLOSED) == 0)) +#define NETCONN_MBOX_WAITING_INC(conn) +#define NETCONN_MBOX_WAITING_DEC(conn) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +static err_t netconn_close_shutdown(struct netconn *conn, u8_t how); + +/** + * Call the lower part of a netconn_* function + * This function is then running in the thread context + * of tcpip_thread and has exclusive access to lwIP core code. + * + * @param fn function to call + * @param apimsg a struct containing the function to call and its parameters + * @return ERR_OK if the function was called, another err_t if not + */ +static err_t +netconn_apimsg(tcpip_callback_fn fn, struct api_msg *apimsg) +{ + err_t err; + +#ifdef LWIP_DEBUG + /* catch functions that don't set err */ + apimsg->err = ERR_VAL; +#endif /* LWIP_DEBUG */ + +#if LWIP_NETCONN_SEM_PER_THREAD + apimsg->op_completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + err = tcpip_send_msg_wait_sem(fn, apimsg, LWIP_API_MSG_SEM(apimsg)); + if (err == ERR_OK) { + return apimsg->err; + } + return err; +} + +/** + * Create a new netconn (of a specific type) that has a callback function. + * The corresponding pcb is also created. 
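+ *
+ * Illustrative usage sketch (not from the upstream lwIP sources): most
+ * applications call this through the netconn_new() convenience macro,
+ * which passes proto = 0 and callback = NULL. A NULL return means that
+ * either the netconn itself or its api_msg could not be allocated:
+ *
+ *   struct netconn *conn = netconn_new(NETCONN_TCP);
+ *   if (conn == NULL) {
+ *     return ERR_MEM;
+ *   }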
+ * + * @param t the type of 'connection' to create (@see enum netconn_type) + * @param proto the IP protocol for RAW IP pcbs + * @param callback a function to call on status changes (RX available, TX'ed) + * @return a newly allocated struct netconn or + * NULL on memory error + */ +struct netconn * +netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto, netconn_callback callback) +{ + struct netconn *conn; + API_MSG_VAR_DECLARE(msg); + API_MSG_VAR_ALLOC_RETURN_NULL(msg); + + conn = netconn_alloc(t, callback); + if (conn != NULL) { + err_t err; + + API_MSG_VAR_REF(msg).msg.n.proto = proto; + API_MSG_VAR_REF(msg).conn = conn; + err = netconn_apimsg(lwip_netconn_do_newconn, &API_MSG_VAR_REF(msg)); + if (err != ERR_OK) { + LWIP_ASSERT("freeing conn without freeing pcb", conn->pcb.tcp == NULL); + LWIP_ASSERT("conn has no recvmbox", sys_mbox_valid(&conn->recvmbox)); +#if LWIP_TCP + LWIP_ASSERT("conn->acceptmbox shouldn't exist", !sys_mbox_valid(&conn->acceptmbox)); +#endif /* LWIP_TCP */ +#if !LWIP_NETCONN_SEM_PER_THREAD + LWIP_ASSERT("conn has no op_completed", sys_sem_valid(&conn->op_completed)); + sys_sem_free(&conn->op_completed); +#endif /* !LWIP_NETCONN_SEM_PER_THREAD */ + sys_mbox_free(&conn->recvmbox); + memp_free(MEMP_NETCONN, conn); + API_MSG_VAR_FREE(msg); + return NULL; + } + } + API_MSG_VAR_FREE(msg); + return conn; +} + +/** + * @ingroup netconn_common + * Close a netconn 'connection' and free all its resources but not the netconn itself. + * UDP and RAW connection are completely closed, TCP pcbs might still be in a waitstate + * after this returns. + * + * @param conn the netconn to delete + * @return ERR_OK if the connection was deleted + */ +err_t +netconn_prepare_delete(struct netconn *conn) +{ + err_t err; + API_MSG_VAR_DECLARE(msg); + + /* No ASSERT here because possible to get a (conn == NULL) if we got an accept error */ + if (conn == NULL) { + return ERR_OK; + } + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.sd.time_started = sys_now(); +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ +#if LWIP_TCP + API_MSG_VAR_REF(msg).msg.sd.polls_left = + ((LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT + TCP_SLOW_INTERVAL - 1) / TCP_SLOW_INTERVAL) + 1; +#endif /* LWIP_TCP */ +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + err = netconn_apimsg(lwip_netconn_do_delconn, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + if (err != ERR_OK) { + return err; + } + return ERR_OK; +} + +/** + * @ingroup netconn_common + * Close a netconn 'connection' and free its resources. + * UDP and RAW connection are completely closed, TCP pcbs might still be in a waitstate + * after this returns. + * + * @param conn the netconn to delete + * @return ERR_OK if the connection was deleted + */ +err_t +netconn_delete(struct netconn *conn) +{ + err_t err; + + /* No ASSERT here because possible to get a (conn == NULL) if we got an accept error */ + if (conn == NULL) { + return ERR_OK; + } + +#if LWIP_NETCONN_FULLDUPLEX + if (conn->flags & NETCONN_FLAG_MBOXINVALID) { + /* Already called netconn_prepare_delete() before */ + err = ERR_OK; + } else +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + err = netconn_prepare_delete(conn); + } + if (err == ERR_OK) { + netconn_free(conn); + } + return err; +} + +/** + * Get the local or remote IP address and port of a netconn. + * For RAW netconns, this returns the protocol instead of a port! 
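+ *
+ * Illustrative usage sketch (not from the upstream lwIP sources),
+ * querying the local binding of an already bound or connected netconn:
+ *
+ *   ip_addr_t ip;
+ *   u16_t port;
+ *   if (netconn_getaddr(conn, &ip, &port, 1) == ERR_OK) {
+ *     printf("local %s:%u\n", ipaddr_ntoa(&ip), (unsigned)port);
+ *   }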
+ * + * @param conn the netconn to query + * @param addr a pointer to which to save the IP address + * @param port a pointer to which to save the port (or protocol for RAW) + * @param local 1 to get the local IP address, 0 to get the remote one + * @return ERR_CONN for invalid connections + * ERR_OK if the information was retrieved + */ +err_t +netconn_getaddr(struct netconn *conn, ip_addr_t *addr, u16_t *port, u8_t local) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_getaddr: invalid conn", (conn != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_getaddr: invalid addr", (addr != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_getaddr: invalid port", (port != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.ad.local = local; +#if LWIP_MPU_COMPATIBLE + err = netconn_apimsg(lwip_netconn_do_getaddr, &API_MSG_VAR_REF(msg)); + *addr = msg->msg.ad.ipaddr; + *port = msg->msg.ad.port; +#else /* LWIP_MPU_COMPATIBLE */ + msg.msg.ad.ipaddr = addr; + msg.msg.ad.port = port; + err = netconn_apimsg(lwip_netconn_do_getaddr, &msg); +#endif /* LWIP_MPU_COMPATIBLE */ + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Bind a netconn to a specific local IP address and port. + * Binding one netconn twice might not always be checked correctly! + * + * @param conn the netconn to bind + * @param addr the local IP address to bind the netconn to + * (use IP4_ADDR_ANY/IP6_ADDR_ANY to bind to all addresses) + * @param port the local port to bind the netconn to (not used for RAW) + * @return ERR_OK if bound, any other err_t on failure + */ +err_t +netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_bind: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (addr == NULL) { + addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV4 && LWIP_IPV6 + /* "Socket API like" dual-stack support: If IP to bind to is IP6_ADDR_ANY, + * and NETCONN_FLAG_IPV6_V6ONLY is 0, use IP_ANY_TYPE to bind + */ + if ((netconn_get_ipv6only(conn) == 0) && + ip_addr_cmp(addr, IP6_ADDR_ANY)) { + addr = IP_ANY_TYPE; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.ipaddr = API_MSG_VAR_REF(addr); + API_MSG_VAR_REF(msg).msg.bc.port = port; + err = netconn_apimsg(lwip_netconn_do_bind, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Bind a netconn to a specific interface and port. + * Binding one netconn twice might not always be checked correctly! + * + * @param conn the netconn to bind + * @param if_idx the local interface index to bind the netconn to + * @return ERR_OK if bound, any other err_t on failure + */ +err_t +netconn_bind_if(struct netconn *conn, u8_t if_idx) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_bind_if: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.if_idx = if_idx; + err = netconn_apimsg(lwip_netconn_do_bind_if, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Connect a netconn to a specific remote IP address and port. 
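+ *
+ * Illustrative usage sketch (not from the upstream lwIP sources; the
+ * address 192.168.1.10 and port 7 are arbitrary example values):
+ *
+ *   ip_addr_t server;
+ *   ipaddr_aton("192.168.1.10", &server);
+ *   if (netconn_connect(conn, &server, 7) == ERR_OK) {
+ *     netconn_write(conn, "hello", 5, NETCONN_COPY);
+ *   }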
+ * + * @param conn the netconn to connect + * @param addr the remote IP address to connect to + * @param port the remote port to connect to (no used for RAW) + * @return ERR_OK if connected, return value of tcp_/udp_/raw_connect otherwise + */ +err_t +netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_connect: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (addr == NULL) { + addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.ipaddr = API_MSG_VAR_REF(addr); + API_MSG_VAR_REF(msg).msg.bc.port = port; + err = netconn_apimsg(lwip_netconn_do_connect, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_udp + * Disconnect a netconn from its current peer (only valid for UDP netconns). + * + * @param conn the netconn to disconnect + * @return See @ref err_t + */ +err_t +netconn_disconnect(struct netconn *conn) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_disconnect: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + err = netconn_apimsg(lwip_netconn_do_disconnect, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Set a TCP netconn into listen mode + * + * @param conn the tcp netconn to set to listen mode + * @param backlog the listen backlog, only used if TCP_LISTEN_BACKLOG==1 + * @return ERR_OK if the netconn was set to listen (UDP and RAW netconns + * don't return any error (yet?)) + */ +err_t +netconn_listen_with_backlog(struct netconn *conn, u8_t backlog) +{ +#if LWIP_TCP + API_MSG_VAR_DECLARE(msg); + err_t err; + + /* This does no harm. If TCP_LISTEN_BACKLOG is off, backlog is unused. */ + LWIP_UNUSED_ARG(backlog); + + LWIP_ERROR("netconn_listen: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if TCP_LISTEN_BACKLOG + API_MSG_VAR_REF(msg).msg.lb.backlog = backlog; +#endif /* TCP_LISTEN_BACKLOG */ + err = netconn_apimsg(lwip_netconn_do_listen, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(conn); + LWIP_UNUSED_ARG(backlog); + return ERR_ARG; +#endif /* LWIP_TCP */ +} + +/** + * @ingroup netconn_tcp + * Accept a new connection on a TCP listening netconn. 
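+ *
+ * Illustrative usage sketch of a blocking accept loop (not from the
+ * upstream lwIP sources; port 80 is an arbitrary example value and
+ * handle_client() stands for application-specific code):
+ *
+ *   struct netconn *srv = netconn_new(NETCONN_TCP);
+ *   netconn_bind(srv, IP_ADDR_ANY, 80);
+ *   netconn_listen(srv);
+ *   for (;;) {
+ *     struct netconn *client;
+ *     if (netconn_accept(srv, &client) == ERR_OK) {
+ *       handle_client(client);
+ *     }
+ *   }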
+ * + * @param conn the TCP listen netconn + * @param new_conn pointer where the new connection is stored + * @return ERR_OK if a new connection has been received or an error + * code otherwise + */ +err_t +netconn_accept(struct netconn *conn, struct netconn **new_conn) +{ +#if LWIP_TCP + err_t err; + void *accept_ptr; + struct netconn *newconn; +#if TCP_LISTEN_BACKLOG + API_MSG_VAR_DECLARE(msg); +#endif /* TCP_LISTEN_BACKLOG */ + + LWIP_ERROR("netconn_accept: invalid pointer", (new_conn != NULL), return ERR_ARG;); + *new_conn = NULL; + LWIP_ERROR("netconn_accept: invalid conn", (conn != NULL), return ERR_ARG;); + + /* NOTE: Although the opengroup spec says a pending error shall be returned to + send/recv/getsockopt(SO_ERROR) only, we return it for listening + connections also, to handle embedded-system errors */ + err = netconn_err(conn); + if (err != ERR_OK) { + /* return pending error */ + return err; + } + if (!NETCONN_ACCEPTMBOX_WAITABLE(conn)) { + /* don't accept if closed: this might block the application task + waiting on acceptmbox forever! */ + return ERR_CLSD; + } + + API_MSG_VAR_ALLOC_ACCEPT(msg); + + NETCONN_MBOX_WAITING_INC(conn); + if (netconn_is_nonblocking(conn)) { + if (sys_arch_mbox_tryfetch(&conn->acceptmbox, &accept_ptr) == SYS_ARCH_TIMEOUT) { + API_MSG_VAR_FREE_ACCEPT(msg); + NETCONN_MBOX_WAITING_DEC(conn); + return ERR_WOULDBLOCK; + } + } else { +#if LWIP_SO_RCVTIMEO + if (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, conn->recv_timeout) == SYS_ARCH_TIMEOUT) { + API_MSG_VAR_FREE_ACCEPT(msg); + NETCONN_MBOX_WAITING_DEC(conn); + return ERR_TIMEOUT; + } +#else + sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0); +#endif /* LWIP_SO_RCVTIMEO*/ + } + NETCONN_MBOX_WAITING_DEC(conn); +#if LWIP_NETCONN_FULLDUPLEX + if (conn->flags & NETCONN_FLAG_MBOXINVALID) { + if (lwip_netconn_is_deallocated_msg(accept_ptr)) { + /* the netconn has been closed from another thread */ + API_MSG_VAR_FREE_ACCEPT(msg); + return ERR_CONN; + } + } +#endif + + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0); + + if (lwip_netconn_is_err_msg(accept_ptr, &err)) { + /* a connection has been aborted: e.g. out of pcbs or out of netconns during accept */ + API_MSG_VAR_FREE_ACCEPT(msg); + return err; + } + if (accept_ptr == NULL) { + /* connection has been aborted */ + API_MSG_VAR_FREE_ACCEPT(msg); + return ERR_CLSD; + } + newconn = (struct netconn *)accept_ptr; +#if TCP_LISTEN_BACKLOG + /* Let the stack know that we have accepted the connection. */ + API_MSG_VAR_REF(msg).conn = newconn; + /* don't care for the return value of lwip_netconn_do_recv */ + netconn_apimsg(lwip_netconn_do_accepted, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); +#endif /* TCP_LISTEN_BACKLOG */ + + *new_conn = newconn; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(conn); + LWIP_UNUSED_ARG(new_conn); + return ERR_ARG; +#endif /* LWIP_TCP */ +} + +/** + * @ingroup netconn_common + * Receive data: actual implementation that doesn't care whether pbuf or netbuf + * is received (this is internal, it's just here for describing common errors) + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new pbuf/netbuf is stored when received data + * @param apiflags flags that control function behaviour. 
For now only: + * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_CONN if not connected + * ERR_CLSD if TCP connection has been closed + * ERR_WOULDBLOCK if the netconn is nonblocking but would block to wait for data + * ERR_TIMEOUT if the netconn has a receive timeout and no data was received + */ +static err_t +netconn_recv_data(struct netconn *conn, void **new_buf, u8_t apiflags) +{ + void *buf = NULL; + u16_t len; + + LWIP_ERROR("netconn_recv: invalid pointer", (new_buf != NULL), return ERR_ARG;); + *new_buf = NULL; + LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL), return ERR_ARG;); + + if (!NETCONN_RECVMBOX_WAITABLE(conn)) { + err_t err = netconn_err(conn); + if (err != ERR_OK) { + /* return pending error */ + return err; + } + return ERR_CONN; + } + + NETCONN_MBOX_WAITING_INC(conn); + if (netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK) || + (conn->flags & NETCONN_FLAG_MBOXCLOSED) || (conn->pending_err != ERR_OK)) { + if (sys_arch_mbox_tryfetch(&conn->recvmbox, &buf) == SYS_ARCH_TIMEOUT) { + err_t err; + NETCONN_MBOX_WAITING_DEC(conn); + err = netconn_err(conn); + if (err != ERR_OK) { + /* return pending error */ + return err; + } + if (conn->flags & NETCONN_FLAG_MBOXCLOSED) { + return ERR_CONN; + } + return ERR_WOULDBLOCK; + } + } else { +#if LWIP_SO_RCVTIMEO + if (sys_arch_mbox_fetch(&conn->recvmbox, &buf, conn->recv_timeout) == SYS_ARCH_TIMEOUT) { + NETCONN_MBOX_WAITING_DEC(conn); + return ERR_TIMEOUT; + } +#else + sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0); +#endif /* LWIP_SO_RCVTIMEO*/ + } + NETCONN_MBOX_WAITING_DEC(conn); +#if LWIP_NETCONN_FULLDUPLEX + if (conn->flags & NETCONN_FLAG_MBOXINVALID) { + if (lwip_netconn_is_deallocated_msg(buf)) { + /* the netconn has been closed from another thread */ + API_MSG_VAR_FREE_ACCEPT(msg); + return ERR_CONN; + } + } +#endif + +#if LWIP_TCP +#if (LWIP_UDP || LWIP_RAW) + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) +#endif /* (LWIP_UDP || LWIP_RAW) */ + { + err_t err; + /* Check if this is an error message or a pbuf */ + if (lwip_netconn_is_err_msg(buf, &err)) { + /* new_buf has been zeroed above already */ + if (err == ERR_CLSD) { + /* connection closed translates to ERR_OK with *new_buf == NULL */ + return ERR_OK; + } + return err; + } + len = ((struct pbuf *)buf)->tot_len; + } +#endif /* LWIP_TCP */ +#if LWIP_TCP && (LWIP_UDP || LWIP_RAW) + else +#endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */ +#if (LWIP_UDP || LWIP_RAW) + { + LWIP_ASSERT("buf != NULL", buf != NULL); + len = netbuf_len((struct netbuf *)buf); + } +#endif /* (LWIP_UDP || LWIP_RAW) */ + +#if LWIP_SO_RCVBUF + SYS_ARCH_DEC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVMINUS, len); + + LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_recv_data: received %p, len=%"U16_F"\n", buf, len)); + + *new_buf = buf; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; +} + +#if LWIP_TCP +static err_t +netconn_tcp_recvd_msg(struct netconn *conn, size_t len, struct api_msg *msg) +{ + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + msg->conn = conn; + msg->msg.r.len = len; + + return netconn_apimsg(lwip_netconn_do_recv, msg); +} + +err_t +netconn_tcp_recvd(struct netconn *conn, size_t len) +{ + err_t err; + 
API_MSG_VAR_DECLARE(msg); + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + err = netconn_tcp_recvd_msg(conn, len, &API_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + return err; +} + +static err_t +netconn_recv_data_tcp(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags) +{ + err_t err; + struct pbuf *buf; + API_MSG_VAR_DECLARE(msg); +#if LWIP_MPU_COMPATIBLE + msg = NULL; +#endif + + if (!NETCONN_RECVMBOX_WAITABLE(conn)) { + /* This only happens when calling this function more than once *after* receiving FIN */ + return ERR_CONN; + } + if (netconn_is_flag_set(conn, NETCONN_FIN_RX_PENDING)) { + netconn_clear_flags(conn, NETCONN_FIN_RX_PENDING); + goto handle_fin; + } + + if (!(apiflags & NETCONN_NOAUTORCVD)) { + /* need to allocate API message here so empty message pool does not result in event loss + * see bug #47512: MPU_COMPATIBLE may fail on empty pool */ + API_MSG_VAR_ALLOC(msg); + } + + err = netconn_recv_data(conn, (void **)new_buf, apiflags); + if (err != ERR_OK) { + if (!(apiflags & NETCONN_NOAUTORCVD)) { + API_MSG_VAR_FREE(msg); + } + return err; + } + buf = *new_buf; + if (!(apiflags & NETCONN_NOAUTORCVD)) { + /* Let the stack know that we have taken the data. */ + u16_t len = buf ? buf->tot_len : 1; + /* don't care for the return value of lwip_netconn_do_recv */ + /* @todo: this should really be fixed, e.g. by retrying in poll on error */ + netconn_tcp_recvd_msg(conn, len, &API_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + } + + /* If we are closed, we indicate that we no longer wish to use the socket */ + if (buf == NULL) { + if (apiflags & NETCONN_NOFIN) { + /* received a FIN but the caller cannot handle it right now: + re-enqueue it and return "no data" */ + netconn_set_flags(conn, NETCONN_FIN_RX_PENDING); + return ERR_WOULDBLOCK; + } else { +handle_fin: + API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0); + if (conn->pcb.ip == NULL) { + /* race condition: RST during recv */ + err = netconn_err(conn); + if (err != ERR_OK) { + return err; + } + return ERR_RST; + } + /* RX side is closed, so deallocate the recvmbox */ + netconn_close_shutdown(conn, NETCONN_SHUT_RD); + /* Don' store ERR_CLSD as conn->err since we are only half-closed */ + return ERR_CLSD; + } + } + return err; +} + +/** + * @ingroup netconn_tcp + * Receive data (in form of a pbuf) from a TCP netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new pbuf is stored when received data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error, @see netconn_recv_data) + * ERR_ARG if conn is not a TCP netconn + */ +err_t +netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf) +{ + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data_tcp(conn, new_buf, 0); +} + +/** + * @ingroup netconn_tcp + * Receive data (in form of a pbuf) from a TCP netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new pbuf is stored when received data + * @param apiflags flags that control function behaviour. 
For now only: + * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error, @see netconn_recv_data) + * ERR_ARG if conn is not a TCP netconn + */ +err_t +netconn_recv_tcp_pbuf_flags(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags) +{ + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data_tcp(conn, new_buf, apiflags); +} +#endif /* LWIP_TCP */ + +/** + * Receive data (in form of a netbuf) from a UDP or RAW netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_ARG if conn is not a UDP/RAW netconn + */ +err_t +netconn_recv_udp_raw_netbuf(struct netconn *conn, struct netbuf **new_buf) +{ + LWIP_ERROR("netconn_recv_udp_raw_netbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) != NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data(conn, (void **)new_buf, 0); +} + +/** + * Receive data (in form of a netbuf) from a UDP or RAW netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @param apiflags flags that control function behaviour. For now only: + * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_ARG if conn is not a UDP/RAW netconn + */ +err_t +netconn_recv_udp_raw_netbuf_flags(struct netconn *conn, struct netbuf **new_buf, u8_t apiflags) +{ + LWIP_ERROR("netconn_recv_udp_raw_netbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) != NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data(conn, (void **)new_buf, apiflags); +} + +/** + * @ingroup netconn_common + * Receive data (in form of a netbuf containing a packet buffer) from a netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + */ +err_t +netconn_recv(struct netconn *conn, struct netbuf **new_buf) +{ +#if LWIP_TCP + struct netbuf *buf = NULL; + err_t err; +#endif /* LWIP_TCP */ + + LWIP_ERROR("netconn_recv: invalid pointer", (new_buf != NULL), return ERR_ARG;); + *new_buf = NULL; + LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_TCP +#if (LWIP_UDP || LWIP_RAW) + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) +#endif /* (LWIP_UDP || LWIP_RAW) */ + { + struct pbuf *p = NULL; + /* This is not a listening netconn, since recvmbox is set */ + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + return ERR_MEM; + } + + err = netconn_recv_data_tcp(conn, &p, 0); + if (err != ERR_OK) { + memp_free(MEMP_NETBUF, buf); + return err; + } + LWIP_ASSERT("p != NULL", p != NULL); + + buf->p = p; + buf->ptr = p; + buf->port = 0; + ip_addr_set_zero(&buf->addr); + *new_buf = buf; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; + } +#endif /* LWIP_TCP */ +#if LWIP_TCP && (LWIP_UDP || 
LWIP_RAW) + else +#endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */ + { +#if (LWIP_UDP || LWIP_RAW) + return netconn_recv_data(conn, (void **)new_buf, 0); +#endif /* (LWIP_UDP || LWIP_RAW) */ + } +} + +/** + * @ingroup netconn_udp + * Send data (in form of a netbuf) to a specific remote IP address and port. + * Only to be used for UDP and RAW netconns (not TCP). + * + * @param conn the netconn over which to send data + * @param buf a netbuf containing the data to send + * @param addr the remote IP address to which to send the data + * @param port the remote port to which to send the data + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_sendto(struct netconn *conn, struct netbuf *buf, const ip_addr_t *addr, u16_t port) +{ + if (buf != NULL) { + ip_addr_set(&buf->addr, addr); + buf->port = port; + return netconn_send(conn, buf); + } + return ERR_VAL; +} + +/** + * @ingroup netconn_udp + * Send data over a UDP or RAW netconn (that is already connected). + * + * @param conn the UDP or RAW netconn over which to send data + * @param buf a netbuf containing the data to send + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_send(struct netconn *conn, struct netbuf *buf) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_send: invalid conn", (conn != NULL), return ERR_ARG;); + + LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_send: sending %"U16_F" bytes\n", buf->p->tot_len)); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.b = buf; + err = netconn_apimsg(lwip_netconn_do_send, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Send data over a TCP netconn. + * + * @param conn the TCP netconn over which to send data + * @param dataptr pointer to the application buffer that contains the data to send + * @param size size of the application data to send + * @param apiflags combination of following flags : + * - NETCONN_COPY: data will be copied into memory belonging to the stack + * - NETCONN_MORE: for TCP connection, PSH flag will be set on last segment sent + * - NETCONN_DONTBLOCK: only write the data if all data can be written at once + * @param bytes_written pointer to a location that receives the number of written bytes + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size, + u8_t apiflags, size_t *bytes_written) +{ + struct netvector vector; + vector.ptr = dataptr; + vector.len = size; + return netconn_write_vectors_partly(conn, &vector, 1, apiflags, bytes_written); +} + +/** + * Send vectorized data atomically over a TCP netconn. 
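+ *
+ * Illustrative usage sketch (not from the upstream lwIP sources; hdr,
+ * body and their lengths stand for application-owned buffers), sending
+ * a header and a payload as a single atomic write:
+ *
+ *   struct netvector vec[2];
+ *   size_t written;
+ *   vec[0].ptr = hdr;  vec[0].len = hdr_len;
+ *   vec[1].ptr = body; vec[1].len = body_len;
+ *   err = netconn_write_vectors_partly(conn, vec, 2, NETCONN_COPY, &written);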
+ * + * @param conn the TCP netconn over which to send data + * @param vectors array of vectors containing data to send + * @param vectorcnt number of vectors in the array + * @param apiflags combination of following flags : + * - NETCONN_COPY: data will be copied into memory belonging to the stack + * - NETCONN_MORE: for TCP connection, PSH flag will be set on last segment sent + * - NETCONN_DONTBLOCK: only write the data if all data can be written at once + * @param bytes_written pointer to a location that receives the number of written bytes + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_write_vectors_partly(struct netconn *conn, struct netvector *vectors, u16_t vectorcnt, + u8_t apiflags, size_t *bytes_written) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + u8_t dontblock; + size_t size; + int i; + + LWIP_ERROR("netconn_write: invalid conn", (conn != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_write: invalid conn->type", (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP), return ERR_VAL;); + dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK); +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout != 0) { + dontblock = 1; + } +#endif /* LWIP_SO_SNDTIMEO */ + if (dontblock && !bytes_written) { + /* This implies netconn_write() cannot be used for non-blocking send, since + it has no way to return the number of bytes written. */ + return ERR_VAL; + } + + /* sum up the total size */ + size = 0; + for (i = 0; i < vectorcnt; i++) { + size += vectors[i].len; + if (size < vectors[i].len) { + /* overflow */ + return ERR_VAL; + } + } + if (size == 0) { + return ERR_OK; + } else if (size > SSIZE_MAX) { + ssize_t limited; + /* this is required by the socket layer (cannot send full size_t range) */ + if (!bytes_written) { + return ERR_VAL; + } + /* limit the amount of data to send */ + limited = SSIZE_MAX; + size = (size_t)limited; + } + + API_MSG_VAR_ALLOC(msg); + /* non-blocking write sends as much */ + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.w.vector = vectors; + API_MSG_VAR_REF(msg).msg.w.vector_cnt = vectorcnt; + API_MSG_VAR_REF(msg).msg.w.vector_off = 0; + API_MSG_VAR_REF(msg).msg.w.apiflags = apiflags; + API_MSG_VAR_REF(msg).msg.w.len = size; + API_MSG_VAR_REF(msg).msg.w.offset = 0; +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout != 0) { + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.w.time_started = sys_now(); + } else { + API_MSG_VAR_REF(msg).msg.w.time_started = 0; + } +#endif /* LWIP_SO_SNDTIMEO */ + + /* For locking the core: this _can_ be delayed on low memory/low send buffer, + but if it is, this is done inside api_msg.c:do_write(), so we can use the + non-blocking version here. */ + err = netconn_apimsg(lwip_netconn_do_write, &API_MSG_VAR_REF(msg)); + if (err == ERR_OK) { + if (bytes_written != NULL) { + *bytes_written = API_MSG_VAR_REF(msg).msg.w.offset; + } + /* for blocking, check all requested bytes were written, NOTE: send_timeout is + treated as dontblock (see dontblock assignment above) */ + if (!dontblock) { + LWIP_ASSERT("do_write failed to write all bytes", API_MSG_VAR_REF(msg).msg.w.offset == size); + } + } + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Close or shutdown a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to close or shutdown + * @param how fully close or only shutdown one side? 
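+ *        (one of NETCONN_SHUT_RD, NETCONN_SHUT_WR or NETCONN_SHUT_RDWR)
+ *
+ * Applications reach this helper through netconn_close() and
+ * netconn_shutdown(); an illustrative TX-side half-close (not from the
+ * upstream lwIP sources) is:
+ *
+ *   netconn_shutdown(conn, 0, 1);
+ *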
+ * @return ERR_OK if the netconn was closed, any other err_t on error + */ +static err_t +netconn_close_shutdown(struct netconn *conn, u8_t how) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + LWIP_UNUSED_ARG(how); + + LWIP_ERROR("netconn_close: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if LWIP_TCP + /* shutting down both ends is the same as closing */ + API_MSG_VAR_REF(msg).msg.sd.shut = how; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.sd.time_started = sys_now(); +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + API_MSG_VAR_REF(msg).msg.sd.polls_left = + ((LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT + TCP_SLOW_INTERVAL - 1) / TCP_SLOW_INTERVAL) + 1; +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ +#endif /* LWIP_TCP */ + err = netconn_apimsg(lwip_netconn_do_close, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Close a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to close + * @return ERR_OK if the netconn was closed, any other err_t on error + */ +err_t +netconn_close(struct netconn *conn) +{ + /* shutting down both ends is the same as closing */ + return netconn_close_shutdown(conn, NETCONN_SHUT_RDWR); +} + +/** + * @ingroup netconn_common + * Get and reset pending error on a netconn + * + * @param conn the netconn to get the error from + * @return and pending error or ERR_OK if no error was pending + */ +err_t +netconn_err(struct netconn *conn) +{ + err_t err; + SYS_ARCH_DECL_PROTECT(lev); + if (conn == NULL) { + return ERR_OK; + } + SYS_ARCH_PROTECT(lev); + err = conn->pending_err; + conn->pending_err = ERR_OK; + SYS_ARCH_UNPROTECT(lev); + return err; +} + +/** + * @ingroup netconn_tcp + * Shut down one or both sides of a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to shut down + * @param shut_rx shut down the RX side (no more read possible after this) + * @param shut_tx shut down the TX side (no more write possible after this) + * @return ERR_OK if the netconn was closed, any other err_t on error + */ +err_t +netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx) +{ + return netconn_close_shutdown(conn, (u8_t)((shut_rx ? NETCONN_SHUT_RD : 0) | (shut_tx ? NETCONN_SHUT_WR : 0))); +} + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** + * @ingroup netconn_udp + * Join multicast groups for UDP netconns. 
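+ *
+ * Illustrative usage sketch (not from the upstream lwIP sources; the
+ * group address 239.0.0.1 is an arbitrary example):
+ *
+ *   ip_addr_t group;
+ *   ipaddr_aton("239.0.0.1", &group);
+ *   err = netconn_join_leave_group(conn, &group, IP_ADDR_ANY, NETCONN_JOIN);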
+ * + * @param conn the UDP netconn for which to change multicast addresses + * @param multiaddr IP address of the multicast group to join or leave + * @param netif_addr the IP address of the network interface on which to send + * the igmp message + * @param join_or_leave flag whether to send a join- or leave-message + * @return ERR_OK if the action was taken, any err_t on error + */ +err_t +netconn_join_leave_group(struct netconn *conn, + const ip_addr_t *multiaddr, + const ip_addr_t *netif_addr, + enum netconn_igmp join_or_leave) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_join_leave_group: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (multiaddr == NULL) { + multiaddr = IP4_ADDR_ANY; + } + if (netif_addr == NULL) { + netif_addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.jl.multiaddr = API_MSG_VAR_REF(multiaddr); + API_MSG_VAR_REF(msg).msg.jl.netif_addr = API_MSG_VAR_REF(netif_addr); + API_MSG_VAR_REF(msg).msg.jl.join_or_leave = join_or_leave; + err = netconn_apimsg(lwip_netconn_do_join_leave_group, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} +/** + * @ingroup netconn_udp + * Join multicast groups for UDP netconns. + * + * @param conn the UDP netconn for which to change multicast addresses + * @param multiaddr IP address of the multicast group to join or leave + * @param if_idx the index of the netif + * @param join_or_leave flag whether to send a join- or leave-message + * @return ERR_OK if the action was taken, any err_t on error + */ +err_t +netconn_join_leave_group_netif(struct netconn *conn, + const ip_addr_t *multiaddr, + u8_t if_idx, + enum netconn_igmp join_or_leave) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_join_leave_group: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (multiaddr == NULL) { + multiaddr = IP4_ADDR_ANY; + } + if (if_idx == NETIF_NO_INDEX) { + return ERR_IF; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.jl.multiaddr = API_MSG_VAR_REF(multiaddr); + API_MSG_VAR_REF(msg).msg.jl.if_idx = if_idx; + API_MSG_VAR_REF(msg).msg.jl.join_or_leave = join_or_leave; + err = netconn_apimsg(lwip_netconn_do_join_leave_group_netif, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/** + * @ingroup netconn_common + * Execute a DNS query, only one IP address is returned + * + * @param name a string representation of the DNS host name to query + * @param addr a preallocated ip_addr_t where to store the resolved IP address + * @param dns_addrtype IP address type (IPv4 / IPv6) + * @return ERR_OK: resolving succeeded + * ERR_MEM: memory error, try again later + * ERR_ARG: dns client not initialized or invalid hostname + * ERR_VAL: dns server response was invalid + */ +#if LWIP_IPV4 && LWIP_IPV6 +err_t +netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype) +#else +err_t +netconn_gethostbyname(const char *name, ip_addr_t *addr) +#endif +{ + API_VAR_DECLARE(struct dns_api_msg, msg); +#if !LWIP_MPU_COMPATIBLE + sys_sem_t sem; +#endif /* LWIP_MPU_COMPATIBLE */ + err_t err; + err_t cberr; + + LWIP_ERROR("netconn_gethostbyname: 
invalid name", (name != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_gethostbyname: invalid addr", (addr != NULL), return ERR_ARG;); +#if LWIP_MPU_COMPATIBLE + if (strlen(name) >= DNS_MAX_NAME_LENGTH) { + return ERR_ARG; + } +#endif + +#ifdef LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE +#if LWIP_IPV4 && LWIP_IPV6 + if (LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, dns_addrtype, &err)) { +#else + if (LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, NETCONN_DNS_DEFAULT, &err)) { +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return err; + } +#endif /* LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE */ + + API_VAR_ALLOC(struct dns_api_msg, MEMP_DNS_API_MSG, msg, ERR_MEM); +#if LWIP_MPU_COMPATIBLE + strncpy(API_VAR_REF(msg).name, name, DNS_MAX_NAME_LENGTH - 1); + API_VAR_REF(msg).name[DNS_MAX_NAME_LENGTH - 1] = 0; +#else /* LWIP_MPU_COMPATIBLE */ + msg.err = &err; + msg.sem = &sem; + API_VAR_REF(msg).addr = API_VAR_REF(addr); + API_VAR_REF(msg).name = name; +#endif /* LWIP_MPU_COMPATIBLE */ +#if LWIP_IPV4 && LWIP_IPV6 + API_VAR_REF(msg).dns_addrtype = dns_addrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_NETCONN_SEM_PER_THREAD + API_VAR_REF(msg).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD*/ + err = sys_sem_new(API_EXPR_REF(API_VAR_REF(msg).sem), 0); + if (err != ERR_OK) { + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return err; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + cberr = tcpip_send_msg_wait_sem(lwip_netconn_do_gethostbyname, &API_VAR_REF(msg), API_EXPR_REF(API_VAR_REF(msg).sem)); +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(API_EXPR_REF(API_VAR_REF(msg).sem)); +#endif /* !LWIP_NETCONN_SEM_PER_THREAD */ + if (cberr != ERR_OK) { + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return cberr; + } + +#if LWIP_MPU_COMPATIBLE + *addr = msg->addr; + err = msg->err; +#endif /* LWIP_MPU_COMPATIBLE */ + + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return err; +} +#endif /* LWIP_DNS*/ + +#if LWIP_NETCONN_SEM_PER_THREAD +void +netconn_thread_init(void) +{ + sys_sem_t *sem = LWIP_NETCONN_THREAD_SEM_GET(); + if ((sem == NULL) || !sys_sem_valid(sem)) { + /* call alloc only once */ + LWIP_NETCONN_THREAD_SEM_ALLOC(); + LWIP_ASSERT("LWIP_NETCONN_THREAD_SEM_ALLOC() failed", sys_sem_valid(LWIP_NETCONN_THREAD_SEM_GET())); + } +} + +void +netconn_thread_cleanup(void) +{ + sys_sem_t *sem = LWIP_NETCONN_THREAD_SEM_GET(); + if ((sem != NULL) && sys_sem_valid(sem)) { + /* call free only once */ + LWIP_NETCONN_THREAD_SEM_FREE(); + } +} +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +#endif /* LWIP_NETCONN */ diff --git a/Middlewares/Third_Party/LwIP/src/api/api_msg.c b/Middlewares/Third_Party/LwIP/src/api/api_msg.c new file mode 100644 index 00000000..39531024 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/api_msg.c @@ -0,0 +1,2173 @@ +/** + * @file + * Sequential API Internal module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/api_msg.h" + +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/udp.h" +#include "lwip/tcp.h" +#include "lwip/raw.h" + +#include "lwip/memp.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/mld6.h" +#include "lwip/priv/tcpip_priv.h" + +#include <string.h> + +/* netconns are polled once per second (e.g. continue write on memory error) */ +#define NETCONN_TCP_POLL_INTERVAL 2 + +#define SET_NONBLOCKING_CONNECT(conn, val) do { if (val) { \ + netconn_set_flags(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT); \ +} else { \ + netconn_clear_flags(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT); }} while(0) +#define IN_NONBLOCKING_CONNECT(conn) netconn_is_flag_set(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT) + +#if LWIP_NETCONN_FULLDUPLEX +#define NETCONN_MBOX_VALID(conn, mbox) (sys_mbox_valid(mbox) && ((conn->flags & NETCONN_FLAG_MBOXINVALID) == 0)) +#else +#define NETCONN_MBOX_VALID(conn, mbox) sys_mbox_valid(mbox) +#endif + +/* forward declarations */ +#if LWIP_TCP +#if LWIP_TCPIP_CORE_LOCKING +#define WRITE_DELAYED , 1 +#define WRITE_DELAYED_PARAM , u8_t delayed +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define WRITE_DELAYED +#define WRITE_DELAYED_PARAM +#endif /* LWIP_TCPIP_CORE_LOCKING */ +static err_t lwip_netconn_do_writemore(struct netconn *conn WRITE_DELAYED_PARAM); +static err_t lwip_netconn_do_close_internal(struct netconn *conn WRITE_DELAYED_PARAM); +#endif + +static void netconn_drain(struct netconn *conn); + +#if LWIP_TCPIP_CORE_LOCKING +#define TCPIP_APIMSG_ACK(m) +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define TCPIP_APIMSG_ACK(m) do { sys_sem_signal(LWIP_API_MSG_SEM(m)); } while(0) +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_NETCONN_FULLDUPLEX +const u8_t netconn_deleted = 0; + +int +lwip_netconn_is_deallocated_msg(void *msg) +{ + if (msg == &netconn_deleted) { + return 1; + } + return 0; +} +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +#if LWIP_TCP +const u8_t netconn_aborted = 0; +const u8_t netconn_reset = 0; +const u8_t netconn_closed = 0; + +/** Translate an error to a unique void* passed via an mbox */ +static void * +lwip_netconn_err_to_msg(err_t err) +{ + switch (err) { + case ERR_ABRT: + return LWIP_CONST_CAST(void *, &netconn_aborted); + case ERR_RST: + return LWIP_CONST_CAST(void *, &netconn_reset); + case ERR_CLSD: + return LWIP_CONST_CAST(void *, &netconn_closed); + default: + LWIP_ASSERT("unhandled error", err == ERR_OK); + return NULL; + } +} + +int +lwip_netconn_is_err_msg(void 
*msg, err_t *err) +{ + LWIP_ASSERT("err != NULL", err != NULL); + + if (msg == &netconn_aborted) { + *err = ERR_ABRT; + return 1; + } else if (msg == &netconn_reset) { + *err = ERR_RST; + return 1; + } else if (msg == &netconn_closed) { + *err = ERR_CLSD; + return 1; + } + return 0; +} +#endif /* LWIP_TCP */ + + +#if LWIP_RAW +/** + * Receive callback function for RAW netconns. + * Doesn't 'eat' the packet, only copies it and sends it to + * conn->recvmbox + * + * @see raw.h (struct raw_pcb.recv) for parameters and return value + */ +static u8_t +recv_raw(void *arg, struct raw_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr) +{ + struct pbuf *q; + struct netbuf *buf; + struct netconn *conn; + + LWIP_UNUSED_ARG(addr); + conn = (struct netconn *)arg; + + if ((conn != NULL) && NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { +#if LWIP_SO_RCVBUF + int recv_avail; + SYS_ARCH_GET(conn->recv_avail, recv_avail); + if ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize) { + return 0; + } +#endif /* LWIP_SO_RCVBUF */ + /* copy the whole packet into new pbufs */ + q = pbuf_clone(PBUF_RAW, PBUF_RAM, p); + if (q != NULL) { + u16_t len; + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + pbuf_free(q); + return 0; + } + + buf->p = q; + buf->ptr = q; + ip_addr_copy(buf->addr, *ip_current_src_addr()); + buf->port = pcb->protocol; + + len = q->tot_len; + if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) { + netbuf_delete(buf); + return 0; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } + } + } + + return 0; /* do not eat the packet */ +} +#endif /* LWIP_RAW*/ + +#if LWIP_UDP +/** + * Receive callback function for UDP netconns. + * Posts the packet to conn->recvmbox or deletes it on memory error. + * + * @see udp.h (struct udp_pcb.recv) for parameters + */ +static void +recv_udp(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port) +{ + struct netbuf *buf; + struct netconn *conn; + u16_t len; +#if LWIP_SO_RCVBUF + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ + + LWIP_UNUSED_ARG(pcb); /* only used for asserts... 
*/ + LWIP_ASSERT("recv_udp must have a pcb argument", pcb != NULL); + LWIP_ASSERT("recv_udp must have an argument", arg != NULL); + conn = (struct netconn *)arg; + + if (conn == NULL) { + pbuf_free(p); + return; + } + + LWIP_ASSERT("recv_udp: recv for wrong pcb!", conn->pcb.udp == pcb); + +#if LWIP_SO_RCVBUF + SYS_ARCH_GET(conn->recv_avail, recv_avail); + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox) || + ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize)) { +#else /* LWIP_SO_RCVBUF */ + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { +#endif /* LWIP_SO_RCVBUF */ + pbuf_free(p); + return; + } + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + pbuf_free(p); + return; + } else { + buf->p = p; + buf->ptr = p; + ip_addr_set(&buf->addr, addr); + buf->port = port; +#if LWIP_NETBUF_RECVINFO + if (conn->flags & NETCONN_FLAG_PKTINFO) { + /* get the UDP header - always in the first pbuf, ensured by udp_input */ + const struct udp_hdr *udphdr = (const struct udp_hdr *)ip_next_header_ptr(); + buf->flags = NETBUF_FLAG_DESTADDR; + ip_addr_set(&buf->toaddr, ip_current_dest_addr()); + buf->toport_chksum = udphdr->dest; + } +#endif /* LWIP_NETBUF_RECVINFO */ + } + + len = p->tot_len; + if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) { + netbuf_delete(buf); + return; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } +} +#endif /* LWIP_UDP */ + +#if LWIP_TCP +/** + * Receive callback function for TCP netconns. + * Posts the packet to conn->recvmbox, but doesn't delete it on errors. + * + * @see tcp.h (struct tcp_pcb.recv) for parameters and return value + */ +static err_t +recv_tcp(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) +{ + struct netconn *conn; + u16_t len; + void *msg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("recv_tcp must have a pcb argument", pcb != NULL); + LWIP_ASSERT("recv_tcp must have an argument", arg != NULL); + LWIP_ASSERT("err != ERR_OK unhandled", err == ERR_OK); + LWIP_UNUSED_ARG(err); /* for LWIP_NOASSERT */ + conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + LWIP_ASSERT("recv_tcp: recv for wrong pcb!", conn->pcb.tcp == pcb); + + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { + /* recvmbox already deleted */ + if (p != NULL) { + tcp_recved(pcb, p->tot_len); + pbuf_free(p); + } + return ERR_OK; + } + /* Unlike for UDP or RAW pcbs, don't check for available space + using recv_avail since that could break the connection + (data is already ACKed) */ + + if (p != NULL) { + msg = p; + len = p->tot_len; + } else { + msg = LWIP_CONST_CAST(void *, &netconn_closed); + len = 0; + } + + if (sys_mbox_trypost(&conn->recvmbox, msg) != ERR_OK) { + /* don't deallocate p: it is presented to us later again from tcp_fasttmr! */ + return ERR_MEM; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } + + return ERR_OK; +} + +/** + * Poll callback function for TCP netconns. + * Wakes up an application thread that waits for a connection to close + * or data to be sent. The application thread then takes the + * appropriate action to go on. + * + * Signals the conn->sem. + * netconn_close waits for conn->sem if closing failed. 
+ * + * @see tcp.h (struct tcp_pcb.poll) for parameters and return value + */ +static err_t +poll_tcp(void *arg, struct tcp_pcb *pcb) +{ + struct netconn *conn = (struct netconn *)arg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + if (conn->state == NETCONN_WRITE) { + lwip_netconn_do_writemore(conn WRITE_DELAYED); + } else if (conn->state == NETCONN_CLOSE) { +#if !LWIP_SO_SNDTIMEO && !LWIP_SO_LINGER + if (conn->current_msg && conn->current_msg->msg.sd.polls_left) { + conn->current_msg->msg.sd.polls_left--; + } +#endif /* !LWIP_SO_SNDTIMEO && !LWIP_SO_LINGER */ + lwip_netconn_do_close_internal(conn WRITE_DELAYED); + } + /* @todo: implement connect timeout here? */ + + /* Did a nonblocking write fail before? Then check available write-space. */ + if (conn->flags & NETCONN_FLAG_CHECK_WRITESPACE) { + /* If the queued byte- or pbuf-count drops below the configured low-water limit, + let select mark this pcb as writable again. */ + if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) && + (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) { + netconn_clear_flags(conn, NETCONN_FLAG_CHECK_WRITESPACE); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + } + } + + return ERR_OK; +} + +/** + * Sent callback function for TCP netconns. + * Signals the conn->sem and calls API_EVENT. + * netconn_write waits for conn->sem if send buffer is low. + * + * @see tcp.h (struct tcp_pcb.sent) for parameters and return value + */ +static err_t +sent_tcp(void *arg, struct tcp_pcb *pcb, u16_t len) +{ + struct netconn *conn = (struct netconn *)arg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + if (conn) { + if (conn->state == NETCONN_WRITE) { + lwip_netconn_do_writemore(conn WRITE_DELAYED); + } else if (conn->state == NETCONN_CLOSE) { + lwip_netconn_do_close_internal(conn WRITE_DELAYED); + } + + /* If the queued byte- or pbuf-count drops below the configured low-water limit, + let select mark this pcb as writable again. */ + if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) && + (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) { + netconn_clear_flags(conn, NETCONN_FLAG_CHECK_WRITESPACE); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, len); + } + } + + return ERR_OK; +} + +/** + * Error callback function for TCP netconns. + * Signals conn->sem, posts to all conn mboxes and calls API_EVENT. + * The application thread has then to decide what to do. + * + * @see tcp.h (struct tcp_pcb.err) for parameters + */ +static void +err_tcp(void *arg, err_t err) +{ + struct netconn *conn; + enum netconn_state old_state; + void *mbox_msg; + SYS_ARCH_DECL_PROTECT(lev); + + conn = (struct netconn *)arg; + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + SYS_ARCH_PROTECT(lev); + + /* when err is called, the pcb is deallocated, so delete the reference */ + conn->pcb.tcp = NULL; + /* store pending error */ + conn->pending_err = err; + /* prevent application threads from blocking on 'recvmbox'/'acceptmbox' */ + conn->flags |= NETCONN_FLAG_MBOXCLOSED; + + /* reset conn->state now before waking up other threads */ + old_state = conn->state; + conn->state = NETCONN_NONE; + + SYS_ARCH_UNPROTECT(lev); + + /* Notify the user layer about a connection error. Used to signal select. */ + API_EVENT(conn, NETCONN_EVT_ERROR, 0); + /* Try to release selects pending on 'read' or 'write', too. + They will get an error if they actually try to read or write. 
*/ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + + mbox_msg = lwip_netconn_err_to_msg(err); + /* pass error message to recvmbox to wake up pending recv */ + if (NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { + /* use trypost to prevent deadlock */ + sys_mbox_trypost(&conn->recvmbox, mbox_msg); + } + /* pass error message to acceptmbox to wake up pending accept */ + if (NETCONN_MBOX_VALID(conn, &conn->acceptmbox)) { + /* use trypost to preven deadlock */ + sys_mbox_trypost(&conn->acceptmbox, mbox_msg); + } + + if ((old_state == NETCONN_WRITE) || (old_state == NETCONN_CLOSE) || + (old_state == NETCONN_CONNECT)) { + /* calling lwip_netconn_do_writemore/lwip_netconn_do_close_internal is not necessary + since the pcb has already been deleted! */ + int was_nonblocking_connect = IN_NONBLOCKING_CONNECT(conn); + SET_NONBLOCKING_CONNECT(conn, 0); + + if (!was_nonblocking_connect) { + sys_sem_t *op_completed_sem; + /* set error return code */ + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + if (old_state == NETCONN_CLOSE) { + /* let close succeed: the connection is closed after all... */ + conn->current_msg->err = ERR_OK; + } else { + /* Write and connect fail */ + conn->current_msg->err = err; + } + op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + LWIP_ASSERT("inavlid op_completed_sem", sys_sem_valid(op_completed_sem)); + conn->current_msg = NULL; + /* wake up the waiting task */ + sys_sem_signal(op_completed_sem); + } else { + /* @todo: test what happens for error on nonblocking connect */ + } + } else { + LWIP_ASSERT("conn->current_msg == NULL", conn->current_msg == NULL); + } +} + +/** + * Setup a tcp_pcb with the correct callback function pointers + * and their arguments. + * + * @param conn the TCP netconn to setup + */ +static void +setup_tcp(struct netconn *conn) +{ + struct tcp_pcb *pcb; + + pcb = conn->pcb.tcp; + tcp_arg(pcb, conn); + tcp_recv(pcb, recv_tcp); + tcp_sent(pcb, sent_tcp); + tcp_poll(pcb, poll_tcp, NETCONN_TCP_POLL_INTERVAL); + tcp_err(pcb, err_tcp); +} + +/** + * Accept callback function for TCP netconns. + * Allocates a new netconn and posts that to conn->acceptmbox. + * + * @see tcp.h (struct tcp_pcb_listen.accept) for parameters and return value + */ +static err_t +accept_function(void *arg, struct tcp_pcb *newpcb, err_t err) +{ + struct netconn *newconn; + struct netconn *conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + if (!NETCONN_MBOX_VALID(conn, &conn->acceptmbox)) { + LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: acceptmbox already deleted\n")); + return ERR_VAL; + } + + if (newpcb == NULL) { + /* out-of-pcbs during connect: pass on this error to the application */ + if (sys_mbox_trypost(&conn->acceptmbox, lwip_netconn_err_to_msg(ERR_ABRT)) == ERR_OK) { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + return ERR_VAL; + } + LWIP_ASSERT("expect newpcb == NULL or err == ERR_OK", err == ERR_OK); + LWIP_UNUSED_ARG(err); /* for LWIP_NOASSERT */ + + LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: newpcb->state: %s\n", tcp_debug_state_str(newpcb->state))); + + /* We have to set the callback here even though + * the new socket is unknown. newconn->socket is marked as -1. 
*/ + newconn = netconn_alloc(conn->type, conn->callback); + if (newconn == NULL) { + /* outof netconns: pass on this error to the application */ + if (sys_mbox_trypost(&conn->acceptmbox, lwip_netconn_err_to_msg(ERR_ABRT)) == ERR_OK) { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + return ERR_MEM; + } + newconn->pcb.tcp = newpcb; + setup_tcp(newconn); + + /* handle backlog counter */ + tcp_backlog_delayed(newpcb); + + if (sys_mbox_trypost(&conn->acceptmbox, newconn) != ERR_OK) { + /* When returning != ERR_OK, the pcb is aborted in tcp_process(), + so do nothing here! */ + /* remove all references to this netconn from the pcb */ + struct tcp_pcb *pcb = newconn->pcb.tcp; + tcp_arg(pcb, NULL); + tcp_recv(pcb, NULL); + tcp_sent(pcb, NULL); + tcp_poll(pcb, NULL, 0); + tcp_err(pcb, NULL); + /* remove reference from to the pcb from this netconn */ + newconn->pcb.tcp = NULL; + /* no need to drain since we know the recvmbox is empty. */ + sys_mbox_free(&newconn->recvmbox); + sys_mbox_set_invalid(&newconn->recvmbox); + netconn_free(newconn); + return ERR_MEM; + } else { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Create a new pcb of a specific type. + * Called from lwip_netconn_do_newconn(). + * + * @param msg the api_msg describing the connection type + */ +static void +pcb_new(struct api_msg *msg) +{ + enum lwip_ip_addr_type iptype = IPADDR_TYPE_V4; + + LWIP_ASSERT("pcb_new: pcb already allocated", msg->conn->pcb.tcp == NULL); + +#if LWIP_IPV6 && LWIP_IPV4 + /* IPv6: Dual-stack by default, unless netconn_set_ipv6only() is called */ + if (NETCONNTYPE_ISIPV6(netconn_type(msg->conn))) { + iptype = IPADDR_TYPE_ANY; + } +#endif + + /* Allocate a PCB for this connection */ + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + msg->conn->pcb.raw = raw_new_ip_type(iptype, msg->msg.n.proto); + if (msg->conn->pcb.raw != NULL) { +#if LWIP_IPV6 + /* ICMPv6 packets should always have checksum calculated by the stack as per RFC 3542 chapter 3.1 */ + if (NETCONNTYPE_ISIPV6(msg->conn->type) && msg->conn->pcb.raw->protocol == IP6_NEXTH_ICMP6) { + msg->conn->pcb.raw->chksum_reqd = 1; + msg->conn->pcb.raw->chksum_offset = 2; + } +#endif /* LWIP_IPV6 */ + raw_recv(msg->conn->pcb.raw, recv_raw, msg->conn); + } + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + msg->conn->pcb.udp = udp_new_ip_type(iptype); + if (msg->conn->pcb.udp != NULL) { +#if LWIP_UDPLITE + if (NETCONNTYPE_ISUDPLITE(msg->conn->type)) { + udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_UDPLITE); + } +#endif /* LWIP_UDPLITE */ + if (NETCONNTYPE_ISUDPNOCHKSUM(msg->conn->type)) { + udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } + udp_recv(msg->conn->pcb.udp, recv_udp, msg->conn); + } + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + msg->conn->pcb.tcp = tcp_new_ip_type(iptype); + if (msg->conn->pcb.tcp != NULL) { + setup_tcp(msg->conn); + } + break; +#endif /* LWIP_TCP */ + default: + /* Unsupported netconn type, e.g. protocol disabled */ + msg->err = ERR_VAL; + return; + } + if (msg->conn->pcb.ip == NULL) { + msg->err = ERR_MEM; + } +} + +/** + * Create a new pcb of a specific type inside a netconn. + * Called from netconn_new_with_proto_and_callback. 
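+ *
+ * Editor's illustrative note (not part of the upstream lwIP sources): this
+ * handler is an internal step of the sequential API; application code reaches
+ * it through netconn_new(), e.g. (fragment, error handling kept minimal):
+ *
+ *   struct netconn *conn = netconn_new(NETCONN_TCP);
+ *   if (conn == NULL) {
+ *     return;    NULL means out of memory: no netconn and no pcb were allocated
+ *   }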
+ * + * @param m the api_msg describing the connection type + */ +void +lwip_netconn_do_newconn(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp == NULL) { + pcb_new(msg); + } + /* Else? This "new" connection already has a PCB allocated. */ + /* Is this an error condition? Should it be deleted? */ + /* We currently just are happy and return. */ + + TCPIP_APIMSG_ACK(msg); +} + +/** + * Create a new netconn (of a specific type) that has a callback function. + * The corresponding pcb is NOT created! + * + * @param t the type of 'connection' to create (@see enum netconn_type) + * @param callback a function to call on status changes (RX available, TX'ed) + * @return a newly allocated struct netconn or + * NULL on memory error + */ +struct netconn * +netconn_alloc(enum netconn_type t, netconn_callback callback) +{ + struct netconn *conn; + int size; + u8_t init_flags = 0; + + conn = (struct netconn *)memp_malloc(MEMP_NETCONN); + if (conn == NULL) { + return NULL; + } + + conn->pending_err = ERR_OK; + conn->type = t; + conn->pcb.tcp = NULL; + + /* If all sizes are the same, every compiler should optimize this switch to nothing */ + switch (NETCONNTYPE_GROUP(t)) { +#if LWIP_RAW + case NETCONN_RAW: + size = DEFAULT_RAW_RECVMBOX_SIZE; + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + size = DEFAULT_UDP_RECVMBOX_SIZE; +#if LWIP_NETBUF_RECVINFO + init_flags |= NETCONN_FLAG_PKTINFO; +#endif /* LWIP_NETBUF_RECVINFO */ + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + size = DEFAULT_TCP_RECVMBOX_SIZE; + break; +#endif /* LWIP_TCP */ + default: + LWIP_ASSERT("netconn_alloc: undefined netconn_type", 0); + goto free_and_return; + } + + if (sys_mbox_new(&conn->recvmbox, size) != ERR_OK) { + goto free_and_return; + } +#if !LWIP_NETCONN_SEM_PER_THREAD + if (sys_sem_new(&conn->op_completed, 0) != ERR_OK) { + sys_mbox_free(&conn->recvmbox); + goto free_and_return; + } +#endif + +#if LWIP_TCP + sys_mbox_set_invalid(&conn->acceptmbox); +#endif + conn->state = NETCONN_NONE; +#if LWIP_SOCKET + /* initialize socket to -1 since 0 is a valid socket */ + conn->socket = -1; +#endif /* LWIP_SOCKET */ + conn->callback = callback; +#if LWIP_TCP + conn->current_msg = NULL; +#endif /* LWIP_TCP */ +#if LWIP_SO_SNDTIMEO + conn->send_timeout = 0; +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + conn->recv_timeout = 0; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + conn->recv_bufsize = RECV_BUFSIZE_DEFAULT; + conn->recv_avail = 0; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + conn->linger = -1; +#endif /* LWIP_SO_LINGER */ + conn->flags = init_flags; + return conn; +free_and_return: + memp_free(MEMP_NETCONN, conn); + return NULL; +} + +/** + * Delete a netconn and all its resources. + * The pcb is NOT freed (since we might not be in the right thread context do this). 
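+ *
+ * Editor's illustrative note (not part of the upstream lwIP sources):
+ * applications do not call netconn_free() directly; the public entry point is
+ * netconn_delete(), which removes the pcb in the tcpip thread first, e.g.:
+ *
+ *   netconn_close(conn);    optional graceful TCP close
+ *   netconn_delete(conn);   frees the netconn; conn must not be used afterwards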
+ * + * @param conn the netconn to free + */ +void +netconn_free(struct netconn *conn) +{ + LWIP_ASSERT("PCB must be deallocated outside this function", conn->pcb.tcp == NULL); + +#if LWIP_NETCONN_FULLDUPLEX + /* in fullduplex, netconn is drained here */ + netconn_drain(conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + LWIP_ASSERT("recvmbox must be deallocated before calling this function", + !sys_mbox_valid(&conn->recvmbox)); +#if LWIP_TCP + LWIP_ASSERT("acceptmbox must be deallocated before calling this function", + !sys_mbox_valid(&conn->acceptmbox)); +#endif /* LWIP_TCP */ + +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(&conn->op_completed); + sys_sem_set_invalid(&conn->op_completed); +#endif + + memp_free(MEMP_NETCONN, conn); +} + +/** + * Delete rcvmbox and acceptmbox of a netconn and free the left-over data in + * these mboxes + * + * @param conn the netconn to free + * @bytes_drained bytes drained from recvmbox + * @accepts_drained pending connections drained from acceptmbox + */ +static void +netconn_drain(struct netconn *conn) +{ + void *mem; + + /* This runs when mbox and netconn are marked as closed, + so we don't need to lock against rx packets */ +#if LWIP_NETCONN_FULLDUPLEX + LWIP_ASSERT("netconn marked closed", conn->flags & NETCONN_FLAG_MBOXINVALID); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + /* Delete and drain the recvmbox. */ + if (sys_mbox_valid(&conn->recvmbox)) { + while (sys_mbox_tryfetch(&conn->recvmbox, &mem) != SYS_MBOX_EMPTY) { +#if LWIP_NETCONN_FULLDUPLEX + if (!lwip_netconn_is_deallocated_msg(mem)) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { +#if LWIP_TCP + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) { + err_t err; + if (!lwip_netconn_is_err_msg(mem, &err)) { + pbuf_free((struct pbuf *)mem); + } + } else +#endif /* LWIP_TCP */ + { + netbuf_delete((struct netbuf *)mem); + } + } + } + sys_mbox_free(&conn->recvmbox); + sys_mbox_set_invalid(&conn->recvmbox); + } + + /* Delete and drain the acceptmbox. */ +#if LWIP_TCP + if (sys_mbox_valid(&conn->acceptmbox)) { + while (sys_mbox_tryfetch(&conn->acceptmbox, &mem) != SYS_MBOX_EMPTY) { +#if LWIP_NETCONN_FULLDUPLEX + if (!lwip_netconn_is_deallocated_msg(mem)) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + err_t err; + if (!lwip_netconn_is_err_msg(mem, &err)) { + struct netconn *newconn = (struct netconn *)mem; + /* Only tcp pcbs have an acceptmbox, so no need to check conn->type */ + /* pcb might be set to NULL already by err_tcp() */ + /* drain recvmbox */ + netconn_drain(newconn); + if (newconn->pcb.tcp != NULL) { + tcp_abort(newconn->pcb.tcp); + newconn->pcb.tcp = NULL; + } + netconn_free(newconn); + } + } + } + sys_mbox_free(&conn->acceptmbox); + sys_mbox_set_invalid(&conn->acceptmbox); + } +#endif /* LWIP_TCP */ +} + +#if LWIP_NETCONN_FULLDUPLEX +static void +netconn_mark_mbox_invalid(struct netconn *conn) +{ + int i, num_waiting; + void *msg = LWIP_CONST_CAST(void *, &netconn_deleted); + + /* Prevent new calls/threads from reading from the mbox */ + conn->flags |= NETCONN_FLAG_MBOXINVALID; + + SYS_ARCH_LOCKED(num_waiting = conn->mbox_threads_waiting); + for (i = 0; i < num_waiting; i++) { + if (sys_mbox_valid_val(conn->recvmbox)) { + sys_mbox_trypost(&conn->recvmbox, msg); + } else { + sys_mbox_trypost(&conn->acceptmbox, msg); + } + } +} +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +#if LWIP_TCP +/** + * Internal helper function to close a TCP netconn: since this sometimes + * doesn't work at the first attempt, this function is called from multiple + * places. 
+ * + * @param conn the TCP netconn to close + */ +static err_t +lwip_netconn_do_close_internal(struct netconn *conn WRITE_DELAYED_PARAM) +{ + err_t err; + u8_t shut, shut_rx, shut_tx, shut_close; + u8_t close_finished = 0; + struct tcp_pcb *tpcb; +#if LWIP_SO_LINGER + u8_t linger_wait_required = 0; +#endif /* LWIP_SO_LINGER */ + + LWIP_ASSERT("invalid conn", (conn != NULL)); + LWIP_ASSERT("this is for tcp netconns only", (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP)); + LWIP_ASSERT("conn must be in state NETCONN_CLOSE", (conn->state == NETCONN_CLOSE)); + LWIP_ASSERT("pcb already closed", (conn->pcb.tcp != NULL)); + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + + tpcb = conn->pcb.tcp; + shut = conn->current_msg->msg.sd.shut; + shut_rx = shut & NETCONN_SHUT_RD; + shut_tx = shut & NETCONN_SHUT_WR; + /* shutting down both ends is the same as closing + (also if RD or WR side was shut down before already) */ + if (shut == NETCONN_SHUT_RDWR) { + shut_close = 1; + } else if (shut_rx && + ((tpcb->state == FIN_WAIT_1) || + (tpcb->state == FIN_WAIT_2) || + (tpcb->state == CLOSING))) { + shut_close = 1; + } else if (shut_tx && ((tpcb->flags & TF_RXCLOSED) != 0)) { + shut_close = 1; + } else { + shut_close = 0; + } + + /* Set back some callback pointers */ + if (shut_close) { + tcp_arg(tpcb, NULL); + } + if (tpcb->state == LISTEN) { + tcp_accept(tpcb, NULL); + } else { + /* some callbacks have to be reset if tcp_close is not successful */ + if (shut_rx) { + tcp_recv(tpcb, NULL); + tcp_accept(tpcb, NULL); + } + if (shut_tx) { + tcp_sent(tpcb, NULL); + } + if (shut_close) { + tcp_poll(tpcb, NULL, 0); + tcp_err(tpcb, NULL); + } + } + /* Try to close the connection */ + if (shut_close) { +#if LWIP_SO_LINGER + /* check linger possibilites before calling tcp_close */ + err = ERR_OK; + /* linger enabled/required at all? (i.e. is there untransmitted data left?) */ + if ((conn->linger >= 0) && (conn->pcb.tcp->unsent || conn->pcb.tcp->unacked)) { + if ((conn->linger == 0)) { + /* data left but linger prevents waiting */ + tcp_abort(tpcb); + tpcb = NULL; + } else if (conn->linger > 0) { + /* data left and linger says we should wait */ + if (netconn_is_nonblocking(conn)) { + /* data left on a nonblocking netconn -> cannot linger */ + err = ERR_WOULDBLOCK; + } else if ((s32_t)(sys_now() - conn->current_msg->msg.sd.time_started) >= + (conn->linger * 1000)) { + /* data left but linger timeout has expired (this happens on further + calls to this function through poll_tcp */ + tcp_abort(tpcb); + tpcb = NULL; + } else { + /* data left -> need to wait for ACK after successful close */ + linger_wait_required = 1; + } + } + } + if ((err == ERR_OK) && (tpcb != NULL)) +#endif /* LWIP_SO_LINGER */ + { + err = tcp_close(tpcb); + } + } else { + err = tcp_shutdown(tpcb, shut_rx, shut_tx); + } + if (err == ERR_OK) { + close_finished = 1; +#if LWIP_SO_LINGER + if (linger_wait_required) { + /* wait for ACK of all unsent/unacked data by just getting called again */ + close_finished = 0; + err = ERR_INPROGRESS; + } +#endif /* LWIP_SO_LINGER */ + } else { + if (err == ERR_MEM) { + /* Closing failed because of memory shortage, try again later. Even for + nonblocking netconns, we have to wait since no standard socket application + is prepared for close failing because of resource shortage. 
+ Check the timeout: this is kind of an lwip addition to the standard sockets: + we wait for some time when failing to allocate a segment for the FIN */ +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + s32_t close_timeout = LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT; +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout > 0) { + close_timeout = conn->send_timeout; + } +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_LINGER + if (conn->linger >= 0) { + /* use linger timeout (seconds) */ + close_timeout = conn->linger * 1000U; + } +#endif + if ((s32_t)(sys_now() - conn->current_msg->msg.sd.time_started) >= close_timeout) { +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + if (conn->current_msg->msg.sd.polls_left == 0) { +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + close_finished = 1; + if (shut_close) { + /* in this case, we want to RST the connection */ + tcp_abort(tpcb); + err = ERR_OK; + } + } + } else { + /* Closing failed for a non-memory error: give up */ + close_finished = 1; + } + } + if (close_finished) { + /* Closing done (succeeded, non-memory error, nonblocking error or timeout) */ + sys_sem_t *op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + conn->current_msg->err = err; + conn->current_msg = NULL; + conn->state = NETCONN_NONE; + if (err == ERR_OK) { + if (shut_close) { + /* Set back some callback pointers as conn is going away */ + conn->pcb.tcp = NULL; + /* Trigger select() in socket layer. Make sure everybody notices activity + on the connection, error first! */ + API_EVENT(conn, NETCONN_EVT_ERROR, 0); + } + if (shut_rx) { + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + if (shut_tx) { + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + } + } +#if LWIP_TCPIP_CORE_LOCKING + if (delayed) +#endif + { + /* wake up the application task */ + sys_sem_signal(op_completed_sem); + } + return ERR_OK; + } + if (!close_finished) { + /* Closing failed and we want to wait: restore some of the callbacks */ + /* Closing of listen pcb will never fail! */ + LWIP_ASSERT("Closing a listen pcb may not fail!", (tpcb->state != LISTEN)); + if (shut_tx) { + tcp_sent(tpcb, sent_tcp); + } + /* when waiting for close, set up poll interval to 500ms */ + tcp_poll(tpcb, poll_tcp, 1); + tcp_err(tpcb, err_tcp); + tcp_arg(tpcb, conn); + /* don't restore recv callback: we don't want to receive any more data */ + } + /* If closing didn't succeed, we get called again either + from poll_tcp or from sent_tcp */ + LWIP_ASSERT("err != ERR_OK", err != ERR_OK); + return err; +} +#endif /* LWIP_TCP */ + +/** + * Delete the pcb inside a netconn. + * Called from netconn_delete. 
+ * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_delconn(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + enum netconn_state state = msg->conn->state; + LWIP_ASSERT("netconn state error", /* this only happens for TCP netconns */ + (state == NETCONN_NONE) || (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP)); +#if LWIP_NETCONN_FULLDUPLEX + /* In full duplex mode, blocking write/connect is aborted with ERR_CLSD */ + if (state != NETCONN_NONE) { + if ((state == NETCONN_WRITE) || + ((state == NETCONN_CONNECT) && !IN_NONBLOCKING_CONNECT(msg->conn))) { + /* close requested, abort running write/connect */ + sys_sem_t *op_completed_sem; + LWIP_ASSERT("msg->conn->current_msg != NULL", msg->conn->current_msg != NULL); + op_completed_sem = LWIP_API_MSG_SEM(msg->conn->current_msg); + msg->conn->current_msg->err = ERR_CLSD; + msg->conn->current_msg = NULL; + msg->conn->state = NETCONN_NONE; + sys_sem_signal(op_completed_sem); + } + } +#else /* LWIP_NETCONN_FULLDUPLEX */ + if (((state != NETCONN_NONE) && + (state != NETCONN_LISTEN) && + (state != NETCONN_CONNECT)) || + ((state == NETCONN_CONNECT) && !IN_NONBLOCKING_CONNECT(msg->conn))) { + /* This means either a blocking write or blocking connect is running + (nonblocking write returns and sets state to NONE) */ + msg->err = ERR_INPROGRESS; + } else +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + LWIP_ASSERT("blocking connect in progress", + (state != NETCONN_CONNECT) || IN_NONBLOCKING_CONNECT(msg->conn)); + msg->err = ERR_OK; +#if LWIP_NETCONN_FULLDUPLEX + /* Mark mboxes invalid */ + netconn_mark_mbox_invalid(msg->conn); +#else /* LWIP_NETCONN_FULLDUPLEX */ + netconn_drain(msg->conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + if (msg->conn->pcb.tcp != NULL) { + + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + raw_remove(msg->conn->pcb.raw); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + msg->conn->pcb.udp->recv_arg = NULL; + udp_remove(msg->conn->pcb.udp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + msg->conn->state = NETCONN_CLOSE; + msg->msg.sd.shut = NETCONN_SHUT_RDWR; + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_close_internal(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CLOSE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_close_internal(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* API_EVENT is called inside lwip_netconn_do_close_internal, before releasing + the application thread, so we can return at this point! */ + return; +#endif /* LWIP_TCP */ + default: + break; + } + msg->conn->pcb.tcp = NULL; + } + /* tcp netconns don't come here! */ + + /* @todo: this lets select make the socket readable and writable, + which is wrong! errfd instead? */ + API_EVENT(msg->conn, NETCONN_EVT_RCVPLUS, 0); + API_EVENT(msg->conn, NETCONN_EVT_SENDPLUS, 0); + } + if (sys_sem_valid(LWIP_API_MSG_SEM(msg))) { + TCPIP_APIMSG_ACK(msg); + } +} + +/** + * Bind a pcb contained in a netconn + * Called from netconn_bind. 
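+ *
+ * Editor's illustrative note (not part of the upstream lwIP sources): the
+ * application-side call is netconn_bind(); a minimal sketch (the address and
+ * port below are arbitrary placeholders):
+ *
+ *   err_t err = netconn_bind(conn, IP_ADDR_ANY, 7);
+ *   LWIP_ASSERT("bind failed (e.g. ERR_USE: port already bound)", err == ERR_OK);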
+ * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to bind to + */ +void +lwip_netconn_do_bind(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp != NULL) { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + err = raw_bind(msg->conn->pcb.raw, API_EXPR_REF(msg->msg.bc.ipaddr)); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + err = udp_bind(msg->conn->pcb.udp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + err = tcp_bind(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_TCP */ + default: + err = ERR_VAL; + break; + } + } else { + err = ERR_VAL; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} +/** + * Bind a pcb contained in a netconn to an interface + * Called from netconn_bind_if. + * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to bind to + */ +void +lwip_netconn_do_bind_if(void *m) +{ + struct netif *netif; + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + netif = netif_get_by_index(msg->msg.bc.if_idx); + + if ((netif != NULL) && (msg->conn->pcb.tcp != NULL)) { + err = ERR_OK; + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + raw_bind_netif(msg->conn->pcb.raw, netif); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + udp_bind_netif(msg->conn->pcb.udp, netif); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + tcp_bind_netif(msg->conn->pcb.tcp, netif); + break; +#endif /* LWIP_TCP */ + default: + err = ERR_VAL; + break; + } + } else { + err = ERR_VAL; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * TCP callback function if a connection (opened by tcp_connect/lwip_netconn_do_connect) has + * been established (or reset by the remote host). + * + * @see tcp.h (struct tcp_pcb.connected) for parameters and return values + */ +static err_t +lwip_netconn_do_connected(void *arg, struct tcp_pcb *pcb, err_t err) +{ + struct netconn *conn; + int was_blocking; + sys_sem_t *op_completed_sem = NULL; + + LWIP_UNUSED_ARG(pcb); + + conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + + LWIP_ASSERT("conn->state == NETCONN_CONNECT", conn->state == NETCONN_CONNECT); + LWIP_ASSERT("(conn->current_msg != NULL) || conn->in_non_blocking_connect", + (conn->current_msg != NULL) || IN_NONBLOCKING_CONNECT(conn)); + + if (conn->current_msg != NULL) { + conn->current_msg->err = err; + op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + } + if ((NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) && (err == ERR_OK)) { + setup_tcp(conn); + } + was_blocking = !IN_NONBLOCKING_CONNECT(conn); + SET_NONBLOCKING_CONNECT(conn, 0); + LWIP_ASSERT("blocking connect state error", + (was_blocking && op_completed_sem != NULL) || + (!was_blocking && op_completed_sem == NULL)); + conn->current_msg = NULL; + conn->state = NETCONN_NONE; + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + + if (was_blocking) { + sys_sem_signal(op_completed_sem); + } + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Connect a pcb contained inside a netconn + * Called from netconn_connect. 
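+ *
+ * Editor's illustrative note (not part of the upstream lwIP sources): the
+ * application-side call is netconn_connect(); a blocking sketch (remote
+ * address and port are placeholders):
+ *
+ *   ip_addr_t remote;
+ *   ipaddr_aton("192.168.1.10", &remote);
+ *   err_t err = netconn_connect(conn, &remote, 80);
+ *   (with a nonblocking netconn this returns ERR_INPROGRESS instead of waiting)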
+ * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to connect to + */ +void +lwip_netconn_do_connect(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp == NULL) { + /* This may happen when calling netconn_connect() a second time */ + err = ERR_CLSD; + } else { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + err = raw_connect(msg->conn->pcb.raw, API_EXPR_REF(msg->msg.bc.ipaddr)); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + err = udp_connect(msg->conn->pcb.udp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + /* Prevent connect while doing any other action. */ + if (msg->conn->state == NETCONN_CONNECT) { + err = ERR_ALREADY; + } else if (msg->conn->state != NETCONN_NONE) { + err = ERR_ISCONN; + } else { + setup_tcp(msg->conn); + err = tcp_connect(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr), + msg->msg.bc.port, lwip_netconn_do_connected); + if (err == ERR_OK) { + u8_t non_blocking = netconn_is_nonblocking(msg->conn); + msg->conn->state = NETCONN_CONNECT; + SET_NONBLOCKING_CONNECT(msg->conn, non_blocking); + if (non_blocking) { + err = ERR_INPROGRESS; + } else { + msg->conn->current_msg = msg; + /* sys_sem_signal() is called from lwip_netconn_do_connected (or err_tcp()), + when the connection is established! */ +#if LWIP_TCPIP_CORE_LOCKING + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CONNECT); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state != NETCONN_CONNECT); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + return; + } + } + } + break; +#endif /* LWIP_TCP */ + default: + LWIP_ERROR("Invalid netconn type", 0, do { + err = ERR_VAL; + } while (0)); + break; + } + } + msg->err = err; + /* For all other protocols, netconn_connect() calls netconn_apimsg(), + so use TCPIP_APIMSG_ACK() here. */ + TCPIP_APIMSG_ACK(msg); +} + +/** + * Disconnect a pcb contained inside a netconn + * Only used for UDP netconns. + * Called from netconn_disconnect. + * + * @param m the api_msg pointing to the connection to disconnect + */ +void +lwip_netconn_do_disconnect(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + +#if LWIP_UDP + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { + udp_disconnect(msg->conn->pcb.udp); + msg->err = ERR_OK; + } else +#endif /* LWIP_UDP */ + { + msg->err = ERR_VAL; + } + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * Set a TCP pcb contained in a netconn into listen mode + * Called from netconn_listen. 
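+ *
+ * Editor's illustrative note (not part of the upstream lwIP sources): a
+ * typical sequential-API TCP server reaches this via netconn_listen(), e.g.
+ * (port 80 is an arbitrary placeholder):
+ *
+ *   netconn_bind(conn, IP_ADDR_ANY, 80);
+ *   netconn_listen(conn);
+ *   for (;;) {
+ *     struct netconn *newconn;
+ *     if (netconn_accept(conn, &newconn) == ERR_OK) {
+ *       netconn_delete(newconn);   a real server would exchange data first
+ *     }
+ *   }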
+ * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_listen(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + if (msg->conn->state == NETCONN_NONE) { + struct tcp_pcb *lpcb; + if (msg->conn->pcb.tcp->state != CLOSED) { + /* connection is not closed, cannot listen */ + err = ERR_VAL; + } else { + u8_t backlog; +#if TCP_LISTEN_BACKLOG + backlog = msg->msg.lb.backlog; +#else /* TCP_LISTEN_BACKLOG */ + backlog = TCP_DEFAULT_LISTEN_BACKLOG; +#endif /* TCP_LISTEN_BACKLOG */ +#if LWIP_IPV4 && LWIP_IPV6 + /* "Socket API like" dual-stack support: If IP to listen to is IP6_ADDR_ANY, + * and NETCONN_FLAG_IPV6_V6ONLY is NOT set, use IP_ANY_TYPE to listen + */ + if (ip_addr_cmp(&msg->conn->pcb.ip->local_ip, IP6_ADDR_ANY) && + (netconn_get_ipv6only(msg->conn) == 0)) { + /* change PCB type to IPADDR_TYPE_ANY */ + IP_SET_TYPE_VAL(msg->conn->pcb.tcp->local_ip, IPADDR_TYPE_ANY); + IP_SET_TYPE_VAL(msg->conn->pcb.tcp->remote_ip, IPADDR_TYPE_ANY); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + lpcb = tcp_listen_with_backlog_and_err(msg->conn->pcb.tcp, backlog, &err); + + if (lpcb == NULL) { + /* in this case, the old pcb is still allocated */ + } else { + /* delete the recvmbox and allocate the acceptmbox */ + if (sys_mbox_valid(&msg->conn->recvmbox)) { + /** @todo: should we drain the recvmbox here? */ + sys_mbox_free(&msg->conn->recvmbox); + sys_mbox_set_invalid(&msg->conn->recvmbox); + } + err = ERR_OK; + if (!sys_mbox_valid(&msg->conn->acceptmbox)) { + err = sys_mbox_new(&msg->conn->acceptmbox, DEFAULT_ACCEPTMBOX_SIZE); + } + if (err == ERR_OK) { + msg->conn->state = NETCONN_LISTEN; + msg->conn->pcb.tcp = lpcb; + tcp_arg(msg->conn->pcb.tcp, msg->conn); + tcp_accept(msg->conn->pcb.tcp, accept_function); + } else { + /* since the old pcb is already deallocated, free lpcb now */ + tcp_close(lpcb); + msg->conn->pcb.tcp = NULL; + } + } + } + } else if (msg->conn->state == NETCONN_LISTEN) { + /* already listening, allow updating of the backlog */ + err = ERR_OK; + tcp_backlog_set(msg->conn->pcb.tcp, msg->msg.lb.backlog); + } else { + err = ERR_CONN; + } + } else { + err = ERR_ARG; + } + } else { + err = ERR_CONN; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} +#endif /* LWIP_TCP */ + +/** + * Send some data on a RAW or UDP pcb contained in a netconn + * Called from netconn_send + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_send(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + err_t err = netconn_err(msg->conn); + if (err == ERR_OK) { + if (msg->conn->pcb.tcp != NULL) { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + if (ip_addr_isany(&msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = raw_send(msg->conn->pcb.raw, msg->msg.b->p); + } else { + err = raw_sendto(msg->conn->pcb.raw, msg->msg.b->p, &msg->msg.b->addr); + } + break; +#endif +#if LWIP_UDP + case NETCONN_UDP: +#if LWIP_CHECKSUM_ON_COPY + if (ip_addr_isany(&msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = udp_send_chksum(msg->conn->pcb.udp, msg->msg.b->p, + msg->msg.b->flags & NETBUF_FLAG_CHKSUM, msg->msg.b->toport_chksum); + } else { + err = udp_sendto_chksum(msg->conn->pcb.udp, msg->msg.b->p, + &msg->msg.b->addr, msg->msg.b->port, + msg->msg.b->flags & NETBUF_FLAG_CHKSUM, msg->msg.b->toport_chksum); + } +#else /* LWIP_CHECKSUM_ON_COPY */ + if (ip_addr_isany_val(msg->msg.b->addr) || 
IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = udp_send(msg->conn->pcb.udp, msg->msg.b->p); + } else { + err = udp_sendto(msg->conn->pcb.udp, msg->msg.b->p, &msg->msg.b->addr, msg->msg.b->port); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + break; +#endif /* LWIP_UDP */ + default: + err = ERR_CONN; + break; + } + } else { + err = ERR_CONN; + } + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * Indicate data has been received from a TCP pcb contained in a netconn + * Called from netconn_recv + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_recv(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + size_t remaining = msg->msg.r.len; + do { + u16_t recved = (u16_t)((remaining > 0xffff) ? 0xffff : remaining); + tcp_recved(msg->conn->pcb.tcp, recved); + remaining -= recved; + } while (remaining != 0); + } + } + TCPIP_APIMSG_ACK(msg); +} + +#if TCP_LISTEN_BACKLOG +/** Indicate that a TCP pcb has been accepted + * Called from netconn_accept + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_accepted(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + tcp_backlog_accepted(msg->conn->pcb.tcp); + } + } + TCPIP_APIMSG_ACK(msg); +} +#endif /* TCP_LISTEN_BACKLOG */ + +/** + * See if more data needs to be written from a previous call to netconn_write. + * Called initially from lwip_netconn_do_write. If the first call can't send all data + * (because of low memory or empty send-buffer), this function is called again + * from sent_tcp() or poll_tcp() to send more data. If all data is sent, the + * blocking application thread (waiting in netconn_write) is released. 
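+ *
+ * Editor's illustrative note (not part of the upstream lwIP sources): the
+ * application-side counterpart is netconn_write(); e.g. a blocking send where
+ * NETCONN_COPY lets the caller reuse its buffer immediately:
+ *
+ *   const char greeting[] = "hello";
+ *   err_t err = netconn_write(conn, greeting, sizeof(greeting) - 1, NETCONN_COPY);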
+ * + * @param conn netconn (that is currently in state NETCONN_WRITE) to process + * @return ERR_OK + * ERR_MEM if LWIP_TCPIP_CORE_LOCKING=1 and sending hasn't yet finished + */ +static err_t +lwip_netconn_do_writemore(struct netconn *conn WRITE_DELAYED_PARAM) +{ + err_t err; + const void *dataptr; + u16_t len, available; + u8_t write_finished = 0; + size_t diff; + u8_t dontblock; + u8_t apiflags; + u8_t write_more; + + LWIP_ASSERT("conn != NULL", conn != NULL); + LWIP_ASSERT("conn->state == NETCONN_WRITE", (conn->state == NETCONN_WRITE)); + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + LWIP_ASSERT("conn->pcb.tcp != NULL", conn->pcb.tcp != NULL); + LWIP_ASSERT("conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len", + conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len); + LWIP_ASSERT("conn->current_msg->msg.w.vector_cnt > 0", conn->current_msg->msg.w.vector_cnt > 0); + + apiflags = conn->current_msg->msg.w.apiflags; + dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK); + +#if LWIP_SO_SNDTIMEO + if ((conn->send_timeout != 0) && + ((s32_t)(sys_now() - conn->current_msg->msg.w.time_started) >= conn->send_timeout)) { + write_finished = 1; + if (conn->current_msg->msg.w.offset == 0) { + /* nothing has been written */ + err = ERR_WOULDBLOCK; + } else { + /* partial write */ + err = ERR_OK; + } + } else +#endif /* LWIP_SO_SNDTIMEO */ + { + do { + dataptr = (const u8_t *)conn->current_msg->msg.w.vector->ptr + conn->current_msg->msg.w.vector_off; + diff = conn->current_msg->msg.w.vector->len - conn->current_msg->msg.w.vector_off; + if (diff > 0xffffUL) { /* max_u16_t */ + len = 0xffff; + apiflags |= TCP_WRITE_FLAG_MORE; + } else { + len = (u16_t)diff; + } + available = tcp_sndbuf(conn->pcb.tcp); + if (available < len) { + /* don't try to write more than sendbuf */ + len = available; + if (dontblock) { + if (!len) { + /* set error according to partial write or not */ + err = (conn->current_msg->msg.w.offset == 0) ? ERR_WOULDBLOCK : ERR_OK; + goto err_mem; + } + } else { + apiflags |= TCP_WRITE_FLAG_MORE; + } + } + LWIP_ASSERT("lwip_netconn_do_writemore: invalid length!", + ((conn->current_msg->msg.w.vector_off + len) <= conn->current_msg->msg.w.vector->len)); + /* we should loop around for more sending in the following cases: + 1) We couldn't finish the current vector because of 16-bit size limitations. 
+ tcp_write() and tcp_sndbuf() both are limited to 16-bit sizes + 2) We are sending the remainder of the current vector and have more */ + if ((len == 0xffff && diff > 0xffffUL) || + (len == (u16_t)diff && conn->current_msg->msg.w.vector_cnt > 1)) { + write_more = 1; + apiflags |= TCP_WRITE_FLAG_MORE; + } else { + write_more = 0; + } + err = tcp_write(conn->pcb.tcp, dataptr, len, apiflags); + if (err == ERR_OK) { + conn->current_msg->msg.w.offset += len; + conn->current_msg->msg.w.vector_off += len; + /* check if current vector is finished */ + if (conn->current_msg->msg.w.vector_off == conn->current_msg->msg.w.vector->len) { + conn->current_msg->msg.w.vector_cnt--; + /* if we have additional vectors, move on to them */ + if (conn->current_msg->msg.w.vector_cnt > 0) { + conn->current_msg->msg.w.vector++; + conn->current_msg->msg.w.vector_off = 0; + } + } + } + } while (write_more && err == ERR_OK); + /* if OK or memory error, check available space */ + if ((err == ERR_OK) || (err == ERR_MEM)) { +err_mem: + if (dontblock && (conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len)) { + /* non-blocking write did not write everything: mark the pcb non-writable + and let poll_tcp check writable space to mark the pcb writable again */ + API_EVENT(conn, NETCONN_EVT_SENDMINUS, 0); + conn->flags |= NETCONN_FLAG_CHECK_WRITESPACE; + } else if ((tcp_sndbuf(conn->pcb.tcp) <= TCP_SNDLOWAT) || + (tcp_sndqueuelen(conn->pcb.tcp) >= TCP_SNDQUEUELOWAT)) { + /* The queued byte- or pbuf-count exceeds the configured low-water limit, + let select mark this pcb as non-writable. */ + API_EVENT(conn, NETCONN_EVT_SENDMINUS, 0); + } + } + + if (err == ERR_OK) { + err_t out_err; + if ((conn->current_msg->msg.w.offset == conn->current_msg->msg.w.len) || dontblock) { + /* return sent length (caller reads length from msg.w.offset) */ + write_finished = 1; + } + out_err = tcp_output(conn->pcb.tcp); + if (out_err == ERR_RTE) { + /* If tcp_output fails because no route is found, + don't try writing any more but return the error + to the application thread. */ + err = out_err; + write_finished = 1; + } + } else if (err == ERR_MEM) { + /* If ERR_MEM, we wait for sent_tcp or poll_tcp to be called. + For blocking sockets, we do NOT return to the application + thread, since ERR_MEM is only a temporary error! Non-blocking + will remain non-writable until sent_tcp/poll_tcp is called */ + + /* tcp_write returned ERR_MEM, try tcp_output anyway */ + err_t out_err = tcp_output(conn->pcb.tcp); + if (out_err == ERR_RTE) { + /* If tcp_output fails because no route is found, + don't try writing any more but return the error + to the application thread. */ + err = out_err; + write_finished = 1; + } else if (dontblock) { + /* non-blocking write is done on ERR_MEM, set error according + to partial write or not */ + err = (conn->current_msg->msg.w.offset == 0) ? ERR_WOULDBLOCK : ERR_OK; + write_finished = 1; + } + } else { + /* On errors != ERR_MEM, we don't try writing any more but return + the error to the application thread. 
*/ + write_finished = 1; + } + } + if (write_finished) { + /* everything was written: set back connection state + and back to application task */ + sys_sem_t *op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + conn->current_msg->err = err; + conn->current_msg = NULL; + conn->state = NETCONN_NONE; +#if LWIP_TCPIP_CORE_LOCKING + if (delayed) +#endif + { + sys_sem_signal(op_completed_sem); + } + } +#if LWIP_TCPIP_CORE_LOCKING + else { + return ERR_MEM; + } +#endif + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Send some data on a TCP pcb contained in a netconn + * Called from netconn_write + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_write(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + err_t err = netconn_err(msg->conn); + if (err == ERR_OK) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { +#if LWIP_TCP + if (msg->conn->state != NETCONN_NONE) { + /* netconn is connecting, closing or in blocking write */ + err = ERR_INPROGRESS; + } else if (msg->conn->pcb.tcp != NULL) { + msg->conn->state = NETCONN_WRITE; + /* set all the variables used by lwip_netconn_do_writemore */ + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + LWIP_ASSERT("msg->msg.w.len != 0", msg->msg.w.len != 0); + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_writemore(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_WRITE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state != NETCONN_WRITE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_writemore(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* for both cases: if lwip_netconn_do_writemore was called, don't ACK the APIMSG + since lwip_netconn_do_writemore ACKs it! 
*/ + return; + } else { + err = ERR_CONN; + } +#else /* LWIP_TCP */ + err = ERR_VAL; +#endif /* LWIP_TCP */ +#if (LWIP_UDP || LWIP_RAW) + } else { + err = ERR_VAL; +#endif /* (LWIP_UDP || LWIP_RAW) */ + } + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +/** + * Return a connection's local or remote address + * Called from netconn_getaddr + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_getaddr(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + if (msg->conn->pcb.ip != NULL) { + if (msg->msg.ad.local) { + ip_addr_copy(API_EXPR_DEREF(msg->msg.ad.ipaddr), + msg->conn->pcb.ip->local_ip); + } else { + ip_addr_copy(API_EXPR_DEREF(msg->msg.ad.ipaddr), + msg->conn->pcb.ip->remote_ip); + } + + msg->err = ERR_OK; + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + if (msg->msg.ad.local) { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.raw->protocol; + } else { + /* return an error as connecting is only a helper for upper layers */ + msg->err = ERR_CONN; + } + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + if (msg->msg.ad.local) { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.udp->local_port; + } else { + if ((msg->conn->pcb.udp->flags & UDP_FLAGS_CONNECTED) == 0) { + msg->err = ERR_CONN; + } else { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.udp->remote_port; + } + } + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + if ((msg->msg.ad.local == 0) && + ((msg->conn->pcb.tcp->state == CLOSED) || (msg->conn->pcb.tcp->state == LISTEN))) { + /* pcb is not connected and remote name is requested */ + msg->err = ERR_CONN; + } else { + API_EXPR_DEREF(msg->msg.ad.port) = (msg->msg.ad.local ? msg->conn->pcb.tcp->local_port : msg->conn->pcb.tcp->remote_port); + } + break; +#endif /* LWIP_TCP */ + default: + LWIP_ASSERT("invalid netconn_type", 0); + break; + } + } else { + msg->err = ERR_CONN; + } + TCPIP_APIMSG_ACK(msg); +} + +/** + * Close or half-shutdown a TCP pcb contained in a netconn + * Called from netconn_close + * In contrast to closing sockets, the netconn is not deallocated. + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_close(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + +#if LWIP_TCP + enum netconn_state state = msg->conn->state; + /* First check if this is a TCP netconn and if it is in a correct state + (LISTEN doesn't support half shutdown) */ + if ((msg->conn->pcb.tcp != NULL) && + (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) && + ((msg->msg.sd.shut == NETCONN_SHUT_RDWR) || (state != NETCONN_LISTEN))) { + /* Check if we are in a connected state */ + if (state == NETCONN_CONNECT) { + /* TCP connect in progress: cannot shutdown */ + msg->err = ERR_CONN; + } else if (state == NETCONN_WRITE) { +#if LWIP_NETCONN_FULLDUPLEX + if (msg->msg.sd.shut & NETCONN_SHUT_WR) { + /* close requested, abort running write */ + sys_sem_t *write_completed_sem; + LWIP_ASSERT("msg->conn->current_msg != NULL", msg->conn->current_msg != NULL); + write_completed_sem = LWIP_API_MSG_SEM(msg->conn->current_msg); + msg->conn->current_msg->err = ERR_CLSD; + msg->conn->current_msg = NULL; + msg->conn->state = NETCONN_NONE; + state = NETCONN_NONE; + sys_sem_signal(write_completed_sem); + } else { + LWIP_ASSERT("msg->msg.sd.shut == NETCONN_SHUT_RD", msg->msg.sd.shut == NETCONN_SHUT_RD); + /* In this case, let the write continue and do not interfere with + conn->current_msg or conn->state! 
*/ + msg->err = tcp_shutdown(msg->conn->pcb.tcp, 1, 0); + } + } + if (state == NETCONN_NONE) { +#else /* LWIP_NETCONN_FULLDUPLEX */ + msg->err = ERR_INPROGRESS; + } else { +#endif /* LWIP_NETCONN_FULLDUPLEX */ + if (msg->msg.sd.shut & NETCONN_SHUT_RD) { +#if LWIP_NETCONN_FULLDUPLEX + /* Mark mboxes invalid */ + netconn_mark_mbox_invalid(msg->conn); +#else /* LWIP_NETCONN_FULLDUPLEX */ + netconn_drain(msg->conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + } + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + msg->conn->state = NETCONN_CLOSE; + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_close_internal(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CLOSE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_close_internal(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* for tcp netconns, lwip_netconn_do_close_internal ACKs the message */ + return; + } + } else +#endif /* LWIP_TCP */ + { + msg->err = ERR_CONN; + } + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** + * Join multicast groups for UDP netconns. + * Called from netconn_join_leave_group + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_join_leave_group(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_CONN; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { +#if LWIP_UDP +#if LWIP_IPV6 && LWIP_IPV6_MLD + if (NETCONNTYPE_ISIPV6(msg->conn->type)) { + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = mld6_joingroup(ip_2_ip6(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = mld6_leavegroup(ip_2_ip6(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } + } else +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + { +#if LWIP_IGMP + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = igmp_joingroup(ip_2_ip4(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = igmp_leavegroup(ip_2_ip4(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_UDP */ +#if (LWIP_TCP || LWIP_RAW) + } else { + msg->err = ERR_VAL; +#endif /* (LWIP_TCP || LWIP_RAW) */ + } + } + TCPIP_APIMSG_ACK(msg); +} +/** + * Join multicast groups for UDP netconns. 
+ * Called from netconn_join_leave_group_netif + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_join_leave_group_netif(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + struct netif *netif; + + netif = netif_get_by_index(msg->msg.jl.if_idx); + if (netif == NULL) { + msg->err = ERR_IF; + goto done; + } + + msg->err = ERR_CONN; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { +#if LWIP_UDP +#if LWIP_IPV6 && LWIP_IPV6_MLD + if (NETCONNTYPE_ISIPV6(msg->conn->type)) { + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = mld6_joingroup_netif(netif, + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = mld6_leavegroup_netif(netif, + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } + } else +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + { +#if LWIP_IGMP + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = igmp_joingroup_netif(netif, + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = igmp_leavegroup_netif(netif, + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_UDP */ +#if (LWIP_TCP || LWIP_RAW) + } else { + msg->err = ERR_VAL; +#endif /* (LWIP_TCP || LWIP_RAW) */ + } + } + +done: + TCPIP_APIMSG_ACK(msg); +} +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/** + * Callback function that is called when DNS name is resolved + * (or on timeout). A waiting application thread is waked up by + * signaling the semaphore. + */ +static void +lwip_netconn_do_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg) +{ + struct dns_api_msg *msg = (struct dns_api_msg *)arg; + + /* we trust the internal implementation to be correct :-) */ + LWIP_UNUSED_ARG(name); + + if (ipaddr == NULL) { + /* timeout or memory error */ + API_EXPR_DEREF(msg->err) = ERR_VAL; + } else { + /* address was resolved */ + API_EXPR_DEREF(msg->err) = ERR_OK; + API_EXPR_DEREF(msg->addr) = *ipaddr; + } + /* wake up the application task waiting in netconn_gethostbyname */ + sys_sem_signal(API_EXPR_REF_SEM(msg->sem)); +} + +/** + * Execute a DNS query + * Called from netconn_gethostbyname + * + * @param arg the dns_api_msg pointing to the query + */ +void +lwip_netconn_do_gethostbyname(void *arg) +{ + struct dns_api_msg *msg = (struct dns_api_msg *)arg; + u8_t addrtype = +#if LWIP_IPV4 && LWIP_IPV6 + msg->dns_addrtype; +#else + LWIP_DNS_ADDRTYPE_DEFAULT; +#endif + + API_EXPR_DEREF(msg->err) = dns_gethostbyname_addrtype(msg->name, + API_EXPR_REF(msg->addr), lwip_netconn_do_dns_found, msg, addrtype); +#if LWIP_TCPIP_CORE_LOCKING + /* For core locking, only block if we need to wait for answer/timeout */ + if (API_EXPR_DEREF(msg->err) == ERR_INPROGRESS) { + UNLOCK_TCPIP_CORE(); + sys_sem_wait(API_EXPR_REF_SEM(msg->sem)); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("do_gethostbyname still in progress!!", API_EXPR_DEREF(msg->err) != ERR_INPROGRESS); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + if (API_EXPR_DEREF(msg->err) != ERR_INPROGRESS) { + /* on error or immediate success, wake up the application + * task waiting in netconn_gethostbyname */ + sys_sem_signal(API_EXPR_REF_SEM(msg->sem)); + } +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} +#endif /* LWIP_DNS */ + +#endif /* LWIP_NETCONN */ diff --git a/Middlewares/Third_Party/LwIP/src/api/err.c b/Middlewares/Third_Party/LwIP/src/api/err.c new file mode 100644 index 00000000..dd2b62da --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/err.c @@ -0,0 +1,115 @@ +/** + * @file + * 
Error Management module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/err.h" +#include "lwip/def.h" +#include "lwip/sys.h" + +#include "lwip/errno.h" + +#if !NO_SYS +/** Table to quickly map an lwIP error (err_t) to a socket error + * by using -err as an index */ +static const int err_to_errno_table[] = { + 0, /* ERR_OK 0 No error, everything OK. */ + ENOMEM, /* ERR_MEM -1 Out of memory error. */ + ENOBUFS, /* ERR_BUF -2 Buffer error. */ + EWOULDBLOCK, /* ERR_TIMEOUT -3 Timeout */ + EHOSTUNREACH, /* ERR_RTE -4 Routing problem. */ + EINPROGRESS, /* ERR_INPROGRESS -5 Operation in progress */ + EINVAL, /* ERR_VAL -6 Illegal value. */ + EWOULDBLOCK, /* ERR_WOULDBLOCK -7 Operation would block. */ + EADDRINUSE, /* ERR_USE -8 Address in use. */ + EALREADY, /* ERR_ALREADY -9 Already connecting. */ + EISCONN, /* ERR_ISCONN -10 Conn already established.*/ + ENOTCONN, /* ERR_CONN -11 Not connected. */ + -1, /* ERR_IF -12 Low-level netif error */ + ECONNABORTED, /* ERR_ABRT -13 Connection aborted. */ + ECONNRESET, /* ERR_RST -14 Connection reset. */ + ENOTCONN, /* ERR_CLSD -15 Connection closed. */ + EIO /* ERR_ARG -16 Illegal argument. 
*/ +}; + +int +err_to_errno(err_t err) +{ + if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_to_errno_table))) { + return EIO; + } + return err_to_errno_table[-err]; +} +#endif /* !NO_SYS */ + +#ifdef LWIP_DEBUG + +static const char *err_strerr[] = { + "Ok.", /* ERR_OK 0 */ + "Out of memory error.", /* ERR_MEM -1 */ + "Buffer error.", /* ERR_BUF -2 */ + "Timeout.", /* ERR_TIMEOUT -3 */ + "Routing problem.", /* ERR_RTE -4 */ + "Operation in progress.", /* ERR_INPROGRESS -5 */ + "Illegal value.", /* ERR_VAL -6 */ + "Operation would block.", /* ERR_WOULDBLOCK -7 */ + "Address in use.", /* ERR_USE -8 */ + "Already connecting.", /* ERR_ALREADY -9 */ + "Already connected.", /* ERR_ISCONN -10 */ + "Not connected.", /* ERR_CONN -11 */ + "Low-level netif error.", /* ERR_IF -12 */ + "Connection aborted.", /* ERR_ABRT -13 */ + "Connection reset.", /* ERR_RST -14 */ + "Connection closed.", /* ERR_CLSD -15 */ + "Illegal argument." /* ERR_ARG -16 */ +}; + +/** + * Convert an lwip internal error to a string representation. + * + * @param err an lwip internal err_t + * @return a string representation for err + */ +const char * +lwip_strerr(err_t err) +{ + if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_strerr))) { + return "Unknown error."; + } + return err_strerr[-err]; +} + +#endif /* LWIP_DEBUG */ diff --git a/Middlewares/Third_Party/LwIP/src/api/if_api.c b/Middlewares/Third_Party/LwIP/src/api/if_api.c new file mode 100644 index 00000000..8e094d09 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/if_api.c @@ -0,0 +1,102 @@ +/** + * @file + * Interface Identification APIs from: + * RFC 3493: Basic Socket Interface Extensions for IPv6 + * Section 4: Interface Identification + * + * @defgroup if_api Interface Identification API + * @ingroup socket + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Joel Cunningham + * + */ +#include "lwip/opt.h" + +#if LWIP_SOCKET + +#include "lwip/errno.h" +#include "lwip/if_api.h" +#include "lwip/netifapi.h" +#include "lwip/priv/sockets_priv.h" + +/** + * @ingroup if_api + * Maps an interface index to its corresponding name. + * @param ifindex interface index + * @param ifname shall point to a buffer of at least {IF_NAMESIZE} bytes + * @return If ifindex is an interface index, then the function shall return the + * value supplied in ifname, which points to a buffer now containing the interface name. + * Otherwise, the function shall return a NULL pointer. + */ +char * +lwip_if_indextoname(unsigned int ifindex, char *ifname) +{ +#if LWIP_NETIF_API + if (ifindex <= 0xff) { + err_t err = netifapi_netif_index_to_name((u8_t)ifindex, ifname); + if (!err && ifname[0] != '\0') { + return ifname; + } + } +#else /* LWIP_NETIF_API */ + LWIP_UNUSED_ARG(ifindex); + LWIP_UNUSED_ARG(ifname); +#endif /* LWIP_NETIF_API */ + set_errno(ENXIO); + return NULL; +} + +/** + * @ingroup if_api + * Returs the interface index corresponding to name ifname. + * @param ifname Interface name + * @return The corresponding index if ifname is the name of an interface; + * otherwise, zero. + */ +unsigned int +lwip_if_nametoindex(const char *ifname) +{ +#if LWIP_NETIF_API + err_t err; + u8_t idx; + + err = netifapi_netif_name_to_index(ifname, &idx); + if (!err) { + return idx; + } +#else /* LWIP_NETIF_API */ + LWIP_UNUSED_ARG(ifname); +#endif /* LWIP_NETIF_API */ + return 0; /* invalid index */ +} + +#endif /* LWIP_SOCKET */ diff --git a/Middlewares/Third_Party/LwIP/src/api/netbuf.c b/Middlewares/Third_Party/LwIP/src/api/netbuf.c new file mode 100644 index 00000000..3b910de1 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/netbuf.c @@ -0,0 +1,250 @@ +/** + * @file + * Network buffer management + * + * @defgroup netbuf Network buffers + * @ingroup netconn + * Network buffer descriptor for @ref netconn. Based on @ref pbuf internally + * to avoid copying data around.\n + * Buffers must not be shared accross multiple threads, all functions except + * netbuf_new() and netbuf_delete() are not thread-safe. + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netbuf.h" +#include "lwip/memp.h" + +#include + +/** + * @ingroup netbuf + * Create (allocate) and initialize a new netbuf. + * The netbuf doesn't yet contain a packet buffer! + * + * @return a pointer to a new netbuf + * NULL on lack of memory + */ +struct +netbuf *netbuf_new(void) +{ + struct netbuf *buf; + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf != NULL) { + memset(buf, 0, sizeof(struct netbuf)); + } + return buf; +} + +/** + * @ingroup netbuf + * Deallocate a netbuf allocated by netbuf_new(). + * + * @param buf pointer to a netbuf allocated by netbuf_new() + */ +void +netbuf_delete(struct netbuf *buf) +{ + if (buf != NULL) { + if (buf->p != NULL) { + pbuf_free(buf->p); + buf->p = buf->ptr = NULL; + } + memp_free(MEMP_NETBUF, buf); + } +} + +/** + * @ingroup netbuf + * Allocate memory for a packet buffer for a given netbuf. + * + * @param buf the netbuf for which to allocate a packet buffer + * @param size the size of the packet buffer to allocate + * @return pointer to the allocated memory + * NULL if no memory could be allocated + */ +void * +netbuf_alloc(struct netbuf *buf, u16_t size) +{ + LWIP_ERROR("netbuf_alloc: invalid buf", (buf != NULL), return NULL;); + + /* Deallocate any previously allocated memory. */ + if (buf->p != NULL) { + pbuf_free(buf->p); + } + buf->p = pbuf_alloc(PBUF_TRANSPORT, size, PBUF_RAM); + if (buf->p == NULL) { + return NULL; + } + LWIP_ASSERT("check that first pbuf can hold size", + (buf->p->len >= size)); + buf->ptr = buf->p; + return buf->p->payload; +} + +/** + * @ingroup netbuf + * Free the packet buffer included in a netbuf + * + * @param buf pointer to the netbuf which contains the packet buffer to free + */ +void +netbuf_free(struct netbuf *buf) +{ + LWIP_ERROR("netbuf_free: invalid buf", (buf != NULL), return;); + if (buf->p != NULL) { + pbuf_free(buf->p); + } + buf->p = buf->ptr = NULL; +#if LWIP_CHECKSUM_ON_COPY + buf->flags = 0; + buf->toport_chksum = 0; +#endif /* LWIP_CHECKSUM_ON_COPY */ +} + +/** + * @ingroup netbuf + * Let a netbuf reference existing (non-volatile) data. 
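A short sketch of the allocation helpers above (netbuf_new(), netbuf_alloc(), netbuf_delete()); the copy-then-delete flow and the reduced error handling are illustrative only:

#include <string.h>
#include "lwip/netbuf.h"

/* Sketch (not part of the patch): build a netbuf that owns a copy of 'data'.
 * In a real application the buffer would typically be handed to
 * netconn_send()/netconn_sendto() before being deleted. */
static err_t netbuf_copy_example(const void *data, u16_t len)
{
  struct netbuf *buf;
  void *payload;

  buf = netbuf_new();                 /* descriptor only, no pbuf yet */
  if (buf == NULL) {
    return ERR_MEM;
  }
  payload = netbuf_alloc(buf, len);   /* attaches a PBUF_RAM pbuf of 'len' bytes */
  if (payload == NULL) {
    netbuf_delete(buf);
    return ERR_MEM;
  }
  memcpy(payload, data, len);
  /* ... use the netbuf ... */
  netbuf_delete(buf);                 /* frees the attached pbuf as well */
  return ERR_OK;
}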
+ * + * @param buf netbuf which should reference the data + * @param dataptr pointer to the data to reference + * @param size size of the data + * @return ERR_OK if data is referenced + * ERR_MEM if data couldn't be referenced due to lack of memory + */ +err_t +netbuf_ref(struct netbuf *buf, const void *dataptr, u16_t size) +{ + LWIP_ERROR("netbuf_ref: invalid buf", (buf != NULL), return ERR_ARG;); + if (buf->p != NULL) { + pbuf_free(buf->p); + } + buf->p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF); + if (buf->p == NULL) { + buf->ptr = NULL; + return ERR_MEM; + } + ((struct pbuf_rom *)buf->p)->payload = dataptr; + buf->p->len = buf->p->tot_len = size; + buf->ptr = buf->p; + return ERR_OK; +} + +/** + * @ingroup netbuf + * Chain one netbuf to another (@see pbuf_chain) + * + * @param head the first netbuf + * @param tail netbuf to chain after head, freed by this function, may not be reference after returning + */ +void +netbuf_chain(struct netbuf *head, struct netbuf *tail) +{ + LWIP_ERROR("netbuf_chain: invalid head", (head != NULL), return;); + LWIP_ERROR("netbuf_chain: invalid tail", (tail != NULL), return;); + pbuf_cat(head->p, tail->p); + head->ptr = head->p; + memp_free(MEMP_NETBUF, tail); +} + +/** + * @ingroup netbuf + * Get the data pointer and length of the data inside a netbuf. + * + * @param buf netbuf to get the data from + * @param dataptr pointer to a void pointer where to store the data pointer + * @param len pointer to an u16_t where the length of the data is stored + * @return ERR_OK if the information was retrieved, + * ERR_BUF on error. + */ +err_t +netbuf_data(struct netbuf *buf, void **dataptr, u16_t *len) +{ + LWIP_ERROR("netbuf_data: invalid buf", (buf != NULL), return ERR_ARG;); + LWIP_ERROR("netbuf_data: invalid dataptr", (dataptr != NULL), return ERR_ARG;); + LWIP_ERROR("netbuf_data: invalid len", (len != NULL), return ERR_ARG;); + + if (buf->ptr == NULL) { + return ERR_BUF; + } + *dataptr = buf->ptr->payload; + *len = buf->ptr->len; + return ERR_OK; +} + +/** + * @ingroup netbuf + * Move the current data pointer of a packet buffer contained in a netbuf + * to the next part. + * The packet buffer itself is not modified. + * + * @param buf the netbuf to modify + * @return -1 if there is no next part + * 1 if moved to the next part but now there is no next part + * 0 if moved to the next part and there are still more parts + */ +s8_t +netbuf_next(struct netbuf *buf) +{ + LWIP_ERROR("netbuf_next: invalid buf", (buf != NULL), return -1;); + if (buf->ptr->next == NULL) { + return -1; + } + buf->ptr = buf->ptr->next; + if (buf->ptr->next == NULL) { + return 1; + } + return 0; +} + +/** + * @ingroup netbuf + * Move the current data pointer of a packet buffer contained in a netbuf + * to the beginning of the packet. + * The packet buffer itself is not modified. + * + * @param buf the netbuf to modify + */ +void +netbuf_first(struct netbuf *buf) +{ + LWIP_ERROR("netbuf_first: invalid buf", (buf != NULL), return;); + buf->ptr = buf->p; +} + +#endif /* LWIP_NETCONN */ diff --git a/Middlewares/Third_Party/LwIP/src/api/netdb.c b/Middlewares/Third_Party/LwIP/src/api/netdb.c new file mode 100644 index 00000000..87714259 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/netdb.c @@ -0,0 +1,414 @@ +/** + * @file + * API functions for name resolving + * + * @defgroup netdbapi NETDB API + * @ingroup socket + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/netdb.h" + +#if LWIP_DNS && LWIP_SOCKET + +#include "lwip/err.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip_addr.h" +#include "lwip/api.h" +#include "lwip/dns.h" + +#include /* memset */ +#include /* atoi */ + +/** helper struct for gethostbyname_r to access the char* buffer */ +struct gethostbyname_r_helper { + ip_addr_t *addr_list[2]; + ip_addr_t addr; + char *aliases; +}; + +/** h_errno is exported in netdb.h for access by applications. */ +#if LWIP_DNS_API_DECLARE_H_ERRNO +int h_errno; +#endif /* LWIP_DNS_API_DECLARE_H_ERRNO */ + +/** define "hostent" variables storage: 0 if we use a static (but unprotected) + * set of variables for lwip_gethostbyname, 1 if we use a local storage */ +#ifndef LWIP_DNS_API_HOSTENT_STORAGE +#define LWIP_DNS_API_HOSTENT_STORAGE 0 +#endif + +/** define "hostent" variables storage */ +#if LWIP_DNS_API_HOSTENT_STORAGE +#define HOSTENT_STORAGE +#else +#define HOSTENT_STORAGE static +#endif /* LWIP_DNS_API_STATIC_HOSTENT */ + +/** + * Returns an entry containing addresses of address family AF_INET + * for the host with name name. + * Due to dns_gethostbyname limitations, only one address is returned. 
+ * + * @param name the hostname to resolve + * @return an entry containing addresses of address family AF_INET + * for the host with name name + */ +struct hostent * +lwip_gethostbyname(const char *name) +{ + err_t err; + ip_addr_t addr; + + /* buffer variables for lwip_gethostbyname() */ + HOSTENT_STORAGE struct hostent s_hostent; + HOSTENT_STORAGE char *s_aliases; + HOSTENT_STORAGE ip_addr_t s_hostent_addr; + HOSTENT_STORAGE ip_addr_t *s_phostent_addr[2]; + HOSTENT_STORAGE char s_hostname[DNS_MAX_NAME_LENGTH + 1]; + + /* query host IP address */ + err = netconn_gethostbyname(name, &addr); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err)); + h_errno = HOST_NOT_FOUND; + return NULL; + } + + /* fill hostent */ + s_hostent_addr = addr; + s_phostent_addr[0] = &s_hostent_addr; + s_phostent_addr[1] = NULL; + strncpy(s_hostname, name, DNS_MAX_NAME_LENGTH); + s_hostname[DNS_MAX_NAME_LENGTH] = 0; + s_hostent.h_name = s_hostname; + s_aliases = NULL; + s_hostent.h_aliases = &s_aliases; + s_hostent.h_addrtype = AF_INET; + s_hostent.h_length = sizeof(ip_addr_t); + s_hostent.h_addr_list = (char **)&s_phostent_addr; + +#if DNS_DEBUG + /* dump hostent */ + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_name == %s\n", s_hostent.h_name)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_aliases == %p\n", (void *)s_hostent.h_aliases)); + /* h_aliases are always empty */ + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addrtype == %d\n", s_hostent.h_addrtype)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_length == %d\n", s_hostent.h_length)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list == %p\n", (void *)s_hostent.h_addr_list)); + if (s_hostent.h_addr_list != NULL) { + u8_t idx; + for (idx = 0; s_hostent.h_addr_list[idx]; idx++) { + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i] == %p\n", idx, s_hostent.h_addr_list[idx])); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i]-> == %s\n", idx, ipaddr_ntoa((ip_addr_t *)s_hostent.h_addr_list[idx]))); + } + } +#endif /* DNS_DEBUG */ + +#if LWIP_DNS_API_HOSTENT_STORAGE + /* this function should return the "per-thread" hostent after copy from s_hostent */ + return sys_thread_hostent(&s_hostent); +#else + return &s_hostent; +#endif /* LWIP_DNS_API_HOSTENT_STORAGE */ +} + +/** + * Thread-safe variant of lwip_gethostbyname: instead of using a static + * buffer, this function takes buffer and errno pointers as arguments + * and uses these for the result. 
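A usage sketch for the non-reentrant resolver implemented above; the host name is a placeholder, and the returned hostent points at the static (or per-thread) storage described earlier and carries exactly one address:

#include "lwip/netdb.h"
#include "lwip/ip_addr.h"

/* Sketch (not part of the patch): resolve a name and print the single address
 * returned. "example.org" is a placeholder host name. */
static void resolve_example(void)
{
  struct hostent *he;
  const ip_addr_t *addr;

  he = lwip_gethostbyname("example.org");
  if (he == NULL) {
    /* h_errno has been set to HOST_NOT_FOUND */
    return;
  }
  addr = (const ip_addr_t *)he->h_addr_list[0];
  LWIP_DEBUGF(DNS_DEBUG, ("resolved to %s\n", ipaddr_ntoa(addr)));
  LWIP_UNUSED_ARG(addr);
}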
+ * + * @param name the hostname to resolve + * @param ret pre-allocated struct where to store the result + * @param buf pre-allocated buffer where to store additional data + * @param buflen the size of buf + * @param result pointer to a hostent pointer that is set to ret on success + * and set to zero on error + * @param h_errnop pointer to an int where to store errors (instead of modifying + * the global h_errno) + * @return 0 on success, non-zero on error, additional error information + * is stored in *h_errnop instead of h_errno to be thread-safe + */ +int +lwip_gethostbyname_r(const char *name, struct hostent *ret, char *buf, + size_t buflen, struct hostent **result, int *h_errnop) +{ + err_t err; + struct gethostbyname_r_helper *h; + char *hostname; + size_t namelen; + int lh_errno; + + if (h_errnop == NULL) { + /* ensure h_errnop is never NULL */ + h_errnop = &lh_errno; + } + + if (result == NULL) { + /* not all arguments given */ + *h_errnop = EINVAL; + return -1; + } + /* first thing to do: set *result to nothing */ + *result = NULL; + if ((name == NULL) || (ret == NULL) || (buf == NULL)) { + /* not all arguments given */ + *h_errnop = EINVAL; + return -1; + } + + namelen = strlen(name); + if (buflen < (sizeof(struct gethostbyname_r_helper) + LWIP_MEM_ALIGN_BUFFER(namelen + 1))) { + /* buf can't hold the data needed + a copy of name */ + *h_errnop = ERANGE; + return -1; + } + + h = (struct gethostbyname_r_helper *)LWIP_MEM_ALIGN(buf); + hostname = ((char *)h) + sizeof(struct gethostbyname_r_helper); + + /* query host IP address */ + err = netconn_gethostbyname(name, &h->addr); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err)); + *h_errnop = HOST_NOT_FOUND; + return -1; + } + + /* copy the hostname into buf */ + MEMCPY(hostname, name, namelen); + hostname[namelen] = 0; + + /* fill hostent */ + h->addr_list[0] = &h->addr; + h->addr_list[1] = NULL; + h->aliases = NULL; + ret->h_name = hostname; + ret->h_aliases = &h->aliases; + ret->h_addrtype = AF_INET; + ret->h_length = sizeof(ip_addr_t); + ret->h_addr_list = (char **)&h->addr_list; + + /* set result != NULL */ + *result = ret; + + /* return success */ + return 0; +} + +/** + * Frees one or more addrinfo structures returned by getaddrinfo(), along with + * any additional storage associated with those structures. If the ai_next field + * of the structure is not null, the entire list of structures is freed. + * + * @param ai struct addrinfo to free + */ +void +lwip_freeaddrinfo(struct addrinfo *ai) +{ + struct addrinfo *next; + + while (ai != NULL) { + next = ai->ai_next; + memp_free(MEMP_NETDB, ai); + ai = next; + } +} + +/** + * Translates the name of a service location (for example, a host name) and/or + * a service name and returns a set of socket addresses and associated + * information to be used in creating a socket with which to address the + * specified service. + * Memory for the result is allocated internally and must be freed by calling + * lwip_freeaddrinfo()! + * + * Due to a limitation in dns_gethostbyname, only the first address of a + * host is returned. + * Also, service names are not supported (only port numbers)! 
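A usage sketch for the reentrant variant implemented above; the 256-byte buffer is an assumption that comfortably covers the internal helper struct plus a short host name:

#include "lwip/netdb.h"
#include "lwip/ip_addr.h"

/* Sketch (not part of the patch): thread-safe resolution into caller-owned
 * storage. The buffer must hold the internal helper struct plus a copy of the
 * name; 256 bytes is an assumed, generous size for short names. */
static int resolve_r_example(const char *name, ip_addr_t *out)
{
  struct hostent h;
  struct hostent *res = NULL;
  char buf[256];
  int local_h_errno;

  if (lwip_gethostbyname_r(name, &h, buf, sizeof(buf), &res, &local_h_errno) != 0) {
    return -1;                        /* reason is in local_h_errno */
  }
  *out = *(const ip_addr_t *)res->h_addr_list[0];
  return 0;
}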
+ * + * @param nodename descriptive name or address string of the host + * (may be NULL -> local address) + * @param servname port number as string of NULL + * @param hints structure containing input values that set socktype and protocol + * @param res pointer to a pointer where to store the result (set to NULL on failure) + * @return 0 on success, non-zero on failure + * + * @todo: implement AI_V4MAPPED, AI_ADDRCONFIG + */ +int +lwip_getaddrinfo(const char *nodename, const char *servname, + const struct addrinfo *hints, struct addrinfo **res) +{ + err_t err; + ip_addr_t addr; + struct addrinfo *ai; + struct sockaddr_storage *sa = NULL; + int port_nr = 0; + size_t total_size; + size_t namelen = 0; + int ai_family; + + if (res == NULL) { + return EAI_FAIL; + } + *res = NULL; + if ((nodename == NULL) && (servname == NULL)) { + return EAI_NONAME; + } + + if (hints != NULL) { + ai_family = hints->ai_family; + if ((ai_family != AF_UNSPEC) +#if LWIP_IPV4 + && (ai_family != AF_INET) +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + && (ai_family != AF_INET6) +#endif /* LWIP_IPV6 */ + ) { + return EAI_FAMILY; + } + } else { + ai_family = AF_UNSPEC; + } + + if (servname != NULL) { + /* service name specified: convert to port number + * @todo?: currently, only ASCII integers (port numbers) are supported (AI_NUMERICSERV)! */ + port_nr = atoi(servname); + if ((port_nr <= 0) || (port_nr > 0xffff)) { + return EAI_SERVICE; + } + } + + if (nodename != NULL) { + /* service location specified, try to resolve */ + if ((hints != NULL) && (hints->ai_flags & AI_NUMERICHOST)) { + /* no DNS lookup, just parse for an address string */ + if (!ipaddr_aton(nodename, &addr)) { + return EAI_NONAME; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((IP_IS_V6_VAL(addr) && ai_family == AF_INET) || + (IP_IS_V4_VAL(addr) && ai_family == AF_INET6)) { + return EAI_NONAME; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + } else { +#if LWIP_IPV4 && LWIP_IPV6 + /* AF_UNSPEC: prefer IPv4 */ + u8_t type = NETCONN_DNS_IPV4_IPV6; + if (ai_family == AF_INET) { + type = NETCONN_DNS_IPV4; + } else if (ai_family == AF_INET6) { + type = NETCONN_DNS_IPV6; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + err = netconn_gethostbyname_addrtype(nodename, &addr, type); + if (err != ERR_OK) { + return EAI_FAIL; + } + } + } else { + /* service location specified, use loopback address */ + if ((hints != NULL) && (hints->ai_flags & AI_PASSIVE)) { + ip_addr_set_any_val(ai_family == AF_INET6, addr); + } else { + ip_addr_set_loopback_val(ai_family == AF_INET6, addr); + } + } + + total_size = sizeof(struct addrinfo) + sizeof(struct sockaddr_storage); + if (nodename != NULL) { + namelen = strlen(nodename); + if (namelen > DNS_MAX_NAME_LENGTH) { + /* invalid name length */ + return EAI_FAIL; + } + LWIP_ASSERT("namelen is too long", total_size + namelen + 1 > total_size); + total_size += namelen + 1; + } + /* If this fails, please report to lwip-devel! 
:-) */ + LWIP_ASSERT("total_size <= NETDB_ELEM_SIZE: please report this!", + total_size <= NETDB_ELEM_SIZE); + ai = (struct addrinfo *)memp_malloc(MEMP_NETDB); + if (ai == NULL) { + return EAI_MEMORY; + } + memset(ai, 0, total_size); + /* cast through void* to get rid of alignment warnings */ + sa = (struct sockaddr_storage *)(void *)((u8_t *)ai + sizeof(struct addrinfo)); + if (IP_IS_V6_VAL(addr)) { +#if LWIP_IPV6 + struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)sa; + /* set up sockaddr */ + inet6_addr_from_ip6addr(&sa6->sin6_addr, ip_2_ip6(&addr)); + sa6->sin6_family = AF_INET6; + sa6->sin6_len = sizeof(struct sockaddr_in6); + sa6->sin6_port = lwip_htons((u16_t)port_nr); + sa6->sin6_scope_id = ip6_addr_zone(ip_2_ip6(&addr)); + ai->ai_family = AF_INET6; +#endif /* LWIP_IPV6 */ + } else { +#if LWIP_IPV4 + struct sockaddr_in *sa4 = (struct sockaddr_in *)sa; + /* set up sockaddr */ + inet_addr_from_ip4addr(&sa4->sin_addr, ip_2_ip4(&addr)); + sa4->sin_family = AF_INET; + sa4->sin_len = sizeof(struct sockaddr_in); + sa4->sin_port = lwip_htons((u16_t)port_nr); + ai->ai_family = AF_INET; +#endif /* LWIP_IPV4 */ + } + + /* set up addrinfo */ + if (hints != NULL) { + /* copy socktype & protocol from hints if specified */ + ai->ai_socktype = hints->ai_socktype; + ai->ai_protocol = hints->ai_protocol; + } + if (nodename != NULL) { + /* copy nodename to canonname if specified */ + ai->ai_canonname = ((char *)ai + sizeof(struct addrinfo) + sizeof(struct sockaddr_storage)); + MEMCPY(ai->ai_canonname, nodename, namelen); + ai->ai_canonname[namelen] = 0; + } + ai->ai_addrlen = sizeof(struct sockaddr_storage); + ai->ai_addr = (struct sockaddr *)sa; + + *res = ai; + + return 0; +} + +#endif /* LWIP_DNS && LWIP_SOCKET */ diff --git a/Middlewares/Third_Party/LwIP/src/api/netifapi.c b/Middlewares/Third_Party/LwIP/src/api/netifapi.c new file mode 100644 index 00000000..25957cd5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/netifapi.c @@ -0,0 +1,380 @@ +/** + * @file + * Network Interface Sequential API module + * + * @defgroup netifapi NETIF API + * @ingroup sequential_api + * Thread-safe functions to be called from non-TCPIP threads + * + * @defgroup netifapi_netif NETIF related + * @ingroup netifapi + * To be called from non-TCPIP threads + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/opt.h" + +#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/etharp.h" +#include "lwip/netifapi.h" +#include "lwip/memp.h" +#include "lwip/priv/tcpip_priv.h" + +#include /* strncpy */ + +#define NETIFAPI_VAR_REF(name) API_VAR_REF(name) +#define NETIFAPI_VAR_DECLARE(name) API_VAR_DECLARE(struct netifapi_msg, name) +#define NETIFAPI_VAR_ALLOC(name) API_VAR_ALLOC(struct netifapi_msg, MEMP_NETIFAPI_MSG, name, ERR_MEM) +#define NETIFAPI_VAR_FREE(name) API_VAR_FREE(MEMP_NETIFAPI_MSG, name) + +/** + * Call netif_add() inside the tcpip_thread context. + */ +static err_t +netifapi_do_netif_add(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + if (!netif_add( msg->netif, +#if LWIP_IPV4 + API_EXPR_REF(msg->msg.add.ipaddr), + API_EXPR_REF(msg->msg.add.netmask), + API_EXPR_REF(msg->msg.add.gw), +#endif /* LWIP_IPV4 */ + msg->msg.add.state, + msg->msg.add.init, + msg->msg.add.input)) { + return ERR_IF; + } else { + return ERR_OK; + } +} + +#if LWIP_IPV4 +/** + * Call netif_set_addr() inside the tcpip_thread context. + */ +static err_t +netifapi_do_netif_set_addr(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + netif_set_addr( msg->netif, + API_EXPR_REF(msg->msg.add.ipaddr), + API_EXPR_REF(msg->msg.add.netmask), + API_EXPR_REF(msg->msg.add.gw)); + return ERR_OK; +} +#endif /* LWIP_IPV4 */ + +/** +* Call netif_name_to_index() inside the tcpip_thread context. +*/ +static err_t +netifapi_do_name_to_index(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + msg->msg.ifs.index = netif_name_to_index(msg->msg.ifs.name); + return ERR_OK; +} + +/** +* Call netif_index_to_name() inside the tcpip_thread context. +*/ +static err_t +netifapi_do_index_to_name(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + if (!netif_index_to_name(msg->msg.ifs.index, msg->msg.ifs.name)) { + /* return failure via empty name */ + msg->msg.ifs.name[0] = '\0'; + } + return ERR_OK; +} + +/** + * Call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) inside the + * tcpip_thread context. + */ +static err_t +netifapi_do_netif_common(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. 
+ * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + if (msg->msg.common.errtfunc != NULL) { + return msg->msg.common.errtfunc(msg->netif); + } else { + msg->msg.common.voidfunc(msg->netif); + return ERR_OK; + } +} + +#if LWIP_ARP && LWIP_IPV4 +/** + * @ingroup netifapi_arp + * Add or update an entry in the ARP cache. + * For an update, ipaddr is used to find the cache entry. + * + * @param ipaddr IPv4 address of cache entry + * @param ethaddr hardware address mapped to ipaddr + * @param type type of ARP cache entry + * @return ERR_OK: entry added/updated, else error from err_t + */ +err_t +netifapi_arp_add(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, enum netifapi_arp_entry type) +{ + err_t err; + + /* We only support permanent entries currently */ + LWIP_UNUSED_ARG(type); + +#if ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); + err = etharp_add_static_entry(ipaddr, ethaddr); + UNLOCK_TCPIP_CORE(); +#else + /* @todo add new vars to struct netifapi_msg and create a 'do' func */ + LWIP_UNUSED_ARG(ipaddr); + LWIP_UNUSED_ARG(ethaddr); + err = ERR_VAL; +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING */ + + return err; +} + +/** + * @ingroup netifapi_arp + * Remove an entry in the ARP cache identified by ipaddr + * + * @param ipaddr IPv4 address of cache entry + * @param type type of ARP cache entry + * @return ERR_OK: entry removed, else error from err_t + */ +err_t +netifapi_arp_remove(const ip4_addr_t *ipaddr, enum netifapi_arp_entry type) +{ + err_t err; + + /* We only support permanent entries currently */ + LWIP_UNUSED_ARG(type); + +#if ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); + err = etharp_remove_static_entry(ipaddr); + UNLOCK_TCPIP_CORE(); +#else + /* @todo add new vars to struct netifapi_msg and create a 'do' func */ + LWIP_UNUSED_ARG(ipaddr); + err = ERR_VAL; +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING */ + + return err; +} +#endif /* LWIP_ARP && LWIP_IPV4 */ + +/** + * @ingroup netifapi_netif + * Call netif_add() in a thread-safe way by running that function inside the + * tcpip_thread context. + * + * @note for params @see netif_add() + */ +err_t +netifapi_netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + +#if LWIP_IPV4 + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } +#endif /* LWIP_IPV4 */ + + NETIFAPI_VAR_REF(msg).netif = netif; +#if LWIP_IPV4 + NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr); + NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask); + NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw); +#endif /* LWIP_IPV4 */ + NETIFAPI_VAR_REF(msg).msg.add.state = state; + NETIFAPI_VAR_REF(msg).msg.add.init = init; + NETIFAPI_VAR_REF(msg).msg.add.input = input; + err = tcpip_api_call(netifapi_do_netif_add, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} + +#if LWIP_IPV4 +/** + * @ingroup netifapi_netif + * Call netif_set_addr() in a thread-safe way by running that function inside the + * tcpip_thread context. 
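A bring-up sketch for netifapi_netif_add() above, as it might be called from an application thread; ethernetif_init, the static IPv4 configuration and the port number are assumptions, and tcpip_input is the usual input function when the tcpip_thread is running:

#include "lwip/netifapi.h"
#include "lwip/tcpip.h"

/* Assumed to be provided by the port (standard netif_init_fn signature). */
extern err_t ethernetif_init(struct netif *netif);

static struct netif eth_netif;

/* Sketch (not part of the patch): add a netif with a static IPv4 configuration
 * from outside the tcpip_thread, then bring it up via the thread-safe wrappers. */
static void add_netif_example(void)
{
  ip4_addr_t ipaddr, netmask, gw;

  IP4_ADDR(&ipaddr, 192, 168, 1, 10);
  IP4_ADDR(&netmask, 255, 255, 255, 0);
  IP4_ADDR(&gw, 192, 168, 1, 1);

  if (netifapi_netif_add(&eth_netif, &ipaddr, &netmask, &gw,
                         NULL, ethernetif_init, tcpip_input) == ERR_OK) {
    netifapi_netif_set_up(&eth_netif);
  }
}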
+ * + * @note for params @see netif_set_addr() + */ +err_t +netifapi_netif_set_addr(struct netif *netif, + const ip4_addr_t *ipaddr, + const ip4_addr_t *netmask, + const ip4_addr_t *gw) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + NETIFAPI_VAR_REF(msg).netif = netif; + NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr); + NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask); + NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw); + err = tcpip_api_call(netifapi_do_netif_set_addr, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} +#endif /* LWIP_IPV4 */ + +/** + * call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) in a thread-safe + * way by running that function inside the tcpip_thread context. + * + * @note use only for functions where there is only "netif" parameter. + */ +err_t +netifapi_netif_common(struct netif *netif, netifapi_void_fn voidfunc, + netifapi_errt_fn errtfunc) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + NETIFAPI_VAR_REF(msg).netif = netif; + NETIFAPI_VAR_REF(msg).msg.common.voidfunc = voidfunc; + NETIFAPI_VAR_REF(msg).msg.common.errtfunc = errtfunc; + err = tcpip_api_call(netifapi_do_netif_common, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} + +/** +* @ingroup netifapi_netif +* Call netif_name_to_index() in a thread-safe way by running that function inside the +* tcpip_thread context. +* +* @param name the interface name of the netif +* @param idx output index of the found netif +*/ +err_t +netifapi_netif_name_to_index(const char *name, u8_t *idx) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + *idx = 0; + +#if LWIP_MPU_COMPATIBLE + strncpy(NETIFAPI_VAR_REF(msg).msg.ifs.name, name, NETIF_NAMESIZE - 1); + NETIFAPI_VAR_REF(msg).msg.ifs.name[NETIF_NAMESIZE - 1] = '\0'; +#else + NETIFAPI_VAR_REF(msg).msg.ifs.name = LWIP_CONST_CAST(char *, name); +#endif /* LWIP_MPU_COMPATIBLE */ + err = tcpip_api_call(netifapi_do_name_to_index, &API_VAR_REF(msg).call); + if (!err) { + *idx = NETIFAPI_VAR_REF(msg).msg.ifs.index; + } + NETIFAPI_VAR_FREE(msg); + return err; +} + +/** +* @ingroup netifapi_netif +* Call netif_index_to_name() in a thread-safe way by running that function inside the +* tcpip_thread context. +* +* @param idx the interface index of the netif +* @param name output name of the found netif, empty '\0' string if netif not found. 
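A round-trip sketch for netifapi_netif_name_to_index() above and netifapi_netif_index_to_name(), whose implementation follows just below; the interface name "st1" is a placeholder:

#include "lwip/netifapi.h"

/* Sketch (not part of the patch): map a netif name to its index and back again
 * from an application thread. "st1" is a placeholder interface name. */
static void netif_index_example(void)
{
  u8_t idx = 0;
  char name[NETIF_NAMESIZE];

  if ((netifapi_netif_name_to_index("st1", &idx) == ERR_OK) && (idx != 0)) {
    if (netifapi_netif_index_to_name(idx, name) == ERR_OK) {
      /* 'name' now holds "st1" again, or "" if the netif disappeared meanwhile */
    }
  }
}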
+* name should be of at least NETIF_NAMESIZE bytes +*/ +err_t +netifapi_netif_index_to_name(u8_t idx, char *name) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + NETIFAPI_VAR_REF(msg).msg.ifs.index = idx; +#if !LWIP_MPU_COMPATIBLE + NETIFAPI_VAR_REF(msg).msg.ifs.name = name; +#endif /* LWIP_MPU_COMPATIBLE */ + err = tcpip_api_call(netifapi_do_index_to_name, &API_VAR_REF(msg).call); +#if LWIP_MPU_COMPATIBLE + if (!err) { + strncpy(name, NETIFAPI_VAR_REF(msg).msg.ifs.name, NETIF_NAMESIZE - 1); + name[NETIF_NAMESIZE - 1] = '\0'; + } +#endif /* LWIP_MPU_COMPATIBLE */ + NETIFAPI_VAR_FREE(msg); + return err; +} + +#endif /* LWIP_NETIF_API */ diff --git a/Middlewares/Third_Party/LwIP/src/api/sockets.c b/Middlewares/Third_Party/LwIP/src/api/sockets.c new file mode 100644 index 00000000..cb7df914 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/sockets.c @@ -0,0 +1,4160 @@ +/** + * @file + * Sockets BSD-Like API module + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + * Improved by Marc Boucher and David Haas + * + */ + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/sockets.h" +#include "lwip/priv/sockets_priv.h" +#include "lwip/api.h" +#include "lwip/igmp.h" +#include "lwip/inet.h" +#include "lwip/tcp.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/memp.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/mld6.h" +#if LWIP_CHECKSUM_ON_COPY +#include "lwip/inet_chksum.h" +#endif + +#if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES +#include +#endif + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/* If the netconn API is not required publicly, then we include the necessary + files here to get the implementation */ +#if !LWIP_NETCONN +#undef LWIP_NETCONN +#define LWIP_NETCONN 1 +#include "api_msg.c" +#include "api_lib.c" +#include "netbuf.c" +#undef LWIP_NETCONN +#define LWIP_NETCONN 0 +#endif + +#define API_SELECT_CB_VAR_REF(name) API_VAR_REF(name) +#define API_SELECT_CB_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_select_cb, name) +#define API_SELECT_CB_VAR_ALLOC(name, retblock) API_VAR_ALLOC_EXT(struct lwip_select_cb, MEMP_SELECT_CB, name, retblock) +#define API_SELECT_CB_VAR_FREE(name) API_VAR_FREE(MEMP_SELECT_CB, name) + +#if LWIP_IPV4 +#define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \ + (sin)->sin_len = sizeof(struct sockaddr_in); \ + (sin)->sin_family = AF_INET; \ + (sin)->sin_port = lwip_htons((port)); \ + inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \ + memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0) +#define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \ + inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \ + (port) = lwip_ntohs((sin)->sin_port); }while(0) +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +#define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \ + (sin6)->sin6_len = sizeof(struct sockaddr_in6); \ + (sin6)->sin6_family = AF_INET6; \ + (sin6)->sin6_port = lwip_htons((port)); \ + (sin6)->sin6_flowinfo = 0; \ + inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \ + (sin6)->sin6_scope_id = ip6_addr_zone(ipaddr); }while(0) +#define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \ + inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \ + if (ip6_addr_has_scope(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { \ + ip6_addr_set_zone(ip_2_ip6(ipaddr), (u8_t)((sin6)->sin6_scope_id)); \ + } \ + (port) = lwip_ntohs((sin6)->sin6_port); }while(0) +#endif /* LWIP_IPV6 */ + +#if LWIP_IPV4 && LWIP_IPV6 +static void sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port); + +#define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) == sizeof(struct sockaddr_in)) || \ + ((namelen) == sizeof(struct sockaddr_in6))) +#define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \ + ((name)->sa_family == AF_INET6)) +#define SOCK_ADDR_TYPE_MATCH(name, sock) \ + ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \ + (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type)))) +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \ + if (IP_IS_ANY_TYPE_VAL(*ipaddr) || IP_IS_V6_VAL(*ipaddr)) { \ + IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \ + } else { \ + IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \ + } } while(0) +#define 
SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port)) +#define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \ + (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6)) +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ +#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6)) +#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6) +#define SOCK_ADDR_TYPE_MATCH(name, sock) 1 +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \ + IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port) +#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \ + SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port) +#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type) +#else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */ +#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in)) +#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET) +#define SOCK_ADDR_TYPE_MATCH(name, sock) 1 +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \ + IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port) +#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \ + SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port) +#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type) +#endif /* LWIP_IPV6 */ + +#define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \ + IS_SOCK_ADDR_TYPE_VALID(name)) +#define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \ + SOCK_ADDR_TYPE_MATCH(name, sock)) +#define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % 4) == 0) + + +#define LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype) do { if ((optlen) < sizeof(opttype)) { done_socket(sock); return EINVAL; }}while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \ + if ((sock)->conn == NULL) { done_socket(sock); return EINVAL; } }while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \ + if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { done_socket(sock); return EINVAL; } }while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \ + if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { done_socket(sock); return ENOPROTOOPT; } }while(0) + + +#define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name) +#define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name) +#define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name) +#if LWIP_MPU_COMPATIBLE +#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \ + name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \ + if (name == NULL) { \ + sock_set_errno(sock, ENOMEM); \ + done_socket(sock); \ + return -1; \ + } }while(0) +#else /* LWIP_MPU_COMPATIBLE */ +#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) +#endif /* LWIP_MPU_COMPATIBLE */ + +#if LWIP_SO_SNDRCVTIMEO_NONSTANDARD +#define LWIP_SO_SNDRCVTIMEO_OPTTYPE int +#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val)) +#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) 
((long)*(const int*)(optval)) +#else +#define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval +#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \ + u32_t loc = (val); \ + ((struct timeval *)(optval))->tv_sec = (long)((loc) / 1000U); \ + ((struct timeval *)(optval))->tv_usec = (long)(((loc) % 1000U) * 1000U); }while(0) +#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000) + (((const struct timeval *)(optval))->tv_usec / 1000)) +#endif + + +/** A struct sockaddr replacement that has the same alignment as sockaddr_in/ + * sockaddr_in6 if instantiated. + */ +union sockaddr_aligned { + struct sockaddr sa; +#if LWIP_IPV6 + struct sockaddr_in6 sin6; +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + struct sockaddr_in sin; +#endif /* LWIP_IPV4 */ +}; + +/* Define the number of IPv4 multicast memberships, default is one per socket */ +#ifndef LWIP_SOCKET_MAX_MEMBERSHIPS +#define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS +#endif + +#if LWIP_IGMP +/* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when + a socket is closed */ +struct lwip_socket_multicast_pair { + /** the socket */ + struct lwip_sock *sock; + /** the interface address */ + ip4_addr_t if_addr; + /** the group address */ + ip4_addr_t multi_addr; +}; + +static struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS]; + +static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr); +static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr); +static void lwip_socket_drop_registered_memberships(int s); +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6_MLD +/* This is to keep track of IP_JOIN_GROUP calls to drop the membership when + a socket is closed */ +struct lwip_socket_multicast_mld6_pair { + /** the socket */ + struct lwip_sock *sock; + /** the interface index */ + u8_t if_idx; + /** the group address */ + ip6_addr_t multi_addr; +}; + +static struct lwip_socket_multicast_mld6_pair socket_ipv6_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS]; + +static int lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr); +static void lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr); +static void lwip_socket_drop_registered_mld6_memberships(int s); +#endif /* LWIP_IPV6_MLD */ + +/** The global array of available sockets */ +static struct lwip_sock sockets[NUM_SOCKETS]; + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +#if LWIP_TCPIP_CORE_LOCKING +/* protect the select_cb_list using core lock */ +#define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) +#define LWIP_SOCKET_SELECT_PROTECT(lev) LOCK_TCPIP_CORE() +#define LWIP_SOCKET_SELECT_UNPROTECT(lev) UNLOCK_TCPIP_CORE() +#else /* LWIP_TCPIP_CORE_LOCKING */ +/* protect the select_cb_list using SYS_LIGHTWEIGHT_PROT */ +#define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev) +#define LWIP_SOCKET_SELECT_PROTECT(lev) SYS_ARCH_PROTECT(lev) +#define LWIP_SOCKET_SELECT_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev) +/** This counter is increased from lwip_select when the list is changed + and checked in select_check_waiters to see if it has changed. 
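A small sketch of what the LWIP_SO_SNDRCVTIMEO_* option-type selection earlier in this file means for applications, assuming LWIP_SO_RCVTIMEO is enabled in lwipopts.h:

#include "lwip/sockets.h"

/* Sketch (not part of the patch): set a 2 s receive timeout. With the default
 * LWIP_SO_SNDRCVTIMEO_NONSTANDARD == 0 the option value is a struct timeval
 * (BSD style); with the non-standard variant it is an int in milliseconds. */
static void set_rcvtimeo_example(int s)
{
#if LWIP_SO_SNDRCVTIMEO_NONSTANDARD
  int timeout_ms = 2000;
  lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &timeout_ms, sizeof(timeout_ms));
#else
  struct timeval tv;
  tv.tv_sec = 2;
  tv.tv_usec = 0;
  lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
#endif
}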
*/ +static volatile int select_cb_ctr; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +/** The global list of tasks waiting for select */ +static struct lwip_select_cb *select_cb_list; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +#define sock_set_errno(sk, e) do { \ + const int sockerr = (e); \ + set_errno(sockerr); \ +} while (0) + +/* Forward declaration of some functions */ +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len); +#define DEFAULT_SOCKET_EVENTCB event_callback +static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent); +#else +#define DEFAULT_SOCKET_EVENTCB NULL +#endif +#if !LWIP_TCPIP_CORE_LOCKING +static void lwip_getsockopt_callback(void *arg); +static void lwip_setsockopt_callback(void *arg); +#endif +static int lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen); +static int lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen); +static int free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn, + union lwip_sock_lastdata *lastdata); +static void free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata); + +#if LWIP_IPV4 && LWIP_IPV6 +static void +sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port) +{ + if ((sockaddr->sa_family) == AF_INET6) { + SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6 *)(const void *)(sockaddr), ipaddr, *port); + ipaddr->type = IPADDR_TYPE_V6; + } else { + SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in *)(const void *)(sockaddr), ipaddr, *port); + ipaddr->type = IPADDR_TYPE_V4; + } +} +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +/** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */ +void +lwip_socket_thread_init(void) +{ + netconn_thread_init(); +} + +/** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */ +void +lwip_socket_thread_cleanup(void) +{ + netconn_thread_cleanup(); +} + +#if LWIP_NETCONN_FULLDUPLEX +/* Thread-safe increment of sock->fd_used, with overflow check */ +static int +sock_inc_used(struct lwip_sock *sock) +{ + int ret; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("sock != NULL", sock != NULL); + + SYS_ARCH_PROTECT(lev); + if (sock->fd_free_pending) { + /* prevent new usage of this socket if free is pending */ + ret = 0; + } else { + ++sock->fd_used; + ret = 1; + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + } + SYS_ARCH_UNPROTECT(lev); + return ret; +} + +/* Like sock_inc_used(), but called under SYS_ARCH_PROTECT lock. */ +static int +sock_inc_used_locked(struct lwip_sock *sock) +{ + LWIP_ASSERT("sock != NULL", sock != NULL); + + if (sock->fd_free_pending) { + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + return 0; + } + + ++sock->fd_used; + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + return 1; +} + +/* In full-duplex mode,sock->fd_used != 0 prevents a socket descriptor from being + * released (and possibly reused) when used from more than one thread + * (e.g. read-while-write or close-while-write, etc) + * This function is called at the end of functions using (try)get_socket*(). 
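A sketch of the per-thread setup implied by lwip_socket_thread_init()/lwip_socket_thread_cleanup() above, relevant when LWIP_NETCONN_SEM_PER_THREAD is enabled; the thread body is purely illustrative:

#include "lwip/sockets.h"

/* Sketch (not part of the patch): a worker thread that uses the socket API.
 * With LWIP_NETCONN_SEM_PER_THREAD == 1 it sets up and tears down its
 * per-thread semaphore itself (unless the port already does this in its
 * thread hooks). */
static void socket_worker_thread(void *arg)
{
  LWIP_UNUSED_ARG(arg);

  lwip_socket_thread_init();
  /* ... blocking lwip_recv()/lwip_send() calls ... */
  lwip_socket_thread_cleanup();
}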
+ */ +static void +done_socket(struct lwip_sock *sock) +{ + int freed = 0; + int is_tcp = 0; + struct netconn *conn = NULL; + union lwip_sock_lastdata lastdata; + SYS_ARCH_DECL_PROTECT(lev); + LWIP_ASSERT("sock != NULL", sock != NULL); + + SYS_ARCH_PROTECT(lev); + LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0); + if (--sock->fd_used == 0) { + if (sock->fd_free_pending) { + /* free the socket */ + sock->fd_used = 1; + is_tcp = sock->fd_free_pending & LWIP_SOCK_FD_FREE_TCP; + freed = free_socket_locked(sock, is_tcp, &conn, &lastdata); + } + } + SYS_ARCH_UNPROTECT(lev); + + if (freed) { + free_socket_free_elements(is_tcp, conn, &lastdata); + } +} + +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define sock_inc_used(sock) 1 +#define sock_inc_used_locked(sock) 1 +#define done_socket(sock) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +/* Translate a socket 'int' into a pointer (only fails if the index is invalid) */ +static struct lwip_sock * +tryget_socket_unconn_nouse(int fd) +{ + int s = fd - LWIP_SOCKET_OFFSET; + if ((s < 0) || (s >= NUM_SOCKETS)) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("tryget_socket_unconn(%d): invalid\n", fd)); + return NULL; + } + return &sockets[s]; +} + +struct lwip_sock * +lwip_socket_dbg_get_socket(int fd) +{ + return tryget_socket_unconn_nouse(fd); +} + +/* Translate a socket 'int' into a pointer (only fails if the index is invalid) */ +static struct lwip_sock * +tryget_socket_unconn(int fd) +{ + struct lwip_sock *ret = tryget_socket_unconn_nouse(fd); + if (ret != NULL) { + if (!sock_inc_used(ret)) { + return NULL; + } + } + return ret; +} + +/* Like tryget_socket_unconn(), but called under SYS_ARCH_PROTECT lock. */ +static struct lwip_sock * +tryget_socket_unconn_locked(int fd) +{ + struct lwip_sock *ret = tryget_socket_unconn_nouse(fd); + if (ret != NULL) { + if (!sock_inc_used_locked(ret)) { + return NULL; + } + } + return ret; +} + +/** + * Same as get_socket but doesn't set errno + * + * @param fd externally used socket index + * @return struct lwip_sock for the socket or NULL if not found + */ +static struct lwip_sock * +tryget_socket(int fd) +{ + struct lwip_sock *sock = tryget_socket_unconn(fd); + if (sock != NULL) { + if (sock->conn) { + return sock; + } + done_socket(sock); + } + return NULL; +} + +/** + * Map a externally used socket index to the internal socket representation. + * + * @param fd externally used socket index + * @return struct lwip_sock for the socket or NULL if not found + */ +static struct lwip_sock * +get_socket(int fd) +{ + struct lwip_sock *sock = tryget_socket(fd); + if (!sock) { + if ((fd < LWIP_SOCKET_OFFSET) || (fd >= (LWIP_SOCKET_OFFSET + NUM_SOCKETS))) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", fd)); + } + set_errno(EBADF); + return NULL; + } + return sock; +} + +/** + * Allocate a new socket for a given netconn. 
+ * + * @param newconn the netconn for which to allocate a socket + * @param accepted 1 if socket has been created by accept(), + * 0 if socket has been created by socket() + * @return the index of the new socket; -1 on error + */ +static int +alloc_socket(struct netconn *newconn, int accepted) +{ + int i; + SYS_ARCH_DECL_PROTECT(lev); + LWIP_UNUSED_ARG(accepted); + + /* allocate a new socket identifier */ + for (i = 0; i < NUM_SOCKETS; ++i) { + /* Protect socket array */ + SYS_ARCH_PROTECT(lev); + if (!sockets[i].conn) { +#if LWIP_NETCONN_FULLDUPLEX + if (sockets[i].fd_used) { + SYS_ARCH_UNPROTECT(lev); + continue; + } + sockets[i].fd_used = 1; + sockets[i].fd_free_pending = 0; +#endif + sockets[i].conn = newconn; + /* The socket is not yet known to anyone, so no need to protect + after having marked it as used. */ + SYS_ARCH_UNPROTECT(lev); + sockets[i].lastdata.pbuf = NULL; +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL + LWIP_ASSERT("sockets[i].select_waiting == 0", sockets[i].select_waiting == 0); + sockets[i].rcvevent = 0; + /* TCP sendbuf is empty, but the socket is not yet writable until connected + * (unless it has been created by accept()). */ + sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1); + sockets[i].errevent = 0; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + return i + LWIP_SOCKET_OFFSET; + } + SYS_ARCH_UNPROTECT(lev); + } + return -1; +} + +/** Free a socket (under lock) + * + * @param sock the socket to free + * @param is_tcp != 0 for TCP sockets, used to free lastdata + * @param conn the socekt's netconn is stored here, must be freed externally + * @param lastdata lastdata is stored here, must be freed externally + */ +static int +free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn, + union lwip_sock_lastdata *lastdata) +{ +#if LWIP_NETCONN_FULLDUPLEX + LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0); + sock->fd_used--; + if (sock->fd_used > 0) { + sock->fd_free_pending = LWIP_SOCK_FD_FREE_FREE | (is_tcp ? LWIP_SOCK_FD_FREE_TCP : 0); + return 0; + } +#else /* LWIP_NETCONN_FULLDUPLEX */ + LWIP_UNUSED_ARG(is_tcp); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + *lastdata = sock->lastdata; + sock->lastdata.pbuf = NULL; + *conn = sock->conn; + sock->conn = NULL; + return 1; +} + +/** Free a socket's leftover members. + */ +static void +free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata) +{ + if (lastdata->pbuf != NULL) { + if (is_tcp) { + pbuf_free(lastdata->pbuf); + } else { + netbuf_delete(lastdata->netbuf); + } + } + if (conn != NULL) { + /* netconn_prepare_delete() has already been called, here we only free the conn */ + netconn_delete(conn); + } +} + +/** Free a socket. The socket's netconn must have been + * delete before! + * + * @param sock the socket to free + * @param is_tcp != 0 for TCP sockets, used to free lastdata + */ +static void +free_socket(struct lwip_sock *sock, int is_tcp) +{ + int freed; + struct netconn *conn; + union lwip_sock_lastdata lastdata; + SYS_ARCH_DECL_PROTECT(lev); + + /* Protect socket array */ + SYS_ARCH_PROTECT(lev); + + freed = free_socket_locked(sock, is_tcp, &conn, &lastdata); + SYS_ARCH_UNPROTECT(lev); + /* don't use 'sock' after this line, as another task might have allocated it */ + + if (freed) { + free_socket_free_elements(is_tcp, conn, &lastdata); + } +} + +/* Below this, the well-known socket functions are implemented. 
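A compact sketch using only the well-known functions implemented below (socket, bind, listen, accept, close); port 7 and the single-shot accept are illustrative assumptions:

#include <string.h>
#include "lwip/sockets.h"

/* Sketch (not part of the patch): a minimal TCP listener that accepts one
 * connection and closes everything again. */
static void tcp_listen_example(void)
{
  int listener, conn;
  struct sockaddr_in local;
  struct sockaddr_storage remote;
  socklen_t remote_len = sizeof(remote);

  listener = lwip_socket(AF_INET, SOCK_STREAM, 0);
  if (listener < 0) {
    return;
  }

  memset(&local, 0, sizeof(local));
  local.sin_family = AF_INET;
  local.sin_len = sizeof(local);
  local.sin_port = lwip_htons(7);
  local.sin_addr.s_addr = lwip_htonl(INADDR_ANY);

  if ((lwip_bind(listener, (struct sockaddr *)&local, sizeof(local)) == 0) &&
      (lwip_listen(listener, 1) == 0)) {
    conn = lwip_accept(listener, (struct sockaddr *)&remote, &remote_len);
    if (conn >= 0) {
      /* ... lwip_recv()/lwip_send() on 'conn' ... */
      lwip_close(conn);
    }
  }
  lwip_close(listener);
}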
+ * Use google.com or opengroup.org to get a good description :-)
+ *
+ * Exceptions are documented!
+ */
+
+int
+lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
+{
+  struct lwip_sock *sock, *nsock;
+  struct netconn *newconn;
+  ip_addr_t naddr;
+  u16_t port = 0;
+  int newsock;
+  err_t err;
+  int recvevent;
+  SYS_ARCH_DECL_PROTECT(lev);
+
+  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s));
+  sock = get_socket(s);
+  if (!sock) {
+    return -1;
+  }
+
+  /* wait for a new connection */
+  err = netconn_accept(sock->conn, &newconn);
+  if (err != ERR_OK) {
+    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_accept failed, err=%d\n", s, err));
+    if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
+      sock_set_errno(sock, EOPNOTSUPP);
+    } else if (err == ERR_CLSD) {
+      sock_set_errno(sock, EINVAL);
+    } else {
+      sock_set_errno(sock, err_to_errno(err));
+    }
+    done_socket(sock);
+    return -1;
+  }
+  LWIP_ASSERT("newconn != NULL", newconn != NULL);
+
+  newsock = alloc_socket(newconn, 1);
+  if (newsock == -1) {
+    netconn_delete(newconn);
+    sock_set_errno(sock, ENFILE);
+    done_socket(sock);
+    return -1;
+  }
+  LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET));
+  nsock = &sockets[newsock - LWIP_SOCKET_OFFSET];
+
+  /* See event_callback: data may come in right away after an accept, even
+   * though the server task might not have created a new socket yet.
+   * In that case, newconn->socket is counted down (newconn->socket--),
+   * so nsock->rcvevent is >= 1 here!
+   */
+  SYS_ARCH_PROTECT(lev);
+  recvevent = (s16_t)(-1 - newconn->socket);
+  newconn->socket = newsock;
+  SYS_ARCH_UNPROTECT(lev);
+
+  if (newconn->callback) {
+    LOCK_TCPIP_CORE();
+    while (recvevent > 0) {
+      recvevent--;
+      newconn->callback(newconn, NETCONN_EVT_RCVPLUS, 0);
+    }
+    UNLOCK_TCPIP_CORE();
+  }
+
+  /* Note that POSIX only requires us to check addr is non-NULL. addrlen must
+   * not be NULL if addr is valid.
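+   * Callers that do not need the peer address may pass NULL for both, in
+   * which case the address copy below is skipped.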
+ */ + if ((addr != NULL) && (addrlen != NULL)) { + union sockaddr_aligned tempaddr; + /* get the IP address and port of the remote host */ + err = netconn_peer(newconn, &naddr, &port); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err)); + netconn_delete(newconn); + free_socket(nsock, 1); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port); + if (*addrlen > tempaddr.sa.sa_len) { + *addrlen = tempaddr.sa.sa_len; + } + MEMCPY(addr, &tempaddr, *addrlen); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock)); + ip_addr_debug_print_val(SOCKETS_DEBUG, naddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port)); + } else { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d", s, newsock)); + } + + sock_set_errno(sock, 0); + done_socket(sock); + done_socket(nsock); + return newsock; +} + +int +lwip_bind(int s, const struct sockaddr *name, socklen_t namelen) +{ + struct lwip_sock *sock; + ip_addr_t local_addr; + u16_t local_port; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (!SOCK_ADDR_TYPE_MATCH(name, sock)) { + /* sockaddr does not match socket type (IPv4/IPv6) */ + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + + /* check size, family and alignment of 'name' */ + LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) && + IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_UNUSED_ARG(namelen); + + SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port)); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr)); + IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + err = netconn_bind(sock->conn, &local_addr, local_port); + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +int +lwip_close(int s) +{ + struct lwip_sock *sock; + int is_tcp = 0; + err_t err; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (sock->conn != NULL) { + is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP; + } else { + LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata.pbuf == NULL); + } + +#if LWIP_IGMP + /* drop all possibly joined IGMP memberships */ + lwip_socket_drop_registered_memberships(s); +#endif /* LWIP_IGMP */ +#if LWIP_IPV6_MLD + /* drop all possibly joined MLD6 memberships */ + lwip_socket_drop_registered_mld6_memberships(s); +#endif /* LWIP_IPV6_MLD */ + + err = netconn_prepare_delete(sock->conn); + if (err != ERR_OK) { + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + free_socket(sock, is_tcp); + set_errno(0); + return 0; +} + +int +lwip_connect(int s, const 
struct sockaddr *name, socklen_t namelen) +{ + struct lwip_sock *sock; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) { + /* sockaddr does not match socket type (IPv4/IPv6) */ + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + + LWIP_UNUSED_ARG(namelen); + if (name->sa_family == AF_UNSPEC) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s)); + err = netconn_disconnect(sock->conn); + } else { + ip_addr_t remote_addr; + u16_t remote_port; + + /* check size, family and alignment of 'name' */ + LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) && + IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + + SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port)); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr)); + IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + err = netconn_connect(sock->conn, &remote_addr, remote_port); + } + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +/** + * Set a socket into listen mode. + * The socket may not have been used for another connection previously. + * + * @param s the socket to set to listening mode + * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1) + * @return 0 on success, non-zero on failure + */ +int +lwip_listen(int s, int backlog) +{ + struct lwip_sock *sock; + err_t err; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* limit the "backlog" parameter to fit in an u8_t */ + backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff); + + err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog); + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err)); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + } else { + sock_set_errno(sock, err_to_errno(err)); + } + done_socket(sock); + return -1; + } + + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +#if LWIP_TCP +/* Helper function to loop over receiving pbufs from netconn + * until "len" bytes are received or we're otherwise done. + * Keeps sock->lastdata for peeking or partly copying. + */ +static ssize_t +lwip_recv_tcp(struct lwip_sock *sock, void *mem, size_t len, int flags) +{ + u8_t apiflags = NETCONN_NOAUTORCVD; + ssize_t recvd = 0; + ssize_t recv_left = (len <= SSIZE_MAX) ? 
(ssize_t)len : SSIZE_MAX; + + LWIP_ASSERT("no socket given", sock != NULL); + LWIP_ASSERT("this should be checked internally", NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP); + + if (flags & MSG_DONTWAIT) { + apiflags |= NETCONN_DONTBLOCK; + } + + do { + struct pbuf *p; + err_t err; + u16_t copylen; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: top while sock->lastdata=%p\n", (void *)sock->lastdata.pbuf)); + /* Check if there is data left from the last recv operation. */ + if (sock->lastdata.pbuf) { + p = sock->lastdata.pbuf; + } else { + /* No data was left from the previous operation, so we try to get + some from the network. */ + err = netconn_recv_tcp_pbuf_flags(sock->conn, &p, apiflags); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: netconn_recv err=%d, pbuf=%p\n", + err, (void *)p)); + + if (err != ERR_OK) { + if (recvd > 0) { + /* already received data, return that (this trusts in getting the same error from + netconn layer again next time netconn_recv is called) */ + goto lwip_recv_tcp_done; + } + /* We should really do some error checking here. */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: p == NULL, error is \"%s\"!\n", + lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + if (err == ERR_CLSD) { + return 0; + } else { + return -1; + } + } + LWIP_ASSERT("p != NULL", p != NULL); + sock->lastdata.pbuf = p; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: buflen=%"U16_F" recv_left=%d off=%d\n", + p->tot_len, (int)recv_left, (int)recvd)); + + if (recv_left > p->tot_len) { + copylen = p->tot_len; + } else { + copylen = (u16_t)recv_left; + } + if (recvd + copylen < recvd) { + /* overflow */ + copylen = (u16_t)(SSIZE_MAX - recvd); + } + + /* copy the contents of the received buffer into + the supplied memory pointer mem */ + pbuf_copy_partial(p, (u8_t *)mem + recvd, copylen, 0); + + recvd += copylen; + + /* TCP combines multiple pbufs for one recv */ + LWIP_ASSERT("invalid copylen, len would underflow", recv_left >= copylen); + recv_left -= copylen; + + /* Unless we peek the incoming message... */ + if ((flags & MSG_PEEK) == 0) { + /* ... check if there is data left in the pbuf */ + LWIP_ASSERT("invalid copylen", p->tot_len >= copylen); + if (p->tot_len - copylen > 0) { + /* If so, it should be saved in the sock structure for the next recv call. + We store the pbuf but hide/free the consumed data: */ + sock->lastdata.pbuf = pbuf_free_header(p, copylen); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: lastdata now pbuf=%p\n", (void *)sock->lastdata.pbuf)); + } else { + sock->lastdata.pbuf = NULL; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: deleting pbuf=%p\n", (void *)p)); + pbuf_free(p); + } + } + /* once we have some data to return, only add more if we don't need to wait */ + apiflags |= NETCONN_DONTBLOCK | NETCONN_NOFIN; + /* @todo: do we need to support peeking more than one pbuf? 
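+     * Right now MSG_PEEK terminates the loop after its first iteration (see
+     * the loop condition below), so a peek only returns data that is already
+     * available in the buffer at the head of the receive queue.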
*/ + } while ((recv_left > 0) && !(flags & MSG_PEEK)); +lwip_recv_tcp_done: + if ((recvd > 0) && !(flags & MSG_PEEK)) { + /* ensure window update after copying all data */ + netconn_tcp_recvd(sock->conn, (size_t)recvd); + } + sock_set_errno(sock, 0); + return recvd; +} +#endif + +/* Convert a netbuf's address data to struct sockaddr */ +static int +lwip_sock_make_addr(struct netconn *conn, ip_addr_t *fromaddr, u16_t port, + struct sockaddr *from, socklen_t *fromlen) +{ + int truncated = 0; + union sockaddr_aligned saddr; + + LWIP_UNUSED_ARG(conn); + + LWIP_ASSERT("fromaddr != NULL", fromaddr != NULL); + LWIP_ASSERT("from != NULL", from != NULL); + LWIP_ASSERT("fromlen != NULL", fromlen != NULL); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */ + if (NETCONNTYPE_ISIPV6(netconn_type(conn)) && IP_IS_V4(fromaddr)) { + ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr)); + IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port); + if (*fromlen < saddr.sa.sa_len) { + truncated = 1; + } else if (*fromlen > saddr.sa.sa_len) { + *fromlen = saddr.sa.sa_len; + } + MEMCPY(from, &saddr, *fromlen); + return truncated; +} + +#if LWIP_TCP +/* Helper function to get a tcp socket's remote address info */ +static int +lwip_recv_tcp_from(struct lwip_sock *sock, struct sockaddr *from, socklen_t *fromlen, const char *dbg_fn, int dbg_s, ssize_t dbg_ret) +{ + if (sock == NULL) { + return 0; + } + LWIP_UNUSED_ARG(dbg_fn); + LWIP_UNUSED_ARG(dbg_s); + LWIP_UNUSED_ARG(dbg_ret); + +#if !SOCKETS_DEBUG + if (from && fromlen) +#endif /* !SOCKETS_DEBUG */ + { + /* get remote addr/port from tcp_pcb */ + u16_t port; + ip_addr_t tmpaddr; + netconn_getaddr(sock->conn, &tmpaddr, &port, 0); + LWIP_DEBUGF(SOCKETS_DEBUG, ("%s(%d): addr=", dbg_fn, dbg_s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, tmpaddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, (int)dbg_ret)); + if (from && fromlen) { + return lwip_sock_make_addr(sock->conn, &tmpaddr, port, from, fromlen); + } + } + return 0; +} +#endif + +/* Helper function to receive a netbuf from a udp or raw netconn. + * Keeps sock->lastdata for peeking. + */ +static err_t +lwip_recvfrom_udp_raw(struct lwip_sock *sock, int flags, struct msghdr *msg, u16_t *datagram_len, int dbg_s) +{ + struct netbuf *buf; + u8_t apiflags; + err_t err; + u16_t buflen, copylen, copied; + int i; + + LWIP_UNUSED_ARG(dbg_s); + LWIP_ERROR("lwip_recvfrom_udp_raw: invalid arguments", (msg->msg_iov != NULL) || (msg->msg_iovlen <= 0), return ERR_ARG;); + + if (flags & MSG_DONTWAIT) { + apiflags = NETCONN_DONTBLOCK; + } else { + apiflags = 0; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: top sock->lastdata=%p\n", (void *)sock->lastdata.netbuf)); + /* Check if there is data left from the last recv operation. */ + buf = sock->lastdata.netbuf; + if (buf == NULL) { + /* No data was left from the previous operation, so we try to get + some from the network. 
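+       (With MSG_PEEK the netbuf is kept in sock->lastdata.netbuf at the end
+       of this function, so the same datagram is seen again by the next call.)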
*/ + err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &buf, apiflags); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: netconn_recv err=%d, netbuf=%p\n", + err, (void *)buf)); + + if (err != ERR_OK) { + return err; + } + LWIP_ASSERT("buf != NULL", buf != NULL); + sock->lastdata.netbuf = buf; + } + buflen = buf->p->tot_len; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw: buflen=%"U16_F"\n", buflen)); + + copied = 0; + /* copy the pbuf payload into the iovs */ + for (i = 0; (i < msg->msg_iovlen) && (copied < buflen); i++) { + u16_t len_left = (u16_t)(buflen - copied); + if (msg->msg_iov[i].iov_len > len_left) { + copylen = len_left; + } else { + copylen = (u16_t)msg->msg_iov[i].iov_len; + } + + /* copy the contents of the received buffer into + the supplied memory buffer */ + pbuf_copy_partial(buf->p, (u8_t *)msg->msg_iov[i].iov_base, copylen, copied); + copied = (u16_t)(copied + copylen); + } + + /* Check to see from where the data was.*/ +#if !SOCKETS_DEBUG + if (msg->msg_name && msg->msg_namelen) +#endif /* !SOCKETS_DEBUG */ + { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw(%d): addr=", dbg_s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, *netbuf_fromaddr(buf)); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", netbuf_fromport(buf), copied)); + if (msg->msg_name && msg->msg_namelen) { + lwip_sock_make_addr(sock->conn, netbuf_fromaddr(buf), netbuf_fromport(buf), + (struct sockaddr *)msg->msg_name, &msg->msg_namelen); + } + } + + /* Initialize flag output */ + msg->msg_flags = 0; + + if (msg->msg_control) { + u8_t wrote_msg = 0; +#if LWIP_NETBUF_RECVINFO + /* Check if packet info was recorded */ + if (buf->flags & NETBUF_FLAG_DESTADDR) { + if (IP_IS_V4(&buf->toaddr)) { +#if LWIP_IPV4 + if (msg->msg_controllen >= CMSG_SPACE(sizeof(struct in_pktinfo))) { + struct cmsghdr *chdr = CMSG_FIRSTHDR(msg); /* This will always return a header!! 
*/ + struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(chdr); + chdr->cmsg_level = IPPROTO_IP; + chdr->cmsg_type = IP_PKTINFO; + chdr->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo)); + pkti->ipi_ifindex = buf->p->if_idx; + inet_addr_from_ip4addr(&pkti->ipi_addr, ip_2_ip4(netbuf_destaddr(buf))); + msg->msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo)); + wrote_msg = 1; + } else { + msg->msg_flags |= MSG_CTRUNC; + } +#endif /* LWIP_IPV4 */ + } + } +#endif /* LWIP_NETBUF_RECVINFO */ + + if (!wrote_msg) { + msg->msg_controllen = 0; + } + } + + /* If we don't peek the incoming message: zero lastdata pointer and free the netbuf */ + if ((flags & MSG_PEEK) == 0) { + sock->lastdata.netbuf = NULL; + netbuf_delete(buf); + } + if (datagram_len) { + *datagram_len = buflen; + } + return ERR_OK; +} + +ssize_t +lwip_recvfrom(int s, void *mem, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen) +{ + struct lwip_sock *sock; + ssize_t ret; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags)); + sock = get_socket(s); + if (!sock) { + return -1; + } +#if LWIP_TCP + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + ret = lwip_recv_tcp(sock, mem, len, flags); + lwip_recv_tcp_from(sock, from, fromlen, "lwip_recvfrom", s, ret); + done_socket(sock); + return ret; + } else +#endif + { + u16_t datagram_len = 0; + struct iovec vec; + struct msghdr msg; + err_t err; + vec.iov_base = mem; + vec.iov_len = len; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_name = from; + msg.msg_namelen = (fromlen ? *fromlen : 0); + err = lwip_recvfrom_udp_raw(sock, flags, &msg, &datagram_len, s); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n", + s, lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + ret = (ssize_t)LWIP_MIN(LWIP_MIN(len, datagram_len), SSIZE_MAX); + if (fromlen) { + *fromlen = msg.msg_namelen; + } + } + + sock_set_errno(sock, 0); + done_socket(sock); + return ret; +} + +ssize_t +lwip_read(int s, void *mem, size_t len) +{ + return lwip_recvfrom(s, mem, len, 0, NULL, NULL); +} + +ssize_t +lwip_readv(int s, const struct iovec *iov, int iovcnt) +{ + struct msghdr msg; + + msg.msg_name = NULL; + msg.msg_namelen = 0; + /* Hack: we have to cast via number to cast from 'const' pointer to non-const. + Blame the opengroup standard for this inconsistency. 
*/ + msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov); + msg.msg_iovlen = iovcnt; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + return lwip_recvmsg(s, &msg, 0); +} + +ssize_t +lwip_recv(int s, void *mem, size_t len, int flags) +{ + return lwip_recvfrom(s, mem, len, flags, NULL, NULL); +} + +ssize_t +lwip_recvmsg(int s, struct msghdr *message, int flags) +{ + struct lwip_sock *sock; + int i; + ssize_t buflen; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg(%d, message=%p, flags=0x%x)\n", s, (void *)message, flags)); + LWIP_ERROR("lwip_recvmsg: invalid message pointer", message != NULL, return ERR_ARG;); + LWIP_ERROR("lwip_recvmsg: unsupported flags", (flags & ~(MSG_PEEK|MSG_DONTWAIT)) == 0, + set_errno(EOPNOTSUPP); return -1;); + + if ((message->msg_iovlen <= 0) || (message->msg_iovlen > IOV_MAX)) { + set_errno(EMSGSIZE); + return -1; + } + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* check for valid vectors */ + buflen = 0; + for (i = 0; i < message->msg_iovlen; i++) { + if ((message->msg_iov[i].iov_base == NULL) || ((ssize_t)message->msg_iov[i].iov_len <= 0) || + ((size_t)(ssize_t)message->msg_iov[i].iov_len != message->msg_iov[i].iov_len) || + ((ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len) <= 0)) { + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + buflen = (ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len); + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + int recv_flags = flags; + message->msg_flags = 0; + /* recv the data */ + buflen = 0; + for (i = 0; i < message->msg_iovlen; i++) { + /* try to receive into this vector's buffer */ + ssize_t recvd_local = lwip_recv_tcp(sock, message->msg_iov[i].iov_base, message->msg_iov[i].iov_len, recv_flags); + if (recvd_local > 0) { + /* sum up received bytes */ + buflen += recvd_local; + } + if ((recvd_local < 0) || (recvd_local < (int)message->msg_iov[i].iov_len) || + (flags & MSG_PEEK)) { + /* returned prematurely (or peeking, which might actually be limitated to the first iov) */ + if (buflen <= 0) { + /* nothing received at all, propagate the error */ + buflen = recvd_local; + } + break; + } + /* pass MSG_DONTWAIT to lwip_recv_tcp() to prevent waiting for more data */ + recv_flags |= MSG_DONTWAIT; + } + if (buflen > 0) { + /* reset socket error since we have received something */ + sock_set_errno(sock, 0); + } + /* " If the socket is connected, the msg_name and msg_namelen members shall be ignored." 
*/ + done_socket(sock); + return buflen; +#else /* LWIP_TCP */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + /* else, UDP and RAW NETCONNs */ +#if LWIP_UDP || LWIP_RAW + { + u16_t datagram_len = 0; + err_t err; + err = lwip_recvfrom_udp_raw(sock, flags, message, &datagram_len, s); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n", + s, lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + if (datagram_len > buflen) { + message->msg_flags |= MSG_TRUNC; + } + + sock_set_errno(sock, 0); + done_socket(sock); + return (int)datagram_len; + } +#else /* LWIP_UDP || LWIP_RAW */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_UDP || LWIP_RAW */ +} + +ssize_t +lwip_send(int s, const void *data, size_t size, int flags) +{ + struct lwip_sock *sock; + err_t err; + u8_t write_flags; + size_t written; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n", + s, data, size, flags)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { +#if (LWIP_UDP || LWIP_RAW) + done_socket(sock); + return lwip_sendto(s, data, size, flags, NULL, 0); +#else /* (LWIP_UDP || LWIP_RAW) */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* (LWIP_UDP || LWIP_RAW) */ + } + + write_flags = (u8_t)(NETCONN_COPY | + ((flags & MSG_MORE) ? NETCONN_MORE : 0) | + ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0)); + written = 0; + err = netconn_write_partly(sock->conn, data, size, write_flags, &written); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */ + return (err == ERR_OK ? (ssize_t)written : -1); +} + +ssize_t +lwip_sendmsg(int s, const struct msghdr *msg, int flags) +{ + struct lwip_sock *sock; +#if LWIP_TCP + u8_t write_flags; + size_t written; +#endif + err_t err = ERR_OK; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL, + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", msg->msg_iov != NULL, + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: maximum iovs exceeded", (msg->msg_iovlen > 0) && (msg->msg_iovlen <= IOV_MAX), + sock_set_errno(sock, EMSGSIZE); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: unsupported flags", (flags & ~(MSG_DONTWAIT | MSG_MORE)) == 0, + sock_set_errno(sock, EOPNOTSUPP); done_socket(sock); return -1;); + + LWIP_UNUSED_ARG(msg->msg_control); + LWIP_UNUSED_ARG(msg->msg_controllen); + LWIP_UNUSED_ARG(msg->msg_flags); + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + write_flags = (u8_t)(NETCONN_COPY | + ((flags & MSG_MORE) ? NETCONN_MORE : 0) | + ((flags & MSG_DONTWAIT) ? 
NETCONN_DONTBLOCK : 0)); + + written = 0; + err = netconn_write_vectors_partly(sock->conn, (struct netvector *)msg->msg_iov, (u16_t)msg->msg_iovlen, write_flags, &written); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */ + return (err == ERR_OK ? (ssize_t)written : -1); +#else /* LWIP_TCP */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + /* else, UDP and RAW NETCONNs */ +#if LWIP_UDP || LWIP_RAW + { + struct netbuf chain_buf; + int i; + ssize_t size = 0; + + LWIP_UNUSED_ARG(flags); + LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) || + IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + + /* initialize chain buffer with destination */ + memset(&chain_buf, 0, sizeof(struct netbuf)); + if (msg->msg_name) { + u16_t remote_port; + SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf.addr, remote_port); + netbuf_fromport(&chain_buf) = remote_port; + } +#if LWIP_NETIF_TX_SINGLE_PBUF + for (i = 0; i < msg->msg_iovlen; i++) { + size += msg->msg_iov[i].iov_len; + if ((msg->msg_iov[i].iov_len > INT_MAX) || (size < (int)msg->msg_iov[i].iov_len)) { + /* overflow */ + goto sendmsg_emsgsize; + } + } + if (size > 0xFFFF) { + /* overflow */ + goto sendmsg_emsgsize; + } + /* Allocate a new netbuf and copy the data into it. */ + if (netbuf_alloc(&chain_buf, (u16_t)size) == NULL) { + err = ERR_MEM; + } else { + /* flatten the IO vectors */ + size_t offset = 0; + for (i = 0; i < msg->msg_iovlen; i++) { + MEMCPY(&((u8_t *)chain_buf.p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len); + offset += msg->msg_iov[i].iov_len; + } +#if LWIP_CHECKSUM_ON_COPY + { + /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */ + u16_t chksum = ~inet_chksum_pbuf(chain_buf.p); + netbuf_set_chksum(&chain_buf, chksum); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + err = ERR_OK; + } +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + /* create a chained netbuf from the IO vectors. 
NOTE: we assemble a pbuf chain + manually to avoid having to allocate, chain, and delete a netbuf for each iov */ + for (i = 0; i < msg->msg_iovlen; i++) { + struct pbuf *p; + if (msg->msg_iov[i].iov_len > 0xFFFF) { + /* overflow */ + goto sendmsg_emsgsize; + } + p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF); + if (p == NULL) { + err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */ + break; + } + p->payload = msg->msg_iov[i].iov_base; + p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len; + /* netbuf empty, add new pbuf */ + if (chain_buf.p == NULL) { + chain_buf.p = chain_buf.ptr = p; + /* add pbuf to existing pbuf chain */ + } else { + if (chain_buf.p->tot_len + p->len > 0xffff) { + /* overflow */ + pbuf_free(p); + goto sendmsg_emsgsize; + } + pbuf_cat(chain_buf.p, p); + } + } + /* save size of total chain */ + if (err == ERR_OK) { + size = netbuf_len(&chain_buf); + } +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + if (err == ERR_OK) { +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(chain_buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf.addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf.addr), ip_2_ip6(&chain_buf.addr)); + IP_SET_TYPE_VAL(chain_buf.addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* send the data */ + err = netconn_send(sock->conn, &chain_buf); + } + + /* deallocated the buffer */ + netbuf_free(&chain_buf); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? size : -1); +sendmsg_emsgsize: + sock_set_errno(sock, EMSGSIZE); + netbuf_free(&chain_buf); + done_socket(sock); + return -1; + } +#else /* LWIP_UDP || LWIP_RAW */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_UDP || LWIP_RAW */ +} + +ssize_t +lwip_sendto(int s, const void *data, size_t size, int flags, + const struct sockaddr *to, socklen_t tolen) +{ + struct lwip_sock *sock; + err_t err; + u16_t short_size; + u16_t remote_port; + struct netbuf buf; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + done_socket(sock); + return lwip_send(s, data, size, flags); +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(flags); + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + + if (size > LWIP_MIN(0xFFFF, SSIZE_MAX)) { + /* cannot fit into one datagram (at least for us) */ + sock_set_errno(sock, EMSGSIZE); + done_socket(sock); + return -1; + } + short_size = (u16_t)size; + LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) || + (IS_SOCK_ADDR_LEN_VALID(tolen) && + ((to != NULL) && (IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))))), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_UNUSED_ARG(tolen); + + /* initialize a buffer */ + buf.p = buf.ptr = NULL; +#if LWIP_CHECKSUM_ON_COPY + buf.flags = 0; +#endif /* LWIP_CHECKSUM_ON_COPY */ + if (to) { + SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port); + } else { + remote_port = 0; + ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr); + } + netbuf_fromport(&buf) = remote_port; + + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=", + s, data, short_size, flags)); + ip_addr_debug_print_val(SOCKETS_DEBUG, buf.addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port)); + + /* make the buffer point to the data that should be 
sent */ +#if LWIP_NETIF_TX_SINGLE_PBUF + /* Allocate a new netbuf and copy the data into it. */ + if (netbuf_alloc(&buf, short_size) == NULL) { + err = ERR_MEM; + } else { +#if LWIP_CHECKSUM_ON_COPY + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) { + u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size); + netbuf_set_chksum(&buf, chksum); + } else +#endif /* LWIP_CHECKSUM_ON_COPY */ + { + MEMCPY(buf.p->payload, data, short_size); + } + err = ERR_OK; + } +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + err = netbuf_ref(&buf, data, short_size); +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + if (err == ERR_OK) { +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr)); + IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* send the data */ + err = netconn_send(sock->conn, &buf); + } + + /* deallocated the buffer */ + netbuf_free(&buf); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? short_size : -1); +} + +int +lwip_socket(int domain, int type, int protocol) +{ + struct netconn *conn; + int i; + + LWIP_UNUSED_ARG(domain); /* @todo: check this */ + + /* create a netconn */ + switch (type) { + case SOCK_RAW: + conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW), + (u8_t)protocol, DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); + break; + case SOCK_DGRAM: + conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, + ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)), + DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); +#if LWIP_NETBUF_RECVINFO + if (conn) { + /* netconn layer enables pktinfo by default, sockets default to off */ + conn->flags &= ~NETCONN_FLAG_PKTINFO; + } +#endif /* LWIP_NETBUF_RECVINFO */ + break; + case SOCK_STREAM: + conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n", + domain, type, protocol)); + set_errno(EINVAL); + return -1; + } + + if (!conn) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n")); + set_errno(ENOBUFS); + return -1; + } + + i = alloc_socket(conn, 0); + + if (i == -1) { + netconn_delete(conn); + set_errno(ENFILE); + return -1; + } + conn->socket = i; + done_socket(&sockets[i - LWIP_SOCKET_OFFSET]); + LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i)); + set_errno(0); + return i; +} + +ssize_t +lwip_write(int s, const void *data, size_t size) +{ + return lwip_send(s, data, size, 0); +} + +ssize_t +lwip_writev(int s, const struct iovec *iov, int iovcnt) +{ + struct msghdr msg; + + msg.msg_name = NULL; + msg.msg_namelen = 0; + /* Hack: we have to cast via number to cast from 'const' pointer to non-const. + Blame the opengroup standard for this inconsistency. 
*/ + msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov); + msg.msg_iovlen = iovcnt; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + return lwip_sendmsg(s, &msg, 0); +} + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +/* Add select_cb to select_cb_list. */ +static void +lwip_link_select_cb(struct lwip_select_cb *select_cb) +{ + LWIP_SOCKET_SELECT_DECL_PROTECT(lev); + + /* Protect the select_cb_list */ + LWIP_SOCKET_SELECT_PROTECT(lev); + + /* Put this select_cb on top of list */ + select_cb->next = select_cb_list; + if (select_cb_list != NULL) { + select_cb_list->prev = select_cb; + } + select_cb_list = select_cb; +#if !LWIP_TCPIP_CORE_LOCKING + /* Increasing this counter tells select_check_waiters that the list has changed. */ + select_cb_ctr++; +#endif + + /* Now we can safely unprotect */ + LWIP_SOCKET_SELECT_UNPROTECT(lev); +} + +/* Remove select_cb from select_cb_list. */ +static void +lwip_unlink_select_cb(struct lwip_select_cb *select_cb) +{ + LWIP_SOCKET_SELECT_DECL_PROTECT(lev); + + /* Take us off the list */ + LWIP_SOCKET_SELECT_PROTECT(lev); + if (select_cb->next != NULL) { + select_cb->next->prev = select_cb->prev; + } + if (select_cb_list == select_cb) { + LWIP_ASSERT("select_cb->prev == NULL", select_cb->prev == NULL); + select_cb_list = select_cb->next; + } else { + LWIP_ASSERT("select_cb->prev != NULL", select_cb->prev != NULL); + select_cb->prev->next = select_cb->next; + } +#if !LWIP_TCPIP_CORE_LOCKING + /* Increasing this counter tells select_check_waiters that the list has changed. */ + select_cb_ctr++; +#endif + LWIP_SOCKET_SELECT_UNPROTECT(lev); +} +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +#if LWIP_SOCKET_SELECT +/** + * Go through the readset and writeset lists and see which socket of the sockets + * set in the sets has events. On return, readset, writeset and exceptset have + * the sockets enabled that had events. + * + * @param maxfdp1 the highest socket index in the sets + * @param readset_in set of sockets to check for read events + * @param writeset_in set of sockets to check for write events + * @param exceptset_in set of sockets to check for error events + * @param readset_out set of sockets that had read events + * @param writeset_out set of sockets that had write events + * @param exceptset_out set os sockets that had error events + * @return number of sockets that had events (read/write/exception) (>= 0) + */ +static int +lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in, + fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out) +{ + int i, nready = 0; + fd_set lreadset, lwriteset, lexceptset; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + FD_ZERO(&lreadset); + FD_ZERO(&lwriteset); + FD_ZERO(&lexceptset); + + /* Go through each socket in each list to count number of sockets which + currently match */ + for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) { + /* if this FD is not in the set, continue */ + if (!(readset_in && FD_ISSET(i, readset_in)) && + !(writeset_in && FD_ISSET(i, writeset_in)) && + !(exceptset_in && FD_ISSET(i, exceptset_in))) { + continue; + } + /* First get the socket's status (protected)... */ + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + void *lastdata = sock->lastdata.pbuf; + s16_t rcvevent = sock->rcvevent; + u16_t sendevent = sock->sendevent; + u16_t errevent = sock->errevent; + SYS_ARCH_UNPROTECT(lev); + + /* ... 
then examine it: */
+      /* See if netconn of this socket is ready for read */
+      if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) {
+        FD_SET(i, &lreadset);
+        LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i));
+        nready++;
+      }
+      /* See if netconn of this socket is ready for write */
+      if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) {
+        FD_SET(i, &lwriteset);
+        LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i));
+        nready++;
+      }
+      /* See if netconn of this socket had an error */
+      if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) {
+        FD_SET(i, &lexceptset);
+        LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i));
+        nready++;
+      }
+      done_socket(sock);
+    } else {
+      SYS_ARCH_UNPROTECT(lev);
+      /* not a valid open socket */
+      return -1;
+    }
+  }
+  /* copy local sets to the ones provided as arguments */
+  *readset_out = lreadset;
+  *writeset_out = lwriteset;
+  *exceptset_out = lexceptset;
+
+  LWIP_ASSERT("nready >= 0", nready >= 0);
+  return nready;
+}
+
+#if LWIP_NETCONN_FULLDUPLEX
+/* Mark all of the set sockets in one of the three fdsets passed to select as used.
+ * All sockets are marked (and later unmarked), whether they are open or not.
+ * This is OK as lwip_selscan aborts select when non-open sockets are found.
+ */
+static void
+lwip_select_inc_sockets_used_set(int maxfdp, fd_set *fdset, fd_set *used_sockets)
+{
+  SYS_ARCH_DECL_PROTECT(lev);
+  if (fdset) {
+    int i;
+    for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
+      /* if this FD is in the set, lock it (unless already done) */
+      if (FD_ISSET(i, fdset) && !FD_ISSET(i, used_sockets)) {
+        struct lwip_sock *sock;
+        SYS_ARCH_PROTECT(lev);
+        sock = tryget_socket_unconn_locked(i);
+        if (sock != NULL) {
+          /* leave the socket used until released by lwip_select_dec_sockets_used */
+          FD_SET(i, used_sockets);
+        }
+        SYS_ARCH_UNPROTECT(lev);
+      }
+    }
+  }
+}
+
+/* Mark all sockets passed to select as used to prevent them from being freed
+ * from other threads while select is running.
+ * Marked sockets are added to 'used_sockets' to mark them only once and be able
+ * to unmark them correctly.
+ */ +static void +lwip_select_inc_sockets_used(int maxfdp, fd_set *fdset1, fd_set *fdset2, fd_set *fdset3, fd_set *used_sockets) +{ + FD_ZERO(used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset1, used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset2, used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset3, used_sockets); +} + +/* Let go all sockets that were marked as used when starting select */ +static void +lwip_select_dec_sockets_used(int maxfdp, fd_set *used_sockets) +{ + int i; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) { + /* if this FD is not in the set, continue */ + if (FD_ISSET(i, used_sockets)) { + struct lwip_sock *sock = tryget_socket_unconn_nouse(i); + LWIP_ASSERT("socket gone at the end of select", sock != NULL); + if (sock != NULL) { + done_socket(sock); + } + } + } +} +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, used_sockets) +#define lwip_select_dec_sockets_used(maxfdp1, used_sockets) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +int +lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset, + struct timeval *timeout) +{ + u32_t waitres = 0; + int nready; + fd_set lreadset, lwriteset, lexceptset; + u32_t msectimeout; + int i; + int maxfdp2; +#if LWIP_NETCONN_SEM_PER_THREAD + int waited = 0; +#endif +#if LWIP_NETCONN_FULLDUPLEX + fd_set used_sockets; +#endif + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n", + maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset, + timeout ? (s32_t)timeout->tv_sec : (s32_t) - 1, + timeout ? (s32_t)timeout->tv_usec : (s32_t) - 1)); + + if ((maxfdp1 < 0) || (maxfdp1 > LWIP_SELECT_MAXNFDS)) { + set_errno(EINVAL); + return -1; + } + + lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, &used_sockets); + + /* Go through each socket in each list to count number of sockets which + currently match */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + + if (nready < 0) { + /* one of the sockets in one of the fd_sets was invalid */ + set_errno(EBADF); + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + return -1; + } else if (nready > 0) { + /* one or more sockets are set, no need to wait */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready)); + } else { + /* If we don't have any current events, then suspend if we are supposed to */ + if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n")); + /* This is OK as the local fdsets are empty and nready is zero, + or we would have returned earlier. */ + } else { + /* None ready: add our semaphore to list: + We don't actually need any dynamic memory. Our entry on the + list is only valid while we are in this function, so it's ok + to use local variables (unless we're running in MPU compatible + mode). 
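+         In MPU compatible mode, the API_SELECT_CB_VAR_* macros used below
+         are expected to allocate and free the entry instead (inferred from
+         their use here; the macros themselves are defined elsewhere).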
*/ + API_SELECT_CB_VAR_DECLARE(select_cb); + API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(ENOMEM); lwip_select_dec_sockets_used(maxfdp1, &used_sockets); return -1); + memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb)); + + API_SELECT_CB_VAR_REF(select_cb).readset = readset; + API_SELECT_CB_VAR_REF(select_cb).writeset = writeset; + API_SELECT_CB_VAR_REF(select_cb).exceptset = exceptset; +#if LWIP_NETCONN_SEM_PER_THREAD + API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) { + /* failed to create semaphore */ + set_errno(ENOMEM); + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + API_SELECT_CB_VAR_FREE(select_cb); + return -1; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + + /* Increase select_waiting for each socket we are interested in */ + maxfdp2 = maxfdp1; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) { + if ((readset && FD_ISSET(i, readset)) || + (writeset && FD_ISSET(i, writeset)) || + (exceptset && FD_ISSET(i, exceptset))) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + sock->select_waiting++; + if (sock->select_waiting == 0) { + /* overflow - too many threads waiting */ + sock->select_waiting--; + nready = -1; + maxfdp2 = i; + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + set_errno(EBUSY); + break; + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + } else { + /* Not a valid socket */ + nready = -1; + maxfdp2 = i; + SYS_ARCH_UNPROTECT(lev); + set_errno(EBADF); + break; + } + } + } + + if (nready >= 0) { + /* Call lwip_selscan again: there could have been events between + the last scan (without us on the list) and putting us on the list! */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + if (!nready) { + /* Still none ready, just wait to be woken */ + if (timeout == 0) { + /* Wait forever */ + msectimeout = 0; + } else { + long msecs_long = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500) / 1000)); + if (msecs_long <= 0) { + /* Wait 1ms at least (0 means wait forever) */ + msectimeout = 1; + } else { + msectimeout = (u32_t)msecs_long; + } + } + + waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout); +#if LWIP_NETCONN_SEM_PER_THREAD + waited = 1; +#endif + } + } + + /* Decrease select_waiting for each socket we are interested in */ + for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) { + if ((readset && FD_ISSET(i, readset)) || + (writeset && FD_ISSET(i, writeset)) || + (exceptset && FD_ISSET(i, exceptset))) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + /* for now, handle select_waiting==0... 
*/ + LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0); + if (sock->select_waiting > 0) { + sock->select_waiting--; + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + } else { + SYS_ARCH_UNPROTECT(lev); + /* Not a valid socket */ + nready = -1; + set_errno(EBADF); + } + } + } + + lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + +#if LWIP_NETCONN_SEM_PER_THREAD + if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) { + /* don't leave the thread-local semaphore signalled */ + sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1); + } +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + API_SELECT_CB_VAR_FREE(select_cb); + + if (nready < 0) { + /* This happens when a socket got closed while waiting */ + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + return -1; + } + + if (waitres == SYS_ARCH_TIMEOUT) { + /* Timeout */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n")); + /* This is OK as the local fdsets are empty and nready is zero, + or we would have returned earlier. */ + } else { + /* See what's set now after waiting */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready)); + } + } + } + + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + set_errno(0); + if (readset) { + *readset = lreadset; + } + if (writeset) { + *writeset = lwriteset; + } + if (exceptset) { + *exceptset = lexceptset; + } + return nready; +} +#endif /* LWIP_SOCKET_SELECT */ + +#if LWIP_SOCKET_POLL +/** Options for the lwip_pollscan function. */ +enum lwip_pollscan_opts +{ + /** Clear revents in each struct pollfd. */ + LWIP_POLLSCAN_CLEAR = 1, + + /** Increment select_waiting in each struct lwip_sock. */ + LWIP_POLLSCAN_INC_WAIT = 2, + + /** Decrement select_waiting in each struct lwip_sock. */ + LWIP_POLLSCAN_DEC_WAIT = 4 +}; + +/** + * Update revents in each struct pollfd. + * Optionally update select_waiting in struct lwip_sock. + * + * @param fds array of structures to update + * @param nfds number of structures in fds + * @param opts what to update and how + * @return number of structures that have revents != 0 + */ +static int +lwip_pollscan(struct pollfd *fds, nfds_t nfds, enum lwip_pollscan_opts opts) +{ + int nready = 0; + nfds_t fdi; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + /* Go through each struct pollfd in the array. */ + for (fdi = 0; fdi < nfds; fdi++) { + if ((opts & LWIP_POLLSCAN_CLEAR) != 0) { + fds[fdi].revents = 0; + } + + /* Negative fd means the caller wants us to ignore this struct. + POLLNVAL means we already detected that the fd is invalid; + if another thread has since opened a new socket with that fd, + we must not use that socket. */ + if (fds[fdi].fd >= 0 && (fds[fdi].revents & POLLNVAL) == 0) { + /* First get the socket's status (protected)... 
*/ + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(fds[fdi].fd); + if (sock != NULL) { + void* lastdata = sock->lastdata.pbuf; + s16_t rcvevent = sock->rcvevent; + u16_t sendevent = sock->sendevent; + u16_t errevent = sock->errevent; + + if ((opts & LWIP_POLLSCAN_INC_WAIT) != 0) { + sock->select_waiting++; + if (sock->select_waiting == 0) { + /* overflow - too many threads waiting */ + sock->select_waiting--; + nready = -1; + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + break; + } + } else if ((opts & LWIP_POLLSCAN_DEC_WAIT) != 0) { + /* for now, handle select_waiting==0... */ + LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0); + if (sock->select_waiting > 0) { + sock->select_waiting--; + } + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + + /* ... then examine it: */ + /* See if netconn of this socket is ready for read */ + if ((fds[fdi].events & POLLIN) != 0 && ((lastdata != NULL) || (rcvevent > 0))) { + fds[fdi].revents |= POLLIN; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for reading\n", fds[fdi].fd)); + } + /* See if netconn of this socket is ready for write */ + if ((fds[fdi].events & POLLOUT) != 0 && (sendevent != 0)) { + fds[fdi].revents |= POLLOUT; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for writing\n", fds[fdi].fd)); + } + /* See if netconn of this socket had an error */ + if (errevent != 0) { + /* POLLERR is output only. */ + fds[fdi].revents |= POLLERR; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for exception\n", fds[fdi].fd)); + } + } else { + /* Not a valid socket */ + SYS_ARCH_UNPROTECT(lev); + /* POLLNVAL is output only. */ + fds[fdi].revents |= POLLNVAL; + return -1; + } + } + + /* Will return the number of structures that have events, + not the number of events. */ + if (fds[fdi].revents != 0) { + nready++; + } + } + + LWIP_ASSERT("nready >= 0", nready >= 0); + return nready; +} + +#if LWIP_NETCONN_FULLDUPLEX +/* Mark all sockets as used. + * + * All sockets are marked (and later unmarked), whether they are open or not. + * This is OK as lwip_pollscan aborts select when non-open sockets are found. + */ +static void +lwip_poll_inc_sockets_used(struct pollfd *fds, nfds_t nfds) +{ + nfds_t fdi; + + if(fds) { + /* Go through each struct pollfd in the array. */ + for (fdi = 0; fdi < nfds; fdi++) { + /* Increase the reference counter */ + tryget_socket_unconn(fds[fdi].fd); + } + } +} + +/* Let go all sockets that were marked as used when starting poll */ +static void +lwip_poll_dec_sockets_used(struct pollfd *fds, nfds_t nfds) +{ + nfds_t fdi; + + if(fds) { + /* Go through each struct pollfd in the array. 
*/ + for (fdi = 0; fdi < nfds; fdi++) { + struct lwip_sock *sock = tryget_socket_unconn_nouse(fds[fdi].fd); + if (sock != NULL) { + done_socket(sock); + } + } + } +} +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define lwip_poll_inc_sockets_used(fds, nfds) +#define lwip_poll_dec_sockets_used(fds, nfds) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +int +lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout) +{ + u32_t waitres = 0; + int nready; + u32_t msectimeout; +#if LWIP_NETCONN_SEM_PER_THREAD + int waited = 0; +#endif + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll(%p, %d, %d)\n", + (void*)fds, (int)nfds, timeout)); + LWIP_ERROR("lwip_poll: invalid fds", ((fds != NULL && nfds > 0) || (fds == NULL && nfds == 0)), + set_errno(EINVAL); return -1;); + + lwip_poll_inc_sockets_used(fds, nfds); + + /* Go through each struct pollfd to count number of structures + which currently match */ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_CLEAR); + + if (nready < 0) { + lwip_poll_dec_sockets_used(fds, nfds); + return -1; + } + + /* If we don't have any current events, then suspend if we are supposed to */ + if (!nready) { + API_SELECT_CB_VAR_DECLARE(select_cb); + + if (timeout == 0) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: no timeout, returning 0\n")); + goto return_success; + } + API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(EAGAIN); lwip_poll_dec_sockets_used(fds, nfds); return -1); + memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb)); + + /* None ready: add our semaphore to list: + We don't actually need any dynamic memory. Our entry on the + list is only valid while we are in this function, so it's ok + to use local variables. */ + + API_SELECT_CB_VAR_REF(select_cb).poll_fds = fds; + API_SELECT_CB_VAR_REF(select_cb).poll_nfds = nfds; +#if LWIP_NETCONN_SEM_PER_THREAD + API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) { + /* failed to create semaphore */ + set_errno(EAGAIN); + lwip_poll_dec_sockets_used(fds, nfds); + API_SELECT_CB_VAR_FREE(select_cb); + return -1; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + + /* Increase select_waiting for each socket we are interested in. + Also, check for events again: there could have been events between + the last scan (without us on the list) and putting us on the list! */ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_INC_WAIT); + + if (!nready) { + /* Still none ready, just wait to be woken */ + if (timeout < 0) { + /* Wait forever */ + msectimeout = 0; + } else { + /* timeout == 0 would have been handled earlier. */ + LWIP_ASSERT("timeout > 0", timeout > 0); + msectimeout = timeout; + } + waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout); +#if LWIP_NETCONN_SEM_PER_THREAD + waited = 1; +#endif + } + + /* Decrease select_waiting for each socket we are interested in, + and check which events occurred while we waited. 
*/ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_DEC_WAIT); + + lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + +#if LWIP_NETCONN_SEM_PER_THREAD + if (select_cb.sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) { + /* don't leave the thread-local semaphore signalled */ + sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1); + } +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + API_SELECT_CB_VAR_FREE(select_cb); + + if (nready < 0) { + /* This happens when a socket got closed while waiting */ + lwip_poll_dec_sockets_used(fds, nfds); + return -1; + } + + if (waitres == SYS_ARCH_TIMEOUT) { + /* Timeout */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: timeout expired\n")); + goto return_success; + } + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: nready=%d\n", nready)); +return_success: + lwip_poll_dec_sockets_used(fds, nfds); + set_errno(0); + return nready; +} + +/** + * Check whether event_callback should wake up a thread waiting in + * lwip_poll. + */ +static int +lwip_poll_should_wake(const struct lwip_select_cb *scb, int fd, int has_recvevent, int has_sendevent, int has_errevent) +{ + nfds_t fdi; + for (fdi = 0; fdi < scb->poll_nfds; fdi++) { + const struct pollfd *pollfd = &scb->poll_fds[fdi]; + if (pollfd->fd == fd) { + /* Do not update pollfd->revents right here; + that would be a data race because lwip_pollscan + accesses revents without protecting. */ + if (has_recvevent && (pollfd->events & POLLIN) != 0) { + return 1; + } + if (has_sendevent && (pollfd->events & POLLOUT) != 0) { + return 1; + } + if (has_errevent) { + /* POLLERR is output only. */ + return 1; + } + } + } + return 0; +} +#endif /* LWIP_SOCKET_POLL */ + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +/** + * Callback registered in the netconn layer for each socket-netconn. + * Processes recvevent (data available) and wakes up tasks waiting for select. + * + * @note for LWIP_TCPIP_CORE_LOCKING any caller of this function + * must have the core lock held when signaling the following events + * as they might cause select_list_cb to be checked: + * NETCONN_EVT_RCVPLUS + * NETCONN_EVT_SENDPLUS + * NETCONN_EVT_ERROR + * This requirement will be asserted in select_check_waiters() + */ +static void +event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len) +{ + int s, check_waiters; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_UNUSED_ARG(len); + + /* Get socket */ + if (conn) { + s = conn->socket; + if (s < 0) { + /* Data comes in right away after an accept, even though + * the server task might not have created a new socket yet. + * Just count down (or up) if that's the case and we + * will use the data later. Note that only receive events + * can happen before the new socket is set up. 
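+       * Illustrative example: conn->socket starts at -1; two NETCONN_EVT_RCVPLUS
+       * events arriving before lwip_accept() runs leave it at -3, and
+       * lwip_accept() then recovers recvevent = -1 - (-3) = 2 pending events
+       * for the new socket.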
*/ + SYS_ARCH_PROTECT(lev); + if (conn->socket < 0) { + if (evt == NETCONN_EVT_RCVPLUS) { + /* conn->socket is -1 on initialization + lwip_accept adjusts sock->recvevent if conn->socket < -1 */ + conn->socket--; + } + SYS_ARCH_UNPROTECT(lev); + return; + } + s = conn->socket; + SYS_ARCH_UNPROTECT(lev); + } + + sock = get_socket(s); + if (!sock) { + return; + } + } else { + return; + } + + check_waiters = 1; + SYS_ARCH_PROTECT(lev); + /* Set event as required */ + switch (evt) { + case NETCONN_EVT_RCVPLUS: + sock->rcvevent++; + if (sock->rcvevent > 1) { + check_waiters = 0; + } + break; + case NETCONN_EVT_RCVMINUS: + sock->rcvevent--; + check_waiters = 0; + break; + case NETCONN_EVT_SENDPLUS: + if (sock->sendevent) { + check_waiters = 0; + } + sock->sendevent = 1; + break; + case NETCONN_EVT_SENDMINUS: + sock->sendevent = 0; + check_waiters = 0; + break; + case NETCONN_EVT_ERROR: + sock->errevent = 1; + break; + default: + LWIP_ASSERT("unknown event", 0); + break; + } + + if (sock->select_waiting && check_waiters) { + /* Save which events are active */ + int has_recvevent, has_sendevent, has_errevent; + has_recvevent = sock->rcvevent > 0; + has_sendevent = sock->sendevent != 0; + has_errevent = sock->errevent != 0; + SYS_ARCH_UNPROTECT(lev); + /* Check any select calls waiting on this socket */ + select_check_waiters(s, has_recvevent, has_sendevent, has_errevent); + } else { + SYS_ARCH_UNPROTECT(lev); + } + done_socket(sock); +} + +/** + * Check if any select waiters are waiting on this socket and its events + * + * @note on synchronization of select_cb_list: + * LWIP_TCPIP_CORE_LOCKING: the select_cb_list must only be accessed while holding + * the core lock. We do a single pass through the list and signal any waiters. + * Core lock should already be held when calling here!!!! + + * !LWIP_TCPIP_CORE_LOCKING: we use SYS_ARCH_PROTECT but unlock on each iteration + * of the loop, thus creating a possibility where a thread could modify the + * select_cb_list during our UNPROTECT/PROTECT. We use a generational counter to + * detect this change and restart the list walk. 
The list is expected to be small + */ +static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent) +{ + struct lwip_select_cb *scb; +#if !LWIP_TCPIP_CORE_LOCKING + int last_select_cb_ctr; + SYS_ARCH_DECL_PROTECT(lev); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + LWIP_ASSERT_CORE_LOCKED(); + +#if !LWIP_TCPIP_CORE_LOCKING + SYS_ARCH_PROTECT(lev); +again: + /* remember the state of select_cb_list to detect changes */ + last_select_cb_ctr = select_cb_ctr; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + for (scb = select_cb_list; scb != NULL; scb = scb->next) { + if (scb->sem_signalled == 0) { + /* semaphore not signalled yet */ + int do_signal = 0; +#if LWIP_SOCKET_POLL + if (scb->poll_fds != NULL) { + do_signal = lwip_poll_should_wake(scb, s, has_recvevent, has_sendevent, has_errevent); + } +#endif /* LWIP_SOCKET_POLL */ +#if LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL + else +#endif /* LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL */ +#if LWIP_SOCKET_SELECT + { + /* Test this select call for our socket */ + if (has_recvevent) { + if (scb->readset && FD_ISSET(s, scb->readset)) { + do_signal = 1; + } + } + if (has_sendevent) { + if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) { + do_signal = 1; + } + } + if (has_errevent) { + if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) { + do_signal = 1; + } + } + } +#endif /* LWIP_SOCKET_SELECT */ + if (do_signal) { + scb->sem_signalled = 1; + /* For !LWIP_TCPIP_CORE_LOCKING, we don't call SYS_ARCH_UNPROTECT() before signaling + the semaphore, as this might lead to the select thread taking itself off the list, + invalidating the semaphore. */ + sys_sem_signal(SELECT_SEM_PTR(scb->sem)); + } + } +#if LWIP_TCPIP_CORE_LOCKING + } +#else + /* unlock interrupts with each step */ + SYS_ARCH_UNPROTECT(lev); + /* this makes sure interrupt protection time is short */ + SYS_ARCH_PROTECT(lev); + if (last_select_cb_ctr != select_cb_ctr) { + /* someone has changed select_cb_list, restart at the beginning */ + goto again; + } + /* remember the state of select_cb_list to detect changes */ + last_select_cb_ctr = select_cb_ctr; + } + SYS_ARCH_UNPROTECT(lev); +#endif +} +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +/** + * Close one end of a full-duplex connection. + */ +int +lwip_shutdown(int s, int how) +{ + struct lwip_sock *sock; + err_t err; + u8_t shut_rx = 0, shut_tx = 0; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (sock->conn != NULL) { + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + done_socket(sock); + return -1; + } + } else { + sock_set_errno(sock, ENOTCONN); + done_socket(sock); + return -1; + } + + if (how == SHUT_RD) { + shut_rx = 1; + } else if (how == SHUT_WR) { + shut_tx = 1; + } else if (how == SHUT_RDWR) { + shut_rx = 1; + shut_tx = 1; + } else { + sock_set_errno(sock, EINVAL); + done_socket(sock); + return -1; + } + err = netconn_shutdown(sock->conn, shut_rx, shut_tx); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? 
0 : -1); +} + +static int +lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local) +{ + struct lwip_sock *sock; + union sockaddr_aligned saddr; + ip_addr_t naddr; + u16_t port; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* get the IP address and port */ + err = netconn_getaddr(sock->conn, &naddr, &port, local); + if (err != ERR_OK) { + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */ + if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) && + IP_IS_V4_VAL(naddr)) { + ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr)); + IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, naddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port)); + + if (*namelen > saddr.sa.sa_len) { + *namelen = saddr.sa.sa_len; + } + MEMCPY(name, &saddr, *namelen); + + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +int +lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen) +{ + return lwip_getaddrname(s, name, namelen, 0); +} + +int +lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen) +{ + return lwip_getaddrname(s, name, namelen, 1); +} + +int +lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen) +{ + int err; + struct lwip_sock *sock = get_socket(s); +#if !LWIP_TCPIP_CORE_LOCKING + err_t cberr; + LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + if (!sock) { + return -1; + } + + if ((NULL == optval) || (NULL == optlen)) { + sock_set_errno(sock, EFAULT); + done_socket(sock); + return -1; + } + +#if LWIP_TCPIP_CORE_LOCKING + /* core-locking can just call the -impl function */ + LOCK_TCPIP_CORE(); + err = lwip_getsockopt_impl(s, level, optname, optval, optlen); + UNLOCK_TCPIP_CORE(); + +#else /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_MPU_COMPATIBLE + /* MPU_COMPATIBLE copies the optval data, so check for max size here */ + if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) { + sock_set_errno(sock, ENOBUFS); + done_socket(sock); + return -1; + } +#endif /* LWIP_MPU_COMPATIBLE */ + + LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock); + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen; +#if !LWIP_MPU_COMPATIBLE + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval; +#endif /* !LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0; +#if LWIP_NETCONN_SEM_PER_THREAD + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed; +#endif + cberr = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data)); + if (cberr != ERR_OK) { + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); + sock_set_errno(sock, err_to_errno(cberr)); + done_socket(sock); + return -1; + } + sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0); + + /* write back optlen and optval */ + *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen; +#if LWIP_MPU_COMPATIBLE + MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, + 
LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen); +#endif /* LWIP_MPU_COMPATIBLE */ + + /* maybe lwip_getsockopt_internal has changed err */ + err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err; + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sock_set_errno(sock, err); + done_socket(sock); + return err ? -1 : 0; +} + +#if !LWIP_TCPIP_CORE_LOCKING +/** lwip_getsockopt_callback: only used without CORE_LOCKING + * to get into the tcpip_thread + */ +static void +lwip_getsockopt_callback(void *arg) +{ + struct lwip_setgetsockopt_data *data; + LWIP_ASSERT("arg != NULL", arg != NULL); + data = (struct lwip_setgetsockopt_data *)arg; + + data->err = lwip_getsockopt_impl(data->s, data->level, data->optname, +#if LWIP_MPU_COMPATIBLE + data->optval, +#else /* LWIP_MPU_COMPATIBLE */ + data->optval.p, +#endif /* LWIP_MPU_COMPATIBLE */ + &data->optlen); + + sys_sem_signal((sys_sem_t *)(data->completed_sem)); +} +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +static int +lwip_sockopt_to_ipopt(int optname) +{ + /* Map SO_* values to our internal SOF_* values + * We should not rely on #defines in socket.h + * being in sync with ip.h. + */ + switch (optname) { + case SO_BROADCAST: + return SOF_BROADCAST; + case SO_KEEPALIVE: + return SOF_KEEPALIVE; + case SO_REUSEADDR: + return SOF_REUSEADDR; + default: + LWIP_ASSERT("Unknown socket option", 0); + return 0; + } +} + +/** lwip_getsockopt_impl: the actual implementation of getsockopt: + * same argument as lwip_getsockopt, either called directly or through callback + */ +static int +lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen) +{ + int err = 0; + struct lwip_sock *sock = tryget_socket(s); + if (!sock) { + return EBADF; + } + +#ifdef LWIP_HOOK_SOCKETS_GETSOCKOPT + if (LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) { + return err; + } +#endif + + switch (level) { + + /* Level: SOL_SOCKET */ + case SOL_SOCKET: + switch (optname) { + +#if LWIP_TCP + case SO_ACCEPTCONN: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) { + done_socket(sock); + return ENOPROTOOPT; + } + if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) { + *(int *)optval = 1; + } else { + *(int *)optval = 0; + } + break; +#endif /* LWIP_TCP */ + + /* The option flags */ + case SO_BROADCAST: + case SO_KEEPALIVE: +#if SO_REUSE + case SO_REUSEADDR: +#endif /* SO_REUSE */ + if ((optname == SO_BROADCAST) && + (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) { + done_socket(sock); + return ENOPROTOOPT; + } + + optname = lwip_sockopt_to_ipopt(optname); + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = ip_get_option(sock->conn->pcb.ip, optname); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n", + s, optname, (*(int *)optval ? 
"on" : "off"))); + break; + + case SO_TYPE: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) { + case NETCONN_RAW: + *(int *)optval = SOCK_RAW; + break; + case NETCONN_TCP: + *(int *)optval = SOCK_STREAM; + break; + case NETCONN_UDP: + *(int *)optval = SOCK_DGRAM; + break; + default: /* unrecognized socket type */ + *(int *)optval = netconn_type(sock->conn); + LWIP_DEBUGF(SOCKETS_DEBUG, + ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n", + s, *(int *)optval)); + } /* switch (netconn_type(sock->conn)) */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n", + s, *(int *)optval)); + break; + + case SO_ERROR: + LWIP_SOCKOPT_CHECK_OPTLEN(sock, *optlen, int); + *(int *)optval = err_to_errno(netconn_err(sock->conn)); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n", + s, *(int *)optval)); + break; + +#if LWIP_SO_SNDTIMEO + case SO_SNDTIMEO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn)); + break; +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + case SO_RCVTIMEO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn)); + break; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + case SO_RCVBUF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + *(int *)optval = netconn_get_recvbufsize(sock->conn); + break; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + case SO_LINGER: { + s16_t conn_linger; + struct linger *linger = (struct linger *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger); + conn_linger = sock->conn->linger; + if (conn_linger >= 0) { + linger->l_onoff = 1; + linger->l_linger = (int)conn_linger; + } else { + linger->l_onoff = 0; + linger->l_linger = 0; + } + } + break; +#endif /* LWIP_SO_LINGER */ +#if LWIP_UDP + case SO_NO_CHECK: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP); +#if LWIP_UDPLITE + if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) { + /* this flag is only available for UDP, not for UDP lite */ + done_socket(sock); + return EAFNOSUPPORT; + } +#endif /* LWIP_UDPLITE */ + *(int *)optval = udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM) ? 
1 : 0; + break; +#endif /* LWIP_UDP*/ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + + /* Level: IPPROTO_IP */ + case IPPROTO_IP: + switch (optname) { + case IP_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = sock->conn->pcb.ip->ttl; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n", + s, *(int *)optval)); + break; + case IP_TOS: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = sock->conn->pcb.ip->tos; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n", + s, *(int *)optval)); + break; +#if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP + case IP_MULTICAST_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) { + done_socket(sock); + return ENOPROTOOPT; + } + *(u8_t *)optval = udp_get_multicast_ttl(sock->conn->pcb.udp); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n", + s, *(int *)optval)); + break; + case IP_MULTICAST_IF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) { + done_socket(sock); + return ENOPROTOOPT; + } + inet_addr_from_ip4addr((struct in_addr *)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp)); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n", + s, *(u32_t *)optval)); + break; + case IP_MULTICAST_LOOP: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t); + if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) { + *(u8_t *)optval = 1; + } else { + *(u8_t *)optval = 0; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n", + s, *(int *)optval)); + break; +#endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + +#if LWIP_TCP + /* Level: IPPROTO_TCP */ + case IPPROTO_TCP: + /* Special case: all IPPROTO_TCP option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP); + if (sock->conn->pcb.tcp->state == LISTEN) { + done_socket(sock); + return EINVAL; + } + switch (optname) { + case TCP_NODELAY: + *(int *)optval = tcp_nagle_disabled(sock->conn->pcb.tcp); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n", + s, (*(int *)optval) ? 
"on" : "off") ); + break; + case TCP_KEEPALIVE: + *(int *)optval = (int)sock->conn->pcb.tcp->keep_idle; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n", + s, *(int *)optval)); + break; + +#if LWIP_TCP_KEEPALIVE + case TCP_KEEPIDLE: + *(int *)optval = (int)(sock->conn->pcb.tcp->keep_idle / 1000); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n", + s, *(int *)optval)); + break; + case TCP_KEEPINTVL: + *(int *)optval = (int)(sock->conn->pcb.tcp->keep_intvl / 1000); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n", + s, *(int *)optval)); + break; + case TCP_KEEPCNT: + *(int *)optval = (int)sock->conn->pcb.tcp->keep_cnt; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n", + s, *(int *)optval)); + break; +#endif /* LWIP_TCP_KEEPALIVE */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_TCP */ + +#if LWIP_IPV6 + /* Level: IPPROTO_IPV6 */ + case IPPROTO_IPV6: + switch (optname) { + case IPV6_V6ONLY: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + *(int *)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n", + s, *(int *)optval)); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE + /* Level: IPPROTO_UDPLITE */ + case IPPROTO_UDPLITE: + /* Special case: all IPPROTO_UDPLITE option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + /* If this is no UDP lite socket, ignore any options. 
*/ + if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) { + done_socket(sock); + return ENOPROTOOPT; + } + switch (optname) { + case UDPLITE_SEND_CSCOV: + *(int *)optval = sock->conn->pcb.udp->chksum_len_tx; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n", + s, (*(int *)optval)) ); + break; + case UDPLITE_RECV_CSCOV: + *(int *)optval = sock->conn->pcb.udp->chksum_len_rx; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n", + s, (*(int *)optval)) ); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_UDP */ + /* Level: IPPROTO_RAW */ + case IPPROTO_RAW: + switch (optname) { +#if LWIP_IPV6 && LWIP_RAW + case IPV6_CHECKSUM: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW); + if (sock->conn->pcb.raw->chksum_reqd == 0) { + *(int *)optval = -1; + } else { + *(int *)optval = sock->conn->pcb.raw->chksum_offset; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n", + s, (*(int *)optval)) ); + break; +#endif /* LWIP_IPV6 && LWIP_RAW */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n", + s, level, optname)); + err = ENOPROTOOPT; + break; + } /* switch (level) */ + + done_socket(sock); + return err; +} + +int +lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen) +{ + int err = 0; + struct lwip_sock *sock = get_socket(s); +#if !LWIP_TCPIP_CORE_LOCKING + err_t cberr; + LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + if (!sock) { + return -1; + } + + if (NULL == optval) { + sock_set_errno(sock, EFAULT); + done_socket(sock); + return -1; + } + +#if LWIP_TCPIP_CORE_LOCKING + /* core-locking can just call the -impl function */ + LOCK_TCPIP_CORE(); + err = lwip_setsockopt_impl(s, level, optname, optval, optlen); + UNLOCK_TCPIP_CORE(); + +#else /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_MPU_COMPATIBLE + /* MPU_COMPATIBLE copies the optval data, so check for max size here */ + if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) { + sock_set_errno(sock, ENOBUFS); + done_socket(sock); + return -1; + } +#endif /* LWIP_MPU_COMPATIBLE */ + + LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock); + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen; +#if LWIP_MPU_COMPATIBLE + MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen); +#else /* LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void *)optval; +#endif /* LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0; +#if LWIP_NETCONN_SEM_PER_THREAD + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed; +#endif + cberr = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data)); + if (cberr != ERR_OK) { + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); + sock_set_errno(sock, err_to_errno(cberr)); + 
done_socket(sock); + return -1; + } + sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0); + + /* maybe lwip_getsockopt_internal has changed err */ + err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err; + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sock_set_errno(sock, err); + done_socket(sock); + return err ? -1 : 0; +} + +#if !LWIP_TCPIP_CORE_LOCKING +/** lwip_setsockopt_callback: only used without CORE_LOCKING + * to get into the tcpip_thread + */ +static void +lwip_setsockopt_callback(void *arg) +{ + struct lwip_setgetsockopt_data *data; + LWIP_ASSERT("arg != NULL", arg != NULL); + data = (struct lwip_setgetsockopt_data *)arg; + + data->err = lwip_setsockopt_impl(data->s, data->level, data->optname, +#if LWIP_MPU_COMPATIBLE + data->optval, +#else /* LWIP_MPU_COMPATIBLE */ + data->optval.pc, +#endif /* LWIP_MPU_COMPATIBLE */ + data->optlen); + + sys_sem_signal((sys_sem_t *)(data->completed_sem)); +} +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +/** lwip_setsockopt_impl: the actual implementation of setsockopt: + * same argument as lwip_setsockopt, either called directly or through callback + */ +static int +lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen) +{ + int err = 0; + struct lwip_sock *sock = tryget_socket(s); + if (!sock) { + return EBADF; + } + +#ifdef LWIP_HOOK_SOCKETS_SETSOCKOPT + if (LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) { + return err; + } +#endif + + switch (level) { + + /* Level: SOL_SOCKET */ + case SOL_SOCKET: + switch (optname) { + + /* SO_ACCEPTCONN is get-only */ + + /* The option flags */ + case SO_BROADCAST: + case SO_KEEPALIVE: +#if SO_REUSE + case SO_REUSEADDR: +#endif /* SO_REUSE */ + if ((optname == SO_BROADCAST) && + (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) { + done_socket(sock); + return ENOPROTOOPT; + } + + optname = lwip_sockopt_to_ipopt(optname); + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + if (*(const int *)optval) { + ip_set_option(sock->conn->pcb.ip, optname); + } else { + ip_reset_option(sock->conn->pcb.ip, optname); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n", + s, optname, (*(const int *)optval ? 
"on" : "off"))); + break; + + /* SO_TYPE is get-only */ + /* SO_ERROR is get-only */ + +#if LWIP_SO_SNDTIMEO + case SO_SNDTIMEO: { + long ms_long; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval); + if (ms_long < 0) { + done_socket(sock); + return EINVAL; + } + netconn_set_sendtimeout(sock->conn, ms_long); + break; + } +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + case SO_RCVTIMEO: { + long ms_long; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval); + if (ms_long < 0) { + done_socket(sock); + return EINVAL; + } + netconn_set_recvtimeout(sock->conn, (u32_t)ms_long); + break; + } +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + case SO_RCVBUF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int); + netconn_set_recvbufsize(sock->conn, *(const int *)optval); + break; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + case SO_LINGER: { + const struct linger *linger = (const struct linger *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger); + if (linger->l_onoff) { + int lingersec = linger->l_linger; + if (lingersec < 0) { + done_socket(sock); + return EINVAL; + } + if (lingersec > 0xFFFF) { + lingersec = 0xFFFF; + } + sock->conn->linger = (s16_t)lingersec; + } else { + sock->conn->linger = -1; + } + } + break; +#endif /* LWIP_SO_LINGER */ +#if LWIP_UDP + case SO_NO_CHECK: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP); +#if LWIP_UDPLITE + if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) { + /* this flag is only available for UDP, not for UDP lite */ + done_socket(sock); + return EAFNOSUPPORT; + } +#endif /* LWIP_UDPLITE */ + if (*(const int *)optval) { + udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } else { + udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } + break; +#endif /* LWIP_UDP */ + case SO_BINDTODEVICE: { + const struct ifreq *iface; + struct netif *n = NULL; + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct ifreq); + + iface = (const struct ifreq *)optval; + if (iface->ifr_name[0] != 0) { + n = netif_find(iface->ifr_name); + if (n == NULL) { + done_socket(sock); + return ENODEV; + } + } + + switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) { +#if LWIP_TCP + case NETCONN_TCP: + tcp_bind_netif(sock->conn->pcb.tcp, n); + break; +#endif +#if LWIP_UDP + case NETCONN_UDP: + udp_bind_netif(sock->conn->pcb.udp, n); + break; +#endif +#if LWIP_RAW + case NETCONN_RAW: + raw_bind_netif(sock->conn->pcb.raw, n); + break; +#endif + default: + LWIP_ASSERT("Unhandled netconn type in SO_BINDTODEVICE", 0); + break; + } + } + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + + /* Level: IPPROTO_IP */ + case IPPROTO_IP: + switch (optname) { + case IP_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + sock->conn->pcb.ip->ttl = (u8_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) 
-> %d\n", + s, sock->conn->pcb.ip->ttl)); + break; + case IP_TOS: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + sock->conn->pcb.ip->tos = (u8_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n", + s, sock->conn->pcb.ip->tos)); + break; +#if LWIP_NETBUF_RECVINFO + case IP_PKTINFO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP); + if (*(const int *)optval) { + sock->conn->flags |= NETCONN_FLAG_PKTINFO; + } else { + sock->conn->flags &= ~NETCONN_FLAG_PKTINFO; + } + break; +#endif /* LWIP_NETBUF_RECVINFO */ +#if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP + case IP_MULTICAST_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP); + udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t *)optval)); + break; + case IP_MULTICAST_IF: { + ip4_addr_t if_addr; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP); + inet_addr_to_ip4addr(&if_addr, (const struct in_addr *)optval); + udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr); + } + break; + case IP_MULTICAST_LOOP: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP); + if (*(const u8_t *)optval) { + udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP); + } else { + udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP); + } + break; +#endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */ +#if LWIP_IGMP + case IP_ADD_MEMBERSHIP: + case IP_DROP_MEMBERSHIP: { + /* If this is a TCP or a RAW socket, ignore these options. */ + err_t igmp_err; + const struct ip_mreq *imr = (const struct ip_mreq *)optval; + ip4_addr_t if_addr; + ip4_addr_t multi_addr; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP); + inet_addr_to_ip4addr(&if_addr, &imr->imr_interface); + inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr); + if (optname == IP_ADD_MEMBERSHIP) { + if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) { + /* cannot track membership (out of memory) */ + err = ENOMEM; + igmp_err = ERR_OK; + } else { + igmp_err = igmp_joingroup(&if_addr, &multi_addr); + } + } else { + igmp_err = igmp_leavegroup(&if_addr, &multi_addr); + lwip_socket_unregister_membership(s, &if_addr, &multi_addr); + } + if (igmp_err != ERR_OK) { + err = EADDRNOTAVAIL; + } + } + break; +#endif /* LWIP_IGMP */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + +#if LWIP_TCP + /* Level: IPPROTO_TCP */ + case IPPROTO_TCP: + /* Special case: all IPPROTO_TCP option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP); + if (sock->conn->pcb.tcp->state == LISTEN) { + done_socket(sock); + return EINVAL; + } + switch (optname) { + case TCP_NODELAY: + if (*(const int *)optval) { + tcp_nagle_disable(sock->conn->pcb.tcp); + } else { + tcp_nagle_enable(sock->conn->pcb.tcp); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n", + s, (*(const int *)optval) ? 
"on" : "off") ); + break; + case TCP_KEEPALIVE: + sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_idle)); + break; + +#if LWIP_TCP_KEEPALIVE + case TCP_KEEPIDLE: + sock->conn->pcb.tcp->keep_idle = 1000 * (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_idle)); + break; + case TCP_KEEPINTVL: + sock->conn->pcb.tcp->keep_intvl = 1000 * (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_intvl)); + break; + case TCP_KEEPCNT: + sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_cnt)); + break; +#endif /* LWIP_TCP_KEEPALIVE */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_TCP*/ + +#if LWIP_IPV6 + /* Level: IPPROTO_IPV6 */ + case IPPROTO_IPV6: + switch (optname) { + case IPV6_V6ONLY: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + if (*(const int *)optval) { + netconn_set_ipv6only(sock->conn, 1); + } else { + netconn_set_ipv6only(sock->conn, 0); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n", + s, (netconn_get_ipv6only(sock->conn) ? 1 : 0))); + break; +#if LWIP_IPV6_MLD + case IPV6_JOIN_GROUP: + case IPV6_LEAVE_GROUP: { + /* If this is a TCP or a RAW socket, ignore these options. */ + err_t mld6_err; + struct netif *netif; + ip6_addr_t multi_addr; + const struct ipv6_mreq *imr = (const struct ipv6_mreq *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ipv6_mreq, NETCONN_UDP); + inet6_addr_to_ip6addr(&multi_addr, &imr->ipv6mr_multiaddr); + LWIP_ASSERT("Invalid netif index", imr->ipv6mr_interface <= 0xFFu); + netif = netif_get_by_index((u8_t)imr->ipv6mr_interface); + if (netif == NULL) { + err = EADDRNOTAVAIL; + break; + } + + if (optname == IPV6_JOIN_GROUP) { + if (!lwip_socket_register_mld6_membership(s, imr->ipv6mr_interface, &multi_addr)) { + /* cannot track membership (out of memory) */ + err = ENOMEM; + mld6_err = ERR_OK; + } else { + mld6_err = mld6_joingroup_netif(netif, &multi_addr); + } + } else { + mld6_err = mld6_leavegroup_netif(netif, &multi_addr); + lwip_socket_unregister_mld6_membership(s, imr->ipv6mr_interface, &multi_addr); + } + if (mld6_err != ERR_OK) { + err = EADDRNOTAVAIL; + } + } + break; +#endif /* LWIP_IPV6_MLD */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE + /* Level: IPPROTO_UDPLITE */ + case IPPROTO_UDPLITE: + /* Special case: all IPPROTO_UDPLITE option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + /* If this is no UDP lite socket, ignore any options. 
*/ + if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) { + done_socket(sock); + return ENOPROTOOPT; + } + switch (optname) { + case UDPLITE_SEND_CSCOV: + if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) { + /* don't allow illegal values! */ + sock->conn->pcb.udp->chksum_len_tx = 8; + } else { + sock->conn->pcb.udp->chksum_len_tx = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n", + s, (*(const int *)optval)) ); + break; + case UDPLITE_RECV_CSCOV: + if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) { + /* don't allow illegal values! */ + sock->conn->pcb.udp->chksum_len_rx = 8; + } else { + sock->conn->pcb.udp->chksum_len_rx = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n", + s, (*(const int *)optval)) ); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_UDP */ + /* Level: IPPROTO_RAW */ + case IPPROTO_RAW: + switch (optname) { +#if LWIP_IPV6 && LWIP_RAW + case IPV6_CHECKSUM: + /* It should not be possible to disable the checksum generation with ICMPv6 + * as per RFC 3542 chapter 3.1 */ + if (sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) { + done_socket(sock); + return EINVAL; + } + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW); + if (*(const int *)optval < 0) { + sock->conn->pcb.raw->chksum_reqd = 0; + } else if (*(const int *)optval & 1) { + /* Per RFC3542, odd offsets are not allowed */ + done_socket(sock); + return EINVAL; + } else { + sock->conn->pcb.raw->chksum_reqd = 1; + sock->conn->pcb.raw->chksum_offset = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) 
-> %d\n", + s, sock->conn->pcb.raw->chksum_reqd)); + break; +#endif /* LWIP_IPV6 && LWIP_RAW */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n", + s, level, optname)); + err = ENOPROTOOPT; + break; + } /* switch (level) */ + + done_socket(sock); + return err; +} + +int +lwip_ioctl(int s, long cmd, void *argp) +{ + struct lwip_sock *sock = get_socket(s); + u8_t val; +#if LWIP_SO_RCVBUF + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ + + if (!sock) { + return -1; + } + + switch (cmd) { +#if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE + case FIONREAD: + if (!argp) { + sock_set_errno(sock, EINVAL); + done_socket(sock); + return -1; + } +#if LWIP_FIONREAD_LINUXMODE + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + struct netbuf *nb; + if (sock->lastdata.netbuf) { + nb = sock->lastdata.netbuf; + *((int *)argp) = nb->p->tot_len; + } else { + struct netbuf *rxbuf; + err_t err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &rxbuf, NETCONN_DONTBLOCK); + if (err != ERR_OK) { + *((int *)argp) = 0; + } else { + sock->lastdata.netbuf = rxbuf; + *((int *)argp) = rxbuf->p->tot_len; + } + } + done_socket(sock); + return 0; + } +#endif /* LWIP_FIONREAD_LINUXMODE */ + +#if LWIP_SO_RCVBUF + /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */ + SYS_ARCH_GET(sock->conn->recv_avail, recv_avail); + if (recv_avail < 0) { + recv_avail = 0; + } + + /* Check if there is data left from the last recv operation. /maq 041215 */ + if (sock->lastdata.netbuf) { + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + recv_avail += sock->lastdata.pbuf->tot_len; + } else { + recv_avail += sock->lastdata.netbuf->p->tot_len; + } + } + *((int *)argp) = recv_avail; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t *)argp))); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +#else /* LWIP_SO_RCVBUF */ + break; +#endif /* LWIP_SO_RCVBUF */ +#endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */ + + case (long)FIONBIO: + val = 0; + if (argp && *(int *)argp) { + val = 1; + } + netconn_set_nonblocking(sock->conn, val); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; + + default: + break; + } /* switch (cmd) */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp)); + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + done_socket(sock); + return -1; +} + +/** A minimal implementation of fcntl. + * Currently only the commands F_GETFL and F_SETFL are implemented. + * The flag O_NONBLOCK and access modes are supported for F_GETFL, only + * the flag O_NONBLOCK is implemented for F_SETFL. + */ +int +lwip_fcntl(int s, int cmd, int val) +{ + struct lwip_sock *sock = get_socket(s); + int ret = -1; + int op_mode = 0; + + if (!sock) { + return -1; + } + + switch (cmd) { + case F_GETFL: + ret = netconn_is_nonblocking(sock->conn) ? 
O_NONBLOCK : 0; + sock_set_errno(sock, 0); + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); +#else + SYS_ARCH_DECL_PROTECT(lev); + /* the proper thing to do here would be to get into the tcpip_thread, + but locking should be OK as well since we only *read* some flags */ + SYS_ARCH_PROTECT(lev); +#endif +#if LWIP_TCP + if (sock->conn->pcb.tcp) { + if (!(sock->conn->pcb.tcp->flags & TF_RXCLOSED)) { + op_mode |= O_RDONLY; + } + if (!(sock->conn->pcb.tcp->flags & TF_FIN)) { + op_mode |= O_WRONLY; + } + } +#endif +#if LWIP_TCPIP_CORE_LOCKING + UNLOCK_TCPIP_CORE(); +#else + SYS_ARCH_UNPROTECT(lev); +#endif + } else { + op_mode |= O_RDWR; + } + + /* ensure O_RDWR for (O_RDONLY|O_WRONLY) != O_RDWR cases */ + ret |= (op_mode == (O_RDONLY | O_WRONLY)) ? O_RDWR : op_mode; + + break; + case F_SETFL: + /* Bits corresponding to the file access mode and the file creation flags [..] that are set in arg shall be ignored */ + val &= ~(O_RDONLY | O_WRONLY | O_RDWR); + if ((val & ~O_NONBLOCK) == 0) { + /* only O_NONBLOCK, all other bits are zero */ + netconn_set_nonblocking(sock->conn, val & O_NONBLOCK); + ret = 0; + sock_set_errno(sock, 0); + } else { + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + } + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val)); + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + break; + } + done_socket(sock); + return ret; +} + +#if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES +int +fcntl(int s, int cmd, ...) +{ + va_list ap; + int val; + + va_start(ap, cmd); + val = va_arg(ap, int); + va_end(ap); + return lwip_fcntl(s, cmd, val); +} +#endif + +const char * +lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size) +{ + const char *ret = NULL; + int size_int = (int)size; + if (size_int < 0) { + set_errno(ENOSPC); + return NULL; + } + switch (af) { +#if LWIP_IPV4 + case AF_INET: + ret = ip4addr_ntoa_r((const ip4_addr_t *)src, dst, size_int); + if (ret == NULL) { + set_errno(ENOSPC); + } + break; +#endif +#if LWIP_IPV6 + case AF_INET6: + ret = ip6addr_ntoa_r((const ip6_addr_t *)src, dst, size_int); + if (ret == NULL) { + set_errno(ENOSPC); + } + break; +#endif + default: + set_errno(EAFNOSUPPORT); + break; + } + return ret; +} + +int +lwip_inet_pton(int af, const char *src, void *dst) +{ + int err; + switch (af) { +#if LWIP_IPV4 + case AF_INET: + err = ip4addr_aton(src, (ip4_addr_t *)dst); + break; +#endif +#if LWIP_IPV6 + case AF_INET6: { + /* convert into temporary variable since ip6_addr_t might be larger + than in6_addr when scopes are enabled */ + ip6_addr_t addr; + err = ip6addr_aton(src, &addr); + if (err) { + memcpy(dst, &addr.addr, sizeof(addr.addr)); + } + break; + } +#endif + default: + err = -1; + set_errno(EAFNOSUPPORT); + break; + } + return err; +} + +#if LWIP_IGMP +/** Register a new IGMP membership. On socket close, the membership is dropped automatically. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). 
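+ *
+ * It is reached from lwip_setsockopt_impl() when an application joins a group,
+ * e.g. (illustrative sketch only; the group address below is a placeholder):
+ * @code
+ *   struct ip_mreq mreq;
+ *   mreq.imr_multiaddr.s_addr = inet_addr("239.0.0.1");
+ *   mreq.imr_interface.s_addr = INADDR_ANY;
+ *   lwip_setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
+ * @endcode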
+ * + * @return 1 on success, 0 on failure + */ +static int +lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return 0; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv4_multicast_memberships[i].sock == NULL) { + socket_ipv4_multicast_memberships[i].sock = sock; + ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr); + ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr); + done_socket(sock); + return 1; + } + } + done_socket(sock); + return 0; +} + +/** Unregister a previously registered membership. This prevents dropping the membership + * on socket close. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if ((socket_ipv4_multicast_memberships[i].sock == sock) && + ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) && + ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) { + socket_ipv4_multicast_memberships[i].sock = NULL; + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr); + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr); + break; + } + } + done_socket(sock); +} + +/** Drop all memberships of a socket that were not dropped explicitly via setsockopt. + * + * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_drop_registered_memberships(int s) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv4_multicast_memberships[i].sock == sock) { + ip_addr_t multi_addr, if_addr; + ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr); + ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr); + socket_ipv4_multicast_memberships[i].sock = NULL; + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr); + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr); + + netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE); + } + } + done_socket(sock); +} +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6_MLD +/** Register a new MLD6 membership. On socket close, the membership is dropped automatically. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). + * + * @return 1 on success, 0 on failure + */ +static int +lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return 0; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv6_multicast_memberships[i].sock == NULL) { + socket_ipv6_multicast_memberships[i].sock = sock; + socket_ipv6_multicast_memberships[i].if_idx = (u8_t)if_idx; + ip6_addr_copy(socket_ipv6_multicast_memberships[i].multi_addr, *multi_addr); + done_socket(sock); + return 1; + } + } + done_socket(sock); + return 0; +} + +/** Unregister a previously registered MLD6 membership. This prevents dropping the membership + * on socket close. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). 
+ */ +static void +lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if ((socket_ipv6_multicast_memberships[i].sock == sock) && + (socket_ipv6_multicast_memberships[i].if_idx == if_idx) && + ip6_addr_cmp(&socket_ipv6_multicast_memberships[i].multi_addr, multi_addr)) { + socket_ipv6_multicast_memberships[i].sock = NULL; + socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX; + ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr); + break; + } + } + done_socket(sock); +} + +/** Drop all MLD6 memberships of a socket that were not dropped explicitly via setsockopt. + * + * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_drop_registered_mld6_memberships(int s) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv6_multicast_memberships[i].sock == sock) { + ip_addr_t multi_addr; + u8_t if_idx; + + ip_addr_copy_from_ip6(multi_addr, socket_ipv6_multicast_memberships[i].multi_addr); + if_idx = socket_ipv6_multicast_memberships[i].if_idx; + + socket_ipv6_multicast_memberships[i].sock = NULL; + socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX; + ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr); + + netconn_join_leave_group_netif(sock->conn, &multi_addr, if_idx, NETCONN_LEAVE); + } + } + done_socket(sock); +} +#endif /* LWIP_IPV6_MLD */ + +#endif /* LWIP_SOCKET */ diff --git a/Middlewares/Third_Party/LwIP/src/api/tcpip.c b/Middlewares/Third_Party/LwIP/src/api/tcpip.c new file mode 100644 index 00000000..743553a5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/api/tcpip.c @@ -0,0 +1,658 @@ +/** + * @file + * Sequential API Main thread module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcpip_priv.h" +#include "lwip/sys.h" +#include "lwip/memp.h" +#include "lwip/mem.h" +#include "lwip/init.h" +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/etharp.h" +#include "netif/ethernet.h" + +#define TCPIP_MSG_VAR_REF(name) API_VAR_REF(name) +#define TCPIP_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct tcpip_msg, name) +#define TCPIP_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct tcpip_msg, MEMP_TCPIP_MSG_API, name, ERR_MEM) +#define TCPIP_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_TCPIP_MSG_API, name) + +/* global variables */ +static tcpip_init_done_fn tcpip_init_done; +static void *tcpip_init_done_arg; +static sys_mbox_t tcpip_mbox; + +#if LWIP_TCPIP_CORE_LOCKING +/** The global semaphore to lock the stack. */ +sys_mutex_t lock_tcpip_core; +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +static void tcpip_thread_handle_msg(struct tcpip_msg *msg); + +#if !LWIP_TIMERS +/* wait for a message with timers disabled (e.g. pass a timer-check trigger into tcpip_thread) */ +#define TCPIP_MBOX_FETCH(mbox, msg) sys_mbox_fetch(mbox, msg) +#else /* !LWIP_TIMERS */ +/* wait for a message, timeouts are processed while waiting */ +#define TCPIP_MBOX_FETCH(mbox, msg) tcpip_timeouts_mbox_fetch(mbox, msg) +/** + * Wait (forever) for a message to arrive in an mbox. + * While waiting, timeouts are processed. + * + * @param mbox the mbox to fetch the message from + * @param msg the place to store the message + */ +static void +tcpip_timeouts_mbox_fetch(sys_mbox_t *mbox, void **msg) +{ + u32_t sleeptime, res; + +again: + LWIP_ASSERT_CORE_LOCKED(); + + sleeptime = sys_timeouts_sleeptime(); + if (sleeptime == SYS_TIMEOUTS_SLEEPTIME_INFINITE) { + UNLOCK_TCPIP_CORE(); + sys_arch_mbox_fetch(mbox, msg, 0); + LOCK_TCPIP_CORE(); + return; + } else if (sleeptime == 0) { + sys_check_timeouts(); + /* We try again to fetch a message from the mbox. */ + goto again; + } + + UNLOCK_TCPIP_CORE(); + res = sys_arch_mbox_fetch(mbox, msg, sleeptime); + LOCK_TCPIP_CORE(); + if (res == SYS_ARCH_TIMEOUT) { + /* If a SYS_ARCH_TIMEOUT value is returned, a timeout occurred + before a message could be fetched. */ + sys_check_timeouts(); + /* We try again to fetch a message from the mbox. */ + goto again; + } +} +#endif /* !LWIP_TIMERS */ + +/** + * The main lwIP thread. This thread has exclusive access to lwIP core functions + * (unless access to them is not locked). Other threads communicate with this + * thread using message boxes. + * + * It also starts all the timers to make sure they are running in the right + * thread context. + * + * @param arg unused argument + */ +static void +tcpip_thread(void *arg) +{ + struct tcpip_msg *msg; + LWIP_UNUSED_ARG(arg); + + LWIP_MARK_TCPIP_THREAD(); + + LOCK_TCPIP_CORE(); + if (tcpip_init_done != NULL) { + tcpip_init_done(tcpip_init_done_arg); + } + + while (1) { /* MAIN Loop */ + LWIP_TCPIP_THREAD_ALIVE(); + /* wait for a message, timeouts are processed while waiting */ + TCPIP_MBOX_FETCH(&tcpip_mbox, (void **)&msg); + if (msg == NULL) { + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: NULL\n")); + LWIP_ASSERT("tcpip_thread: invalid message", 0); + continue; + } + tcpip_thread_handle_msg(msg); + } +} + +/* Handle a single tcpip_msg + * This is in its own function for access by tests only. 
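+ * In TCPIP_THREAD_TEST builds it is driven by tcpip_thread_poll_one() below,
+ * e.g. from a single-threaded test loop (illustrative sketch):
+ *   while (tcpip_thread_poll_one()) { }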
+ */ +static void +tcpip_thread_handle_msg(struct tcpip_msg *msg) +{ + switch (msg->type) { +#if !LWIP_TCPIP_CORE_LOCKING + case TCPIP_MSG_API: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API message %p\n", (void *)msg)); + msg->msg.api_msg.function(msg->msg.api_msg.msg); + break; + case TCPIP_MSG_API_CALL: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API CALL message %p\n", (void *)msg)); + msg->msg.api_call.arg->err = msg->msg.api_call.function(msg->msg.api_call.arg); + sys_sem_signal(msg->msg.api_call.sem); + break; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + +#if !LWIP_TCPIP_CORE_LOCKING_INPUT + case TCPIP_MSG_INPKT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: PACKET %p\n", (void *)msg)); + if (msg->msg.inp.input_fn(msg->msg.inp.p, msg->msg.inp.netif) != ERR_OK) { + pbuf_free(msg->msg.inp.p); + } + memp_free(MEMP_TCPIP_MSG_INPKT, msg); + break; +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + case TCPIP_MSG_TIMEOUT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: TIMEOUT %p\n", (void *)msg)); + sys_timeout(msg->msg.tmo.msecs, msg->msg.tmo.h, msg->msg.tmo.arg); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; + case TCPIP_MSG_UNTIMEOUT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: UNTIMEOUT %p\n", (void *)msg)); + sys_untimeout(msg->msg.tmo.h, msg->msg.tmo.arg); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + + case TCPIP_MSG_CALLBACK: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK %p\n", (void *)msg)); + msg->msg.cb.function(msg->msg.cb.ctx); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; + + case TCPIP_MSG_CALLBACK_STATIC: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK_STATIC %p\n", (void *)msg)); + msg->msg.cb.function(msg->msg.cb.ctx); + break; + + default: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: %d\n", msg->type)); + LWIP_ASSERT("tcpip_thread: invalid message", 0); + break; + } +} + +#ifdef TCPIP_THREAD_TEST +/** Work on queued items in single-threaded test mode */ +int +tcpip_thread_poll_one(void) +{ + int ret = 0; + struct tcpip_msg *msg; + + if (sys_arch_mbox_tryfetch(&tcpip_mbox, (void **)&msg) != SYS_ARCH_TIMEOUT) { + LOCK_TCPIP_CORE(); + if (msg != NULL) { + tcpip_thread_handle_msg(msg); + ret = 1; + } + UNLOCK_TCPIP_CORE(); + } + return ret; +} +#endif + +/** + * Pass a received packet to tcpip_thread for input processing + * + * @param p the received packet + * @param inp the network interface on which the packet was received + * @param input_fn input function to call + */ +err_t +tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn) +{ +#if LWIP_TCPIP_CORE_LOCKING_INPUT + err_t ret; + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_inpkt: PACKET %p/%p\n", (void *)p, (void *)inp)); + LOCK_TCPIP_CORE(); + ret = input_fn(p, inp); + UNLOCK_TCPIP_CORE(); + return ret; +#else /* LWIP_TCPIP_CORE_LOCKING_INPUT */ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_INPKT); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_INPKT; + msg->msg.inp.p = p; + msg->msg.inp.netif = inp; + msg->msg.inp.input_fn = input_fn; + if (sys_mbox_trypost(&tcpip_mbox, msg) != ERR_OK) { + memp_free(MEMP_TCPIP_MSG_INPKT, msg); + return ERR_MEM; + } + return ERR_OK; +#endif /* LWIP_TCPIP_CORE_LOCKING_INPUT */ +} + +/** + * @ingroup lwip_os + * Pass a received packet to tcpip_thread for input processing with + * ethernet_input or ip_input. 
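+ * It is normally registered as the netif input function when the netif is
+ * added, e.g. (illustrative sketch for an IPv4 build; ethernetif_init stands
+ * for the port's own netif init function):
+ * @code
+ *   netif_add(&gnetif, &ipaddr, &netmask, &gw, NULL, &ethernetif_init, &tcpip_input);
+ * @endcode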
Don't call directly, pass to netif_add() + * and call netif->input(). + * + * @param p the received packet, p->payload pointing to the Ethernet header or + * to an IP header (if inp doesn't have NETIF_FLAG_ETHARP or + * NETIF_FLAG_ETHERNET flags) + * @param inp the network interface on which the packet was received + */ +err_t +tcpip_input(struct pbuf *p, struct netif *inp) +{ +#if LWIP_ETHERNET + if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { + return tcpip_inpkt(p, inp, ethernet_input); + } else +#endif /* LWIP_ETHERNET */ + return tcpip_inpkt(p, inp, ip_input); +} + +/** + * @ingroup lwip_os + * Call a specific function in the thread context of + * tcpip_thread for easy access synchronization. + * A function called in that way may access lwIP core code + * without fearing concurrent access. + * Blocks until the request is posted. + * Must not be called from interrupt context! + * + * @param function the function to call + * @param ctx parameter passed to f + * @return ERR_OK if the function was called, another err_t if not + * + * @see tcpip_try_callback + */ +err_t +tcpip_callback(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_CALLBACK; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} + +/** + * @ingroup lwip_os + * Call a specific function in the thread context of + * tcpip_thread for easy access synchronization. + * A function called in that way may access lwIP core code + * without fearing concurrent access. + * Does NOT block when the request cannot be posted because the + * tcpip_mbox is full, but returns ERR_MEM instead. + * Can be called from interrupt context. 
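+ * A minimal usage sketch (illustrative; the my_* names are placeholders):
+ * my_deferred_work() later runs in tcpip_thread context, and if the mbox is
+ * full the event is simply dropped here.
+ * @code
+ *   static void my_deferred_work(void *ctx)
+ *   {
+ *     LWIP_UNUSED_ARG(ctx);
+ *   }
+ *   void my_eth_irq_handler(void)
+ *   {
+ *     (void)tcpip_try_callback(my_deferred_work, NULL);
+ *   }
+ * @endcode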
+ * + * @param function the function to call + * @param ctx parameter passed to f + * @return ERR_OK if the function was called, another err_t if not + * + * @see tcpip_callback + */ +err_t +tcpip_try_callback(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_CALLBACK; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + + if (sys_mbox_trypost(&tcpip_mbox, msg) != ERR_OK) { + memp_free(MEMP_TCPIP_MSG_API, msg); + return ERR_MEM; + } + return ERR_OK; +} + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS +/** + * call sys_timeout in tcpip_thread + * + * @param msecs time in milliseconds for timeout + * @param h function to be called on timeout + * @param arg argument to pass to timeout function h + * @return ERR_MEM on memory error, ERR_OK otherwise + */ +err_t +tcpip_timeout(u32_t msecs, sys_timeout_handler h, void *arg) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_TIMEOUT; + msg->msg.tmo.msecs = msecs; + msg->msg.tmo.h = h; + msg->msg.tmo.arg = arg; + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} + +/** + * call sys_untimeout in tcpip_thread + * + * @param h function to be called on timeout + * @param arg argument to pass to timeout function h + * @return ERR_MEM on memory error, ERR_OK otherwise + */ +err_t +tcpip_untimeout(sys_timeout_handler h, void *arg) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_UNTIMEOUT; + msg->msg.tmo.h = h; + msg->msg.tmo.arg = arg; + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + + +/** + * Sends a message to TCPIP thread to call a function. Caller thread blocks on + * on a provided semaphore, which ist NOT automatically signalled by TCPIP thread, + * this has to be done by the user. + * It is recommended to use LWIP_TCPIP_CORE_LOCKING since this is the way + * with least runtime overhead. + * + * @param fn function to be called from TCPIP thread + * @param apimsg argument to API function + * @param sem semaphore to wait on + * @return ERR_OK if the function was called, another err_t if not + */ +err_t +tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t *sem) +{ +#if LWIP_TCPIP_CORE_LOCKING + LWIP_UNUSED_ARG(sem); + LOCK_TCPIP_CORE(); + fn(apimsg); + UNLOCK_TCPIP_CORE(); + return ERR_OK; +#else /* LWIP_TCPIP_CORE_LOCKING */ + TCPIP_MSG_VAR_DECLARE(msg); + + LWIP_ASSERT("semaphore not initialized", sys_sem_valid(sem)); + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + TCPIP_MSG_VAR_ALLOC(msg); + TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API; + TCPIP_MSG_VAR_REF(msg).msg.api_msg.function = fn; + TCPIP_MSG_VAR_REF(msg).msg.api_msg.msg = apimsg; + sys_mbox_post(&tcpip_mbox, &TCPIP_MSG_VAR_REF(msg)); + sys_arch_sem_wait(sem, 0); + TCPIP_MSG_VAR_FREE(msg); + return ERR_OK; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} + +/** + * Synchronously calls function in TCPIP thread and waits for its completion. + * It is recommended to use LWIP_TCPIP_CORE_LOCKING (preferred) or + * LWIP_NETCONN_SEM_PER_THREAD. 
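+ *
+ * Typical pattern (illustrative, hypothetical names): embed
+ * struct tcpip_api_call_data as the first member of a request struct so the
+ * pointer can be cast back inside the function run in tcpip_thread:
+ *   struct my_req { struct tcpip_api_call_data call; int arg; int result; };
+ *   static err_t my_do_in_tcpip(struct tcpip_api_call_data *c)
+ *   {
+ *     struct my_req *req = (struct my_req *)c;
+ *     req->result = req->arg + 1;   (stand-in for real core work)
+ *     return ERR_OK;
+ *   }
+ *   and from any thread: tcpip_api_call(my_do_in_tcpip, &req.call);
+ * With LWIP_TCPIP_CORE_LOCKING or LWIP_NETCONN_SEM_PER_THREAD enabled no
+ * per-call semaphore has to be created.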
+ * If not, a semaphore is created and destroyed on every call which is usually + * an expensive/slow operation. + * @param fn Function to call + * @param call Call parameters + * @return Return value from tcpip_api_call_fn + */ +err_t +tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call) +{ +#if LWIP_TCPIP_CORE_LOCKING + err_t err; + LOCK_TCPIP_CORE(); + err = fn(call); + UNLOCK_TCPIP_CORE(); + return err; +#else /* LWIP_TCPIP_CORE_LOCKING */ + TCPIP_MSG_VAR_DECLARE(msg); + +#if !LWIP_NETCONN_SEM_PER_THREAD + err_t err = sys_sem_new(&call->sem, 0); + if (err != ERR_OK) { + return err; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + TCPIP_MSG_VAR_ALLOC(msg); + TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API_CALL; + TCPIP_MSG_VAR_REF(msg).msg.api_call.arg = call; + TCPIP_MSG_VAR_REF(msg).msg.api_call.function = fn; +#if LWIP_NETCONN_SEM_PER_THREAD + TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = &call->sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_mbox_post(&tcpip_mbox, &TCPIP_MSG_VAR_REF(msg)); + sys_arch_sem_wait(TCPIP_MSG_VAR_REF(msg).msg.api_call.sem, 0); + TCPIP_MSG_VAR_FREE(msg); + +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(&call->sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + return call->err; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} + +/** + * @ingroup lwip_os + * Allocate a structure for a static callback message and initialize it. + * The message has a special type such that lwIP never frees it. + * This is intended to be used to send "static" messages from interrupt context, + * e.g. the message is allocated once and posted several times from an IRQ + * using tcpip_callbackmsg_trycallback(). + * Example usage: Trigger execution of an ethernet IRQ DPC routine in lwIP thread context. + * + * @param function the function to call + * @param ctx parameter passed to function + * @return a struct pointer to pass to tcpip_callbackmsg_trycallback(). + * + * @see tcpip_callbackmsg_trycallback() + * @see tcpip_callbackmsg_delete() + */ +struct tcpip_callback_msg * +tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return NULL; + } + msg->type = TCPIP_MSG_CALLBACK_STATIC; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + return (struct tcpip_callback_msg *)msg; +} + +/** + * @ingroup lwip_os + * Free a callback message allocated by tcpip_callbackmsg_new(). + * + * @param msg the message to free + * + * @see tcpip_callbackmsg_new() + */ +void +tcpip_callbackmsg_delete(struct tcpip_callback_msg *msg) +{ + memp_free(MEMP_TCPIP_MSG_API, msg); +} + +/** + * @ingroup lwip_os + * Try to post a callback-message to the tcpip_thread tcpip_mbox. + * + * @param msg pointer to the message to post + * @return sys_mbox_trypost() return code + * + * @see tcpip_callbackmsg_new() + */ +err_t +tcpip_callbackmsg_trycallback(struct tcpip_callback_msg *msg) +{ + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + return sys_mbox_trypost(&tcpip_mbox, msg); +} + +/** + * @ingroup lwip_os + * Try to post a callback-message to the tcpip_thread mbox. + * Same as @ref tcpip_callbackmsg_trycallback but calls sys_mbox_trypost_fromisr(), + * mainly to help FreeRTOS, where calls differ between task level and ISR level. 
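+ * In a FreeRTOS port the return value can be used to decide whether a
+ * higher-priority task was woken and a context switch should be requested on
+ * ISR exit (e.g. via portYIELD_FROM_ISR()); the exact convention is
+ * port-specific.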
+ * + * @param msg pointer to the message to post + * @return sys_mbox_trypost_fromisr() return code (without change, so this + * knowledge can be used to e.g. propagate "bool needs_scheduling") + * + * @see tcpip_callbackmsg_new() + */ +err_t +tcpip_callbackmsg_trycallback_fromisr(struct tcpip_callback_msg *msg) +{ + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + return sys_mbox_trypost_fromisr(&tcpip_mbox, msg); +} + +/** + * @ingroup lwip_os + * Initialize this module: + * - initialize all sub modules + * - start the tcpip_thread + * + * @param initfunc a function to call when tcpip_thread is running and finished initializing + * @param arg argument to pass to initfunc + */ +void +tcpip_init(tcpip_init_done_fn initfunc, void *arg) +{ + lwip_init(); + + tcpip_init_done = initfunc; + tcpip_init_done_arg = arg; + if (sys_mbox_new(&tcpip_mbox, TCPIP_MBOX_SIZE) != ERR_OK) { + LWIP_ASSERT("failed to create tcpip_thread mbox", 0); + } +#if LWIP_TCPIP_CORE_LOCKING + if (sys_mutex_new(&lock_tcpip_core) != ERR_OK) { + LWIP_ASSERT("failed to create lock_tcpip_core", 0); + } +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sys_thread_new(TCPIP_THREAD_NAME, tcpip_thread, NULL, TCPIP_THREAD_STACKSIZE, TCPIP_THREAD_PRIO); +} + +/** + * Simple callback function used with tcpip_callback to free a pbuf + * (pbuf_free has a wrong signature for tcpip_callback) + * + * @param p The pbuf (chain) to be dereferenced. + */ +static void +pbuf_free_int(void *p) +{ + struct pbuf *q = (struct pbuf *)p; + pbuf_free(q); +} + +/** + * A simple wrapper function that allows you to free a pbuf from interrupt context. + * + * @param p The pbuf (chain) to be dereferenced. + * @return ERR_OK if callback could be enqueued, an err_t if not + */ +err_t +pbuf_free_callback(struct pbuf *p) +{ + return tcpip_try_callback(pbuf_free_int, p); +} + +/** + * A simple wrapper function that allows you to free heap memory from + * interrupt context. + * + * @param m the heap memory to free + * @return ERR_OK if callback could be enqueued, an err_t if not + */ +err_t +mem_free_callback(void *m) +{ + return tcpip_try_callback(mem_free, m); +} + +#endif /* !NO_SYS */ diff --git a/Middlewares/Third_Party/LwIP/src/apps/altcp_tls/altcp_tls_mbedtls.c b/Middlewares/Third_Party/LwIP/src/apps/altcp_tls/altcp_tls_mbedtls.c new file mode 100644 index 00000000..bb8e623c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/apps/altcp_tls/altcp_tls_mbedtls.c @@ -0,0 +1,1270 @@ +/** + * @file + * Application layered TCP/TLS connection API (to be used from TCPIP thread) + * + * This file provides a TLS layer using mbedTLS + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + * Watch out: + * - 'sent' is always called with len==0 to the upper layer. This is because keeping + * track of the ratio of application data and TLS overhead would be too much. + * + * Mandatory security-related configuration: + * - ensure to add at least one strong entropy source to your mbedtls port (implement + * mbedtls_platform_entropy_poll or mbedtls_hardware_poll providing strong entropy) + * - define ALTCP_MBEDTLS_ENTROPY_PTR and ALTCP_MBEDTLS_ENTROPY_LEN to something providing + * GOOD custom entropy + * + * Missing things / @todo: + * - some unhandled/untested things migh be caught by LWIP_ASSERTs... + */ + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/apps/altcp_tls_mbedtls_opts.h" + +#if LWIP_ALTCP_TLS && LWIP_ALTCP_TLS_MBEDTLS + +#include "lwip/altcp.h" +#include "lwip/altcp_tls.h" +#include "lwip/priv/altcp_priv.h" + +#include "altcp_tls_mbedtls_structs.h" +#include "altcp_tls_mbedtls_mem.h" + +/* @todo: which includes are really needed? 
*/ +#include "mbedtls/entropy.h" +#include "mbedtls/ctr_drbg.h" +#include "mbedtls/certs.h" +#include "mbedtls/x509.h" +#include "mbedtls/ssl.h" +#include "mbedtls/net.h" +#include "mbedtls/error.h" +#include "mbedtls/debug.h" +#include "mbedtls/platform.h" +#include "mbedtls/memory_buffer_alloc.h" +#include "mbedtls/ssl_cache.h" +#include "mbedtls/ssl_ticket.h" + +#include "mbedtls/ssl_internal.h" /* to call mbedtls_flush_output after ERR_MEM */ + +#include + +#ifndef ALTCP_MBEDTLS_ENTROPY_PTR +#define ALTCP_MBEDTLS_ENTROPY_PTR NULL +#endif +#ifndef ALTCP_MBEDTLS_ENTROPY_LEN +#define ALTCP_MBEDTLS_ENTROPY_LEN 0 +#endif + +/* Variable prototype, the actual declaration is at the end of this file + since it contains pointers to static functions declared here */ +extern const struct altcp_functions altcp_mbedtls_functions; + +/** Our global mbedTLS configuration (server-specific, not connection-specific) */ +struct altcp_tls_config { + mbedtls_ssl_config conf; + mbedtls_x509_crt *cert; + mbedtls_pk_context *pkey; + uint8_t cert_count; + uint8_t cert_max; + uint8_t pkey_count; + uint8_t pkey_max; + mbedtls_x509_crt *ca; +#if defined(MBEDTLS_SSL_CACHE_C) && ALTCP_MBEDTLS_USE_SESSION_CACHE + /** Inter-connection cache for fast connection startup */ + struct mbedtls_ssl_cache_context cache; +#endif +#if defined(MBEDTLS_SSL_SESSION_TICKETS) && ALTCP_MBEDTLS_USE_SESSION_TICKETS + mbedtls_ssl_ticket_context ticket_ctx; +#endif +}; + +/** Entropy and random generator are shared by all mbedTLS configuration */ +struct altcp_tls_entropy_rng { + mbedtls_entropy_context entropy; + mbedtls_ctr_drbg_context ctr_drbg; + int ref; +}; +static struct altcp_tls_entropy_rng *altcp_tls_entropy_rng; + +static err_t altcp_mbedtls_lower_recv(void *arg, struct altcp_pcb *inner_conn, struct pbuf *p, err_t err); +static err_t altcp_mbedtls_setup(void *conf, struct altcp_pcb *conn, struct altcp_pcb *inner_conn); +static err_t altcp_mbedtls_lower_recv_process(struct altcp_pcb *conn, altcp_mbedtls_state_t *state); +static err_t altcp_mbedtls_handle_rx_appldata(struct altcp_pcb *conn, altcp_mbedtls_state_t *state); +static int altcp_mbedtls_bio_send(void *ctx, const unsigned char *dataptr, size_t size); + + +/* callback functions from inner/lower connection: */ + +/** Accept callback from lower connection (i.e. TCP) + * Allocate one of our structures, assign it to the new connection's 'state' and + * call the new connection's 'accepted' callback. If that succeeds, we wait + * to receive connection setup handshake bytes from the client. + */ +static err_t +altcp_mbedtls_lower_accept(void *arg, struct altcp_pcb *accepted_conn, err_t err) +{ + struct altcp_pcb *listen_conn = (struct altcp_pcb *)arg; + if (listen_conn && listen_conn->state && listen_conn->accept) { + err_t setup_err; + altcp_mbedtls_state_t *listen_state = (altcp_mbedtls_state_t *)listen_conn->state; + /* create a new altcp_conn to pass to the next 'accept' callback */ + struct altcp_pcb *new_conn = altcp_alloc(); + if (new_conn == NULL) { + return ERR_MEM; + } + setup_err = altcp_mbedtls_setup(listen_state->conf, new_conn, accepted_conn); + if (setup_err != ERR_OK) { + altcp_free(new_conn); + return setup_err; + } + return listen_conn->accept(listen_conn->arg, new_conn, err); + } + return ERR_ARG; +} + +/** Connected callback from lower connection (i.e. TCP). + * Not really implemented/tested yet... 
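+ * (On a successful lower connect the TLS handshake is driven from
+ * altcp_mbedtls_lower_recv_process(); the application's 'connected'
+ * callback is only invoked once the handshake has completed.)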
+ */ +static err_t +altcp_mbedtls_lower_connected(void *arg, struct altcp_pcb *inner_conn, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + LWIP_UNUSED_ARG(inner_conn); /* for LWIP_NOASSERT */ + if (conn && conn->state) { + LWIP_ASSERT("pcb mismatch", conn->inner_conn == inner_conn); + /* upper connected is called when handshake is done */ + if (err != ERR_OK) { + if (conn->connected) { + return conn->connected(conn->arg, conn, err); + } + } + return altcp_mbedtls_lower_recv_process(conn, (altcp_mbedtls_state_t *)conn->state); + } + return ERR_VAL; +} + +/* Call recved for possibly more than an u16_t */ +static void +altcp_mbedtls_lower_recved(struct altcp_pcb *inner_conn, int recvd_cnt) +{ + while (recvd_cnt > 0) { + u16_t recvd_part = (u16_t)LWIP_MIN(recvd_cnt, 0xFFFF); + altcp_recved(inner_conn, recvd_part); + recvd_cnt -= recvd_part; + } +} + +/** Recv callback from lower connection (i.e. TCP) + * This one mainly differs between connection setup/handshake (data is fed into mbedTLS only) + * and application phase (data is decoded by mbedTLS and passed on to the application). + */ +static err_t +altcp_mbedtls_lower_recv(void *arg, struct altcp_pcb *inner_conn, struct pbuf *p, err_t err) +{ + altcp_mbedtls_state_t *state; + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + + LWIP_ASSERT("no err expected", err == ERR_OK); + LWIP_UNUSED_ARG(err); + + if (!conn) { + /* no connection given as arg? should not happen, but prevent pbuf/conn leaks */ + if (p != NULL) { + pbuf_free(p); + } + altcp_close(inner_conn); + return ERR_CLSD; + } + state = (altcp_mbedtls_state_t *)conn->state; + LWIP_ASSERT("pcb mismatch", conn->inner_conn == inner_conn); + if (!state) { + /* already closed */ + if (p != NULL) { + pbuf_free(p); + } + altcp_close(inner_conn); + return ERR_CLSD; + } + + /* handle NULL pbuf (inner connection closed) */ + if (p == NULL) { + /* remote host sent FIN, remember this (SSL state is destroyed + when both sides are closed only!) */ + if ((state->flags & (ALTCP_MBEDTLS_FLAGS_HANDSHAKE_DONE | ALTCP_MBEDTLS_FLAGS_UPPER_CALLED)) == + (ALTCP_MBEDTLS_FLAGS_HANDSHAKE_DONE | ALTCP_MBEDTLS_FLAGS_UPPER_CALLED)) { + /* need to notify upper layer (e.g. 'accept' called or 'connect' succeeded) */ + if ((state->rx != NULL) || (state->rx_app != NULL)) { + state->flags |= ALTCP_MBEDTLS_FLAGS_RX_CLOSE_QUEUED; + /* this is a normal close (FIN) but we have unprocessed data, so delay the FIN */ + altcp_mbedtls_handle_rx_appldata(conn, state); + return ERR_OK; + } + state->flags |= ALTCP_MBEDTLS_FLAGS_RX_CLOSED; + if (conn->recv) { + return conn->recv(conn->arg, conn, NULL, ERR_OK); + } + } else { + /* before connection setup is done: call 'err' */ + if (conn->err) { + conn->err(conn->arg, ERR_CLSD); + } + altcp_close(conn); + } + return ERR_OK; + } + + /* If we come here, the connection is in good state (handshake phase or application data phase). + Queue up the pbuf for processing as handshake data or application data. */ + if (state->rx == NULL) { + state->rx = p; + } else { + LWIP_ASSERT("rx pbuf overflow", (int)p->tot_len + (int)p->len <= 0xFFFF); + pbuf_cat(state->rx, p); + } + return altcp_mbedtls_lower_recv_process(conn, state); +} + +static err_t +altcp_mbedtls_lower_recv_process(struct altcp_pcb *conn, altcp_mbedtls_state_t *state) +{ + if (!(state->flags & ALTCP_MBEDTLS_FLAGS_HANDSHAKE_DONE)) { + /* handle connection setup (handshake not done) */ + int ret = mbedtls_ssl_handshake(&state->ssl_context); + /* try to send data... 
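(push out any handshake records mbedTLS already handed to the inner connection through the BIO send callback)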
*/ + altcp_output(conn->inner_conn); + if (state->bio_bytes_read) { + /* acknowledge all bytes read */ + altcp_mbedtls_lower_recved(conn->inner_conn, state->bio_bytes_read); + state->bio_bytes_read = 0; + } + + if (ret == MBEDTLS_ERR_SSL_WANT_READ || ret == MBEDTLS_ERR_SSL_WANT_WRITE) { + /* handshake not done, wait for more recv calls */ + LWIP_ASSERT("in this state, the rx chain should be empty", state->rx == NULL); + return ERR_OK; + } + if (ret != 0) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("mbedtls_ssl_handshake failed: %d\n", ret)); + /* handshake failed, connection has to be closed */ + if (conn->err) { + conn->err(conn->arg, ERR_CLSD); + } + + if (altcp_close(conn) != ERR_OK) { + altcp_abort(conn); + } + return ERR_OK; + } + /* If we come here, handshake succeeded. */ + LWIP_ASSERT("state", state->bio_bytes_read == 0); + LWIP_ASSERT("state", state->bio_bytes_appl == 0); + state->flags |= ALTCP_MBEDTLS_FLAGS_HANDSHAKE_DONE; + /* issue "connect" callback" to upper connection (this can only happen for active open) */ + if (conn->connected) { + err_t err; + err = conn->connected(conn->arg, conn, ERR_OK); + if (err != ERR_OK) { + return err; + } + } + if (state->rx == NULL) { + return ERR_OK; + } + } + /* handle application data */ + return altcp_mbedtls_handle_rx_appldata(conn, state); +} + +/* Pass queued decoded rx data to application */ +static err_t +altcp_mbedtls_pass_rx_data(struct altcp_pcb *conn, altcp_mbedtls_state_t *state) +{ + err_t err; + struct pbuf *buf; + LWIP_ASSERT("conn != NULL", conn != NULL); + LWIP_ASSERT("state != NULL", state != NULL); + buf = state->rx_app; + if (buf) { + state->rx_app = NULL; + if (conn->recv) { + u16_t tot_len = buf->tot_len; + /* this needs to be increased first because the 'recved' call may come nested */ + state->rx_passed_unrecved += tot_len; + state->flags |= ALTCP_MBEDTLS_FLAGS_UPPER_CALLED; + err = conn->recv(conn->arg, conn, buf, ERR_OK); + if (err != ERR_OK) { + if (err == ERR_ABRT) { + return ERR_ABRT; + } + /* not received, leave the pbuf(s) queued (and decrease 'unrecved' again) */ + LWIP_ASSERT("state == conn->state", state == conn->state); + state->rx_app = buf; + state->rx_passed_unrecved -= tot_len; + LWIP_ASSERT("state->rx_passed_unrecved >= 0", state->rx_passed_unrecved >= 0); + if (state->rx_passed_unrecved < 0) { + state->rx_passed_unrecved = 0; + } + return err; + } + } else { + pbuf_free(buf); + } + } else if ((state->flags & (ALTCP_MBEDTLS_FLAGS_RX_CLOSE_QUEUED | ALTCP_MBEDTLS_FLAGS_RX_CLOSED)) == + ALTCP_MBEDTLS_FLAGS_RX_CLOSE_QUEUED) { + state->flags |= ALTCP_MBEDTLS_FLAGS_RX_CLOSED; + if (conn->recv) { + return conn->recv(conn->arg, conn, NULL, ERR_OK); + } + } + + /* application may have close the connection */ + if (conn->state != state) { + /* return error code to ensure altcp_mbedtls_handle_rx_appldata() exits the loop */ + return ERR_CLSD; + } + return ERR_OK; +} + +/* Helper function that processes rx application data stored in rx pbuf chain */ +static err_t +altcp_mbedtls_handle_rx_appldata(struct altcp_pcb *conn, altcp_mbedtls_state_t *state) +{ + int ret; + LWIP_ASSERT("state != NULL", state != NULL); + if (!(state->flags & ALTCP_MBEDTLS_FLAGS_HANDSHAKE_DONE)) { + /* handshake not done yet */ + return ERR_VAL; + } + do { + /* allocate a full-sized unchained PBUF_POOL: this is for RX! */ + struct pbuf *buf = pbuf_alloc(PBUF_RAW, PBUF_POOL_BUFSIZE, PBUF_POOL); + if (buf == NULL) { + /* We're short on pbufs, try again later from 'poll' or 'recv' callbacks. 
+ @todo: close on excessive allocation failures or leave this up to upper conn? */ + return ERR_OK; + } + + /* decrypt application data, this pulls encrypted RX data off state->rx pbuf chain */ + ret = mbedtls_ssl_read(&state->ssl_context, (unsigned char *)buf->payload, PBUF_POOL_BUFSIZE); + if (ret < 0) { + if (ret == MBEDTLS_ERR_SSL_CLIENT_RECONNECT) { + /* client is initiating a new connection using the same source port -> close connection or make handshake */ + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("new connection on same source port\n")); + LWIP_ASSERT("TODO: new connection on same source port, close this connection", 0); + } else if ((ret != MBEDTLS_ERR_SSL_WANT_READ) && (ret != MBEDTLS_ERR_SSL_WANT_WRITE)) { + if (ret == MBEDTLS_ERR_SSL_PEER_CLOSE_NOTIFY) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("connection was closed gracefully\n")); + } else if (ret == MBEDTLS_ERR_NET_CONN_RESET) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("connection was reset by peer\n")); + } + pbuf_free(buf); + return ERR_OK; + } else { + pbuf_free(buf); + return ERR_OK; + } + pbuf_free(buf); + altcp_abort(conn); + return ERR_ABRT; + } else { + err_t err; + if (ret) { + LWIP_ASSERT("bogus receive length", ret <= PBUF_POOL_BUFSIZE); + /* trim pool pbuf to actually decoded length */ + pbuf_realloc(buf, (u16_t)ret); + + state->bio_bytes_appl += ret; + if (mbedtls_ssl_get_bytes_avail(&state->ssl_context) == 0) { + /* Record is done, now we know the share between application and protocol bytes + and can adjust the RX window by the protocol bytes. + The rest is 'recved' by the application calling our 'recved' fn. */ + int overhead_bytes; + LWIP_ASSERT("bogus byte counts", state->bio_bytes_read > state->bio_bytes_appl); + overhead_bytes = state->bio_bytes_read - state->bio_bytes_appl; + altcp_mbedtls_lower_recved(conn->inner_conn, overhead_bytes); + state->bio_bytes_read = 0; + state->bio_bytes_appl = 0; + } + + if (state->rx_app == NULL) { + state->rx_app = buf; + } else { + pbuf_cat(state->rx_app, buf); + } + } else { + pbuf_free(buf); + buf = NULL; + } + err = altcp_mbedtls_pass_rx_data(conn, state); + if (err != ERR_OK) { + if (err == ERR_ABRT) { + /* recv callback needs to return this as the pcb is deallocated */ + return ERR_ABRT; + } + /* we hide all other errors as we retry feeding the pbuf to the app later */ + return ERR_OK; + } + } + } while (ret > 0); + return ERR_OK; +} + +/** Receive callback function called from mbedtls (set via mbedtls_ssl_set_bio) + * This function mainly copies data from pbufs and frees the pbufs after copying. + */ +static int +altcp_mbedtls_bio_recv(void *ctx, unsigned char *buf, size_t len) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)ctx; + altcp_mbedtls_state_t *state; + struct pbuf *p; + u16_t ret; + u16_t copy_len; + err_t err; + + LWIP_UNUSED_ARG(err); /* for LWIP_NOASSERT */ + if ((conn == NULL) || (conn->state == NULL)) { + return MBEDTLS_ERR_NET_INVALID_CONTEXT; + } + state = (altcp_mbedtls_state_t *)conn->state; + LWIP_ASSERT("state != NULL", state != NULL); + p = state->rx; + + /* @todo: return MBEDTLS_ERR_NET_CONN_RESET/MBEDTLS_ERR_NET_RECV_FAILED? 
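For now this BIO recv callback returns the number of bytes copied, 0 once the remote close has been queued, or MBEDTLS_ERR_SSL_WANT_READ when no rx data is buffered, so that mbedTLS retries on the next lower 'recv' event.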
*/ + + if ((p == NULL) || ((p->len == 0) && (p->next == NULL))) { + if (p) { + pbuf_free(p); + } + state->rx = NULL; + if ((state->flags & (ALTCP_MBEDTLS_FLAGS_RX_CLOSE_QUEUED | ALTCP_MBEDTLS_FLAGS_RX_CLOSED)) == + ALTCP_MBEDTLS_FLAGS_RX_CLOSE_QUEUED) { + /* close queued but not passed up yet */ + return 0; + } + return MBEDTLS_ERR_SSL_WANT_READ; + } + /* limit number of bytes again to copy from first pbuf in a chain only */ + copy_len = (u16_t)LWIP_MIN(len, p->len); + /* copy the data */ + ret = pbuf_copy_partial(p, buf, copy_len, 0); + LWIP_ASSERT("ret == copy_len", ret == copy_len); + /* hide the copied bytes from the pbuf */ + err = pbuf_remove_header(p, ret); + LWIP_ASSERT("error", err == ERR_OK); + if (p->len == 0) { + /* the first pbuf has been fully read, free it */ + state->rx = p->next; + p->next = NULL; + pbuf_free(p); + } + + state->bio_bytes_read += (int)ret; + return ret; +} + +/** Sent callback from lower connection (i.e. TCP) + * This only informs the upper layer to try to send more, not about + * the number of ACKed bytes. + */ +static err_t +altcp_mbedtls_lower_sent(void *arg, struct altcp_pcb *inner_conn, u16_t len) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + LWIP_UNUSED_ARG(inner_conn); /* for LWIP_NOASSERT */ + LWIP_UNUSED_ARG(len); + if (conn) { + altcp_mbedtls_state_t *state = (altcp_mbedtls_state_t *)conn->state; + LWIP_ASSERT("pcb mismatch", conn->inner_conn == inner_conn); + if (!state || !(state->flags & ALTCP_MBEDTLS_FLAGS_HANDSHAKE_DONE)) { + /* @todo: do something here? */ + return ERR_OK; + } + /* try to send more if we failed before */ + mbedtls_ssl_flush_output(&state->ssl_context); + /* call upper sent with len==0 if the application already sent data */ + if ((state->flags & ALTCP_MBEDTLS_FLAGS_APPLDATA_SENT) && conn->sent) { + return conn->sent(conn->arg, conn, 0); + } + } + return ERR_OK; +} + +/** Poll callback from lower connection (i.e. TCP) + * Just pass this on to the application. + * @todo: retry sending? 
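+ * (The body below already retries to some extent: it flushes pending TLS
+ * output and re-attempts delivery of buffered decrypted rx data before the
+ * application's poll callback is invoked.)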
+ */ +static err_t +altcp_mbedtls_lower_poll(void *arg, struct altcp_pcb *inner_conn) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + LWIP_UNUSED_ARG(inner_conn); /* for LWIP_NOASSERT */ + if (conn) { + LWIP_ASSERT("pcb mismatch", conn->inner_conn == inner_conn); + /* check if there's unreceived rx data */ + if (conn->state) { + altcp_mbedtls_state_t *state = (altcp_mbedtls_state_t *)conn->state; + /* try to send more if we failed before */ + mbedtls_ssl_flush_output(&state->ssl_context); + if (altcp_mbedtls_handle_rx_appldata(conn, state) == ERR_ABRT) { + return ERR_ABRT; + } + } + if (conn->poll) { + return conn->poll(conn->arg, conn); + } + } + return ERR_OK; +} + +static void +altcp_mbedtls_lower_err(void *arg, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + conn->inner_conn = NULL; /* already freed */ + if (conn->err) { + conn->err(conn->arg, err); + } + altcp_free(conn); + } +} + +/* setup functions */ + +static void +altcp_mbedtls_remove_callbacks(struct altcp_pcb *inner_conn) +{ + altcp_arg(inner_conn, NULL); + altcp_recv(inner_conn, NULL); + altcp_sent(inner_conn, NULL); + altcp_err(inner_conn, NULL); + altcp_poll(inner_conn, NULL, inner_conn->pollinterval); +} + +static void +altcp_mbedtls_setup_callbacks(struct altcp_pcb *conn, struct altcp_pcb *inner_conn) +{ + altcp_arg(inner_conn, conn); + altcp_recv(inner_conn, altcp_mbedtls_lower_recv); + altcp_sent(inner_conn, altcp_mbedtls_lower_sent); + altcp_err(inner_conn, altcp_mbedtls_lower_err); + /* tcp_poll is set when interval is set by application */ + /* listen is set totally different :-) */ +} + +static err_t +altcp_mbedtls_setup(void *conf, struct altcp_pcb *conn, struct altcp_pcb *inner_conn) +{ + int ret; + struct altcp_tls_config *config = (struct altcp_tls_config *)conf; + altcp_mbedtls_state_t *state; + if (!conf) { + return ERR_ARG; + } + LWIP_ASSERT("invalid inner_conn", conn != inner_conn); + + /* allocate mbedtls context */ + state = altcp_mbedtls_alloc(conf); + if (state == NULL) { + return ERR_MEM; + } + /* initialize mbedtls context: */ + mbedtls_ssl_init(&state->ssl_context); + ret = mbedtls_ssl_setup(&state->ssl_context, &config->conf); + if (ret != 0) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("mbedtls_ssl_setup failed\n")); + /* @todo: convert 'ret' to err_t */ + altcp_mbedtls_free(conf, state); + return ERR_MEM; + } + /* tell mbedtls about our I/O functions */ + mbedtls_ssl_set_bio(&state->ssl_context, conn, altcp_mbedtls_bio_send, altcp_mbedtls_bio_recv, NULL); + + altcp_mbedtls_setup_callbacks(conn, inner_conn); + conn->inner_conn = inner_conn; + conn->fns = &altcp_mbedtls_functions; + conn->state = state; + return ERR_OK; +} + +struct altcp_pcb * +altcp_tls_wrap(struct altcp_tls_config *config, struct altcp_pcb *inner_pcb) +{ + struct altcp_pcb *ret; + if (inner_pcb == NULL) { + return NULL; + } + ret = altcp_alloc(); + if (ret != NULL) { + if (altcp_mbedtls_setup(config, ret, inner_pcb) != ERR_OK) { + altcp_free(ret); + return NULL; + } + } + return ret; +} + +void * +altcp_tls_context(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + altcp_mbedtls_state_t *state = (altcp_mbedtls_state_t *)conn->state; + return &state->ssl_context; + } + return NULL; +} + +#if ALTCP_MBEDTLS_LIB_DEBUG != LWIP_DBG_OFF +static void +altcp_mbedtls_debug(void *ctx, int level, const char *file, int line, const char *str) +{ + LWIP_UNUSED_ARG(ctx); + LWIP_UNUSED_ARG(file); + LWIP_UNUSED_ARG(line); + LWIP_UNUSED_ARG(str); + + if (level >= ALTCP_MBEDTLS_LIB_DEBUG_LEVEL_MIN) { + 
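/* forward only messages whose mbedTLS debug level is at least ALTCP_MBEDTLS_LIB_DEBUG_LEVEL_MIN to lwIP's debug output */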
LWIP_DEBUGF(ALTCP_MBEDTLS_LIB_DEBUG, ("%s:%04d: %s", file, line, str)); + } +} +#endif + +/** Create new TLS configuration + * ATTENTION: Server certificate and private key have to be added outside this function! + */ +static struct altcp_tls_config * +altcp_tls_create_config(int is_server, uint8_t cert_count, uint8_t pkey_count, int have_ca) +{ + size_t sz; + int ret; + struct altcp_tls_config *conf; + mbedtls_x509_crt *mem; + + if (TCP_WND < MBEDTLS_SSL_MAX_CONTENT_LEN) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG|LWIP_DBG_LEVEL_SERIOUS, + ("altcp_tls: TCP_WND is smaller than the RX decrypion buffer, connection RX might stall!\n")); + } + + altcp_mbedtls_mem_init(); + + sz = sizeof(struct altcp_tls_config); + if (cert_count > 0) { + sz += (cert_count * sizeof(mbedtls_x509_crt)); + } + if (have_ca) { + sz += sizeof(mbedtls_x509_crt); + } + if (pkey_count > 0) { + sz += (pkey_count * sizeof(mbedtls_pk_context)); + } + + conf = (struct altcp_tls_config *)altcp_mbedtls_alloc_config(sz); + if (conf == NULL) { + return NULL; + } + conf->cert_max = cert_count; + mem = (mbedtls_x509_crt *)(conf + 1); + if (cert_count > 0) { + conf->cert = mem; + mem += cert_count; + } + if (have_ca) { + conf->ca = mem; + mem++; + } + conf->pkey_max = pkey_count; + if (pkey_count > 0) { + conf->pkey = (mbedtls_pk_context *)mem; + } + + mbedtls_ssl_config_init(&conf->conf); + + if (!altcp_tls_entropy_rng) { + altcp_tls_entropy_rng = (struct altcp_tls_entropy_rng *)altcp_mbedtls_alloc_config(sizeof(struct altcp_tls_entropy_rng)); + if (altcp_tls_entropy_rng) { + altcp_tls_entropy_rng->ref = 1; + mbedtls_entropy_init(&altcp_tls_entropy_rng->entropy); + mbedtls_ctr_drbg_init(&altcp_tls_entropy_rng->ctr_drbg); + /* Seed the RNG, only once */ + ret = mbedtls_ctr_drbg_seed(&altcp_tls_entropy_rng->ctr_drbg, + mbedtls_entropy_func, &altcp_tls_entropy_rng->entropy, + ALTCP_MBEDTLS_ENTROPY_PTR, ALTCP_MBEDTLS_ENTROPY_LEN); + if (ret != 0) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("mbedtls_ctr_drbg_seed failed: %d\n", ret)); + mbedtls_ctr_drbg_free(&altcp_tls_entropy_rng->ctr_drbg); + mbedtls_entropy_free(&altcp_tls_entropy_rng->entropy); + altcp_mbedtls_free_config(altcp_tls_entropy_rng); + altcp_tls_entropy_rng = NULL; + altcp_mbedtls_free_config(conf); + return NULL; + } + } else { + altcp_mbedtls_free_config(conf); + return NULL; + } + } else { + altcp_tls_entropy_rng->ref++; + } + + /* Setup ssl context (@todo: what's different for a client here? -> might better be done on listen/connect) */ + ret = mbedtls_ssl_config_defaults(&conf->conf, is_server ? 
MBEDTLS_SSL_IS_SERVER : MBEDTLS_SSL_IS_CLIENT, + MBEDTLS_SSL_TRANSPORT_STREAM, MBEDTLS_SSL_PRESET_DEFAULT); + if (ret != 0) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("mbedtls_ssl_config_defaults failed: %d\n", ret)); + if (altcp_tls_entropy_rng->ref == 1) { + mbedtls_ctr_drbg_free(&altcp_tls_entropy_rng->ctr_drbg); + mbedtls_entropy_free(&altcp_tls_entropy_rng->entropy); + altcp_mbedtls_free_config(altcp_tls_entropy_rng); + altcp_tls_entropy_rng = NULL; + } + altcp_mbedtls_free_config(conf); + return NULL; + } + mbedtls_ssl_conf_authmode(&conf->conf, MBEDTLS_SSL_VERIFY_OPTIONAL); + + mbedtls_ssl_conf_rng(&conf->conf, mbedtls_ctr_drbg_random, &altcp_tls_entropy_rng->ctr_drbg); +#if ALTCP_MBEDTLS_LIB_DEBUG != LWIP_DBG_OFF + mbedtls_ssl_conf_dbg(&conf->conf, altcp_mbedtls_debug, stdout); +#endif +#if defined(MBEDTLS_SSL_CACHE_C) && ALTCP_MBEDTLS_USE_SESSION_CACHE + mbedtls_ssl_conf_session_cache(&conf->conf, &conf->cache, mbedtls_ssl_cache_get, mbedtls_ssl_cache_set); + mbedtls_ssl_cache_set_timeout(&conf->cache, ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS); + mbedtls_ssl_cache_set_max_entries(&conf->cache, ALTCP_MBEDTLS_SESSION_CACHE_SIZE); +#endif + +#if defined(MBEDTLS_SSL_SESSION_TICKETS) && ALTCP_MBEDTLS_USE_SESSION_TICKETS + mbedtls_ssl_ticket_init(&conf->ticket_ctx); + + ret = mbedtls_ssl_ticket_setup(&conf->ticket_ctx, mbedtls_ctr_drbg_random, &altcp_tls_entropy_rng->ctr_drbg, + ALTCP_MBEDTLS_SESSION_TICKET_CIPHER, ALTCP_MBEDTLS_SESSION_TICKET_TIMEOUT_SECONDS); + if (ret) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("mbedtls_ssl_ticket_setup failed: %d\n", ret)); + altcp_mbedtls_free_config(conf); + return NULL; + } + + mbedtls_ssl_conf_session_tickets_cb(&conf->conf, mbedtls_ssl_ticket_write, mbedtls_ssl_ticket_parse, + &conf->ticket_ctx); +#endif + + return conf; +} + +struct altcp_tls_config *altcp_tls_create_config_server(uint8_t cert_count) +{ + struct altcp_tls_config *conf = altcp_tls_create_config(1, cert_count, cert_count, 0); + if (conf == NULL) { + return NULL; + } + + mbedtls_ssl_conf_ca_chain(&conf->conf, NULL, NULL); + return conf; +} + +err_t altcp_tls_config_server_add_privkey_cert(struct altcp_tls_config *config, + const u8_t *privkey, size_t privkey_len, + const u8_t *privkey_pass, size_t privkey_pass_len, + const u8_t *cert, size_t cert_len) +{ + int ret; + mbedtls_x509_crt *srvcert; + mbedtls_pk_context *pkey; + + if (config->cert_count >= config->cert_max) { + return ERR_MEM; + } + if (config->pkey_count >= config->pkey_max) { + return ERR_MEM; + } + + srvcert = config->cert + config->cert_count; + mbedtls_x509_crt_init(srvcert); + + pkey = config->pkey + config->pkey_count; + mbedtls_pk_init(pkey); + + /* Load the certificates and private key */ + ret = mbedtls_x509_crt_parse(srvcert, cert, cert_len); + if (ret != 0) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("mbedtls_x509_crt_parse failed: %d\n", ret)); + return ERR_VAL; + } + + ret = mbedtls_pk_parse_key(pkey, (const unsigned char *) privkey, privkey_len, privkey_pass, privkey_pass_len); + if (ret != 0) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("mbedtls_pk_parse_public_key failed: %d\n", ret)); + mbedtls_x509_crt_free(srvcert); + return ERR_VAL; + } + + ret = mbedtls_ssl_conf_own_cert(&config->conf, srvcert, pkey); + if (ret != 0) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("mbedtls_ssl_conf_own_cert failed: %d\n", ret)); + mbedtls_x509_crt_free(srvcert); + mbedtls_pk_free(pkey); + return ERR_VAL; + } + + config->cert_count++; + config->pkey_count++; + return ERR_OK; +} + +/** Create new TLS configuration + * This is a suboptimal 
version that gets the encrypted private key and its password, + * as well as the server certificate. + */ +struct altcp_tls_config * +altcp_tls_create_config_server_privkey_cert(const u8_t *privkey, size_t privkey_len, + const u8_t *privkey_pass, size_t privkey_pass_len, + const u8_t *cert, size_t cert_len) +{ + struct altcp_tls_config *conf = altcp_tls_create_config_server(1); + if (conf == NULL) { + return NULL; + } + + if (altcp_tls_config_server_add_privkey_cert(conf, privkey, privkey_len, + privkey_pass, privkey_pass_len, cert, cert_len) != ERR_OK) { + altcp_mbedtls_free_config(conf); + return NULL; + } + + return conf; +} + +static struct altcp_tls_config * +altcp_tls_create_config_client_common(const u8_t *ca, size_t ca_len, int is_2wayauth) +{ + int ret; + struct altcp_tls_config *conf = altcp_tls_create_config(0, (is_2wayauth) ? 1 : 0, (is_2wayauth) ? 1 : 0, ca != NULL); + if (conf == NULL) { + return NULL; + } + + /* Initialize the CA certificate if provided + * CA certificate is optional (to save memory) but recommended for production environment + * Without CA certificate, connection will be prone to man-in-the-middle attacks */ + if (ca) { + mbedtls_x509_crt_init(conf->ca); + ret = mbedtls_x509_crt_parse(conf->ca, ca, ca_len); + if (ret != 0) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("mbedtls_x509_crt_parse ca failed: %d 0x%x", ret, -1*ret)); + altcp_mbedtls_free_config(conf); + return NULL; + } + + mbedtls_ssl_conf_ca_chain(&conf->conf, conf->ca, NULL); + } + return conf; +} + +struct altcp_tls_config * +altcp_tls_create_config_client(const u8_t *ca, size_t ca_len) +{ + return altcp_tls_create_config_client_common(ca, ca_len, 0); +} + +struct altcp_tls_config * +altcp_tls_create_config_client_2wayauth(const u8_t *ca, size_t ca_len, const u8_t *privkey, size_t privkey_len, + const u8_t *privkey_pass, size_t privkey_pass_len, + const u8_t *cert, size_t cert_len) +{ + int ret; + struct altcp_tls_config *conf; + + if (!cert || !privkey) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("altcp_tls_create_config_client_2wayauth: certificate and priv key required")); + return NULL; + } + + conf = altcp_tls_create_config_client_common(ca, ca_len, 1); + if (conf == NULL) { + return NULL; + } + + /* Initialize the client certificate and corresponding private key */ + mbedtls_x509_crt_init(conf->cert); + ret = mbedtls_x509_crt_parse(conf->cert, cert, cert_len); + if (ret != 0) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("mbedtls_x509_crt_parse cert failed: %d 0x%x", ret, -1*ret)); + altcp_mbedtls_free_config(conf->cert); + return NULL; + } + + mbedtls_pk_init(conf->pkey); + ret = mbedtls_pk_parse_key(conf->pkey, privkey, privkey_len, privkey_pass, privkey_pass_len); + if (ret != 0) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("mbedtls_pk_parse_key failed: %d 0x%x", ret, -1*ret)); + altcp_mbedtls_free_config(conf); + return NULL; + } + + ret = mbedtls_ssl_conf_own_cert(&conf->conf, conf->cert, conf->pkey); + if (ret != 0) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("mbedtls_ssl_conf_own_cert failed: %d 0x%x", ret, -1*ret)); + altcp_mbedtls_free_config(conf); + return NULL; + } + + return conf; +} + +void +altcp_tls_free_config(struct altcp_tls_config *conf) +{ + if (conf->pkey) { + mbedtls_pk_free(conf->pkey); + } + if (conf->cert) { + mbedtls_x509_crt_free(conf->cert); + } + if (conf->ca) { + mbedtls_x509_crt_free(conf->ca); + } + altcp_mbedtls_free_config(conf); + if (altcp_tls_entropy_rng && altcp_tls_entropy_rng->ref) + altcp_tls_entropy_rng->ref--; +} + +void +altcp_tls_free_entropy(void) +{ + if 
(altcp_tls_entropy_rng && altcp_tls_entropy_rng->ref == 0) { + mbedtls_ctr_drbg_free(&altcp_tls_entropy_rng->ctr_drbg); + mbedtls_entropy_free(&altcp_tls_entropy_rng->entropy); + altcp_mbedtls_free_config(altcp_tls_entropy_rng); + altcp_tls_entropy_rng = NULL; + } +} + +/* "virtual" functions */ +static void +altcp_mbedtls_set_poll(struct altcp_pcb *conn, u8_t interval) +{ + if (conn != NULL) { + altcp_poll(conn->inner_conn, altcp_mbedtls_lower_poll, interval); + } +} + +static void +altcp_mbedtls_recved(struct altcp_pcb *conn, u16_t len) +{ + u16_t lower_recved; + altcp_mbedtls_state_t *state; + if (conn == NULL) { + return; + } + state = (altcp_mbedtls_state_t *)conn->state; + if (state == NULL) { + return; + } + if (!(state->flags & ALTCP_MBEDTLS_FLAGS_HANDSHAKE_DONE)) { + return; + } + lower_recved = len; + if (lower_recved > state->rx_passed_unrecved) { + LWIP_DEBUGF(ALTCP_MBEDTLS_DEBUG, ("bogus recved count (len > state->rx_passed_unrecved / %d / %d)", + len, state->rx_passed_unrecved)); + lower_recved = (u16_t)state->rx_passed_unrecved; + } + state->rx_passed_unrecved -= lower_recved; + + altcp_recved(conn->inner_conn, lower_recved); +} + +static err_t +altcp_mbedtls_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected) +{ + if (conn == NULL) { + return ERR_VAL; + } + conn->connected = connected; + return altcp_connect(conn->inner_conn, ipaddr, port, altcp_mbedtls_lower_connected); +} + +static struct altcp_pcb * +altcp_mbedtls_listen(struct altcp_pcb *conn, u8_t backlog, err_t *err) +{ + struct altcp_pcb *lpcb; + if (conn == NULL) { + return NULL; + } + lpcb = altcp_listen_with_backlog_and_err(conn->inner_conn, backlog, err); + if (lpcb != NULL) { + altcp_mbedtls_state_t *state = (altcp_mbedtls_state_t *)conn->state; + /* Free members of the ssl context (not used on listening pcb). This + includes freeing input/output buffers, so saves ~32KByte by default */ + mbedtls_ssl_free(&state->ssl_context); + + conn->inner_conn = lpcb; + altcp_accept(lpcb, altcp_mbedtls_lower_accept); + return conn; + } + return NULL; +} + +static void +altcp_mbedtls_abort(struct altcp_pcb *conn) +{ + if (conn != NULL) { + altcp_abort(conn->inner_conn); + } +} + +static err_t +altcp_mbedtls_close(struct altcp_pcb *conn) +{ + struct altcp_pcb *inner_conn; + if (conn == NULL) { + return ERR_VAL; + } + inner_conn = conn->inner_conn; + if (inner_conn) { + err_t err; + altcp_poll_fn oldpoll = inner_conn->poll; + altcp_mbedtls_remove_callbacks(conn->inner_conn); + err = altcp_close(conn->inner_conn); + if (err != ERR_OK) { + /* not closed, set up all callbacks again */ + altcp_mbedtls_setup_callbacks(conn, inner_conn); + /* poll callback is not included in the above */ + altcp_poll(inner_conn, oldpoll, inner_conn->pollinterval); + return err; + } + conn->inner_conn = NULL; + } + altcp_free(conn); + return ERR_OK; +} + +/** Allow caller of altcp_write() to limit to negotiated chunk size + * or remaining sndbuf space of inner_conn. 
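+ * (The value reported is the inner connection's sndbuf reduced by the
+ * per-record TLS expansion - record header, explicit IV and MAC/AEAD tag, as
+ * returned by mbedtls_ssl_get_record_expansion() - and, when
+ * MBEDTLS_SSL_MAX_FRAGMENT_LENGTH is enabled, additionally capped to the
+ * negotiated maximum fragment length.)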
+ */ +static u16_t +altcp_mbedtls_sndbuf(struct altcp_pcb *conn) +{ + if (conn) { + altcp_mbedtls_state_t *state; + state = (altcp_mbedtls_state_t*)conn->state; + if (!state || !(state->flags & ALTCP_MBEDTLS_FLAGS_HANDSHAKE_DONE)) { + return 0; + } + if (conn->inner_conn) { + u16_t sndbuf = altcp_sndbuf(conn->inner_conn); + /* Take care of record header, IV, AuthTag */ + int ssl_expan = mbedtls_ssl_get_record_expansion(&state->ssl_context); + if (ssl_expan > 0) { + size_t ssl_added = (u16_t)LWIP_MIN(ssl_expan, 0xFFFF); + /* internal sndbuf smaller than our offset */ + if (ssl_added < sndbuf) { + size_t max_len = 0xFFFF; + size_t ret; +#if defined(MBEDTLS_SSL_MAX_FRAGMENT_LENGTH) + /* @todo: adjust ssl_added to real value related to negociated cipher */ + size_t max_frag_len = mbedtls_ssl_get_max_frag_len(&state->ssl_context); + max_len = LWIP_MIN(max_frag_len, max_len); +#endif + /* Adjust sndbuf of inner_conn with what added by SSL */ + ret = LWIP_MIN(sndbuf - ssl_added, max_len); + LWIP_ASSERT("sndbuf overflow", ret <= 0xFFFF); + return (u16_t)ret; + } + } + } + } + /* fallback: use sendbuf of the inner connection */ + return altcp_default_sndbuf(conn); +} + +/** Write data to a TLS connection. Calls into mbedTLS, which in turn calls into + * @ref altcp_mbedtls_bio_send() to send the encrypted data + */ +static err_t +altcp_mbedtls_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + int ret; + altcp_mbedtls_state_t *state; + + LWIP_UNUSED_ARG(apiflags); + + if (conn == NULL) { + return ERR_VAL; + } + + state = (altcp_mbedtls_state_t *)conn->state; + if (state == NULL) { + /* @todo: which error? */ + return ERR_CLSD; + } + if (!(state->flags & ALTCP_MBEDTLS_FLAGS_HANDSHAKE_DONE)) { + /* @todo: which error? */ + return ERR_VAL; + } + + /* HACK: if thre is something left to send, try to flush it and only + allow sending more if this succeeded (this is a hack because neither + returning 0 nor MBEDTLS_ERR_SSL_WANT_WRITE worked for me) */ + if (state->ssl_context.out_left) { + mbedtls_ssl_flush_output(&state->ssl_context); + if (state->ssl_context.out_left) { + return ERR_MEM; + } + } + ret = mbedtls_ssl_write(&state->ssl_context, (const unsigned char *)dataptr, len); + /* try to send data... 
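(whatever mbedtls_ssl_write() already handed to the inner connection via the BIO send callback is flushed onto the wire by altcp_output below)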
*/ + altcp_output(conn->inner_conn); + if (ret >= 0) { + if (ret == len) { + state->flags |= ALTCP_MBEDTLS_FLAGS_APPLDATA_SENT; + return ERR_OK; + } else { + /* @todo/@fixme: assumption: either everything sent or error */ + LWIP_ASSERT("ret <= 0", 0); + return ERR_MEM; + } + } else { + if (ret == MBEDTLS_ERR_SSL_WANT_WRITE) { + /* @todo: convert error to err_t */ + return ERR_MEM; + } + LWIP_ASSERT("unhandled error", 0); + return ERR_VAL; + } +} + +/** Send callback function called from mbedtls (set via mbedtls_ssl_set_bio) + * This function is either called during handshake or when sending application + * data via @ref altcp_mbedtls_write (or altcp_write) + */ +static int +altcp_mbedtls_bio_send(void *ctx, const unsigned char *dataptr, size_t size) +{ + struct altcp_pcb *conn = (struct altcp_pcb *) ctx; + int written = 0; + size_t size_left = size; + u8_t apiflags = TCP_WRITE_FLAG_COPY; + + LWIP_ASSERT("conn != NULL", conn != NULL); + if ((conn == NULL) || (conn->inner_conn == NULL)) { + return MBEDTLS_ERR_NET_INVALID_CONTEXT; + } + + while (size_left) { + u16_t write_len = (u16_t)LWIP_MIN(size_left, 0xFFFF); + err_t err = altcp_write(conn->inner_conn, (const void *)dataptr, write_len, apiflags); + if (err == ERR_OK) { + written += write_len; + size_left -= write_len; + } else if (err == ERR_MEM) { + if (written) { + return written; + } + return 0; /* MBEDTLS_ERR_SSL_WANT_WRITE; */ + } else { + LWIP_ASSERT("tls_write, tcp_write: err != ERR MEM", 0); + /* @todo: return MBEDTLS_ERR_NET_CONN_RESET or MBEDTLS_ERR_NET_SEND_FAILED */ + return MBEDTLS_ERR_NET_SEND_FAILED; + } + } + return written; +} + +static u16_t +altcp_mbedtls_mss(struct altcp_pcb *conn) +{ + if (conn == NULL) { + return 0; + } + /* @todo: LWIP_MIN(mss, mbedtls_ssl_get_max_frag_len()) ? 
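For now the inner connection's MSS is returned unchanged; the TLS record overhead is accounted for in altcp_mbedtls_sndbuf() instead.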
*/ + return altcp_mss(conn->inner_conn); +} + +static void +altcp_mbedtls_dealloc(struct altcp_pcb *conn) +{ + /* clean up and free tls state */ + if (conn) { + altcp_mbedtls_state_t *state = (altcp_mbedtls_state_t *)conn->state; + if (state) { + mbedtls_ssl_free(&state->ssl_context); + state->flags = 0; + if (state->rx) { + /* free leftover (unhandled) rx pbufs */ + pbuf_free(state->rx); + state->rx = NULL; + } + altcp_mbedtls_free(state->conf, state); + conn->state = NULL; + } + } +} + +const struct altcp_functions altcp_mbedtls_functions = { + altcp_mbedtls_set_poll, + altcp_mbedtls_recved, + altcp_default_bind, + altcp_mbedtls_connect, + altcp_mbedtls_listen, + altcp_mbedtls_abort, + altcp_mbedtls_close, + altcp_default_shutdown, + altcp_mbedtls_write, + altcp_default_output, + altcp_mbedtls_mss, + altcp_mbedtls_sndbuf, + altcp_default_sndqueuelen, + altcp_default_nagle_disable, + altcp_default_nagle_enable, + altcp_default_nagle_disabled, + altcp_default_setprio, + altcp_mbedtls_dealloc, + altcp_default_get_tcp_addrinfo, + altcp_default_get_ip, + altcp_default_get_port +#if LWIP_TCP_KEEPALIVE + , altcp_default_keepalive_disable + , altcp_default_keepalive_enable +#endif +#ifdef LWIP_DEBUG + , altcp_default_dbg_get_tcp_state +#endif +}; + +#endif /* LWIP_ALTCP_TLS && LWIP_ALTCP_TLS_MBEDTLS */ +#endif /* LWIP_ALTCP */ diff --git a/Middlewares/Third_Party/LwIP/src/apps/altcp_tls/altcp_tls_mbedtls_mem.c b/Middlewares/Third_Party/LwIP/src/apps/altcp_tls/altcp_tls_mbedtls_mem.c new file mode 100644 index 00000000..04d47ae5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/apps/altcp_tls/altcp_tls_mbedtls_mem.c @@ -0,0 +1,210 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread) + * + * This file contains memory management functions for a TLS layer using mbedTLS. + * + * ATTENTION: For production usage, you might want to override this file with + * your own implementation since this implementation simply uses the + * lwIP heap without caring for fragmentation or leaving heap for + * other parts of lwIP! + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + * Missing things / @todo: + * - RX data is acknowledged after receiving (tcp_recved is called when enqueueing + * the pbuf for mbedTLS receive, not when processed by mbedTLS or the inner + * connection; altcp_recved() from inner connection does nothing) + * - TX data is marked as 'sent' (i.e. acknowledged; sent callback is called) right + * after enqueueing for transmission, not when actually ACKed be the remote host. + */ + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/apps/altcp_tls_mbedtls_opts.h" + +#if LWIP_ALTCP_TLS && LWIP_ALTCP_TLS_MBEDTLS + +#include "altcp_tls_mbedtls_mem.h" +#include "altcp_tls_mbedtls_structs.h" +#include "lwip/mem.h" + +#include "mbedtls/platform.h" + +#include + +#ifndef ALTCP_MBEDTLS_MEM_DEBUG +#define ALTCP_MBEDTLS_MEM_DEBUG LWIP_DBG_OFF +#endif + +#if defined(MBEDTLS_PLATFORM_MEMORY) && \ + (!defined(MBEDTLS_PLATFORM_FREE_MACRO) || \ + defined(MBEDTLS_PLATFORM_CALLOC_MACRO)) +#define ALTCP_MBEDTLS_PLATFORM_ALLOC 1 +#else +#define ALTCP_MBEDTLS_PLATFORM_ALLOC 0 +#endif + +#if ALTCP_MBEDTLS_PLATFORM_ALLOC + +#ifndef ALTCP_MBEDTLS_PLATFORM_ALLOC_STATS +#define ALTCP_MBEDTLS_PLATFORM_ALLOC_STATS 0 +#endif + +/* This is an example/debug implementation of alloc/free functions only */ +typedef struct altcp_mbedtls_malloc_helper_s { + size_t c; + size_t len; +} altcp_mbedtls_malloc_helper_t; + +#if ALTCP_MBEDTLS_PLATFORM_ALLOC_STATS +typedef struct altcp_mbedtls_malloc_stats_s { + size_t allocedBytes; + size_t allocCnt; + size_t maxBytes; + size_t totalBytes; +} altcp_mbedtls_malloc_stats_t; +altcp_mbedtls_malloc_stats_t altcp_mbedtls_malloc_stats; +volatile int altcp_mbedtls_malloc_clear_stats; +#endif + +static void * +tls_malloc(size_t c, size_t len) +{ + altcp_mbedtls_malloc_helper_t *hlpr; + void *ret; + size_t alloc_size; +#if ALTCP_MBEDTLS_PLATFORM_ALLOC_STATS + if (altcp_mbedtls_malloc_clear_stats) { + altcp_mbedtls_malloc_clear_stats = 0; + memset(&altcp_mbedtls_malloc_stats, 0, sizeof(altcp_mbedtls_malloc_stats)); + } +#endif + alloc_size = sizeof(altcp_mbedtls_malloc_helper_t) + (c * len); + /* check for maximum allocation size, mainly to prevent mem_size_t overflow */ + if (alloc_size > MEM_SIZE) { + LWIP_DEBUGF(ALTCP_MBEDTLS_MEM_DEBUG, ("mbedtls allocation too big: %c * %d bytes vs MEM_SIZE=%d", + (int)c, (int)len, (int)MEM_SIZE)); + return NULL; + } + hlpr = (altcp_mbedtls_malloc_helper_t *)mem_malloc((mem_size_t)alloc_size); + if (hlpr == NULL) { + LWIP_DEBUGF(ALTCP_MBEDTLS_MEM_DEBUG, ("mbedtls alloc callback failed for %c * %d bytes", (int)c, (int)len)); + return NULL; + } +#if ALTCP_MBEDTLS_PLATFORM_ALLOC_STATS + altcp_mbedtls_malloc_stats.allocCnt++; + altcp_mbedtls_malloc_stats.allocedBytes += c * len; + if (altcp_mbedtls_malloc_stats.allocedBytes > altcp_mbedtls_malloc_stats.maxBytes) { + altcp_mbedtls_malloc_stats.maxBytes = altcp_mbedtls_malloc_stats.allocedBytes; + } + 
altcp_mbedtls_malloc_stats.totalBytes += c * len; +#endif + hlpr->c = c; + hlpr->len = len; + ret = hlpr + 1; + /* zeroing the allocated chunk is required by mbedTLS! */ + memset(ret, 0, c * len); + return ret; +} + +static void +tls_free(void *ptr) +{ + altcp_mbedtls_malloc_helper_t *hlpr; + if (ptr == NULL) { + /* this obviously happened in mbedtls... */ + return; + } + hlpr = ((altcp_mbedtls_malloc_helper_t *)ptr) - 1; +#if ALTCP_MBEDTLS_PLATFORM_ALLOC_STATS + if (!altcp_mbedtls_malloc_clear_stats) { + altcp_mbedtls_malloc_stats.allocedBytes -= hlpr->c * hlpr->len; + } +#endif + mem_free(hlpr); +} +#endif /* ALTCP_MBEDTLS_PLATFORM_ALLOC*/ + +void +altcp_mbedtls_mem_init(void) +{ + /* not much to do here when using the heap */ + +#if ALTCP_MBEDTLS_PLATFORM_ALLOC + /* set mbedtls allocation methods */ + mbedtls_platform_set_calloc_free(&tls_malloc, &tls_free); +#endif +} + +altcp_mbedtls_state_t * +altcp_mbedtls_alloc(void *conf) +{ + altcp_mbedtls_state_t *ret = (altcp_mbedtls_state_t *)mem_calloc(1, sizeof(altcp_mbedtls_state_t)); + if (ret != NULL) { + ret->conf = conf; + } + return ret; +} + +void +altcp_mbedtls_free(void *conf, altcp_mbedtls_state_t *state) +{ + LWIP_UNUSED_ARG(conf); + LWIP_ASSERT("state != NULL", state != NULL); + mem_free(state); +} + +void * +altcp_mbedtls_alloc_config(size_t size) +{ + void *ret; + size_t checked_size = (mem_size_t)size; + if (size != checked_size) { + /* allocation too big (mem_size_t overflow) */ + return NULL; + } + ret = (altcp_mbedtls_state_t *)mem_calloc(1, (mem_size_t)size); + return ret; +} + +void +altcp_mbedtls_free_config(void *item) +{ + LWIP_ASSERT("item != NULL", item != NULL); + mem_free(item); +} + +#endif /* LWIP_ALTCP_TLS && LWIP_ALTCP_TLS_MBEDTLS */ +#endif /* LWIP_ALTCP */ diff --git a/Middlewares/Third_Party/LwIP/src/apps/altcp_tls/altcp_tls_mbedtls_mem.h b/Middlewares/Third_Party/LwIP/src/apps/altcp_tls/altcp_tls_mbedtls_mem.h new file mode 100644 index 00000000..b391bf87 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/apps/altcp_tls/altcp_tls_mbedtls_mem.h @@ -0,0 +1,72 @@ +/** + * @file + * Application layered TCP/TLS connection API (to be used from TCPIP thread) + * + * This file contains memory management function prototypes for a TLS layer using mbedTLS. + * + * Memory management contains: + * - allocating/freeing altcp_mbedtls_state_t + * - allocating/freeing memory used in the mbedTLS library + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_MBEDTLS_MEM_H +#define LWIP_HDR_ALTCP_MBEDTLS_MEM_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/apps/altcp_tls_mbedtls_opts.h" + +#if LWIP_ALTCP_TLS && LWIP_ALTCP_TLS_MBEDTLS + +#include "altcp_tls_mbedtls_structs.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void altcp_mbedtls_mem_init(void); +altcp_mbedtls_state_t *altcp_mbedtls_alloc(void *conf); +void altcp_mbedtls_free(void *conf, altcp_mbedtls_state_t *state); +void *altcp_mbedtls_alloc_config(size_t size); +void altcp_mbedtls_free_config(void *item); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP_TLS && LWIP_ALTCP_TLS_MBEDTLS */ +#endif /* LWIP_ALTCP */ +#endif /* LWIP_HDR_ALTCP_MBEDTLS_MEM_H */ diff --git a/Middlewares/Third_Party/LwIP/src/apps/altcp_tls/altcp_tls_mbedtls_structs.h b/Middlewares/Third_Party/LwIP/src/apps/altcp_tls/altcp_tls_mbedtls_structs.h new file mode 100644 index 00000000..17efaaf6 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/apps/altcp_tls/altcp_tls_mbedtls_structs.h @@ -0,0 +1,83 @@ +/** + * @file + * Application layered TCP/TLS connection API (to be used from TCPIP thread) + * + * This file contains structure definitions for a TLS layer using mbedTLS. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_MBEDTLS_STRUCTS_H +#define LWIP_HDR_ALTCP_MBEDTLS_STRUCTS_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/apps/altcp_tls_mbedtls_opts.h" + +#if LWIP_ALTCP_TLS && LWIP_ALTCP_TLS_MBEDTLS + +#include "lwip/altcp.h" +#include "lwip/pbuf.h" + +#include "mbedtls/ssl.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ALTCP_MBEDTLS_FLAGS_HANDSHAKE_DONE 0x01 +#define ALTCP_MBEDTLS_FLAGS_UPPER_CALLED 0x02 +#define ALTCP_MBEDTLS_FLAGS_RX_CLOSE_QUEUED 0x04 +#define ALTCP_MBEDTLS_FLAGS_RX_CLOSED 0x08 +#define ALTCP_MBEDTLS_FLAGS_APPLDATA_SENT 0x10 + +typedef struct altcp_mbedtls_state_s { + void *conf; + mbedtls_ssl_context ssl_context; + /* chain of rx pbufs (before decryption) */ + struct pbuf *rx; + struct pbuf *rx_app; + u8_t flags; + int rx_passed_unrecved; + int bio_bytes_read; + int bio_bytes_appl; +} altcp_mbedtls_state_t; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP_TLS && LWIP_ALTCP_TLS_MBEDTLS */ +#endif /* LWIP_ALTCP */ +#endif /* LWIP_HDR_ALTCP_MBEDTLS_STRUCTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/apps/mdns/mdns.c b/Middlewares/Third_Party/LwIP/src/apps/mdns/mdns.c new file mode 100644 index 00000000..aad27288 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/apps/mdns/mdns.c @@ -0,0 +1,2407 @@ +/** + * @file + * MDNS responder implementation + * + * @defgroup mdns MDNS + * @ingroup apps + * + * RFC 6762 - Multicast DNS\n + * RFC 6763 - DNS-Based Service Discovery\n + * + * @verbinclude mdns.txt + * + * Things left to implement: + * ------------------------- + * + * - Tiebreaking for simultaneous probing + * - Sending goodbye messages (zero ttl) - shutdown, DHCP lease about to expire, DHCP turned off... + * - Checking that source address of unicast requests are on the same network + * - Limiting multicast responses to 1 per second per resource record + * - Fragmenting replies if required + * - Handling multi-packet known answers + * - Individual known answer detection for all local IPv6 addresses + * - Dynamic size of outgoing packet + */ + +/* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ + +#include "lwip/apps/mdns.h" +#include "lwip/apps/mdns_priv.h" +#include "lwip/netif.h" +#include "lwip/udp.h" +#include "lwip/ip_addr.h" +#include "lwip/mem.h" +#include "lwip/prot/dns.h" +#include "lwip/prot/iana.h" +#include "lwip/timeouts.h" + +#include + +#if LWIP_MDNS_RESPONDER + +#if (LWIP_IPV4 && !LWIP_IGMP) +#error "If you want to use MDNS with IPv4, you have to define LWIP_IGMP=1 in your lwipopts.h" +#endif +#if (LWIP_IPV6 && !LWIP_IPV6_MLD) +#error "If you want to use MDNS with IPv6, you have to define LWIP_IPV6_MLD=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP) +#error "If you want to use MDNS, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif + +#if LWIP_IPV4 +#include "lwip/igmp.h" +/* IPv4 multicast group 224.0.0.251 */ +static const ip_addr_t v4group = DNS_MQUERY_IPV4_GROUP_INIT; +#endif + +#if LWIP_IPV6 +#include "lwip/mld6.h" +/* IPv6 multicast group FF02::FB */ +static const ip_addr_t v6group = DNS_MQUERY_IPV6_GROUP_INIT; +#endif + +#define MDNS_TTL 255 + +/* Stored offsets to beginning of domain names + * Used for compression. + */ +#define NUM_DOMAIN_OFFSETS 10 +#define DOMAIN_JUMP_SIZE 2 +#define DOMAIN_JUMP 0xc000 + +static u8_t mdns_netif_client_id; +static struct udp_pcb *mdns_pcb; +#if MDNS_RESP_USENETIF_EXTCALLBACK +NETIF_DECLARE_EXT_CALLBACK(netif_callback) +#endif +static mdns_name_result_cb_t mdns_name_result_cb; + +#define NETIF_TO_HOST(netif) (struct mdns_host*)(netif_get_client_data(netif, mdns_netif_client_id)) + +#define TOPDOMAIN_LOCAL "local" + +#define REVERSE_PTR_TOPDOMAIN "arpa" +#define REVERSE_PTR_V4_DOMAIN "in-addr" +#define REVERSE_PTR_V6_DOMAIN "ip6" + +#define SRV_PRIORITY 0 +#define SRV_WEIGHT 0 + +/* Payload size allocated for each outgoing UDP packet */ +#define OUTPACKET_SIZE 500 + +/* Lookup from hostname -> IPv4 */ +#define REPLY_HOST_A 0x01 +/* Lookup from IPv4/v6 -> hostname */ +#define REPLY_HOST_PTR_V4 0x02 +/* Lookup from hostname -> IPv6 */ +#define REPLY_HOST_AAAA 0x04 +/* Lookup from hostname -> IPv6 */ +#define REPLY_HOST_PTR_V6 0x08 + +/* Lookup for service types */ +#define REPLY_SERVICE_TYPE_PTR 0x10 +/* Lookup for instances of service */ +#define REPLY_SERVICE_NAME_PTR 0x20 +/* Lookup for location of service instance */ +#define REPLY_SERVICE_SRV 0x40 +/* Lookup for text info on service instance */ +#define REPLY_SERVICE_TXT 0x80 + +#define MDNS_PROBE_DELAY_MS 250 +#define MDNS_PROBE_COUNT 3 +#ifdef LWIP_RAND +/* first probe timeout SHOULD be random 0-250 ms*/ +#define MDNS_INITIAL_PROBE_DELAY_MS (LWIP_RAND() % MDNS_PROBE_DELAY_MS) +#else +#define MDNS_INITIAL_PROBE_DELAY_MS MDNS_PROBE_DELAY_MS +#endif + +#define MDNS_PROBING_NOT_STARTED 0 +#define MDNS_PROBING_ONGOING 1 +#define MDNS_PROBING_COMPLETE 2 + +static const char *dnssd_protos[] = { + "_udp", /* DNSSD_PROTO_UDP */ + "_tcp", /* DNSSD_PROTO_TCP */ +}; + +/** Description of a service */ +struct mdns_service { + /** TXT record to answer with */ + struct mdns_domain 
txtdata; + /** Name of service, like 'myweb' */ + char name[MDNS_LABEL_MAXLEN + 1]; + /** Type of service, like '_http' */ + char service[MDNS_LABEL_MAXLEN + 1]; + /** Callback function and userdata + * to update txtdata buffer */ + service_get_txt_fn_t txt_fn; + void *txt_userdata; + /** TTL in seconds of SRV/TXT replies */ + u32_t dns_ttl; + /** Protocol, TCP or UDP */ + u16_t proto; + /** Port of the service */ + u16_t port; +}; + +/** Description of a host/netif */ +struct mdns_host { + /** Hostname */ + char name[MDNS_LABEL_MAXLEN + 1]; + /** Pointer to services */ + struct mdns_service *services[MDNS_MAX_SERVICES]; + /** TTL in seconds of A/AAAA/PTR replies */ + u32_t dns_ttl; + /** Number of probes sent for the current name */ + u8_t probes_sent; + /** State in probing sequence */ + u8_t probing_state; +}; + +/** Information about received packet */ +struct mdns_packet { + /** Sender IP/port */ + ip_addr_t source_addr; + u16_t source_port; + /** If packet was received unicast */ + u16_t recv_unicast; + /** Netif that received the packet */ + struct netif *netif; + /** Packet data */ + struct pbuf *pbuf; + /** Current parsing offset in packet */ + u16_t parse_offset; + /** Identifier. Used in legacy queries */ + u16_t tx_id; + /** Number of questions in packet, + * read from packet header */ + u16_t questions; + /** Number of unparsed questions */ + u16_t questions_left; + /** Number of answers in packet, + * (sum of normal, authoritative and additional answers) + * read from packet header */ + u16_t answers; + /** Number of unparsed answers */ + u16_t answers_left; +}; + +/** Information about outgoing packet */ +struct mdns_outpacket { + /** Netif to send the packet on */ + struct netif *netif; + /** Packet data */ + struct pbuf *pbuf; + /** Current write offset in packet */ + u16_t write_offset; + /** Identifier. Used in legacy queries */ + u16_t tx_id; + /** Destination IP/port if sent unicast */ + ip_addr_t dest_addr; + u16_t dest_port; + /** Number of questions written */ + u16_t questions; + /** Number of normal answers written */ + u16_t answers; + /** Number of authoritative answers written */ + u16_t authoritative; + /** Number of additional answers written */ + u16_t additional; + /** Offsets for written domain names in packet. + * Used for compression */ + u16_t domain_offsets[NUM_DOMAIN_OFFSETS]; + /** If all answers in packet should set cache_flush bit */ + u8_t cache_flush; + /** If reply should be sent unicast */ + u8_t unicast_reply; + /** If legacy query. (tx_id needed, and write + * question again in reply before answer) */ + u8_t legacy_query; + /* Reply bitmask for host information */ + u8_t host_replies; + /* Bitmask for which reverse IPv6 hosts to answer */ + u8_t host_reverse_v6_replies; + /* Reply bitmask per service */ + u8_t serv_replies[MDNS_MAX_SERVICES]; +}; + +/** Domain, type and class. 
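+ * Holds the encoded name, the resource record type (A, PTR, SRV, TXT, ...) and the record class (normally IN).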
+ * Shared between questions and answers */ +struct mdns_rr_info { + struct mdns_domain domain; + u16_t type; + u16_t klass; +}; + +struct mdns_question { + struct mdns_rr_info info; + /** unicast reply requested */ + u16_t unicast; +}; + +struct mdns_answer { + struct mdns_rr_info info; + /** cache flush command bit */ + u16_t cache_flush; + /* Validity time in seconds */ + u32_t ttl; + /** Length of variable answer */ + u16_t rd_length; + /** Offset of start of variable answer in packet */ + u16_t rd_offset; +}; + +static err_t mdns_send_outpacket(struct mdns_outpacket *outpkt, u8_t flags); +static void mdns_probe(void* arg); + +static err_t +mdns_domain_add_label_base(struct mdns_domain *domain, u8_t len) +{ + if (len > MDNS_LABEL_MAXLEN) { + return ERR_VAL; + } + if (len > 0 && (1 + len + domain->length >= MDNS_DOMAIN_MAXLEN)) { + return ERR_VAL; + } + /* Allow only zero marker on last byte */ + if (len == 0 && (1 + domain->length > MDNS_DOMAIN_MAXLEN)) { + return ERR_VAL; + } + domain->name[domain->length] = len; + domain->length++; + return ERR_OK; +} + +/** + * Add a label part to a domain + * @param domain The domain to add a label to + * @param label The label to add, like <hostname>, 'local', 'com' or '' + * @param len The length of the label + * @return ERR_OK on success, an err_t otherwise if label too long + */ +err_t +mdns_domain_add_label(struct mdns_domain *domain, const char *label, u8_t len) +{ + err_t err = mdns_domain_add_label_base(domain, len); + if (err != ERR_OK) { + return err; + } + if (len) { + MEMCPY(&domain->name[domain->length], label, len); + domain->length += len; + } + return ERR_OK; +} + +/** + * Add a label part to a domain (@see mdns_domain_add_label but copy directly from pbuf) + */ +static err_t +mdns_domain_add_label_pbuf(struct mdns_domain *domain, const struct pbuf *p, u16_t offset, u8_t len) +{ + err_t err = mdns_domain_add_label_base(domain, len); + if (err != ERR_OK) { + return err; + } + if (len) { + if (pbuf_copy_partial(p, &domain->name[domain->length], len, offset) != len) { + /* take back the ++ done before */ + domain->length--; + return ERR_ARG; + } + domain->length += len; + } + return ERR_OK; +} + +/** + * Internal readname function with max 6 levels of recursion following jumps + * while decompressing name + */ +static u16_t +mdns_readname_loop(struct pbuf *p, u16_t offset, struct mdns_domain *domain, unsigned depth) +{ + u8_t c; + + do { + if (depth > 5) { + /* Too many jumps */ + return MDNS_READNAME_ERROR; + } + + c = pbuf_get_at(p, offset); + offset++; + + /* is this a compressed label? 
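(a length byte with the two top bits set marks a 2-byte compression pointer; the low 14 bits give an offset from the start of the DNS message, see RFC 1035 section 4.1.4)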
*/ + if ((c & 0xc0) == 0xc0) { + u16_t jumpaddr; + if (offset >= p->tot_len) { + /* Make sure both jump bytes fit in the packet */ + return MDNS_READNAME_ERROR; + } + jumpaddr = (((c & 0x3f) << 8) | (pbuf_get_at(p, offset) & 0xff)); + offset++; + if (jumpaddr >= SIZEOF_DNS_HDR && jumpaddr < p->tot_len) { + u16_t res; + /* Recursive call, maximum depth will be checked */ + res = mdns_readname_loop(p, jumpaddr, domain, depth + 1); + /* Dont return offset since new bytes were not read (jumped to somewhere in packet) */ + if (res == MDNS_READNAME_ERROR) { + return res; + } + } else { + return MDNS_READNAME_ERROR; + } + break; + } + + /* normal label */ + if (c <= MDNS_LABEL_MAXLEN) { + err_t res; + + if (c + domain->length >= MDNS_DOMAIN_MAXLEN) { + return MDNS_READNAME_ERROR; + } + res = mdns_domain_add_label_pbuf(domain, p, offset, c); + if (res != ERR_OK) { + return MDNS_READNAME_ERROR; + } + offset += c; + } else { + /* bad length byte */ + return MDNS_READNAME_ERROR; + } + } while (c != 0); + + return offset; +} + +/** + * Read possibly compressed domain name from packet buffer + * @param p The packet + * @param offset start position of domain name in packet + * @param domain The domain name destination + * @return The new offset after the domain, or MDNS_READNAME_ERROR + * if reading failed + */ +u16_t +mdns_readname(struct pbuf *p, u16_t offset, struct mdns_domain *domain) +{ + memset(domain, 0, sizeof(struct mdns_domain)); + return mdns_readname_loop(p, offset, domain, 0); +} + +/** + * Print domain name to debug output + * @param domain The domain name + */ +static void +mdns_domain_debug_print(struct mdns_domain *domain) +{ + u8_t *src = domain->name; + u8_t i; + + while (*src) { + u8_t label_len = *src; + src++; + for (i = 0; i < label_len; i++) { + LWIP_DEBUGF(MDNS_DEBUG, ("%c", src[i])); + } + src += label_len; + LWIP_DEBUGF(MDNS_DEBUG, (".")); + } +} + +/** + * Return 1 if contents of domains match (case-insensitive) + * @param a Domain name to compare 1 + * @param b Domain name to compare 2 + * @return 1 if domains are equal ignoring case, 0 otherwise + */ +int +mdns_domain_eq(struct mdns_domain *a, struct mdns_domain *b) +{ + u8_t *ptra, *ptrb; + u8_t len; + int res; + + if (a->length != b->length) { + return 0; + } + + ptra = a->name; + ptrb = b->name; + while (*ptra && *ptrb && ptra < &a->name[a->length]) { + if (*ptra != *ptrb) { + return 0; + } + len = *ptra; + ptra++; + ptrb++; + res = lwip_strnicmp((char *) ptra, (char *) ptrb, len); + if (res != 0) { + return 0; + } + ptra += len; + ptrb += len; + } + if (*ptra != *ptrb && ptra < &a->name[a->length]) { + return 0; + } + return 1; +} + +/** + * Call user supplied function to setup TXT data + * @param service The service to build TXT record for + */ +static void +mdns_prepare_txtdata(struct mdns_service *service) +{ + memset(&service->txtdata, 0, sizeof(struct mdns_domain)); + if (service->txt_fn) { + service->txt_fn(service, service->txt_userdata); + } +} + +#if LWIP_IPV4 +/** + * Build domain for reverse lookup of IPv4 address + * like 12.0.168.192.in-addr.arpa. 
for 192.168.0.12 + * @param domain Where to write the domain name + * @param addr Pointer to an IPv4 address to encode + * @return ERR_OK if domain was written, an err_t otherwise + */ +static err_t +mdns_build_reverse_v4_domain(struct mdns_domain *domain, const ip4_addr_t *addr) +{ + int i; + err_t res; + const u8_t *ptr; + + LWIP_UNUSED_ARG(res); + if (!domain || !addr) { + return ERR_ARG; + } + memset(domain, 0, sizeof(struct mdns_domain)); + ptr = (const u8_t *) addr; + for (i = sizeof(ip4_addr_t) - 1; i >= 0; i--) { + char buf[4]; + u8_t val = ptr[i]; + + lwip_itoa(buf, sizeof(buf), val); + res = mdns_domain_add_label(domain, buf, (u8_t)strlen(buf)); + LWIP_ERROR("mdns_build_reverse_v4_domain: Failed to add label", (res == ERR_OK), return res); + } + res = mdns_domain_add_label(domain, REVERSE_PTR_V4_DOMAIN, (u8_t)(sizeof(REVERSE_PTR_V4_DOMAIN) - 1)); + LWIP_ERROR("mdns_build_reverse_v4_domain: Failed to add label", (res == ERR_OK), return res); + res = mdns_domain_add_label(domain, REVERSE_PTR_TOPDOMAIN, (u8_t)(sizeof(REVERSE_PTR_TOPDOMAIN) - 1)); + LWIP_ERROR("mdns_build_reverse_v4_domain: Failed to add label", (res == ERR_OK), return res); + res = mdns_domain_add_label(domain, NULL, 0); + LWIP_ERROR("mdns_build_reverse_v4_domain: Failed to add label", (res == ERR_OK), return res); + + return ERR_OK; +} +#endif + +#if LWIP_IPV6 +/** + * Build domain for reverse lookup of IP address + * like b.a.9.8.7.6.5.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa. for 2001:db8::567:89ab + * @param domain Where to write the domain name + * @param addr Pointer to an IPv6 address to encode + * @return ERR_OK if domain was written, an err_t otherwise + */ +static err_t +mdns_build_reverse_v6_domain(struct mdns_domain *domain, const ip6_addr_t *addr) +{ + int i; + err_t res; + const u8_t *ptr; + LWIP_UNUSED_ARG(res); + if (!domain || !addr) { + return ERR_ARG; + } + memset(domain, 0, sizeof(struct mdns_domain)); + ptr = (const u8_t *) addr; + for (i = sizeof(ip6_addr_p_t) - 1; i >= 0; i--) { + char buf; + u8_t byte = ptr[i]; + int j; + for (j = 0; j < 2; j++) { + if ((byte & 0x0F) < 0xA) { + buf = '0' + (byte & 0x0F); + } else { + buf = 'a' + (byte & 0x0F) - 0xA; + } + res = mdns_domain_add_label(domain, &buf, sizeof(buf)); + LWIP_ERROR("mdns_build_reverse_v6_domain: Failed to add label", (res == ERR_OK), return res); + byte >>= 4; + } + } + res = mdns_domain_add_label(domain, REVERSE_PTR_V6_DOMAIN, (u8_t)(sizeof(REVERSE_PTR_V6_DOMAIN) - 1)); + LWIP_ERROR("mdns_build_reverse_v6_domain: Failed to add label", (res == ERR_OK), return res); + res = mdns_domain_add_label(domain, REVERSE_PTR_TOPDOMAIN, (u8_t)(sizeof(REVERSE_PTR_TOPDOMAIN) - 1)); + LWIP_ERROR("mdns_build_reverse_v6_domain: Failed to add label", (res == ERR_OK), return res); + res = mdns_domain_add_label(domain, NULL, 0); + LWIP_ERROR("mdns_build_reverse_v6_domain: Failed to add label", (res == ERR_OK), return res); + + return ERR_OK; +} +#endif + +/* Add .local. to domain */ +static err_t +mdns_add_dotlocal(struct mdns_domain *domain) +{ + err_t res = mdns_domain_add_label(domain, TOPDOMAIN_LOCAL, (u8_t)(sizeof(TOPDOMAIN_LOCAL) - 1)); + LWIP_UNUSED_ARG(res); + LWIP_ERROR("mdns_add_dotlocal: Failed to add label", (res == ERR_OK), return res); + return mdns_domain_add_label(domain, NULL, 0); +} + +/** + * Build the .local. domain name + * @param domain Where to write the domain name + * @param mdns TMDNS netif descriptor. + * @return ERR_OK if domain .local. 
was written, an err_t otherwise + */ +static err_t +mdns_build_host_domain(struct mdns_domain *domain, struct mdns_host *mdns) +{ + err_t res; + LWIP_UNUSED_ARG(res); + memset(domain, 0, sizeof(struct mdns_domain)); + LWIP_ERROR("mdns_build_host_domain: mdns != NULL", (mdns != NULL), return ERR_VAL); + res = mdns_domain_add_label(domain, mdns->name, (u8_t)strlen(mdns->name)); + LWIP_ERROR("mdns_build_host_domain: Failed to add label", (res == ERR_OK), return res); + return mdns_add_dotlocal(domain); +} + +/** + * Build the lookup-all-services special DNS-SD domain name + * @param domain Where to write the domain name + * @return ERR_OK if domain _services._dns-sd._udp.local. was written, an err_t otherwise + */ +static err_t +mdns_build_dnssd_domain(struct mdns_domain *domain) +{ + err_t res; + LWIP_UNUSED_ARG(res); + memset(domain, 0, sizeof(struct mdns_domain)); + res = mdns_domain_add_label(domain, "_services", (u8_t)(sizeof("_services") - 1)); + LWIP_ERROR("mdns_build_dnssd_domain: Failed to add label", (res == ERR_OK), return res); + res = mdns_domain_add_label(domain, "_dns-sd", (u8_t)(sizeof("_dns-sd") - 1)); + LWIP_ERROR("mdns_build_dnssd_domain: Failed to add label", (res == ERR_OK), return res); + res = mdns_domain_add_label(domain, dnssd_protos[DNSSD_PROTO_UDP], (u8_t)strlen(dnssd_protos[DNSSD_PROTO_UDP])); + LWIP_ERROR("mdns_build_dnssd_domain: Failed to add label", (res == ERR_OK), return res); + return mdns_add_dotlocal(domain); +} + +/** + * Build domain name for a service + * @param domain Where to write the domain name + * @param service The service struct, containing service name, type and protocol + * @param include_name Whether to include the service name in the domain + * @return ERR_OK if domain was written. If service name is included, + * <name>.<service>.<proto>.local. will be written, otherwise <service>.<proto>.local. + * An err_t is returned on error.
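+ * For example, name 'myweb', service '_http' and protocol TCP give myweb._http._tcp.local. (or _http._tcp.local. when the name is omitted).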
+ */ +static err_t +mdns_build_service_domain(struct mdns_domain *domain, struct mdns_service *service, int include_name) +{ + err_t res; + LWIP_UNUSED_ARG(res); + memset(domain, 0, sizeof(struct mdns_domain)); + if (include_name) { + res = mdns_domain_add_label(domain, service->name, (u8_t)strlen(service->name)); + LWIP_ERROR("mdns_build_service_domain: Failed to add label", (res == ERR_OK), return res); + } + res = mdns_domain_add_label(domain, service->service, (u8_t)strlen(service->service)); + LWIP_ERROR("mdns_build_service_domain: Failed to add label", (res == ERR_OK), return res); + res = mdns_domain_add_label(domain, dnssd_protos[service->proto], (u8_t)strlen(dnssd_protos[service->proto])); + LWIP_ERROR("mdns_build_service_domain: Failed to add label", (res == ERR_OK), return res); + return mdns_add_dotlocal(domain); +} + +/** + * Check which replies we should send for a host/netif based on question + * @param netif The network interface that received the question + * @param rr Domain/type/class from a question + * @param reverse_v6_reply Bitmask of which IPv6 addresses to send reverse PTRs for + * if reply bit has REPLY_HOST_PTR_V6 set + * @return Bitmask of which replies to send + */ +static int +check_host(struct netif *netif, struct mdns_rr_info *rr, u8_t *reverse_v6_reply) +{ + err_t res; + int replies = 0; + struct mdns_domain mydomain; + + LWIP_UNUSED_ARG(reverse_v6_reply); /* if ipv6 is disabled */ + + if (rr->klass != DNS_RRCLASS_IN && rr->klass != DNS_RRCLASS_ANY) { + /* Invalid class */ + return replies; + } + + /* Handle PTR for our addresses */ + if (rr->type == DNS_RRTYPE_PTR || rr->type == DNS_RRTYPE_ANY) { +#if LWIP_IPV6 + int i; + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { + res = mdns_build_reverse_v6_domain(&mydomain, netif_ip6_addr(netif, i)); + if (res == ERR_OK && mdns_domain_eq(&rr->domain, &mydomain)) { + replies |= REPLY_HOST_PTR_V6; + /* Mark which addresses where requested */ + if (reverse_v6_reply) { + *reverse_v6_reply |= (1 << i); + } + } + } + } +#endif +#if LWIP_IPV4 + if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) { + res = mdns_build_reverse_v4_domain(&mydomain, netif_ip4_addr(netif)); + if (res == ERR_OK && mdns_domain_eq(&rr->domain, &mydomain)) { + replies |= REPLY_HOST_PTR_V4; + } + } +#endif + } + + res = mdns_build_host_domain(&mydomain, NETIF_TO_HOST(netif)); + /* Handle requests for our hostname */ + if (res == ERR_OK && mdns_domain_eq(&rr->domain, &mydomain)) { + /* TODO return NSEC if unsupported protocol requested */ +#if LWIP_IPV4 + if (!ip4_addr_isany_val(*netif_ip4_addr(netif)) + && (rr->type == DNS_RRTYPE_A || rr->type == DNS_RRTYPE_ANY)) { + replies |= REPLY_HOST_A; + } +#endif +#if LWIP_IPV6 + if (rr->type == DNS_RRTYPE_AAAA || rr->type == DNS_RRTYPE_ANY) { + replies |= REPLY_HOST_AAAA; + } +#endif + } + + return replies; +} + +/** + * Check which replies we should send for a service based on question + * @param service A registered MDNS service + * @param rr Domain/type/class from a question + * @return Bitmask of which replies to send + */ +static int +check_service(struct mdns_service *service, struct mdns_rr_info *rr) +{ + err_t res; + int replies = 0; + struct mdns_domain mydomain; + + if (rr->klass != DNS_RRCLASS_IN && rr->klass != DNS_RRCLASS_ANY) { + /* Invalid class */ + return 0; + } + + res = mdns_build_dnssd_domain(&mydomain); + if (res == ERR_OK && mdns_domain_eq(&rr->domain, &mydomain) && + (rr->type == DNS_RRTYPE_PTR || rr->type == DNS_RRTYPE_ANY)) { + 
/* Request for all service types */ + replies |= REPLY_SERVICE_TYPE_PTR; + } + + res = mdns_build_service_domain(&mydomain, service, 0); + if (res == ERR_OK && mdns_domain_eq(&rr->domain, &mydomain) && + (rr->type == DNS_RRTYPE_PTR || rr->type == DNS_RRTYPE_ANY)) { + /* Request for the instance of my service */ + replies |= REPLY_SERVICE_NAME_PTR; + } + + res = mdns_build_service_domain(&mydomain, service, 1); + if (res == ERR_OK && mdns_domain_eq(&rr->domain, &mydomain)) { + /* Request for info about my service */ + if (rr->type == DNS_RRTYPE_SRV || rr->type == DNS_RRTYPE_ANY) { + replies |= REPLY_SERVICE_SRV; + } + if (rr->type == DNS_RRTYPE_TXT || rr->type == DNS_RRTYPE_ANY) { + replies |= REPLY_SERVICE_TXT; + } + } + + return replies; +} + +/** + * Return bytes needed to write before jump for best result of compressing supplied domain + * against domain in outpacket starting at specified offset. + * If a match is found, offset is updated to where to jump to + * @param pbuf Pointer to pbuf with the partially constructed DNS packet + * @param offset Start position of a domain written earlier. If this location is suitable + * for compression, the pointer is updated to where in the domain to jump to. + * @param domain The domain to write + * @return Number of bytes to write of the new domain before writing a jump to the offset. + * If compression can not be done against this previous domain name, the full new + * domain length is returned. + */ +u16_t +mdns_compress_domain(struct pbuf *pbuf, u16_t *offset, struct mdns_domain *domain) +{ + struct mdns_domain target; + u16_t target_end; + u8_t target_len; + u8_t writelen = 0; + u8_t *ptr; + if (pbuf == NULL) { + return domain->length; + } + target_end = mdns_readname(pbuf, *offset, &target); + if (target_end == MDNS_READNAME_ERROR) { + return domain->length; + } + target_len = (u8_t)(target_end - *offset); + ptr = domain->name; + while (writelen < domain->length) { + u8_t domainlen = (u8_t)(domain->length - writelen); + u8_t labellen; + if (domainlen <= target.length && domainlen > DOMAIN_JUMP_SIZE) { + /* Compare domains if target is long enough, and we have enough left of the domain */ + u8_t targetpos = (u8_t)(target.length - domainlen); + if ((targetpos + DOMAIN_JUMP_SIZE) >= target_len) { + /* We are checking at or beyond a jump in the original, stop looking */ + break; + } + if (target.length >= domainlen && + memcmp(&domain->name[writelen], &target.name[targetpos], domainlen) == 0) { + *offset += targetpos; + return writelen; + } + } + /* Skip to next label in domain */ + labellen = *ptr; + writelen += 1 + labellen; + ptr += 1 + labellen; + } + /* Nothing found */ + return domain->length; +} + +/** + * Write domain to outpacket. Compression will be attempted, + * unless domain->skip_compression is set. 
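+ * Compression replaces an already-written suffix of the name with a 2-byte jump (DOMAIN_JUMP | offset) pointing at its earlier occurrence in the packet.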
+ * @param outpkt The outpacket to write to + * @param domain The domain name to write + * @return ERR_OK on success, an err_t otherwise + */ +static err_t +mdns_write_domain(struct mdns_outpacket *outpkt, struct mdns_domain *domain) +{ + int i; + err_t res; + u16_t writelen = domain->length; + u16_t jump_offset = 0; + u16_t jump; + + if (!domain->skip_compression) { + for (i = 0; i < NUM_DOMAIN_OFFSETS; i++) { + u16_t offset = outpkt->domain_offsets[i]; + if (offset) { + u16_t len = mdns_compress_domain(outpkt->pbuf, &offset, domain); + if (len < writelen) { + writelen = len; + jump_offset = offset; + } + } + } + } + + if (writelen) { + /* Write uncompressed part of name */ + res = pbuf_take_at(outpkt->pbuf, domain->name, writelen, outpkt->write_offset); + if (res != ERR_OK) { + return res; + } + + /* Store offset of this new domain */ + for (i = 0; i < NUM_DOMAIN_OFFSETS; i++) { + if (outpkt->domain_offsets[i] == 0) { + outpkt->domain_offsets[i] = outpkt->write_offset; + break; + } + } + + outpkt->write_offset += writelen; + } + if (jump_offset) { + /* Write jump */ + jump = lwip_htons(DOMAIN_JUMP | jump_offset); + res = pbuf_take_at(outpkt->pbuf, &jump, DOMAIN_JUMP_SIZE, outpkt->write_offset); + if (res != ERR_OK) { + return res; + } + outpkt->write_offset += DOMAIN_JUMP_SIZE; + } + return ERR_OK; +} + +/** + * Write a question to an outpacket + * A question contains domain, type and class. Since an answer also starts with these fields this function is also + * called from mdns_add_answer(). + * @param outpkt The outpacket to write to + * @param domain The domain name the answer is for + * @param type The DNS type of the answer (like 'AAAA', 'SRV') + * @param klass The DNS type of the answer (like 'IN') + * @param unicast If highest bit in class should be set, to instruct the responder to + * reply with a unicast packet + * @return ERR_OK on success, an err_t otherwise + */ +static err_t +mdns_add_question(struct mdns_outpacket *outpkt, struct mdns_domain *domain, u16_t type, u16_t klass, u16_t unicast) +{ + u16_t question_len; + u16_t field16; + err_t res; + + if (!outpkt->pbuf) { + /* If no pbuf is active, allocate one */ + outpkt->pbuf = pbuf_alloc(PBUF_TRANSPORT, OUTPACKET_SIZE, PBUF_RAM); + if (!outpkt->pbuf) { + return ERR_MEM; + } + outpkt->write_offset = SIZEOF_DNS_HDR; + } + + /* Worst case calculation. Domain string might be compressed */ + question_len = domain->length + sizeof(type) + sizeof(klass); + if (outpkt->write_offset + question_len > outpkt->pbuf->tot_len) { + /* No space */ + return ERR_MEM; + } + + /* Write name */ + res = mdns_write_domain(outpkt, domain); + if (res != ERR_OK) { + return res; + } + + /* Write type */ + field16 = lwip_htons(type); + res = pbuf_take_at(outpkt->pbuf, &field16, sizeof(field16), outpkt->write_offset); + if (res != ERR_OK) { + return res; + } + outpkt->write_offset += sizeof(field16); + + /* Write class */ + if (unicast) { + klass |= 0x8000; + } + field16 = lwip_htons(klass); + res = pbuf_take_at(outpkt->pbuf, &field16, sizeof(field16), outpkt->write_offset); + if (res != ERR_OK) { + return res; + } + outpkt->write_offset += sizeof(field16); + + return ERR_OK; +} + +/** + * Write answer to reply packet. + * buf or answer_domain can be null. The rd_length written will be buf_length + + * size of (compressed) domain. Most uses will need either buf or answer_domain, + * special case is SRV that starts with 3 u16 and then a domain name. 
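+ * The rd_length field is skipped while writing and back-patched once the size of the (possibly compressed) RDATA is known.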
+ * @param reply The outpacket to write to + * @param domain The domain name the answer is for + * @param type The DNS type of the answer (like 'AAAA', 'SRV') + * @param klass The DNS type of the answer (like 'IN') + * @param cache_flush If highest bit in class should be set, to instruct receiver that + * this reply replaces any earlier answer for this domain/type/class + * @param ttl Validity time in seconds to send out for IP address data in DNS replies + * @param buf Pointer to buffer of answer data + * @param buf_length Length of variable data + * @param answer_domain A domain to write after any buffer data as answer + * @return ERR_OK on success, an err_t otherwise + */ +static err_t +mdns_add_answer(struct mdns_outpacket *reply, struct mdns_domain *domain, u16_t type, u16_t klass, u16_t cache_flush, + u32_t ttl, const u8_t *buf, size_t buf_length, struct mdns_domain *answer_domain) +{ + u16_t answer_len; + u16_t field16; + u16_t rdlen_offset; + u16_t answer_offset; + u32_t field32; + err_t res; + + if (!reply->pbuf) { + /* If no pbuf is active, allocate one */ + reply->pbuf = pbuf_alloc(PBUF_TRANSPORT, OUTPACKET_SIZE, PBUF_RAM); + if (!reply->pbuf) { + return ERR_MEM; + } + reply->write_offset = SIZEOF_DNS_HDR; + } + + /* Worst case calculation. Domain strings might be compressed */ + answer_len = domain->length + sizeof(type) + sizeof(klass) + sizeof(ttl) + sizeof(field16)/*rd_length*/; + if (buf) { + answer_len += (u16_t)buf_length; + } + if (answer_domain) { + answer_len += answer_domain->length; + } + if (reply->write_offset + answer_len > reply->pbuf->tot_len) { + /* No space */ + return ERR_MEM; + } + + /* Answer starts with same data as question, then more fields */ + mdns_add_question(reply, domain, type, klass, cache_flush); + + /* Write TTL */ + field32 = lwip_htonl(ttl); + res = pbuf_take_at(reply->pbuf, &field32, sizeof(field32), reply->write_offset); + if (res != ERR_OK) { + return res; + } + reply->write_offset += sizeof(field32); + + /* Store offsets and skip forward to the data */ + rdlen_offset = reply->write_offset; + reply->write_offset += sizeof(field16); + answer_offset = reply->write_offset; + + if (buf) { + /* Write static data */ + res = pbuf_take_at(reply->pbuf, buf, (u16_t)buf_length, reply->write_offset); + if (res != ERR_OK) { + return res; + } + reply->write_offset += (u16_t)buf_length; + } + + if (answer_domain) { + /* Write name answer (compressed if possible) */ + res = mdns_write_domain(reply, answer_domain); + if (res != ERR_OK) { + return res; + } + } + + /* Write rd_length after when we know the answer size */ + field16 = lwip_htons(reply->write_offset - answer_offset); + res = pbuf_take_at(reply->pbuf, &field16, sizeof(field16), rdlen_offset); + + return res; +} + +/** + * Helper function for mdns_read_question/mdns_read_answer + * Reads a domain, type and class from the packet + * @param pkt The MDNS packet to read from. The parse_offset field will be + * incremented to point to the next unparsed byte. 
+ * @param info The struct to fill with domain, type and class + * @return ERR_OK on success, an err_t otherwise + */ +static err_t +mdns_read_rr_info(struct mdns_packet *pkt, struct mdns_rr_info *info) +{ + u16_t field16, copied; + pkt->parse_offset = mdns_readname(pkt->pbuf, pkt->parse_offset, &info->domain); + if (pkt->parse_offset == MDNS_READNAME_ERROR) { + return ERR_VAL; + } + + copied = pbuf_copy_partial(pkt->pbuf, &field16, sizeof(field16), pkt->parse_offset); + if (copied != sizeof(field16)) { + return ERR_VAL; + } + pkt->parse_offset += copied; + info->type = lwip_ntohs(field16); + + copied = pbuf_copy_partial(pkt->pbuf, &field16, sizeof(field16), pkt->parse_offset); + if (copied != sizeof(field16)) { + return ERR_VAL; + } + pkt->parse_offset += copied; + info->klass = lwip_ntohs(field16); + + return ERR_OK; +} + +/** + * Read a question from the packet. + * All questions have to be read before the answers. + * @param pkt The MDNS packet to read from. The questions_left field will be decremented + * and the parse_offset will be updated. + * @param question The struct to fill with question data + * @return ERR_OK on success, an err_t otherwise + */ +static err_t +mdns_read_question(struct mdns_packet *pkt, struct mdns_question *question) +{ + /* Safety check */ + if (pkt->pbuf->tot_len < pkt->parse_offset) { + return ERR_VAL; + } + + if (pkt->questions_left) { + err_t res; + pkt->questions_left--; + + memset(question, 0, sizeof(struct mdns_question)); + res = mdns_read_rr_info(pkt, &question->info); + if (res != ERR_OK) { + return res; + } + + /* Extract unicast flag from class field */ + question->unicast = question->info.klass & 0x8000; + question->info.klass &= 0x7FFF; + + return ERR_OK; + } + return ERR_VAL; +} + +/** + * Read an answer from the packet + * The variable length reply is not copied, its pbuf offset and length is stored instead. + * @param pkt The MDNS packet to read. The answers_left field will be decremented and + * the parse_offset will be updated. 
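+ * All questions in the packet must have been read before the first answer; otherwise ERR_VAL is returned.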
+ * @param answer The struct to fill with answer data + * @return ERR_OK on success, an err_t otherwise + */ +static err_t +mdns_read_answer(struct mdns_packet *pkt, struct mdns_answer *answer) +{ + /* Read questions first */ + if (pkt->questions_left) { + return ERR_VAL; + } + + /* Safety check */ + if (pkt->pbuf->tot_len < pkt->parse_offset) { + return ERR_VAL; + } + + if (pkt->answers_left) { + u16_t copied, field16; + u32_t ttl; + err_t res; + pkt->answers_left--; + + memset(answer, 0, sizeof(struct mdns_answer)); + res = mdns_read_rr_info(pkt, &answer->info); + if (res != ERR_OK) { + return res; + } + + /* Extract cache_flush flag from class field */ + answer->cache_flush = answer->info.klass & 0x8000; + answer->info.klass &= 0x7FFF; + + copied = pbuf_copy_partial(pkt->pbuf, &ttl, sizeof(ttl), pkt->parse_offset); + if (copied != sizeof(ttl)) { + return ERR_VAL; + } + pkt->parse_offset += copied; + answer->ttl = lwip_ntohl(ttl); + + copied = pbuf_copy_partial(pkt->pbuf, &field16, sizeof(field16), pkt->parse_offset); + if (copied != sizeof(field16)) { + return ERR_VAL; + } + pkt->parse_offset += copied; + answer->rd_length = lwip_ntohs(field16); + + answer->rd_offset = pkt->parse_offset; + pkt->parse_offset += answer->rd_length; + + return ERR_OK; + } + return ERR_VAL; +} + +#if LWIP_IPV4 +/** Write an IPv4 address (A) RR to outpacket */ +static err_t +mdns_add_a_answer(struct mdns_outpacket *reply, u16_t cache_flush, struct netif *netif) +{ + struct mdns_domain host; + mdns_build_host_domain(&host, NETIF_TO_HOST(netif)); + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with A record\n")); + return mdns_add_answer(reply, &host, DNS_RRTYPE_A, DNS_RRCLASS_IN, cache_flush, (NETIF_TO_HOST(netif))->dns_ttl, (const u8_t *) netif_ip4_addr(netif), sizeof(ip4_addr_t), NULL); +} + +/** Write a 4.3.2.1.in-addr.arpa -> hostname.local PTR RR to outpacket */ +static err_t +mdns_add_hostv4_ptr_answer(struct mdns_outpacket *reply, u16_t cache_flush, struct netif *netif) +{ + struct mdns_domain host, revhost; + mdns_build_host_domain(&host, NETIF_TO_HOST(netif)); + mdns_build_reverse_v4_domain(&revhost, netif_ip4_addr(netif)); + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with v4 PTR record\n")); + return mdns_add_answer(reply, &revhost, DNS_RRTYPE_PTR, DNS_RRCLASS_IN, cache_flush, (NETIF_TO_HOST(netif))->dns_ttl, NULL, 0, &host); +} +#endif + +#if LWIP_IPV6 +/** Write an IPv6 address (AAAA) RR to outpacket */ +static err_t +mdns_add_aaaa_answer(struct mdns_outpacket *reply, u16_t cache_flush, struct netif *netif, int addrindex) +{ + struct mdns_domain host; + mdns_build_host_domain(&host, NETIF_TO_HOST(netif)); + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with AAAA record\n")); + return mdns_add_answer(reply, &host, DNS_RRTYPE_AAAA, DNS_RRCLASS_IN, cache_flush, (NETIF_TO_HOST(netif))->dns_ttl, (const u8_t *) netif_ip6_addr(netif, addrindex), sizeof(ip6_addr_p_t), NULL); +} + +/** Write a x.y.z.ip6.arpa -> hostname.local PTR RR to outpacket */ +static err_t +mdns_add_hostv6_ptr_answer(struct mdns_outpacket *reply, u16_t cache_flush, struct netif *netif, int addrindex) +{ + struct mdns_domain host, revhost; + mdns_build_host_domain(&host, NETIF_TO_HOST(netif)); + mdns_build_reverse_v6_domain(&revhost, netif_ip6_addr(netif, addrindex)); + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with v6 PTR record\n")); + return mdns_add_answer(reply, &revhost, DNS_RRTYPE_PTR, DNS_RRCLASS_IN, cache_flush, (NETIF_TO_HOST(netif))->dns_ttl, NULL, 0, &host); +} +#endif + +/** Write an all-services -> servicetype PTR RR to 
outpacket */ +static err_t +mdns_add_servicetype_ptr_answer(struct mdns_outpacket *reply, struct mdns_service *service) +{ + struct mdns_domain service_type, service_dnssd; + mdns_build_service_domain(&service_type, service, 0); + mdns_build_dnssd_domain(&service_dnssd); + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with service type PTR record\n")); + return mdns_add_answer(reply, &service_dnssd, DNS_RRTYPE_PTR, DNS_RRCLASS_IN, 0, service->dns_ttl, NULL, 0, &service_type); +} + +/** Write a servicetype -> servicename PTR RR to outpacket */ +static err_t +mdns_add_servicename_ptr_answer(struct mdns_outpacket *reply, struct mdns_service *service) +{ + struct mdns_domain service_type, service_instance; + mdns_build_service_domain(&service_type, service, 0); + mdns_build_service_domain(&service_instance, service, 1); + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with service name PTR record\n")); + return mdns_add_answer(reply, &service_type, DNS_RRTYPE_PTR, DNS_RRCLASS_IN, 0, service->dns_ttl, NULL, 0, &service_instance); +} + +/** Write a SRV RR to outpacket */ +static err_t +mdns_add_srv_answer(struct mdns_outpacket *reply, u16_t cache_flush, struct mdns_host *mdns, struct mdns_service *service) +{ + struct mdns_domain service_instance, srvhost; + u16_t srvdata[3]; + mdns_build_service_domain(&service_instance, service, 1); + mdns_build_host_domain(&srvhost, mdns); + if (reply->legacy_query) { + /* RFC 6762 section 18.14: + * In legacy unicast responses generated to answer legacy queries, + * name compression MUST NOT be performed on SRV records. + */ + srvhost.skip_compression = 1; + } + srvdata[0] = lwip_htons(SRV_PRIORITY); + srvdata[1] = lwip_htons(SRV_WEIGHT); + srvdata[2] = lwip_htons(service->port); + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with SRV record\n")); + return mdns_add_answer(reply, &service_instance, DNS_RRTYPE_SRV, DNS_RRCLASS_IN, cache_flush, service->dns_ttl, + (const u8_t *) &srvdata, sizeof(srvdata), &srvhost); +} + +/** Write a TXT RR to outpacket */ +static err_t +mdns_add_txt_answer(struct mdns_outpacket *reply, u16_t cache_flush, struct mdns_service *service) +{ + struct mdns_domain service_instance; + mdns_build_service_domain(&service_instance, service, 1); + mdns_prepare_txtdata(service); + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with TXT record\n")); + return mdns_add_answer(reply, &service_instance, DNS_RRTYPE_TXT, DNS_RRCLASS_IN, cache_flush, service->dns_ttl, + (u8_t *) &service->txtdata.name, service->txtdata.length, NULL); +} + +/** + * Setup outpacket as a reply to the incoming packet + */ +static void +mdns_init_outpacket(struct mdns_outpacket *out, struct mdns_packet *in) +{ + memset(out, 0, sizeof(struct mdns_outpacket)); + out->cache_flush = 1; + out->netif = in->netif; + + /* Copy source IP/port to use when responding unicast, or to choose + * which pcb to use for multicast (IPv4/IPv6) + */ + SMEMCPY(&out->dest_addr, &in->source_addr, sizeof(ip_addr_t)); + out->dest_port = in->source_port; + + if (in->source_port != LWIP_IANA_PORT_MDNS) { + out->unicast_reply = 1; + out->cache_flush = 0; + if (in->questions == 1) { + out->legacy_query = 1; + out->tx_id = in->tx_id; + } + } + + if (in->recv_unicast) { + out->unicast_reply = 1; + } +} + +/** + * Send chosen answers as a reply + * + * Add all selected answers (first write will allocate pbuf) + * Add additional answers based on the selected answers + * Send the packet + */ +static err_t +mdns_send_outpacket(struct mdns_outpacket *outpkt, u8_t flags) +{ + struct mdns_service *service; + err_t res = 
ERR_ARG; + int i; + struct mdns_host *mdns = NETIF_TO_HOST(outpkt->netif); + u16_t answers = 0; + + /* Write answers to host questions */ +#if LWIP_IPV4 + if (outpkt->host_replies & REPLY_HOST_A) { + res = mdns_add_a_answer(outpkt, outpkt->cache_flush, outpkt->netif); + if (res != ERR_OK) { + goto cleanup; + } + answers++; + } + if (outpkt->host_replies & REPLY_HOST_PTR_V4) { + res = mdns_add_hostv4_ptr_answer(outpkt, outpkt->cache_flush, outpkt->netif); + if (res != ERR_OK) { + goto cleanup; + } + answers++; + } +#endif +#if LWIP_IPV6 + if (outpkt->host_replies & REPLY_HOST_AAAA) { + int addrindex; + for (addrindex = 0; addrindex < LWIP_IPV6_NUM_ADDRESSES; addrindex++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(outpkt->netif, addrindex))) { + res = mdns_add_aaaa_answer(outpkt, outpkt->cache_flush, outpkt->netif, addrindex); + if (res != ERR_OK) { + goto cleanup; + } + answers++; + } + } + } + if (outpkt->host_replies & REPLY_HOST_PTR_V6) { + u8_t rev_addrs = outpkt->host_reverse_v6_replies; + int addrindex = 0; + while (rev_addrs) { + if (rev_addrs & 1) { + res = mdns_add_hostv6_ptr_answer(outpkt, outpkt->cache_flush, outpkt->netif, addrindex); + if (res != ERR_OK) { + goto cleanup; + } + answers++; + } + addrindex++; + rev_addrs >>= 1; + } + } +#endif + + /* Write answers to service questions */ + for (i = 0; i < MDNS_MAX_SERVICES; i++) { + service = mdns->services[i]; + if (!service) { + continue; + } + + if (outpkt->serv_replies[i] & REPLY_SERVICE_TYPE_PTR) { + res = mdns_add_servicetype_ptr_answer(outpkt, service); + if (res != ERR_OK) { + goto cleanup; + } + answers++; + } + + if (outpkt->serv_replies[i] & REPLY_SERVICE_NAME_PTR) { + res = mdns_add_servicename_ptr_answer(outpkt, service); + if (res != ERR_OK) { + goto cleanup; + } + answers++; + } + + if (outpkt->serv_replies[i] & REPLY_SERVICE_SRV) { + res = mdns_add_srv_answer(outpkt, outpkt->cache_flush, mdns, service); + if (res != ERR_OK) { + goto cleanup; + } + answers++; + } + + if (outpkt->serv_replies[i] & REPLY_SERVICE_TXT) { + res = mdns_add_txt_answer(outpkt, outpkt->cache_flush, service); + if (res != ERR_OK) { + goto cleanup; + } + answers++; + } + } + + /* if this is a response, the data above is anwers, else this is a probe and the answers above goes into auth section */ + if (flags & DNS_FLAG1_RESPONSE) { + outpkt->answers += answers; + } else { + outpkt->authoritative += answers; + } + + /* All answers written, add additional RRs */ + for (i = 0; i < MDNS_MAX_SERVICES; i++) { + service = mdns->services[i]; + if (!service) { + continue; + } + + if (outpkt->serv_replies[i] & REPLY_SERVICE_NAME_PTR) { + /* Our service instance requested, include SRV & TXT + * if they are already not requested. 
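(RFC 6763 section 12.1 recommends including the SRV and TXT records as additional records when answering a service instance PTR question)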
*/ + if (!(outpkt->serv_replies[i] & REPLY_SERVICE_SRV)) { + res = mdns_add_srv_answer(outpkt, outpkt->cache_flush, mdns, service); + if (res != ERR_OK) { + goto cleanup; + } + outpkt->additional++; + } + + if (!(outpkt->serv_replies[i] & REPLY_SERVICE_TXT)) { + res = mdns_add_txt_answer(outpkt, outpkt->cache_flush, service); + if (res != ERR_OK) { + goto cleanup; + } + outpkt->additional++; + } + } + + /* If service instance, SRV, record or an IP address is requested, + * supply all addresses for the host + */ + if ((outpkt->serv_replies[i] & (REPLY_SERVICE_NAME_PTR | REPLY_SERVICE_SRV)) || + (outpkt->host_replies & (REPLY_HOST_A | REPLY_HOST_AAAA))) { +#if LWIP_IPV6 + if (!(outpkt->host_replies & REPLY_HOST_AAAA)) { + int addrindex; + for (addrindex = 0; addrindex < LWIP_IPV6_NUM_ADDRESSES; addrindex++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(outpkt->netif, addrindex))) { + res = mdns_add_aaaa_answer(outpkt, outpkt->cache_flush, outpkt->netif, addrindex); + if (res != ERR_OK) { + goto cleanup; + } + outpkt->additional++; + } + } + } +#endif +#if LWIP_IPV4 + if (!(outpkt->host_replies & REPLY_HOST_A) && + !ip4_addr_isany_val(*netif_ip4_addr(outpkt->netif))) { + res = mdns_add_a_answer(outpkt, outpkt->cache_flush, outpkt->netif); + if (res != ERR_OK) { + goto cleanup; + } + outpkt->additional++; + } +#endif + } + } + + if (outpkt->pbuf) { + const ip_addr_t *mcast_destaddr; + struct dns_hdr hdr; + + /* Write header */ + memset(&hdr, 0, sizeof(hdr)); + hdr.flags1 = flags; + hdr.numquestions = lwip_htons(outpkt->questions); + hdr.numanswers = lwip_htons(outpkt->answers); + hdr.numauthrr = lwip_htons(outpkt->authoritative); + hdr.numextrarr = lwip_htons(outpkt->additional); + hdr.id = lwip_htons(outpkt->tx_id); + pbuf_take(outpkt->pbuf, &hdr, sizeof(hdr)); + + /* Shrink packet */ + pbuf_realloc(outpkt->pbuf, outpkt->write_offset); + + if (IP_IS_V6_VAL(outpkt->dest_addr)) { +#if LWIP_IPV6 + mcast_destaddr = &v6group; +#endif + } else { +#if LWIP_IPV4 + mcast_destaddr = &v4group; +#endif + } + /* Send created packet */ + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Sending packet, len=%d, unicast=%d\n", outpkt->write_offset, outpkt->unicast_reply)); + if (outpkt->unicast_reply) { + res = udp_sendto_if(mdns_pcb, outpkt->pbuf, &outpkt->dest_addr, outpkt->dest_port, outpkt->netif); + } else { + res = udp_sendto_if(mdns_pcb, outpkt->pbuf, mcast_destaddr, LWIP_IANA_PORT_MDNS, outpkt->netif); + } + } + +cleanup: + if (outpkt->pbuf) { + pbuf_free(outpkt->pbuf); + outpkt->pbuf = NULL; + } + return res; +} + +/** + * Send unsolicited answer containing all our known data + * @param netif The network interface to send on + * @param destination The target address to send to (usually multicast address) + */ +static void +mdns_announce(struct netif *netif, const ip_addr_t *destination) +{ + struct mdns_outpacket announce; + int i; + struct mdns_host *mdns = NETIF_TO_HOST(netif); + + memset(&announce, 0, sizeof(announce)); + announce.netif = netif; + announce.cache_flush = 1; +#if LWIP_IPV4 + if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) { + announce.host_replies = REPLY_HOST_A | REPLY_HOST_PTR_V4; + } +#endif +#if LWIP_IPV6 + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { + announce.host_replies |= REPLY_HOST_AAAA | REPLY_HOST_PTR_V6; + announce.host_reverse_v6_replies |= (1 << i); + } + } +#endif + + for (i = 0; i < MDNS_MAX_SERVICES; i++) { + struct mdns_service *serv = mdns->services[i]; + if (serv) { + announce.serv_replies[i] = REPLY_SERVICE_TYPE_PTR 
| REPLY_SERVICE_NAME_PTR | + REPLY_SERVICE_SRV | REPLY_SERVICE_TXT; + } + } + + announce.dest_port = LWIP_IANA_PORT_MDNS; + SMEMCPY(&announce.dest_addr, destination, sizeof(announce.dest_addr)); + mdns_send_outpacket(&announce, DNS_FLAG1_RESPONSE | DNS_FLAG1_AUTHORATIVE); +} + +/** + * Handle question MDNS packet + * 1. Parse all questions and set bits what answers to send + * 2. Clear pending answers if known answers are supplied + * 3. Put chosen answers in new packet and send as reply + */ +static void +mdns_handle_question(struct mdns_packet *pkt) +{ + struct mdns_service *service; + struct mdns_outpacket reply; + int replies = 0; + int i; + err_t res; + struct mdns_host *mdns = NETIF_TO_HOST(pkt->netif); + + if (mdns->probing_state != MDNS_PROBING_COMPLETE) { + /* Don't answer questions until we've verified our domains via probing */ + /* @todo we should check incoming questions during probing for tiebreaking */ + return; + } + + mdns_init_outpacket(&reply, pkt); + + while (pkt->questions_left) { + struct mdns_question q; + + res = mdns_read_question(pkt, &q); + if (res != ERR_OK) { + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Failed to parse question, skipping query packet\n")); + return; + } + + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Query for domain ")); + mdns_domain_debug_print(&q.info.domain); + LWIP_DEBUGF(MDNS_DEBUG, (" type %d class %d\n", q.info.type, q.info.klass)); + + if (q.unicast) { + /* Reply unicast if any question is unicast */ + reply.unicast_reply = 1; + } + + reply.host_replies |= check_host(pkt->netif, &q.info, &reply.host_reverse_v6_replies); + replies |= reply.host_replies; + + for (i = 0; i < MDNS_MAX_SERVICES; i++) { + service = mdns->services[i]; + if (!service) { + continue; + } + reply.serv_replies[i] |= check_service(service, &q.info); + replies |= reply.serv_replies[i]; + } + + if (replies && reply.legacy_query) { + /* Add question to reply packet (legacy packet only has 1 question) */ + res = mdns_add_question(&reply, &q.info.domain, q.info.type, q.info.klass, 0); + reply.questions = 1; + if (res != ERR_OK) { + goto cleanup; + } + } + } + + /* Handle known answers */ + while (pkt->answers_left) { + struct mdns_answer ans; + u8_t rev_v6; + int match; + + res = mdns_read_answer(pkt, &ans); + if (res != ERR_OK) { + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Failed to parse answer, skipping query packet\n")); + goto cleanup; + } + + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Known answer for domain ")); + mdns_domain_debug_print(&ans.info.domain); + LWIP_DEBUGF(MDNS_DEBUG, (" type %d class %d\n", ans.info.type, ans.info.klass)); + + + if (ans.info.type == DNS_RRTYPE_ANY || ans.info.klass == DNS_RRCLASS_ANY) { + /* Skip known answers for ANY type & class */ + continue; + } + + rev_v6 = 0; + match = reply.host_replies & check_host(pkt->netif, &ans.info, &rev_v6); + if (match && (ans.ttl > (mdns->dns_ttl / 2))) { + /* The RR in the known answer matches an RR we are planning to send, + * and the TTL is less than half gone. + * If the payload matches we should not send that answer. 
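+ * (This is known-answer suppression as described in RFC 6762 section 7.1.)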
+ */ + if (ans.info.type == DNS_RRTYPE_PTR) { + /* Read domain and compare */ + struct mdns_domain known_ans, my_ans; + u16_t len; + len = mdns_readname(pkt->pbuf, ans.rd_offset, &known_ans); + res = mdns_build_host_domain(&my_ans, mdns); + if (len != MDNS_READNAME_ERROR && res == ERR_OK && mdns_domain_eq(&known_ans, &my_ans)) { +#if LWIP_IPV4 + if (match & REPLY_HOST_PTR_V4) { + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: v4 PTR\n")); + reply.host_replies &= ~REPLY_HOST_PTR_V4; + } +#endif +#if LWIP_IPV6 + if (match & REPLY_HOST_PTR_V6) { + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: v6 PTR\n")); + reply.host_reverse_v6_replies &= ~rev_v6; + if (reply.host_reverse_v6_replies == 0) { + reply.host_replies &= ~REPLY_HOST_PTR_V6; + } + } +#endif + } + } else if (match & REPLY_HOST_A) { +#if LWIP_IPV4 + if (ans.rd_length == sizeof(ip4_addr_t) && + pbuf_memcmp(pkt->pbuf, ans.rd_offset, netif_ip4_addr(pkt->netif), ans.rd_length) == 0) { + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: A\n")); + reply.host_replies &= ~REPLY_HOST_A; + } +#endif + } else if (match & REPLY_HOST_AAAA) { +#if LWIP_IPV6 + if (ans.rd_length == sizeof(ip6_addr_p_t) && + /* TODO this clears all AAAA responses if first addr is set as known */ + pbuf_memcmp(pkt->pbuf, ans.rd_offset, netif_ip6_addr(pkt->netif, 0), ans.rd_length) == 0) { + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: AAAA\n")); + reply.host_replies &= ~REPLY_HOST_AAAA; + } +#endif + } + } + + for (i = 0; i < MDNS_MAX_SERVICES; i++) { + service = mdns->services[i]; + if (!service) { + continue; + } + match = reply.serv_replies[i] & check_service(service, &ans.info); + if (match && (ans.ttl > (service->dns_ttl / 2))) { + /* The RR in the known answer matches an RR we are planning to send, + * and the TTL is less than half gone. + * If the payload matches we should not send that answer. 
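+ * (Per RFC 6762 section 7.1 a known answer only suppresses the reply while its remaining TTL is more than half the record's true TTL, hence the ttl > dns_ttl / 2 check in the condition above.)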
+ */ + if (ans.info.type == DNS_RRTYPE_PTR) { + /* Read domain and compare */ + struct mdns_domain known_ans, my_ans; + u16_t len; + len = mdns_readname(pkt->pbuf, ans.rd_offset, &known_ans); + if (len != MDNS_READNAME_ERROR) { + if (match & REPLY_SERVICE_TYPE_PTR) { + res = mdns_build_service_domain(&my_ans, service, 0); + if (res == ERR_OK && mdns_domain_eq(&known_ans, &my_ans)) { + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: service type PTR\n")); + reply.serv_replies[i] &= ~REPLY_SERVICE_TYPE_PTR; + } + } + if (match & REPLY_SERVICE_NAME_PTR) { + res = mdns_build_service_domain(&my_ans, service, 1); + if (res == ERR_OK && mdns_domain_eq(&known_ans, &my_ans)) { + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: service name PTR\n")); + reply.serv_replies[i] &= ~REPLY_SERVICE_NAME_PTR; + } + } + } + } else if (match & REPLY_SERVICE_SRV) { + /* Read and compare to my SRV record */ + u16_t field16, len, read_pos; + struct mdns_domain known_ans, my_ans; + read_pos = ans.rd_offset; + do { + /* Check priority field */ + len = pbuf_copy_partial(pkt->pbuf, &field16, sizeof(field16), read_pos); + if (len != sizeof(field16) || lwip_ntohs(field16) != SRV_PRIORITY) { + break; + } + read_pos += len; + /* Check weight field */ + len = pbuf_copy_partial(pkt->pbuf, &field16, sizeof(field16), read_pos); + if (len != sizeof(field16) || lwip_ntohs(field16) != SRV_WEIGHT) { + break; + } + read_pos += len; + /* Check port field */ + len = pbuf_copy_partial(pkt->pbuf, &field16, sizeof(field16), read_pos); + if (len != sizeof(field16) || lwip_ntohs(field16) != service->port) { + break; + } + read_pos += len; + /* Check host field */ + len = mdns_readname(pkt->pbuf, read_pos, &known_ans); + mdns_build_host_domain(&my_ans, mdns); + if (len == MDNS_READNAME_ERROR || !mdns_domain_eq(&known_ans, &my_ans)) { + break; + } + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: SRV\n")); + reply.serv_replies[i] &= ~REPLY_SERVICE_SRV; + } while (0); + } else if (match & REPLY_SERVICE_TXT) { + mdns_prepare_txtdata(service); + if (service->txtdata.length == ans.rd_length && + pbuf_memcmp(pkt->pbuf, ans.rd_offset, service->txtdata.name, ans.rd_length) == 0) { + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: TXT\n")); + reply.serv_replies[i] &= ~REPLY_SERVICE_TXT; + } + } + } + } + } + + mdns_send_outpacket(&reply, DNS_FLAG1_RESPONSE | DNS_FLAG1_AUTHORATIVE); + +cleanup: + if (reply.pbuf) { + /* This should only happen if we fail to alloc/write question for legacy query */ + pbuf_free(reply.pbuf); + reply.pbuf = NULL; + } +} + +/** + * Handle response MDNS packet + * Only prints debug for now. Will need more code to do conflict resolution. 
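+ * During probing, however, answers that match our host domain or one of our
+ * service domains are treated as a conflict and reported through the
+ * registered name result callback.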
+ */ +static void +mdns_handle_response(struct mdns_packet *pkt) +{ + struct mdns_host* mdns = NETIF_TO_HOST(pkt->netif); + + /* Ignore all questions */ + while (pkt->questions_left) { + struct mdns_question q; + err_t res; + + res = mdns_read_question(pkt, &q); + if (res != ERR_OK) { + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Failed to parse question, skipping response packet\n")); + return; + } + } + + while (pkt->answers_left) { + struct mdns_answer ans; + err_t res; + + res = mdns_read_answer(pkt, &ans); + if (res != ERR_OK) { + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Failed to parse answer, skipping response packet\n")); + return; + } + + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Answer for domain ")); + mdns_domain_debug_print(&ans.info.domain); + LWIP_DEBUGF(MDNS_DEBUG, (" type %d class %d\n", ans.info.type, ans.info.klass)); + + /*"Apparently conflicting Multicast DNS responses received *before* the first probe packet is sent MUST + be silently ignored" so drop answer if we haven't started probing yet*/ + if ((mdns->probing_state == MDNS_PROBING_ONGOING) && (mdns->probes_sent > 0)) { + struct mdns_domain domain; + u8_t i; + u8_t conflict = 0; + + res = mdns_build_host_domain(&domain, mdns); + if (res == ERR_OK && mdns_domain_eq(&ans.info.domain, &domain)) { + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Probe response matches host domain!")); + conflict = 1; + } + + for (i = 0; i < MDNS_MAX_SERVICES; i++) { + struct mdns_service* service = mdns->services[i]; + if (!service) { + continue; + } + res = mdns_build_service_domain(&domain, service, 1); + if ((res == ERR_OK) && mdns_domain_eq(&ans.info.domain, &domain)) { + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Probe response matches service domain!")); + conflict = 1; + } + } + + if (conflict != 0) { + sys_untimeout(mdns_probe, pkt->netif); + if (mdns_name_result_cb != NULL) { + mdns_name_result_cb(pkt->netif, MDNS_PROBING_CONFLICT); + } + } + } + } +} + +/** + * Receive input function for MDNS packets. + * Handles both IPv4 and IPv6 UDP pcbs. + */ +static void +mdns_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + struct dns_hdr hdr; + struct mdns_packet packet; + struct netif *recv_netif = ip_current_input_netif(); + u16_t offset = 0; + + LWIP_UNUSED_ARG(arg); + LWIP_UNUSED_ARG(pcb); + + LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Received IPv%d MDNS packet, len %d\n", IP_IS_V6(addr) ? 
6 : 4, p->tot_len)); + + if (NETIF_TO_HOST(recv_netif) == NULL) { + /* From netif not configured for MDNS */ + goto dealloc; + } + + if (pbuf_copy_partial(p, &hdr, SIZEOF_DNS_HDR, offset) < SIZEOF_DNS_HDR) { + /* Too small */ + goto dealloc; + } + offset += SIZEOF_DNS_HDR; + + if (DNS_HDR_GET_OPCODE(&hdr)) { + /* Ignore non-standard queries in multicast packets (RFC 6762, section 18.3) */ + goto dealloc; + } + + memset(&packet, 0, sizeof(packet)); + SMEMCPY(&packet.source_addr, addr, sizeof(packet.source_addr)); + packet.source_port = port; + packet.netif = recv_netif; + packet.pbuf = p; + packet.parse_offset = offset; + packet.tx_id = lwip_ntohs(hdr.id); + packet.questions = packet.questions_left = lwip_ntohs(hdr.numquestions); + packet.answers = packet.answers_left = lwip_ntohs(hdr.numanswers) + lwip_ntohs(hdr.numauthrr) + lwip_ntohs(hdr.numextrarr); + +#if LWIP_IPV6 + if (IP_IS_V6(ip_current_dest_addr())) { + /* instead of having one 'v6group' per netif, just compare zoneless here */ + if (!ip_addr_cmp_zoneless(ip_current_dest_addr(), &v6group)) { + packet.recv_unicast = 1; + } + } +#endif +#if LWIP_IPV4 + if (!IP_IS_V6(ip_current_dest_addr())) { + if (!ip_addr_cmp(ip_current_dest_addr(), &v4group)) { + packet.recv_unicast = 1; + } + } +#endif + + if (hdr.flags1 & DNS_FLAG1_RESPONSE) { + mdns_handle_response(&packet); + } else { + mdns_handle_question(&packet); + } + +dealloc: + pbuf_free(p); +} + +#if LWIP_NETIF_EXT_STATUS_CALLBACK && MDNS_RESP_USENETIF_EXTCALLBACK +static void +mdns_netif_ext_status_callback(struct netif *netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t *args) +{ + LWIP_UNUSED_ARG(args); + + /* MDNS enabled on netif? */ + if (NETIF_TO_HOST(netif) == NULL) { + return; + } + + if (reason & LWIP_NSC_STATUS_CHANGED) { + if (args->status_changed.state != 0) { + mdns_resp_restart(netif); + } + /* TODO: send goodbye message */ + } + if (reason & LWIP_NSC_LINK_CHANGED) { + if (args->link_changed.state != 0) { + mdns_resp_restart(netif); + } + } + if (reason & (LWIP_NSC_IPV4_ADDRESS_CHANGED | LWIP_NSC_IPV4_GATEWAY_CHANGED | + LWIP_NSC_IPV4_NETMASK_CHANGED | LWIP_NSC_IPV4_SETTINGS_CHANGED | + LWIP_NSC_IPV6_SET | LWIP_NSC_IPV6_ADDR_STATE_CHANGED)) { + mdns_resp_announce(netif); + } +} +#endif /* LWIP_NETIF_EXT_STATUS_CALLBACK && MDNS_RESP_USENETIF_EXTCALLBACK */ + +static err_t +mdns_send_probe(struct netif* netif, const ip_addr_t *destination) +{ + struct mdns_host* mdns; + struct mdns_outpacket pkt; + struct mdns_domain domain; + u8_t i; + err_t res; + + mdns = NETIF_TO_HOST(netif); + + memset(&pkt, 0, sizeof(pkt)); + pkt.netif = netif; + + /* Add unicast questions with rtype ANY for all our desired records */ + mdns_build_host_domain(&domain, mdns); + res = mdns_add_question(&pkt, &domain, DNS_RRTYPE_ANY, DNS_RRCLASS_IN, 1); + if (res != ERR_OK) { + goto cleanup; + } + pkt.questions++; + for (i = 0; i < MDNS_MAX_SERVICES; i++) { + struct mdns_service* service = mdns->services[i]; + if (!service) { + continue; + } + mdns_build_service_domain(&domain, service, 1); + res = mdns_add_question(&pkt, &domain, DNS_RRTYPE_ANY, DNS_RRCLASS_IN, 1); + if (res != ERR_OK) { + goto cleanup; + } + pkt.questions++; + } + + /* Add answers to the questions above into the authority section for tiebreaking */ +#if LWIP_IPV4 + if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) { + pkt.host_replies = REPLY_HOST_A; + } +#endif +#if LWIP_IPV6 + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { + pkt.host_replies |= 
REPLY_HOST_AAAA; + } + } +#endif + + for (i = 0; i < MDNS_MAX_SERVICES; i++) { + struct mdns_service *serv = mdns->services[i]; + if (serv) { + pkt.serv_replies[i] = REPLY_SERVICE_SRV | REPLY_SERVICE_TXT; + } + } + + pkt.tx_id = 0; + pkt.dest_port = LWIP_IANA_PORT_MDNS; + SMEMCPY(&pkt.dest_addr, destination, sizeof(pkt.dest_addr)); + res = mdns_send_outpacket(&pkt, 0); + +cleanup: + if (pkt.pbuf) { + pbuf_free(pkt.pbuf); + pkt.pbuf = NULL; + } + return res; +} + +/** + * Timer callback for probing network. + */ +static void +mdns_probe(void* arg) +{ + struct netif *netif = (struct netif *)arg; + struct mdns_host* mdns = NETIF_TO_HOST(netif); + + if(mdns->probes_sent >= MDNS_PROBE_COUNT) { + /* probing successful, announce the new name */ + mdns->probing_state = MDNS_PROBING_COMPLETE; + mdns_resp_announce(netif); + if (mdns_name_result_cb != NULL) { + mdns_name_result_cb(netif, MDNS_PROBING_SUCCESSFUL); + } + } else { +#if LWIP_IPV4 + /*if ipv4 wait with probing until address is set*/ + if (!ip4_addr_isany_val(*netif_ip4_addr(netif)) && + mdns_send_probe(netif, IP4_ADDR_ANY) == ERR_OK) +#endif + { +#if LWIP_IPV6 + if (mdns_send_probe(netif, IP6_ADDR_ANY) == ERR_OK) +#endif + { + mdns->probes_sent++; + } + } + sys_timeout(MDNS_PROBE_DELAY_MS, mdns_probe, netif); + } +} + +/** + * @ingroup mdns + * Activate MDNS responder for a network interface. + * @param netif The network interface to activate. + * @param hostname Name to use. Queries for <hostname>.local will be answered + * with the IP addresses of the netif. The hostname will be copied, the + * given pointer can be on the stack. + * @param dns_ttl Validity time in seconds to send out for IP address data in DNS replies + * @return ERR_OK if netif was added, an err_t otherwise + */ +err_t +mdns_resp_add_netif(struct netif *netif, const char *hostname, u32_t dns_ttl) +{ + err_t res; + struct mdns_host *mdns; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("mdns_resp_add_netif: netif != NULL", (netif != NULL), return ERR_VAL); + LWIP_ERROR("mdns_resp_add_netif: Hostname too long", (strlen(hostname) <= MDNS_LABEL_MAXLEN), return ERR_VAL); + + LWIP_ASSERT("mdns_resp_add_netif: Double add", NETIF_TO_HOST(netif) == NULL); + mdns = (struct mdns_host *) mem_calloc(1, sizeof(struct mdns_host)); + LWIP_ERROR("mdns_resp_add_netif: Alloc failed", (mdns != NULL), return ERR_MEM); + + netif_set_client_data(netif, mdns_netif_client_id, mdns); + + MEMCPY(&mdns->name, hostname, LWIP_MIN(MDNS_LABEL_MAXLEN, strlen(hostname))); + mdns->dns_ttl = dns_ttl; + mdns->probes_sent = 0; + mdns->probing_state = MDNS_PROBING_NOT_STARTED; + + /* Join multicast groups */ +#if LWIP_IPV4 + res = igmp_joingroup_netif(netif, ip_2_ip4(&v4group)); + if (res != ERR_OK) { + goto cleanup; + } +#endif +#if LWIP_IPV6 + res = mld6_joingroup_netif(netif, ip_2_ip6(&v6group)); + if (res != ERR_OK) { + goto cleanup; + } +#endif + + mdns_resp_restart(netif); + + return ERR_OK; + +cleanup: + mem_free(mdns); + netif_set_client_data(netif, mdns_netif_client_id, NULL); + return res; +} + +/** + * @ingroup mdns + * Stop responding to MDNS queries on this interface, leave multicast groups, + * and free the helper structure and any of its services. + * @param netif The network interface to remove. 
+ * @return ERR_OK if netif was removed, an err_t otherwise + */ +err_t +mdns_resp_remove_netif(struct netif *netif) +{ + int i; + struct mdns_host *mdns; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mdns_resp_remove_netif: Null pointer", netif); + mdns = NETIF_TO_HOST(netif); + LWIP_ERROR("mdns_resp_remove_netif: Not an active netif", (mdns != NULL), return ERR_VAL); + + if (mdns->probing_state == MDNS_PROBING_ONGOING) { + sys_untimeout(mdns_probe, netif); + } + + for (i = 0; i < MDNS_MAX_SERVICES; i++) { + struct mdns_service *service = mdns->services[i]; + if (service) { + mem_free(service); + } + } + + /* Leave multicast groups */ +#if LWIP_IPV4 + igmp_leavegroup_netif(netif, ip_2_ip4(&v4group)); +#endif +#if LWIP_IPV6 + mld6_leavegroup_netif(netif, ip_2_ip6(&v6group)); +#endif + + mem_free(mdns); + netif_set_client_data(netif, mdns_netif_client_id, NULL); + return ERR_OK; +} + +/** + * @ingroup mdns + * Update MDNS hostname for a network interface. + * @param netif The network interface to activate. + * @param hostname Name to use. Queries for <hostname>.local will be answered + * with the IP addresses of the netif. The hostname will be copied, the + * given pointer can be on the stack. + * @return ERR_OK if name could be set on netif, an err_t otherwise + */ +err_t +mdns_resp_rename_netif(struct netif *netif, const char *hostname) +{ + struct mdns_host *mdns; + size_t len; + + LWIP_ASSERT_CORE_LOCKED(); + len = strlen(hostname); + LWIP_ERROR("mdns_resp_rename_netif: netif != NULL", (netif != NULL), return ERR_VAL); + LWIP_ERROR("mdns_resp_rename_netif: Hostname too long", (len <= MDNS_LABEL_MAXLEN), return ERR_VAL); + mdns = NETIF_TO_HOST(netif); + LWIP_ERROR("mdns_resp_rename_netif: Not an mdns netif", (mdns != NULL), return ERR_VAL); + + MEMCPY(&mdns->name, hostname, LWIP_MIN(MDNS_LABEL_MAXLEN, len)); + mdns->name[len] = '\0'; /* null termination in case new name is shorter than previous */ + + mdns_resp_restart(netif); + + return ERR_OK; +} + +/** + * @ingroup mdns + * Add a service to the selected network interface. + * @param netif The network interface to publish this service on + * @param name The name of the service + * @param service The service type, like "_http" + * @param proto The service protocol, DNSSD_PROTO_TCP for TCP ("_tcp") and DNSSD_PROTO_UDP + * for others ("_udp") + * @param port The port the service listens to + * @param dns_ttl Validity time in seconds to send out for service data in DNS replies + * @param txt_fn Callback to get TXT data. Will be called each time a TXT reply is created to + * allow dynamic replies. 
+ * @param txt_data Userdata pointer for txt_fn + * @return service_id if the service was added to the netif, an err_t otherwise + */ +s8_t +mdns_resp_add_service(struct netif *netif, const char *name, const char *service, enum mdns_sd_proto proto, u16_t port, u32_t dns_ttl, service_get_txt_fn_t txt_fn, void *txt_data) +{ + s8_t i; + s8_t slot = -1; + struct mdns_service *srv; + struct mdns_host *mdns; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mdns_resp_add_service: netif != NULL", netif); + mdns = NETIF_TO_HOST(netif); + LWIP_ERROR("mdns_resp_add_service: Not an mdns netif", (mdns != NULL), return ERR_VAL); + + LWIP_ERROR("mdns_resp_add_service: Name too long", (strlen(name) <= MDNS_LABEL_MAXLEN), return ERR_VAL); + LWIP_ERROR("mdns_resp_add_service: Service too long", (strlen(service) <= MDNS_LABEL_MAXLEN), return ERR_VAL); + LWIP_ERROR("mdns_resp_add_service: Bad proto (need TCP or UDP)", (proto == DNSSD_PROTO_TCP || proto == DNSSD_PROTO_UDP), return ERR_VAL); + + for (i = 0; i < MDNS_MAX_SERVICES; i++) { + if (mdns->services[i] == NULL) { + slot = i; + break; + } + } + LWIP_ERROR("mdns_resp_add_service: Service list full (increase MDNS_MAX_SERVICES)", (slot >= 0), return ERR_MEM); + + srv = (struct mdns_service *)mem_calloc(1, sizeof(struct mdns_service)); + LWIP_ERROR("mdns_resp_add_service: Alloc failed", (srv != NULL), return ERR_MEM); + + MEMCPY(&srv->name, name, LWIP_MIN(MDNS_LABEL_MAXLEN, strlen(name))); + MEMCPY(&srv->service, service, LWIP_MIN(MDNS_LABEL_MAXLEN, strlen(service))); + srv->txt_fn = txt_fn; + srv->txt_userdata = txt_data; + srv->proto = (u16_t)proto; + srv->port = port; + srv->dns_ttl = dns_ttl; + + mdns->services[slot] = srv; + + mdns_resp_restart(netif); + + return slot; +} + +/** + * @ingroup mdns + * Delete a service on the selected network interface. + * @param netif The network interface on which service should be removed + * @param slot The service slot number returned by mdns_resp_add_service + * @return ERR_OK if the service was removed from the netif, an err_t otherwise + */ +err_t +mdns_resp_del_service(struct netif *netif, s8_t slot) +{ + struct mdns_host *mdns; + struct mdns_service *srv; + LWIP_ASSERT("mdns_resp_del_service: netif != NULL", netif); + mdns = NETIF_TO_HOST(netif); + LWIP_ERROR("mdns_resp_del_service: Not an mdns netif", (mdns != NULL), return ERR_VAL); + LWIP_ERROR("mdns_resp_del_service: Invalid Service ID", (slot >= 0) && (slot < MDNS_MAX_SERVICES), return ERR_VAL); + LWIP_ERROR("mdns_resp_del_service: Invalid Service ID", (mdns->services[slot] != NULL), return ERR_VAL); + + srv = mdns->services[slot]; + mdns->services[slot] = NULL; + mem_free(srv); + return ERR_OK; +} + +/** + * @ingroup mdns + * Update name for an MDNS service. + * @param netif The network interface to activate. 
+ * @param slot The service slot number returned by mdns_resp_add_service + * @param name The new name for the service + * @return ERR_OK if name could be set on service, an err_t otherwise + */ +err_t +mdns_resp_rename_service(struct netif *netif, s8_t slot, const char *name) +{ + struct mdns_service *srv; + struct mdns_host *mdns; + size_t len; + + LWIP_ASSERT_CORE_LOCKED(); + len = strlen(name); + LWIP_ASSERT("mdns_resp_rename_service: netif != NULL", netif); + mdns = NETIF_TO_HOST(netif); + LWIP_ERROR("mdns_resp_rename_service: Not an mdns netif", (mdns != NULL), return ERR_VAL); + LWIP_ERROR("mdns_resp_rename_service: Name too long", (len <= MDNS_LABEL_MAXLEN), return ERR_VAL); + LWIP_ERROR("mdns_resp_rename_service: Invalid Service ID", (slot >= 0) && (slot < MDNS_MAX_SERVICES), return ERR_VAL); + LWIP_ERROR("mdns_resp_rename_service: Invalid Service ID", (mdns->services[slot] != NULL), return ERR_VAL); + + srv = mdns->services[slot]; + + MEMCPY(&srv->name, name, LWIP_MIN(MDNS_LABEL_MAXLEN, len)); + srv->name[len] = '\0'; /* null termination in case new name is shorter than previous */ + + mdns_resp_restart(netif); + + return ERR_OK; +} + +/** + * @ingroup mdns + * Call this function from inside the service_get_txt_fn_t callback to add text data. + * Buffer for TXT data is 256 bytes, and each field is prefixed with a length byte. + * @param service The service provided to the get_txt callback + * @param txt String to add to the TXT field. + * @param txt_len Length of string + * @return ERR_OK if the string was added to the reply, an err_t otherwise + */ +err_t +mdns_resp_add_service_txtitem(struct mdns_service *service, const char *txt, u8_t txt_len) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mdns_resp_add_service_txtitem: service != NULL", service); + + /* Use a mdns_domain struct to store txt chunks since it is the same encoding */ + return mdns_domain_add_label(&service->txtdata, txt, txt_len); +} + +/** + * @ingroup mdns + * Send unsolicited answer containing all our known data + * @param netif The network interface to send on + */ +void +mdns_resp_announce(struct netif *netif) +{ + struct mdns_host* mdns; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("mdns_resp_announce: netif != NULL", (netif != NULL), return); + + mdns = NETIF_TO_HOST(netif); + if (mdns == NULL) { + return; + } + + if (mdns->probing_state == MDNS_PROBING_COMPLETE) { + /* Announce on IPv6 and IPv4 */ +#if LWIP_IPV6 + mdns_announce(netif, IP6_ADDR_ANY); +#endif +#if LWIP_IPV4 + if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) { + mdns_announce(netif, IP4_ADDR_ANY); + } +#endif + } /* else: ip address changed while probing was ongoing? @todo reset counter to restart? */ +} + +/** Register a callback function that is called if probing is completed successfully + * or with a conflict. */ +void +mdns_resp_register_name_result_cb(mdns_name_result_cb_t cb) +{ + mdns_name_result_cb = cb; +} + +/** + * @ingroup mdns + * Restart mdns responder. 
Call this when cable is connected after being disconnected or + * administrative interface is set up after being down + * @param netif The network interface to send on + */ +void +mdns_resp_restart(struct netif *netif) +{ + struct mdns_host* mdns; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("mdns_resp_restart: netif != NULL", (netif != NULL), return); + + mdns = NETIF_TO_HOST(netif); + if (mdns == NULL) { + return; + } + + if (mdns->probing_state == MDNS_PROBING_ONGOING) { + sys_untimeout(mdns_probe, netif); + } + /* @todo if we've failed 15 times within a 10 second period we MUST wait 5 seconds (or wait 5 seconds every time except first)*/ + mdns->probes_sent = 0; + mdns->probing_state = MDNS_PROBING_ONGOING; + sys_timeout(MDNS_INITIAL_PROBE_DELAY_MS, mdns_probe, netif); +} + +/** + * @ingroup mdns + * Initiate MDNS responder. Will open UDP sockets on port 5353 + */ +void +mdns_resp_init(void) +{ + err_t res; + + /* LWIP_ASSERT_CORE_LOCKED(); is checked by udp_new() */ + + mdns_pcb = udp_new_ip_type(IPADDR_TYPE_ANY); + LWIP_ASSERT("Failed to allocate pcb", mdns_pcb != NULL); +#if LWIP_MULTICAST_TX_OPTIONS + udp_set_multicast_ttl(mdns_pcb, MDNS_TTL); +#else + mdns_pcb->ttl = MDNS_TTL; +#endif + res = udp_bind(mdns_pcb, IP_ANY_TYPE, LWIP_IANA_PORT_MDNS); + LWIP_UNUSED_ARG(res); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("Failed to bind pcb", res == ERR_OK); + udp_recv(mdns_pcb, mdns_recv, NULL); + + mdns_netif_client_id = netif_alloc_client_data_id(); + +#if MDNS_RESP_USENETIF_EXTCALLBACK + /* register for netif events when started on first netif */ + netif_add_ext_callback(&netif_callback, mdns_netif_ext_status_callback); +#endif +} + +#endif /* LWIP_MDNS_RESPONDER */ diff --git a/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c b/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c new file mode 100644 index 00000000..269f4a49 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c @@ -0,0 +1,1463 @@ +/** + * @file + * MQTT client + * + * @defgroup mqtt MQTT client + * @ingroup apps + * @verbinclude mqtt_client.txt + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * This file is part of the lwIP TCP/IP stack + * + * Author: Erik Andersson + * + * + * @todo: + * - Handle large outgoing payloads for PUBLISH messages + * - Fix restriction of a single topic in each (UN)SUBSCRIBE message (protocol has support for multiple topics) + * - Add support for legacy MQTT protocol version + * + * Please coordinate changes and requests with Erik Andersson + * Erik Andersson + * + */ +#include "lwip/apps/mqtt.h" +#include "lwip/apps/mqtt_priv.h" +#include "lwip/timeouts.h" +#include "lwip/ip_addr.h" +#include "lwip/mem.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/altcp.h" +#include "lwip/altcp_tcp.h" +#include "lwip/altcp_tls.h" +#include + +#if LWIP_TCP && LWIP_CALLBACK_API + +/** + * MQTT_DEBUG: Default is off. + */ +#if !defined MQTT_DEBUG || defined __DOXYGEN__ +#define MQTT_DEBUG LWIP_DBG_OFF +#endif + +#define MQTT_DEBUG_TRACE (MQTT_DEBUG | LWIP_DBG_TRACE) +#define MQTT_DEBUG_STATE (MQTT_DEBUG | LWIP_DBG_STATE) +#define MQTT_DEBUG_WARN (MQTT_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define MQTT_DEBUG_WARN_STATE (MQTT_DEBUG | LWIP_DBG_LEVEL_WARNING | LWIP_DBG_STATE) +#define MQTT_DEBUG_SERIOUS (MQTT_DEBUG | LWIP_DBG_LEVEL_SERIOUS) + + + +/** + * MQTT client connection states + */ +enum { + TCP_DISCONNECTED, + TCP_CONNECTING, + MQTT_CONNECTING, + MQTT_CONNECTED +}; + +/** + * MQTT control message types + */ +enum mqtt_message_type { + MQTT_MSG_TYPE_CONNECT = 1, + MQTT_MSG_TYPE_CONNACK = 2, + MQTT_MSG_TYPE_PUBLISH = 3, + MQTT_MSG_TYPE_PUBACK = 4, + MQTT_MSG_TYPE_PUBREC = 5, + MQTT_MSG_TYPE_PUBREL = 6, + MQTT_MSG_TYPE_PUBCOMP = 7, + MQTT_MSG_TYPE_SUBSCRIBE = 8, + MQTT_MSG_TYPE_SUBACK = 9, + MQTT_MSG_TYPE_UNSUBSCRIBE = 10, + MQTT_MSG_TYPE_UNSUBACK = 11, + MQTT_MSG_TYPE_PINGREQ = 12, + MQTT_MSG_TYPE_PINGRESP = 13, + MQTT_MSG_TYPE_DISCONNECT = 14 +}; + +/** Helpers to extract control packet type and qos from first byte in fixed header */ +#define MQTT_CTL_PACKET_TYPE(fixed_hdr_byte0) ((fixed_hdr_byte0 & 0xf0) >> 4) +#define MQTT_CTL_PACKET_QOS(fixed_hdr_byte0) ((fixed_hdr_byte0 & 0x6) >> 1) + +/** + * MQTT connect flags, only used in CONNECT message + */ +enum mqtt_connect_flag { + MQTT_CONNECT_FLAG_USERNAME = 1 << 7, + MQTT_CONNECT_FLAG_PASSWORD = 1 << 6, + MQTT_CONNECT_FLAG_WILL_RETAIN = 1 << 5, + MQTT_CONNECT_FLAG_WILL = 1 << 2, + MQTT_CONNECT_FLAG_CLEAN_SESSION = 1 << 1 +}; + + +static void mqtt_cyclic_timer(void *arg); + +#if defined(LWIP_DEBUG) +static const char *const mqtt_message_type_str[15] = { + "UNDEFINED", + "CONNECT", + "CONNACK", + "PUBLISH", + "PUBACK", + "PUBREC", + "PUBREL", + "PUBCOMP", + "SUBSCRIBE", + "SUBACK", + "UNSUBSCRIBE", + "UNSUBACK", + "PINGREQ", + "PINGRESP", + "DISCONNECT" +}; + +/** + * Message type value to string + * @param msg_type see enum mqtt_message_type + * + * @return Control message type text string + */ +static const char * +mqtt_msg_type_to_str(u8_t msg_type) +{ + if (msg_type >= LWIP_ARRAYSIZE(mqtt_message_type_str)) { + msg_type = 0; + } + return mqtt_message_type_str[msg_type]; +} + +#endif + + +/** + * Generate MQTT packet identifier + * @param client MQTT client + * @return New packet identifier, range 1 to 65535 + */ +static u16_t +msg_generate_packet_id(mqtt_client_t *client) +{ + client->pkt_id_seq++; + if (client->pkt_id_seq == 0) { + client->pkt_id_seq++; + } + return client->pkt_id_seq; +} + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Output ring buffer */ + +/** Add single item to ring buffer */ +static void 
+mqtt_ringbuf_put(struct mqtt_ringbuf_t *rb, u8_t item) +{ + rb->buf[rb->put] = item; + rb->put++; + if (rb->put >= MQTT_OUTPUT_RINGBUF_SIZE) { + rb->put = 0; + } +} + +/** Return pointer to ring buffer get position */ +static u8_t * +mqtt_ringbuf_get_ptr(struct mqtt_ringbuf_t *rb) +{ + return &rb->buf[rb->get]; +} + +static void +mqtt_ringbuf_advance_get_idx(struct mqtt_ringbuf_t *rb, u16_t len) +{ + LWIP_ASSERT("mqtt_ringbuf_advance_get_idx: len < MQTT_OUTPUT_RINGBUF_SIZE", len < MQTT_OUTPUT_RINGBUF_SIZE); + + rb->get += len; + if (rb->get >= MQTT_OUTPUT_RINGBUF_SIZE) { + rb->get = rb->get - MQTT_OUTPUT_RINGBUF_SIZE; + } +} + +/** Return number of bytes in ring buffer */ +static u16_t +mqtt_ringbuf_len(struct mqtt_ringbuf_t *rb) +{ + u32_t len = rb->put - rb->get; + if (len > 0xFFFF) { + len += MQTT_OUTPUT_RINGBUF_SIZE; + } + return (u16_t)len; +} + +/** Return number of bytes free in ring buffer */ +#define mqtt_ringbuf_free(rb) (MQTT_OUTPUT_RINGBUF_SIZE - mqtt_ringbuf_len(rb)) + +/** Return number of bytes possible to read without wrapping around */ +#define mqtt_ringbuf_linear_read_length(rb) LWIP_MIN(mqtt_ringbuf_len(rb), (MQTT_OUTPUT_RINGBUF_SIZE - (rb)->get)) + +/** + * Try send as many bytes as possible from output ring buffer + * @param rb Output ring buffer + * @param tpcb TCP connection handle + */ +static void +mqtt_output_send(struct mqtt_ringbuf_t *rb, struct altcp_pcb *tpcb) +{ + err_t err; + u8_t wrap = 0; + u16_t ringbuf_lin_len = mqtt_ringbuf_linear_read_length(rb); + u16_t send_len = altcp_sndbuf(tpcb); + LWIP_ASSERT("mqtt_output_send: tpcb != NULL", tpcb != NULL); + + if (send_len == 0 || ringbuf_lin_len == 0) { + return; + } + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_output_send: tcp_sndbuf: %d bytes, ringbuf_linear_available: %d, get %d, put %d\n", + send_len, ringbuf_lin_len, rb->get, rb->put)); + + if (send_len > ringbuf_lin_len) { + /* Space in TCP output buffer is larger than available in ring buffer linear portion */ + send_len = ringbuf_lin_len; + /* Wrap around if more data in ring buffer after linear portion */ + wrap = (mqtt_ringbuf_len(rb) > ringbuf_lin_len); + } + err = altcp_write(tpcb, mqtt_ringbuf_get_ptr(rb), send_len, TCP_WRITE_FLAG_COPY | (wrap ? 
TCP_WRITE_FLAG_MORE : 0)); + if ((err == ERR_OK) && wrap) { + mqtt_ringbuf_advance_get_idx(rb, send_len); + /* Use the lesser one of ring buffer linear length and TCP send buffer size */ + send_len = LWIP_MIN(altcp_sndbuf(tpcb), mqtt_ringbuf_linear_read_length(rb)); + err = altcp_write(tpcb, mqtt_ringbuf_get_ptr(rb), send_len, TCP_WRITE_FLAG_COPY); + } + + if (err == ERR_OK) { + mqtt_ringbuf_advance_get_idx(rb, send_len); + /* Flush */ + altcp_output(tpcb); + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_output_send: Send failed with err %d (\"%s\")\n", err, lwip_strerr(err))); + } +} + + + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Request queue */ + +/** + * Create request item + * @param r_objs Pointer to request objects + * @param r_objs_len Number of array entries + * @param pkt_id Packet identifier of request + * @param cb Packet callback to call when requests lifetime ends + * @param arg Parameter following callback + * @return Request or NULL if failed to create + */ +static struct mqtt_request_t * +mqtt_create_request(struct mqtt_request_t *r_objs, size_t r_objs_len, u16_t pkt_id, mqtt_request_cb_t cb, void *arg) +{ + struct mqtt_request_t *r = NULL; + u8_t n; + LWIP_ASSERT("mqtt_create_request: r_objs != NULL", r_objs != NULL); + for (n = 0; n < r_objs_len; n++) { + /* Item point to itself if not in use */ + if (r_objs[n].next == &r_objs[n]) { + r = &r_objs[n]; + r->next = NULL; + r->cb = cb; + r->arg = arg; + r->pkt_id = pkt_id; + break; + } + } + return r; +} + + +/** + * Append request to pending request queue + * @param tail Pointer to request queue tail pointer + * @param r Request to append + */ +static void +mqtt_append_request(struct mqtt_request_t **tail, struct mqtt_request_t *r) +{ + struct mqtt_request_t *head = NULL; + s16_t time_before = 0; + struct mqtt_request_t *iter; + + LWIP_ASSERT("mqtt_append_request: tail != NULL", tail != NULL); + + /* Iterate trough queue to find head, and count total timeout time */ + for (iter = *tail; iter != NULL; iter = iter->next) { + time_before += iter->timeout_diff; + head = iter; + } + + LWIP_ASSERT("mqtt_append_request: time_before <= MQTT_REQ_TIMEOUT", time_before <= MQTT_REQ_TIMEOUT); + r->timeout_diff = MQTT_REQ_TIMEOUT - time_before; + if (head == NULL) { + *tail = r; + } else { + head->next = r; + } +} + + +/** + * Delete request item + * @param r Request item to delete + */ +static void +mqtt_delete_request(struct mqtt_request_t *r) +{ + if (r != NULL) { + r->next = r; + } +} + +/** + * Remove a request item with a specific packet identifier from request queue + * @param tail Pointer to request queue tail pointer + * @param pkt_id Packet identifier of request to take + * @return Request item if found, NULL if not + */ +static struct mqtt_request_t * +mqtt_take_request(struct mqtt_request_t **tail, u16_t pkt_id) +{ + struct mqtt_request_t *iter = NULL, *prev = NULL; + LWIP_ASSERT("mqtt_take_request: tail != NULL", tail != NULL); + /* Search all request for pkt_id */ + for (iter = *tail; iter != NULL; iter = iter->next) { + if (iter->pkt_id == pkt_id) { + break; + } + prev = iter; + } + + /* If request was found */ + if (iter != NULL) { + /* unchain */ + if (prev == NULL) { + *tail = iter->next; + } else { + prev->next = iter->next; + } + /* If exists, add remaining timeout time for the request to next */ + if (iter->next != NULL) { + iter->next->timeout_diff += iter->timeout_diff; + } + iter->next = NULL; + } + return iter; +} + +/** + * 
Handle requests timeout + * @param tail Pointer to request queue tail pointer + * @param t Time since last call in seconds + */ +static void +mqtt_request_time_elapsed(struct mqtt_request_t **tail, u8_t t) +{ + struct mqtt_request_t *r; + LWIP_ASSERT("mqtt_request_time_elapsed: tail != NULL", tail != NULL); + r = *tail; + while (t > 0 && r != NULL) { + if (t >= r->timeout_diff) { + t -= (u8_t)r->timeout_diff; + /* Unchain */ + *tail = r->next; + /* Notify upper layer about timeout */ + if (r->cb != NULL) { + r->cb(r->arg, ERR_TIMEOUT); + } + mqtt_delete_request(r); + /* Tail might be be modified in callback, so re-read it in every iteration */ + r = *(struct mqtt_request_t *const volatile *)tail; + } else { + r->timeout_diff -= t; + t = 0; + } + } +} + +/** + * Free all request items + * @param tail Pointer to request queue tail pointer + */ +static void +mqtt_clear_requests(struct mqtt_request_t **tail) +{ + struct mqtt_request_t *iter, *next; + LWIP_ASSERT("mqtt_clear_requests: tail != NULL", tail != NULL); + for (iter = *tail; iter != NULL; iter = next) { + next = iter->next; + mqtt_delete_request(iter); + } + *tail = NULL; +} +/** + * Initialize all request items + * @param r_objs Pointer to request objects + * @param r_objs_len Number of array entries + */ +static void +mqtt_init_requests(struct mqtt_request_t *r_objs, size_t r_objs_len) +{ + u8_t n; + LWIP_ASSERT("mqtt_init_requests: r_objs != NULL", r_objs != NULL); + for (n = 0; n < r_objs_len; n++) { + /* Item pointing to itself indicates unused */ + r_objs[n].next = &r_objs[n]; + } +} + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Output message build helpers */ + + +static void +mqtt_output_append_u8(struct mqtt_ringbuf_t *rb, u8_t value) +{ + mqtt_ringbuf_put(rb, value); +} + +static +void mqtt_output_append_u16(struct mqtt_ringbuf_t *rb, u16_t value) +{ + mqtt_ringbuf_put(rb, value >> 8); + mqtt_ringbuf_put(rb, value & 0xff); +} + +static void +mqtt_output_append_buf(struct mqtt_ringbuf_t *rb, const void *data, u16_t length) +{ + u16_t n; + for (n = 0; n < length; n++) { + mqtt_ringbuf_put(rb, ((const u8_t *)data)[n]); + } +} + +static void +mqtt_output_append_string(struct mqtt_ringbuf_t *rb, const char *str, u16_t length) +{ + u16_t n; + mqtt_ringbuf_put(rb, length >> 8); + mqtt_ringbuf_put(rb, length & 0xff); + for (n = 0; n < length; n++) { + mqtt_ringbuf_put(rb, str[n]); + } +} + +/** + * Append fixed header + * @param rb Output ring buffer + * @param msg_type see enum mqtt_message_type + * @param fdup MQTT DUP flag + * @param fqos MQTT QoS field + * @param fretain MQTT retain flag + * @param r_length Remaining length after fixed header + */ + +static void +mqtt_output_append_fixed_header(struct mqtt_ringbuf_t *rb, u8_t msg_type, u8_t fdup, + u8_t fqos, u8_t fretain, u16_t r_length) +{ + /* Start with control byte */ + mqtt_output_append_u8(rb, (((msg_type & 0x0f) << 4) | ((fdup & 1) << 3) | ((fqos & 3) << 1) | (fretain & 1))); + /* Encode remaining length field */ + do { + mqtt_output_append_u8(rb, (r_length & 0x7f) | (r_length >= 128 ? 
0x80 : 0)); + r_length >>= 7; + } while (r_length > 0); +} + + +/** + * Check output buffer space + * @param rb Output ring buffer + * @param r_length Remaining length after fixed header + * @return 1 if message will fit, 0 if not enough buffer space + */ +static u8_t +mqtt_output_check_space(struct mqtt_ringbuf_t *rb, u16_t r_length) +{ + /* Start with length of type byte + remaining length */ + u16_t total_len = 1 + r_length; + + LWIP_ASSERT("mqtt_output_check_space: rb != NULL", rb != NULL); + + /* Calculate number of required bytes to contain the remaining bytes field and add to total*/ + do { + total_len++; + r_length >>= 7; + } while (r_length > 0); + + return (total_len <= mqtt_ringbuf_free(rb)); +} + + +/** + * Close connection to server + * @param client MQTT client + * @param reason Reason for disconnection + */ +static void +mqtt_close(mqtt_client_t *client, mqtt_connection_status_t reason) +{ + LWIP_ASSERT("mqtt_close: client != NULL", client != NULL); + + /* Bring down TCP connection if not already done */ + if (client->conn != NULL) { + err_t res; + altcp_recv(client->conn, NULL); + altcp_err(client->conn, NULL); + altcp_sent(client->conn, NULL); + res = altcp_close(client->conn); + if (res != ERR_OK) { + altcp_abort(client->conn); + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_close: Close err=%s\n", lwip_strerr(res))); + } + client->conn = NULL; + } + + /* Remove all pending requests */ + mqtt_clear_requests(&client->pend_req_queue); + /* Stop cyclic timer */ + sys_untimeout(mqtt_cyclic_timer, client); + + /* Notify upper layer of disconnection if changed state */ + if (client->conn_state != TCP_DISCONNECTED) { + + client->conn_state = TCP_DISCONNECTED; + if (client->connect_cb != NULL) { + client->connect_cb(client, client->connect_arg, reason); + } + } +} + + +/** + * Interval timer, called every MQTT_CYCLIC_TIMER_INTERVAL seconds in MQTT_CONNECTING and MQTT_CONNECTED states + * @param arg MQTT client + */ +static void +mqtt_cyclic_timer(void *arg) +{ + u8_t restart_timer = 1; + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_ASSERT("mqtt_cyclic_timer: client != NULL", client != NULL); + + if (client->conn_state == MQTT_CONNECTING) { + client->cyclic_tick++; + if ((client->cyclic_tick * MQTT_CYCLIC_TIMER_INTERVAL) >= MQTT_CONNECT_TIMOUT) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_cyclic_timer: CONNECT attempt to server timed out\n")); + /* Disconnect TCP */ + mqtt_close(client, MQTT_CONNECT_TIMEOUT); + restart_timer = 0; + } + } else if (client->conn_state == MQTT_CONNECTED) { + /* Handle timeout for pending requests */ + mqtt_request_time_elapsed(&client->pend_req_queue, MQTT_CYCLIC_TIMER_INTERVAL); + + /* keep_alive > 0 means keep alive functionality shall be used */ + if (client->keep_alive > 0) { + + client->server_watchdog++; + /* If reception from server has been idle for 1.5*keep_alive time, server is considered unresponsive */ + if ((client->server_watchdog * MQTT_CYCLIC_TIMER_INTERVAL) > (client->keep_alive + client->keep_alive / 2)) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_cyclic_timer: Server incoming keep-alive timeout\n")); + mqtt_close(client, MQTT_CONNECT_TIMEOUT); + restart_timer = 0; + } + + /* If time for a keep alive message to be sent, transmission has been idle for keep_alive time */ + if ((client->cyclic_tick * MQTT_CYCLIC_TIMER_INTERVAL) >= client->keep_alive) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_cyclic_timer: Sending keep-alive message to server\n")); + if (mqtt_output_check_space(&client->output, 0) != 0) { + 
mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_PINGREQ, 0, 0, 0, 0); + client->cyclic_tick = 0; + } + } else { + client->cyclic_tick++; + } + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_cyclic_timer: Timer should not be running in state %d\n", client->conn_state)); + restart_timer = 0; + } + if (restart_timer) { + sys_timeout(MQTT_CYCLIC_TIMER_INTERVAL * 1000, mqtt_cyclic_timer, arg); + } +} + + +/** + * Send PUBACK, PUBREC or PUBREL response message + * @param client MQTT client + * @param msg PUBACK, PUBREC or PUBREL + * @param pkt_id Packet identifier + * @param qos QoS value + * @return ERR_OK if successful, ERR_MEM if out of memory + */ +static err_t +pub_ack_rec_rel_response(mqtt_client_t *client, u8_t msg, u16_t pkt_id, u8_t qos) +{ + err_t err = ERR_OK; + if (mqtt_output_check_space(&client->output, 2)) { + mqtt_output_append_fixed_header(&client->output, msg, 0, qos, 0, 2); + mqtt_output_append_u16(&client->output, pkt_id); + mqtt_output_send(&client->output, client->conn); + } else { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("pub_ack_rec_rel_response: OOM creating response: %s with pkt_id: %d\n", + mqtt_msg_type_to_str(msg), pkt_id)); + err = ERR_MEM; + } + return err; +} + +/** + * Subscribe response from server + * @param r Matching request + * @param result Result code from server + */ +static void +mqtt_incomming_suback(struct mqtt_request_t *r, u8_t result) +{ + if (r->cb != NULL) { + r->cb(r->arg, result < 3 ? ERR_OK : ERR_ABRT); + } +} + + +/** + * Complete MQTT message received or buffer full + * @param client MQTT client + * @param fixed_hdr_idx header index + * @param length length received part + * @param remaining_length Remaining length of complete message + */ +static mqtt_connection_status_t +mqtt_message_received(mqtt_client_t *client, u8_t fixed_hdr_idx, u16_t length, u32_t remaining_length) +{ + mqtt_connection_status_t res = MQTT_CONNECT_ACCEPTED; + + u8_t *var_hdr_payload = client->rx_buffer + fixed_hdr_idx; + size_t var_hdr_payload_bufsize = sizeof(client->rx_buffer) - fixed_hdr_idx; + + /* Control packet type */ + u8_t pkt_type = MQTT_CTL_PACKET_TYPE(client->rx_buffer[0]); + u16_t pkt_id = 0; + + LWIP_ASSERT("client->msg_idx < MQTT_VAR_HEADER_BUFFER_LEN", client->msg_idx < MQTT_VAR_HEADER_BUFFER_LEN); + LWIP_ASSERT("fixed_hdr_idx <= client->msg_idx", fixed_hdr_idx <= client->msg_idx); + LWIP_ERROR("buffer length mismatch", fixed_hdr_idx + length <= MQTT_VAR_HEADER_BUFFER_LEN, + return MQTT_CONNECT_DISCONNECTED); + + if (pkt_type == MQTT_MSG_TYPE_CONNACK) { + if (client->conn_state == MQTT_CONNECTING) { + if (length < 2) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short CONNACK message\n")); + goto out_disconnect; + } + /* Get result code from CONNACK */ + res = (mqtt_connection_status_t)var_hdr_payload[1]; + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: Connect response code %d\n", res)); + if (res == MQTT_CONNECT_ACCEPTED) { + /* Reset cyclic_tick when changing to connected state */ + client->cyclic_tick = 0; + client->conn_state = MQTT_CONNECTED; + /* Notify upper layer */ + if (client->connect_cb != 0) { + client->connect_cb(client, client->connect_arg, res); + } + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Received CONNACK in connected state\n")); + } + } else if (pkt_type == MQTT_MSG_TYPE_PINGRESP) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ( "mqtt_message_received: Received PINGRESP from server\n")); + + } else if (pkt_type == MQTT_MSG_TYPE_PUBLISH) { + u16_t payload_offset = 0; + u16_t 
payload_length = length; + u8_t qos = MQTT_CTL_PACKET_QOS(client->rx_buffer[0]); + + if (client->msg_idx <= MQTT_VAR_HEADER_BUFFER_LEN) { + /* Should have topic and pkt id*/ + u8_t *topic; + u16_t after_topic; + u8_t bkp; + u16_t topic_len; + u16_t qos_len = (qos ? 2U : 0U); + if (length < 2 + qos_len) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet\n")); + goto out_disconnect; + } + topic_len = var_hdr_payload[0]; + topic_len = (topic_len << 8) + (u16_t)(var_hdr_payload[1]); + if ((topic_len > length - (2 + qos_len)) || + (topic_len > var_hdr_payload_bufsize - (2 + qos_len))) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet (topic)\n")); + goto out_disconnect; + } + + topic = var_hdr_payload + 2; + after_topic = 2 + topic_len; + /* Check buffer length, add one byte even for QoS 0 so that zero termination will fit */ + if ((after_topic + (qos ? 2U : 1U)) > var_hdr_payload_bufsize) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Receive buffer can not fit topic + pkt_id\n")); + goto out_disconnect; + } + + /* id for QoS 1 and 2 */ + if (qos > 0) { + if (length < after_topic + 2U) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet (after_topic)\n")); + goto out_disconnect; + } + client->inpub_pkt_id = ((u16_t)var_hdr_payload[after_topic] << 8) + (u16_t)var_hdr_payload[after_topic + 1]; + after_topic += 2; + } else { + client->inpub_pkt_id = 0; + } + /* Take backup of byte after topic */ + bkp = topic[topic_len]; + /* Zero terminate string */ + topic[topic_len] = 0; + /* Payload data remaining in receive buffer */ + payload_length = length - after_topic; + payload_offset = after_topic; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_incomming_publish: Received message with QoS %d at topic: %s, payload length %"U32_F"\n", + qos, topic, remaining_length + payload_length)); + if (client->pub_cb != NULL) { + client->pub_cb(client->inpub_arg, (const char *)topic, remaining_length + payload_length); + } + /* Restore byte after topic */ + topic[topic_len] = bkp; + } + if (payload_length > 0 || remaining_length == 0) { + if (length < (size_t)(payload_offset + payload_length)) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short packet (payload)\n")); + goto out_disconnect; + } + client->data_cb(client->inpub_arg, var_hdr_payload + payload_offset, payload_length, remaining_length == 0 ? MQTT_DATA_FLAG_LAST : 0); + /* Reply if QoS > 0 */ + if (remaining_length == 0 && qos > 0) { + /* Send PUBACK for QoS 1 or PUBREC for QoS 2 */ + u8_t resp_msg = (qos == 1) ? 
MQTT_MSG_TYPE_PUBACK : MQTT_MSG_TYPE_PUBREC; + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_incomming_publish: Sending publish response: %s with pkt_id: %d\n", + mqtt_msg_type_to_str(resp_msg), client->inpub_pkt_id)); + pub_ack_rec_rel_response(client, resp_msg, client->inpub_pkt_id, 0); + } + } + } else { + /* Get packet identifier */ + pkt_id = (u16_t)var_hdr_payload[0] << 8; + pkt_id |= (u16_t)var_hdr_payload[1]; + if (pkt_id == 0) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Got message with illegal packet identifier: 0\n")); + goto out_disconnect; + } + if (pkt_type == MQTT_MSG_TYPE_PUBREC) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: PUBREC, sending PUBREL with pkt_id: %d\n", pkt_id)); + pub_ack_rec_rel_response(client, MQTT_MSG_TYPE_PUBREL, pkt_id, 1); + + } else if (pkt_type == MQTT_MSG_TYPE_PUBREL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: PUBREL, sending PUBCOMP response with pkt_id: %d\n", pkt_id)); + pub_ack_rec_rel_response(client, MQTT_MSG_TYPE_PUBCOMP, pkt_id, 0); + + } else if (pkt_type == MQTT_MSG_TYPE_SUBACK || pkt_type == MQTT_MSG_TYPE_UNSUBACK || + pkt_type == MQTT_MSG_TYPE_PUBCOMP || pkt_type == MQTT_MSG_TYPE_PUBACK) { + struct mqtt_request_t *r = mqtt_take_request(&client->pend_req_queue, pkt_id); + if (r != NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: %s response with id %d\n", mqtt_msg_type_to_str(pkt_type), pkt_id)); + if (pkt_type == MQTT_MSG_TYPE_SUBACK) { + if (length < 3) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: To small SUBACK packet\n")); + goto out_disconnect; + } else { + mqtt_incomming_suback(r, var_hdr_payload[2]); + } + } else if (r->cb != NULL) { + r->cb(r->arg, ERR_OK); + } + mqtt_delete_request(r); + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ( "mqtt_message_received: Received %s reply, with wrong pkt_id: %d\n", mqtt_msg_type_to_str(pkt_type), pkt_id)); + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ( "mqtt_message_received: Received unknown message type: %d\n", pkt_type)); + goto out_disconnect; + } + } + return res; +out_disconnect: + return MQTT_CONNECT_DISCONNECTED; +} + + +/** + * MQTT incoming message parser + * @param client MQTT client + * @param p PBUF chain of received data + * @return Connection status + */ +static mqtt_connection_status_t +mqtt_parse_incoming(mqtt_client_t *client, struct pbuf *p) +{ + u16_t in_offset = 0; + u32_t msg_rem_len = 0; + u8_t fixed_hdr_idx = 0; + u8_t b = 0; + + while (p->tot_len > in_offset) { + /* We ALWAYS parse the header here first. Even if the header was not + included in this segment, we re-parse it here by buffering it in + client->rx_buffer. client->msg_idx keeps track of this. */ + if ((fixed_hdr_idx < 2) || ((b & 0x80) != 0)) { + + if (fixed_hdr_idx < client->msg_idx) { + /* parse header from old pbuf (buffered in client->rx_buffer) */ + b = client->rx_buffer[fixed_hdr_idx]; + } else { + /* parse header from this pbuf and save it in client->rx_buffer in case + it comes in segmented */ + b = pbuf_get_at(p, in_offset++); + client->rx_buffer[client->msg_idx++] = b; + } + fixed_hdr_idx++; + + if (fixed_hdr_idx >= 2) { + /* fixed header contains at least 2 bytes but can contain more, depending on + 'remaining length'. All bytes but the last of this have 0x80 set to + indicate more bytes are coming. 
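For example, a remaining length of 321 (= 65 + 2*128) is encoded as 0xC1 0x02.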
*/ + msg_rem_len |= (u32_t)(b & 0x7f) << ((fixed_hdr_idx - 2) * 7); + if ((b & 0x80) == 0) { + /* fixed header is done */ + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_parse_incoming: Remaining length after fixed header: %"U32_F"\n", msg_rem_len)); + if (msg_rem_len == 0) { + /* Complete message with no extra headers of payload received */ + mqtt_message_received(client, fixed_hdr_idx, 0, 0); + client->msg_idx = 0; + fixed_hdr_idx = 0; + } else { + /* Bytes remaining in message (changes remaining length if this is + not the first segment of this message) */ + msg_rem_len = (msg_rem_len + fixed_hdr_idx) - client->msg_idx; + } + } + } + } else { + /* Fixed header has been parsed, parse variable header */ + u16_t cpy_len, cpy_start, buffer_space; + + cpy_start = (client->msg_idx - fixed_hdr_idx) % (MQTT_VAR_HEADER_BUFFER_LEN - fixed_hdr_idx) + fixed_hdr_idx; + + /* Allow to copy the lesser one of available length in input data or bytes remaining in message */ + cpy_len = (u16_t)LWIP_MIN((u16_t)(p->tot_len - in_offset), msg_rem_len); + + /* Limit to available space in buffer */ + buffer_space = MQTT_VAR_HEADER_BUFFER_LEN - cpy_start; + if (cpy_len > buffer_space) { + cpy_len = buffer_space; + } + pbuf_copy_partial(p, client->rx_buffer + cpy_start, cpy_len, in_offset); + + /* Advance get and put indexes */ + client->msg_idx += cpy_len; + in_offset += cpy_len; + msg_rem_len -= cpy_len; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_parse_incoming: msg_idx: %"U32_F", cpy_len: %"U16_F", remaining %"U32_F"\n", client->msg_idx, cpy_len, msg_rem_len)); + if ((msg_rem_len == 0) || (cpy_len == buffer_space)) { + /* Whole message received or buffer is full */ + mqtt_connection_status_t res = mqtt_message_received(client, fixed_hdr_idx, (cpy_start + cpy_len) - fixed_hdr_idx, msg_rem_len); + if (res != MQTT_CONNECT_ACCEPTED) { + return res; + } + if (msg_rem_len == 0) { + /* Reset parser state */ + client->msg_idx = 0; + /* msg_tot_len = 0; */ + fixed_hdr_idx = 0; + } + } + } + } + return MQTT_CONNECT_ACCEPTED; +} + + +/** + * TCP received callback function. @see tcp_recv_fn + * @param arg MQTT client + * @param p PBUF chain of received data + * @param err Passed as return value if not ERR_OK + * @return ERR_OK or err passed into callback + */ +static err_t +mqtt_tcp_recv_cb(void *arg, struct altcp_pcb *pcb, struct pbuf *p, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_ASSERT("mqtt_tcp_recv_cb: client != NULL", client != NULL); + LWIP_ASSERT("mqtt_tcp_recv_cb: client->conn == pcb", client->conn == pcb); + + if (p == NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_recv_cb: Recv pbuf=NULL, remote has closed connection\n")); + mqtt_close(client, MQTT_CONNECT_DISCONNECTED); + } else { + mqtt_connection_status_t res; + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_tcp_recv_cb: Recv err=%d\n", err)); + pbuf_free(p); + return err; + } + + /* Tell remote that data has been received */ + altcp_recved(pcb, p->tot_len); + res = mqtt_parse_incoming(client, p); + pbuf_free(p); + + if (res != MQTT_CONNECT_ACCEPTED) { + mqtt_close(client, res); + } + /* If keep alive functionality is used */ + if (client->keep_alive != 0) { + /* Reset server alive watchdog */ + client->server_watchdog = 0; + } + + } + return ERR_OK; +} + + +/** + * TCP data sent callback function. 
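Completes any pending QoS 0 publish requests and tries to send whatever is left in the output ring buffer.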
@see tcp_sent_fn + * @param arg MQTT client + * @param tpcb TCP connection handle + * @param len Number of bytes sent + * @return ERR_OK + */ +static err_t +mqtt_tcp_sent_cb(void *arg, struct altcp_pcb *tpcb, u16_t len) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + + LWIP_UNUSED_ARG(tpcb); + LWIP_UNUSED_ARG(len); + + if (client->conn_state == MQTT_CONNECTED) { + struct mqtt_request_t *r; + + /* Reset keep-alive send timer and server watchdog */ + client->cyclic_tick = 0; + client->server_watchdog = 0; + /* QoS 0 publish has no response from server, so call its callbacks here */ + while ((r = mqtt_take_request(&client->pend_req_queue, 0)) != NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_sent_cb: Calling QoS 0 publish complete callback\n")); + if (r->cb != NULL) { + r->cb(r->arg, ERR_OK); + } + mqtt_delete_request(r); + } + /* Try send any remaining buffers from output queue */ + mqtt_output_send(&client->output, client->conn); + } + return ERR_OK; +} + +/** + * TCP error callback function. @see tcp_err_fn + * @param arg MQTT client + * @param err Error encountered + */ +static void +mqtt_tcp_err_cb(void *arg, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_UNUSED_ARG(err); /* only used for debug output */ + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_err_cb: TCP error callback: error %d, arg: %p\n", err, arg)); + LWIP_ASSERT("mqtt_tcp_err_cb: client != NULL", client != NULL); + /* Set conn to null before calling close as pcb is already deallocated*/ + client->conn = 0; + mqtt_close(client, MQTT_CONNECT_DISCONNECTED); +} + +/** + * TCP poll callback function. @see tcp_poll_fn + * @param arg MQTT client + * @param tpcb TCP connection handle + * @return err ERR_OK + */ +static err_t +mqtt_tcp_poll_cb(void *arg, struct altcp_pcb *tpcb) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + if (client->conn_state == MQTT_CONNECTED) { + /* Try send any remaining buffers from output queue */ + mqtt_output_send(&client->output, tpcb); + } + return ERR_OK; +} + +/** + * TCP connect callback function. @see tcp_connected_fn + * @param arg MQTT client + * @param err Always ERR_OK, mqtt_tcp_err_cb is called in case of error + * @return ERR_OK + */ +static err_t +mqtt_tcp_connect_cb(void *arg, struct altcp_pcb *tpcb, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_tcp_connect_cb: TCP connect error %d\n", err)); + return err; + } + + /* Initiate receiver state */ + client->msg_idx = 0; + + /* Setup TCP callbacks */ + altcp_recv(tpcb, mqtt_tcp_recv_cb); + altcp_sent(tpcb, mqtt_tcp_sent_cb); + altcp_poll(tpcb, mqtt_tcp_poll_cb, 2); + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_connect_cb: TCP connection established to server\n")); + /* Enter MQTT connect state */ + client->conn_state = MQTT_CONNECTING; + + /* Start cyclic timer */ + sys_timeout(MQTT_CYCLIC_TIMER_INTERVAL * 1000, mqtt_cyclic_timer, client); + client->cyclic_tick = 0; + + /* Start transmission from output queue, connect message is the first one out*/ + mqtt_output_send(&client->output, client->conn); + + return ERR_OK; +} + + + +/*---------------------------------------------------------------------------------------------------- */ +/* Public API */ + + +/** + * @ingroup mqtt + * MQTT publish function. 
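+ * Queues a PUBLISH message in the output ring buffer; for QoS 1 and 2 the
+ * request stays on the pending queue until the matching acknowledgment
+ * (PUBACK or PUBCOMP) arrives or MQTT_REQ_TIMEOUT expires.
+ * A typical call (callback name and argument are illustrative only):
+ *   mqtt_publish(client, "sensors/temp", "21.5", 4, 1, 0, my_pub_request_cb, NULL);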
+ * @param client MQTT client + * @param topic Publish topic string + * @param payload Data to publish (NULL is allowed) + * @param payload_length Length of payload (0 is allowed) + * @param qos Quality of service, 0 1 or 2 + * @param retain MQTT retain flag + * @param cb Callback to call when publish is complete or has timed out + * @param arg User supplied argument to publish callback + * @return ERR_OK if successful + * ERR_CONN if client is disconnected + * ERR_MEM if short on memory + */ +err_t +mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain, + mqtt_request_cb_t cb, void *arg) +{ + struct mqtt_request_t *r; + u16_t pkt_id; + size_t topic_strlen; + size_t total_len; + u16_t topic_len; + u16_t remaining_length; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_publish: client != NULL", client); + LWIP_ASSERT("mqtt_publish: topic != NULL", topic); + LWIP_ERROR("mqtt_publish: TCP disconnected", (client->conn_state != TCP_DISCONNECTED), return ERR_CONN); + + topic_strlen = strlen(topic); + LWIP_ERROR("mqtt_publish: topic length overflow", (topic_strlen <= (0xFFFF - 2)), return ERR_ARG); + topic_len = (u16_t)topic_strlen; + total_len = 2 + topic_len + payload_length; + + if (qos > 0) { + total_len += 2; + /* Generate pkt_id id for QoS1 and 2 */ + pkt_id = msg_generate_packet_id(client); + } else { + /* Use reserved value pkt_id 0 for QoS 0 in request handle */ + pkt_id = 0; + } + LWIP_ERROR("mqtt_publish: total length overflow", (total_len <= 0xFFFF), return ERR_ARG); + remaining_length = (u16_t)total_len; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_publish: Publish with payload length %d to topic \"%s\"\n", payload_length, topic)); + + r = mqtt_create_request(client->req_list, LWIP_ARRAYSIZE(client->req_list), pkt_id, cb, arg); + if (r == NULL) { + return ERR_MEM; + } + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + mqtt_delete_request(r); + return ERR_MEM; + } + /* Append fixed header */ + mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_PUBLISH, 0, qos, retain, remaining_length); + + /* Append Topic */ + mqtt_output_append_string(&client->output, topic, topic_len); + + /* Append packet if for QoS 1 and 2*/ + if (qos > 0) { + mqtt_output_append_u16(&client->output, pkt_id); + } + + /* Append optional publish payload */ + if ((payload != NULL) && (payload_length > 0)) { + mqtt_output_append_buf(&client->output, payload, payload_length); + } + + mqtt_append_request(&client->pend_req_queue, r); + mqtt_output_send(&client->output, client->conn); + return ERR_OK; +} + + +/** + * @ingroup mqtt + * MQTT subscribe/unsubscribe function. 
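+ * Only a single topic filter per message is supported; the request is
+ * completed when the matching SUBACK or UNSUBACK arrives or it times out.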
+ * @param client MQTT client + * @param topic topic to subscribe to + * @param qos Quality of service, 0 1 or 2 (only used for subscribe) + * @param cb Callback to call when subscribe/unsubscribe reponse is received + * @param arg User supplied argument to publish callback + * @param sub 1 for subscribe, 0 for unsubscribe + * @return ERR_OK if successful, @see err_t enum for other results + */ +err_t +mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub) +{ + size_t topic_strlen; + size_t total_len; + u16_t topic_len; + u16_t remaining_length; + u16_t pkt_id; + struct mqtt_request_t *r; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_sub_unsub: client != NULL", client); + LWIP_ASSERT("mqtt_sub_unsub: topic != NULL", topic); + + topic_strlen = strlen(topic); + LWIP_ERROR("mqtt_sub_unsub: topic length overflow", (topic_strlen <= (0xFFFF - 2)), return ERR_ARG); + topic_len = (u16_t)topic_strlen; + /* Topic string, pkt_id, qos for subscribe */ + total_len = topic_len + 2 + 2 + (sub != 0); + LWIP_ERROR("mqtt_sub_unsub: total length overflow", (total_len <= 0xFFFF), return ERR_ARG); + remaining_length = (u16_t)total_len; + + LWIP_ASSERT("mqtt_sub_unsub: qos < 3", qos < 3); + if (client->conn_state == TCP_DISCONNECTED) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_sub_unsub: Can not (un)subscribe in disconnected state\n")); + return ERR_CONN; + } + + pkt_id = msg_generate_packet_id(client); + r = mqtt_create_request(client->req_list, LWIP_ARRAYSIZE(client->req_list), pkt_id, cb, arg); + if (r == NULL) { + return ERR_MEM; + } + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + mqtt_delete_request(r); + return ERR_MEM; + } + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_sub_unsub: Client (un)subscribe to topic \"%s\", id: %d\n", topic, pkt_id)); + + mqtt_output_append_fixed_header(&client->output, sub ? 
MQTT_MSG_TYPE_SUBSCRIBE : MQTT_MSG_TYPE_UNSUBSCRIBE, 0, 1, 0, remaining_length); + /* Packet id */ + mqtt_output_append_u16(&client->output, pkt_id); + /* Topic */ + mqtt_output_append_string(&client->output, topic, topic_len); + /* QoS */ + if (sub != 0) { + mqtt_output_append_u8(&client->output, LWIP_MIN(qos, 2)); + } + + mqtt_append_request(&client->pend_req_queue, r); + mqtt_output_send(&client->output, client->conn); + return ERR_OK; +} + + +/** + * @ingroup mqtt + * Set callback to handle incoming publish requests from server + * @param client MQTT client + * @param pub_cb Callback invoked when publish starts, contain topic and total length of payload + * @param data_cb Callback for each fragment of payload that arrives + * @param arg User supplied argument to both callbacks + */ +void +mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t pub_cb, + mqtt_incoming_data_cb_t data_cb, void *arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_set_inpub_callback: client != NULL", client != NULL); + client->data_cb = data_cb; + client->pub_cb = pub_cb; + client->inpub_arg = arg; +} + +/** + * @ingroup mqtt + * Create a new MQTT client instance + * @return Pointer to instance on success, NULL otherwise + */ +mqtt_client_t * +mqtt_client_new(void) +{ + LWIP_ASSERT_CORE_LOCKED(); + return (mqtt_client_t *)mem_calloc(1, sizeof(mqtt_client_t)); +} + +/** + * @ingroup mqtt + * Free MQTT client instance + * @param client Pointer to instance to be freed + */ +void +mqtt_client_free(mqtt_client_t *client) +{ + mem_free(client); +} + +/** + * @ingroup mqtt + * Connect to MQTT server + * @param client MQTT client + * @param ip_addr Server IP + * @param port Server port + * @param cb Connection state change callback + * @param arg User supplied argument to connection callback + * @param client_info Client identification and connection options + * @return ERR_OK if successful, @see err_t enum for other results + */ +err_t +mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ip_addr, u16_t port, mqtt_connection_cb_t cb, void *arg, + const struct mqtt_connect_client_info_t *client_info) +{ + err_t err; + size_t len; + u16_t client_id_length; + /* Length is the sum of 2+"MQTT", protocol level, flags and keep alive */ + u16_t remaining_length = 2 + 4 + 1 + 1 + 2; + u8_t flags = 0, will_topic_len = 0, will_msg_len = 0; + u16_t client_user_len = 0, client_pass_len = 0; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_client_connect: client != NULL", client != NULL); + LWIP_ASSERT("mqtt_client_connect: ip_addr != NULL", ip_addr != NULL); + LWIP_ASSERT("mqtt_client_connect: client_info != NULL", client_info != NULL); + LWIP_ASSERT("mqtt_client_connect: client_info->client_id != NULL", client_info->client_id != NULL); + + if (client->conn_state != TCP_DISCONNECTED) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_client_connect: Already connected\n")); + return ERR_ISCONN; + } + + /* Wipe clean */ + memset(client, 0, sizeof(mqtt_client_t)); + client->connect_arg = arg; + client->connect_cb = cb; + client->keep_alive = client_info->keep_alive; + mqtt_init_requests(client->req_list, LWIP_ARRAYSIZE(client->req_list)); + + /* Build connect message */ + if (client_info->will_topic != NULL && client_info->will_msg != NULL) { + flags |= MQTT_CONNECT_FLAG_WILL; + flags |= (client_info->will_qos & 3) << 3; + if (client_info->will_retain) { + flags |= MQTT_CONNECT_FLAG_WILL_RETAIN; + } + len = strlen(client_info->will_topic); + LWIP_ERROR("mqtt_client_connect: client_info->will_topic length 
overflow", len <= 0xFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->will_topic length must be > 0", len > 0, return ERR_VAL); + will_topic_len = (u8_t)len; + len = strlen(client_info->will_msg); + LWIP_ERROR("mqtt_client_connect: client_info->will_msg length overflow", len <= 0xFF, return ERR_VAL); + will_msg_len = (u8_t)len; + len = remaining_length + 2 + will_topic_len + 2 + will_msg_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + if (client_info->client_user != NULL) { + flags |= MQTT_CONNECT_FLAG_USERNAME; + len = strlen(client_info->client_user); + LWIP_ERROR("mqtt_client_connect: client_info->client_user length overflow", len <= 0xFFFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->client_user length must be > 0", len > 0, return ERR_VAL); + client_user_len = (u16_t)len; + len = remaining_length + 2 + client_user_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + if (client_info->client_pass != NULL) { + flags |= MQTT_CONNECT_FLAG_PASSWORD; + len = strlen(client_info->client_pass); + LWIP_ERROR("mqtt_client_connect: client_info->client_pass length overflow", len <= 0xFFFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->client_pass length must be > 0", len > 0, return ERR_VAL); + client_pass_len = (u16_t)len; + len = remaining_length + 2 + client_pass_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + + /* Don't complicate things, always connect using clean session */ + flags |= MQTT_CONNECT_FLAG_CLEAN_SESSION; + + len = strlen(client_info->client_id); + LWIP_ERROR("mqtt_client_connect: client_info->client_id length overflow", len <= 0xFFFF, return ERR_VAL); + client_id_length = (u16_t)len; + len = remaining_length + 2 + client_id_length; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + return ERR_MEM; + } + +#if LWIP_ALTCP && LWIP_ALTCP_TLS + if (client_info->tls_config) { + client->conn = altcp_tls_new(client_info->tls_config, IP_GET_TYPE(ip_addr)); + } else +#endif + { + client->conn = altcp_tcp_new_ip_type(IP_GET_TYPE(ip_addr)); + } + if (client->conn == NULL) { + return ERR_MEM; + } + + /* Set arg pointer for callbacks */ + altcp_arg(client->conn, client); + /* Any local address, pick random local port number */ + err = altcp_bind(client->conn, IP_ADDR_ANY, 0); + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_client_connect: Error binding to local ip/port, %d\n", err)); + goto tcp_fail; + } + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_client_connect: Connecting to host: %s at port:%"U16_F"\n", ipaddr_ntoa(ip_addr), port)); + + /* Connect to server */ + err = altcp_connect(client->conn, ip_addr, port, mqtt_tcp_connect_cb); + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_client_connect: Error connecting to remote ip/port, %d\n", err)); + goto tcp_fail; + } + /* Set error callback */ + altcp_err(client->conn, mqtt_tcp_err_cb); + client->conn_state = TCP_CONNECTING; + + /* Append fixed header */ + mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_CONNECT, 0, 0, 0, remaining_length); + /* Append Protocol string */ + mqtt_output_append_string(&client->output, "MQTT", 4); + /* 
Append Protocol level */ + mqtt_output_append_u8(&client->output, 4); + /* Append connect flags */ + mqtt_output_append_u8(&client->output, flags); + /* Append keep-alive */ + mqtt_output_append_u16(&client->output, client_info->keep_alive); + /* Append client id */ + mqtt_output_append_string(&client->output, client_info->client_id, client_id_length); + /* Append will message if used */ + if ((flags & MQTT_CONNECT_FLAG_WILL) != 0) { + mqtt_output_append_string(&client->output, client_info->will_topic, will_topic_len); + mqtt_output_append_string(&client->output, client_info->will_msg, will_msg_len); + } + /* Append user name if given */ + if ((flags & MQTT_CONNECT_FLAG_USERNAME) != 0) { + mqtt_output_append_string(&client->output, client_info->client_user, client_user_len); + } + /* Append password if given */ + if ((flags & MQTT_CONNECT_FLAG_PASSWORD) != 0) { + mqtt_output_append_string(&client->output, client_info->client_pass, client_pass_len); + } + return ERR_OK; + +tcp_fail: + altcp_abort(client->conn); + client->conn = NULL; + return err; +} + + +/** + * @ingroup mqtt + * Disconnect from MQTT server + * @param client MQTT client + */ +void +mqtt_disconnect(mqtt_client_t *client) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_disconnect: client != NULL", client); + /* If connection in not already closed */ + if (client->conn_state != TCP_DISCONNECTED) { + /* Set conn_state before calling mqtt_close to prevent callback from being called */ + client->conn_state = TCP_DISCONNECTED; + mqtt_close(client, (mqtt_connection_status_t)0); + } +} + +/** + * @ingroup mqtt + * Check connection with server + * @param client MQTT client + * @return 1 if connected to server, 0 otherwise + */ +u8_t +mqtt_client_is_connected(mqtt_client_t *client) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_client_is_connected: client != NULL", client); + return client->conn_state == MQTT_CONNECTED; +} + +#endif /* LWIP_TCP && LWIP_CALLBACK_API */ diff --git a/Middlewares/Third_Party/LwIP/src/core/altcp.c b/Middlewares/Third_Party/LwIP/src/core/altcp.c new file mode 100644 index 00000000..d46d6cdb --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/altcp.c @@ -0,0 +1,681 @@ +/** + * @file + * @defgroup altcp Application layered TCP Functions + * @ingroup altcp_api + * + * This file contains the common functions for altcp to work. + * For more details see @ref altcp_api. + */ + +/** + * @defgroup altcp_api Application layered TCP Introduction + * @ingroup callbackstyle_api + * + * Overview + * -------- + * altcp (application layered TCP connection API; to be used from TCPIP thread) + * is an abstraction layer that prevents applications linking hard against the + * @ref tcp.h functions while providing the same functionality. It is used to + * e.g. add SSL/TLS (see LWIP_ALTCP_TLS) or proxy-connect support to an application + * written for the tcp callback API without that application knowing the + * protocol details. + * + * * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * * This is achieved by simply including "lwip/altcp.h" instead of "lwip/tcp.h", + * replacing "struct tcp_pcb" with "struct altcp_pcb" and prefixing all functions + * with "altcp_" instead of "tcp_". 
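A minimal sketch of the conversion described in the bullets above (illustrative, not part of this patch; the callback names and port number are assumptions): a raw-API client that previously used tcp_new()/tcp_connect()/tcp_write() only needs the altcp equivalents, while the choice of plain TCP versus a layered protocol moves into the pcb constructor.

#include "lwip/altcp.h"      /* instead of "lwip/tcp.h" */
#include "lwip/altcp_tcp.h"

static err_t example_connected(void *arg, struct altcp_pcb *pcb, err_t err)
{
  /* Same shape as the tcp connected callback, but with struct altcp_pcb */
  LWIP_UNUSED_ARG(arg);
  if (err != ERR_OK) {
    return err;
  }
  return altcp_write(pcb, "hello", 5, 0); /* string literal, no copy needed */
}

static void example_connect(const ip_addr_t *addr)
{
  /* Plain TCP here; adding TLS or proxy-connect is a matter of using a
     different constructor/allocator, not of changing this code */
  struct altcp_pcb *pcb = altcp_tcp_new_ip_type(IPADDR_TYPE_V4);
  if (pcb != NULL) {
    altcp_connect(pcb, addr, 80, example_connected);
  }
}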
+ * + * With altcp support disabled (LWIP_ALTCP==0), applications written against the + * altcp API can still be compiled but are directly linked against the tcp.h + * callback API and then cannot use layered protocols. To minimize code changes + * in this case, the use of altcp_allocators is strongly suggested. + * + * Usage + * ----- + * To make use of this API from an existing tcp raw API application: + * * Include "lwip/altcp.h" instead of "lwip/tcp.h" + * * Replace "struct tcp_pcb" with "struct altcp_pcb" + * * Prefix all called tcp API functions with "altcp_" instead of "tcp_" to link + * against the altcp functions + * * @ref altcp_new (and @ref altcp_new_ip_type/@ref altcp_new_ip6) take + * an @ref altcp_allocator_t as an argument, whereas the original tcp API + * functions take no arguments. + * * An @ref altcp_allocator_t allocator is an object that holds a pointer to an + * allocator object and a corresponding state (e.g. for TLS, the corresponding + * state may hold certificates or keys). This way, the application does not + * even need to know if it uses TLS or pure TCP, this is handled at runtime + * by passing a specific allocator. + * * An application can alternatively bind hard to the altcp_tls API by calling + * @ref altcp_tls_new or @ref altcp_tls_wrap. + * * The TLS layer is not directly implemented by lwIP, but a port to mbedTLS is + * provided. + * * Another altcp layer is proxy-connect to use TLS behind a HTTP proxy (see + * @ref altcp_proxyconnect.h) + * + * altcp_allocator_t + * ----------------- + * An altcp allocator is created by the application by combining an allocator + * callback function and a corresponding state, e.g.:\code{.c} + * static const unsigned char cert[] = {0x2D, ... (see mbedTLS doc for how to create this)}; + * struct altcp_tls_config * conf = altcp_tls_create_config_client(cert, sizeof(cert)); + * altcp_allocator_t tls_allocator = { + * altcp_tls_alloc, conf + * }; + * \endcode + * + * + * struct altcp_tls_config + * ----------------------- + * The struct altcp_tls_config holds state that is needed to create new TLS client + * or server connections (e.g. certificates and private keys). + * + * It is not defined by lwIP itself but by the TLS port (e.g. altcp_tls to mbedTLS + * adaption). However, the parameters used to create it are defined in @ref + * altcp_tls.h (see @ref altcp_tls_create_config_server_privkey_cert for servers + * and @ref altcp_tls_create_config_client/@ref altcp_tls_create_config_client_2wayauth + * for clients). + * + * For mbedTLS, ensure that certificates can be parsed by 'mbedtls_x509_crt_parse()' and + * private keys can be parsed by 'mbedtls_pk_parse_key()'. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/priv/altcp_priv.h" +#include "lwip/altcp_tcp.h" +#include "lwip/tcp.h" +#include "lwip/mem.h" + +#include + +extern const struct altcp_functions altcp_tcp_functions; + +/** + * For altcp layer implementations only: allocate a new struct altcp_pcb from the pool + * and zero the memory + */ +struct altcp_pcb * +altcp_alloc(void) +{ + struct altcp_pcb *ret = (struct altcp_pcb *)memp_malloc(MEMP_ALTCP_PCB); + if (ret != NULL) { + memset(ret, 0, sizeof(struct altcp_pcb)); + } + return ret; +} + +/** + * For altcp layer implementations only: return a struct altcp_pcb to the pool + */ +void +altcp_free(struct altcp_pcb *conn) +{ + if (conn) { + if (conn->fns && conn->fns->dealloc) { + conn->fns->dealloc(conn); + } + memp_free(MEMP_ALTCP_PCB, conn); + } +} + +/** + * @ingroup altcp + * altcp_new_ip6: @ref altcp_new for IPv6 + */ +struct altcp_pcb * +altcp_new_ip6(altcp_allocator_t *allocator) +{ + return altcp_new_ip_type(allocator, IPADDR_TYPE_V6); +} + +/** + * @ingroup altcp + * altcp_new: @ref altcp_new for IPv4 + */ +struct altcp_pcb * +altcp_new(altcp_allocator_t *allocator) +{ + return altcp_new_ip_type(allocator, IPADDR_TYPE_V4); +} + +/** + * @ingroup altcp + * altcp_new_ip_type: called by applications to allocate a new pcb with the help of an + * allocator function. 
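A minimal sketch of the allocator-based path described here, assuming the plain-TCP allocator altcp_tcp_alloc defined in altcp_tcp.c further down; a TLS-enabled build would pass altcp_tls_alloc together with its struct altcp_tls_config pointer instead. The function name is illustrative, not part of this patch.

static struct altcp_pcb *example_new_pcb(void)
{
  /* Allocator function plus argument, as consumed by altcp_new_ip_type();
     NULL argument because plain TCP needs no extra state */
  altcp_allocator_t allocator = { altcp_tcp_alloc, NULL };
  return altcp_new_ip_type(&allocator, IPADDR_TYPE_V4);
}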
+ * + * @param allocator allocator function and argument + * @param ip_type IP version of the pcb (@ref lwip_ip_addr_type) + * @return a new altcp_pcb or NULL on error + */ +struct altcp_pcb * +altcp_new_ip_type(altcp_allocator_t *allocator, u8_t ip_type) +{ + struct altcp_pcb *conn; + if (allocator == NULL) { + /* no allocator given, create a simple TCP connection */ + return altcp_tcp_new_ip_type(ip_type); + } + if (allocator->alloc == NULL) { + /* illegal allocator */ + return NULL; + } + conn = allocator->alloc(allocator->arg, ip_type); + if (conn == NULL) { + /* allocation failed */ + return NULL; + } + return conn; +} + +/** + * @ingroup altcp + * @see tcp_arg() + */ +void +altcp_arg(struct altcp_pcb *conn, void *arg) +{ + if (conn) { + conn->arg = arg; + } +} + +/** + * @ingroup altcp + * @see tcp_accept() + */ +void +altcp_accept(struct altcp_pcb *conn, altcp_accept_fn accept) +{ + if (conn != NULL) { + conn->accept = accept; + } +} + +/** + * @ingroup altcp + * @see tcp_recv() + */ +void +altcp_recv(struct altcp_pcb *conn, altcp_recv_fn recv) +{ + if (conn) { + conn->recv = recv; + } +} + +/** + * @ingroup altcp + * @see tcp_sent() + */ +void +altcp_sent(struct altcp_pcb *conn, altcp_sent_fn sent) +{ + if (conn) { + conn->sent = sent; + } +} + +/** + * @ingroup altcp + * @see tcp_poll() + */ +void +altcp_poll(struct altcp_pcb *conn, altcp_poll_fn poll, u8_t interval) +{ + if (conn) { + conn->poll = poll; + conn->pollinterval = interval; + if (conn->fns && conn->fns->set_poll) { + conn->fns->set_poll(conn, interval); + } + } +} + +/** + * @ingroup altcp + * @see tcp_err() + */ +void +altcp_err(struct altcp_pcb *conn, altcp_err_fn err) +{ + if (conn) { + conn->err = err; + } +} + +/* Generic functions calling the "virtual" ones */ + +/** + * @ingroup altcp + * @see tcp_recved() + */ +void +altcp_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn && conn->fns && conn->fns->recved) { + conn->fns->recved(conn, len); + } +} + +/** + * @ingroup altcp + * @see tcp_bind() + */ +err_t +altcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + if (conn && conn->fns && conn->fns->bind) { + return conn->fns->bind(conn, ipaddr, port); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_connect() + */ +err_t +altcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected) +{ + if (conn && conn->fns && conn->fns->connect) { + return conn->fns->connect(conn, ipaddr, port, connected); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_listen_with_backlog_and_err() + */ +struct altcp_pcb * +altcp_listen_with_backlog_and_err(struct altcp_pcb *conn, u8_t backlog, err_t *err) +{ + if (conn && conn->fns && conn->fns->listen) { + return conn->fns->listen(conn, backlog, err); + } + return NULL; +} + +/** + * @ingroup altcp + * @see tcp_abort() + */ +void +altcp_abort(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->abort) { + conn->fns->abort(conn); + } +} + +/** + * @ingroup altcp + * @see tcp_close() + */ +err_t +altcp_close(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->close) { + return conn->fns->close(conn); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_shutdown() + */ +err_t +altcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + if (conn && conn->fns && conn->fns->shutdown) { + return conn->fns->shutdown(conn, shut_rx, shut_tx); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_write() + */ +err_t +altcp_write(struct altcp_pcb 
*conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + if (conn && conn->fns && conn->fns->write) { + return conn->fns->write(conn, dataptr, len, apiflags); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_output() + */ +err_t +altcp_output(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->output) { + return conn->fns->output(conn); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_mss() + */ +u16_t +altcp_mss(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->mss) { + return conn->fns->mss(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_sndbuf() + */ +u16_t +altcp_sndbuf(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->sndbuf) { + return conn->fns->sndbuf(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_sndqueuelen() + */ +u16_t +altcp_sndqueuelen(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->sndqueuelen) { + return conn->fns->sndqueuelen(conn); + } + return 0; +} + +void +altcp_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_disable) { + conn->fns->nagle_disable(conn); + } +} + +void +altcp_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_enable) { + conn->fns->nagle_enable(conn); + } +} + +int +altcp_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_disabled) { + return conn->fns->nagle_disabled(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_setprio() + */ +void +altcp_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn && conn->fns && conn->fns->setprio) { + conn->fns->setprio(conn, prio); + } +} + +err_t +altcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn && conn->fns && conn->fns->addrinfo) { + return conn->fns->addrinfo(conn, local, addr, port); + } + return ERR_VAL; +} + +ip_addr_t * +altcp_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn && conn->fns && conn->fns->getip) { + return conn->fns->getip(conn, local); + } + return NULL; +} + +u16_t +altcp_get_port(struct altcp_pcb *conn, int local) +{ + if (conn && conn->fns && conn->fns->getport) { + return conn->fns->getport(conn, local); + } + return 0; +} + +#ifdef LWIP_DEBUG +enum tcp_state +altcp_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->dbg_get_tcp_state) { + return conn->fns->dbg_get_tcp_state(conn); + } + return CLOSED; +} +#endif + +/* Default implementations for the "virtual" functions */ + +void +altcp_default_set_poll(struct altcp_pcb *conn, u8_t interval) +{ + if (conn && conn->inner_conn) { + altcp_poll(conn->inner_conn, conn->poll, interval); + } +} + +void +altcp_default_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn && conn->inner_conn) { + altcp_recved(conn->inner_conn, len); + } +} + +err_t +altcp_default_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + if (conn && conn->inner_conn) { + return altcp_bind(conn->inner_conn, ipaddr, port); + } + return ERR_VAL; +} + +err_t +altcp_default_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + if (conn) { + if (shut_rx && shut_tx && conn->fns && conn->fns->close) { + /* default shutdown for both sides is close */ + return conn->fns->close(conn); + } + if (conn->inner_conn) { + return altcp_shutdown(conn->inner_conn, shut_rx, shut_tx); + } + } + return ERR_VAL; +} + +err_t +altcp_default_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + if (conn && 
conn->inner_conn) { + return altcp_write(conn->inner_conn, dataptr, len, apiflags); + } + return ERR_VAL; +} + +err_t +altcp_default_output(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_output(conn->inner_conn); + } + return ERR_VAL; +} + +u16_t +altcp_default_mss(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_mss(conn->inner_conn); + } + return 0; +} + +u16_t +altcp_default_sndbuf(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_sndbuf(conn->inner_conn); + } + return 0; +} + +u16_t +altcp_default_sndqueuelen(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_sndqueuelen(conn->inner_conn); + } + return 0; +} + +void +altcp_default_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + altcp_nagle_disable(conn->inner_conn); + } +} + +void +altcp_default_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + altcp_nagle_enable(conn->inner_conn); + } +} + +int +altcp_default_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_nagle_disabled(conn->inner_conn); + } + return 0; +} + +void +altcp_default_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn && conn->inner_conn) { + altcp_setprio(conn->inner_conn, prio); + } +} + +void +altcp_default_dealloc(struct altcp_pcb *conn) +{ + LWIP_UNUSED_ARG(conn); + /* nothing to do */ +} + +err_t +altcp_default_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn && conn->inner_conn) { + return altcp_get_tcp_addrinfo(conn->inner_conn, local, addr, port); + } + return ERR_VAL; +} + +ip_addr_t * +altcp_default_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn && conn->inner_conn) { + return altcp_get_ip(conn->inner_conn, local); + } + return NULL; +} + +u16_t +altcp_default_get_port(struct altcp_pcb *conn, int local) +{ + if (conn && conn->inner_conn) { + return altcp_get_port(conn->inner_conn, local); + } + return 0; +} + +#ifdef LWIP_DEBUG +enum tcp_state +altcp_default_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_dbg_get_tcp_state(conn->inner_conn); + } + return CLOSED; +} +#endif + + +#endif /* LWIP_ALTCP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c b/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c new file mode 100644 index 00000000..cd619bc1 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c @@ -0,0 +1,87 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * + * This file contains allocation implementation that combine several layers. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/altcp_tcp.h" +#include "lwip/altcp_tls.h" +#include "lwip/priv/altcp_priv.h" +#include "lwip/mem.h" + +#include + +#if LWIP_ALTCP_TLS + +/** This standard allocator function creates an altcp pcb for + * TLS over TCP */ +struct altcp_pcb * +altcp_tls_new(struct altcp_tls_config *config, u8_t ip_type) +{ + struct altcp_pcb *inner_conn, *ret; + LWIP_UNUSED_ARG(ip_type); + + inner_conn = altcp_tcp_new_ip_type(ip_type); + if (inner_conn == NULL) { + return NULL; + } + ret = altcp_tls_wrap(config, inner_conn); + if (ret == NULL) { + altcp_close(inner_conn); + } + return ret; +} + +/** This standard allocator function creates an altcp pcb for + * TLS over TCP */ +struct altcp_pcb * +altcp_tls_alloc(void *arg, u8_t ip_type) +{ + return altcp_tls_new((struct altcp_tls_config *)arg, ip_type); +} + +#endif /* LWIP_ALTCP_TLS */ + +#endif /* LWIP_ALTCP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c b/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c new file mode 100644 index 00000000..b715f045 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c @@ -0,0 +1,543 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * + * This file contains the base implementation calling into tcp. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/altcp_tcp.h" +#include "lwip/priv/altcp_priv.h" +#include "lwip/tcp.h" +#include "lwip/mem.h" + +#include + +#define ALTCP_TCP_ASSERT_CONN(conn) do { \ + LWIP_ASSERT("conn->inner_conn == NULL", (conn)->inner_conn == NULL); \ + LWIP_UNUSED_ARG(conn); /* for LWIP_NOASSERT */ } while(0) +#define ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb) do { \ + LWIP_ASSERT("pcb mismatch", (conn)->state == tpcb); \ + LWIP_UNUSED_ARG(tpcb); /* for LWIP_NOASSERT */ \ + ALTCP_TCP_ASSERT_CONN(conn); } while(0) + + +/* Variable prototype, the actual declaration is at the end of this file + since it contains pointers to static functions declared here */ +extern const struct altcp_functions altcp_tcp_functions; + +static void altcp_tcp_setup(struct altcp_pcb *conn, struct tcp_pcb *tpcb); + +/* callback functions for TCP */ +static err_t +altcp_tcp_accept(void *arg, struct tcp_pcb *new_tpcb, err_t err) +{ + struct altcp_pcb *listen_conn = (struct altcp_pcb *)arg; + if (listen_conn && listen_conn->accept) { + /* create a new altcp_conn to pass to the next 'accept' callback */ + struct altcp_pcb *new_conn = altcp_alloc(); + if (new_conn == NULL) { + return ERR_MEM; + } + altcp_tcp_setup(new_conn, new_tpcb); + return listen_conn->accept(listen_conn->arg, new_conn, err); + } + return ERR_ARG; +} + +static err_t +altcp_tcp_connected(void *arg, struct tcp_pcb *tpcb, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->connected) { + return conn->connected(conn->arg, conn, err); + } + } + return ERR_OK; +} + +static err_t +altcp_tcp_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->recv) { + return conn->recv(conn->arg, conn, p, err); + } + } + if (p != NULL) { + /* prevent memory leaks */ + pbuf_free(p); + } + return ERR_OK; +} + +static err_t +altcp_tcp_sent(void *arg, struct tcp_pcb *tpcb, u16_t len) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->sent) { + return conn->sent(conn->arg, conn, len); + } + } + return ERR_OK; +} + +static err_t +altcp_tcp_poll(void *arg, struct tcp_pcb *tpcb) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->poll) { + return 
conn->poll(conn->arg, conn); + } + } + return ERR_OK; +} + +static void +altcp_tcp_err(void *arg, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + conn->state = NULL; /* already freed */ + if (conn->err) { + conn->err(conn->arg, err); + } + altcp_free(conn); + } +} + +/* setup functions */ + +static void +altcp_tcp_remove_callbacks(struct tcp_pcb *tpcb) +{ + tcp_arg(tpcb, NULL); + tcp_recv(tpcb, NULL); + tcp_sent(tpcb, NULL); + tcp_err(tpcb, NULL); + tcp_poll(tpcb, NULL, tpcb->pollinterval); +} + +static void +altcp_tcp_setup_callbacks(struct altcp_pcb *conn, struct tcp_pcb *tpcb) +{ + tcp_arg(tpcb, conn); + tcp_recv(tpcb, altcp_tcp_recv); + tcp_sent(tpcb, altcp_tcp_sent); + tcp_err(tpcb, altcp_tcp_err); + /* tcp_poll is set when interval is set by application */ + /* listen is set totally different :-) */ +} + +static void +altcp_tcp_setup(struct altcp_pcb *conn, struct tcp_pcb *tpcb) +{ + altcp_tcp_setup_callbacks(conn, tpcb); + conn->state = tpcb; + conn->fns = &altcp_tcp_functions; +} + +struct altcp_pcb * +altcp_tcp_new_ip_type(u8_t ip_type) +{ + /* Allocate the tcp pcb first to invoke the priority handling code + if we're out of pcbs */ + struct tcp_pcb *tpcb = tcp_new_ip_type(ip_type); + if (tpcb != NULL) { + struct altcp_pcb *ret = altcp_alloc(); + if (ret != NULL) { + altcp_tcp_setup(ret, tpcb); + return ret; + } else { + /* altcp_pcb allocation failed -> free the tcp_pcb too */ + tcp_close(tpcb); + } + } + return NULL; +} + +/** altcp_tcp allocator function fitting to @ref altcp_allocator_t / @ref altcp_new. +* +* arg pointer is not used for TCP. +*/ +struct altcp_pcb * +altcp_tcp_alloc(void *arg, u8_t ip_type) +{ + LWIP_UNUSED_ARG(arg); + return altcp_tcp_new_ip_type(ip_type); +} + +struct altcp_pcb * +altcp_tcp_wrap(struct tcp_pcb *tpcb) +{ + if (tpcb != NULL) { + struct altcp_pcb *ret = altcp_alloc(); + if (ret != NULL) { + altcp_tcp_setup(ret, tpcb); + return ret; + } + } + return NULL; +} + + +/* "virtual" functions calling into tcp */ +static void +altcp_tcp_set_poll(struct altcp_pcb *conn, u8_t interval) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_poll(pcb, altcp_tcp_poll, interval); + } +} + +static void +altcp_tcp_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_recved(pcb, len); + } +} + +static err_t +altcp_tcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_bind(pcb, ipaddr, port); +} + +static err_t +altcp_tcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + conn->connected = connected; + pcb = (struct tcp_pcb *)conn->state; + return tcp_connect(pcb, ipaddr, port, altcp_tcp_connected); +} + +static struct altcp_pcb * +altcp_tcp_listen(struct altcp_pcb *conn, u8_t backlog, err_t *err) +{ + struct tcp_pcb *pcb; + struct tcp_pcb *lpcb; + if (conn == NULL) { + return NULL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + lpcb = tcp_listen_with_backlog_and_err(pcb, backlog, err); + if (lpcb != NULL) { + conn->state = lpcb; + tcp_accept(lpcb, altcp_tcp_accept); + return conn; + } + return NULL; +} + +static void 
+altcp_tcp_abort(struct altcp_pcb *conn) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + tcp_abort(pcb); + } + } +} + +static err_t +altcp_tcp_close(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + if (pcb) { + err_t err; + tcp_poll_fn oldpoll = pcb->poll; + altcp_tcp_remove_callbacks(pcb); + err = tcp_close(pcb); + if (err != ERR_OK) { + /* not closed, set up all callbacks again */ + altcp_tcp_setup_callbacks(conn, pcb); + /* poll callback is not included in the above */ + tcp_poll(pcb, oldpoll, pcb->pollinterval); + return err; + } + conn->state = NULL; /* unsafe to reference pcb after tcp_close(). */ + } + altcp_free(conn); + return ERR_OK; +} + +static err_t +altcp_tcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_shutdown(pcb, shut_rx, shut_tx); +} + +static err_t +altcp_tcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_write(pcb, dataptr, len, apiflags); +} + +static err_t +altcp_tcp_output(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_output(pcb); +} + +static u16_t +altcp_tcp_mss(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_mss(pcb); +} + +static u16_t +altcp_tcp_sndbuf(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_sndbuf(pcb); +} + +static u16_t +altcp_tcp_sndqueuelen(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_sndqueuelen(pcb); +} + +static void +altcp_tcp_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_nagle_disable(pcb); + } +} + +static void +altcp_tcp_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_nagle_enable(pcb); + } +} + +static int +altcp_tcp_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + return tcp_nagle_disabled(pcb); + } + return 0; +} + +static void +altcp_tcp_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_setprio(pcb, prio); + } +} + +static void +altcp_tcp_dealloc(struct altcp_pcb *conn) +{ + LWIP_UNUSED_ARG(conn); + ALTCP_TCP_ASSERT_CONN(conn); + /* no private state to clean up */ +} + +static err_t +altcp_tcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + 
ALTCP_TCP_ASSERT_CONN(conn); + return tcp_tcp_get_tcp_addrinfo(pcb, local, addr, port); + } + return ERR_VAL; +} + +static ip_addr_t * +altcp_tcp_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + if (local) { + return &pcb->local_ip; + } else { + return &pcb->remote_ip; + } + } + } + return NULL; +} + +static u16_t +altcp_tcp_get_port(struct altcp_pcb *conn, int local) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + if (local) { + return pcb->local_port; + } else { + return pcb->remote_port; + } + } + } + return 0; +} + +#ifdef LWIP_DEBUG +static enum tcp_state +altcp_tcp_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + return pcb->state; + } + } + return CLOSED; +} +#endif +const struct altcp_functions altcp_tcp_functions = { + altcp_tcp_set_poll, + altcp_tcp_recved, + altcp_tcp_bind, + altcp_tcp_connect, + altcp_tcp_listen, + altcp_tcp_abort, + altcp_tcp_close, + altcp_tcp_shutdown, + altcp_tcp_write, + altcp_tcp_output, + altcp_tcp_mss, + altcp_tcp_sndbuf, + altcp_tcp_sndqueuelen, + altcp_tcp_nagle_disable, + altcp_tcp_nagle_enable, + altcp_tcp_nagle_disabled, + altcp_tcp_setprio, + altcp_tcp_dealloc, + altcp_tcp_get_tcp_addrinfo, + altcp_tcp_get_ip, + altcp_tcp_get_port +#ifdef LWIP_DEBUG + , altcp_tcp_dbg_get_tcp_state +#endif +}; + +#endif /* LWIP_ALTCP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/def.c b/Middlewares/Third_Party/LwIP/src/core/def.c new file mode 100644 index 00000000..9da36fee --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/def.c @@ -0,0 +1,240 @@ +/** + * @file + * Common functions used throughout the stack. + * + * These are reference implementations of the byte swapping functions. + * Again with the aim of being simple, correct and fully portable. + * Byte swapping is the second thing you would want to optimize. You will + * need to port it to your architecture and in your cc.h: + * + * \#define lwip_htons(x) your_htons + * \#define lwip_htonl(x) your_htonl + * + * Note lwip_ntohs() and lwip_ntohl() are merely references to the htonx counterparts. + * + * If you \#define them to htons() and htonl(), you should + * \#define LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS to prevent lwIP from + * defining htonx/ntohx compatibility macros. + + * @defgroup sys_nonstandard Non-standard functions + * @ingroup sys_layer + * lwIP provides default implementations for non-standard functions. + * These can be mapped to OS functions to reduce code footprint if desired. + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/cc.h! + * These options cannot be \#defined in lwipopts.h since they are not options + * of lwIP itself, but options of the lwIP port to your system. + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" +#include "lwip/def.h" + +#include + +#if BYTE_ORDER == LITTLE_ENDIAN + +#if !defined(lwip_htons) +/** + * Convert an u16_t from host- to network byte order. + * + * @param n u16_t in host byte order + * @return n in network byte order + */ +u16_t +lwip_htons(u16_t n) +{ + return PP_HTONS(n); +} +#endif /* lwip_htons */ + +#if !defined(lwip_htonl) +/** + * Convert an u32_t from host- to network byte order. + * + * @param n u32_t in host byte order + * @return n in network byte order + */ +u32_t +lwip_htonl(u32_t n) +{ + return PP_HTONL(n); +} +#endif /* lwip_htonl */ + +#endif /* BYTE_ORDER == LITTLE_ENDIAN */ + +#ifndef lwip_strnstr +/** + * @ingroup sys_nonstandard + * lwIP default implementation for strnstr() non-standard function. + * This can be \#defined to strnstr() depending on your platform port. + */ +char * +lwip_strnstr(const char *buffer, const char *token, size_t n) +{ + const char *p; + size_t tokenlen = strlen(token); + if (tokenlen == 0) { + return LWIP_CONST_CAST(char *, buffer); + } + for (p = buffer; *p && (p + tokenlen <= buffer + n); p++) { + if ((*p == *token) && (strncmp(p, token, tokenlen) == 0)) { + return LWIP_CONST_CAST(char *, p); + } + } + return NULL; +} +#endif + +#ifndef lwip_stricmp +/** + * @ingroup sys_nonstandard + * lwIP default implementation for stricmp() non-standard function. + * This can be \#defined to stricmp() depending on your platform port. + */ +int +lwip_stricmp(const char *str1, const char *str2) +{ + char c1, c2; + + do { + c1 = *str1++; + c2 = *str2++; + if (c1 != c2) { + char c1_upc = c1 | 0x20; + if ((c1_upc >= 'a') && (c1_upc <= 'z')) { + /* characters are not equal an one is in the alphabet range: + downcase both chars and check again */ + char c2_upc = c2 | 0x20; + if (c1_upc != c2_upc) { + /* still not equal */ + /* don't care for < or > */ + return 1; + } + } else { + /* characters are not equal but none is in the alphabet range */ + return 1; + } + } + } while (c1 != 0); + return 0; +} +#endif + +#ifndef lwip_strnicmp +/** + * @ingroup sys_nonstandard + * lwIP default implementation for strnicmp() non-standard function. + * This can be \#defined to strnicmp() depending on your platform port. 
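As the header comment of this file notes, these defaults can be replaced from arch/cc.h rather than lwipopts.h. A hypothetical fragment follows, assuming the toolchain provides strcasecmp()/strncasecmp() and snprintf(); the mappings are illustrative and not part of this patch.

/* arch/cc.h (hypothetical): map the non-standard helpers to toolchain
   equivalents so the reference implementations in def.c are compiled out */
#define lwip_stricmp(s1, s2)          strcasecmp(s1, s2)
#define lwip_strnicmp(s1, s2, n)      strncasecmp(s1, s2, n)
#define lwip_itoa(result, bufsize, n) snprintf(result, bufsize, "%d", n)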
+ */ +int +lwip_strnicmp(const char *str1, const char *str2, size_t len) +{ + char c1, c2; + + do { + c1 = *str1++; + c2 = *str2++; + if (c1 != c2) { + char c1_upc = c1 | 0x20; + if ((c1_upc >= 'a') && (c1_upc <= 'z')) { + /* characters are not equal an one is in the alphabet range: + downcase both chars and check again */ + char c2_upc = c2 | 0x20; + if (c1_upc != c2_upc) { + /* still not equal */ + /* don't care for < or > */ + return 1; + } + } else { + /* characters are not equal but none is in the alphabet range */ + return 1; + } + } + len--; + } while ((len != 0) && (c1 != 0)); + return 0; +} +#endif + +#ifndef lwip_itoa +/** + * @ingroup sys_nonstandard + * lwIP default implementation for itoa() non-standard function. + * This can be \#defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform port. + */ +void +lwip_itoa(char *result, size_t bufsize, int number) +{ + char *res = result; + char *tmp = result + bufsize - 1; + int n = (number >= 0) ? number : -number; + + /* handle invalid bufsize */ + if (bufsize < 2) { + if (bufsize == 1) { + *result = 0; + } + return; + } + + /* First, add sign */ + if (number < 0) { + *res++ = '-'; + } + /* Then create the string from the end and stop if buffer full, + and ensure output string is zero terminated */ + *tmp = 0; + while ((n != 0) && (tmp > res)) { + char val = (char)('0' + (n % 10)); + tmp--; + *tmp = val; + n = n / 10; + } + if (n) { + /* buffer is too small */ + *result = 0; + return; + } + if (*tmp == 0) { + /* Nothing added? */ + *res++ = '0'; + *res++ = 0; + return; + } + /* move from temporary buffer to output buffer (sign is not moved) */ + memmove(res, tmp, (size_t)((result + bufsize) - tmp)); +} +#endif diff --git a/Middlewares/Third_Party/LwIP/src/core/dns.c b/Middlewares/Third_Party/LwIP/src/core/dns.c new file mode 100644 index 00000000..9d2f61ed --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/dns.c @@ -0,0 +1,1631 @@ +/** + * @file + * DNS - host name to IP address resolver. + * + * @defgroup dns DNS + * @ingroup callbackstyle_api + * + * Implements a DNS host name to IP address resolver. + * + * The lwIP DNS resolver functions are used to lookup a host name and + * map it to a numerical IP address. It maintains a list of resolved + * hostnames that can be queried with the dns_lookup() function. + * New hostnames can be resolved using the dns_query() function. + * + * The lwIP version of the resolver also adds a non-blocking version of + * gethostbyname() that will work with a raw API application. This function + * checks for an IP address string first and converts it if it is valid. + * gethostbyname() then does a dns_lookup() to see if the name is + * already in the table. If so, the IP is returned. If not, a query is + * issued and the function returns with a ERR_INPROGRESS status. The app + * using the dns client must then go into a waiting state. + * + * Once a hostname has been resolved (or found to be non-existent), + * the resolver code calls a specified callback function (which + * must be implemented by the module that uses the resolver). + * + * Multicast DNS queries are supported for names ending on ".local". + * However, only "One-Shot Multicast DNS Queries" are supported (RFC 6762 + * chapter 5.1), this is not a fully compliant implementation of continuous + * mDNS querying! + * + * All functions must be called from TCPIP thread. + * + * @see DNS_MAX_SERVERS + * @see LWIP_DHCP_MAX_DNS_SERVERS + * @see @ref netconn_common for thread-safe access. 
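A minimal raw-API sketch of the lookup flow described above (illustrative, not part of this patch; the example_* names and hostname are assumptions): dns_gethostbyname() either answers immediately from the table or returns ERR_INPROGRESS and reports the result through the supplied callback.

static void example_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg)
{
  LWIP_UNUSED_ARG(name); LWIP_UNUSED_ARG(arg);
  if (ipaddr != NULL) {
    /* resolved: connect or send using *ipaddr */
  } else {
    /* resolution failed or timed out */
  }
}

static void example_resolve(void)
{
  ip_addr_t addr;
  err_t err = dns_gethostbyname("example.org", &addr, example_dns_found, NULL);
  if (err == ERR_OK) {
    /* numeric string or cached entry: addr is valid now, callback is not called */
  } else if (err == ERR_INPROGRESS) {
    /* query sent: example_dns_found() is called from the TCPIP thread later */
  }
}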
+ */ + +/* + * Port to lwIP from uIP + * by Jim Pettinato April 2007 + * + * security fixes and more by Simon Goldschmidt + * + * uIP version Copyright (c) 2002-2003, Adam Dunkels. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*----------------------------------------------------------------------------- + * RFC 1035 - Domain names - implementation and specification + * RFC 2181 - Clarifications to the DNS Specification + *----------------------------------------------------------------------------*/ + +/** @todo: define good default values (rfc compliance) */ +/** @todo: improve answer parsing, more checkings... */ +/** @todo: check RFC1035 - 7.3. Processing responses */ +/** @todo: one-shot mDNS: dual-stack fallback to another IP version */ + +/*----------------------------------------------------------------------------- + * Includes + *----------------------------------------------------------------------------*/ + +#include "lwip/opt.h" + +#if LWIP_DNS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/udp.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/dns.h" +#include "lwip/prot/dns.h" + +#include + +/** Random generator function to create random TXIDs and source ports for queries */ +#ifndef DNS_RAND_TXID +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_XID) != 0) +#define DNS_RAND_TXID LWIP_RAND +#else +static u16_t dns_txid; +#define DNS_RAND_TXID() (++dns_txid) +#endif +#endif + +/** Limits the source port to be >= 1024 by default */ +#ifndef DNS_PORT_ALLOWED +#define DNS_PORT_ALLOWED(port) ((port) >= 1024) +#endif + +/** DNS resource record max. TTL (one week as default) */ +#ifndef DNS_MAX_TTL +#define DNS_MAX_TTL 604800 +#elif DNS_MAX_TTL > 0x7FFFFFFF +#error DNS_MAX_TTL must be a positive 32-bit value +#endif + +#if DNS_TABLE_SIZE > 255 +#error DNS_TABLE_SIZE must fit into an u8_t +#endif +#if DNS_MAX_SERVERS > 255 +#error DNS_MAX_SERVERS must fit into an u8_t +#endif + +/* The number of parallel requests (i.e. calls to dns_gethostbyname + * that cannot be answered from the DNS table. + * This is set to the table size by default. 
+ */ +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) +#ifndef DNS_MAX_REQUESTS +#define DNS_MAX_REQUESTS DNS_TABLE_SIZE +#else +#if DNS_MAX_REQUESTS > 255 +#error DNS_MAX_REQUESTS must fit into an u8_t +#endif +#endif +#else +/* In this configuration, both arrays have to have the same size and are used + * like one entry (used/free) */ +#define DNS_MAX_REQUESTS DNS_TABLE_SIZE +#endif + +/* The number of UDP source ports used in parallel */ +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +#ifndef DNS_MAX_SOURCE_PORTS +#define DNS_MAX_SOURCE_PORTS DNS_MAX_REQUESTS +#else +#if DNS_MAX_SOURCE_PORTS > 255 +#error DNS_MAX_SOURCE_PORTS must fit into an u8_t +#endif +#endif +#else +#ifdef DNS_MAX_SOURCE_PORTS +#undef DNS_MAX_SOURCE_PORTS +#endif +#define DNS_MAX_SOURCE_PORTS 1 +#endif + +#if LWIP_IPV4 && LWIP_IPV6 +#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) (((t) == LWIP_DNS_ADDRTYPE_IPV6_IPV4) || ((t) == LWIP_DNS_ADDRTYPE_IPV6)) +#define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) (IP_IS_V6_VAL(ip) ? LWIP_DNS_ADDRTYPE_IS_IPV6(t) : (!LWIP_DNS_ADDRTYPE_IS_IPV6(t))) +#define LWIP_DNS_ADDRTYPE_ARG(x) , x +#define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) x +#define LWIP_DNS_SET_ADDRTYPE(x, y) do { x = y; } while(0) +#else +#if LWIP_IPV6 +#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 1 +#else +#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 0 +#endif +#define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) 1 +#define LWIP_DNS_ADDRTYPE_ARG(x) +#define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) 0 +#define LWIP_DNS_SET_ADDRTYPE(x, y) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES +#define LWIP_DNS_ISMDNS_ARG(x) , x +#else +#define LWIP_DNS_ISMDNS_ARG(x) +#endif + +/** DNS query message structure. + No packing needed: only used locally on the stack. */ +struct dns_query { + /* DNS query record starts with either a domain name or a pointer + to a name already present somewhere in the packet. */ + u16_t type; + u16_t cls; +}; +#define SIZEOF_DNS_QUERY 4 + +/** DNS answer message structure. + No packing needed: only used locally on the stack. */ +struct dns_answer { + /* DNS answer record starts with either a domain name or a pointer + to a name already present somewhere in the packet. */ + u16_t type; + u16_t cls; + u32_t ttl; + u16_t len; +}; +#define SIZEOF_DNS_ANSWER 10 +/* maximum allowed size for the struct due to non-packed */ +#define SIZEOF_DNS_ANSWER_ASSERT 12 + +/* DNS table entry states */ +typedef enum { + DNS_STATE_UNUSED = 0, + DNS_STATE_NEW = 1, + DNS_STATE_ASKING = 2, + DNS_STATE_DONE = 3 +} dns_state_enum_t; + +/** DNS table entry */ +struct dns_table_entry { + u32_t ttl; + ip_addr_t ipaddr; + u16_t txid; + u8_t state; + u8_t server_idx; + u8_t tmr; + u8_t retries; + u8_t seqno; +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + u8_t pcb_idx; +#endif + char name[DNS_MAX_NAME_LENGTH]; +#if LWIP_IPV4 && LWIP_IPV6 + u8_t reqaddrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + u8_t is_mdns; +#endif +}; + +/** DNS request table entry: used when dns_gehostbyname cannot answer the + * request from the DNS table */ +struct dns_req_entry { + /* pointer to callback on DNS query done */ + dns_found_callback found; + /* argument passed to the callback function */ + void *arg; +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + u8_t dns_table_idx; +#endif +#if LWIP_IPV4 && LWIP_IPV6 + u8_t reqaddrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +}; + +#if DNS_LOCAL_HOSTLIST + +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +/** Local host-list. 
For hostnames in this list, no + * external name resolution is performed */ +static struct local_hostlist_entry *local_hostlist_dynamic; +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +/** Defining this allows the local_hostlist_static to be placed in a different + * linker section (e.g. FLASH) */ +#ifndef DNS_LOCAL_HOSTLIST_STORAGE_PRE +#define DNS_LOCAL_HOSTLIST_STORAGE_PRE static +#endif /* DNS_LOCAL_HOSTLIST_STORAGE_PRE */ +/** Defining this allows the local_hostlist_static to be placed in a different + * linker section (e.g. FLASH) */ +#ifndef DNS_LOCAL_HOSTLIST_STORAGE_POST +#define DNS_LOCAL_HOSTLIST_STORAGE_POST +#endif /* DNS_LOCAL_HOSTLIST_STORAGE_POST */ +DNS_LOCAL_HOSTLIST_STORAGE_PRE struct local_hostlist_entry local_hostlist_static[] + DNS_LOCAL_HOSTLIST_STORAGE_POST = DNS_LOCAL_HOSTLIST_INIT; + +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +static void dns_init_local(void); +static err_t dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)); +#endif /* DNS_LOCAL_HOSTLIST */ + + +/* forward declarations */ +static void dns_recv(void *s, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); +static void dns_check_entries(void); +static void dns_call_found(u8_t idx, ip_addr_t *addr); + +/*----------------------------------------------------------------------------- + * Globals + *----------------------------------------------------------------------------*/ + +/* DNS variables */ +static struct udp_pcb *dns_pcbs[DNS_MAX_SOURCE_PORTS]; +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +static u8_t dns_last_pcb_idx; +#endif +static u8_t dns_seqno; +static struct dns_table_entry dns_table[DNS_TABLE_SIZE]; +static struct dns_req_entry dns_requests[DNS_MAX_REQUESTS]; +static ip_addr_t dns_servers[DNS_MAX_SERVERS]; + +#if LWIP_IPV4 +const ip_addr_t dns_mquery_v4group = DNS_MQUERY_IPV4_GROUP_INIT; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +const ip_addr_t dns_mquery_v6group = DNS_MQUERY_IPV6_GROUP_INIT; +#endif /* LWIP_IPV6 */ + +/** + * Initialize the resolver: set up the UDP pcb and configure the default server + * (if DNS_SERVER_ADDRESS is set). + */ +void +dns_init(void) +{ +#ifdef DNS_SERVER_ADDRESS + /* initialize default DNS server address */ + ip_addr_t dnsserver; + DNS_SERVER_ADDRESS(&dnsserver); + dns_setserver(0, &dnsserver); +#endif /* DNS_SERVER_ADDRESS */ + + LWIP_ASSERT("sanity check SIZEOF_DNS_QUERY", + sizeof(struct dns_query) == SIZEOF_DNS_QUERY); + LWIP_ASSERT("sanity check SIZEOF_DNS_ANSWER", + sizeof(struct dns_answer) <= SIZEOF_DNS_ANSWER_ASSERT); + + LWIP_DEBUGF(DNS_DEBUG, ("dns_init: initializing\n")); + + /* if dns client not yet initialized... */ +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0) + if (dns_pcbs[0] == NULL) { + dns_pcbs[0] = udp_new_ip_type(IPADDR_TYPE_ANY); + LWIP_ASSERT("dns_pcbs[0] != NULL", dns_pcbs[0] != NULL); + + /* initialize DNS table not needed (initialized to zero since it is a + * global variable) */ + LWIP_ASSERT("For implicit initialization to work, DNS_STATE_UNUSED needs to be 0", + DNS_STATE_UNUSED == 0); + + /* initialize DNS client */ + udp_bind(dns_pcbs[0], IP_ANY_TYPE, 0); + udp_recv(dns_pcbs[0], dns_recv, NULL); + } +#endif + +#if DNS_LOCAL_HOSTLIST + dns_init_local(); +#endif +} + +/** + * @ingroup dns + * Initialize one of the DNS servers. 
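+ *
+ * Illustrative call sequence (editorial sketch; the 192.168.1.1 address is just
+ * an example value and assumes an IPv4 build):
+ *
+ *   ip_addr_t dnsserver;
+ *   IP_ADDR4(&dnsserver, 192, 168, 1, 1);
+ *   dns_setserver(0, &dnsserver);   // primary server
+ *   dns_setserver(1, NULL);         // clear the secondary slot (set to "any")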
+ * + * @param numdns the index of the DNS server to set must be < DNS_MAX_SERVERS + * @param dnsserver IP address of the DNS server to set + */ +void +dns_setserver(u8_t numdns, const ip_addr_t *dnsserver) +{ + if (numdns < DNS_MAX_SERVERS) { + if (dnsserver != NULL) { + dns_servers[numdns] = (*dnsserver); + } else { + dns_servers[numdns] = *IP_ADDR_ANY; + } + } +} + +/** + * @ingroup dns + * Obtain one of the currently configured DNS server. + * + * @param numdns the index of the DNS server + * @return IP address of the indexed DNS server or "ip_addr_any" if the DNS + * server has not been configured. + */ +const ip_addr_t * +dns_getserver(u8_t numdns) +{ + if (numdns < DNS_MAX_SERVERS) { + return &dns_servers[numdns]; + } else { + return IP_ADDR_ANY; + } +} + +/** + * The DNS resolver client timer - handle retries and timeouts and should + * be called every DNS_TMR_INTERVAL milliseconds (every second by default). + */ +void +dns_tmr(void) +{ + LWIP_DEBUGF(DNS_DEBUG, ("dns_tmr: dns_check_entries\n")); + dns_check_entries(); +} + +#if DNS_LOCAL_HOSTLIST +static void +dns_init_local(void) +{ +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) + size_t i; + struct local_hostlist_entry *entry; + /* Dynamic: copy entries from DNS_LOCAL_HOSTLIST_INIT to list */ + struct local_hostlist_entry local_hostlist_init[] = DNS_LOCAL_HOSTLIST_INIT; + size_t namelen; + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_init); i++) { + struct local_hostlist_entry *init_entry = &local_hostlist_init[i]; + LWIP_ASSERT("invalid host name (NULL)", init_entry->name != NULL); + namelen = strlen(init_entry->name); + LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN); + entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST); + LWIP_ASSERT("mem-error in dns_init_local", entry != NULL); + if (entry != NULL) { + char *entry_name = (char *)entry + sizeof(struct local_hostlist_entry); + MEMCPY(entry_name, init_entry->name, namelen); + entry_name[namelen] = 0; + entry->name = entry_name; + entry->addr = init_entry->addr; + entry->next = local_hostlist_dynamic; + local_hostlist_dynamic = entry; + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) */ +} + +/** + * @ingroup dns + * Iterate the local host-list for a hostname. + * + * @param iterator_fn a function that is called for every entry in the local host-list + * @param iterator_arg 3rd argument passed to iterator_fn + * @return the number of entries in the local host-list + */ +size_t +dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg) +{ + size_t i; +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC + struct local_hostlist_entry *entry = local_hostlist_dynamic; + i = 0; + while (entry != NULL) { + if (iterator_fn != NULL) { + iterator_fn(entry->name, &entry->addr, iterator_arg); + } + i++; + entry = entry->next; + } +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) { + if (iterator_fn != NULL) { + iterator_fn(local_hostlist_static[i].name, &local_hostlist_static[i].addr, iterator_arg); + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + return i; +} + +/** + * @ingroup dns + * Scans the local host-list for a hostname. + * + * @param hostname Hostname to look for in the local host-list + * @param addr the first IP address for the hostname in the local host-list or + * IPADDR_NONE if not found. + * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 (ATTENTION: no fallback here!) 
+ * - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 (ATTENTION: no fallback here!) + * - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only + * - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only + * @return ERR_OK if found, ERR_ARG if not found + */ +err_t +dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype) +{ + LWIP_UNUSED_ARG(dns_addrtype); + return dns_lookup_local(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)); +} + +/* Internal implementation for dns_local_lookup and dns_lookup */ +static err_t +dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)) +{ +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC + struct local_hostlist_entry *entry = local_hostlist_dynamic; + while (entry != NULL) { + if ((lwip_stricmp(entry->name, hostname) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, entry->addr)) { + if (addr) { + ip_addr_copy(*addr, entry->addr); + } + return ERR_OK; + } + entry = entry->next; + } +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + size_t i; + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) { + if ((lwip_stricmp(local_hostlist_static[i].name, hostname) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, local_hostlist_static[i].addr)) { + if (addr) { + ip_addr_copy(*addr, local_hostlist_static[i].addr); + } + return ERR_OK; + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + return ERR_ARG; +} + +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +/** + * @ingroup dns + * Remove all entries from the local host-list for a specific hostname + * and/or IP address + * + * @param hostname hostname for which entries shall be removed from the local + * host-list + * @param addr address for which entries shall be removed from the local host-list + * @return the number of removed entries + */ +int +dns_local_removehost(const char *hostname, const ip_addr_t *addr) +{ + int removed = 0; + struct local_hostlist_entry *entry = local_hostlist_dynamic; + struct local_hostlist_entry *last_entry = NULL; + while (entry != NULL) { + if (((hostname == NULL) || !lwip_stricmp(entry->name, hostname)) && + ((addr == NULL) || ip_addr_cmp(&entry->addr, addr))) { + struct local_hostlist_entry *free_entry; + if (last_entry != NULL) { + last_entry->next = entry->next; + } else { + local_hostlist_dynamic = entry->next; + } + free_entry = entry; + entry = entry->next; + memp_free(MEMP_LOCALHOSTLIST, free_entry); + removed++; + } else { + last_entry = entry; + entry = entry->next; + } + } + return removed; +} + +/** + * @ingroup dns + * Add a hostname/IP address pair to the local host-list. + * Duplicates are not checked. 
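+ *
+ * Illustrative sketch (editorial addition; the hostname and address are example
+ * values, and DNS_LOCAL_HOSTLIST plus DNS_LOCAL_HOSTLIST_IS_DYNAMIC must be
+ * enabled):
+ *
+ *   ip_addr_t sensor_addr;
+ *   IP_ADDR4(&sensor_addr, 192, 168, 1, 50);
+ *   if (dns_local_addhost("sensor.lan", &sensor_addr) != ERR_OK) {
+ *     // no MEMP_LOCALHOSTLIST entry was available
+ *   }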
+ * + * @param hostname hostname of the new entry + * @param addr IP address of the new entry + * @return ERR_OK if succeeded or ERR_MEM on memory error + */ +err_t +dns_local_addhost(const char *hostname, const ip_addr_t *addr) +{ + struct local_hostlist_entry *entry; + size_t namelen; + char *entry_name; + LWIP_ASSERT("invalid host name (NULL)", hostname != NULL); + namelen = strlen(hostname); + LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN); + entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST); + if (entry == NULL) { + return ERR_MEM; + } + entry_name = (char *)entry + sizeof(struct local_hostlist_entry); + MEMCPY(entry_name, hostname, namelen); + entry_name[namelen] = 0; + entry->name = entry_name; + ip_addr_copy(entry->addr, *addr); + entry->next = local_hostlist_dynamic; + local_hostlist_dynamic = entry; + return ERR_OK; +} +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC*/ +#endif /* DNS_LOCAL_HOSTLIST */ + +/** + * @ingroup dns + * Look up a hostname in the array of known hostnames. + * + * @note This function only looks in the internal array of known + * hostnames, it does not send out a query for the hostname if none + * was found. The function dns_enqueue() can be used to send a query + * for a hostname. + * + * @param name the hostname to look up + * @param addr the hostname's IP address, as u32_t (instead of ip_addr_t to + * better check for failure: != IPADDR_NONE) or IPADDR_NONE if the hostname + * was not found in the cached dns_table. + * @return ERR_OK if found, ERR_ARG if not found + */ +static err_t +dns_lookup(const char *name, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)) +{ + u8_t i; +#if DNS_LOCAL_HOSTLIST + if (dns_lookup_local(name, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#endif /* DNS_LOCAL_HOSTLIST */ +#ifdef DNS_LOOKUP_LOCAL_EXTERN + if (DNS_LOOKUP_LOCAL_EXTERN(name, addr, LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#endif /* DNS_LOOKUP_LOCAL_EXTERN */ + + /* Walk through name list, return entry if found. If not, return NULL. */ + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + if ((dns_table[i].state == DNS_STATE_DONE) && + (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, dns_table[i].ipaddr)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_lookup: \"%s\": found = ", name)); + ip_addr_debug_print_val(DNS_DEBUG, dns_table[i].ipaddr); + LWIP_DEBUGF(DNS_DEBUG, ("\n")); + if (addr) { + ip_addr_copy(*addr, dns_table[i].ipaddr); + } + return ERR_OK; + } + } + + return ERR_ARG; +} + +/** + * Compare the "dotted" name "query" with the encoded name "response" + * to make sure an answer from the DNS server matches the current dns_table + * entry (otherwise, answers might arrive late for hostname not on the list + * any more). + * + * For now, this function compares case-insensitive to cope with all kinds of + * servers. This also means that "dns 0x20 bit encoding" must be checked + * externally, if we want to implement it. + * Currently, the request is sent exactly as passed in by he user request. 
+ * + * @param query hostname (not encoded) from the dns_table + * @param p pbuf containing the encoded hostname in the DNS response + * @param start_offset offset into p where the name starts + * @return 0xFFFF: names differ, other: names equal -> offset behind name + */ +static u16_t +dns_compare_name(const char *query, struct pbuf *p, u16_t start_offset) +{ + int n; + u16_t response_offset = start_offset; + + do { + n = pbuf_try_get_at(p, response_offset); + if ((n < 0) || (response_offset == 0xFFFF)) { + /* error or overflow */ + return 0xFFFF; + } + response_offset++; + /** @see RFC 1035 - 4.1.4. Message compression */ + if ((n & 0xc0) == 0xc0) { + /* Compressed name: cannot be equal since we don't send them */ + return 0xFFFF; + } else { + /* Not compressed name */ + while (n > 0) { + int c = pbuf_try_get_at(p, response_offset); + if (c < 0) { + return 0xFFFF; + } + if (lwip_tolower((*query)) != lwip_tolower((u8_t)c)) { + return 0xFFFF; + } + if (response_offset == 0xFFFF) { + /* would overflow */ + return 0xFFFF; + } + response_offset++; + ++query; + --n; + } + ++query; + } + n = pbuf_try_get_at(p, response_offset); + if (n < 0) { + return 0xFFFF; + } + } while (n != 0); + + if (response_offset == 0xFFFF) { + /* would overflow */ + return 0xFFFF; + } + return (u16_t)(response_offset + 1); +} + +/** + * Walk through a compact encoded DNS name and return the end of the name. + * + * @param p pbuf containing the name + * @param query_idx start index into p pointing to encoded DNS name in the DNS server response + * @return index to end of the name + */ +static u16_t +dns_skip_name(struct pbuf *p, u16_t query_idx) +{ + int n; + u16_t offset = query_idx; + + do { + n = pbuf_try_get_at(p, offset++); + if ((n < 0) || (offset == 0)) { + return 0xFFFF; + } + /** @see RFC 1035 - 4.1.4. Message compression */ + if ((n & 0xc0) == 0xc0) { + /* Compressed name: since we only want to skip it (not check it), stop here */ + break; + } else { + /* Not compressed name */ + if (offset + n >= p->tot_len) { + return 0xFFFF; + } + offset = (u16_t)(offset + n); + } + n = pbuf_try_get_at(p, offset); + if (n < 0) { + return 0xFFFF; + } + } while (n != 0); + + if (offset == 0xFFFF) { + return 0xFFFF; + } + return (u16_t)(offset + 1); +} + +/** + * Send a DNS query packet. + * + * @param idx the DNS table entry index for which to send a request + * @return ERR_OK if packet is sent; an err_t indicating the problem otherwise + */ +static err_t +dns_send(u8_t idx) +{ + err_t err; + struct dns_hdr hdr; + struct dns_query qry; + struct pbuf *p; + u16_t query_idx, copy_len; + const char *hostname, *hostname_part; + u8_t n; + u8_t pcb_idx; + struct dns_table_entry *entry = &dns_table[idx]; + + LWIP_DEBUGF(DNS_DEBUG, ("dns_send: dns_servers[%"U16_F"] \"%s\": request\n", + (u16_t)(entry->server_idx), entry->name)); + LWIP_ASSERT("dns server out of array", entry->server_idx < DNS_MAX_SERVERS); + if (ip_addr_isany_val(dns_servers[entry->server_idx]) +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + && !entry->is_mdns +#endif + ) { + /* DNS server not valid anymore, e.g. 
PPP netif has been shut down */ + /* call specified callback function if provided */ + dns_call_found(idx, NULL); + /* flush this entry */ + entry->state = DNS_STATE_UNUSED; + return ERR_OK; + } + + /* if here, we have either a new query or a retry on a previous query to process */ + p = pbuf_alloc(PBUF_TRANSPORT, (u16_t)(SIZEOF_DNS_HDR + strlen(entry->name) + 2 + + SIZEOF_DNS_QUERY), PBUF_RAM); + if (p != NULL) { + const ip_addr_t *dst; + u16_t dst_port; + /* fill dns header */ + memset(&hdr, 0, SIZEOF_DNS_HDR); + hdr.id = lwip_htons(entry->txid); + hdr.flags1 = DNS_FLAG1_RD; + hdr.numquestions = PP_HTONS(1); + pbuf_take(p, &hdr, SIZEOF_DNS_HDR); + hostname = entry->name; + --hostname; + + /* convert hostname into suitable query format. */ + query_idx = SIZEOF_DNS_HDR; + do { + ++hostname; + hostname_part = hostname; + for (n = 0; *hostname != '.' && *hostname != 0; ++hostname) { + ++n; + } + copy_len = (u16_t)(hostname - hostname_part); + if (query_idx + n + 1 > 0xFFFF) { + /* u16_t overflow */ + goto overflow_return; + } + pbuf_put_at(p, query_idx, n); + pbuf_take_at(p, hostname_part, copy_len, (u16_t)(query_idx + 1)); + query_idx = (u16_t)(query_idx + n + 1); + } while (*hostname != 0); + pbuf_put_at(p, query_idx, 0); + query_idx++; + + /* fill dns query */ + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) { + qry.type = PP_HTONS(DNS_RRTYPE_AAAA); + } else { + qry.type = PP_HTONS(DNS_RRTYPE_A); + } + qry.cls = PP_HTONS(DNS_RRCLASS_IN); + pbuf_take_at(p, &qry, SIZEOF_DNS_QUERY, query_idx); + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + pcb_idx = entry->pcb_idx; +#else + pcb_idx = 0; +#endif + /* send dns packet */ + LWIP_DEBUGF(DNS_DEBUG, ("sending DNS request ID %d for name \"%s\" to server %d\r\n", + entry->txid, entry->name, entry->server_idx)); +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (entry->is_mdns) { + dst_port = DNS_MQUERY_PORT; +#if LWIP_IPV6 + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) { + dst = &dns_mquery_v6group; + } +#endif +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif +#if LWIP_IPV4 + { + dst = &dns_mquery_v4group; + } +#endif + } else +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + dst_port = DNS_SERVER_PORT; + dst = &dns_servers[entry->server_idx]; + } + err = udp_sendto(dns_pcbs[pcb_idx], p, dst, dst_port); + + /* free pbuf */ + pbuf_free(p); + } else { + err = ERR_MEM; + } + + return err; +overflow_return: + pbuf_free(p); + return ERR_VAL; +} + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +static struct udp_pcb * +dns_alloc_random_port(void) +{ + err_t err; + struct udp_pcb *pcb; + + pcb = udp_new_ip_type(IPADDR_TYPE_ANY); + if (pcb == NULL) { + /* out of memory, have to reuse an existing pcb */ + return NULL; + } + do { + u16_t port = (u16_t)DNS_RAND_TXID(); + if (DNS_PORT_ALLOWED(port)) { + err = udp_bind(pcb, IP_ANY_TYPE, port); + } else { + /* this port is not allowed, try again */ + err = ERR_USE; + } + } while (err == ERR_USE); + if (err != ERR_OK) { + udp_remove(pcb); + return NULL; + } + udp_recv(pcb, dns_recv, NULL); + return pcb; +} + +/** + * dns_alloc_pcb() - allocates a new pcb (or reuses an existing one) to be used + * for sending a request + * + * @return an index into dns_pcbs + */ +static u8_t +dns_alloc_pcb(void) +{ + u8_t i; + u8_t idx; + + for (i = 0; i < DNS_MAX_SOURCE_PORTS; i++) { + if (dns_pcbs[i] == NULL) { + break; + } + } + if (i < DNS_MAX_SOURCE_PORTS) { + dns_pcbs[i] = dns_alloc_random_port(); + if (dns_pcbs[i] != NULL) { + /* succeeded */ + dns_last_pcb_idx = i; + return i; + } + } + /* if we 
come here, creating a new UDP pcb failed, so we have to use + an already existing one (so overflow is no issue) */ + for (i = 0, idx = (u8_t)(dns_last_pcb_idx + 1); i < DNS_MAX_SOURCE_PORTS; i++, idx++) { + if (idx >= DNS_MAX_SOURCE_PORTS) { + idx = 0; + } + if (dns_pcbs[idx] != NULL) { + dns_last_pcb_idx = idx; + return idx; + } + } + return DNS_MAX_SOURCE_PORTS; +} +#endif /* ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) */ + +/** + * dns_call_found() - call the found callback and check if there are duplicate + * entries for the given hostname. If there are any, their found callback will + * be called and they will be removed. + * + * @param idx dns table index of the entry that is resolved or removed + * @param addr IP address for the hostname (or NULL on error or memory shortage) + */ +static void +dns_call_found(u8_t idx, ip_addr_t *addr) +{ +#if ((LWIP_DNS_SECURE & (LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT)) != 0) + u8_t i; +#endif + +#if LWIP_IPV4 && LWIP_IPV6 + if (addr != NULL) { + /* check that address type matches the request and adapt the table entry */ + if (IP_IS_V6_VAL(*addr)) { + LWIP_ASSERT("invalid response", LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype)); + dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6; + } else { + LWIP_ASSERT("invalid response", !LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype)); + dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4; + } + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + for (i = 0; i < DNS_MAX_REQUESTS; i++) { + if (dns_requests[i].found && (dns_requests[i].dns_table_idx == idx)) { + (*dns_requests[i].found)(dns_table[idx].name, addr, dns_requests[i].arg); + /* flush this entry */ + dns_requests[i].found = NULL; + } + } +#else + if (dns_requests[idx].found) { + (*dns_requests[idx].found)(dns_table[idx].name, addr, dns_requests[idx].arg); + } + dns_requests[idx].found = NULL; +#endif +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + /* close the pcb used unless other request are using it */ + for (i = 0; i < DNS_MAX_REQUESTS; i++) { + if (i == idx) { + continue; /* only check other requests */ + } + if (dns_table[i].state == DNS_STATE_ASKING) { + if (dns_table[i].pcb_idx == dns_table[idx].pcb_idx) { + /* another request is still using the same pcb */ + dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS; + break; + } + } + } + if (dns_table[idx].pcb_idx < DNS_MAX_SOURCE_PORTS) { + /* if we come here, the pcb is not used any more and can be removed */ + udp_remove(dns_pcbs[dns_table[idx].pcb_idx]); + dns_pcbs[dns_table[idx].pcb_idx] = NULL; + dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS; + } +#endif +} + +/* Create a query transmission ID that is unique for all outstanding queries */ +static u16_t +dns_create_txid(void) +{ + u16_t txid; + u8_t i; + +again: + txid = (u16_t)DNS_RAND_TXID(); + + /* check whether the ID is unique */ + for (i = 0; i < DNS_TABLE_SIZE; i++) { + if ((dns_table[i].state == DNS_STATE_ASKING) && + (dns_table[i].txid == txid)) { + /* ID already used by another pending query */ + goto again; + } + } + + return txid; +} + +/** + * Check whether there are other backup DNS servers available to try + */ +static u8_t +dns_backupserver_available(struct dns_table_entry *pentry) +{ + u8_t ret = 0; + + if (pentry) { + if ((pentry->server_idx + 1 < DNS_MAX_SERVERS) && !ip_addr_isany_val(dns_servers[pentry->server_idx + 1])) { + ret = 1; + } + } + + return ret; +} + +/** + * dns_check_entry() 
- see if entry has not yet been queried and, if so, sends out a query. + * Check an entry in the dns_table: + * - send out query for new entries + * - retry old pending entries on timeout (also with different servers) + * - remove completed entries from the table if their TTL has expired + * + * @param i index of the dns_table entry to check + */ +static void +dns_check_entry(u8_t i) +{ + err_t err; + struct dns_table_entry *entry = &dns_table[i]; + + LWIP_ASSERT("array index out of bounds", i < DNS_TABLE_SIZE); + + switch (entry->state) { + case DNS_STATE_NEW: + /* initialize new entry */ + entry->txid = dns_create_txid(); + entry->state = DNS_STATE_ASKING; + entry->server_idx = 0; + entry->tmr = 1; + entry->retries = 0; + + /* send DNS packet for this entry */ + err = dns_send(i); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING, + ("dns_send returned error: %s\n", lwip_strerr(err))); + } + break; + case DNS_STATE_ASKING: + if (--entry->tmr == 0) { + if (++entry->retries == DNS_MAX_RETRIES) { + if (dns_backupserver_available(entry) +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + && !entry->is_mdns +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + ) { + /* change of server */ + entry->server_idx++; + entry->tmr = 1; + entry->retries = 0; + } else { + LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": timeout\n", entry->name)); + /* call specified callback function if provided */ + dns_call_found(i, NULL); + /* flush this entry */ + entry->state = DNS_STATE_UNUSED; + break; + } + } else { + /* wait longer for the next retry */ + entry->tmr = entry->retries; + } + + /* send DNS packet for this entry */ + err = dns_send(i); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING, + ("dns_send returned error: %s\n", lwip_strerr(err))); + } + } + break; + case DNS_STATE_DONE: + /* if the time to live is nul */ + if ((entry->ttl == 0) || (--entry->ttl == 0)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": flush\n", entry->name)); + /* flush this entry, there cannot be any related pending entries in this state */ + entry->state = DNS_STATE_UNUSED; + } + break; + case DNS_STATE_UNUSED: + /* nothing to do */ + break; + default: + LWIP_ASSERT("unknown dns_table entry state:", 0); + break; + } +} + +/** + * Call dns_check_entry for each entry in dns_table - check all entries. + */ +static void +dns_check_entries(void) +{ + u8_t i; + + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + dns_check_entry(i); + } +} + +/** + * Save TTL and call dns_call_found for correct response. + */ +static void +dns_correct_response(u8_t idx, u32_t ttl) +{ + struct dns_table_entry *entry = &dns_table[idx]; + + entry->state = DNS_STATE_DONE; + + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response = ", entry->name)); + ip_addr_debug_print_val(DNS_DEBUG, entry->ipaddr); + LWIP_DEBUGF(DNS_DEBUG, ("\n")); + + /* read the answer resource record's TTL, and maximize it if needed */ + entry->ttl = ttl; + if (entry->ttl > DNS_MAX_TTL) { + entry->ttl = DNS_MAX_TTL; + } + dns_call_found(idx, &entry->ipaddr); + + if (entry->ttl == 0) { + /* RFC 883, page 29: "Zero values are + interpreted to mean that the RR can only be used for the + transaction in progress, and should not be cached." + -> flush this entry now */ + /* entry reused during callback? */ + if (entry->state == DNS_STATE_DONE) { + entry->state = DNS_STATE_UNUSED; + } + } +} + +/** + * Receive input function for DNS response packets arriving for the dns UDP pcb. 
+ */ +static void +dns_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + u8_t i; + u16_t txid; + u16_t res_idx; + struct dns_hdr hdr; + struct dns_answer ans; + struct dns_query qry; + u16_t nquestions, nanswers; + + LWIP_UNUSED_ARG(arg); + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(port); + + /* is the dns message big enough ? */ + if (p->tot_len < (SIZEOF_DNS_HDR + SIZEOF_DNS_QUERY)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: pbuf too small\n")); + /* free pbuf and return */ + goto ignore_packet; + } + + /* copy dns payload inside static buffer for processing */ + if (pbuf_copy_partial(p, &hdr, SIZEOF_DNS_HDR, 0) == SIZEOF_DNS_HDR) { + /* Match the ID in the DNS header with the name table. */ + txid = lwip_htons(hdr.id); + for (i = 0; i < DNS_TABLE_SIZE; i++) { + struct dns_table_entry *entry = &dns_table[i]; + if ((entry->state == DNS_STATE_ASKING) && + (entry->txid == txid)) { + + /* We only care about the question(s) and the answers. The authrr + and the extrarr are simply discarded. */ + nquestions = lwip_htons(hdr.numquestions); + nanswers = lwip_htons(hdr.numanswers); + + /* Check for correct response. */ + if ((hdr.flags1 & DNS_FLAG1_RESPONSE) == 0) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": not a response\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + if (nquestions != 1) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (!entry->is_mdns) +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + /* Check whether response comes from the same network address to which the + question was sent. (RFC 5452) */ + if (!ip_addr_cmp(addr, &dns_servers[entry->server_idx])) { + goto ignore_packet; /* ignore this packet */ + } + } + + /* Check if the name in the "question" part match with the name in the entry and + skip it if equal. */ + res_idx = dns_compare_name(entry->name, p, SIZEOF_DNS_HDR); + if (res_idx == 0xFFFF) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + + /* check if "question" part matches the request */ + if (pbuf_copy_partial(p, &qry, SIZEOF_DNS_QUERY, res_idx) != SIZEOF_DNS_QUERY) { + goto ignore_packet; /* ignore this packet */ + } + if ((qry.cls != PP_HTONS(DNS_RRCLASS_IN)) || + (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_AAAA))) || + (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_A)))) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + /* skip the rest of the "question" part */ + if (res_idx + SIZEOF_DNS_QUERY > 0xFFFF) { + goto ignore_packet; + } + res_idx = (u16_t)(res_idx + SIZEOF_DNS_QUERY); + + /* Check for error. If so, call callback to inform. 
*/ + if (hdr.flags2 & DNS_FLAG2_ERR_MASK) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in flags\n", entry->name)); + + /* if there is another backup DNS server to try + * then don't stop the DNS request + */ + if (dns_backupserver_available(entry)) { + /* avoid retrying the same server */ + entry->retries = DNS_MAX_RETRIES-1; + entry->tmr = 1; + + /* contact next available server for this entry */ + dns_check_entry(i); + + goto ignore_packet; + } + } else { + while ((nanswers > 0) && (res_idx < p->tot_len)) { + /* skip answer resource record's host name */ + res_idx = dns_skip_name(p, res_idx); + if (res_idx == 0xFFFF) { + goto ignore_packet; /* ignore this packet */ + } + + /* Check for IP address type and Internet class. Others are discarded. */ + if (pbuf_copy_partial(p, &ans, SIZEOF_DNS_ANSWER, res_idx) != SIZEOF_DNS_ANSWER) { + goto ignore_packet; /* ignore this packet */ + } + if (res_idx + SIZEOF_DNS_ANSWER > 0xFFFF) { + goto ignore_packet; + } + res_idx = (u16_t)(res_idx + SIZEOF_DNS_ANSWER); + + if (ans.cls == PP_HTONS(DNS_RRCLASS_IN)) { +#if LWIP_IPV4 + if ((ans.type == PP_HTONS(DNS_RRTYPE_A)) && (ans.len == PP_HTONS(sizeof(ip4_addr_t)))) { +#if LWIP_IPV4 && LWIP_IPV6 + if (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + ip4_addr_t ip4addr; + /* read the IP address after answer resource record's header */ + if (pbuf_copy_partial(p, &ip4addr, sizeof(ip4_addr_t), res_idx) != sizeof(ip4_addr_t)) { + goto ignore_packet; /* ignore this packet */ + } + ip_addr_copy_from_ip4(dns_table[i].ipaddr, ip4addr); + pbuf_free(p); + /* handle correct response */ + dns_correct_response(i, lwip_ntohl(ans.ttl)); + return; + } + } +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + if ((ans.type == PP_HTONS(DNS_RRTYPE_AAAA)) && (ans.len == PP_HTONS(sizeof(ip6_addr_p_t)))) { +#if LWIP_IPV4 && LWIP_IPV6 + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + ip6_addr_p_t ip6addr; + /* read the IP address after answer resource record's header */ + if (pbuf_copy_partial(p, &ip6addr, sizeof(ip6_addr_p_t), res_idx) != sizeof(ip6_addr_p_t)) { + goto ignore_packet; /* ignore this packet */ + } + /* @todo: scope ip6addr? Might be required for link-local addresses at least? 
*/ + ip_addr_copy_from_ip6_packed(dns_table[i].ipaddr, ip6addr); + pbuf_free(p); + /* handle correct response */ + dns_correct_response(i, lwip_ntohl(ans.ttl)); + return; + } + } +#endif /* LWIP_IPV6 */ + } + /* skip this answer */ + if ((int)(res_idx + lwip_htons(ans.len)) > 0xFFFF) { + goto ignore_packet; /* ignore this packet */ + } + res_idx = (u16_t)(res_idx + lwip_htons(ans.len)); + --nanswers; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) || + (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) { + if (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) { + /* IPv4 failed, try IPv6 */ + dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6; + } else { + /* IPv6 failed, try IPv4 */ + dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4; + } + pbuf_free(p); + dns_table[i].state = DNS_STATE_NEW; + dns_check_entry(i); + return; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in response\n", entry->name)); + } + /* call callback to indicate error, clean up memory and return */ + pbuf_free(p); + dns_call_found(i, NULL); + dns_table[i].state = DNS_STATE_UNUSED; + return; + } + } + } + +ignore_packet: + /* deallocate memory and return */ + pbuf_free(p); + return; +} + +/** + * Queues a new hostname to resolve and sends out a DNS query for that hostname + * + * @param name the hostname that is to be queried + * @param hostnamelen length of the hostname + * @param found a callback function to be called on success, failure or timeout + * @param callback_arg argument to pass to the callback function + * @return err_t return code. + */ +static err_t +dns_enqueue(const char *name, size_t hostnamelen, dns_found_callback found, + void *callback_arg LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype) LWIP_DNS_ISMDNS_ARG(u8_t is_mdns)) +{ + u8_t i; + u8_t lseq, lseqi; + struct dns_table_entry *entry = NULL; + size_t namelen; + struct dns_req_entry *req; + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + u8_t r; + /* check for duplicate entries */ + for (i = 0; i < DNS_TABLE_SIZE; i++) { + if ((dns_table[i].state == DNS_STATE_ASKING) && + (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0)) { +#if LWIP_IPV4 && LWIP_IPV6 + if (dns_table[i].reqaddrtype != dns_addrtype) { + /* requested address types don't match + this can lead to 2 concurrent requests, but mixing the address types + for the same host should not be that common */ + continue; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + /* this is a duplicate entry, find a free request entry */ + for (r = 0; r < DNS_MAX_REQUESTS; r++) { + if (dns_requests[r].found == 0) { + dns_requests[r].found = found; + dns_requests[r].arg = callback_arg; + dns_requests[r].dns_table_idx = i; + LWIP_DNS_SET_ADDRTYPE(dns_requests[r].reqaddrtype, dns_addrtype); + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": duplicate request\n", name)); + return ERR_INPROGRESS; + } + } + } + } + /* no duplicate entries found */ +#endif + + /* search an unused entry, or the oldest one */ + lseq = 0; + lseqi = DNS_TABLE_SIZE; + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + entry = &dns_table[i]; + /* is it an unused entry ? 
*/ + if (entry->state == DNS_STATE_UNUSED) { + break; + } + /* check if this is the oldest completed entry */ + if (entry->state == DNS_STATE_DONE) { + u8_t age = (u8_t)(dns_seqno - entry->seqno); + if (age > lseq) { + lseq = age; + lseqi = i; + } + } + } + + /* if we don't have found an unused entry, use the oldest completed one */ + if (i == DNS_TABLE_SIZE) { + if ((lseqi >= DNS_TABLE_SIZE) || (dns_table[lseqi].state != DNS_STATE_DONE)) { + /* no entry can be used now, table is full */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS entries table is full\n", name)); + return ERR_MEM; + } else { + /* use the oldest completed one */ + i = lseqi; + entry = &dns_table[i]; + } + } + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + /* find a free request entry */ + req = NULL; + for (r = 0; r < DNS_MAX_REQUESTS; r++) { + if (dns_requests[r].found == NULL) { + req = &dns_requests[r]; + break; + } + } + if (req == NULL) { + /* no request entry can be used now, table is full */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS request entries table is full\n", name)); + return ERR_MEM; + } + req->dns_table_idx = i; +#else + /* in this configuration, the entry index is the same as the request index */ + req = &dns_requests[i]; +#endif + + /* use this entry */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS entry %"U16_F"\n", name, (u16_t)(i))); + + /* fill the entry */ + entry->state = DNS_STATE_NEW; + entry->seqno = dns_seqno; + LWIP_DNS_SET_ADDRTYPE(entry->reqaddrtype, dns_addrtype); + LWIP_DNS_SET_ADDRTYPE(req->reqaddrtype, dns_addrtype); + req->found = found; + req->arg = callback_arg; + namelen = LWIP_MIN(hostnamelen, DNS_MAX_NAME_LENGTH - 1); + MEMCPY(entry->name, name, namelen); + entry->name[namelen] = 0; + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + entry->pcb_idx = dns_alloc_pcb(); + if (entry->pcb_idx >= DNS_MAX_SOURCE_PORTS) { + /* failed to get a UDP pcb */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": failed to allocate a pcb\n", name)); + entry->state = DNS_STATE_UNUSED; + req->found = NULL; + return ERR_MEM; + } + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS pcb %"U16_F"\n", name, (u16_t)(entry->pcb_idx))); +#endif + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + entry->is_mdns = is_mdns; +#endif + + dns_seqno++; + + /* force to send query without waiting timer */ + dns_check_entry(i); + + /* dns query is enqueued */ + return ERR_INPROGRESS; +} + +/** + * @ingroup dns + * Resolve a hostname (string) into an IP address. + * NON-BLOCKING callback version for use with raw API!!! + * + * Returns immediately with one of err_t return codes: + * - ERR_OK if hostname is a valid IP address string or the host + * name is already in the local names table. + * - ERR_INPROGRESS enqueue a request to be sent to the DNS server + * for resolution if no errors are present. + * - ERR_ARG: dns client not initialized or invalid hostname + * + * @param hostname the hostname that is to be queried + * @param addr pointer to a ip_addr_t where to store the address if it is already + * cached in the dns_table (only valid if ERR_OK is returned!) + * @param found a callback function to be called on success, failure or timeout (only if + * ERR_INPROGRESS is returned!) + * @param callback_arg argument to pass to the callback function + * @return a err_t return code. 
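+ *
+ * A minimal usage sketch for the raw API (editorial illustration; the callback
+ * name, hostname and argument are made-up example values):
+ *
+ *   static void my_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg)
+ *   {
+ *     if (ipaddr != NULL) {
+ *       // resolution succeeded, *ipaddr holds the result
+ *     } else {
+ *       // resolution failed or timed out
+ *     }
+ *   }
+ *
+ *   ip_addr_t addr;
+ *   err_t err = dns_gethostbyname("example.org", &addr, my_dns_found, NULL);
+ *   if (err == ERR_OK) {
+ *     // addr is already valid (cached entry, local host-list or IP literal)
+ *   } else if (err == ERR_INPROGRESS) {
+ *     // a query was enqueued; my_dns_found() is called later by the stack
+ *   }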
+ */ +err_t +dns_gethostbyname(const char *hostname, ip_addr_t *addr, dns_found_callback found, + void *callback_arg) +{ + return dns_gethostbyname_addrtype(hostname, addr, found, callback_arg, LWIP_DNS_ADDRTYPE_DEFAULT); +} + +/** + * @ingroup dns + * Like dns_gethostbyname, but returned address type can be controlled: + * @param hostname the hostname that is to be queried + * @param addr pointer to a ip_addr_t where to store the address if it is already + * cached in the dns_table (only valid if ERR_OK is returned!) + * @param found a callback function to be called on success, failure or timeout (only if + * ERR_INPROGRESS is returned!) + * @param callback_arg argument to pass to the callback function + * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 first, try IPv6 if IPv4 fails only + * - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 first, try IPv4 if IPv6 fails only + * - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only + * - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only + */ +err_t +dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, dns_found_callback found, + void *callback_arg, u8_t dns_addrtype) +{ + size_t hostnamelen; +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + u8_t is_mdns; +#endif + /* not initialized or no valid server yet, or invalid addr pointer + * or invalid hostname or invalid hostname length */ + if ((addr == NULL) || + (!hostname) || (!hostname[0])) { + return ERR_ARG; + } +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0) + if (dns_pcbs[0] == NULL) { + return ERR_ARG; + } +#endif + hostnamelen = strlen(hostname); + if (hostnamelen >= DNS_MAX_NAME_LENGTH) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_gethostbyname: name too long to resolve")); + return ERR_ARG; + } + + +#if LWIP_HAVE_LOOPIF + if (strcmp(hostname, "localhost") == 0) { + ip_addr_set_loopback(LWIP_DNS_ADDRTYPE_IS_IPV6(dns_addrtype), addr); + return ERR_OK; + } +#endif /* LWIP_HAVE_LOOPIF */ + + /* host name already in octet notation? set ip addr and return ERR_OK */ + if (ipaddr_aton(hostname, addr)) { +#if LWIP_IPV4 && LWIP_IPV6 + if ((IP_IS_V6(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV4)) || + (IP_IS_V4(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV6))) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + return ERR_OK; + } + } + /* already have this address cached? 
*/ + if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) || (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) { + /* fallback to 2nd IP type and try again to lookup */ + u8_t fallback; + if (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) { + fallback = LWIP_DNS_ADDRTYPE_IPV6; + } else { + fallback = LWIP_DNS_ADDRTYPE_IPV4; + } + if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(fallback)) == ERR_OK) { + return ERR_OK; + } + } +#else /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_UNUSED_ARG(dns_addrtype); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (strstr(hostname, ".local") == &hostname[hostnamelen] - 6) { + is_mdns = 1; + } else { + is_mdns = 0; + } + + if (!is_mdns) +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + /* prevent calling found callback if no server is set, return error instead */ + if (ip_addr_isany_val(dns_servers[0])) { + return ERR_VAL; + } + } + + /* queue query with specified callback */ + return dns_enqueue(hostname, hostnamelen, found, callback_arg LWIP_DNS_ADDRTYPE_ARG(dns_addrtype) + LWIP_DNS_ISMDNS_ARG(is_mdns)); +} + +#endif /* LWIP_DNS */ diff --git a/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c b/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c new file mode 100644 index 00000000..818c68f4 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c @@ -0,0 +1,608 @@ +/** + * @file + * Internet checksum functions.\n + * + * These are some reference implementations of the checksum algorithm, with the + * aim of being simple, correct and fully portable. Checksumming is the + * first thing you would want to optimize for your platform. If you create + * your own version, link it in and in your cc.h put: + * + * \#define LWIP_CHKSUM your_checksum_routine + * + * Or you can select from the implementations below by defining + * LWIP_CHKSUM_ALGORITHM to 1, 2 or 3. + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/inet_chksum.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" + +#include + +#ifndef LWIP_CHKSUM +# define LWIP_CHKSUM lwip_standard_chksum +# ifndef LWIP_CHKSUM_ALGORITHM +# define LWIP_CHKSUM_ALGORITHM 2 +# endif +u16_t lwip_standard_chksum(const void *dataptr, int len); +#endif +/* If none set: */ +#ifndef LWIP_CHKSUM_ALGORITHM +# define LWIP_CHKSUM_ALGORITHM 0 +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 1) /* Version #1 */ +/** + * lwip checksum + * + * @param dataptr points to start of data to be summed at any boundary + * @param len length of data to be summed + * @return host order (!) lwip checksum (non-inverted Internet sum) + * + * @note accumulator size limits summable length to 64k + * @note host endianess is irrelevant (p3 RFC1071) + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + u32_t acc; + u16_t src; + const u8_t *octetptr; + + acc = 0; + /* dataptr may be at odd or even addresses */ + octetptr = (const u8_t *)dataptr; + while (len > 1) { + /* declare first octet as most significant + thus assume network order, ignoring host order */ + src = (*octetptr) << 8; + octetptr++; + /* declare second octet as least significant */ + src |= (*octetptr); + octetptr++; + acc += src; + len -= 2; + } + if (len > 0) { + /* accumulate remaining octet */ + src = (*octetptr) << 8; + acc += src; + } + /* add deferred carry bits */ + acc = (acc >> 16) + (acc & 0x0000ffffUL); + if ((acc & 0xffff0000UL) != 0) { + acc = (acc >> 16) + (acc & 0x0000ffffUL); + } + /* This maybe a little confusing: reorder sum using lwip_htons() + instead of lwip_ntohs() since it has a little less call overhead. + The caller must invert bits for Internet sum ! */ + return lwip_htons((u16_t)acc); +} +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 2) /* Alternative version #2 */ +/* + * Curt McDowell + * Broadcom Corp. + * csm@broadcom.com + * + * IP checksum two bytes at a time with support for + * unaligned buffer. + * Works for len up to and including 0x20000. + * by Curt McDowell, Broadcom Corp. 12/08/2005 + * + * @param dataptr points to start of data to be summed at any boundary + * @param len length of data to be summed + * @return host order (!) lwip checksum (non-inverted Internet sum) + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + const u8_t *pb = (const u8_t *)dataptr; + const u16_t *ps; + u16_t t = 0; + u32_t sum = 0; + int odd = ((mem_ptr_t)pb & 1); + + /* Get aligned to u16_t */ + if (odd && len > 0) { + ((u8_t *)&t)[1] = *pb++; + len--; + } + + /* Add the bulk of the data */ + ps = (const u16_t *)(const void *)pb; + while (len > 1) { + sum += *ps++; + len -= 2; + } + + /* Consume left-over byte, if any */ + if (len > 0) { + ((u8_t *)&t)[0] = *(const u8_t *)ps; + } + + /* Add end bytes */ + sum += t; + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + sum = FOLD_U32T(sum); + sum = FOLD_U32T(sum); + + /* Swap if alignment was odd */ + if (odd) { + sum = SWAP_BYTES_IN_WORD(sum); + } + + return (u16_t)sum; +} +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 3) /* Alternative version #3 */ +/** + * An optimized checksum routine. Basically, it uses loop-unrolling on + * the checksum loop, treating the head and tail bytes specially, whereas + * the inner loop acts on 8 bytes at a time. + * + * @arg start of buffer to be checksummed. May be an odd byte address. + * @len number of bytes in the buffer to be checksummed. + * @return host order (!) 
lwip checksum (non-inverted Internet sum) + * + * by Curt McDowell, Broadcom Corp. December 8th, 2005 + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + const u8_t *pb = (const u8_t *)dataptr; + const u16_t *ps; + u16_t t = 0; + const u32_t *pl; + u32_t sum = 0, tmp; + /* starts at odd byte address? */ + int odd = ((mem_ptr_t)pb & 1); + + if (odd && len > 0) { + ((u8_t *)&t)[1] = *pb++; + len--; + } + + ps = (const u16_t *)(const void *)pb; + + if (((mem_ptr_t)ps & 3) && len > 1) { + sum += *ps++; + len -= 2; + } + + pl = (const u32_t *)(const void *)ps; + + while (len > 7) { + tmp = sum + *pl++; /* ping */ + if (tmp < sum) { + tmp++; /* add back carry */ + } + + sum = tmp + *pl++; /* pong */ + if (sum < tmp) { + sum++; /* add back carry */ + } + + len -= 8; + } + + /* make room in upper bits */ + sum = FOLD_U32T(sum); + + ps = (const u16_t *)pl; + + /* 16-bit aligned word remaining? */ + while (len > 1) { + sum += *ps++; + len -= 2; + } + + /* dangling tail byte remaining? */ + if (len > 0) { /* include odd byte */ + ((u8_t *)&t)[0] = *(const u8_t *)ps; + } + + sum += t; /* add end bytes */ + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + sum = FOLD_U32T(sum); + sum = FOLD_U32T(sum); + + if (odd) { + sum = SWAP_BYTES_IN_WORD(sum); + } + + return (u16_t)sum; +} +#endif + +/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */ +static u16_t +inet_cksum_pseudo_base(struct pbuf *p, u8_t proto, u16_t proto_len, u32_t acc) +{ + struct pbuf *q; + int swapped = 0; + + /* iterate through all pbuf in chain */ + for (q = p; q != NULL; q = q->next) { + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n", + (void *)q, (void *)q->next)); + acc += LWIP_CHKSUM(q->payload, q->len); + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/ + /* just executing this next line is probably faster that the if statement needed + to check whether we really need to execute it, and does no harm */ + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/ + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + + acc += (u32_t)lwip_htons((u16_t)proto); + acc += (u32_t)lwip_htons(proto_len); + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc)); + return (u16_t)~(acc & 0xffffUL); +} + +#if LWIP_IPV4 +/* inet_chksum_pseudo: + * + * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
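+ *
+ * Illustrative sketch of the typical call pattern for a UDP datagram (editorial
+ * addition; udphdr, udp_pbuf, src_ip and dst_ip are assumed to exist in the
+ * caller, and the checksum field is assumed to be zero before the call):
+ *
+ *   udphdr->chksum = 0;
+ *   u16_t chksum = inet_chksum_pseudo(udp_pbuf, IP_PROTO_UDP,
+ *                                     udp_pbuf->tot_len, &src_ip, &dst_ip);
+ *   if (chksum == 0x0000) {
+ *     chksum = 0xffff;  // RFC 768: an all-zero transmitted checksum means "none"
+ *   }
+ *   udphdr->chksum = chksum;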
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip4_addr_t *src, const ip4_addr_t *dest) +{ + u32_t acc; + u32_t addr; + + addr = ip4_addr_get_u32(src); + acc = (addr & 0xffffUL); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = ip4_addr_get_u32(dest); + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_base(p, proto, proto_len, acc); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain. + * IPv6 addresses are expected to be in network byte order. + * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param proto ipv6 protocol/next header (used for checksum of pseudo header) + * @param proto_len length of the ipv6 payload (used for checksum of pseudo header) + * @param src source ipv6 address (used for checksum of pseudo header) + * @param dest destination ipv6 address (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip6_addr_t *src, const ip6_addr_t *dest) +{ + u32_t acc = 0; + u32_t addr; + u8_t addr_part; + + for (addr_part = 0; addr_part < 4; addr_part++) { + addr = src->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = dest->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + } + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_base(p, proto, proto_len, acc); +} +#endif /* LWIP_IPV6 */ + +/* ip_chksum_pseudo: + * + * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip_addr_t *src, const ip_addr_t *dest) +{ +#if LWIP_IPV6 + if (IP_IS_V6(dest)) { + return ip6_chksum_pseudo(p, proto, proto_len, ip_2_ip6(src), ip_2_ip6(dest)); + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + { + return inet_chksum_pseudo(p, proto, proto_len, ip_2_ip4(src), ip_2_ip4(dest)); + } +#endif /* LWIP_IPV4 */ +} + +/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */ +static u16_t +inet_cksum_pseudo_partial_base(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, u32_t acc) +{ + struct pbuf *q; + int swapped = 0; + u16_t chklen; + + /* iterate through all pbuf in chain */ + for (q = p; (q != NULL) && (chksum_len > 0); q = q->next) { + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n", + (void *)q, (void *)q->next)); + chklen = q->len; + if (chklen > chksum_len) { + chklen = chksum_len; + } + acc += LWIP_CHKSUM(q->payload, chklen); + chksum_len = (u16_t)(chksum_len - chklen); + LWIP_ASSERT("delete me", chksum_len < 0x7fff); + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/ + /* fold the upper bit down */ + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/ + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + + acc += (u32_t)lwip_htons((u16_t)proto); + acc += (u32_t)lwip_htons(proto_len); + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc)); + return (u16_t)~(acc & 0xffffUL); +} + +#if LWIP_IPV4 +/* inet_chksum_pseudo_partial: + * + * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest) +{ + u32_t acc; + u32_t addr; + + addr = ip4_addr_get_u32(src); + acc = (addr & 0xffffUL); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = ip4_addr_get_u32(dest); + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain. + * IPv6 addresses are expected to be in network byte order. Will only compute for a + * portion of the payload. + * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param proto ipv6 protocol/next header (used for checksum of pseudo header) + * @param proto_len length of the ipv6 payload (used for checksum of pseudo header) + * @param chksum_len number of payload bytes used to compute chksum + * @param src source ipv6 address (used for checksum of pseudo header) + * @param dest destination ipv6 address (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest) +{ + u32_t acc = 0; + u32_t addr; + u8_t addr_part; + + for (addr_part = 0; addr_part < 4; addr_part++) { + addr = src->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = dest->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + } + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc); +} +#endif /* LWIP_IPV6 */ + +/* ip_chksum_pseudo_partial: + * + * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain. 
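+ *
+ * Illustrative sketch (editorial addition; this partial variant is what a
+ * UDP-Lite style sender would use, and q, cov_len, src and dst are assumed
+ * caller-side variables):
+ *
+ *   u16_t chksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDPLITE, q->tot_len,
+ *                                           cov_len, &src, &dst);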
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest) +{ +#if LWIP_IPV6 + if (IP_IS_V6(dest)) { + return ip6_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip6(src), ip_2_ip6(dest)); + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + { + return inet_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip4(src), ip_2_ip4(dest)); + } +#endif /* LWIP_IPV4 */ +} + +/* inet_chksum: + * + * Calculates the Internet checksum over a portion of memory. Used primarily for IP + * and ICMP. + * + * @param dataptr start of the buffer to calculate the checksum (no alignment needed) + * @param len length of the buffer to calculate the checksum + * @return checksum (as u16_t) to be saved directly in the protocol header + */ + +u16_t +inet_chksum(const void *dataptr, u16_t len) +{ + return (u16_t)~(unsigned int)LWIP_CHKSUM(dataptr, len); +} + +/** + * Calculate a checksum over a chain of pbufs (without pseudo-header, much like + * inet_chksum only pbufs are used). + * + * @param p pbuf chain over that the checksum should be calculated + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pbuf(struct pbuf *p) +{ + u32_t acc; + struct pbuf *q; + int swapped = 0; + + acc = 0; + for (q = p; q != NULL; q = q->next) { + acc += LWIP_CHKSUM(q->payload, q->len); + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + return (u16_t)~(acc & 0xffffUL); +} + +/* These are some implementations for LWIP_CHKSUM_COPY, which copies data + * like MEMCPY but generates a checksum at the same time. Since this is a + * performance-sensitive function, you might want to create your own version + * in assembly targeted at your hardware by defining it in lwipopts.h: + * #define LWIP_CHKSUM_COPY(dst, src, len) your_chksum_copy(dst, src, len) + */ + +#if (LWIP_CHKSUM_COPY_ALGORITHM == 1) /* Version #1 */ +/** Safe but slow: first call MEMCPY, then call LWIP_CHKSUM. + * For architectures with big caches, data might still be in cache when + * generating the checksum after copying. + */ +u16_t +lwip_chksum_copy(void *dst, const void *src, u16_t len) +{ + MEMCPY(dst, src, len); + return LWIP_CHKSUM(dst, len); +} +#endif /* (LWIP_CHKSUM_COPY_ALGORITHM == 1) */ diff --git a/Middlewares/Third_Party/LwIP/src/core/init.c b/Middlewares/Third_Party/LwIP/src/core/init.c new file mode 100644 index 00000000..b3737a35 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/init.c @@ -0,0 +1,380 @@ +/** + * @file + * Modules initialization + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +#include "lwip/opt.h" + +#include "lwip/init.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/sockets.h" +#include "lwip/ip.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/timeouts.h" +#include "lwip/etharp.h" +#include "lwip/ip6.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/api.h" + +#include "netif/ppp/ppp_opts.h" +#include "netif/ppp/ppp_impl.h" + +#ifndef LWIP_SKIP_PACKING_CHECK + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct packed_struct_test { + PACK_STRUCT_FLD_8(u8_t dummy1); + PACK_STRUCT_FIELD(u32_t dummy2); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define PACKED_STRUCT_TEST_EXPECTED_SIZE 5 + +#endif + +/* Compile-time sanity checks for configuration errors. + * These can be done independently of LWIP_DEBUG, without penalty. 
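+ * (Review note, not upstream text: the packing self-test above relies on a
+ * u8_t followed by a u32_t packing to the 5 bytes of
+ * PACKED_STRUCT_TEST_EXPECTED_SIZE; without working PACK_STRUCT_* macros most
+ * ABIs pad the struct to 8 bytes and lwip_init() asserts on the mismatch.)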
+ */ +#ifndef BYTE_ORDER +#error "BYTE_ORDER is not defined, you have to define it in your cc.h" +#endif +#if (!IP_SOF_BROADCAST && IP_SOF_BROADCAST_RECV) +#error "If you want to use broadcast filter per pcb on recv operations, you have to define IP_SOF_BROADCAST=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_UDPLITE) +#error "If you want to use UDP Lite, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_DHCP) +#error "If you want to use DHCP, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && !LWIP_RAW && LWIP_MULTICAST_TX_OPTIONS) +#error "If you want to use LWIP_MULTICAST_TX_OPTIONS, you have to define LWIP_UDP=1 and/or LWIP_RAW=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_DNS) +#error "If you want to use DNS, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if !MEMP_MEM_MALLOC /* MEMP_NUM_* checks are disabled when not using the pool allocator */ +#if (LWIP_ARP && ARP_QUEUEING && (MEMP_NUM_ARP_QUEUE<=0)) +#error "If you want to use ARP Queueing, you have to define MEMP_NUM_ARP_QUEUE>=1 in your lwipopts.h" +#endif +#if (LWIP_RAW && (MEMP_NUM_RAW_PCB<=0)) +#error "If you want to use RAW, you have to define MEMP_NUM_RAW_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_UDP && (MEMP_NUM_UDP_PCB<=0)) +#error "If you want to use UDP, you have to define MEMP_NUM_UDP_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_TCP && (MEMP_NUM_TCP_PCB<=0)) +#error "If you want to use TCP, you have to define MEMP_NUM_TCP_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && (MEMP_NUM_IGMP_GROUP<=1)) +#error "If you want to use IGMP, you have to define MEMP_NUM_IGMP_GROUP>1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && !LWIP_MULTICAST_TX_OPTIONS) +#error "If you want to use IGMP, you have to define LWIP_MULTICAST_TX_OPTIONS==1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && !LWIP_IPV4) +#error "IGMP needs LWIP_IPV4 enabled in your lwipopts.h" +#endif +#if ((LWIP_NETCONN || LWIP_SOCKET) && (MEMP_NUM_TCPIP_MSG_API<=0)) +#error "If you want to use Sequential API, you have to define MEMP_NUM_TCPIP_MSG_API>=1 in your lwipopts.h" +#endif +/* There must be sufficient timeouts, taking into account requirements of the subsystems. */ +#if LWIP_TIMERS && (MEMP_NUM_SYS_TIMEOUT < LWIP_NUM_SYS_TIMEOUT_INTERNAL) +#error "MEMP_NUM_SYS_TIMEOUT is too low to accomodate all required timeouts" +#endif +#if (IP_REASSEMBLY && (MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS)) +#error "MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS doesn't make sense since each struct ip_reassdata must hold 2 pbufs at least!" +#endif +#endif /* !MEMP_MEM_MALLOC */ +#if LWIP_WND_SCALE +#if (LWIP_TCP && (TCP_WND > 0xffffffff)) +#error "If you want to use TCP, TCP_WND must fit in an u32_t, so, you have to reduce it in your lwipopts.h" +#endif +#if (LWIP_TCP && (TCP_RCV_SCALE > 14)) +#error "The maximum valid window scale value is 14!" +#endif +#if (LWIP_TCP && (TCP_WND > (0xFFFFU << TCP_RCV_SCALE))) +#error "TCP_WND is bigger than the configured LWIP_WND_SCALE allows!" +#endif +#if (LWIP_TCP && ((TCP_WND >> TCP_RCV_SCALE) == 0)) +#error "TCP_WND is too small for the configured LWIP_WND_SCALE (results in zero window)!" 
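+/* Review annotation (not upstream lwIP): with scaling enabled the advertised
+ * window is TCP_WND >> TCP_RCV_SCALE, so TCP_WND has to satisfy both
+ * TCP_WND <= (0xFFFF << TCP_RCV_SCALE) and (TCP_WND >> TCP_RCV_SCALE) > 0.
+ * Worked example: TCP_RCV_SCALE = 2 allows TCP_WND up to 0xFFFF << 2 = 262140
+ * and requires TCP_WND >= 4 so the scaled window does not truncate to zero. */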
+#endif +#else /* LWIP_WND_SCALE */ +#if (LWIP_TCP && (TCP_WND > 0xffff)) +#error "If you want to use TCP, TCP_WND must fit in an u16_t, so, you have to reduce it in your lwipopts.h (or enable window scaling)" +#endif +#endif /* LWIP_WND_SCALE */ +#if (LWIP_TCP && (TCP_SND_QUEUELEN > 0xffff)) +#error "If you want to use TCP, TCP_SND_QUEUELEN must fit in an u16_t, so, you have to reduce it in your lwipopts.h" +#endif +#if (LWIP_TCP && (TCP_SND_QUEUELEN < 2)) +#error "TCP_SND_QUEUELEN must be at least 2 for no-copy TCP writes to work" +#endif +#if (LWIP_TCP && ((TCP_MAXRTX > 12) || (TCP_SYNMAXRTX > 12))) +#error "If you want to use TCP, TCP_MAXRTX and TCP_SYNMAXRTX must less or equal to 12 (due to tcp_backoff table), so, you have to reduce them in your lwipopts.h" +#endif +#if (LWIP_TCP && TCP_LISTEN_BACKLOG && ((TCP_DEFAULT_LISTEN_BACKLOG < 0) || (TCP_DEFAULT_LISTEN_BACKLOG > 0xff))) +#error "If you want to use TCP backlog, TCP_DEFAULT_LISTEN_BACKLOG must fit into an u8_t" +#endif +#if (LWIP_TCP && LWIP_TCP_SACK_OUT && !TCP_QUEUE_OOSEQ) +#error "To use LWIP_TCP_SACK_OUT, TCP_QUEUE_OOSEQ needs to be enabled" +#endif +#if (LWIP_TCP && LWIP_TCP_SACK_OUT && (LWIP_TCP_MAX_SACK_NUM < 1)) +#error "LWIP_TCP_MAX_SACK_NUM must be greater than 0" +#endif +#if (LWIP_NETIF_API && (NO_SYS==1)) +#error "If you want to use NETIF API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if ((LWIP_SOCKET || LWIP_NETCONN) && (NO_SYS==1)) +#error "If you want to use Sequential API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if (LWIP_PPP_API && (NO_SYS==1)) +#error "If you want to use PPP API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if (LWIP_PPP_API && (PPP_SUPPORT==0)) +#error "If you want to use PPP API, you have to enable PPP_SUPPORT in your lwipopts.h" +#endif +#if (((!LWIP_DHCP) || (!LWIP_AUTOIP)) && LWIP_DHCP_AUTOIP_COOP) +#error "If you want to use DHCP/AUTOIP cooperation mode, you have to define LWIP_DHCP=1 and LWIP_AUTOIP=1 in your lwipopts.h" +#endif +#if (((!LWIP_DHCP) || (!LWIP_ARP)) && DHCP_DOES_ARP_CHECK) +#error "If you want to use DHCP ARP checking, you have to define LWIP_DHCP=1 and LWIP_ARP=1 in your lwipopts.h" +#endif +#if (!LWIP_ARP && LWIP_AUTOIP) +#error "If you want to use AUTOIP, you have to define LWIP_ARP=1 in your lwipopts.h" +#endif +#if (LWIP_TCP && ((LWIP_EVENT_API && LWIP_CALLBACK_API) || (!LWIP_EVENT_API && !LWIP_CALLBACK_API))) +#error "One and exactly one of LWIP_EVENT_API and LWIP_CALLBACK_API has to be enabled in your lwipopts.h" +#endif +#if (LWIP_ALTCP && LWIP_EVENT_API) +#error "The application layered tcp API does not work with LWIP_EVENT_API" +#endif +#if (MEM_LIBC_MALLOC && MEM_USE_POOLS) +#error "MEM_LIBC_MALLOC and MEM_USE_POOLS may not both be simultaneously enabled in your lwipopts.h" +#endif +#if (MEM_USE_POOLS && !MEMP_USE_CUSTOM_POOLS) +#error "MEM_USE_POOLS requires custom pools (MEMP_USE_CUSTOM_POOLS) to be enabled in your lwipopts.h" +#endif +#if (PBUF_POOL_BUFSIZE <= MEM_ALIGNMENT) +#error "PBUF_POOL_BUFSIZE must be greater than MEM_ALIGNMENT or the offset may take the full first pbuf" +#endif +#if (DNS_LOCAL_HOSTLIST && !DNS_LOCAL_HOSTLIST_IS_DYNAMIC && !(defined(DNS_LOCAL_HOSTLIST_INIT))) +#error "you have to define define DNS_LOCAL_HOSTLIST_INIT {{'host1', 0x123}, {'host2', 0x234}} to initialize DNS_LOCAL_HOSTLIST" +#endif +#if PPP_SUPPORT && !PPPOS_SUPPORT && !PPPOE_SUPPORT && !PPPOL2TP_SUPPORT +#error "PPP_SUPPORT needs at least one of PPPOS_SUPPORT, PPPOE_SUPPORT or PPPOL2TP_SUPPORT turned on" +#endif +#if 
PPP_SUPPORT && !PPP_IPV4_SUPPORT && !PPP_IPV6_SUPPORT +#error "PPP_SUPPORT needs PPP_IPV4_SUPPORT and/or PPP_IPV6_SUPPORT turned on" +#endif +#if PPP_SUPPORT && PPP_IPV4_SUPPORT && !LWIP_IPV4 +#error "PPP_IPV4_SUPPORT needs LWIP_IPV4 turned on" +#endif +#if PPP_SUPPORT && PPP_IPV6_SUPPORT && !LWIP_IPV6 +#error "PPP_IPV6_SUPPORT needs LWIP_IPV6 turned on" +#endif +#if !LWIP_ETHERNET && (LWIP_ARP || PPPOE_SUPPORT) +#error "LWIP_ETHERNET needs to be turned on for LWIP_ARP or PPPOE_SUPPORT" +#endif +#if LWIP_TCPIP_CORE_LOCKING_INPUT && !LWIP_TCPIP_CORE_LOCKING +#error "When using LWIP_TCPIP_CORE_LOCKING_INPUT, LWIP_TCPIP_CORE_LOCKING must be enabled, too" +#endif +#if LWIP_TCP && LWIP_NETIF_TX_SINGLE_PBUF && !TCP_OVERSIZE +#error "LWIP_NETIF_TX_SINGLE_PBUF needs TCP_OVERSIZE enabled to create single-pbuf TCP packets" +#endif +#if LWIP_NETCONN && LWIP_TCP +#if NETCONN_COPY != TCP_WRITE_FLAG_COPY +#error "NETCONN_COPY != TCP_WRITE_FLAG_COPY" +#endif +#if NETCONN_MORE != TCP_WRITE_FLAG_MORE +#error "NETCONN_MORE != TCP_WRITE_FLAG_MORE" +#endif +#endif /* LWIP_NETCONN && LWIP_TCP */ +#if LWIP_SOCKET +#endif /* LWIP_SOCKET */ + + +/* Compile-time checks for deprecated options. + */ +#ifdef MEMP_NUM_TCPIP_MSG +#error "MEMP_NUM_TCPIP_MSG option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef TCP_REXMIT_DEBUG +#error "TCP_REXMIT_DEBUG option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef RAW_STATS +#error "RAW_STATS option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef ETHARP_QUEUE_FIRST +#error "ETHARP_QUEUE_FIRST option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef ETHARP_ALWAYS_INSERT +#error "ETHARP_ALWAYS_INSERT option is deprecated. Remove it from your lwipopts.h." +#endif +#if !NO_SYS && LWIP_TCPIP_CORE_LOCKING && LWIP_COMPAT_MUTEX && !defined(LWIP_COMPAT_MUTEX_ALLOWED) +#error "LWIP_COMPAT_MUTEX cannot prevent priority inversion. It is recommended to implement priority-aware mutexes. (Define LWIP_COMPAT_MUTEX_ALLOWED to disable this error.)" +#endif + +#ifndef LWIP_DISABLE_TCP_SANITY_CHECKS +#define LWIP_DISABLE_TCP_SANITY_CHECKS 0 +#endif +#ifndef LWIP_DISABLE_MEMP_SANITY_CHECKS +#define LWIP_DISABLE_MEMP_SANITY_CHECKS 0 +#endif + +/* MEMP sanity checks */ +#if MEMP_MEM_MALLOC +#if !LWIP_DISABLE_MEMP_SANITY_CHECKS +#if LWIP_NETCONN || LWIP_SOCKET +#if !MEMP_NUM_NETCONN && LWIP_SOCKET +#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN cannot be 0 when using sockets!" +#endif +#else /* MEMP_MEM_MALLOC */ +#if MEMP_NUM_NETCONN > (MEMP_NUM_TCP_PCB+MEMP_NUM_TCP_PCB_LISTEN+MEMP_NUM_UDP_PCB+MEMP_NUM_RAW_PCB) +#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN should be less than the sum of MEMP_NUM_{TCP,RAW,UDP}_PCB+MEMP_NUM_TCP_PCB_LISTEN. If you know what you are doing, define LWIP_DISABLE_MEMP_SANITY_CHECKS to 1 to disable this error." +#endif +#endif /* LWIP_NETCONN || LWIP_SOCKET */ +#endif /* !LWIP_DISABLE_MEMP_SANITY_CHECKS */ +#if MEM_USE_POOLS +#error "MEMP_MEM_MALLOC and MEM_USE_POOLS cannot be enabled at the same time" +#endif +#ifdef LWIP_HOOK_MEMP_AVAILABLE +#error "LWIP_HOOK_MEMP_AVAILABLE doesn't make sense with MEMP_MEM_MALLOC" +#endif +#endif /* MEMP_MEM_MALLOC */ + +/* TCP sanity checks */ +#if !LWIP_DISABLE_TCP_SANITY_CHECKS +#if LWIP_TCP +#if !MEMP_MEM_MALLOC && (MEMP_NUM_TCP_SEG < TCP_SND_QUEUELEN) +#error "lwip_sanity_check: WARNING: MEMP_NUM_TCP_SEG should be at least as big as TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." 
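+/* Review annotation (not upstream lwIP): worked example for the send-queue
+ * checks in this block and the ones that follow. With TCP_MSS = 1460 and
+ * TCP_SND_BUF = 4 * TCP_MSS = 5840, TCP_SND_BUF is >= 2 * TCP_MSS,
+ * TCP_SND_QUEUELEN must be at least 2 * (TCP_SND_BUF / TCP_MSS) = 8, and
+ * MEMP_NUM_TCP_SEG must be at least that TCP_SND_QUEUELEN. */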
+#endif +#if TCP_SND_BUF < (2 * TCP_MSS) +#error "lwip_sanity_check: WARNING: TCP_SND_BUF must be at least as much as (2 * TCP_MSS) for things to work smoothly. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SND_QUEUELEN < (2 * (TCP_SND_BUF / TCP_MSS)) +#error "lwip_sanity_check: WARNING: TCP_SND_QUEUELEN must be at least as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SNDLOWAT >= TCP_SND_BUF +#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must be less than TCP_SND_BUF. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SNDLOWAT >= (0xFFFF - (4 * TCP_MSS)) +#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must at least be 4*MSS below u16_t overflow!" +#endif +#if TCP_SNDQUEUELOWAT >= TCP_SND_QUEUELEN +#error "lwip_sanity_check: WARNING: TCP_SNDQUEUELOWAT must be less than TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (PBUF_POOL_BUFSIZE <= (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)) +#error "lwip_sanity_check: WARNING: PBUF_POOL_BUFSIZE does not provide enough space for protocol headers. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (TCP_WND > (PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)))) +#error "lwip_sanity_check: WARNING: TCP_WND is larger than space provided by PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - protocol headers). If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_WND < TCP_MSS +#error "lwip_sanity_check: WARNING: TCP_WND is smaller than MSS. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#endif /* LWIP_TCP */ +#endif /* !LWIP_DISABLE_TCP_SANITY_CHECKS */ + +/** + * @ingroup lwip_nosys + * Initialize all modules. + * Use this in NO_SYS mode. Use tcpip_init() otherwise. + */ +void +lwip_init(void) +{ +#ifndef LWIP_SKIP_CONST_CHECK + int a = 0; + LWIP_UNUSED_ARG(a); + LWIP_ASSERT("LWIP_CONST_CAST not implemented correctly. Check your lwIP port.", LWIP_CONST_CAST(void *, &a) == &a); +#endif +#ifndef LWIP_SKIP_PACKING_CHECK + LWIP_ASSERT("Struct packing not implemented correctly. 
Check your lwIP port.", sizeof(struct packed_struct_test) == PACKED_STRUCT_TEST_EXPECTED_SIZE); +#endif + + /* Modules initialization */ + stats_init(); +#if !NO_SYS + sys_init(); +#endif /* !NO_SYS */ + mem_init(); + memp_init(); + pbuf_init(); + netif_init(); +#if LWIP_IPV4 + ip_init(); +#if LWIP_ARP + etharp_init(); +#endif /* LWIP_ARP */ +#endif /* LWIP_IPV4 */ +#if LWIP_RAW + raw_init(); +#endif /* LWIP_RAW */ +#if LWIP_UDP + udp_init(); +#endif /* LWIP_UDP */ +#if LWIP_TCP + tcp_init(); +#endif /* LWIP_TCP */ +#if LWIP_IGMP + igmp_init(); +#endif /* LWIP_IGMP */ +#if LWIP_DNS + dns_init(); +#endif /* LWIP_DNS */ +#if PPP_SUPPORT + ppp_init(); +#endif + +#if LWIP_TIMERS + sys_timeouts_init(); +#endif /* LWIP_TIMERS */ +} diff --git a/Middlewares/Third_Party/LwIP/src/core/ip.c b/Middlewares/Third_Party/LwIP/src/core/ip.c new file mode 100644 index 00000000..18514cf3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ip.c @@ -0,0 +1,167 @@ +/** + * @file + * Common IPv4 and IPv6 code + * + * @defgroup ip IP + * @ingroup callbackstyle_api + * + * @defgroup ip4 IPv4 + * @ingroup ip + * + * @defgroup ip6 IPv6 + * @ingroup ip + * + * @defgroup ipaddr IP address handling + * @ingroup infrastructure + * + * @defgroup ip4addr IPv4 only + * @ingroup ipaddr + * + * @defgroup ip6addr IPv6 only + * @ingroup ipaddr + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 || LWIP_IPV6 + +#include "lwip/ip_addr.h" +#include "lwip/ip.h" + +/** Global data for both IPv4 and IPv6 */ +struct ip_globals ip_data; + +#if LWIP_IPV4 && LWIP_IPV6 + +const ip_addr_t ip_addr_any_type = IPADDR_ANY_TYPE_INIT; + +/** + * @ingroup ipaddr + * Convert numeric IP address (both versions) into ASCII representation. + * returns ptr to static buffer; not reentrant! + * + * @param addr ip address in network order to convert + * @return pointer to a global static (!) 
buffer that holds the ASCII + * representation of addr + */ +char *ipaddr_ntoa(const ip_addr_t *addr) +{ + if (addr == NULL) { + return NULL; + } + if (IP_IS_V6(addr)) { + return ip6addr_ntoa(ip_2_ip6(addr)); + } else { + return ip4addr_ntoa(ip_2_ip4(addr)); + } +} + +/** + * @ingroup ipaddr + * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char *ipaddr_ntoa_r(const ip_addr_t *addr, char *buf, int buflen) +{ + if (addr == NULL) { + return NULL; + } + if (IP_IS_V6(addr)) { + return ip6addr_ntoa_r(ip_2_ip6(addr), buf, buflen); + } else { + return ip4addr_ntoa_r(ip_2_ip4(addr), buf, buflen); + } +} + +/** + * @ingroup ipaddr + * Convert IP address string (both versions) to numeric. + * The version is auto-detected from the string. + * + * @param cp IP address string to convert + * @param addr conversion result is stored here + * @return 1 on success, 0 on error + */ +int +ipaddr_aton(const char *cp, ip_addr_t *addr) +{ + if (cp != NULL) { + const char *c; + for (c = cp; *c != 0; c++) { + if (*c == ':') { + /* contains a colon: IPv6 address */ + if (addr) { + IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V6); + } + return ip6addr_aton(cp, ip_2_ip6(addr)); + } else if (*c == '.') { + /* contains a dot: IPv4 address */ + break; + } + } + /* call ip4addr_aton as fallback or if IPv4 was found */ + if (addr) { + IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V4); + } + return ip4addr_aton(cp, ip_2_ip4(addr)); + } + return 0; +} + +/** + * @ingroup lwip_nosys + * If both IP versions are enabled, this function can dispatch packets to the correct one. + * Don't call directly, pass to netif_add() and call netif->input(). + */ +err_t +ip_input(struct pbuf *p, struct netif *inp) +{ + if (p != NULL) { + if (IP_HDR_GET_VERSION(p->payload) == 6) { + return ip6_input(p, inp); + } + return ip4_input(p, inp); + } + return ERR_VAL; +} + +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#endif /* LWIP_IPV4 || LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c new file mode 100644 index 00000000..9f7139bc --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c @@ -0,0 +1,527 @@ +/** + * @file + * AutoIP Automatic LinkLocal IP Configuration + * + * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. + * + * @defgroup autoip AUTOIP + * @ingroup ip4 + * AUTOIP related functions + * USAGE: + * + * define @ref LWIP_AUTOIP 1 in your lwipopts.h + * Options: + * AUTOIP_TMR_INTERVAL msecs, + * I recommend a value of 100. The value must divide 1000 with a remainder almost 0. + * Possible values are 1000, 500, 333, 250, 200, 166, 142, 125, 111, 100 .... + * + * Without DHCP: + * - Call autoip_start() after netif_add(). + * + * With DHCP: + * - define @ref LWIP_DHCP_AUTOIP_COOP 1 in your lwipopts.h. + * - Configure your DHCP Client. + * + * @see netifapi_autoip + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/mem.h" +/* #include "lwip/udp.h" */ +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/autoip.h" +#include "lwip/etharp.h" +#include "lwip/prot/autoip.h" + +#include + +/** Pseudo random macro based on netif informations. + * You could use "rand()" from the C Library if you define LWIP_AUTOIP_RAND in lwipopts.h */ +#ifndef LWIP_AUTOIP_RAND +#define LWIP_AUTOIP_RAND(netif) ( (((u32_t)((netif->hwaddr[5]) & 0xff) << 24) | \ + ((u32_t)((netif->hwaddr[3]) & 0xff) << 16) | \ + ((u32_t)((netif->hwaddr[2]) & 0xff) << 8) | \ + ((u32_t)((netif->hwaddr[4]) & 0xff))) + \ + (netif_autoip_data(netif)? netif_autoip_data(netif)->tried_llipaddr : 0)) +#endif /* LWIP_AUTOIP_RAND */ + +/** + * Macro that generates the initial IP address to be tried by AUTOIP. + * If you want to override this, define it to something else in lwipopts.h. + */ +#ifndef LWIP_AUTOIP_CREATE_SEED_ADDR +#define LWIP_AUTOIP_CREATE_SEED_ADDR(netif) \ + lwip_htonl(AUTOIP_RANGE_START + ((u32_t)(((u8_t)(netif->hwaddr[4])) | \ + ((u32_t)((u8_t)(netif->hwaddr[5]))) << 8))) +#endif /* LWIP_AUTOIP_CREATE_SEED_ADDR */ + +/* static functions */ +static err_t autoip_arp_announce(struct netif *netif); +static void autoip_start_probing(struct netif *netif); + +/** + * @ingroup autoip + * Set a statically allocated struct autoip to work with. + * Using this prevents autoip_start to allocate it using mem_malloc. 
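+ * Typical use (illustrative sketch, assuming a netif pointer called 'netif'
+ * that has already been added with netif_add()):
+ * @code
+ *   static struct autoip netif_autoip;
+ *   autoip_set_struct(netif, &netif_autoip);
+ *   autoip_start(netif);
+ * @endcode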
+ * + * @param netif the netif for which to set the struct autoip + * @param autoip (uninitialised) autoip struct allocated by the application + */ +void +autoip_set_struct(struct netif *netif, struct autoip *autoip) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("autoip != NULL", autoip != NULL); + LWIP_ASSERT("netif already has a struct autoip set", + netif_autoip_data(netif) == NULL); + + /* clear data structure */ + memset(autoip, 0, sizeof(struct autoip)); + /* autoip->state = AUTOIP_STATE_OFF; */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip); +} + +/** Restart AutoIP client and check the next address (conflict detected) + * + * @param netif The netif under AutoIP control + */ +static void +autoip_restart(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + autoip->tried_llipaddr++; + autoip_start(netif); +} + +/** + * Handle a IP address conflict after an ARP conflict detection + */ +static void +autoip_handle_arp_conflict(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + /* RFC3927, 2.5 "Conflict Detection and Defense" allows two options where + a) means retreat on the first conflict and + b) allows to keep an already configured address when having only one + conflict in 10 seconds + We use option b) since it helps to improve the chance that one of the two + conflicting hosts may be able to retain its address. */ + + if (autoip->lastconflict > 0) { + /* retreat, there was a conflicting ARP in the last DEFEND_INTERVAL seconds */ + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_handle_arp_conflict(): we are defending, but in DEFEND_INTERVAL, retreating\n")); + + /* Active TCP sessions are aborted when removing the ip addresss */ + autoip_restart(netif); + } else { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_handle_arp_conflict(): we are defend, send ARP Announce\n")); + autoip_arp_announce(netif); + autoip->lastconflict = DEFEND_INTERVAL * AUTOIP_TICKS_PER_SECOND; + } +} + +/** + * Create an IP-Address out of range 169.254.1.0 to 169.254.254.255 + * + * @param netif network interface on which create the IP-Address + * @param ipaddr ip address to initialize + */ +static void +autoip_create_addr(struct netif *netif, ip4_addr_t *ipaddr) +{ + struct autoip *autoip = netif_autoip_data(netif); + + /* Here we create an IP-Address out of range 169.254.1.0 to 169.254.254.255 + * compliant to RFC 3927 Section 2.1 + * We have 254 * 256 possibilities */ + + u32_t addr = lwip_ntohl(LWIP_AUTOIP_CREATE_SEED_ADDR(netif)); + addr += autoip->tried_llipaddr; + addr = AUTOIP_NET | (addr & 0xffff); + /* Now, 169.254.0.0 <= addr <= 169.254.255.255 */ + + if (addr < AUTOIP_RANGE_START) { + addr += AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1; + } + if (addr > AUTOIP_RANGE_END) { + addr -= AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1; + } + LWIP_ASSERT("AUTOIP address not in range", (addr >= AUTOIP_RANGE_START) && + (addr <= AUTOIP_RANGE_END)); + ip4_addr_set_u32(ipaddr, lwip_htonl(addr)); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_create_addr(): tried_llipaddr=%"U16_F", %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + (u16_t)(autoip->tried_llipaddr), ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), + ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr))); +} + +/** + * Sends an ARP probe from a network interface + * + * @param netif network interface used to send the probe + */ +static err_t +autoip_arp_probe(struct netif 
*netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + /* this works because netif->ip_addr is ANY */ + return etharp_request(netif, &autoip->llipaddr); +} + +/** + * Sends an ARP announce from a network interface + * + * @param netif network interface used to send the announce + */ +static err_t +autoip_arp_announce(struct netif *netif) +{ + return etharp_gratuitous(netif); +} + +/** + * Configure interface for use with current LL IP-Address + * + * @param netif network interface to configure with current LL IP-Address + */ +static err_t +autoip_bind(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + ip4_addr_t sn_mask, gw_addr; + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_bind(netif=%p) %c%c%"U16_F" %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num, + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + + IP4_ADDR(&sn_mask, 255, 255, 0, 0); + IP4_ADDR(&gw_addr, 0, 0, 0, 0); + + netif_set_addr(netif, &autoip->llipaddr, &sn_mask, &gw_addr); + /* interface is used by routing now that an address is set */ + + return ERR_OK; +} + +/** + * @ingroup autoip + * Start AutoIP client + * + * @param netif network interface on which start the AutoIP client + */ +err_t +autoip_start(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + err_t result = ERR_OK; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;); + + /* Set IP-Address, Netmask and Gateway to 0 to make sure that + * ARP Packets are formed correctly + */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_start(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], + netif->name[1], (u16_t)netif->num)); + if (autoip == NULL) { + /* no AutoIP client attached yet? */ + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_start(): starting new AUTOIP client\n")); + autoip = (struct autoip *)mem_calloc(1, sizeof(struct autoip)); + if (autoip == NULL) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_start(): could not allocate autoip\n")); + return ERR_MEM; + } + /* store this AutoIP client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_start(): allocated autoip")); + } else { + autoip->state = AUTOIP_STATE_OFF; + autoip->ttw = 0; + autoip->sent_num = 0; + ip4_addr_set_zero(&autoip->llipaddr); + autoip->lastconflict = 0; + } + + autoip_create_addr(netif, &(autoip->llipaddr)); + autoip_start_probing(netif); + + return result; +} + +static void +autoip_start_probing(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + autoip->state = AUTOIP_STATE_PROBING; + autoip->sent_num = 0; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_start_probing(): changing state to PROBING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + + /* time to wait to first probe, this is randomly + * chosen out of 0 to PROBE_WAIT seconds. 
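+ * (Illustrative numbers: with the recommended AUTOIP_TMR_INTERVAL of 100 ms,
+ * AUTOIP_TICKS_PER_SECOND is 10, so for the RFC 3927 PROBE_WAIT of 1 second
+ * the modulo below gives a ttw of 0..9 ticks, i.e. 0..900 ms.)
+ * Choosing the delay this way is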
+ * compliant to RFC 3927 Section 2.2.1 + */ + autoip->ttw = (u16_t)(LWIP_AUTOIP_RAND(netif) % (PROBE_WAIT * AUTOIP_TICKS_PER_SECOND)); + + /* + * if we tried more then MAX_CONFLICTS we must limit our rate for + * acquiring and probing address + * compliant to RFC 3927 Section 2.2.1 + */ + if (autoip->tried_llipaddr > MAX_CONFLICTS) { + autoip->ttw = RATE_LIMIT_INTERVAL * AUTOIP_TICKS_PER_SECOND; + } +} + +/** + * Handle a possible change in the network configuration. + * + * If there is an AutoIP address configured, take the interface down + * and begin probing with the same address. + */ +void +autoip_network_changed(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + if (autoip && (autoip->state != AUTOIP_STATE_OFF)) { + autoip_start_probing(netif); + } +} + +/** + * @ingroup autoip + * Stop AutoIP client + * + * @param netif network interface on which stop the AutoIP client + */ +err_t +autoip_stop(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + LWIP_ASSERT_CORE_LOCKED(); + if (autoip != NULL) { + autoip->state = AUTOIP_STATE_OFF; + if (ip4_addr_islinklocal(netif_ip4_addr(netif))) { + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + } + } + return ERR_OK; +} + +/** + * Has to be called in loop every AUTOIP_TMR_INTERVAL milliseconds + */ +void +autoip_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct autoip *autoip = netif_autoip_data(netif); + /* only act on AutoIP configured interfaces */ + if (autoip != NULL) { + if (autoip->lastconflict > 0) { + autoip->lastconflict--; + } + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_tmr() AutoIP-State: %"U16_F", ttw=%"U16_F"\n", + (u16_t)(autoip->state), autoip->ttw)); + + if (autoip->ttw > 0) { + autoip->ttw--; + } + + switch (autoip->state) { + case AUTOIP_STATE_PROBING: + if (autoip->ttw == 0) { + if (autoip->sent_num >= PROBE_NUM) { + /* Switch to ANNOUNCING: now we can bind to an IP address and use it */ + autoip->state = AUTOIP_STATE_ANNOUNCING; + autoip_bind(netif); + /* autoip_bind() calls netif_set_addr(): this triggers a gratuitous ARP + which counts as an announcement */ + autoip->sent_num = 1; + autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_tmr(): changing state to ANNOUNCING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + } else { + autoip_arp_probe(netif); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() PROBING Sent Probe\n")); + autoip->sent_num++; + if (autoip->sent_num == PROBE_NUM) { + /* calculate time to wait to for announce */ + autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND; + } else { + /* calculate time to wait to next probe */ + autoip->ttw = (u16_t)((LWIP_AUTOIP_RAND(netif) % + ((PROBE_MAX - PROBE_MIN) * AUTOIP_TICKS_PER_SECOND) ) + + PROBE_MIN * AUTOIP_TICKS_PER_SECOND); + } + } + } + break; + + case AUTOIP_STATE_ANNOUNCING: + if (autoip->ttw == 0) { + autoip_arp_announce(netif); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() ANNOUNCING Sent Announce\n")); + autoip->ttw = ANNOUNCE_INTERVAL * AUTOIP_TICKS_PER_SECOND; + autoip->sent_num++; + + if (autoip->sent_num >= ANNOUNCE_NUM) { + autoip->state = AUTOIP_STATE_BOUND; + autoip->sent_num = 0; + autoip->ttw = 0; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_tmr(): 
changing state to BOUND: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + } + } + break; + + default: + /* nothing to do in other states */ + break; + } + } + } +} + +/** + * Handles every incoming ARP Packet, called by etharp_input(). + * + * @param netif network interface to use for autoip processing + * @param hdr Incoming ARP packet + */ +void +autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr) +{ + struct autoip *autoip = netif_autoip_data(netif); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_arp_reply()\n")); + if ((autoip != NULL) && (autoip->state != AUTOIP_STATE_OFF)) { + /* when ip.src == llipaddr && hw.src != netif->hwaddr + * + * when probing ip.dst == llipaddr && hw.src != netif->hwaddr + * we have a conflict and must solve it + */ + ip4_addr_t sipaddr, dipaddr; + struct eth_addr netifaddr; + SMEMCPY(netifaddr.addr, netif->hwaddr, ETH_HWADDR_LEN); + + /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without + * structure packing (not using structure copy which breaks strict-aliasing rules). + */ + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&sipaddr, &hdr->sipaddr); + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&dipaddr, &hdr->dipaddr); + + if (autoip->state == AUTOIP_STATE_PROBING) { + /* RFC 3927 Section 2.2.1: + * from beginning to after ANNOUNCE_WAIT + * seconds we have a conflict if + * ip.src == llipaddr OR + * ip.dst == llipaddr && hw.src != own hwaddr + */ + if ((ip4_addr_cmp(&sipaddr, &autoip->llipaddr)) || + (ip4_addr_isany_val(sipaddr) && + ip4_addr_cmp(&dipaddr, &autoip->llipaddr) && + !eth_addr_cmp(&netifaddr, &hdr->shwaddr))) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("autoip_arp_reply(): Probe Conflict detected\n")); + autoip_restart(netif); + } + } else { + /* RFC 3927 Section 2.5: + * in any state we have a conflict if + * ip.src == llipaddr && hw.src != own hwaddr + */ + if (ip4_addr_cmp(&sipaddr, &autoip->llipaddr) && + !eth_addr_cmp(&netifaddr, &hdr->shwaddr)) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("autoip_arp_reply(): Conflicting ARP-Packet detected\n")); + autoip_handle_arp_conflict(netif); + } + } + } +} + +/** check if AutoIP supplied netif->ip_addr + * + * @param netif the netif to check + * @return 1 if AutoIP supplied netif->ip_addr (state BOUND or ANNOUNCING), + * 0 otherwise + */ +u8_t +autoip_supplied_address(const struct netif *netif) +{ + if ((netif != NULL) && (netif_autoip_data(netif) != NULL)) { + struct autoip *autoip = netif_autoip_data(netif); + return (autoip->state == AUTOIP_STATE_BOUND) || (autoip->state == AUTOIP_STATE_ANNOUNCING); + } + return 0; +} + +u8_t +autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr) +{ + struct autoip *autoip = netif_autoip_data(netif); + return (autoip != NULL) && ip4_addr_cmp(addr, &(autoip->llipaddr)); +} + +#endif /* LWIP_IPV4 && LWIP_AUTOIP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c new file mode 100644 index 00000000..534574fe --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c @@ -0,0 +1,1990 @@ +/** + * @file + * Dynamic Host Configuration Protocol client + * + * @defgroup dhcp4 DHCPv4 + * @ingroup ip4 + * DHCP (IPv4) related functions + * This is a DHCP client for the lwIP TCP/IP stack. 
It aims to conform + * with RFC 2131 and RFC 2132. + * + * @todo: + * - Support for interfaces other than Ethernet (SLIP, PPP, ...) + * + * Options: + * @ref DHCP_COARSE_TIMER_SECS (recommended 60 which is a minute) + * @ref DHCP_FINE_TIMER_MSECS (recommended 500 which equals TCP coarse timer) + * + * dhcp_start() starts a DHCP client instance which + * configures the interface by obtaining an IP address lease and maintaining it. + * + * Use dhcp_release() to end the lease and use dhcp_stop() + * to remove the DHCP client. + * + * @see LWIP_HOOK_DHCP_APPEND_OPTIONS + * @see LWIP_HOOK_DHCP_PARSE_OPTION + * + * @see netifapi_dhcp4 + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. 
+ * + * Author: Leon Woestenberg + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_DHCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/udp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/def.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/dns.h" +#include "lwip/etharp.h" +#include "lwip/prot/dhcp.h" +#include "lwip/prot/iana.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif +#ifndef LWIP_HOOK_DHCP_APPEND_OPTIONS +#define LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, state, msg, msg_type, options_len_ptr) +#endif +#ifndef LWIP_HOOK_DHCP_PARSE_OPTION +#define LWIP_HOOK_DHCP_PARSE_OPTION(netif, dhcp, state, msg, msg_type, option, len, pbuf, offset) do { LWIP_UNUSED_ARG(msg); } while(0) +#endif + +/** DHCP_CREATE_RAND_XID: if this is set to 1, the xid is created using + * LWIP_RAND() (this overrides DHCP_GLOBAL_XID) + */ +#ifndef DHCP_CREATE_RAND_XID +#define DHCP_CREATE_RAND_XID 1 +#endif + +/** Default for DHCP_GLOBAL_XID is 0xABCD0000 + * This can be changed by defining DHCP_GLOBAL_XID and DHCP_GLOBAL_XID_HEADER, e.g. + * \#define DHCP_GLOBAL_XID_HEADER "stdlib.h" + * \#define DHCP_GLOBAL_XID rand() + */ +#ifdef DHCP_GLOBAL_XID_HEADER +#include DHCP_GLOBAL_XID_HEADER /* include optional starting XID generation prototypes */ +#endif + +/** DHCP_OPTION_MAX_MSG_SIZE is set to the MTU + * MTU is checked to be big enough in dhcp_start */ +#define DHCP_MAX_MSG_LEN(netif) (netif->mtu) +#define DHCP_MAX_MSG_LEN_MIN_REQUIRED 576 +/** Minimum length for reply before packet is parsed */ +#define DHCP_MIN_REPLY_LEN 44 + +#define REBOOT_TRIES 2 + +#if LWIP_DNS && LWIP_DHCP_MAX_DNS_SERVERS +#if DNS_MAX_SERVERS > LWIP_DHCP_MAX_DNS_SERVERS +#define LWIP_DHCP_PROVIDE_DNS_SERVERS LWIP_DHCP_MAX_DNS_SERVERS +#else +#define LWIP_DHCP_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS +#endif +#else +#define LWIP_DHCP_PROVIDE_DNS_SERVERS 0 +#endif + +/** Option handling: options are parsed in dhcp_parse_reply + * and saved in an array where other functions can load them from. + * This might be moved into the struct dhcp (not necessarily since + * lwIP is single-threaded and the array is only used while in recv + * callback). */ +enum dhcp_option_idx { + DHCP_OPTION_IDX_OVERLOAD = 0, + DHCP_OPTION_IDX_MSG_TYPE, + DHCP_OPTION_IDX_SERVER_ID, + DHCP_OPTION_IDX_LEASE_TIME, + DHCP_OPTION_IDX_T1, + DHCP_OPTION_IDX_T2, + DHCP_OPTION_IDX_SUBNET_MASK, + DHCP_OPTION_IDX_ROUTER, +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + DHCP_OPTION_IDX_DNS_SERVER, + DHCP_OPTION_IDX_DNS_SERVER_LAST = DHCP_OPTION_IDX_DNS_SERVER + LWIP_DHCP_PROVIDE_DNS_SERVERS - 1, +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP_GET_NTP_SRV + DHCP_OPTION_IDX_NTP_SERVER, + DHCP_OPTION_IDX_NTP_SERVER_LAST = DHCP_OPTION_IDX_NTP_SERVER + LWIP_DHCP_MAX_NTP_SERVERS - 1, +#endif /* LWIP_DHCP_GET_NTP_SRV */ + DHCP_OPTION_IDX_MAX +}; + +/** Holds the decoded option values, only valid while in dhcp_recv. + @todo: move this into struct dhcp? */ +u32_t dhcp_rx_options_val[DHCP_OPTION_IDX_MAX]; +/** Holds a flag which option was received and is contained in dhcp_rx_options_val, + only valid while in dhcp_recv. + @todo: move this into struct dhcp? 
*/ +u8_t dhcp_rx_options_given[DHCP_OPTION_IDX_MAX]; + +static u8_t dhcp_discover_request_options[] = { + DHCP_OPTION_SUBNET_MASK, + DHCP_OPTION_ROUTER, + DHCP_OPTION_BROADCAST +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + , DHCP_OPTION_DNS_SERVER +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP_GET_NTP_SRV + , DHCP_OPTION_NTP +#endif /* LWIP_DHCP_GET_NTP_SRV */ +}; + +#ifdef DHCP_GLOBAL_XID +static u32_t xid; +static u8_t xid_initialised; +#endif /* DHCP_GLOBAL_XID */ + +#define dhcp_option_given(dhcp, idx) (dhcp_rx_options_given[idx] != 0) +#define dhcp_got_option(dhcp, idx) (dhcp_rx_options_given[idx] = 1) +#define dhcp_clear_option(dhcp, idx) (dhcp_rx_options_given[idx] = 0) +#define dhcp_clear_all_options(dhcp) (memset(dhcp_rx_options_given, 0, sizeof(dhcp_rx_options_given))) +#define dhcp_get_option_value(dhcp, idx) (dhcp_rx_options_val[idx]) +#define dhcp_set_option_value(dhcp, idx, val) (dhcp_rx_options_val[idx] = (val)) + +static struct udp_pcb *dhcp_pcb; +static u8_t dhcp_pcb_refcount; + +/* DHCP client state machine functions */ +static err_t dhcp_discover(struct netif *netif); +static err_t dhcp_select(struct netif *netif); +static void dhcp_bind(struct netif *netif); +#if DHCP_DOES_ARP_CHECK +static err_t dhcp_decline(struct netif *netif); +#endif /* DHCP_DOES_ARP_CHECK */ +static err_t dhcp_rebind(struct netif *netif); +static err_t dhcp_reboot(struct netif *netif); +static void dhcp_set_state(struct dhcp *dhcp, u8_t new_state); + +/* receive, unfold, parse and free incoming messages */ +static void dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); + +/* set the DHCP timers */ +static void dhcp_timeout(struct netif *netif); +static void dhcp_t1_timeout(struct netif *netif); +static void dhcp_t2_timeout(struct netif *netif); + +/* build outgoing messages */ +/* create a DHCP message, fill in common headers */ +static struct pbuf *dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type, u16_t *options_out_len); +/* add a DHCP option (type, then length in bytes) */ +static u16_t dhcp_option(u16_t options_out_len, u8_t *options, u8_t option_type, u8_t option_len); +/* add option values */ +static u16_t dhcp_option_byte(u16_t options_out_len, u8_t *options, u8_t value); +static u16_t dhcp_option_short(u16_t options_out_len, u8_t *options, u16_t value); +static u16_t dhcp_option_long(u16_t options_out_len, u8_t *options, u32_t value); +#if LWIP_NETIF_HOSTNAME +static u16_t dhcp_option_hostname(u16_t options_out_len, u8_t *options, struct netif *netif); +#endif /* LWIP_NETIF_HOSTNAME */ +/* always add the DHCP options trailer to end and pad */ +static void dhcp_option_trailer(u16_t options_out_len, u8_t *options, struct pbuf *p_out); + +/** Ensure DHCP PCB is allocated and bound */ +static err_t +dhcp_inc_pcb_refcount(void) +{ + if (dhcp_pcb_refcount == 0) { + LWIP_ASSERT("dhcp_inc_pcb_refcount(): memory leak", dhcp_pcb == NULL); + + /* allocate UDP PCB */ + dhcp_pcb = udp_new(); + + if (dhcp_pcb == NULL) { + return ERR_MEM; + } + + ip_set_option(dhcp_pcb, SOF_BROADCAST); + + /* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */ + udp_bind(dhcp_pcb, IP4_ADDR_ANY, LWIP_IANA_PORT_DHCP_CLIENT); + udp_connect(dhcp_pcb, IP4_ADDR_ANY, LWIP_IANA_PORT_DHCP_SERVER); + udp_recv(dhcp_pcb, dhcp_recv, NULL); + } + + dhcp_pcb_refcount++; + + return ERR_OK; +} + +/** Free DHCP PCB if the last netif stops using it */ +static void +dhcp_dec_pcb_refcount(void) +{ + LWIP_ASSERT("dhcp_pcb_refcount(): 
refcount error", (dhcp_pcb_refcount > 0)); + dhcp_pcb_refcount--; + + if (dhcp_pcb_refcount == 0) { + udp_remove(dhcp_pcb); + dhcp_pcb = NULL; + } +} + +/** + * Back-off the DHCP client (because of a received NAK response). + * + * Back-off the DHCP client because of a received NAK. Receiving a + * NAK means the client asked for something non-sensible, for + * example when it tries to renew a lease obtained on another network. + * + * We clear any existing set IP address and restart DHCP negotiation + * afresh (as per RFC2131 3.2.3). + * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_nak(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_nak(netif=%p) %c%c%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + /* Change to a defined state - set this before assigning the address + to ensure the callback can use dhcp_supplied_address() */ + dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF); + /* remove IP address from interface (must no longer be used, as per RFC2131) */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + /* We can immediately restart discovery */ + dhcp_discover(netif); +} + +#if DHCP_DOES_ARP_CHECK +/** + * Checks if the offered IP address is already in use. + * + * It does so by sending an ARP request for the offered address and + * entering CHECKING state. If no ARP reply is received within a small + * interval, the address is assumed to be free for use by us. + * + * @param netif the netif under DHCP control + */ +static void +dhcp_check(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_check(netif=%p) %c%c\n", (void *)netif, (s16_t)netif->name[0], + (s16_t)netif->name[1])); + dhcp_set_state(dhcp, DHCP_STATE_CHECKING); + /* create an ARP query for the offered IP address, expecting that no host + responds, as the IP address should not be in use. */ + result = etharp_query(netif, &dhcp->offered_ip_addr, NULL); + if (result != ERR_OK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_check: could not perform ARP query\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = 500; + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_check(): set request timeout %"U16_F" msecs\n", msecs)); +} +#endif /* DHCP_DOES_ARP_CHECK */ + +/** + * Remember the configuration offered by a DHCP server. 
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_offer(struct netif *netif, struct dhcp_msg *msg_in) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_offer(netif=%p) %c%c%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + /* obtain the server address */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SERVER_ID)) { + dhcp->request_timeout = 0; /* stop timer */ + + ip_addr_set_ip4_u32(&dhcp->server_ip_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SERVER_ID))); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): server 0x%08"X32_F"\n", + ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr)))); + /* remember offered address */ + ip4_addr_copy(dhcp->offered_ip_addr, msg_in->yiaddr); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): offer for 0x%08"X32_F"\n", + ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + dhcp_select(netif); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_handle_offer(netif=%p) did not get server ID!\n", (void *)netif)); + } +} + +/** + * Select a DHCP server offer out of all offers. + * + * Simply select the first offer received. + * + * @param netif the netif under DHCP control + * @return lwIP specific error (see error.h) + */ +static err_t +dhcp_select(struct netif *netif) +{ + struct dhcp *dhcp; + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ERROR("dhcp_select: netif != NULL", (netif != NULL), return ERR_ARG;); + dhcp = netif_dhcp_data(netif); + LWIP_ERROR("dhcp_select: dhcp != NULL", (dhcp != NULL), return ERR_VAL;); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_select(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + dhcp_set_state(dhcp, DHCP_STATE_REQUESTING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + /* MUST request the offered IP address */ + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_SERVER_ID, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr)))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REQUESTING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* send broadcast to any DHCP server 
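+ (review note: the client owns no address yet, so the DHCPREQUEST leaves
+ with source 0.0.0.0 for the limited broadcast 255.255.255.255 on the DHCP
+ server port, exactly as the udp_sendto_if_src() arguments below spell out)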
*/ + result = udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_select: REQUESTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_select: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)((dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_select(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * The DHCP timer that checks for lease renewal/rebind timeouts. + * Must be called once a minute (see @ref DHCP_COARSE_TIMER_SECS). + */ +void +dhcp_coarse_tmr(void) +{ + struct netif *netif; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_coarse_tmr()\n")); + /* iterate through all network interfaces */ + NETIF_FOREACH(netif) { + /* only act on DHCP configured interfaces */ + struct dhcp *dhcp = netif_dhcp_data(netif); + if ((dhcp != NULL) && (dhcp->state != DHCP_STATE_OFF)) { + /* compare lease time to expire timeout */ + if (dhcp->t0_timeout && (++dhcp->lease_used == dhcp->t0_timeout)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t0 timeout\n")); + /* this clients' lease time has expired */ + dhcp_release_and_stop(netif); + dhcp_start(netif); + /* timer is active (non zero), and triggers (zeroes) now? */ + } else if (dhcp->t2_rebind_time && (dhcp->t2_rebind_time-- == 1)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t2 timeout\n")); + /* this clients' rebind timeout triggered */ + dhcp_t2_timeout(netif); + /* timer is active (non zero), and triggers (zeroes) now */ + } else if (dhcp->t1_renew_time && (dhcp->t1_renew_time-- == 1)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t1 timeout\n")); + /* this clients' renewal timeout triggered */ + dhcp_t1_timeout(netif); + } + } + } +} + +/** + * DHCP transaction timeout handling (this function must be called every 500ms, + * see @ref DHCP_FINE_TIMER_MSECS). + * + * A DHCP server is expected to respond within a short period of time. + * This timer checks whether an outstanding DHCP request is timed out. + */ +void +dhcp_fine_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct dhcp *dhcp = netif_dhcp_data(netif); + /* only act on DHCP configured interfaces */ + if (dhcp != NULL) { + /* timer is active (non zero), and is about to trigger now */ + if (dhcp->request_timeout > 1) { + dhcp->request_timeout--; + } else if (dhcp->request_timeout == 1) { + dhcp->request_timeout--; + /* { dhcp->request_timeout == 0 } */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_fine_tmr(): request timeout\n")); + /* this client's request timeout triggered */ + dhcp_timeout(netif); + } + } + } +} + +/** + * A DHCP negotiation transaction, or ARP request, has timed out. + * + * The timer that was started with the DHCP or ARP request has + * timed out, indicating no response was received in time. 
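+ * Depending on the current state this retries the DISCOVER, the REQUEST
+ * or the ARP probe, or gives up and restarts the whole negotiation.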
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout()\n")); + /* back-off period has passed, or server selection timed out */ + if ((dhcp->state == DHCP_STATE_BACKING_OFF) || (dhcp->state == DHCP_STATE_SELECTING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout(): restarting discovery\n")); + dhcp_discover(netif); + /* receiving the requested lease timed out */ + } else if (dhcp->state == DHCP_STATE_REQUESTING) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, DHCP request timed out\n")); + if (dhcp->tries <= 5) { + dhcp_select(netif); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, releasing, restarting\n")); + dhcp_release_and_stop(netif); + dhcp_start(netif); + } +#if DHCP_DOES_ARP_CHECK + /* received no ARP reply for the offered address (which is good) */ + } else if (dhcp->state == DHCP_STATE_CHECKING) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): CHECKING, ARP request timed out\n")); + if (dhcp->tries <= 1) { + dhcp_check(netif); + /* no ARP replies on the offered address, + looks like the IP address is indeed free */ + } else { + /* bind the interface to the offered address */ + dhcp_bind(netif); + } +#endif /* DHCP_DOES_ARP_CHECK */ + } else if (dhcp->state == DHCP_STATE_REBOOTING) { + if (dhcp->tries < REBOOT_TRIES) { + dhcp_reboot(netif); + } else { + dhcp_discover(netif); + } + } +} + +/** + * The renewal period has timed out. + * + * @param netif the netif under DHCP control + */ +static void +dhcp_t1_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_t1_timeout()\n")); + if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) || + (dhcp->state == DHCP_STATE_RENEWING)) { + /* just retry to renew - note that the rebind timer (t2) will + * eventually time-out if renew tries fail. */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("dhcp_t1_timeout(): must renew\n")); + /* This slightly different to RFC2131: DHCPREQUEST will be sent from state + DHCP_STATE_RENEWING, not DHCP_STATE_BOUND */ + dhcp_renew(netif); + /* Calculate next timeout */ + if (((dhcp->t2_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) { + dhcp->t1_renew_time = (u16_t)((dhcp->t2_timeout - dhcp->lease_used) / 2); + } + } +} + +/** + * The rebind period has timed out. 
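+ * Called from dhcp_coarse_tmr() once t2_rebind_time reaches zero; from
+ * here on DHCPREQUESTs are broadcast to any server until the lease (t0)
+ * itself expires.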
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_t2_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_t2_timeout()\n")); + if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) || + (dhcp->state == DHCP_STATE_RENEWING) || (dhcp->state == DHCP_STATE_REBINDING)) { + /* just retry to rebind */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("dhcp_t2_timeout(): must rebind\n")); + /* This slightly different to RFC2131: DHCPREQUEST will be sent from state + DHCP_STATE_REBINDING, not DHCP_STATE_BOUND */ + dhcp_rebind(netif); + /* Calculate next timeout */ + if (((dhcp->t0_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) { + dhcp->t2_rebind_time = (u16_t)((dhcp->t0_timeout - dhcp->lease_used) / 2); + } + } +} + +/** + * Handle a DHCP ACK packet + * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_ack(struct netif *netif, struct dhcp_msg *msg_in) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + +#if LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV + u8_t n; +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV */ +#if LWIP_DHCP_GET_NTP_SRV + ip4_addr_t ntp_server_addrs[LWIP_DHCP_MAX_NTP_SERVERS]; +#endif + + /* clear options we might not get from the ACK */ + ip4_addr_set_zero(&dhcp->offered_sn_mask); + ip4_addr_set_zero(&dhcp->offered_gw_addr); +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_set_zero(&dhcp->offered_si_addr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* lease time given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_LEASE_TIME)) { + /* remember offered lease time */ + dhcp->offered_t0_lease = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_LEASE_TIME); + } + /* renewal period given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T1)) { + /* remember given renewal period */ + dhcp->offered_t1_renew = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T1); + } else { + /* calculate safe periods for renewal */ + dhcp->offered_t1_renew = dhcp->offered_t0_lease / 2; + } + + /* renewal period given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T2)) { + /* remember given rebind period */ + dhcp->offered_t2_rebind = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T2); + } else { + /* calculate safe periods for rebinding (offered_t0_lease * 0.875 -> 87.5%)*/ + dhcp->offered_t2_rebind = (dhcp->offered_t0_lease * 7U) / 8U; + } + + /* (y)our internet address */ + ip4_addr_copy(dhcp->offered_ip_addr, msg_in->yiaddr); + +#if LWIP_DHCP_BOOTP_FILE + /* copy boot server address, + boot file name copied in dhcp_parse_reply if not overloaded */ + ip4_addr_copy(dhcp->offered_si_addr, msg_in->siaddr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* subnet mask given? 
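+     (option 1 in the ACK; if the server did not send it, dhcp_bind()
+     falls back to the classful default mask below)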
*/ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SUBNET_MASK)) { + /* remember given subnet mask */ + ip4_addr_set_u32(&dhcp->offered_sn_mask, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SUBNET_MASK))); + dhcp->subnet_mask_given = 1; + } else { + dhcp->subnet_mask_given = 0; + } + + /* gateway router */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_ROUTER)) { + ip4_addr_set_u32(&dhcp->offered_gw_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_ROUTER))); + } + +#if LWIP_DHCP_GET_NTP_SRV + /* NTP servers */ + for (n = 0; (n < LWIP_DHCP_MAX_NTP_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n); n++) { + ip4_addr_set_u32(&ntp_server_addrs[n], lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n))); + } + dhcp_set_ntp_servers(n, ntp_server_addrs); +#endif /* LWIP_DHCP_GET_NTP_SRV */ + +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + /* DNS servers */ + for (n = 0; (n < LWIP_DHCP_PROVIDE_DNS_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n); n++) { + ip_addr_t dns_addr; + ip_addr_set_ip4_u32_val(dns_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n))); + dns_setserver(n, &dns_addr); + } +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +} + +/** + * @ingroup dhcp4 + * Set a statically allocated struct dhcp to work with. + * Using this prevents dhcp_start to allocate it using mem_malloc. + * + * @param netif the netif for which to set the struct dhcp + * @param dhcp (uninitialised) dhcp struct allocated by the application + */ +void +dhcp_set_struct(struct netif *netif, struct dhcp *dhcp) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("dhcp != NULL", dhcp != NULL); + LWIP_ASSERT("netif already has a struct dhcp set", netif_dhcp_data(netif) == NULL); + + /* clear data structure */ + memset(dhcp, 0, sizeof(struct dhcp)); + /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp); +} + +/** + * @ingroup dhcp4 + * Removes a struct dhcp from a netif. + * + * ATTENTION: Only use this when not using dhcp_set_struct() to allocate the + * struct dhcp since the memory is passed back to the heap. + * + * @param netif the netif from which to remove the struct dhcp + */ +void dhcp_cleanup(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + + if (netif_dhcp_data(netif) != NULL) { + mem_free(netif_dhcp_data(netif)); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL); + } +} + +/** + * @ingroup dhcp4 + * Start DHCP negotiation for a network interface. + * + * If no DHCP client instance was attached to this interface, + * a new client is created first. If a DHCP client instance + * was already present, it restarts negotiation. 
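+ * Minimal raw-API usage sketch (illustrative only: 'gnetif' and the
+ * port's ethernetif_init are application/port specific and not part of
+ * this file):
+ *   netif_add(&gnetif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4,
+ *             NULL, ethernetif_init, ethernet_input);
+ *   netif_set_up(&gnetif);
+ *   dhcp_start(&gnetif);
+ * afterwards the lwIP timers must keep running (sys_check_timeouts() in
+ * NO_SYS builds) so that dhcp_fine_tmr()/dhcp_coarse_tmr() are invoked.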
+ * + * @param netif The lwIP network interface + * @return lwIP error code + * - ERR_OK - No error + * - ERR_MEM - Out of memory + */ +err_t +dhcp_start(struct netif *netif) +{ + struct dhcp *dhcp; + err_t result; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif != NULL", (netif != NULL), return ERR_ARG;); + LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;); + dhcp = netif_dhcp_data(netif); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + /* check MTU of the netif */ + if (netif->mtu < DHCP_MAX_MSG_LEN_MIN_REQUIRED) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): Cannot use this netif with DHCP: MTU is too small\n")); + return ERR_MEM; + } + + /* no DHCP client attached yet? */ + if (dhcp == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): mallocing new DHCP client\n")); + dhcp = (struct dhcp *)mem_malloc(sizeof(struct dhcp)); + if (dhcp == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): could not allocate dhcp\n")); + return ERR_MEM; + } + + /* store this dhcp client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): allocated dhcp")); + /* already has DHCP client attached */ + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(): restarting DHCP configuration\n")); + + if (dhcp->pcb_allocated != 0) { + dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */ + } + /* dhcp is cleared below, no need to reset flag*/ + } + + /* clear data structure */ + memset(dhcp, 0, sizeof(struct dhcp)); + /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */ + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): starting DHCP configuration\n")); + + if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */ + return ERR_MEM; + } + dhcp->pcb_allocated = 1; + + if (!netif_is_link_up(netif)) { + /* set state INIT and wait for dhcp_network_changed() to call dhcp_discover() */ + dhcp_set_state(dhcp, DHCP_STATE_INIT); + return ERR_OK; + } + + /* (re)start the DHCP negotiation */ + result = dhcp_discover(netif); + if (result != ERR_OK) { + /* free resources allocated above */ + dhcp_release_and_stop(netif); + return ERR_MEM; + } + return result; +} + +/** + * @ingroup dhcp4 + * Inform a DHCP server of our manual configuration. + * + * This informs DHCP servers of our fixed IP address configuration + * by sending an INFORM message. It does not involve DHCP address + * configuration, it is just here to be nice to the network. 
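+ * (DHCPINFORM per RFC 2131: the client already has an address and only
+ * asks for other configuration parameters; the server answers with a
+ * DHCPACK that carries no lease.)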
+ * + * @param netif The lwIP network interface + */ +void +dhcp_inform(struct netif *netif) +{ + struct dhcp dhcp; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + + if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */ + return; + } + + memset(&dhcp, 0, sizeof(struct dhcp)); + dhcp_set_state(&dhcp, DHCP_STATE_INFORMING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, &dhcp, DHCP_INFORM, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, &dhcp, DHCP_STATE_INFORMING, msg_out, DHCP_INFORM, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_inform: INFORMING\n")); + + udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + + pbuf_free(p_out); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_inform: could not allocate DHCP request\n")); + } + + dhcp_dec_pcb_refcount(); /* delete DHCP PCB if not needed any more */ +} + +/** Handle a possible change in the network configuration. + * + * This enters the REBOOTING state to verify that the currently bound + * address is still valid. + */ +void +dhcp_network_changed(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + if (!dhcp) { + return; + } + switch (dhcp->state) { + case DHCP_STATE_REBINDING: + case DHCP_STATE_RENEWING: + case DHCP_STATE_BOUND: + case DHCP_STATE_REBOOTING: + dhcp->tries = 0; + dhcp_reboot(netif); + break; + case DHCP_STATE_OFF: + /* stay off */ + break; + default: + LWIP_ASSERT("invalid dhcp->state", dhcp->state <= DHCP_STATE_BACKING_OFF); + /* INIT/REQUESTING/CHECKING/BACKING_OFF restart with new 'rid' because the + state changes, SELECTING: continue with current 'rid' as we stay in the + same state */ +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + /* ensure we start with short timeouts, even if already discovering */ + dhcp->tries = 0; + dhcp_discover(netif); + break; + } +} + +#if DHCP_DOES_ARP_CHECK +/** + * Match an ARP reply with the offered IP address: + * check whether the offered IP address is not in use using ARP + * + * @param netif the network interface on which the reply was received + * @param addr The IP address we received a reply from + */ +void +dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr) +{ + struct dhcp *dhcp; + + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + dhcp = netif_dhcp_data(netif); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_arp_reply()\n")); + /* is a DHCP client doing an ARP check? */ + if ((dhcp != NULL) && (dhcp->state == DHCP_STATE_CHECKING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_arp_reply(): CHECKING, arp reply for 0x%08"X32_F"\n", + ip4_addr_get_u32(addr))); + /* did a host respond with the address we + were offered by the DHCP server? 
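+     (if it did, the offered address is already in use, so we must send
+     a DHCPDECLINE and let the state machine restart discovery, see
+     dhcp_decline() below)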
*/ + if (ip4_addr_cmp(addr, &dhcp->offered_ip_addr)) { + /* we will not accept the offered address */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("dhcp_arp_reply(): arp reply matched with offered address, declining\n")); + dhcp_decline(netif); + } + } +} + +/** + * Decline an offered lease. + * + * Tell the DHCP server we do not accept the offered address. + * One reason to decline the lease is when we find out the address + * is already in use by another host (through ARP). + * + * @param netif the netif under DHCP control + */ +static err_t +dhcp_decline(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline()\n")); + dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF); + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_DECLINE, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_BACKING_OFF, msg_out, DHCP_DECLINE, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* per section 4.4.4, broadcast DECLINE messages */ + result = udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_decline: BACKING OFF\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_decline: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = 10 * 1000; + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} +#endif /* DHCP_DOES_ARP_CHECK */ + + +/** + * Start the DHCP process, discover a DHCP server. 
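+ * Broadcasts a DHCPDISCOVER from 0.0.0.0 and arms the retransmission
+ * timeout, which roughly doubles per attempt: 2, 4, 8, 16, 32 and then
+ * a fixed 60 seconds.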
+ * + * @param netif the netif under DHCP control + */ +static err_t +dhcp_discover(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result = ERR_OK; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover()\n")); + + ip4_addr_set_any(&dhcp->offered_ip_addr); + dhcp_set_state(dhcp, DHCP_STATE_SELECTING); + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_DISCOVER, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: making request\n")); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_SELECTING, msg_out, DHCP_DISCOVER, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: sendto(DISCOVER, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER)\n")); + udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: deleting()ing\n")); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover: SELECTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_discover: could not allocate DHCP request\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->tries >= LWIP_DHCP_AUTOIP_COOP_TRIES && dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_OFF) { + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_ON; + autoip_start(netif); + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + msecs = (u16_t)((dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + + +/** + * Bind the interface to the offered IP address. 
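+ * The lease/T1/T2 times (in seconds) are converted into coarse timer
+ * ticks here; with the default DHCP_COARSE_TIMER_SECS of 60, a 24 h
+ * lease becomes (86400 + 30) / 60 = 1440 ticks (capped at 0xffff).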
+ * + * @param netif network interface to bind to the offered address + */ +static void +dhcp_bind(struct netif *netif) +{ + u32_t timeout; + struct dhcp *dhcp; + ip4_addr_t sn_mask, gw_addr; + LWIP_ERROR("dhcp_bind: netif != NULL", (netif != NULL), return;); + dhcp = netif_dhcp_data(netif); + LWIP_ERROR("dhcp_bind: dhcp != NULL", (dhcp != NULL), return;); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + /* reset time used of lease */ + dhcp->lease_used = 0; + + if (dhcp->offered_t0_lease != 0xffffffffUL) { + /* set renewal period timer */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t0 renewal timer %"U32_F" secs\n", dhcp->offered_t0_lease)); + timeout = (dhcp->offered_t0_lease + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t0_timeout = (u16_t)timeout; + if (dhcp->t0_timeout == 0) { + dhcp->t0_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t0_lease * 1000)); + } + + /* temporary DHCP lease? */ + if (dhcp->offered_t1_renew != 0xffffffffUL) { + /* set renewal period timer */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t1 renewal timer %"U32_F" secs\n", dhcp->offered_t1_renew)); + timeout = (dhcp->offered_t1_renew + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t1_timeout = (u16_t)timeout; + if (dhcp->t1_timeout == 0) { + dhcp->t1_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t1_renew * 1000)); + dhcp->t1_renew_time = dhcp->t1_timeout; + } + /* set renewal period timer */ + if (dhcp->offered_t2_rebind != 0xffffffffUL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t2 rebind timer %"U32_F" secs\n", dhcp->offered_t2_rebind)); + timeout = (dhcp->offered_t2_rebind + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t2_timeout = (u16_t)timeout; + if (dhcp->t2_timeout == 0) { + dhcp->t2_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t2_rebind * 1000)); + dhcp->t2_rebind_time = dhcp->t2_timeout; + } + + /* If we have sub 1 minute lease, t2 and t1 will kick in at the same time. */ + if ((dhcp->t1_timeout >= dhcp->t2_timeout) && (dhcp->t2_timeout > 0)) { + dhcp->t1_timeout = 0; + } + + if (dhcp->subnet_mask_given) { + /* copy offered network mask */ + ip4_addr_copy(sn_mask, dhcp->offered_sn_mask); + } else { + /* subnet mask not given, choose a safe subnet mask given the network class */ + u8_t first_octet = ip4_addr1(&dhcp->offered_ip_addr); + if (first_octet <= 127) { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xff000000UL)); + } else if (first_octet >= 192) { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffffff00UL)); + } else { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffff0000UL)); + } + } + + ip4_addr_copy(gw_addr, dhcp->offered_gw_addr); + /* gateway address not given? 
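+     (no router option in the ACK: fall back to the first host address
+     of the offered network, e.g. 192.168.0.10/24 -> 192.168.0.1)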
*/ + if (ip4_addr_isany_val(gw_addr)) { + /* copy network address */ + ip4_addr_get_network(&gw_addr, &dhcp->offered_ip_addr, &sn_mask); + /* use first host address on network as gateway */ + ip4_addr_set_u32(&gw_addr, ip4_addr_get_u32(&gw_addr) | PP_HTONL(0x00000001UL)); + } + +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_bind(): IP: 0x%08"X32_F" SN: 0x%08"X32_F" GW: 0x%08"X32_F"\n", + ip4_addr_get_u32(&dhcp->offered_ip_addr), ip4_addr_get_u32(&sn_mask), ip4_addr_get_u32(&gw_addr))); + /* netif is now bound to DHCP leased address - set this before assigning the address + to ensure the callback can use dhcp_supplied_address() */ + dhcp_set_state(dhcp, DHCP_STATE_BOUND); + + netif_set_addr(netif, &dhcp->offered_ip_addr, &sn_mask, &gw_addr); + /* interface is used by routing now that an address is set */ +} + +/** + * @ingroup dhcp4 + * Renew an existing DHCP lease at the involved DHCP server. + * + * @param netif network interface which must renew its lease + */ +err_t +dhcp_renew(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_renew()\n")); + dhcp_set_state(dhcp, DHCP_STATE_RENEWING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_RENEWING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + result = udp_sendto_if(dhcp_pcb, p_out, &dhcp->server_ip_addr, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew: RENEWING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_renew: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + /* back-off on retries, but to a maximum of 20 seconds */ + msecs = (u16_t)(dhcp->tries < 10 ? dhcp->tries * 2000 : 20 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * Rebind with a DHCP server for an existing DHCP lease. 
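+ * Unlike dhcp_renew(), which unicasts to the known server, rebinding
+ * broadcasts the DHCPREQUEST so that any server that knows the lease
+ * can extend it.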
+ * + * @param netif network interface which must rebind with a DHCP server + */ +static err_t +dhcp_rebind(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind()\n")); + dhcp_set_state(dhcp, DHCP_STATE_REBINDING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REBINDING, msg_out, DHCP_DISCOVER, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* broadcast to server */ + result = udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind: REBINDING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_rebind: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)(dhcp->tries < 10 ? 
dhcp->tries * 1000 : 10 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * Enter REBOOTING state to verify an existing lease + * + * @param netif network interface which must reboot + */ +static err_t +dhcp_reboot(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot()\n")); + dhcp_set_state(dhcp, DHCP_STATE_REBOOTING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN_MIN_REQUIRED); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REBOOTING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* broadcast to server */ + result = udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot: REBOOTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_reboot: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)(dhcp->tries < 10 ? dhcp->tries * 1000 : 10 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * @ingroup dhcp4 + * Release a DHCP lease and stop DHCP statemachine (and AUTOIP if LWIP_DHCP_AUTOIP_COOP). + * + * @param netif network interface + */ +void +dhcp_release_and_stop(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + ip_addr_t server_ip_addr; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_release_and_stop()\n")); + if (dhcp == NULL) { + return; + } + + /* already off? 
-> nothing to do */ + if (dhcp->state == DHCP_STATE_OFF) { + return; + } + + ip_addr_copy(server_ip_addr, dhcp->server_ip_addr); + + /* clean old DHCP offer */ + ip_addr_set_zero_ip4(&dhcp->server_ip_addr); + ip4_addr_set_zero(&dhcp->offered_ip_addr); + ip4_addr_set_zero(&dhcp->offered_sn_mask); + ip4_addr_set_zero(&dhcp->offered_gw_addr); +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_set_zero(&dhcp->offered_si_addr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + dhcp->offered_t0_lease = dhcp->offered_t1_renew = dhcp->offered_t2_rebind = 0; + dhcp->t1_renew_time = dhcp->t2_rebind_time = dhcp->lease_used = dhcp->t0_timeout = 0; + + /* send release message when current IP was assigned via DHCP */ + if (dhcp_supplied_address(netif)) { + /* create and initialize the DHCP message header */ + struct pbuf *p_out; + u16_t options_out_len; + p_out = dhcp_create_msg(netif, dhcp, DHCP_RELEASE, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_SERVER_ID, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&server_ip_addr)))); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, dhcp->state, msg_out, DHCP_RELEASE, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + udp_sendto_if(dhcp_pcb, p_out, &server_ip_addr, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_release: RELEASED, DHCP_STATE_OFF\n")); + } else { + /* sending release failed, but that's not a problem since the correct behaviour of dhcp does not rely on release */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_release: could not allocate DHCP request\n")); + } + } + + /* remove IP address from interface (prevents routing from selecting this interface) */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + + dhcp_set_state(dhcp, DHCP_STATE_OFF); + + if (dhcp->pcb_allocated != 0) { + dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */ + dhcp->pcb_allocated = 0; + } +} + +/** + * @ingroup dhcp4 + * This function calls dhcp_release_and_stop() internally. + * @deprecated Use dhcp_release_and_stop() instead. + */ +err_t +dhcp_release(struct netif *netif) +{ + dhcp_release_and_stop(netif); + return ERR_OK; +} + +/** + * @ingroup dhcp4 + * This function calls dhcp_release_and_stop() internally. + * @deprecated Use dhcp_release_and_stop() instead. + */ +void +dhcp_stop(struct netif *netif) +{ + dhcp_release_and_stop(netif); +} + +/* + * Set the DHCP state of a DHCP client. + * + * If the state changed, reset the number of tries. + */ +static void +dhcp_set_state(struct dhcp *dhcp, u8_t new_state) +{ + if (new_state != dhcp->state) { + dhcp->state = new_state; + dhcp->tries = 0; + dhcp->request_timeout = 0; + } +} + +/* + * Concatenate an option type and length field to the outgoing + * DHCP message. 
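+ * Options follow the RFC 2132 TLV layout: one code byte, one length
+ * byte, then the value. E.g. a lease time of 3600 s is encoded as
+ * 51, 4, 0x00 0x00 0x0E 0x10.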
+ * + */ +static u16_t +dhcp_option(u16_t options_out_len, u8_t *options, u8_t option_type, u8_t option_len) +{ + LWIP_ASSERT("dhcp_option: options_out_len + 2 + option_len <= DHCP_OPTIONS_LEN", options_out_len + 2U + option_len <= DHCP_OPTIONS_LEN); + options[options_out_len++] = option_type; + options[options_out_len++] = option_len; + return options_out_len; +} +/* + * Concatenate a single byte to the outgoing DHCP message. + * + */ +static u16_t +dhcp_option_byte(u16_t options_out_len, u8_t *options, u8_t value) +{ + LWIP_ASSERT("dhcp_option_byte: options_out_len < DHCP_OPTIONS_LEN", options_out_len < DHCP_OPTIONS_LEN); + options[options_out_len++] = value; + return options_out_len; +} + +static u16_t +dhcp_option_short(u16_t options_out_len, u8_t *options, u16_t value) +{ + LWIP_ASSERT("dhcp_option_short: options_out_len + 2 <= DHCP_OPTIONS_LEN", options_out_len + 2U <= DHCP_OPTIONS_LEN); + options[options_out_len++] = (u8_t)((value & 0xff00U) >> 8); + options[options_out_len++] = (u8_t) (value & 0x00ffU); + return options_out_len; +} + +static u16_t +dhcp_option_long(u16_t options_out_len, u8_t *options, u32_t value) +{ + LWIP_ASSERT("dhcp_option_long: options_out_len + 4 <= DHCP_OPTIONS_LEN", options_out_len + 4U <= DHCP_OPTIONS_LEN); + options[options_out_len++] = (u8_t)((value & 0xff000000UL) >> 24); + options[options_out_len++] = (u8_t)((value & 0x00ff0000UL) >> 16); + options[options_out_len++] = (u8_t)((value & 0x0000ff00UL) >> 8); + options[options_out_len++] = (u8_t)((value & 0x000000ffUL)); + return options_out_len; +} + +#if LWIP_NETIF_HOSTNAME +static u16_t +dhcp_option_hostname(u16_t options_out_len, u8_t *options, struct netif *netif) +{ + if (netif->hostname != NULL) { + size_t namelen = strlen(netif->hostname); + if (namelen > 0) { + size_t len; + const char *p = netif->hostname; + /* Shrink len to available bytes (need 2 bytes for OPTION_HOSTNAME + and 1 byte for trailer) */ + size_t available = DHCP_OPTIONS_LEN - options_out_len - 3; + LWIP_ASSERT("DHCP: hostname is too long!", namelen <= available); + len = LWIP_MIN(namelen, available); + LWIP_ASSERT("DHCP: hostname is too long!", len <= 0xFF); + options_out_len = dhcp_option(options_out_len, options, DHCP_OPTION_HOSTNAME, (u8_t)len); + while (len--) { + options_out_len = dhcp_option_byte(options_out_len, options, *p++); + } + } + } + return options_out_len; +} +#endif /* LWIP_NETIF_HOSTNAME */ + +/** + * Extract the DHCP message and the DHCP options. + * + * Extract the DHCP message and the DHCP options, each into a contiguous + * piece of memory. As a DHCP message is variable sized by its options, + * and also allows overriding some fields for options, the easy approach + * is to first unfold the options into a contiguous piece of memory, and + * use that further on. 
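+ * The sname/file header fields may be "overloaded" with more options
+ * (option 52); when that happens they are parsed in an extra pass,
+ * file before sname (RFC 2131 ch. 4.1).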
+ * + */ +static err_t +dhcp_parse_reply(struct pbuf *p, struct dhcp *dhcp) +{ + u8_t *options; + u16_t offset; + u16_t offset_max; + u16_t options_idx; + u16_t options_idx_max; + struct pbuf *q; + int parse_file_as_options = 0; + int parse_sname_as_options = 0; + struct dhcp_msg *msg_in; +#if LWIP_DHCP_BOOTP_FILE + int file_overloaded = 0; +#endif + + LWIP_UNUSED_ARG(dhcp); + + /* clear received options */ + dhcp_clear_all_options(dhcp); + /* check that beginning of dhcp_msg (up to and including chaddr) is in first pbuf */ + if (p->len < DHCP_SNAME_OFS) { + return ERR_BUF; + } + msg_in = (struct dhcp_msg *)p->payload; +#if LWIP_DHCP_BOOTP_FILE + /* clear boot file name */ + dhcp->boot_file_name[0] = 0; +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* parse options */ + + /* start with options field */ + options_idx = DHCP_OPTIONS_OFS; + /* parse options to the end of the received packet */ + options_idx_max = p->tot_len; +again: + q = p; + while ((q != NULL) && (options_idx >= q->len)) { + options_idx = (u16_t)(options_idx - q->len); + options_idx_max = (u16_t)(options_idx_max - q->len); + q = q->next; + } + if (q == NULL) { + return ERR_BUF; + } + offset = options_idx; + offset_max = options_idx_max; + options = (u8_t *)q->payload; + /* at least 1 byte to read and no end marker, then at least 3 bytes to read? */ + while ((q != NULL) && (offset < offset_max) && (options[offset] != DHCP_OPTION_END)) { + u8_t op = options[offset]; + u8_t len; + u8_t decode_len = 0; + int decode_idx = -1; + u16_t val_offset = (u16_t)(offset + 2); + if (val_offset < offset) { + /* overflow */ + return ERR_BUF; + } + /* len byte might be in the next pbuf */ + if ((offset + 1) < q->len) { + len = options[offset + 1]; + } else { + len = (q->next != NULL ? ((u8_t *)q->next->payload)[0] : 0); + } + /* LWIP_DEBUGF(DHCP_DEBUG, ("msg_offset=%"U16_F", q->len=%"U16_F, msg_offset, q->len)); */ + decode_len = len; + switch (op) { + /* case(DHCP_OPTION_END): handled above */ + case (DHCP_OPTION_PAD): + /* special option: no len encoded */ + decode_len = len = 0; + /* will be increased below */ + break; + case (DHCP_OPTION_SUBNET_MASK): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_SUBNET_MASK; + break; + case (DHCP_OPTION_ROUTER): + decode_len = 4; /* only copy the first given router */ + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_ROUTER; + break; +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + case (DHCP_OPTION_DNS_SERVER): + /* special case: there might be more than one server */ + LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;); + /* limit number of DNS servers */ + decode_len = LWIP_MIN(len, 4 * DNS_MAX_SERVERS); + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_DNS_SERVER; + break; +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ + case (DHCP_OPTION_LEASE_TIME): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_LEASE_TIME; + break; +#if LWIP_DHCP_GET_NTP_SRV + case (DHCP_OPTION_NTP): + /* special case: there might be more than one server */ + LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;); + /* limit number of NTP servers */ + decode_len = LWIP_MIN(len, 4 * LWIP_DHCP_MAX_NTP_SERVERS); + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_NTP_SERVER; + break; +#endif /* LWIP_DHCP_GET_NTP_SRV*/ + case (DHCP_OPTION_OVERLOAD): + LWIP_ERROR("len == 1", len == 1, return ERR_VAL;); + /* decode 
overload only in options, not in file/sname: invalid packet */ + LWIP_ERROR("overload in file/sname", options_idx == DHCP_OPTIONS_OFS, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_OVERLOAD; + break; + case (DHCP_OPTION_MESSAGE_TYPE): + LWIP_ERROR("len == 1", len == 1, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_MSG_TYPE; + break; + case (DHCP_OPTION_SERVER_ID): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_SERVER_ID; + break; + case (DHCP_OPTION_T1): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_T1; + break; + case (DHCP_OPTION_T2): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_T2; + break; + default: + decode_len = 0; + LWIP_DEBUGF(DHCP_DEBUG, ("skipping option %"U16_F" in options\n", (u16_t)op)); + LWIP_HOOK_DHCP_PARSE_OPTION(ip_current_netif(), dhcp, dhcp->state, msg_in, + dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE) ? (u8_t)dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE) : 0, + op, len, q, val_offset); + break; + } + if (op == DHCP_OPTION_PAD) { + offset++; + } else { + if (offset + len + 2 > 0xFFFF) { + /* overflow */ + return ERR_BUF; + } + offset = (u16_t)(offset + len + 2); + if (decode_len > 0) { + u32_t value = 0; + u16_t copy_len; +decode_next: + LWIP_ASSERT("check decode_idx", decode_idx >= 0 && decode_idx < DHCP_OPTION_IDX_MAX); + if (!dhcp_option_given(dhcp, decode_idx)) { + copy_len = LWIP_MIN(decode_len, 4); + if (pbuf_copy_partial(q, &value, copy_len, val_offset) != copy_len) { + return ERR_BUF; + } + if (decode_len > 4) { + /* decode more than one u32_t */ + u16_t next_val_offset; + LWIP_ERROR("decode_len %% 4 == 0", decode_len % 4 == 0, return ERR_VAL;); + dhcp_got_option(dhcp, decode_idx); + dhcp_set_option_value(dhcp, decode_idx, lwip_htonl(value)); + decode_len = (u8_t)(decode_len - 4); + next_val_offset = (u16_t)(val_offset + 4); + if (next_val_offset < val_offset) { + /* overflow */ + return ERR_BUF; + } + val_offset = next_val_offset; + decode_idx++; + goto decode_next; + } else if (decode_len == 4) { + value = lwip_ntohl(value); + } else { + LWIP_ERROR("invalid decode_len", decode_len == 1, return ERR_VAL;); + value = ((u8_t *)&value)[0]; + } + dhcp_got_option(dhcp, decode_idx); + dhcp_set_option_value(dhcp, decode_idx, value); + } + } + } + if (offset >= q->len) { + offset = (u16_t)(offset - q->len); + offset_max = (u16_t)(offset_max - q->len); + if (offset < offset_max) { + q = q->next; + LWIP_ERROR("next pbuf was null", q != NULL, return ERR_VAL;); + options = (u8_t *)q->payload; + } else { + /* We've run out of bytes, probably no end marker. Don't proceed. */ + return ERR_BUF; + } + } + } + /* is this an overloaded message? 
*/ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_OVERLOAD)) { + u32_t overload = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_OVERLOAD); + dhcp_clear_option(dhcp, DHCP_OPTION_IDX_OVERLOAD); + if (overload == DHCP_OVERLOAD_FILE) { + parse_file_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded file field\n")); + } else if (overload == DHCP_OVERLOAD_SNAME) { + parse_sname_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname field\n")); + } else if (overload == DHCP_OVERLOAD_SNAME_FILE) { + parse_sname_as_options = 1; + parse_file_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname and file field\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("invalid overload option: %d\n", (int)overload)); + } + } + if (parse_file_as_options) { + /* if both are overloaded, parse file first and then sname (RFC 2131 ch. 4.1) */ + parse_file_as_options = 0; + options_idx = DHCP_FILE_OFS; + options_idx_max = DHCP_FILE_OFS + DHCP_FILE_LEN; +#if LWIP_DHCP_BOOTP_FILE + file_overloaded = 1; +#endif + goto again; + } else if (parse_sname_as_options) { + parse_sname_as_options = 0; + options_idx = DHCP_SNAME_OFS; + options_idx_max = DHCP_SNAME_OFS + DHCP_SNAME_LEN; + goto again; + } +#if LWIP_DHCP_BOOTP_FILE + if (!file_overloaded) { + /* only do this for ACK messages */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE) && + (dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE) == DHCP_ACK)) + /* copy bootp file name, don't care for sname (server hostname) */ + if (pbuf_copy_partial(p, dhcp->boot_file_name, DHCP_FILE_LEN-1, DHCP_FILE_OFS) != (DHCP_FILE_LEN-1)) { + return ERR_BUF; + } + /* make sure the string is really NULL-terminated */ + dhcp->boot_file_name[DHCP_FILE_LEN-1] = 0; + } +#endif /* LWIP_DHCP_BOOTP_FILE */ + return ERR_OK; +} + +/** + * If an incoming DHCP message is in response to us, then trigger the state machine + */ +static void +dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + struct netif *netif = ip_current_input_netif(); + struct dhcp *dhcp = netif_dhcp_data(netif); + struct dhcp_msg *reply_msg = (struct dhcp_msg *)p->payload; + u8_t msg_type; + u8_t i; + struct dhcp_msg *msg_in; + + LWIP_UNUSED_ARG(arg); + + /* Caught DHCP message from netif that does not have DHCP enabled? 
-> not interested */ + if ((dhcp == NULL) || (dhcp->pcb_allocated == 0)) { + goto free_pbuf_and_return; + } + + LWIP_ASSERT("invalid server address type", IP_IS_V4(addr)); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_recv(pbuf = %p) from DHCP server %"U16_F".%"U16_F".%"U16_F".%"U16_F" port %"U16_F"\n", (void *)p, + ip4_addr1_16(ip_2_ip4(addr)), ip4_addr2_16(ip_2_ip4(addr)), ip4_addr3_16(ip_2_ip4(addr)), ip4_addr4_16(ip_2_ip4(addr)), port)); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len)); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len)); + /* prevent warnings about unused arguments */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + + if (p->len < DHCP_MIN_REPLY_LEN) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP reply message or pbuf too short\n")); + goto free_pbuf_and_return; + } + + if (reply_msg->op != DHCP_BOOTREPLY) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("not a DHCP reply message, but type %"U16_F"\n", (u16_t)reply_msg->op)); + goto free_pbuf_and_return; + } + /* iterate through hardware address and match against DHCP message */ + for (i = 0; i < netif->hwaddr_len && i < LWIP_MIN(DHCP_CHADDR_LEN, NETIF_MAX_HWADDR_LEN); i++) { + if (netif->hwaddr[i] != reply_msg->chaddr[i]) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("netif->hwaddr[%"U16_F"]==%02"X16_F" != reply_msg->chaddr[%"U16_F"]==%02"X16_F"\n", + (u16_t)i, (u16_t)netif->hwaddr[i], (u16_t)i, (u16_t)reply_msg->chaddr[i])); + goto free_pbuf_and_return; + } + } + /* match transaction ID against what we expected */ + if (lwip_ntohl(reply_msg->xid) != dhcp->xid) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("transaction id mismatch reply_msg->xid(%"X32_F")!=dhcp->xid(%"X32_F")\n", lwip_ntohl(reply_msg->xid), dhcp->xid)); + goto free_pbuf_and_return; + } + /* option fields could be unfold? */ + if (dhcp_parse_reply(p, dhcp) != ERR_OK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("problem unfolding DHCP message - too short on memory?\n")); + goto free_pbuf_and_return; + } + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("searching DHCP_OPTION_MESSAGE_TYPE\n")); + /* obtain pointer to DHCP message type */ + if (!dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP_OPTION_MESSAGE_TYPE option not found\n")); + goto free_pbuf_and_return; + } + + msg_in = (struct dhcp_msg *)p->payload; + /* read DHCP message type */ + msg_type = (u8_t)dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE); + /* message type is DHCP ACK? */ + if (msg_type == DHCP_ACK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_ACK received\n")); + /* in requesting state? */ + if (dhcp->state == DHCP_STATE_REQUESTING) { + dhcp_handle_ack(netif, msg_in); +#if DHCP_DOES_ARP_CHECK + if ((netif->flags & NETIF_FLAG_ETHARP) != 0) { + /* check if the acknowledged lease address is already in use */ + dhcp_check(netif); + } else { + /* bind interface to the acknowledged lease address */ + dhcp_bind(netif); + } +#else + /* bind interface to the acknowledged lease address */ + dhcp_bind(netif); +#endif + } + /* already bound to the given lease address? 
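+     (an ACK received while REBOOTING, REBINDING or RENEWING just
+     refreshes the existing binding; no new ARP probe is done)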
*/ + else if ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REBINDING) || + (dhcp->state == DHCP_STATE_RENEWING)) { + dhcp_handle_ack(netif, msg_in); + dhcp_bind(netif); + } + } + /* received a DHCP_NAK in appropriate state? */ + else if ((msg_type == DHCP_NAK) && + ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REQUESTING) || + (dhcp->state == DHCP_STATE_REBINDING) || (dhcp->state == DHCP_STATE_RENEWING ))) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_NAK received\n")); + dhcp_handle_nak(netif); + } + /* received a DHCP_OFFER in DHCP_STATE_SELECTING state? */ + else if ((msg_type == DHCP_OFFER) && (dhcp->state == DHCP_STATE_SELECTING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_OFFER received in DHCP_STATE_SELECTING state\n")); + /* remember offered lease */ + dhcp_handle_offer(netif, msg_in); + } + +free_pbuf_and_return: + pbuf_free(p); +} + +/** + * Create a DHCP request, fill in common headers + * + * @param netif the netif under DHCP control + * @param dhcp dhcp control struct + * @param message_type message type of the request + */ +static struct pbuf * +dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type, u16_t *options_out_len) +{ + u16_t i; + struct pbuf *p_out; + struct dhcp_msg *msg_out; + u16_t options_out_len_loc; + +#ifndef DHCP_GLOBAL_XID + /** default global transaction identifier starting value (easy to match + * with a packet analyser). We simply increment for each new request. + * Predefine DHCP_GLOBAL_XID to a better value or a function call to generate one + * at runtime, any supporting function prototypes can be defined in DHCP_GLOBAL_XID_HEADER */ +#if DHCP_CREATE_RAND_XID && defined(LWIP_RAND) + static u32_t xid; +#else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + static u32_t xid = 0xABCD0000; +#endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ +#else + if (!xid_initialised) { + xid = DHCP_GLOBAL_XID; + xid_initialised = !xid_initialised; + } +#endif + LWIP_ERROR("dhcp_create_msg: netif != NULL", (netif != NULL), return NULL;); + LWIP_ERROR("dhcp_create_msg: dhcp != NULL", (dhcp != NULL), return NULL;); + p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp_msg), PBUF_RAM); + if (p_out == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_create_msg(): could not allocate pbuf\n")); + return NULL; + } + LWIP_ASSERT("dhcp_create_msg: check that first pbuf can hold struct dhcp_msg", + (p_out->len >= sizeof(struct dhcp_msg))); + + /* DHCP_REQUEST should reuse 'xid' from DHCPOFFER */ + if ((message_type != DHCP_REQUEST) || (dhcp->state == DHCP_STATE_REBOOTING)) { + /* reuse transaction identifier in retransmissions */ + if (dhcp->tries == 0) { +#if DHCP_CREATE_RAND_XID && defined(LWIP_RAND) + xid = LWIP_RAND(); +#else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + xid++; +#endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + } + dhcp->xid = xid; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, + ("transaction id xid(%"X32_F")\n", xid)); + + msg_out = (struct dhcp_msg *)p_out->payload; + memset(msg_out, 0, sizeof(struct dhcp_msg)); + + msg_out->op = DHCP_BOOTREQUEST; + /* @todo: make link layer independent */ + msg_out->htype = LWIP_IANA_HWTYPE_ETHERNET; + msg_out->hlen = netif->hwaddr_len; + msg_out->xid = lwip_htonl(dhcp->xid); + /* we don't need the broadcast flag since we can receive unicast traffic + before being fully configured! 
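+     (the BOOTP BROADCAST flag is left 0: the MAC already accepts the
+     unicast reply addressed to our chaddr even without an IP address)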
*/ + /* set ciaddr to netif->ip_addr based on message_type and state */ + if ((message_type == DHCP_INFORM) || (message_type == DHCP_DECLINE) || (message_type == DHCP_RELEASE) || + ((message_type == DHCP_REQUEST) && /* DHCP_STATE_BOUND not used for sending! */ + ((dhcp->state == DHCP_STATE_RENEWING) || dhcp->state == DHCP_STATE_REBINDING))) { + ip4_addr_copy(msg_out->ciaddr, *netif_ip4_addr(netif)); + } + for (i = 0; i < LWIP_MIN(DHCP_CHADDR_LEN, NETIF_MAX_HWADDR_LEN); i++) { + /* copy netif hardware address (padded with zeroes through memset already) */ + msg_out->chaddr[i] = netif->hwaddr[i]; + } + msg_out->cookie = PP_HTONL(DHCP_MAGIC_COOKIE); + /* Add option MESSAGE_TYPE */ + options_out_len_loc = dhcp_option(0, msg_out->options, DHCP_OPTION_MESSAGE_TYPE, DHCP_OPTION_MESSAGE_TYPE_LEN); + options_out_len_loc = dhcp_option_byte(options_out_len_loc, msg_out->options, message_type); + if (options_out_len) { + *options_out_len = options_out_len_loc; + } + return p_out; +} + +/** + * Add a DHCP message trailer + * + * Adds the END option to the DHCP message, and if + * necessary, up to three padding bytes. + */ +static void +dhcp_option_trailer(u16_t options_out_len, u8_t *options, struct pbuf *p_out) +{ + options[options_out_len++] = DHCP_OPTION_END; + /* packet is too small, or not 4 byte aligned? */ + while (((options_out_len < DHCP_MIN_OPTIONS_LEN) || (options_out_len & 3)) && + (options_out_len < DHCP_OPTIONS_LEN)) { + /* add a fill/padding byte */ + options[options_out_len++] = 0; + } + /* shrink the pbuf to the actual content length */ + pbuf_realloc(p_out, (u16_t)(sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + options_out_len)); +} + +/** check if DHCP supplied netif->ip_addr + * + * @param netif the netif to check + * @return 1 if DHCP supplied netif->ip_addr (states BOUND or RENEWING), + * 0 otherwise + */ +u8_t +dhcp_supplied_address(const struct netif *netif) +{ + if ((netif != NULL) && (netif_dhcp_data(netif) != NULL)) { + struct dhcp *dhcp = netif_dhcp_data(netif); + return (dhcp->state == DHCP_STATE_BOUND) || (dhcp->state == DHCP_STATE_RENEWING) || + (dhcp->state == DHCP_STATE_REBINDING); + } + return 0; +} + +#endif /* LWIP_IPV4 && LWIP_DHCP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c new file mode 100644 index 00000000..442aac08 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c @@ -0,0 +1,1204 @@ +/** + * @file + * Address Resolution Protocol module for IP over Ethernet + * + * Functionally, ARP is divided into two parts. The first maps an IP address + * to a physical address when sending a packet, and the second part answers + * requests from other machines for our physical address. + * + * This implementation complies with RFC 826 (Ethernet ARP). It supports + * Gratuitious ARP from RFC3220 (IP Mobility Support for IPv4) section 4.6 + * if an interface calls etharp_gratuitous(our_netif) upon address change. + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/etharp.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/prot/iana.h" +#include "netif/ethernet.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Re-request a used ARP entry 1 minute before it would expire to prevent + * breaking a steadily used connection because the ARP entry timed out. */ +#define ARP_AGE_REREQUEST_USED_UNICAST (ARP_MAXAGE - 30) +#define ARP_AGE_REREQUEST_USED_BROADCAST (ARP_MAXAGE - 15) + +/** the time an ARP entry stays pending after first request, + * for ARP_TMR_INTERVAL = 1000, this is + * 10 seconds. + * + * @internal Keep this number at least 2, otherwise it might + * run out instantly if the timeout occurs directly after a request. + */ +#define ARP_MAXPENDING 5 + +/** ARP states */ +enum etharp_state { + ETHARP_STATE_EMPTY = 0, + ETHARP_STATE_PENDING, + ETHARP_STATE_STABLE, + ETHARP_STATE_STABLE_REREQUESTING_1, + ETHARP_STATE_STABLE_REREQUESTING_2 +#if ETHARP_SUPPORT_STATIC_ENTRIES + , ETHARP_STATE_STATIC +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ +}; + +struct etharp_entry { +#if ARP_QUEUEING + /** Pointer to queue of pending outgoing packets on this ARP entry. */ + struct etharp_q_entry *q; +#else /* ARP_QUEUEING */ + /** Pointer to a single pending outgoing packet on this ARP entry. */ + struct pbuf *q; +#endif /* ARP_QUEUEING */ + ip4_addr_t ipaddr; + struct netif *netif; + struct eth_addr ethaddr; + u16_t ctime; + u8_t state; +}; + +static struct etharp_entry arp_table[ARP_TABLE_SIZE]; + +#if !LWIP_NETIF_HWADDRHINT +static netif_addr_idx_t etharp_cached_entry; +#endif /* !LWIP_NETIF_HWADDRHINT */ + +/** Try hard to create a new entry - we want the IP address to appear in + the cache (even if this means removing an active entry or so). 
*/ +#define ETHARP_FLAG_TRY_HARD 1 +#define ETHARP_FLAG_FIND_ONLY 2 +#if ETHARP_SUPPORT_STATIC_ENTRIES +#define ETHARP_FLAG_STATIC_ENTRY 4 +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +#if LWIP_NETIF_HWADDRHINT +#define ETHARP_SET_ADDRHINT(netif, addrhint) do { if (((netif) != NULL) && ((netif)->hints != NULL)) { \ + (netif)->hints->addr_hint = (addrhint); }} while(0) +#else /* LWIP_NETIF_HWADDRHINT */ +#define ETHARP_SET_ADDRHINT(netif, addrhint) (etharp_cached_entry = (addrhint)) +#endif /* LWIP_NETIF_HWADDRHINT */ + + +/* Check for maximum ARP_TABLE_SIZE */ +#if (ARP_TABLE_SIZE > NETIF_ADDR_IDX_MAX) +#error "ARP_TABLE_SIZE must fit in an s16_t, you have to reduce it in your lwipopts.h" +#endif + + +static err_t etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr *hw_dst_addr); +static err_t etharp_raw(struct netif *netif, + const struct eth_addr *ethsrc_addr, const struct eth_addr *ethdst_addr, + const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr, + const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr, + const u16_t opcode); + +#if ARP_QUEUEING +/** + * Free a complete queue of etharp entries + * + * @param q a qeueue of etharp_q_entry's to free + */ +static void +free_etharp_q(struct etharp_q_entry *q) +{ + struct etharp_q_entry *r; + LWIP_ASSERT("q != NULL", q != NULL); + while (q) { + r = q; + q = q->next; + LWIP_ASSERT("r->p != NULL", (r->p != NULL)); + pbuf_free(r->p); + memp_free(MEMP_ARP_QUEUE, r); + } +} +#else /* ARP_QUEUEING */ + +/** Compatibility define: free the queued pbuf */ +#define free_etharp_q(q) pbuf_free(q) + +#endif /* ARP_QUEUEING */ + +/** Clean up ARP table entries */ +static void +etharp_free_entry(int i) +{ + /* remove from SNMP ARP index tree */ + mib2_remove_arp_entry(arp_table[i].netif, &arp_table[i].ipaddr); + /* and empty packet queue */ + if (arp_table[i].q != NULL) { + /* remove all queued packets */ + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_free_entry: freeing entry %"U16_F", packet queue %p.\n", (u16_t)i, (void *)(arp_table[i].q))); + free_etharp_q(arp_table[i].q); + arp_table[i].q = NULL; + } + /* recycle entry for re-use */ + arp_table[i].state = ETHARP_STATE_EMPTY; +#ifdef LWIP_DEBUG + /* for debugging, clean out the complete entry */ + arp_table[i].ctime = 0; + arp_table[i].netif = NULL; + ip4_addr_set_zero(&arp_table[i].ipaddr); + arp_table[i].ethaddr = ethzero; +#endif /* LWIP_DEBUG */ +} + +/** + * Clears expired entries in the ARP table. + * + * This function should be called every ARP_TMR_INTERVAL milliseconds (1 second), + * in order to expire entries in the ARP table. + */ +void +etharp_tmr(void) +{ + int i; + + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer\n")); + /* remove expired entries from the ARP table */ + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + if (state != ETHARP_STATE_EMPTY +#if ETHARP_SUPPORT_STATIC_ENTRIES + && (state != ETHARP_STATE_STATIC) +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + ) { + arp_table[i].ctime++; + if ((arp_table[i].ctime >= ARP_MAXAGE) || + ((arp_table[i].state == ETHARP_STATE_PENDING) && + (arp_table[i].ctime >= ARP_MAXPENDING))) { + /* pending or stable entry has become old! */ + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer: expired %s entry %d.\n", + arp_table[i].state >= ETHARP_STATE_STABLE ? "stable" : "pending", i)); + /* clean up entries that have just been expired */ + etharp_free_entry(i); + } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_1) { + /* Don't send more than one request every 2 seconds. 
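 *
 * As its header comment states, etharp_tmr() expects to run once per
 * ARP_TMR_INTERVAL (1000 ms). It is normally driven by lwIP's own cyclic
 * timers; on a bare-metal build that does not use that module, one possible
 * sketch of the main loop is:
 *
 *   u32_t last_arp = sys_now();              // sys_now(): port-supplied millisecond tick
 *   for (;;) {
 *     if ((u32_t)(sys_now() - last_arp) >= ARP_TMR_INTERVAL) {
 *       last_arp += ARP_TMR_INTERVAL;
 *       etharp_tmr();
 *     }
 *     // ... poll the driver, service other timers ...
 *   }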
*/ + arp_table[i].state = ETHARP_STATE_STABLE_REREQUESTING_2; + } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_2) { + /* Reset state to stable, so that the next transmitted packet will + re-send an ARP request. */ + arp_table[i].state = ETHARP_STATE_STABLE; + } else if (arp_table[i].state == ETHARP_STATE_PENDING) { + /* still pending, resend an ARP query */ + etharp_request(arp_table[i].netif, &arp_table[i].ipaddr); + } + } + } +} + +/** + * Search the ARP table for a matching or new entry. + * + * If an IP address is given, return a pending or stable ARP entry that matches + * the address. If no match is found, create a new entry with this address set, + * but in state ETHARP_EMPTY. The caller must check and possibly change the + * state of the returned entry. + * + * If ipaddr is NULL, return a initialized new entry in state ETHARP_EMPTY. + * + * In all cases, attempt to create new entries from an empty entry. If no + * empty entries are available and ETHARP_FLAG_TRY_HARD flag is set, recycle + * old entries. Heuristic choose the least important entry for recycling. + * + * @param ipaddr IP address to find in ARP cache, or to add if not found. + * @param flags See @ref etharp_state + * @param netif netif related to this address (used for NETIF_HWADDRHINT) + * + * @return The ARP entry index that matched or is created, ERR_MEM if no + * entry is found or could be recycled. + */ +static s16_t +etharp_find_entry(const ip4_addr_t *ipaddr, u8_t flags, struct netif *netif) +{ + s16_t old_pending = ARP_TABLE_SIZE, old_stable = ARP_TABLE_SIZE; + s16_t empty = ARP_TABLE_SIZE; + s16_t i = 0; + /* oldest entry with packets on queue */ + s16_t old_queue = ARP_TABLE_SIZE; + /* its age */ + u16_t age_queue = 0, age_pending = 0, age_stable = 0; + + LWIP_UNUSED_ARG(netif); + + /** + * a) do a search through the cache, remember candidates + * b) select candidate entry + * c) create new entry + */ + + /* a) in a single search sweep, do all of this + * 1) remember the first empty entry (if any) + * 2) remember the oldest stable entry (if any) + * 3) remember the oldest pending entry without queued packets (if any) + * 4) remember the oldest pending entry with queued packets (if any) + * 5) search for a matching IP entry, either pending or stable + * until 5 matches, or all entries are searched for. + */ + + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + /* no empty entry found yet and now we do find one? */ + if ((empty == ARP_TABLE_SIZE) && (state == ETHARP_STATE_EMPTY)) { + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_find_entry: found empty entry %d\n", (int)i)); + /* remember first empty entry */ + empty = i; + } else if (state != ETHARP_STATE_EMPTY) { + LWIP_ASSERT("state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE", + state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE); + /* if given, does IP address match IP address in ARP entry? */ + if (ipaddr && ip4_addr_cmp(ipaddr, &arp_table[i].ipaddr) +#if ETHARP_TABLE_MATCH_NETIF + && ((netif == NULL) || (netif == arp_table[i].netif)) +#endif /* ETHARP_TABLE_MATCH_NETIF */ + ) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: found matching entry %d\n", (int)i)); + /* found exact IP address match, simply bail out */ + return i; + } + /* pending entry? */ + if (state == ETHARP_STATE_PENDING) { + /* pending with queued packets? 
*/ + if (arp_table[i].q != NULL) { + if (arp_table[i].ctime >= age_queue) { + old_queue = i; + age_queue = arp_table[i].ctime; + } + } else + /* pending without queued packets? */ + { + if (arp_table[i].ctime >= age_pending) { + old_pending = i; + age_pending = arp_table[i].ctime; + } + } + /* stable entry? */ + } else if (state >= ETHARP_STATE_STABLE) { +#if ETHARP_SUPPORT_STATIC_ENTRIES + /* don't record old_stable for static entries since they never expire */ + if (state < ETHARP_STATE_STATIC) +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + { + /* remember entry with oldest stable entry in oldest, its age in maxtime */ + if (arp_table[i].ctime >= age_stable) { + old_stable = i; + age_stable = arp_table[i].ctime; + } + } + } + } + } + /* { we have no match } => try to create a new entry */ + + /* don't create new entry, only search? */ + if (((flags & ETHARP_FLAG_FIND_ONLY) != 0) || + /* or no empty entry found and not allowed to recycle? */ + ((empty == ARP_TABLE_SIZE) && ((flags & ETHARP_FLAG_TRY_HARD) == 0))) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty entry found and not allowed to recycle\n")); + return (s16_t)ERR_MEM; + } + + /* b) choose the least destructive entry to recycle: + * 1) empty entry + * 2) oldest stable entry + * 3) oldest pending entry without queued packets + * 4) oldest pending entry with queued packets + * + * { ETHARP_FLAG_TRY_HARD is set at this point } + */ + + /* 1) empty entry available? */ + if (empty < ARP_TABLE_SIZE) { + i = empty; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting empty entry %d\n", (int)i)); + } else { + /* 2) found recyclable stable entry? */ + if (old_stable < ARP_TABLE_SIZE) { + /* recycle oldest stable*/ + i = old_stable; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest stable entry %d\n", (int)i)); + /* no queued packets should exist on stable entries */ + LWIP_ASSERT("arp_table[i].q == NULL", arp_table[i].q == NULL); + /* 3) found recyclable pending entry without queued packets? */ + } else if (old_pending < ARP_TABLE_SIZE) { + /* recycle oldest pending */ + i = old_pending; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %d (without queue)\n", (int)i)); + /* 4) found recyclable pending entry with queued packets? */ + } else if (old_queue < ARP_TABLE_SIZE) { + /* recycle oldest pending (queued packets are free in etharp_free_entry) */ + i = old_queue; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %d, freeing packet queue %p\n", (int)i, (void *)(arp_table[i].q))); + /* no empty or recyclable entries found */ + } else { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty or recyclable entries found\n")); + return (s16_t)ERR_MEM; + } + + /* { empty or recyclable entry found } */ + LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE); + etharp_free_entry(i); + } + + LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE); + LWIP_ASSERT("arp_table[i].state == ETHARP_STATE_EMPTY", + arp_table[i].state == ETHARP_STATE_EMPTY); + + /* IP address given? */ + if (ipaddr != NULL) { + /* set IP address */ + ip4_addr_copy(arp_table[i].ipaddr, *ipaddr); + } + arp_table[i].ctime = 0; +#if ETHARP_TABLE_MATCH_NETIF + arp_table[i].netif = netif; +#endif /* ETHARP_TABLE_MATCH_NETIF */ + return (s16_t)i; +} + +/** + * Update (or insert) a IP/MAC address pair in the ARP cache. 
+ * + * If a pending entry is resolved, any queued packets will be sent + * at this point. + * + * @param netif netif related to this entry (used for NETIF_ADDRHINT) + * @param ipaddr IP address of the inserted ARP entry. + * @param ethaddr Ethernet address of the inserted ARP entry. + * @param flags See @ref etharp_state + * + * @return + * - ERR_OK Successfully updated ARP cache. + * - ERR_MEM If we could not add a new ARP entry when ETHARP_FLAG_TRY_HARD was set. + * - ERR_ARG Non-unicast address given, those will not appear in ARP cache. + * + * @see pbuf_free() + */ +static err_t +etharp_update_arp_entry(struct netif *netif, const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, u8_t flags) +{ + s16_t i; + LWIP_ASSERT("netif->hwaddr_len == ETH_HWADDR_LEN", netif->hwaddr_len == ETH_HWADDR_LEN); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr), + (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2], + (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5])); + /* non-unicast address? */ + if (ip4_addr_isany(ipaddr) || + ip4_addr_isbroadcast(ipaddr, netif) || + ip4_addr_ismulticast(ipaddr)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: will not add non-unicast IP address to ARP cache\n")); + return ERR_ARG; + } + /* find or create ARP entry */ + i = etharp_find_entry(ipaddr, flags, netif); + /* bail out if no entry could be found */ + if (i < 0) { + return (err_t)i; + } + +#if ETHARP_SUPPORT_STATIC_ENTRIES + if (flags & ETHARP_FLAG_STATIC_ENTRY) { + /* record static type */ + arp_table[i].state = ETHARP_STATE_STATIC; + } else if (arp_table[i].state == ETHARP_STATE_STATIC) { + /* found entry is a static type, don't overwrite it */ + return ERR_VAL; + } else +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + { + /* mark it stable */ + arp_table[i].state = ETHARP_STATE_STABLE; + } + + /* record network interface */ + arp_table[i].netif = netif; + /* insert in SNMP ARP index tree */ + mib2_add_arp_entry(netif, &arp_table[i].ipaddr); + + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: updating stable entry %"S16_F"\n", i)); + /* update address */ + SMEMCPY(&arp_table[i].ethaddr, ethaddr, ETH_HWADDR_LEN); + /* reset time stamp */ + arp_table[i].ctime = 0; + /* this is where we will send out queued packets! */ +#if ARP_QUEUEING + while (arp_table[i].q != NULL) { + struct pbuf *p; + /* remember remainder of queue */ + struct etharp_q_entry *q = arp_table[i].q; + /* pop first item off the queue */ + arp_table[i].q = q->next; + /* get the packet pointer */ + p = q->p; + /* now queue entry can be freed */ + memp_free(MEMP_ARP_QUEUE, q); +#else /* ARP_QUEUEING */ + if (arp_table[i].q != NULL) { + struct pbuf *p = arp_table[i].q; + arp_table[i].q = NULL; +#endif /* ARP_QUEUEING */ + /* send the queued IP packet */ + ethernet_output(netif, p, (struct eth_addr *)(netif->hwaddr), ethaddr, ETHTYPE_IP); + /* free the queued IP packet */ + pbuf_free(p); + } + return ERR_OK; +} + +#if ETHARP_SUPPORT_STATIC_ENTRIES +/** Add a new static entry to the ARP table. If an entry exists for the + * specified IP address, this entry is overwritten. + * If packets are queued for the specified IP address, they are sent out. 
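 *
 * A typical call site (addresses purely illustrative), pinning a known
 * neighbour so it never has to be resolved again:
 *
 *   ip4_addr_t peer_ip;
 *   struct eth_addr peer_mac = {{0x00, 0x11, 0x22, 0x33, 0x44, 0x55}};  // illustrative MAC
 *   IP4_ADDR(&peer_ip, 192, 168, 1, 1);                                 // illustrative IP
 *   if (etharp_add_static_entry(&peer_ip, &peer_mac) != ERR_OK) {
 *     // no route to that address yet, or no ARP slot could be recycled
 *   }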
+ * + * @param ipaddr IP address for the new static entry + * @param ethaddr ethernet address for the new static entry + * @return See return values of etharp_add_static_entry + */ +err_t +etharp_add_static_entry(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr) +{ + struct netif *netif; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_add_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr), + (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2], + (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5])); + + netif = ip4_route(ipaddr); + if (netif == NULL) { + return ERR_RTE; + } + + return etharp_update_arp_entry(netif, ipaddr, ethaddr, ETHARP_FLAG_TRY_HARD | ETHARP_FLAG_STATIC_ENTRY); +} + +/** Remove a static entry from the ARP table previously added with a call to + * etharp_add_static_entry. + * + * @param ipaddr IP address of the static entry to remove + * @return ERR_OK: entry removed + * ERR_MEM: entry wasn't found + * ERR_ARG: entry wasn't a static entry but a dynamic one + */ +err_t +etharp_remove_static_entry(const ip4_addr_t *ipaddr) +{ + s16_t i; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_remove_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr))); + + /* find or create ARP entry */ + i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, NULL); + /* bail out if no entry could be found */ + if (i < 0) { + return (err_t)i; + } + + if (arp_table[i].state != ETHARP_STATE_STATIC) { + /* entry wasn't a static entry, cannot remove it */ + return ERR_ARG; + } + /* entry found, free it */ + etharp_free_entry(i); + return ERR_OK; +} +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +/** + * Remove all ARP table entries of the specified netif. + * + * @param netif points to a network interface + */ +void +etharp_cleanup_netif(struct netif *netif) +{ + int i; + + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + if ((state != ETHARP_STATE_EMPTY) && (arp_table[i].netif == netif)) { + etharp_free_entry(i); + } + } +} + +/** + * Finds (stable) ethernet/IP address pair from ARP table + * using interface and IP address index. + * @note the addresses in the ARP table are in network order! 
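 *
 * Example lookup (peer address purely illustrative); the returned pointers
 * reference the ARP table itself and must not be freed:
 *
 *   struct eth_addr *mac;
 *   const ip4_addr_t *ip;
 *   ip4_addr_t peer;
 *   IP4_ADDR(&peer, 192, 168, 1, 10);   // illustrative address
 *   if (etharp_find_addr(netif, &peer, &mac, &ip) >= 0) {
 *     // 'mac' now holds the stable hardware address recorded for 'peer'
 *   }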
+ * + * @param netif points to interface index + * @param ipaddr points to the (network order) IP address index + * @param eth_ret points to return pointer + * @param ip_ret points to return pointer + * @return table index if found, -1 otherwise + */ +ssize_t +etharp_find_addr(struct netif *netif, const ip4_addr_t *ipaddr, + struct eth_addr **eth_ret, const ip4_addr_t **ip_ret) +{ + s16_t i; + + LWIP_ASSERT("eth_ret != NULL && ip_ret != NULL", + eth_ret != NULL && ip_ret != NULL); + + LWIP_UNUSED_ARG(netif); + + i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, netif); + if ((i >= 0) && (arp_table[i].state >= ETHARP_STATE_STABLE)) { + *eth_ret = &arp_table[i].ethaddr; + *ip_ret = &arp_table[i].ipaddr; + return i; + } + return -1; +} + +/** + * Possibility to iterate over stable ARP table entries + * + * @param i entry number, 0 to ARP_TABLE_SIZE + * @param ipaddr return value: IP address + * @param netif return value: points to interface + * @param eth_ret return value: ETH address + * @return 1 on valid index, 0 otherwise + */ +int +etharp_get_entry(size_t i, ip4_addr_t **ipaddr, struct netif **netif, struct eth_addr **eth_ret) +{ + LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("eth_ret != NULL", eth_ret != NULL); + + if ((i < ARP_TABLE_SIZE) && (arp_table[i].state >= ETHARP_STATE_STABLE)) { + *ipaddr = &arp_table[i].ipaddr; + *netif = arp_table[i].netif; + *eth_ret = &arp_table[i].ethaddr; + return 1; + } else { + return 0; + } +} + +/** + * Responds to ARP requests to us. Upon ARP replies to us, add entry to cache + * send out queued IP packets. Updates cache with snooped address pairs. + * + * Should be called for incoming ARP packets. The pbuf in the argument + * is freed by this function. + * + * @param p The ARP packet that arrived on netif. Is freed by this function. + * @param netif The lwIP network interface on which the ARP packet pbuf arrived. + * + * @see pbuf_free() + */ +void +etharp_input(struct pbuf *p, struct netif *netif) +{ + struct etharp_hdr *hdr; + /* these are aligned properly, whereas the ARP header fields might not be */ + ip4_addr_t sipaddr, dipaddr; + u8_t for_us; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + + hdr = (struct etharp_hdr *)p->payload; + + /* RFC 826 "Packet Reception": */ + if ((hdr->hwtype != PP_HTONS(LWIP_IANA_HWTYPE_ETHERNET)) || + (hdr->hwlen != ETH_HWADDR_LEN) || + (hdr->protolen != sizeof(ip4_addr_t)) || + (hdr->proto != PP_HTONS(ETHTYPE_IP))) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("etharp_input: packet dropped, wrong hw type, hwlen, proto, protolen or ethernet type (%"U16_F"/%"U16_F"/%"U16_F"/%"U16_F")\n", + hdr->hwtype, (u16_t)hdr->hwlen, hdr->proto, (u16_t)hdr->protolen)); + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + pbuf_free(p); + return; + } + ETHARP_STATS_INC(etharp.recv); + +#if LWIP_AUTOIP + /* We have to check if a host already has configured our random + * created link local address and continuously check if there is + * a host with this IP-address so we can detect collisions */ + autoip_arp_reply(netif, hdr); +#endif /* LWIP_AUTOIP */ + + /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without + * structure packing (not using structure copy which breaks strict-aliasing rules). 
*/ + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&sipaddr, &hdr->sipaddr); + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&dipaddr, &hdr->dipaddr); + + /* this interface is not configured? */ + if (ip4_addr_isany_val(*netif_ip4_addr(netif))) { + for_us = 0; + } else { + /* ARP packet directed to us? */ + for_us = (u8_t)ip4_addr_cmp(&dipaddr, netif_ip4_addr(netif)); + } + + /* ARP message directed to us? + -> add IP address in ARP cache; assume requester wants to talk to us, + can result in directly sending the queued packets for this host. + ARP message not directed to us? + -> update the source IP address in the cache, if present */ + etharp_update_arp_entry(netif, &sipaddr, &(hdr->shwaddr), + for_us ? ETHARP_FLAG_TRY_HARD : ETHARP_FLAG_FIND_ONLY); + + /* now act on the message itself */ + switch (hdr->opcode) { + /* ARP request? */ + case PP_HTONS(ARP_REQUEST): + /* ARP request. If it asked for our address, we send out a + * reply. In any case, we time-stamp any existing ARP entry, + * and possibly send out an IP packet that was queued on it. */ + + LWIP_DEBUGF (ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP request\n")); + /* ARP request for our address? */ + if (for_us) { + /* send ARP response */ + etharp_raw(netif, + (struct eth_addr *)netif->hwaddr, &hdr->shwaddr, + (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), + &hdr->shwaddr, &sipaddr, + ARP_REPLY); + /* we are not configured? */ + } else if (ip4_addr_isany_val(*netif_ip4_addr(netif))) { + /* { for_us == 0 and netif->ip_addr.addr == 0 } */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: we are unconfigured, ARP request ignored.\n")); + /* request was not directed to us */ + } else { + /* { for_us == 0 and netif->ip_addr.addr != 0 } */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP request was not for us.\n")); + } + break; + case PP_HTONS(ARP_REPLY): + /* ARP reply. We already updated the ARP cache earlier. */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP reply\n")); +#if (LWIP_DHCP && DHCP_DOES_ARP_CHECK) + /* DHCP wants to know about ARP replies from any host with an + * IP address also offered to us by the DHCP server. We do not + * want to take a duplicate IP address on a single network. + * @todo How should we handle redundant (fail-over) interfaces? */ + dhcp_arp_reply(netif, &sipaddr); +#endif /* (LWIP_DHCP && DHCP_DOES_ARP_CHECK) */ + break; + default: + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP unknown opcode type %"S16_F"\n", lwip_htons(hdr->opcode))); + ETHARP_STATS_INC(etharp.err); + break; + } + /* free ARP packet */ + pbuf_free(p); +} + +/** Just a small helper function that sends a pbuf to an ethernet address + * in the arp_table specified by the index 'arp_idx'. + */ +static err_t +etharp_output_to_arp_index(struct netif *netif, struct pbuf *q, netif_addr_idx_t arp_idx) +{ + LWIP_ASSERT("arp_table[arp_idx].state >= ETHARP_STATE_STABLE", + arp_table[arp_idx].state >= ETHARP_STATE_STABLE); + /* if arp table entry is about to expire: re-request it, + but only if its state is ETHARP_STATE_STABLE to prevent flooding the + network with ARP requests if this address is used frequently. 
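 *
 * With the default ARP_MAXAGE of 300 one-second ticks this works out to:
 * a unicast refresh is attempted once the entry is older than 270 s
 * (ARP_AGE_REREQUEST_USED_UNICAST = ARP_MAXAGE - 30), a broadcast refresh
 * takes over after 285 s (ARP_MAXAGE - 15), and etharp_tmr() finally
 * expires the entry at 300 s if no reply refreshed it.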
*/ + if (arp_table[arp_idx].state == ETHARP_STATE_STABLE) { + if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_BROADCAST) { + /* issue a standard request using broadcast */ + if (etharp_request(netif, &arp_table[arp_idx].ipaddr) == ERR_OK) { + arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1; + } + } else if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_UNICAST) { + /* issue a unicast request (for 15 seconds) to prevent unnecessary broadcast */ + if (etharp_request_dst(netif, &arp_table[arp_idx].ipaddr, &arp_table[arp_idx].ethaddr) == ERR_OK) { + arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1; + } + } + } + + return ethernet_output(netif, q, (struct eth_addr *)(netif->hwaddr), &arp_table[arp_idx].ethaddr, ETHTYPE_IP); +} + +/** + * Resolve and fill-in Ethernet address header for outgoing IP packet. + * + * For IP multicast and broadcast, corresponding Ethernet addresses + * are selected and the packet is transmitted on the link. + * + * For unicast addresses, the packet is submitted to etharp_query(). In + * case the IP address is outside the local network, the IP address of + * the gateway is used. + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ipaddr The IP address of the packet destination. + * + * @return + * - ERR_RTE No route to destination (no gateway to external networks), + * or the return type of either etharp_query() or ethernet_output(). + */ +err_t +etharp_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr) +{ + const struct eth_addr *dest; + struct eth_addr mcastaddr; + const ip4_addr_t *dst_addr = ipaddr; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("q != NULL", q != NULL); + LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL); + + /* Determine on destination hardware address. Broadcasts and multicasts + * are special, other IP addresses are looked up in the ARP table. */ + + /* broadcast destination IP address? */ + if (ip4_addr_isbroadcast(ipaddr, netif)) { + /* broadcast on Ethernet also */ + dest = (const struct eth_addr *)ðbroadcast; + /* multicast destination IP address? */ + } else if (ip4_addr_ismulticast(ipaddr)) { + /* Hash IP multicast address to MAC address.*/ + mcastaddr.addr[0] = LL_IP4_MULTICAST_ADDR_0; + mcastaddr.addr[1] = LL_IP4_MULTICAST_ADDR_1; + mcastaddr.addr[2] = LL_IP4_MULTICAST_ADDR_2; + mcastaddr.addr[3] = ip4_addr2(ipaddr) & 0x7f; + mcastaddr.addr[4] = ip4_addr3(ipaddr); + mcastaddr.addr[5] = ip4_addr4(ipaddr); + /* destination Ethernet address is multicast */ + dest = &mcastaddr; + /* unicast destination IP address? */ + } else { + netif_addr_idx_t i; + /* outside local network? if so, this can neither be a global broadcast nor + a subnet broadcast. */ + if (!ip4_addr_netcmp(ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) && + !ip4_addr_islinklocal(ipaddr)) { +#if LWIP_AUTOIP + struct ip_hdr *iphdr = LWIP_ALIGNMENT_CAST(struct ip_hdr *, q->payload); + /* According to RFC 3297, chapter 2.6.2 (Forwarding Rules), a packet with + a link-local source address must always be "directly to its destination + on the same physical link. The host MUST NOT send the packet to any + router for forwarding". 
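 *
 * The LWIP_HOOK_ETHARP_GET_GW branch just below lets a port choose a
 * gateway per destination instead of always using netif->gw. A minimal
 * sketch (the helper name is hypothetical):
 *
 *   // lwipopts.h: #define LWIP_HOOK_ETHARP_GET_GW(netif, dest) my_get_gw((netif), (dest))
 *   const ip4_addr_t *my_get_gw(struct netif *netif, const ip4_addr_t *dest) {
 *     // 'my_get_gw' is only a placeholder name for the port's routing policy
 *     LWIP_UNUSED_ARG(netif);
 *     LWIP_UNUSED_ARG(dest);
 *     return NULL;   // NULL = fall back to the interface's default gateway
 *   }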
*/ + if (!ip4_addr_islinklocal(&iphdr->src)) +#endif /* LWIP_AUTOIP */ + { +#ifdef LWIP_HOOK_ETHARP_GET_GW + /* For advanced routing, a single default gateway might not be enough, so get + the IP address of the gateway to handle the current destination address. */ + dst_addr = LWIP_HOOK_ETHARP_GET_GW(netif, ipaddr); + if (dst_addr == NULL) +#endif /* LWIP_HOOK_ETHARP_GET_GW */ + { + /* interface has default gateway? */ + if (!ip4_addr_isany_val(*netif_ip4_gw(netif))) { + /* send to hardware address of default gateway IP address */ + dst_addr = netif_ip4_gw(netif); + /* no default gateway available */ + } else { + /* no route to destination error (default gateway missing) */ + return ERR_RTE; + } + } + } + } +#if LWIP_NETIF_HWADDRHINT + if (netif->hints != NULL) { + /* per-pcb cached entry was given */ + netif_addr_idx_t etharp_cached_entry = netif->hints->addr_hint; + if (etharp_cached_entry < ARP_TABLE_SIZE) { +#endif /* LWIP_NETIF_HWADDRHINT */ + if ((arp_table[etharp_cached_entry].state >= ETHARP_STATE_STABLE) && +#if ETHARP_TABLE_MATCH_NETIF + (arp_table[etharp_cached_entry].netif == netif) && +#endif + (ip4_addr_cmp(dst_addr, &arp_table[etharp_cached_entry].ipaddr))) { + /* the per-pcb-cached entry is stable and the right one! */ + ETHARP_STATS_INC(etharp.cachehit); + return etharp_output_to_arp_index(netif, q, etharp_cached_entry); + } +#if LWIP_NETIF_HWADDRHINT + } + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* find stable entry: do this here since this is a critical path for + throughput and etharp_find_entry() is kind of slow */ + for (i = 0; i < ARP_TABLE_SIZE; i++) { + if ((arp_table[i].state >= ETHARP_STATE_STABLE) && +#if ETHARP_TABLE_MATCH_NETIF + (arp_table[i].netif == netif) && +#endif + (ip4_addr_cmp(dst_addr, &arp_table[i].ipaddr))) { + /* found an existing, stable entry */ + ETHARP_SET_ADDRHINT(netif, i); + return etharp_output_to_arp_index(netif, q, i); + } + } + /* no stable entry found, use the (slower) query function: + queue on destination Ethernet address belonging to ipaddr */ + return etharp_query(netif, dst_addr, q); + } + + /* continuation for multicast/broadcast destinations */ + /* obtain source Ethernet address of the given interface */ + /* send packet directly on the link */ + return ethernet_output(netif, q, (struct eth_addr *)(netif->hwaddr), dest, ETHTYPE_IP); +} + +/** + * Send an ARP request for the given IP address and/or queue a packet. + * + * If the IP address was not yet in the cache, a pending ARP cache entry + * is added and an ARP request is sent for the given address. The packet + * is queued on this entry. + * + * If the IP address was already pending in the cache, a new ARP request + * is sent for the given address. The packet is queued on this entry. + * + * If the IP address was already stable in the cache, and a packet is + * given, it is directly sent and no ARP request is sent out. + * + * If the IP address was already stable in the cache, and no packet is + * given, an ARP request is sent out. + * + * @param netif The lwIP network interface on which ipaddr + * must be queried for. + * @param ipaddr The IP address to be resolved. + * @param q If non-NULL, a pbuf that must be delivered to the IP address. + * q is not freed by this function. + * + * @note q must only be ONE packet, not a packet queue! + * + * @return + * - ERR_BUF Could not make room for Ethernet header. + * - ERR_MEM Hardware address unknown, and no more ARP entries available + * to query for address or queue the packet. 
+ * - ERR_MEM Could not queue packet due to memory shortage. + * - ERR_RTE No route to destination (no gateway to external networks). + * - ERR_ARG Non-unicast address given, those will not appear in ARP cache. + * + */ +err_t +etharp_query(struct netif *netif, const ip4_addr_t *ipaddr, struct pbuf *q) +{ + struct eth_addr *srcaddr = (struct eth_addr *)netif->hwaddr; + err_t result = ERR_MEM; + int is_new_entry = 0; + s16_t i_err; + netif_addr_idx_t i; + + /* non-unicast address? */ + if (ip4_addr_isbroadcast(ipaddr, netif) || + ip4_addr_ismulticast(ipaddr) || + ip4_addr_isany(ipaddr)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: will not add non-unicast IP address to ARP cache\n")); + return ERR_ARG; + } + + /* find entry in ARP cache, ask to create entry if queueing packet */ + i_err = etharp_find_entry(ipaddr, ETHARP_FLAG_TRY_HARD, netif); + + /* could not find or create entry? */ + if (i_err < 0) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not create ARP entry\n")); + if (q) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: packet dropped\n")); + ETHARP_STATS_INC(etharp.memerr); + } + return (err_t)i_err; + } + LWIP_ASSERT("type overflow", (size_t)i_err < NETIF_ADDR_IDX_MAX); + i = (netif_addr_idx_t)i_err; + + /* mark a fresh entry as pending (we just sent a request) */ + if (arp_table[i].state == ETHARP_STATE_EMPTY) { + is_new_entry = 1; + arp_table[i].state = ETHARP_STATE_PENDING; + /* record network interface for re-sending arp request in etharp_tmr */ + arp_table[i].netif = netif; + } + + /* { i is either a STABLE or (new or existing) PENDING entry } */ + LWIP_ASSERT("arp_table[i].state == PENDING or STABLE", + ((arp_table[i].state == ETHARP_STATE_PENDING) || + (arp_table[i].state >= ETHARP_STATE_STABLE))); + + /* do we have a new entry? or an implicit query request? */ + if (is_new_entry || (q == NULL)) { + /* try to resolve it; send out ARP request */ + result = etharp_request(netif, ipaddr); + if (result != ERR_OK) { + /* ARP request couldn't be sent */ + /* We don't re-send arp request in etharp_tmr, but we still queue packets, + since this failure could be temporary, and the next packet calling + etharp_query again could lead to sending the queued packets. */ + } + if (q == NULL) { + return result; + } + } + + /* packet given? */ + LWIP_ASSERT("q != NULL", q != NULL); + /* stable entry? */ + if (arp_table[i].state >= ETHARP_STATE_STABLE) { + /* we have a valid IP->Ethernet address mapping */ + ETHARP_SET_ADDRHINT(netif, i); + /* send the packet */ + result = ethernet_output(netif, q, srcaddr, &(arp_table[i].ethaddr), ETHTYPE_IP); + /* pending entry? (either just created or already pending */ + } else if (arp_table[i].state == ETHARP_STATE_PENDING) { + /* entry is still pending, queue the given packet 'q' */ + struct pbuf *p; + int copy_needed = 0; + /* IF q includes a pbuf that must be copied, copy the whole chain into a + * new PBUF_RAM. See the definition of PBUF_NEEDS_COPY for details. */ + p = q; + while (p) { + LWIP_ASSERT("no packet queues allowed!", (p->len != p->tot_len) || (p->next == 0)); + if (PBUF_NEEDS_COPY(p)) { + copy_needed = 1; + break; + } + p = p->next; + } + if (copy_needed) { + /* copy the whole packet into new pbufs */ + p = pbuf_clone(PBUF_LINK, PBUF_RAM, q); + } else { + /* referencing the old pbuf is enough */ + p = q; + pbuf_ref(p); + } + /* packet could be taken over? */ + if (p != NULL) { + /* queue packet ... 
*/ +#if ARP_QUEUEING + struct etharp_q_entry *new_entry; + /* allocate a new arp queue entry */ + new_entry = (struct etharp_q_entry *)memp_malloc(MEMP_ARP_QUEUE); + if (new_entry != NULL) { + unsigned int qlen = 0; + new_entry->next = 0; + new_entry->p = p; + if (arp_table[i].q != NULL) { + /* queue was already existent, append the new entry to the end */ + struct etharp_q_entry *r; + r = arp_table[i].q; + qlen++; + while (r->next != NULL) { + r = r->next; + qlen++; + } + r->next = new_entry; + } else { + /* queue did not exist, first item in queue */ + arp_table[i].q = new_entry; + } +#if ARP_QUEUE_LEN + if (qlen >= ARP_QUEUE_LEN) { + struct etharp_q_entry *old; + old = arp_table[i].q; + arp_table[i].q = arp_table[i].q->next; + pbuf_free(old->p); + memp_free(MEMP_ARP_QUEUE, old); + } +#endif + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"U16_F"\n", (void *)q, i)); + result = ERR_OK; + } else { + /* the pool MEMP_ARP_QUEUE is empty */ + pbuf_free(p); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q)); + result = ERR_MEM; + } +#else /* ARP_QUEUEING */ + /* always queue one packet per ARP request only, freeing a previously queued packet */ + if (arp_table[i].q != NULL) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: dropped previously queued packet %p for ARP entry %"U16_F"\n", (void *)q, (u16_t)i)); + pbuf_free(arp_table[i].q); + } + arp_table[i].q = p; + result = ERR_OK; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"U16_F"\n", (void *)q, (u16_t)i)); +#endif /* ARP_QUEUEING */ + } else { + ETHARP_STATS_INC(etharp.memerr); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q)); + result = ERR_MEM; + } + } + return result; +} + +/** + * Send a raw ARP packet (opcode and all addresses can be modified) + * + * @param netif the lwip network interface on which to send the ARP packet + * @param ethsrc_addr the source MAC address for the ethernet header + * @param ethdst_addr the destination MAC address for the ethernet header + * @param hwsrc_addr the source MAC address for the ARP protocol header + * @param ipsrc_addr the source IP address for the ARP protocol header + * @param hwdst_addr the destination MAC address for the ARP protocol header + * @param ipdst_addr the destination IP address for the ARP protocol header + * @param opcode the type of the ARP packet + * @return ERR_OK if the ARP packet has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +static err_t +etharp_raw(struct netif *netif, const struct eth_addr *ethsrc_addr, + const struct eth_addr *ethdst_addr, + const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr, + const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr, + const u16_t opcode) +{ + struct pbuf *p; + err_t result = ERR_OK; + struct etharp_hdr *hdr; + + LWIP_ASSERT("netif != NULL", netif != NULL); + + /* allocate a pbuf for the outgoing ARP request packet */ + p = pbuf_alloc(PBUF_LINK, SIZEOF_ETHARP_HDR, PBUF_RAM); + /* could allocate a pbuf for an ARP request? 
*/ + if (p == NULL) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("etharp_raw: could not allocate pbuf for ARP request.\n")); + ETHARP_STATS_INC(etharp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("check that first pbuf can hold struct etharp_hdr", + (p->len >= SIZEOF_ETHARP_HDR)); + + hdr = (struct etharp_hdr *)p->payload; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_raw: sending raw ARP packet.\n")); + hdr->opcode = lwip_htons(opcode); + + LWIP_ASSERT("netif->hwaddr_len must be the same as ETH_HWADDR_LEN for etharp!", + (netif->hwaddr_len == ETH_HWADDR_LEN)); + + /* Write the ARP MAC-Addresses */ + SMEMCPY(&hdr->shwaddr, hwsrc_addr, ETH_HWADDR_LEN); + SMEMCPY(&hdr->dhwaddr, hwdst_addr, ETH_HWADDR_LEN); + /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without + * structure packing. */ + IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(&hdr->sipaddr, ipsrc_addr); + IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(&hdr->dipaddr, ipdst_addr); + + hdr->hwtype = PP_HTONS(LWIP_IANA_HWTYPE_ETHERNET); + hdr->proto = PP_HTONS(ETHTYPE_IP); + /* set hwlen and protolen */ + hdr->hwlen = ETH_HWADDR_LEN; + hdr->protolen = sizeof(ip4_addr_t); + + /* send ARP query */ +#if LWIP_AUTOIP + /* If we are using Link-Local, all ARP packets that contain a Link-Local + * 'sender IP address' MUST be sent using link-layer broadcast instead of + * link-layer unicast. (See RFC3927 Section 2.5, last paragraph) */ + if (ip4_addr_islinklocal(ipsrc_addr)) { + ethernet_output(netif, p, ethsrc_addr, ðbroadcast, ETHTYPE_ARP); + } else +#endif /* LWIP_AUTOIP */ + { + ethernet_output(netif, p, ethsrc_addr, ethdst_addr, ETHTYPE_ARP); + } + + ETHARP_STATS_INC(etharp.xmit); + /* free ARP query packet */ + pbuf_free(p); + p = NULL; + /* could not allocate pbuf for ARP request */ + + return result; +} + +/** + * Send an ARP request packet asking for ipaddr to a specific eth address. + * Used to send unicast request to refresh the ARP table just before an entry + * times out + * + * @param netif the lwip network interface on which to send the request + * @param ipaddr the IP address for which to ask + * @param hw_dst_addr the ethernet address to send this packet to + * @return ERR_OK if the request has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +static err_t +etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr *hw_dst_addr) +{ + return etharp_raw(netif, (struct eth_addr *)netif->hwaddr, hw_dst_addr, + (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), ðzero, + ipaddr, ARP_REQUEST); +} + +/** + * Send an ARP request packet asking for ipaddr. 
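 *
 * A common use is announcing our own address after it changes (gratuitous
 * ARP); etharp.h wraps that as etharp_gratuitous(netif), which is simply
 *
 *   etharp_request((netif), netif_ip4_addr(netif));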
+ * + * @param netif the lwip network interface on which to send the request + * @param ipaddr the IP address for which to ask + * @return ERR_OK if the request has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +err_t +etharp_request(struct netif *netif, const ip4_addr_t *ipaddr) +{ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_request: sending ARP request.\n")); + return etharp_request_dst(netif, ipaddr, ðbroadcast); +} + +#endif /* LWIP_IPV4 && LWIP_ARP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c new file mode 100644 index 00000000..a462ccd3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c @@ -0,0 +1,404 @@ +/** + * @file + * ICMP - Internet Control Message Protocol + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* Some ICMP messages should be passed to the transport protocols. This + is not implemented. */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/icmp.h" +#include "lwip/inet_chksum.h" +#include "lwip/ip.h" +#include "lwip/def.h" +#include "lwip/stats.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Small optimization: set to 0 if incoming PBUF_POOL pbuf always can be + * used to modify and send a response packet (and to 1 if this is not the case, + * e.g. when link header is stripped off when receiving) */ +#ifndef LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN +#define LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN 1 +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */ + +/* The amount of data from the original packet to return in a dest-unreachable */ +#define ICMP_DEST_UNREACH_DATASIZE 8 + +static void icmp_send_response(struct pbuf *p, u8_t type, u8_t code); + +/** + * Processes ICMP input packets, called from ip_input(). 
+ * + * Currently only processes icmp echo requests and sends + * out the echo response. + * + * @param p the icmp echo request packet, p->payload pointing to the icmp header + * @param inp the netif on which this packet was received + */ +void +icmp_input(struct pbuf *p, struct netif *inp) +{ + u8_t type; +#ifdef LWIP_DEBUG + u8_t code; +#endif /* LWIP_DEBUG */ + struct icmp_echo_hdr *iecho; + const struct ip_hdr *iphdr_in; + u16_t hlen; + const ip4_addr_t *src; + + ICMP_STATS_INC(icmp.recv); + MIB2_STATS_INC(mib2.icmpinmsgs); + + iphdr_in = ip4_current_header(); + hlen = IPH_HL_BYTES(iphdr_in); + if (hlen < IP_HLEN) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short IP header (%"S16_F" bytes) received\n", hlen)); + goto lenerr; + } + if (p->len < sizeof(u16_t) * 2) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short ICMP (%"U16_F" bytes) received\n", p->tot_len)); + goto lenerr; + } + + type = *((u8_t *)p->payload); +#ifdef LWIP_DEBUG + code = *(((u8_t *)p->payload) + 1); + /* if debug is enabled but debug statement below is somehow disabled: */ + LWIP_UNUSED_ARG(code); +#endif /* LWIP_DEBUG */ + switch (type) { + case ICMP_ER: + /* This is OK, echo reply might have been parsed by a raw PCB + (as obviously, an echo request has been sent, too). */ + MIB2_STATS_INC(mib2.icmpinechoreps); + break; + case ICMP_ECHO: + MIB2_STATS_INC(mib2.icmpinechos); + src = ip4_current_dest_addr(); + /* multicast destination address? */ + if (ip4_addr_ismulticast(ip4_current_dest_addr())) { +#if LWIP_MULTICAST_PING + /* For multicast, use address of receiving interface as source address */ + src = netif_ip4_addr(inp); +#else /* LWIP_MULTICAST_PING */ + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to multicast pings\n")); + goto icmperr; +#endif /* LWIP_MULTICAST_PING */ + } + /* broadcast destination address? 
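 *
 * Whether the multicast and broadcast cases are answered at all is a
 * build-time choice; a port that wants to echo them would set, in lwipopts.h:
 *
 *   #define LWIP_MULTICAST_PING 1
 *   #define LWIP_BROADCAST_PING 1
 *
 * Both default to 0, in which case such pings are dropped below.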
*/ + if (ip4_addr_isbroadcast(ip4_current_dest_addr(), ip_current_netif())) { +#if LWIP_BROADCAST_PING + /* For broadcast, use address of receiving interface as source address */ + src = netif_ip4_addr(inp); +#else /* LWIP_BROADCAST_PING */ + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to broadcast pings\n")); + goto icmperr; +#endif /* LWIP_BROADCAST_PING */ + } + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ping\n")); + if (p->tot_len < sizeof(struct icmp_echo_hdr)) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: bad ICMP echo received\n")); + goto lenerr; + } +#if CHECKSUM_CHECK_ICMP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP) { + if (inet_chksum_pbuf(p) != 0) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: checksum failed for received ICMP echo\n")); + pbuf_free(p); + ICMP_STATS_INC(icmp.chkerr); + MIB2_STATS_INC(mib2.icmpinerrors); + return; + } + } +#endif +#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN + if (pbuf_add_header(p, hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN)) { + /* p is not big enough to contain link headers + * allocate a new one and copy p into it + */ + struct pbuf *r; + u16_t alloc_len = (u16_t)(p->tot_len + hlen); + if (alloc_len < p->tot_len) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed (tot_len overflow)\n")); + goto icmperr; + } + /* allocate new packet buffer with space for link headers */ + r = pbuf_alloc(PBUF_LINK, alloc_len, PBUF_RAM); + if (r == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed\n")); + goto icmperr; + } + if (r->len < hlen + sizeof(struct icmp_echo_hdr)) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("first pbuf cannot hold the ICMP header")); + pbuf_free(r); + goto icmperr; + } + /* copy the ip header */ + MEMCPY(r->payload, iphdr_in, hlen); + /* switch r->payload back to icmp header (cannot fail) */ + if (pbuf_remove_header(r, hlen)) { + LWIP_ASSERT("icmp_input: moving r->payload to icmp header failed\n", 0); + pbuf_free(r); + goto icmperr; + } + /* copy the rest of the packet without ip header */ + if (pbuf_copy(r, p) != ERR_OK) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("icmp_input: copying to new pbuf failed")); + pbuf_free(r); + goto icmperr; + } + /* free the original p */ + pbuf_free(p); + /* we now have an identical copy of p that has room for link headers */ + p = r; + } else { + /* restore p->payload to point to icmp header (cannot fail) */ + if (pbuf_remove_header(p, hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN)) { + LWIP_ASSERT("icmp_input: restoring original p->payload failed\n", 0); + goto icmperr; + } + } +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */ + /* At this point, all checks are OK. */ + /* We generate an answer by switching the dest and src ip addresses, + * setting the icmp type to ECHO_RESPONSE and updating the checksum. 
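 *
 * The checksum below is patched incrementally (in the spirit of RFC 1624)
 * rather than recomputed: only the ICMP type byte changes, from ICMP_ECHO (8)
 * to ICMP_ER (0), so the 16-bit type/code word drops by 0x0800 and the one's
 * complement checksum must grow by the same amount. Adding 0x0800 to the
 * stored checksum therefore suffices, plus an end-around carry of 1 whenever
 * the addition would wrap past 0xffff - which is exactly what the
 * (chksum > 0xffff - (ICMP_ECHO << 8)) test below detects.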
*/ + iecho = (struct icmp_echo_hdr *)p->payload; + if (pbuf_add_header(p, hlen)) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Can't move over header in packet")); + } else { + err_t ret; + struct ip_hdr *iphdr = (struct ip_hdr *)p->payload; + ip4_addr_copy(iphdr->src, *src); + ip4_addr_copy(iphdr->dest, *ip4_current_src_addr()); + ICMPH_TYPE_SET(iecho, ICMP_ER); +#if CHECKSUM_GEN_ICMP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP) { + /* adjust the checksum */ + if (iecho->chksum > PP_HTONS(0xffffU - (ICMP_ECHO << 8))) { + iecho->chksum = (u16_t)(iecho->chksum + PP_HTONS((u16_t)(ICMP_ECHO << 8)) + 1); + } else { + iecho->chksum = (u16_t)(iecho->chksum + PP_HTONS(ICMP_ECHO << 8)); + } + } +#if LWIP_CHECKSUM_CTRL_PER_NETIF + else { + iecho->chksum = 0; + } +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#else /* CHECKSUM_GEN_ICMP */ + iecho->chksum = 0; +#endif /* CHECKSUM_GEN_ICMP */ + + /* Set the correct TTL and recalculate the header checksum. */ + IPH_TTL_SET(iphdr, ICMP_TTL); + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, hlen)); + } +#endif /* CHECKSUM_GEN_IP */ + + ICMP_STATS_INC(icmp.xmit); + /* increase number of messages attempted to send */ + MIB2_STATS_INC(mib2.icmpoutmsgs); + /* increase number of echo replies attempted to send */ + MIB2_STATS_INC(mib2.icmpoutechoreps); + + /* send an ICMP packet */ + ret = ip4_output_if(p, src, LWIP_IP_HDRINCL, + ICMP_TTL, 0, IP_PROTO_ICMP, inp); + if (ret != ERR_OK) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ip_output_if returned an error: %s\n", lwip_strerr(ret))); + } + } + break; + default: + if (type == ICMP_DUR) { + MIB2_STATS_INC(mib2.icmpindestunreachs); + } else if (type == ICMP_TE) { + MIB2_STATS_INC(mib2.icmpintimeexcds); + } else if (type == ICMP_PP) { + MIB2_STATS_INC(mib2.icmpinparmprobs); + } else if (type == ICMP_SQ) { + MIB2_STATS_INC(mib2.icmpinsrcquenchs); + } else if (type == ICMP_RD) { + MIB2_STATS_INC(mib2.icmpinredirects); + } else if (type == ICMP_TS) { + MIB2_STATS_INC(mib2.icmpintimestamps); + } else if (type == ICMP_TSR) { + MIB2_STATS_INC(mib2.icmpintimestampreps); + } else if (type == ICMP_AM) { + MIB2_STATS_INC(mib2.icmpinaddrmasks); + } else if (type == ICMP_AMR) { + MIB2_STATS_INC(mib2.icmpinaddrmaskreps); + } + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ICMP type %"S16_F" code %"S16_F" not supported.\n", + (s16_t)type, (s16_t)code)); + ICMP_STATS_INC(icmp.proterr); + ICMP_STATS_INC(icmp.drop); + } + pbuf_free(p); + return; +lenerr: + pbuf_free(p); + ICMP_STATS_INC(icmp.lenerr); + MIB2_STATS_INC(mib2.icmpinerrors); + return; +#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING +icmperr: + pbuf_free(p); + ICMP_STATS_INC(icmp.err); + MIB2_STATS_INC(mib2.icmpinerrors); + return; +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING */ +} + +/** + * Send an icmp 'destination unreachable' packet, called from ip_input() if + * the transport layer protocol is unknown and from udp_input() if the local + * port is not bound. 
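 *
 * For example, udp_input() reports an unbound destination port with
 * (after first moving p->payload back to the IP header, as this function
 * expects):
 *
 *   icmp_dest_unreach(p, ICMP_DUR_PORT);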
+ * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IP header + * @param t type of the 'unreachable' packet + */ +void +icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t) +{ + MIB2_STATS_INC(mib2.icmpoutdestunreachs); + icmp_send_response(p, ICMP_DUR, t); +} + +#if IP_FORWARD || IP_REASSEMBLY +/** + * Send a 'time exceeded' packet, called from ip_forward() if TTL is 0. + * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IP header + * @param t type of the 'time exceeded' packet + */ +void +icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t) +{ + MIB2_STATS_INC(mib2.icmpouttimeexcds); + icmp_send_response(p, ICMP_TE, t); +} + +#endif /* IP_FORWARD || IP_REASSEMBLY */ + +/** + * Send an icmp packet in response to an incoming packet. + * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IP header + * @param type Type of the ICMP header + * @param code Code of the ICMP header + */ +static void +icmp_send_response(struct pbuf *p, u8_t type, u8_t code) +{ + struct pbuf *q; + struct ip_hdr *iphdr; + /* we can use the echo header here */ + struct icmp_echo_hdr *icmphdr; + ip4_addr_t iphdr_src; + struct netif *netif; + + /* increase number of messages attempted to send */ + MIB2_STATS_INC(mib2.icmpoutmsgs); + + /* ICMP header + IP header + 8 bytes of data */ + q = pbuf_alloc(PBUF_IP, sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE, + PBUF_RAM); + if (q == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMP packet.\n")); + MIB2_STATS_INC(mib2.icmpouterrors); + return; + } + LWIP_ASSERT("check that first pbuf can hold icmp message", + (q->len >= (sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE))); + + iphdr = (struct ip_hdr *)p->payload; + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded from ")); + ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->src); + LWIP_DEBUGF(ICMP_DEBUG, (" to ")); + ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->dest); + LWIP_DEBUGF(ICMP_DEBUG, ("\n")); + + icmphdr = (struct icmp_echo_hdr *)q->payload; + icmphdr->type = type; + icmphdr->code = code; + icmphdr->id = 0; + icmphdr->seqno = 0; + + /* copy fields from original packet */ + SMEMCPY((u8_t *)q->payload + sizeof(struct icmp_echo_hdr), (u8_t *)p->payload, + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE); + + ip4_addr_copy(iphdr_src, iphdr->src); +#ifdef LWIP_HOOK_IP4_ROUTE_SRC + { + ip4_addr_t iphdr_dst; + ip4_addr_copy(iphdr_dst, iphdr->dest); + netif = ip4_route_src(&iphdr_dst, &iphdr_src); + } +#else + netif = ip4_route(&iphdr_src); +#endif + if (netif != NULL) { + /* calculate checksum */ + icmphdr->chksum = 0; +#if CHECKSUM_GEN_ICMP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP) { + icmphdr->chksum = inet_chksum(icmphdr, q->len); + } +#endif + ICMP_STATS_INC(icmp.xmit); + ip4_output_if(q, NULL, &iphdr_src, ICMP_TTL, 0, IP_PROTO_ICMP, netif); + } + pbuf_free(q); +} + +#endif /* LWIP_IPV4 && LWIP_ICMP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c new file mode 100644 index 00000000..b655aa3a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c @@ -0,0 +1,801 @@ +/** + * @file + * IGMP - Internet Group Management Protocol + * + * @defgroup igmp IGMP + * @ingroup ip4 + * To be called from TCPIP thread + */ + +/* + * Copyright (c) 2002 CITEL Technologies Ltd. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is a contribution to the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. +*/ + +/*------------------------------------------------------------- +Note 1) +Although the rfc requires V1 AND V2 capability +we will only support v2 since now V1 is very old (August 1989) +V1 can be added if required + +a debug print and statistic have been implemented to +show this up. +------------------------------------------------------------- +------------------------------------------------------------- +Note 2) +A query for a specific group address (as opposed to ALLHOSTS) +has now been implemented as I am unsure if it is required + +a debug print and statistic have been implemented to +show this up. +------------------------------------------------------------- +------------------------------------------------------------- +Note 3) +The router alert rfc 2113 is implemented in outgoing packets +but not checked rigorously incoming +------------------------------------------------------------- +Steve Reynolds +------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------- + * RFC 988 - Host extensions for IP multicasting - V0 + * RFC 1054 - Host extensions for IP multicasting - + * RFC 1112 - Host extensions for IP multicasting - V1 + * RFC 2236 - Internet Group Management Protocol, Version 2 - V2 <- this code is based on this RFC (it's the "de facto" standard) + * RFC 3376 - Internet Group Management Protocol, Version 3 - V3 + * RFC 4604 - Using Internet Group Management Protocol Version 3... 
- V3+
+ * RFC 2113 - IP Router Alert Option -
+ *----------------------------------------------------------------------------*/
+
+/*-----------------------------------------------------------------------------
+ * Includes
+ *----------------------------------------------------------------------------*/
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/igmp.h"
+#include "lwip/debug.h"
+#include "lwip/def.h"
+#include "lwip/mem.h"
+#include "lwip/ip.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/netif.h"
+#include "lwip/stats.h"
+#include "lwip/prot/igmp.h"
+
+#include <string.h>
+
+static struct igmp_group *igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr);
+static err_t igmp_remove_group(struct netif *netif, struct igmp_group *group);
+static void igmp_timeout(struct netif *netif, struct igmp_group *group);
+static void igmp_start_timer(struct igmp_group *group, u8_t max_time);
+static void igmp_delaying_member(struct igmp_group *group, u8_t maxresp);
+static err_t igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif);
+static void igmp_send(struct netif *netif, struct igmp_group *group, u8_t type);
+
+static ip4_addr_t allsystems;
+static ip4_addr_t allrouters;
+
+/**
+ * Initialize the IGMP module
+ */
+void
+igmp_init(void)
+{
+  LWIP_DEBUGF(IGMP_DEBUG, ("igmp_init: initializing\n"));
+
+  IP4_ADDR(&allsystems, 224, 0, 0, 1);
+  IP4_ADDR(&allrouters, 224, 0, 0, 2);
+}
+
+/**
+ * Start IGMP processing on interface
+ *
+ * @param netif network interface on which start IGMP processing
+ */
+err_t
+igmp_start(struct netif *netif)
+{
+  struct igmp_group *group;
+
+  LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: starting IGMP processing on if %p\n", (void *)netif));
+
+  group = igmp_lookup_group(netif, &allsystems);
+
+  if (group != NULL) {
+    group->group_state = IGMP_GROUP_IDLE_MEMBER;
+    group->use++;
+
+    /* Allow the igmp messages at the MAC level */
+    if (netif->igmp_mac_filter != NULL) {
+      LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: igmp_mac_filter(ADD "));
+      ip4_addr_debug_print_val(IGMP_DEBUG, allsystems);
+      LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif));
+      netif->igmp_mac_filter(netif, &allsystems, NETIF_ADD_MAC_FILTER);
+    }
+
+    return ERR_OK;
+  }
+
+  return ERR_MEM;
+}
+
+/**
+ * Stop IGMP processing on interface
+ *
+ * @param netif network interface on which stop IGMP processing
+ */
+err_t
+igmp_stop(struct netif *netif)
+{
+  struct igmp_group *group = netif_igmp_data(netif);
+
+  netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, NULL);
+
+  while (group != NULL) {
+    struct igmp_group *next = group->next; /* avoid use-after-free below */
+
+    /* disable the group at the MAC level */
+    if (netif->igmp_mac_filter != NULL) {
+      LWIP_DEBUGF(IGMP_DEBUG, ("igmp_stop: igmp_mac_filter(DEL "));
+      ip4_addr_debug_print_val(IGMP_DEBUG, group->group_address);
+      LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif));
+      netif->igmp_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER);
+    }
+
+    /* free group */
+    memp_free(MEMP_IGMP_GROUP, group);
+
+    /* move to "next" */
+    group = next;
+  }
+  return ERR_OK;
+}
+
+/**
+ * Report IGMP memberships for this interface
+ *
+ * @param netif network interface on which report IGMP memberships
+ */
+void
+igmp_report_groups(struct netif *netif)
+{
+  struct igmp_group *group = netif_igmp_data(netif);
+
+  LWIP_DEBUGF(IGMP_DEBUG, ("igmp_report_groups: sending IGMP reports on if %p\n", (void *)netif));
+
+  /* Skip
the first group in the list, it is always the allsystems group added in igmp_start() */ + if (group != NULL) { + group = group->next; + } + + while (group != NULL) { + igmp_delaying_member(group, IGMP_JOIN_DELAYING_MEMBER_TMR); + group = group->next; + } +} + +/** + * Search for a group in the netif's igmp group list + * + * @param ifp the network interface for which to look + * @param addr the group ip address to search for + * @return a struct igmp_group* if the group has been found, + * NULL if the group wasn't found. + */ +struct igmp_group * +igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr) +{ + struct igmp_group *group = netif_igmp_data(ifp); + + while (group != NULL) { + if (ip4_addr_cmp(&(group->group_address), addr)) { + return group; + } + group = group->next; + } + + /* to be clearer, we return NULL here instead of + * 'group' (which is also NULL at this point). + */ + return NULL; +} + +/** + * Search for a specific igmp group and create a new one if not found- + * + * @param ifp the network interface for which to look + * @param addr the group ip address to search + * @return a struct igmp_group*, + * NULL on memory error. + */ +static struct igmp_group * +igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr) +{ + struct igmp_group *group; + struct igmp_group *list_head = netif_igmp_data(ifp); + + /* Search if the group already exists */ + group = igmp_lookfor_group(ifp, addr); + if (group != NULL) { + /* Group already exists. */ + return group; + } + + /* Group doesn't exist yet, create a new one */ + group = (struct igmp_group *)memp_malloc(MEMP_IGMP_GROUP); + if (group != NULL) { + ip4_addr_set(&(group->group_address), addr); + group->timer = 0; /* Not running */ + group->group_state = IGMP_GROUP_NON_MEMBER; + group->last_reporter_flag = 0; + group->use = 0; + + /* Ensure allsystems group is always first in list */ + if (list_head == NULL) { + /* this is the first entry in linked list */ + LWIP_ASSERT("igmp_lookup_group: first group must be allsystems", + (ip4_addr_cmp(addr, &allsystems) != 0)); + group->next = NULL; + netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, group); + } else { + /* append _after_ first entry */ + LWIP_ASSERT("igmp_lookup_group: all except first group must not be allsystems", + (ip4_addr_cmp(addr, &allsystems) == 0)); + group->next = list_head->next; + list_head->next = group; + } + } + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_lookup_group: %sallocated a new group with address ", (group ? "" : "impossible to "))); + ip4_addr_debug_print(IGMP_DEBUG, addr); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)ifp)); + + return group; +} + +/** + * Remove a group from netif's igmp group list, but don't free it yet + * + * @param group the group to remove from the netif's igmp group list + * @return ERR_OK if group was removed from the list, an err_t otherwise + */ +static err_t +igmp_remove_group(struct netif *netif, struct igmp_group *group) +{ + err_t err = ERR_OK; + struct igmp_group *tmp_group; + + /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */ + for (tmp_group = netif_igmp_data(netif); tmp_group != NULL; tmp_group = tmp_group->next) { + if (tmp_group->next == group) { + tmp_group->next = group->next; + break; + } + } + /* Group not found in netif's igmp group list */ + if (tmp_group == NULL) { + err = ERR_ARG; + } + + return err; +} + +/** + * Called from ip_input() if a new IGMP packet is received. 
+ * + * @param p received igmp packet, p->payload pointing to the igmp header + * @param inp network interface on which the packet was received + * @param dest destination ip address of the igmp packet + */ +void +igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest) +{ + struct igmp_msg *igmp; + struct igmp_group *group; + struct igmp_group *groupref; + + IGMP_STATS_INC(igmp.recv); + + /* Note that the length CAN be greater than 8 but only 8 are used - All are included in the checksum */ + if (p->len < IGMP_MINLEN) { + pbuf_free(p); + IGMP_STATS_INC(igmp.lenerr); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: length error\n")); + return; + } + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: message from ")); + ip4_addr_debug_print_val(IGMP_DEBUG, ip4_current_header()->src); + LWIP_DEBUGF(IGMP_DEBUG, (" to address ")); + ip4_addr_debug_print_val(IGMP_DEBUG, ip4_current_header()->dest); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)inp)); + + /* Now calculate and check the checksum */ + igmp = (struct igmp_msg *)p->payload; + if (inet_chksum(igmp, p->len)) { + pbuf_free(p); + IGMP_STATS_INC(igmp.chkerr); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: checksum error\n")); + return; + } + + /* Packet is ok so find an existing group */ + group = igmp_lookfor_group(inp, dest); /* use the destination IP address of incoming packet */ + + /* If group can be found or create... */ + if (!group) { + pbuf_free(p); + IGMP_STATS_INC(igmp.drop); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP frame not for us\n")); + return; + } + + /* NOW ACT ON THE INCOMING MESSAGE TYPE... */ + switch (igmp->igmp_msgtype) { + case IGMP_MEMB_QUERY: + /* IGMP_MEMB_QUERY to the "all systems" address ? */ + if ((ip4_addr_cmp(dest, &allsystems)) && ip4_addr_isany(&igmp->igmp_group_address)) { + /* THIS IS THE GENERAL QUERY */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: General IGMP_MEMB_QUERY on \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + + if (igmp->igmp_maxresp == 0) { + IGMP_STATS_INC(igmp.rx_v1); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: got an all hosts query with time== 0 - this is V1 and not implemented - treat as v2\n")); + igmp->igmp_maxresp = IGMP_V1_DELAYING_MEMBER_TMR; + } else { + IGMP_STATS_INC(igmp.rx_general); + } + + groupref = netif_igmp_data(inp); + + /* Do not send messages on the all systems group address! */ + /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */ + if (groupref != NULL) { + groupref = groupref->next; + } + + while (groupref) { + igmp_delaying_member(groupref, igmp->igmp_maxresp); + groupref = groupref->next; + } + } else { + /* IGMP_MEMB_QUERY to a specific group ? 
*/ + if (!ip4_addr_isany(&igmp->igmp_group_address)) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_MEMB_QUERY to a specific group ")); + ip4_addr_debug_print_val(IGMP_DEBUG, igmp->igmp_group_address); + if (ip4_addr_cmp(dest, &allsystems)) { + ip4_addr_t groupaddr; + LWIP_DEBUGF(IGMP_DEBUG, (" using \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + /* we first need to re-look for the group since we used dest last time */ + ip4_addr_copy(groupaddr, igmp->igmp_group_address); + group = igmp_lookfor_group(inp, &groupaddr); + } else { + LWIP_DEBUGF(IGMP_DEBUG, (" with the group address as destination [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + } + + if (group != NULL) { + IGMP_STATS_INC(igmp.rx_group); + igmp_delaying_member(group, igmp->igmp_maxresp); + } else { + IGMP_STATS_INC(igmp.drop); + } + } else { + IGMP_STATS_INC(igmp.proterr); + } + } + break; + case IGMP_V2_MEMB_REPORT: + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_V2_MEMB_REPORT\n")); + IGMP_STATS_INC(igmp.rx_report); + if (group->group_state == IGMP_GROUP_DELAYING_MEMBER) { + /* This is on a specific group we have already looked up */ + group->timer = 0; /* stopped */ + group->group_state = IGMP_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + } + break; + default: + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: unexpected msg %d in state %d on group %p on if %p\n", + igmp->igmp_msgtype, group->group_state, (void *)&group, (void *)inp)); + IGMP_STATS_INC(igmp.proterr); + break; + } + + pbuf_free(p); + return; +} + +/** + * @ingroup igmp + * Join a group on one network interface. + * + * @param ifaddr ip address of the network interface which should join a new group + * @param groupaddr the ip address of the group which to join + * @return ERR_OK if group was joined on the netif(s), an err_t otherwise + */ +err_t +igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_joingroup: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_joingroup: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we join this interface ? */ + if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) { + err = igmp_joingroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Return an error even if some network interfaces are joined */ + /** @todo undo any other netif already joined */ + return err; + } + } + } + + return err; +} + +/** + * @ingroup igmp + * Join a group on one network interface. 
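+ *
+ * Usage sketch (the netif variable and group address are illustrative
+ * assumptions): the netif must have NETIF_FLAG_IGMP set and the call must be
+ * made from the core-locked (tcpip) context:
+ * @code
+ *   ip4_addr_t group;
+ *   IP4_ADDR(&group, 239, 0, 0, 1);
+ *   err_t err = igmp_joingroup_netif(&example_netif, &group);
+ *   // err is ERR_OK on success, ERR_MEM if no MEMP_IGMP_GROUP entry was free
+ * @endcode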
+ * + * @param netif the network interface which should join a new group + * @param groupaddr the ip address of the group which to join + * @return ERR_OK if group was joined on the netif, an err_t otherwise + */ +err_t +igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr) +{ + struct igmp_group *group; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_joingroup_netif: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_joingroup_netif: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* make sure it is an igmp-enabled netif */ + LWIP_ERROR("igmp_joingroup_netif: attempt to join on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;); + + /* find group or create a new one if not found */ + group = igmp_lookup_group(netif, groupaddr); + + if (group != NULL) { + /* This should create a new group, check the state to make sure */ + if (group->group_state != IGMP_GROUP_NON_MEMBER) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to group not in state IGMP_GROUP_NON_MEMBER\n")); + } else { + /* OK - it was new group */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to new group: ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, ("\n")); + + /* If first use of the group, allow the group at the MAC level */ + if ((group->use == 0) && (netif->igmp_mac_filter != NULL)) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: igmp_mac_filter(ADD ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER); + } + + IGMP_STATS_INC(igmp.tx_join); + igmp_send(netif, group, IGMP_V2_MEMB_REPORT); + + igmp_start_timer(group, IGMP_JOIN_DELAYING_MEMBER_TMR); + + /* Need to work out where this timer comes from */ + group->group_state = IGMP_GROUP_DELAYING_MEMBER; + } + /* Increment group use */ + group->use++; + /* Join on this interface */ + return ERR_OK; + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: Not enough memory to join to group\n")); + return ERR_MEM; + } +} + +/** + * @ingroup igmp + * Leave a group on one network interface. + * + * @param ifaddr ip address of the network interface which should leave a group + * @param groupaddr the ip address of the group which to leave + * @return ERR_OK if group was left on the netif(s), an err_t otherwise + */ +err_t +igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_leavegroup: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_leavegroup: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we leave this interface ? */ + if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) { + err_t res = igmp_leavegroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Store this result if we have not yet gotten a success */ + err = res; + } + } + } + + return err; +} + +/** + * @ingroup igmp + * Leave a group on one network interface. 
+ * + * @param netif the network interface which should leave a group + * @param groupaddr the ip address of the group which to leave + * @return ERR_OK if group was left on the netif, an err_t otherwise + */ +err_t +igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr) +{ + struct igmp_group *group; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* make sure it is an igmp-enabled netif */ + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;); + + /* find group */ + group = igmp_lookfor_group(netif, groupaddr); + + if (group != NULL) { + /* Only send a leave if the flag is set according to the state diagram */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: Leaving group: ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, ("\n")); + + /* If there is no other use of the group */ + if (group->use <= 1) { + /* Remove the group from the list */ + igmp_remove_group(netif, group); + + /* If we are the last reporter for this group */ + if (group->last_reporter_flag) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: sending leaving group\n")); + IGMP_STATS_INC(igmp.tx_leave); + igmp_send(netif, group, IGMP_LEAVE_GROUP); + } + + /* Disable the group at the MAC level */ + if (netif->igmp_mac_filter != NULL) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: igmp_mac_filter(DEL ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER); + } + + /* Free group struct */ + memp_free(MEMP_IGMP_GROUP, group); + } else { + /* Decrement group use */ + group->use--; + } + return ERR_OK; + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: not member of group\n")); + return ERR_VAL; + } +} + +/** + * The igmp timer function (both for NO_SYS=1 and =0) + * Should be called every IGMP_TMR_INTERVAL milliseconds (100 ms is default). + */ +void +igmp_tmr(void) +{ + struct netif *netif; + + NETIF_FOREACH(netif) { + struct igmp_group *group = netif_igmp_data(netif); + + while (group != NULL) { + if (group->timer > 0) { + group->timer--; + if (group->timer == 0) { + igmp_timeout(netif, group); + } + } + group = group->next; + } + } +} + +/** + * Called if a timeout for one group is reached. + * Sends a report for this group. 
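+ *
+ * Timing note (illustrative arithmetic, assuming the default
+ * IGMP_TMR_INTERVAL of 100 ms): group->timer counts igmp_tmr() ticks, so a
+ * delay started with max_time = 10 expires after at most roughly one second.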
+ * + * @param group an igmp_group for which a timeout is reached + */ +static void +igmp_timeout(struct netif *netif, struct igmp_group *group) +{ + /* If the state is IGMP_GROUP_DELAYING_MEMBER then we send a report for this group + (unless it is the allsystems group) */ + if ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) && + (!(ip4_addr_cmp(&(group->group_address), &allsystems)))) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_timeout: report membership for group with address ")); + ip4_addr_debug_print_val(IGMP_DEBUG, group->group_address); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)netif)); + + group->group_state = IGMP_GROUP_IDLE_MEMBER; + + IGMP_STATS_INC(igmp.tx_report); + igmp_send(netif, group, IGMP_V2_MEMB_REPORT); + } +} + +/** + * Start a timer for an igmp group + * + * @param group the igmp_group for which to start a timer + * @param max_time the time in multiples of IGMP_TMR_INTERVAL (decrease with + * every call to igmp_tmr()) + */ +static void +igmp_start_timer(struct igmp_group *group, u8_t max_time) +{ +#ifdef LWIP_RAND + group->timer = (u16_t)(max_time > 2 ? (LWIP_RAND() % max_time) : 1); +#else /* LWIP_RAND */ + /* ATTENTION: use this only if absolutely necessary! */ + group->timer = max_time / 2; +#endif /* LWIP_RAND */ + + if (group->timer == 0) { + group->timer = 1; + } +} + +/** + * Delaying membership report for a group if necessary + * + * @param group the igmp_group for which "delaying" membership report + * @param maxresp query delay + */ +static void +igmp_delaying_member(struct igmp_group *group, u8_t maxresp) +{ + if ((group->group_state == IGMP_GROUP_IDLE_MEMBER) || + ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) && + ((group->timer == 0) || (maxresp < group->timer)))) { + igmp_start_timer(group, maxresp); + group->group_state = IGMP_GROUP_DELAYING_MEMBER; + } +} + + +/** + * Sends an IP packet on a network interface. This function constructs the IP header + * and calculates the IP header checksum. If the source IP address is NULL, + * the IP address of the outgoing network interface is filled in as source address. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IP/LINK headers + * returns errors returned by netif->output + */ +static err_t +igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif) +{ + /* This is the "router alert" option */ + u16_t ra[2]; + ra[0] = PP_HTONS(ROUTER_ALERT); + ra[1] = 0x0000; /* Router shall examine packet */ + IGMP_STATS_INC(igmp.xmit); + return ip4_output_if_opt(p, src, dest, IGMP_TTL, 0, IP_PROTO_IGMP, netif, ra, ROUTER_ALERTLEN); +} + +/** + * Send an igmp packet to a specific group. 
+ * + * @param group the group to which to send the packet + * @param type the type of igmp packet to send + */ +static void +igmp_send(struct netif *netif, struct igmp_group *group, u8_t type) +{ + struct pbuf *p = NULL; + struct igmp_msg *igmp = NULL; + ip4_addr_t src = *IP4_ADDR_ANY4; + ip4_addr_t *dest = NULL; + + /* IP header + "router alert" option + IGMP header */ + p = pbuf_alloc(PBUF_TRANSPORT, IGMP_MINLEN, PBUF_RAM); + + if (p) { + igmp = (struct igmp_msg *)p->payload; + LWIP_ASSERT("igmp_send: check that first pbuf can hold struct igmp_msg", + (p->len >= sizeof(struct igmp_msg))); + ip4_addr_copy(src, *netif_ip4_addr(netif)); + + if (type == IGMP_V2_MEMB_REPORT) { + dest = &(group->group_address); + ip4_addr_copy(igmp->igmp_group_address, group->group_address); + group->last_reporter_flag = 1; /* Remember we were the last to report */ + } else { + if (type == IGMP_LEAVE_GROUP) { + dest = &allrouters; + ip4_addr_copy(igmp->igmp_group_address, group->group_address); + } + } + + if ((type == IGMP_V2_MEMB_REPORT) || (type == IGMP_LEAVE_GROUP)) { + igmp->igmp_msgtype = type; + igmp->igmp_maxresp = 0; + igmp->igmp_checksum = 0; + igmp->igmp_checksum = inet_chksum(igmp, IGMP_MINLEN); + + igmp_ip_output_if(p, &src, dest, netif); + } + + pbuf_free(p); + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_send: not enough memory for igmp_send\n")); + IGMP_STATS_INC(igmp.memerr); + } +} + +#endif /* LWIP_IPV4 && LWIP_IGMP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c new file mode 100644 index 00000000..26c26a91 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c @@ -0,0 +1,1132 @@ +/** + * @file + * This is the IPv4 layer implementation for incoming and outgoing IP traffic. + * + * @see ip_frag.c + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ *
+ * Author: Adam Dunkels
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4
+
+#include "lwip/ip.h"
+#include "lwip/def.h"
+#include "lwip/mem.h"
+#include "lwip/ip4_frag.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/netif.h"
+#include "lwip/icmp.h"
+#include "lwip/igmp.h"
+#include "lwip/priv/raw_priv.h"
+#include "lwip/udp.h"
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/autoip.h"
+#include "lwip/stats.h"
+#include "lwip/prot/iana.h"
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+/** Set this to 0 in the rare case of wanting to call an extra function to
+ * generate the IP checksum (in contrast to calculating it on-the-fly). */
+#ifndef LWIP_INLINE_IP_CHKSUM
+#if LWIP_CHECKSUM_CTRL_PER_NETIF
+#define LWIP_INLINE_IP_CHKSUM 0
+#else /* LWIP_CHECKSUM_CTRL_PER_NETIF */
+#define LWIP_INLINE_IP_CHKSUM 1
+#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */
+#endif
+
+#if LWIP_INLINE_IP_CHKSUM && CHECKSUM_GEN_IP
+#define CHECKSUM_GEN_IP_INLINE 1
+#else
+#define CHECKSUM_GEN_IP_INLINE 0
+#endif
+
+#if LWIP_DHCP || defined(LWIP_IP_ACCEPT_UDP_PORT)
+#define IP_ACCEPT_LINK_LAYER_ADDRESSING 1
+
+/** Some defines for DHCP to let link-layer-addressed packets through while the
+ * netif is down.
+ * To use this in your own application/protocol, define LWIP_IP_ACCEPT_UDP_PORT(port)
+ * to return 1 if the port is accepted and 0 if the port is not accepted.
+ */
+#if LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT)
+/* accept DHCP client port and custom port */
+#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (((port) == PP_NTOHS(LWIP_IANA_PORT_DHCP_CLIENT)) \
+        || (LWIP_IP_ACCEPT_UDP_PORT(port)))
+#elif defined(LWIP_IP_ACCEPT_UDP_PORT) /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */
+/* accept custom port only */
+#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (LWIP_IP_ACCEPT_UDP_PORT(port))
+#else /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */
+/* accept DHCP client port only */
+#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) ((port) == PP_NTOHS(LWIP_IANA_PORT_DHCP_CLIENT))
+#endif /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */
+
+#else /* LWIP_DHCP */
+#define IP_ACCEPT_LINK_LAYER_ADDRESSING 0
+#endif /* LWIP_DHCP */
+
+/** The IP header ID of the next outgoing IP packet */
+static u16_t ip_id;
+
+#if LWIP_MULTICAST_TX_OPTIONS
+/** The default netif used for multicast */
+static struct netif *ip4_default_multicast_netif;
+
+/**
+ * @ingroup ip4
+ * Set a default netif for IPv4 multicast. */
+void
+ip4_set_default_multicast_netif(struct netif *default_multicast_netif)
+{
+  ip4_default_multicast_netif = default_multicast_netif;
+}
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+
+#ifdef LWIP_HOOK_IP4_ROUTE_SRC
+/**
+ * Source based IPv4 routing must be fully implemented in
+ * LWIP_HOOK_IP4_ROUTE_SRC(). This function only provides the parameters.
+ */
+struct netif *
+ip4_route_src(const ip4_addr_t *src, const ip4_addr_t *dest)
+{
+  if (src != NULL) {
+    /* when src==NULL, the hook is called from ip4_route(dest) */
+    struct netif *netif = LWIP_HOOK_IP4_ROUTE_SRC(src, dest);
+    if (netif != NULL) {
+      return netif;
+    }
+  }
+  return ip4_route(dest);
+}
+#endif /* LWIP_HOOK_IP4_ROUTE_SRC */
+
+/**
+ * Finds the appropriate network interface for a given IP address. It
+ * searches the list of network interfaces linearly. A match is found
+ * if the masked IP address of the network interface equals the masked
+ * IP address given to the function.
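+ *
+ * Illustrative example (the addresses are assumptions): with a netif
+ * configured as 192.168.1.10 / 255.255.255.0, a destination of 192.168.1.42
+ * matches that netif, while 8.8.8.8 matches no on-link prefix and falls back
+ * to netif_default (provided it is up, has link and a valid address). For a
+ * hypothetical ip4_addr_t dest_ip:
+ * @code
+ *   struct netif *out = ip4_route(&dest_ip);
+ *   if (out == NULL) {
+ *     return ERR_RTE;   // no usable route
+ *   }
+ * @endcode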
+ * + * @param dest the destination IP address for which to find the route + * @return the netif on which to send to reach dest + */ +struct netif * +ip4_route(const ip4_addr_t *dest) +{ +#if !LWIP_SINGLE_NETIF + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_MULTICAST_TX_OPTIONS + /* Use administratively selected interface for multicast by default */ + if (ip4_addr_ismulticast(dest) && ip4_default_multicast_netif) { + return ip4_default_multicast_netif; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + /* bug #54569: in case LWIP_SINGLE_NETIF=1 and LWIP_DEBUGF() disabled, the following loop is optimized away */ + LWIP_UNUSED_ARG(dest); + + /* iterate through netifs */ + NETIF_FOREACH(netif) { + /* is the netif up, does it have a link and a valid address? */ + if (netif_is_up(netif) && netif_is_link_up(netif) && !ip4_addr_isany_val(*netif_ip4_addr(netif))) { + /* network mask matches? */ + if (ip4_addr_netcmp(dest, netif_ip4_addr(netif), netif_ip4_netmask(netif))) { + /* return netif on which to forward IP packet */ + return netif; + } + /* gateway matches on a non broadcast interface? (i.e. peer in a point to point interface) */ + if (((netif->flags & NETIF_FLAG_BROADCAST) == 0) && ip4_addr_cmp(dest, netif_ip4_gw(netif))) { + /* return netif on which to forward IP packet */ + return netif; + } + } + } + +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + /* loopif is disabled, looopback traffic is passed through any netif */ + if (ip4_addr_isloopback(dest)) { + /* don't check for link on loopback traffic */ + if (netif_default != NULL && netif_is_up(netif_default)) { + return netif_default; + } + /* default netif is not up, just use any netif for loopback traffic */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif)) { + return netif; + } + } + return NULL; + } +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC + netif = LWIP_HOOK_IP4_ROUTE_SRC(NULL, dest); + if (netif != NULL) { + return netif; + } +#elif defined(LWIP_HOOK_IP4_ROUTE) + netif = LWIP_HOOK_IP4_ROUTE(dest); + if (netif != NULL) { + return netif; + } +#endif +#endif /* !LWIP_SINGLE_NETIF */ + + if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default) || + ip4_addr_isany_val(*netif_ip4_addr(netif_default)) || ip4_addr_isloopback(dest)) { + /* No matching netif found and default netif is not usable. + If this is not good enough for you, use LWIP_HOOK_IP4_ROUTE() */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_route: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + MIB2_STATS_INC(mib2.ipoutnoroutes); + return NULL; + } + + return netif_default; +} + +#if IP_FORWARD +/** + * Determine whether an IP address is in a reserved set of addresses + * that may not be forwarded, or whether datagrams to that destination + * may be forwarded. 
+ * @param p the packet to forward + * @return 1: can forward 0: discard + */ +static int +ip4_canforward(struct pbuf *p) +{ + u32_t addr = lwip_htonl(ip4_addr_get_u32(ip4_current_dest_addr())); + +#ifdef LWIP_HOOK_IP4_CANFORWARD + int ret = LWIP_HOOK_IP4_CANFORWARD(p, addr); + if (ret >= 0) { + return ret; + } +#endif /* LWIP_HOOK_IP4_CANFORWARD */ + + if (p->flags & PBUF_FLAG_LLBCAST) { + /* don't route link-layer broadcasts */ + return 0; + } + if ((p->flags & PBUF_FLAG_LLMCAST) || IP_MULTICAST(addr)) { + /* don't route link-layer multicasts (use LWIP_HOOK_IP4_CANFORWARD instead) */ + return 0; + } + if (IP_EXPERIMENTAL(addr)) { + return 0; + } + if (IP_CLASSA(addr)) { + u32_t net = addr & IP_CLASSA_NET; + if ((net == 0) || (net == ((u32_t)IP_LOOPBACKNET << IP_CLASSA_NSHIFT))) { + /* don't route loopback packets */ + return 0; + } + } + return 1; +} + +/** + * Forwards an IP packet. It finds an appropriate route for the + * packet, decrements the TTL value of the packet, adjusts the + * checksum and outputs the packet on the appropriate interface. + * + * @param p the packet to forward (p->payload points to IP header) + * @param iphdr the IP header of the input packet + * @param inp the netif on which this packet was received + */ +static void +ip4_forward(struct pbuf *p, struct ip_hdr *iphdr, struct netif *inp) +{ + struct netif *netif; + + PERF_START; + LWIP_UNUSED_ARG(inp); + + if (!ip4_canforward(p)) { + goto return_noroute; + } + + /* RFC3927 2.7: do not forward link-local addresses */ + if (ip4_addr_islinklocal(ip4_current_dest_addr())) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not forwarding LLA %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + goto return_noroute; + } + + /* Find network interface where to forward this IP packet to. */ + netif = ip4_route_src(ip4_current_src_addr(), ip4_current_dest_addr()); + if (netif == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: no forwarding route for %"U16_F".%"U16_F".%"U16_F".%"U16_F" found\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + /* @todo: send ICMP_DUR_NET? */ + goto return_noroute; + } +#if !IP_FORWARD_ALLOW_TX_ON_RX_NETIF + /* Do not forward packets onto the same network interface on which + * they arrived. */ + if (netif == inp) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not bouncing packets back on incoming interface.\n")); + goto return_noroute; + } +#endif /* IP_FORWARD_ALLOW_TX_ON_RX_NETIF */ + + /* decrement TTL */ + IPH_TTL_SET(iphdr, IPH_TTL(iphdr) - 1); + /* send ICMP if TTL == 0 */ + if (IPH_TTL(iphdr) == 0) { + MIB2_STATS_INC(mib2.ipinhdrerrors); +#if LWIP_ICMP + /* Don't send ICMP messages in response to ICMP messages */ + if (IPH_PROTO(iphdr) != IP_PROTO_ICMP) { + icmp_time_exceeded(p, ICMP_TE_TTL); + } +#endif /* LWIP_ICMP */ + return; + } + + /* Incrementally update the IP checksum. 
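+     This is the RFC 1624 incremental update for a TTL decrement: the 16-bit
+     TTL/protocol word drops by 0x0100, so the stored one's-complement
+     checksum field is raised by 0x0100 (kept in network byte order via
+     PP_HTONS), and the end-around carry is folded back in with the extra
+     "+ 1" when the addition would wrap past 0xffff.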
*/ + if (IPH_CHKSUM(iphdr) >= PP_HTONS(0xffffU - 0x100)) { + IPH_CHKSUM_SET(iphdr, (u16_t)(IPH_CHKSUM(iphdr) + PP_HTONS(0x100) + 1)); + } else { + IPH_CHKSUM_SET(iphdr, (u16_t)(IPH_CHKSUM(iphdr) + PP_HTONS(0x100))); + } + + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: forwarding packet to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + + IP_STATS_INC(ip.fw); + MIB2_STATS_INC(mib2.ipforwdatagrams); + IP_STATS_INC(ip.xmit); + + PERF_STOP("ip4_forward"); + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif->mtu && (p->tot_len > netif->mtu)) { + if ((IPH_OFFSET(iphdr) & PP_NTOHS(IP_DF)) == 0) { +#if IP_FRAG + ip4_frag(p, netif, ip4_current_dest_addr()); +#else /* IP_FRAG */ + /* @todo: send ICMP Destination Unreachable code 13 "Communication administratively prohibited"? */ +#endif /* IP_FRAG */ + } else { +#if LWIP_ICMP + /* send ICMP Destination Unreachable code 4: "Fragmentation Needed and DF Set" */ + icmp_dest_unreach(p, ICMP_DUR_FRAG); +#endif /* LWIP_ICMP */ + } + return; + } + /* transmit pbuf on chosen interface */ + netif->output(netif, p, ip4_current_dest_addr()); + return; +return_noroute: + MIB2_STATS_INC(mib2.ipoutnoroutes); +} +#endif /* IP_FORWARD */ + +/** Return true if the current input packet should be accepted on this netif */ +static int +ip4_input_accept(struct netif *netif) +{ + LWIP_DEBUGF(IP_DEBUG, ("ip_input: iphdr->dest 0x%"X32_F" netif->ip_addr 0x%"X32_F" (0x%"X32_F", 0x%"X32_F", 0x%"X32_F")\n", + ip4_addr_get_u32(ip4_current_dest_addr()), ip4_addr_get_u32(netif_ip4_addr(netif)), + ip4_addr_get_u32(ip4_current_dest_addr()) & ip4_addr_get_u32(netif_ip4_netmask(netif)), + ip4_addr_get_u32(netif_ip4_addr(netif)) & ip4_addr_get_u32(netif_ip4_netmask(netif)), + ip4_addr_get_u32(ip4_current_dest_addr()) & ~ip4_addr_get_u32(netif_ip4_netmask(netif)))); + + /* interface is up and configured? */ + if ((netif_is_up(netif)) && (!ip4_addr_isany_val(*netif_ip4_addr(netif)))) { + /* unicast to this interface address? */ + if (ip4_addr_cmp(ip4_current_dest_addr(), netif_ip4_addr(netif)) || + /* or broadcast on this interface network address? */ + ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + || (ip4_addr_get_u32(ip4_current_dest_addr()) == PP_HTONL(IPADDR_LOOPBACK)) +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ + ) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + /* accept on this netif */ + return 1; + } +#if LWIP_AUTOIP + /* connections to link-local addresses must persist after changing + the netif's address (RFC3927 ch. 1.9) */ + if (autoip_accept_packet(netif, ip4_current_dest_addr())) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: LLA packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + /* accept on this netif */ + return 1; + } +#endif /* LWIP_AUTOIP */ + } + return 0; +} + +/** + * This function is called by the network interface device driver when + * an IP packet is received. The function does the basic checks of the + * IP header such as packet size being at least larger than the header + * size etc. If the packet was not destined for us, the packet is + * forwarded (using ip_forward). The IP checksum is always checked. + * + * Finally, the packet is sent to the upper layer protocol input function. 
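+ *
+ * Integration sketch (driver and variable names are assumptions): drivers
+ * normally do not call this function directly; it is reached through the
+ * netif input callback registered with netif_add(), e.g.
+ * @code
+ *   netif_add(&example_netif, &ipaddr, &netmask, &gw, NULL,
+ *             example_ethernetif_init, ethernet_input);
+ * @endcode
+ * where ethernet_input() hands IPv4 frames to ip4_input(); in an OS build,
+ * tcpip_input() is registered instead so packets are first queued to the
+ * tcpip thread.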
+ * + * @param p the received IP packet (p->payload points to IP header) + * @param inp the netif on which this packet was received + * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't + * processed, but currently always returns ERR_OK) + */ +err_t +ip4_input(struct pbuf *p, struct netif *inp) +{ + const struct ip_hdr *iphdr; + struct netif *netif; + u16_t iphdr_hlen; + u16_t iphdr_len; +#if IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP + int check_ip_src = 1; +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP */ +#if LWIP_RAW + raw_input_state_t raw_status; +#endif /* LWIP_RAW */ + + LWIP_ASSERT_CORE_LOCKED(); + + IP_STATS_INC(ip.recv); + MIB2_STATS_INC(mib2.ipinreceives); + + /* identify the IP header */ + iphdr = (struct ip_hdr *)p->payload; + if (IPH_V(iphdr) != 4) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IP packet dropped due to bad version number %"U16_F"\n", (u16_t)IPH_V(iphdr))); + ip4_debug_print(p); + pbuf_free(p); + IP_STATS_INC(ip.err); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinhdrerrors); + return ERR_OK; + } + +#ifdef LWIP_HOOK_IP4_INPUT + if (LWIP_HOOK_IP4_INPUT(p, inp)) { + /* the packet has been eaten */ + return ERR_OK; + } +#endif + + /* obtain IP header length in bytes */ + iphdr_hlen = IPH_HL_BYTES(iphdr); + /* obtain ip length in bytes */ + iphdr_len = lwip_ntohs(IPH_LEN(iphdr)); + + /* Trim pbuf. This is especially required for packets < 60 bytes. */ + if (iphdr_len < p->tot_len) { + pbuf_realloc(p, iphdr_len); + } + + /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */ + if ((iphdr_hlen > p->len) || (iphdr_len > p->tot_len) || (iphdr_hlen < IP_HLEN)) { + if (iphdr_hlen < IP_HLEN) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("ip4_input: short IP header (%"U16_F" bytes) received, IP packet dropped\n", iphdr_hlen)); + } + if (iphdr_hlen > p->len) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IP header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", + iphdr_hlen, p->len)); + } + if (iphdr_len > p->tot_len) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IP (len %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", + iphdr_len, p->tot_len)); + } + /* free (drop) packet pbufs */ + pbuf_free(p); + IP_STATS_INC(ip.lenerr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipindiscards); + return ERR_OK; + } + + /* verify checksum */ +#if CHECKSUM_CHECK_IP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_IP) { + if (inet_chksum(iphdr, iphdr_hlen) != 0) { + + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("Checksum (0x%"X16_F") failed, IP packet dropped.\n", inet_chksum(iphdr, iphdr_hlen))); + ip4_debug_print(p); + pbuf_free(p); + IP_STATS_INC(ip.chkerr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinhdrerrors); + return ERR_OK; + } + } +#endif + + /* copy IP addresses to aligned ip_addr_t */ + ip_addr_copy_from_ip4(ip_data.current_iphdr_dest, iphdr->dest); + ip_addr_copy_from_ip4(ip_data.current_iphdr_src, iphdr->src); + + /* match packet against an interface, i.e. is this packet for us? 
*/ + if (ip4_addr_ismulticast(ip4_current_dest_addr())) { +#if LWIP_IGMP + if ((inp->flags & NETIF_FLAG_IGMP) && (igmp_lookfor_group(inp, ip4_current_dest_addr()))) { + /* IGMP snooping switches need 0.0.0.0 to be allowed as source address (RFC 4541) */ + ip4_addr_t allsystems; + IP4_ADDR(&allsystems, 224, 0, 0, 1); + if (ip4_addr_cmp(ip4_current_dest_addr(), &allsystems) && + ip4_addr_isany(ip4_current_src_addr())) { + check_ip_src = 0; + } + netif = inp; + } else { + netif = NULL; + } +#else /* LWIP_IGMP */ + if ((netif_is_up(inp)) && (!ip4_addr_isany_val(*netif_ip4_addr(inp)))) { + netif = inp; + } else { + netif = NULL; + } +#endif /* LWIP_IGMP */ + } else { + /* start trying with inp. if that's not acceptable, start walking the + list of configured netifs. */ + if (ip4_input_accept(inp)) { + netif = inp; + } else { + netif = NULL; +#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF + /* Packets sent to the loopback address must not be accepted on an + * interface that does not have the loopback address assigned to it, + * unless a non-loopback interface is used for loopback traffic. */ + if (!ip4_addr_isloopback(ip4_current_dest_addr())) +#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */ + { +#if !LWIP_SINGLE_NETIF + NETIF_FOREACH(netif) { + if (netif == inp) { + /* we checked that before already */ + continue; + } + if (ip4_input_accept(netif)) { + break; + } + } +#endif /* !LWIP_SINGLE_NETIF */ + } + } + } + +#if IP_ACCEPT_LINK_LAYER_ADDRESSING + /* Pass DHCP messages regardless of destination address. DHCP traffic is addressed + * using link layer addressing (such as Ethernet MAC) so we must not filter on IP. + * According to RFC 1542 section 3.1.1, referred by RFC 2131). + * + * If you want to accept private broadcast communication while a netif is down, + * define LWIP_IP_ACCEPT_UDP_PORT(dst_port), e.g.: + * + * #define LWIP_IP_ACCEPT_UDP_PORT(dst_port) ((dst_port) == PP_NTOHS(12345)) + */ + if (netif == NULL) { + /* remote port is DHCP server? */ + if (IPH_PROTO(iphdr) == IP_PROTO_UDP) { + const struct udp_hdr *udphdr = (const struct udp_hdr *)((const u8_t *)iphdr + iphdr_hlen); + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: UDP packet to DHCP client port %"U16_F"\n", + lwip_ntohs(udphdr->dest))); + if (IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(udphdr->dest)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: DHCP packet accepted.\n")); + netif = inp; + check_ip_src = 0; + } + } + } +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ + + /* broadcast or multicast packet source address? Compliant with RFC 1122: 3.2.1.3 */ +#if LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING + if (check_ip_src +#if IP_ACCEPT_LINK_LAYER_ADDRESSING + /* DHCP servers need 0.0.0.0 to be allowed as source address (RFC 1.1.2.2: 3.2.1.3/a) */ + && !ip4_addr_isany_val(*ip4_current_src_addr()) +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ + ) +#endif /* LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING */ + { + if ((ip4_addr_isbroadcast(ip4_current_src_addr(), inp)) || + (ip4_addr_ismulticast(ip4_current_src_addr()))) { + /* packet source is not valid */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("ip4_input: packet source is not valid.\n")); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinaddrerrors); + MIB2_STATS_INC(mib2.ipindiscards); + return ERR_OK; + } + } + + /* packet not for us? 
*/ + if (netif == NULL) { + /* packet not for us, route or discard */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: packet not for us.\n")); +#if IP_FORWARD + /* non-broadcast packet? */ + if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), inp)) { + /* try to forward IP packet on (other) interfaces */ + ip4_forward(p, (struct ip_hdr *)p->payload, inp); + } else +#endif /* IP_FORWARD */ + { + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinaddrerrors); + MIB2_STATS_INC(mib2.ipindiscards); + } + pbuf_free(p); + return ERR_OK; + } + /* packet consists of multiple fragments? */ + if ((IPH_OFFSET(iphdr) & PP_HTONS(IP_OFFMASK | IP_MF)) != 0) { +#if IP_REASSEMBLY /* packet fragment reassembly code present? */ + LWIP_DEBUGF(IP_DEBUG, ("IP packet is a fragment (id=0x%04"X16_F" tot_len=%"U16_F" len=%"U16_F" MF=%"U16_F" offset=%"U16_F"), calling ip4_reass()\n", + lwip_ntohs(IPH_ID(iphdr)), p->tot_len, lwip_ntohs(IPH_LEN(iphdr)), (u16_t)!!(IPH_OFFSET(iphdr) & PP_HTONS(IP_MF)), (u16_t)((lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK) * 8))); + /* reassemble the packet*/ + p = ip4_reass(p); + /* packet not fully reassembled yet? */ + if (p == NULL) { + return ERR_OK; + } + iphdr = (const struct ip_hdr *)p->payload; +#else /* IP_REASSEMBLY == 0, no packet fragment reassembly code present */ + pbuf_free(p); + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since it was fragmented (0x%"X16_F") (while IP_REASSEMBLY == 0).\n", + lwip_ntohs(IPH_OFFSET(iphdr)))); + IP_STATS_INC(ip.opterr); + IP_STATS_INC(ip.drop); + /* unsupported protocol feature */ + MIB2_STATS_INC(mib2.ipinunknownprotos); + return ERR_OK; +#endif /* IP_REASSEMBLY */ + } + +#if IP_OPTIONS_ALLOWED == 0 /* no support for IP options in the IP header? */ + +#if LWIP_IGMP + /* there is an extra "router alert" option in IGMP messages which we allow for but do not police */ + if ((iphdr_hlen > IP_HLEN) && (IPH_PROTO(iphdr) != IP_PROTO_IGMP)) { +#else + if (iphdr_hlen > IP_HLEN) { +#endif /* LWIP_IGMP */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since there were IP options (while IP_OPTIONS_ALLOWED == 0).\n")); + pbuf_free(p); + IP_STATS_INC(ip.opterr); + IP_STATS_INC(ip.drop); + /* unsupported protocol feature */ + MIB2_STATS_INC(mib2.ipinunknownprotos); + return ERR_OK; + } +#endif /* IP_OPTIONS_ALLOWED == 0 */ + + /* send to upper layers */ + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: \n")); + ip4_debug_print(p); + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len)); + + ip_data.current_netif = netif; + ip_data.current_input_netif = inp; + ip_data.current_ip4_header = iphdr; + ip_data.current_ip_header_tot_len = IPH_HL_BYTES(iphdr); + +#if LWIP_RAW + /* raw input did not eat the packet? */ + raw_status = raw_input(p, inp); + if (raw_status != RAW_INPUT_EATEN) +#endif /* LWIP_RAW */ + { + pbuf_remove_header(p, iphdr_hlen); /* Move to payload, no check necessary. 
*/ + + switch (IPH_PROTO(iphdr)) { +#if LWIP_UDP + case IP_PROTO_UDP: +#if LWIP_UDPLITE + case IP_PROTO_UDPLITE: +#endif /* LWIP_UDPLITE */ + MIB2_STATS_INC(mib2.ipindelivers); + udp_input(p, inp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case IP_PROTO_TCP: + MIB2_STATS_INC(mib2.ipindelivers); + tcp_input(p, inp); + break; +#endif /* LWIP_TCP */ +#if LWIP_ICMP + case IP_PROTO_ICMP: + MIB2_STATS_INC(mib2.ipindelivers); + icmp_input(p, inp); + break; +#endif /* LWIP_ICMP */ +#if LWIP_IGMP + case IP_PROTO_IGMP: + igmp_input(p, inp, ip4_current_dest_addr()); + break; +#endif /* LWIP_IGMP */ + default: +#if LWIP_RAW + if (raw_status == RAW_INPUT_DELIVERED) { + MIB2_STATS_INC(mib2.ipindelivers); + } else +#endif /* LWIP_RAW */ + { +#if LWIP_ICMP + /* send ICMP destination protocol unreachable unless is was a broadcast */ + if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) && + !ip4_addr_ismulticast(ip4_current_dest_addr())) { + pbuf_header_force(p, (s16_t)iphdr_hlen); /* Move to ip header, no check necessary. */ + icmp_dest_unreach(p, ICMP_DUR_PROTO); + } +#endif /* LWIP_ICMP */ + + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Unsupported transport protocol %"U16_F"\n", (u16_t)IPH_PROTO(iphdr))); + + IP_STATS_INC(ip.proterr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinunknownprotos); + } + pbuf_free(p); + break; + } + } + + /* @todo: this is not really necessary... */ + ip_data.current_netif = NULL; + ip_data.current_input_netif = NULL; + ip_data.current_ip4_header = NULL; + ip_data.current_ip_header_tot_len = 0; + ip4_addr_set_any(ip4_current_src_addr()); + ip4_addr_set_any(ip4_current_dest_addr()); + + return ERR_OK; +} + +/** + * Sends an IP packet on a network interface. This function constructs + * the IP header and calculates the IP header checksum. If the source + * IP address is NULL, the IP address of the outgoing network + * interface is filled in as source address. + * If the destination IP address is LWIP_IP_HDRINCL, p is assumed to already + * include an IP header and p->payload points to it instead of the data. + * + * @param p the packet to send (p->payload points to the data, e.g. 
next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IP/LINK headers + * returns errors returned by netif->output + * + * @note ip_id: RFC791 "some host may be able to simply use + * unique identifiers independent of destination" + */ +err_t +ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, + u8_t proto, struct netif *netif) +{ +#if IP_OPTIONS_SEND + return ip4_output_if_opt(p, src, dest, ttl, tos, proto, netif, NULL, 0); +} + +/** + * Same as ip_output_if() but with the possibility to include IP options: + * + * @ param ip_options pointer to the IP options, copied into the IP header + * @ param optlen length of ip_options + */ +err_t +ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen) +{ +#endif /* IP_OPTIONS_SEND */ + const ip4_addr_t *src_used = src; + if (dest != LWIP_IP_HDRINCL) { + if (ip4_addr_isany(src)) { + src_used = netif_ip4_addr(netif); + } + } + +#if IP_OPTIONS_SEND + return ip4_output_if_opt_src(p, src_used, dest, ttl, tos, proto, netif, + ip_options, optlen); +#else /* IP_OPTIONS_SEND */ + return ip4_output_if_src(p, src_used, dest, ttl, tos, proto, netif); +#endif /* IP_OPTIONS_SEND */ +} + +/** + * Same as ip_output_if() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, + u8_t proto, struct netif *netif) +{ +#if IP_OPTIONS_SEND + return ip4_output_if_opt_src(p, src, dest, ttl, tos, proto, netif, NULL, 0); +} + +/** + * Same as ip_output_if_opt() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen) +{ +#endif /* IP_OPTIONS_SEND */ + struct ip_hdr *iphdr; + ip4_addr_t dest_addr; +#if CHECKSUM_GEN_IP_INLINE + u32_t chk_sum = 0; +#endif /* CHECKSUM_GEN_IP_INLINE */ + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + MIB2_STATS_INC(mib2.ipoutrequests); + + /* Should the IP header be generated or is it already included in p? 
*/ + if (dest != LWIP_IP_HDRINCL) { + u16_t ip_hlen = IP_HLEN; +#if IP_OPTIONS_SEND + u16_t optlen_aligned = 0; + if (optlen != 0) { +#if CHECKSUM_GEN_IP_INLINE + int i; +#endif /* CHECKSUM_GEN_IP_INLINE */ + if (optlen > (IP_HLEN_MAX - IP_HLEN)) { + /* optlen too long */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: optlen too long\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_VAL; + } + /* round up to a multiple of 4 */ + optlen_aligned = (u16_t)((optlen + 3) & ~3); + ip_hlen = (u16_t)(ip_hlen + optlen_aligned); + /* First write in the IP options */ + if (pbuf_add_header(p, optlen_aligned)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: not enough room for IP options in pbuf\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + MEMCPY(p->payload, ip_options, optlen); + if (optlen < optlen_aligned) { + /* zero the remaining bytes */ + memset(((char *)p->payload) + optlen, 0, (size_t)(optlen_aligned - optlen)); + } +#if CHECKSUM_GEN_IP_INLINE + for (i = 0; i < optlen_aligned / 2; i++) { + chk_sum += ((u16_t *)p->payload)[i]; + } +#endif /* CHECKSUM_GEN_IP_INLINE */ + } +#endif /* IP_OPTIONS_SEND */ + /* generate IP header */ + if (pbuf_add_header(p, IP_HLEN)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: not enough room for IP header in pbuf\n")); + + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + + iphdr = (struct ip_hdr *)p->payload; + LWIP_ASSERT("check that first pbuf can hold struct ip_hdr", + (p->len >= sizeof(struct ip_hdr))); + + IPH_TTL_SET(iphdr, ttl); + IPH_PROTO_SET(iphdr, proto); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += PP_NTOHS(proto | (ttl << 8)); +#endif /* CHECKSUM_GEN_IP_INLINE */ + + /* dest cannot be NULL here */ + ip4_addr_copy(iphdr->dest, *dest); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += ip4_addr_get_u32(&iphdr->dest) & 0xFFFF; + chk_sum += ip4_addr_get_u32(&iphdr->dest) >> 16; +#endif /* CHECKSUM_GEN_IP_INLINE */ + + IPH_VHL_SET(iphdr, 4, ip_hlen / 4); + IPH_TOS_SET(iphdr, tos); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += PP_NTOHS(tos | (iphdr->_v_hl << 8)); +#endif /* CHECKSUM_GEN_IP_INLINE */ + IPH_LEN_SET(iphdr, lwip_htons(p->tot_len)); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += iphdr->_len; +#endif /* CHECKSUM_GEN_IP_INLINE */ + IPH_OFFSET_SET(iphdr, 0); + IPH_ID_SET(iphdr, lwip_htons(ip_id)); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += iphdr->_id; +#endif /* CHECKSUM_GEN_IP_INLINE */ + ++ip_id; + + if (src == NULL) { + ip4_addr_copy(iphdr->src, *IP4_ADDR_ANY4); + } else { + /* src cannot be NULL here */ + ip4_addr_copy(iphdr->src, *src); + } + +#if CHECKSUM_GEN_IP_INLINE + chk_sum += ip4_addr_get_u32(&iphdr->src) & 0xFFFF; + chk_sum += ip4_addr_get_u32(&iphdr->src) >> 16; + chk_sum = (chk_sum >> 16) + (chk_sum & 0xFFFF); + chk_sum = (chk_sum >> 16) + chk_sum; + chk_sum = ~chk_sum; + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + iphdr->_chksum = (u16_t)chk_sum; /* network order */ + } +#if LWIP_CHECKSUM_CTRL_PER_NETIF + else { + IPH_CHKSUM_SET(iphdr, 0); + } +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF*/ +#else /* CHECKSUM_GEN_IP_INLINE */ + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, ip_hlen)); + } +#endif /* CHECKSUM_GEN_IP */ +#endif /* CHECKSUM_GEN_IP_INLINE */ + } else { + /* IP header already included in p */ + if (p->len < IP_HLEN) { + LWIP_DEBUGF(IP_DEBUG | 
LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: LWIP_IP_HDRINCL but pbuf is too short\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + iphdr = (struct ip_hdr *)p->payload; + ip4_addr_copy(dest_addr, iphdr->dest); + dest = &dest_addr; + } + + IP_STATS_INC(ip.xmit); + + LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num)); + ip4_debug_print(p); + +#if ENABLE_LOOPBACK + if (ip4_addr_cmp(dest, netif_ip4_addr(netif)) +#if !LWIP_HAVE_LOOPIF + || ip4_addr_isloopback(dest) +#endif /* !LWIP_HAVE_LOOPIF */ + ) { + /* Packet to self, enqueue it for loopback */ + LWIP_DEBUGF(IP_DEBUG, ("netif_loop_output()")); + return netif_loop_output(netif, p); + } +#if LWIP_MULTICAST_TX_OPTIONS + if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0) { + netif_loop_output(netif, p); + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ +#endif /* ENABLE_LOOPBACK */ +#if IP_FRAG + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif->mtu && (p->tot_len > netif->mtu)) { + return ip4_frag(p, netif, dest); + } +#endif /* IP_FRAG */ + + LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: call netif->output()\n")); + return netif->output(netif, p, dest); +} + +/** + * Simple interface to ip_output_if. It finds the outgoing network + * interface and calls upon ip_output_if to do the actual work. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto) +{ + struct netif *netif; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if ((netif = ip4_route_src(src, dest)) == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + return ERR_RTE; + } + + return ip4_output_if(p, src, dest, ttl, tos, proto, netif); +} + +#if LWIP_NETIF_USE_HINTS +/** Like ip_output, but takes and addr_hint pointer that is passed on to netif->addr_hint + * before calling ip_output_if. + * + * @param p the packet to send (p->payload points to the data, e.g. 
next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * @param netif_hint netif output hint pointer set to netif->hint before + * calling ip_output_if() + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif_hint *netif_hint) +{ + struct netif *netif; + err_t err; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if ((netif = ip4_route_src(src, dest)) == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + return ERR_RTE; + } + + NETIF_SET_HINTS(netif, netif_hint); + err = ip4_output_if(p, src, dest, ttl, tos, proto, netif); + NETIF_RESET_HINTS(netif); + + return err; +} +#endif /* LWIP_NETIF_USE_HINTS*/ + +#if IP_DEBUG +/* Print an IP header by using LWIP_DEBUGF + * @param p an IP packet, p->payload pointing to the IP header + */ +void +ip4_debug_print(struct pbuf *p) +{ + struct ip_hdr *iphdr = (struct ip_hdr *)p->payload; + + LWIP_DEBUGF(IP_DEBUG, ("IP header:\n")); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("|%2"S16_F" |%2"S16_F" | 0x%02"X16_F" | %5"U16_F" | (v, hl, tos, len)\n", + (u16_t)IPH_V(iphdr), + (u16_t)IPH_HL(iphdr), + (u16_t)IPH_TOS(iphdr), + lwip_ntohs(IPH_LEN(iphdr)))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %5"U16_F" |%"U16_F"%"U16_F"%"U16_F"| %4"U16_F" | (id, flags, offset)\n", + lwip_ntohs(IPH_ID(iphdr)), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 15 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 14 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 13 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | 0x%04"X16_F" | (ttl, proto, chksum)\n", + (u16_t)IPH_TTL(iphdr), + (u16_t)IPH_PROTO(iphdr), + lwip_ntohs(IPH_CHKSUM(iphdr)))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (src)\n", + ip4_addr1_16_val(iphdr->src), + ip4_addr2_16_val(iphdr->src), + ip4_addr3_16_val(iphdr->src), + ip4_addr4_16_val(iphdr->src))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (dest)\n", + ip4_addr1_16_val(iphdr->dest), + ip4_addr2_16_val(iphdr->dest), + ip4_addr3_16_val(iphdr->dest), + ip4_addr4_16_val(iphdr->dest))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); +} +#endif /* IP_DEBUG */ + +#endif /* LWIP_IPV4 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c new file mode 100644 index 00000000..33204d11 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c @@ -0,0 +1,321 @@ +/** + * 
@file + * This is the IPv4 address tools implementation. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +/* used by IP4_ADDR_ANY and IP_ADDR_BROADCAST in ip_addr.h */ +const ip_addr_t ip_addr_any = IPADDR4_INIT(IPADDR_ANY); +const ip_addr_t ip_addr_broadcast = IPADDR4_INIT(IPADDR_BROADCAST); + +/** + * Determine if an address is a broadcast address on a network interface + * + * @param addr address to be checked + * @param netif the network interface against which the address is checked + * @return returns non-zero if the address is a broadcast address + */ +u8_t +ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif) +{ + ip4_addr_t ipaddr; + ip4_addr_set_u32(&ipaddr, addr); + + /* all ones (broadcast) or all zeroes (old skool broadcast) */ + if ((~addr == IPADDR_ANY) || + (addr == IPADDR_ANY)) { + return 1; + /* no broadcast support on this network interface? */ + } else if ((netif->flags & NETIF_FLAG_BROADCAST) == 0) { + /* the given address cannot be a broadcast address + * nor can we check against any broadcast addresses */ + return 0; + /* address matches network interface address exactly? => no broadcast */ + } else if (addr == ip4_addr_get_u32(netif_ip4_addr(netif))) { + return 0; + /* on the same (sub) network... */ + } else if (ip4_addr_netcmp(&ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) + /* ...and host identifier bits are all ones? =>... */ + && ((addr & ~ip4_addr_get_u32(netif_ip4_netmask(netif))) == + (IPADDR_BROADCAST & ~ip4_addr_get_u32(netif_ip4_netmask(netif))))) { + /* => network broadcast address */ + return 1; + } else { + return 0; + } +} + +/** Checks if a netmask is valid (starting with ones, then only zeros) + * + * @param netmask the IPv4 netmask to check (in network byte order!) 
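+ *        (for example, 255.255.254.0 is a valid netmask, while 255.0.255.0 is
+ *         not, since a one bit appears after the first zero bit)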
+ * @return 1 if the netmask is valid, 0 if it is not + */ +u8_t +ip4_addr_netmask_valid(u32_t netmask) +{ + u32_t mask; + u32_t nm_hostorder = lwip_htonl(netmask); + + /* first, check for the first zero */ + for (mask = 1UL << 31 ; mask != 0; mask >>= 1) { + if ((nm_hostorder & mask) == 0) { + break; + } + } + /* then check that there is no one */ + for (; mask != 0; mask >>= 1) { + if ((nm_hostorder & mask) != 0) { + /* there is a one after the first zero -> invalid */ + return 0; + } + } + /* no one after the first zero -> valid */ + return 1; +} + +/** + * Ascii internet address interpretation routine. + * The value returned is in network order. + * + * @param cp IP address in ascii representation (e.g. "127.0.0.1") + * @return ip address in network order + */ +u32_t +ipaddr_addr(const char *cp) +{ + ip4_addr_t val; + + if (ip4addr_aton(cp, &val)) { + return ip4_addr_get_u32(&val); + } + return (IPADDR_NONE); +} + +/** + * Check whether "cp" is a valid ascii representation + * of an Internet address and convert to a binary address. + * Returns 1 if the address is valid, 0 if not. + * This replaces inet_addr, the return value from which + * cannot distinguish between failure and a local broadcast address. + * + * @param cp IP address in ascii representation (e.g. "127.0.0.1") + * @param addr pointer to which to save the ip address in network order + * @return 1 if cp could be converted to addr, 0 on failure + */ +int +ip4addr_aton(const char *cp, ip4_addr_t *addr) +{ + u32_t val; + u8_t base; + char c; + u32_t parts[4]; + u32_t *pp = parts; + + c = *cp; + for (;;) { + /* + * Collect number up to ``.''. + * Values are specified as for C: + * 0x=hex, 0=octal, 1-9=decimal. + */ + if (!lwip_isdigit(c)) { + return 0; + } + val = 0; + base = 10; + if (c == '0') { + c = *++cp; + if (c == 'x' || c == 'X') { + base = 16; + c = *++cp; + } else { + base = 8; + } + } + for (;;) { + if (lwip_isdigit(c)) { + val = (val * base) + (u32_t)(c - '0'); + c = *++cp; + } else if (base == 16 && lwip_isxdigit(c)) { + val = (val << 4) | (u32_t)(c + 10 - (lwip_islower(c) ? 'a' : 'A')); + c = *++cp; + } else { + break; + } + } + if (c == '.') { + /* + * Internet format: + * a.b.c.d + * a.b.c (with c treated as 16 bits) + * a.b (with b treated as 24 bits) + */ + if (pp >= parts + 3) { + return 0; + } + *pp++ = val; + c = *++cp; + } else { + break; + } + } + /* + * Check for trailing characters. + */ + if (c != '\0' && !lwip_isspace(c)) { + return 0; + } + /* + * Concoct the address according to + * the number of parts specified. + */ + switch (pp - parts + 1) { + + case 0: + return 0; /* initial nondigit */ + + case 1: /* a -- 32 bits */ + break; + + case 2: /* a.b -- 8.24 bits */ + if (val > 0xffffffUL) { + return 0; + } + if (parts[0] > 0xff) { + return 0; + } + val |= parts[0] << 24; + break; + + case 3: /* a.b.c -- 8.8.16 bits */ + if (val > 0xffff) { + return 0; + } + if ((parts[0] > 0xff) || (parts[1] > 0xff)) { + return 0; + } + val |= (parts[0] << 24) | (parts[1] << 16); + break; + + case 4: /* a.b.c.d -- 8.8.8.8 bits */ + if (val > 0xff) { + return 0; + } + if ((parts[0] > 0xff) || (parts[1] > 0xff) || (parts[2] > 0xff)) { + return 0; + } + val |= (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8); + break; + default: + LWIP_ASSERT("unhandled", 0); + break; + } + if (addr) { + ip4_addr_set_u32(addr, lwip_htonl(val)); + } + return 1; +} + +/** + * Convert numeric IP address into decimal dotted ASCII representation. + * returns ptr to static buffer; not reentrant! 
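+ * (a minimal, hypothetical usage sketch for logging a netif's address:
+ *  LWIP_DEBUGF(IP_DEBUG, ("ip: %s\n", ip4addr_ntoa(netif_ip4_addr(netif)))); )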
+ * + * @param addr ip address in network order to convert + * @return pointer to a global static (!) buffer that holds the ASCII + * representation of addr + */ +char * +ip4addr_ntoa(const ip4_addr_t *addr) +{ + static char str[IP4ADDR_STRLEN_MAX]; + return ip4addr_ntoa_r(addr, str, IP4ADDR_STRLEN_MAX); +} + +/** + * Same as ip4addr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char * +ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen) +{ + u32_t s_addr; + char inv[3]; + char *rp; + u8_t *ap; + u8_t rem; + u8_t n; + u8_t i; + int len = 0; + + s_addr = ip4_addr_get_u32(addr); + + rp = buf; + ap = (u8_t *)&s_addr; + for (n = 0; n < 4; n++) { + i = 0; + do { + rem = *ap % (u8_t)10; + *ap /= (u8_t)10; + inv[i++] = (char)('0' + rem); + } while (*ap); + while (i--) { + if (len++ >= buflen) { + return NULL; + } + *rp++ = inv[i]; + } + if (len++ >= buflen) { + return NULL; + } + *rp++ = '.'; + ap++; + } + *--rp = 0; + return buf; +} + +#endif /* LWIP_IPV4 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c b/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c new file mode 100644 index 00000000..a445530e --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c @@ -0,0 +1,894 @@ +/** + * @file + * This is the IPv4 packet segmentation and reassembly implementation. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Jani Monoses + * Simon Goldschmidt + * original reassembly code by Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip4_frag.h" +#include "lwip/def.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/stats.h" +#include "lwip/icmp.h" + +#include + +#if IP_REASSEMBLY +/** + * The IP reassembly code currently has the following limitations: + * - IP header options are not supported + * - fragments must not overlap (e.g. due to different routes), + * currently, overlapping or duplicate fragments are thrown away + * if IP_REASS_CHECK_OVERLAP=1 (the default)! + * + * @todo: work with IP header options + */ + +/** Setting this to 0, you can turn off checking the fragments for overlapping + * regions. The code gets a little smaller. Only use this if you know that + * overlapping won't occur on your network! */ +#ifndef IP_REASS_CHECK_OVERLAP +#define IP_REASS_CHECK_OVERLAP 1 +#endif /* IP_REASS_CHECK_OVERLAP */ + +/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is + * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. + * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA + * is set to 1, so one datagram can be reassembled at a time, only. */ +#ifndef IP_REASS_FREE_OLDEST +#define IP_REASS_FREE_OLDEST 1 +#endif /* IP_REASS_FREE_OLDEST */ + +#define IP_REASS_FLAG_LASTFRAG 0x01 + +#define IP_REASS_VALIDATE_TELEGRAM_FINISHED 1 +#define IP_REASS_VALIDATE_PBUF_QUEUED 0 +#define IP_REASS_VALIDATE_PBUF_DROPPED -1 + +/** This is a helper struct which holds the starting + * offset and the ending offset of this fragment to + * easily chain the fragments. + * It has the same packing requirements as the IP header, since it replaces + * the IP header in memory in incoming fragments (after copying it) to keep + * track of the various fragments. (-> If the IP header doesn't need packing, + * this struct doesn't need packing, too.) + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip_reass_helper { + PACK_STRUCT_FIELD(struct pbuf *next_pbuf); + PACK_STRUCT_FIELD(u16_t start); + PACK_STRUCT_FIELD(u16_t end); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define IP_ADDRESSES_AND_ID_MATCH(iphdrA, iphdrB) \ + (ip4_addr_cmp(&(iphdrA)->src, &(iphdrB)->src) && \ + ip4_addr_cmp(&(iphdrA)->dest, &(iphdrB)->dest) && \ + IPH_ID(iphdrA) == IPH_ID(iphdrB)) ? 1 : 0 + +/* global variables */ +static struct ip_reassdata *reassdatagrams; +static u16_t ip_reass_pbufcount; + +/* function prototypes */ +static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev); +static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev); + +/** + * Reassembly timer base function + * for both NO_SYS == 0 and 1 (!). + * + * Should be called every 1000 msec (defined by IP_TMR_INTERVAL). + */ +void +ip_reass_tmr(void) +{ + struct ip_reassdata *r, *prev = NULL; + + r = reassdatagrams; + while (r != NULL) { + /* Decrement the timer. 
Once it reaches 0, + * clean up the incomplete fragment assembly */ + if (r->timer > 0) { + r->timer--; + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer dec %"U16_F"\n", (u16_t)r->timer)); + prev = r; + r = r->next; + } else { + /* reassembly timed out */ + struct ip_reassdata *tmp; + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer timed out\n")); + tmp = r; + /* get the next pointer before freeing */ + r = r->next; + /* free the helper struct and all enqueued pbufs */ + ip_reass_free_complete_datagram(tmp, prev); + } + } +} + +/** + * Free a datagram (struct ip_reassdata) and all its pbufs. + * Updates the total count of enqueued pbufs (ip_reass_pbufcount), + * SNMP counters and sends an ICMP time exceeded packet. + * + * @param ipr datagram to free + * @param prev the previous datagram in the linked list + * @return the number of pbufs freed + */ +static int +ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev) +{ + u16_t pbufs_freed = 0; + u16_t clen; + struct pbuf *p; + struct ip_reass_helper *iprh; + + LWIP_ASSERT("prev != ipr", prev != ipr); + if (prev != NULL) { + LWIP_ASSERT("prev->next == ipr", prev->next == ipr); + } + + MIB2_STATS_INC(mib2.ipreasmfails); +#if LWIP_ICMP + iprh = (struct ip_reass_helper *)ipr->p->payload; + if (iprh->start == 0) { + /* The first fragment was received, send ICMP time exceeded. */ + /* First, de-queue the first pbuf from r->p. */ + p = ipr->p; + ipr->p = iprh->next_pbuf; + /* Then, copy the original header into it. */ + SMEMCPY(p->payload, &ipr->iphdr, IP_HLEN); + icmp_time_exceeded(p, ICMP_TE_FRAG); + clen = pbuf_clen(p); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(p); + } +#endif /* LWIP_ICMP */ + + /* First, free all received pbufs. The individual pbufs need to be released + separately as they have not yet been chained */ + p = ipr->p; + while (p != NULL) { + struct pbuf *pcur; + iprh = (struct ip_reass_helper *)p->payload; + pcur = p; + /* get the next pointer before freeing */ + p = iprh->next_pbuf; + clen = pbuf_clen(pcur); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(pcur); + } + /* Then, unchain the struct ip_reassdata from the list and free it. */ + ip_reass_dequeue_datagram(ipr, prev); + LWIP_ASSERT("ip_reass_pbufcount >= pbufs_freed", ip_reass_pbufcount >= pbufs_freed); + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - pbufs_freed); + + return pbufs_freed; +} + +#if IP_REASS_FREE_OLDEST +/** + * Free the oldest datagram to make room for enqueueing new fragments. + * The datagram 'fraghdr' belongs to is not freed! + * + * @param fraghdr IP header of the current fragment + * @param pbufs_needed number of pbufs needed to enqueue + * (used for freeing other datagrams if not enough space) + * @return the number of pbufs freed + */ +static int +ip_reass_remove_oldest_datagram(struct ip_hdr *fraghdr, int pbufs_needed) +{ + /* @todo Can't we simply remove the last datagram in the + * linked list behind reassdatagrams? + */ + struct ip_reassdata *r, *oldest, *prev, *oldest_prev; + int pbufs_freed = 0, pbufs_freed_current; + int other_datagrams; + + /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs, + * but don't free the datagram that 'fraghdr' belongs to! 
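+   * (the loop keeps evicting the oldest unrelated datagram while fewer than
+   *  'pbufs_needed' pbufs have been freed and more than one other datagram exists)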
*/ + do { + oldest = NULL; + prev = NULL; + oldest_prev = NULL; + other_datagrams = 0; + r = reassdatagrams; + while (r != NULL) { + if (!IP_ADDRESSES_AND_ID_MATCH(&r->iphdr, fraghdr)) { + /* Not the same datagram as fraghdr */ + other_datagrams++; + if (oldest == NULL) { + oldest = r; + oldest_prev = prev; + } else if (r->timer <= oldest->timer) { + /* older than the previous oldest */ + oldest = r; + oldest_prev = prev; + } + } + if (r->next != NULL) { + prev = r; + } + r = r->next; + } + if (oldest != NULL) { + pbufs_freed_current = ip_reass_free_complete_datagram(oldest, oldest_prev); + pbufs_freed += pbufs_freed_current; + } + } while ((pbufs_freed < pbufs_needed) && (other_datagrams > 1)); + return pbufs_freed; +} +#endif /* IP_REASS_FREE_OLDEST */ + +/** + * Enqueues a new fragment into the fragment queue + * @param fraghdr points to the new fragments IP hdr + * @param clen number of pbufs needed to enqueue (used for freeing other datagrams if not enough space) + * @return A pointer to the queue location into which the fragment was enqueued + */ +static struct ip_reassdata * +ip_reass_enqueue_new_datagram(struct ip_hdr *fraghdr, int clen) +{ + struct ip_reassdata *ipr; +#if ! IP_REASS_FREE_OLDEST + LWIP_UNUSED_ARG(clen); +#endif + + /* No matching previous fragment found, allocate a new reassdata struct */ + ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA); + if (ipr == NULL) { +#if IP_REASS_FREE_OLDEST + if (ip_reass_remove_oldest_datagram(fraghdr, clen) >= clen) { + ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA); + } + if (ipr == NULL) +#endif /* IP_REASS_FREE_OLDEST */ + { + IPFRAG_STATS_INC(ip_frag.memerr); + LWIP_DEBUGF(IP_REASS_DEBUG, ("Failed to alloc reassdata struct\n")); + return NULL; + } + } + memset(ipr, 0, sizeof(struct ip_reassdata)); + ipr->timer = IP_REASS_MAXAGE; + + /* enqueue the new structure to the front of the list */ + ipr->next = reassdatagrams; + reassdatagrams = ipr; + /* copy the ip header for later tests and input */ + /* @todo: no ip options supported? */ + SMEMCPY(&(ipr->iphdr), fraghdr, IP_HLEN); + return ipr; +} + +/** + * Dequeues a datagram from the datagram queue. Doesn't deallocate the pbufs. + * @param ipr points to the queue entry to dequeue + */ +static void +ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev) +{ + /* dequeue the reass struct */ + if (reassdatagrams == ipr) { + /* it was the first in the list */ + reassdatagrams = ipr->next; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", prev != NULL); + prev->next = ipr->next; + } + + /* now we can free the ip_reassdata struct */ + memp_free(MEMP_REASSDATA, ipr); +} + +/** + * Chain a new pbuf into the pbuf list that composes the datagram. The pbuf list + * will grow over time as new pbufs are rx. + * Also checks that the datagram passes basic continuity checks (if the last + * fragment was received at least once). 
+ * @param ipr points to the reassembly state + * @param new_p points to the pbuf for the current fragment + * @param is_last is 1 if this pbuf has MF==0 (ipr->flags not updated yet) + * @return see IP_REASS_VALIDATE_* defines + */ +static int +ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p, int is_last) +{ + struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev = NULL; + struct pbuf *q; + u16_t offset, len; + u8_t hlen; + struct ip_hdr *fraghdr; + int valid = 1; + + /* Extract length and fragment offset from current fragment */ + fraghdr = (struct ip_hdr *)new_p->payload; + len = lwip_ntohs(IPH_LEN(fraghdr)); + hlen = IPH_HL_BYTES(fraghdr); + if (hlen > len) { + /* invalid datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } + len = (u16_t)(len - hlen); + offset = IPH_OFFSET_BYTES(fraghdr); + + /* overwrite the fragment's ip header from the pbuf with our helper struct, + * and setup the embedded helper structure. */ + /* make sure the struct ip_reass_helper fits into the IP header */ + LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN", + sizeof(struct ip_reass_helper) <= IP_HLEN); + iprh = (struct ip_reass_helper *)new_p->payload; + iprh->next_pbuf = NULL; + iprh->start = offset; + iprh->end = (u16_t)(offset + len); + if (iprh->end < offset) { + /* u16_t overflow, cannot handle this */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } + + /* Iterate through until we either get to the end of the list (append), + * or we find one with a larger offset (insert). */ + for (q = ipr->p; q != NULL;) { + iprh_tmp = (struct ip_reass_helper *)q->payload; + if (iprh->start < iprh_tmp->start) { + /* the new pbuf should be inserted before this */ + iprh->next_pbuf = q; + if (iprh_prev != NULL) { + /* not the fragment with the lowest offset */ +#if IP_REASS_CHECK_OVERLAP + if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) { + /* fragment overlaps with previous or following, throw away */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = new_p; + if (iprh_prev->end != iprh->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + if (iprh->end > iprh_tmp->start) { + /* fragment overlaps with following, throw away */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } +#endif /* IP_REASS_CHECK_OVERLAP */ + /* fragment with the lowest offset */ + ipr->p = new_p; + } + break; + } else if (iprh->start == iprh_tmp->start) { + /* received the same datagram twice: no need to keep the datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; +#if IP_REASS_CHECK_OVERLAP + } else if (iprh->start < iprh_tmp->end) { + /* overlap: no need to keep the new datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; +#endif /* IP_REASS_CHECK_OVERLAP */ + } else { + /* Check if the fragments received so far have no holes. */ + if (iprh_prev != NULL) { + if (iprh_prev->end != iprh_tmp->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } + } + q = iprh_tmp->next_pbuf; + iprh_prev = iprh_tmp; + } + + /* If q is NULL, then we made it to the end of the list. 
Determine what to do now */ + if (q == NULL) { + if (iprh_prev != NULL) { + /* this is (for now), the fragment with the highest offset: + * chain it to the last fragment */ +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start); +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = new_p; + if (iprh_prev->end != iprh->start) { + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("no previous fragment, this must be the first fragment!", + ipr->p == NULL); +#endif /* IP_REASS_CHECK_OVERLAP */ + /* this is the first fragment we ever received for this ip datagram */ + ipr->p = new_p; + } + } + + /* At this point, the validation part begins: */ + /* If we already received the last fragment */ + if (is_last || ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0)) { + /* and had no holes so far */ + if (valid) { + /* then check if the rest of the fragments is here */ + /* Check if the queue starts with the first datagram */ + if ((ipr->p == NULL) || (((struct ip_reass_helper *)ipr->p->payload)->start != 0)) { + valid = 0; + } else { + /* and check that there are no holes after this datagram */ + iprh_prev = iprh; + q = iprh->next_pbuf; + while (q != NULL) { + iprh = (struct ip_reass_helper *)q->payload; + if (iprh_prev->end != iprh->start) { + valid = 0; + break; + } + iprh_prev = iprh; + q = iprh->next_pbuf; + } + /* if still valid, all fragments are received + * (because to the MF==0 already arrived */ + if (valid) { + LWIP_ASSERT("sanity check", ipr->p != NULL); + LWIP_ASSERT("sanity check", + ((struct ip_reass_helper *)ipr->p->payload) != iprh); + LWIP_ASSERT("validate_datagram:next_pbuf!=NULL", + iprh->next_pbuf == NULL); + } + } + } + /* If valid is 0 here, there are some fragments missing in the middle + * (since MF == 0 has already arrived). Such datagrams simply time out if + * no more fragments are received... */ + return valid ? IP_REASS_VALIDATE_TELEGRAM_FINISHED : IP_REASS_VALIDATE_PBUF_QUEUED; + } + /* If we come here, not all fragments were received, yet! */ + return IP_REASS_VALIDATE_PBUF_QUEUED; /* not yet valid! */ +} + +/** + * Reassembles incoming IP fragments into an IP datagram. + * + * @param p points to a pbuf chain of the fragment + * @return NULL if reassembly is incomplete, ? otherwise + */ +struct pbuf * +ip4_reass(struct pbuf *p) +{ + struct pbuf *r; + struct ip_hdr *fraghdr; + struct ip_reassdata *ipr; + struct ip_reass_helper *iprh; + u16_t offset, len, clen; + u8_t hlen; + int valid; + int is_last; + + IPFRAG_STATS_INC(ip_frag.recv); + MIB2_STATS_INC(mib2.ipreasmreqds); + + fraghdr = (struct ip_hdr *)p->payload; + + if (IPH_HL_BYTES(fraghdr) != IP_HLEN) { + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: IP options currently not supported!\n")); + IPFRAG_STATS_INC(ip_frag.err); + goto nullreturn; + } + + offset = IPH_OFFSET_BYTES(fraghdr); + len = lwip_ntohs(IPH_LEN(fraghdr)); + hlen = IPH_HL_BYTES(fraghdr); + if (hlen > len) { + /* invalid datagram */ + goto nullreturn; + } + len = (u16_t)(len - hlen); + + /* Check if we are allowed to enqueue more datagrams. 
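+     (ip_reass_pbufcount tracks the total number of pbufs queued for reassembly
+     across all datagrams; if adding this fragment's clen pbufs would exceed
+     IP_REASS_MAX_PBUFS, the oldest unrelated datagram may first be evicted when
+     IP_REASS_FREE_OLDEST is enabled, otherwise the fragment is dropped)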
*/ + clen = pbuf_clen(p); + if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) { +#if IP_REASS_FREE_OLDEST + if (!ip_reass_remove_oldest_datagram(fraghdr, clen) || + ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS)) +#endif /* IP_REASS_FREE_OLDEST */ + { + /* No datagram could be freed and still too many pbufs enqueued */ + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n", + ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS)); + IPFRAG_STATS_INC(ip_frag.memerr); + /* @todo: send ICMP time exceeded here? */ + /* drop this pbuf */ + goto nullreturn; + } + } + + /* Look for the datagram the fragment belongs to in the current datagram queue, + * remembering the previous in the queue for later dequeueing. */ + for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) { + /* Check if the incoming fragment matches the one currently present + in the reassembly buffer. If so, we proceed with copying the + fragment into the buffer. */ + if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) { + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: matching previous fragment ID=%"X16_F"\n", + lwip_ntohs(IPH_ID(fraghdr)))); + IPFRAG_STATS_INC(ip_frag.cachehit); + break; + } + } + + if (ipr == NULL) { + /* Enqueue a new datagram into the datagram queue */ + ipr = ip_reass_enqueue_new_datagram(fraghdr, clen); + /* Bail if unable to enqueue */ + if (ipr == NULL) { + goto nullreturn; + } + } else { + if (((lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) && + ((lwip_ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) { + /* ipr->iphdr is not the header from the first fragment, but fraghdr is + * -> copy fraghdr into ipr->iphdr since we want to have the header + * of the first fragment (for ICMP time exceeded and later, for copying + * all options, if supported)*/ + SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN); + } + } + + /* At this point, we have either created a new entry or pointing + * to an existing one */ + + /* check for 'no more fragments', and update queue entry*/ + is_last = (IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0; + if (is_last) { + u16_t datagram_len = (u16_t)(offset + len); + if ((datagram_len < offset) || (datagram_len > (0xFFFF - IP_HLEN))) { + /* u16_t overflow, cannot handle this */ + goto nullreturn_ipr; + } + } + /* find the right place to insert this pbuf */ + /* @todo: trim pbufs if fragments are overlapping */ + valid = ip_reass_chain_frag_into_datagram_and_validate(ipr, p, is_last); + if (valid == IP_REASS_VALIDATE_PBUF_DROPPED) { + goto nullreturn_ipr; + } + /* if we come here, the pbuf has been enqueued */ + + /* Track the current number of pbufs current 'in-flight', in order to limit + the number of fragments that may be enqueued at any one time + (overflow checked by testing against IP_REASS_MAX_PBUFS) */ + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount + clen); + if (is_last) { + u16_t datagram_len = (u16_t)(offset + len); + ipr->datagram_len = datagram_len; + ipr->flags |= IP_REASS_FLAG_LASTFRAG; + LWIP_DEBUGF(IP_REASS_DEBUG, + ("ip4_reass: last fragment seen, total len %"S16_F"\n", + ipr->datagram_len)); + } + + if (valid == IP_REASS_VALIDATE_TELEGRAM_FINISHED) { + struct ip_reassdata *ipr_prev; + /* the totally last fragment (flag more fragments = 0) was received at least + * once AND all fragments are received */ + u16_t datagram_len = (u16_t)(ipr->datagram_len + IP_HLEN); + + /* save the second pbuf before copying the header over the pointer */ + r = ((struct ip_reass_helper *)ipr->p->payload)->next_pbuf; + + /* copy the original ip header back 
to the first pbuf */ + fraghdr = (struct ip_hdr *)(ipr->p->payload); + SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN); + IPH_LEN_SET(fraghdr, lwip_htons(datagram_len)); + IPH_OFFSET_SET(fraghdr, 0); + IPH_CHKSUM_SET(fraghdr, 0); + /* @todo: do we need to set/calculate the correct checksum? */ +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(ip_current_input_netif(), NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN)); + } +#endif /* CHECKSUM_GEN_IP */ + + p = ipr->p; + + /* chain together the pbufs contained within the reass_data list. */ + while (r != NULL) { + iprh = (struct ip_reass_helper *)r->payload; + + /* hide the ip header for every succeeding fragment */ + pbuf_remove_header(r, IP_HLEN); + pbuf_cat(p, r); + r = iprh->next_pbuf; + } + + /* find the previous entry in the linked list */ + if (ipr == reassdatagrams) { + ipr_prev = NULL; + } else { + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } + + /* release the sources allocate for the fragment queue entry */ + ip_reass_dequeue_datagram(ipr, ipr_prev); + + /* and adjust the number of pbufs currently queued for reassembly. */ + clen = pbuf_clen(p); + LWIP_ASSERT("ip_reass_pbufcount >= clen", ip_reass_pbufcount >= clen); + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - clen); + + MIB2_STATS_INC(mib2.ipreasmoks); + + /* Return the pbuf chain */ + return p; + } + /* the datagram is not (yet?) reassembled completely */ + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_pbufcount: %d out\n", ip_reass_pbufcount)); + return NULL; + +nullreturn_ipr: + LWIP_ASSERT("ipr != NULL", ipr != NULL); + if (ipr->p == NULL) { + /* dropped pbuf after creating a new datagram entry: remove the entry, too */ + LWIP_ASSERT("not firstalthough just enqueued", ipr == reassdatagrams); + ip_reass_dequeue_datagram(ipr, NULL); + } + +nullreturn: + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: nullreturn\n")); + IPFRAG_STATS_INC(ip_frag.drop); + pbuf_free(p); + return NULL; +} +#endif /* IP_REASSEMBLY */ + +#if IP_FRAG +#if !LWIP_NETIF_TX_SINGLE_PBUF +/** Allocate a new struct pbuf_custom_ref */ +static struct pbuf_custom_ref * +ip_frag_alloc_pbuf_custom_ref(void) +{ + return (struct pbuf_custom_ref *)memp_malloc(MEMP_FRAG_PBUF); +} + +/** Free a struct pbuf_custom_ref */ +static void +ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref *p) +{ + LWIP_ASSERT("p != NULL", p != NULL); + memp_free(MEMP_FRAG_PBUF, p); +} + +/** Free-callback function to free a 'struct pbuf_custom_ref', called by + * pbuf_free. */ +static void +ipfrag_free_pbuf_custom(struct pbuf *p) +{ + struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref *)p; + LWIP_ASSERT("pcr != NULL", pcr != NULL); + LWIP_ASSERT("pcr == p", (void *)pcr == (void *)p); + if (pcr->original != NULL) { + pbuf_free(pcr->original); + } + ip_frag_free_pbuf_custom_ref(pcr); +} +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * Fragment an IP datagram if too large for the netif. + * + * Chop the datagram in MTU sized chunks and send them in order + * by pointing PBUF_REFs into p. 
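+ * (each fragment except possibly the last carries nfb * 8 payload bytes, where
+ *  nfb = (netif->mtu - IP_HLEN) / 8; the 8-byte granularity follows from the IP
+ *  fragment offset field counting in units of 8 bytes)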
+ * + * @param p ip packet to send + * @param netif the netif on which to send + * @param dest destination ip address to which to send + * + * @return ERR_OK if sent successfully, err_t otherwise + */ +err_t +ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest) +{ + struct pbuf *rambuf; +#if !LWIP_NETIF_TX_SINGLE_PBUF + struct pbuf *newpbuf; + u16_t newpbuflen = 0; + u16_t left_to_copy; +#endif + struct ip_hdr *original_iphdr; + struct ip_hdr *iphdr; + const u16_t nfb = (u16_t)((netif->mtu - IP_HLEN) / 8); + u16_t left, fragsize; + u16_t ofo; + int last; + u16_t poff = IP_HLEN; + u16_t tmp; + int mf_set; + + original_iphdr = (struct ip_hdr *)p->payload; + iphdr = original_iphdr; + if (IPH_HL_BYTES(iphdr) != IP_HLEN) { + /* ip4_frag() does not support IP options */ + return ERR_VAL; + } + LWIP_ERROR("ip4_frag(): pbuf too short", p->len >= IP_HLEN, return ERR_VAL); + + /* Save original offset */ + tmp = lwip_ntohs(IPH_OFFSET(iphdr)); + ofo = tmp & IP_OFFMASK; + /* already fragmented? if so, the last fragment we create must have MF, too */ + mf_set = tmp & IP_MF; + + left = (u16_t)(p->tot_len - IP_HLEN); + + while (left) { + /* Fill this fragment */ + fragsize = LWIP_MIN(left, (u16_t)(nfb * 8)); + +#if LWIP_NETIF_TX_SINGLE_PBUF + rambuf = pbuf_alloc(PBUF_IP, fragsize, PBUF_RAM); + if (rambuf == NULL) { + goto memerr; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL)); + poff += pbuf_copy_partial(p, rambuf->payload, fragsize, poff); + /* make room for the IP header */ + if (pbuf_add_header(rambuf, IP_HLEN)) { + pbuf_free(rambuf); + goto memerr; + } + /* fill in the IP header */ + SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN); + iphdr = (struct ip_hdr *)rambuf->payload; +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + /* When not using a static buffer, create a chain of pbufs. + * The first will be a PBUF_RAM holding the link and IP header. + * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged, + * but limited to the size of an mtu. + */ + rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM); + if (rambuf == NULL) { + goto memerr; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len >= (IP_HLEN))); + SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN); + iphdr = (struct ip_hdr *)rambuf->payload; + + left_to_copy = fragsize; + while (left_to_copy) { + struct pbuf_custom_ref *pcr; + u16_t plen = (u16_t)(p->len - poff); + LWIP_ASSERT("p->len >= poff", p->len >= poff); + newpbuflen = LWIP_MIN(left_to_copy, plen); + /* Is this pbuf already empty? */ + if (!newpbuflen) { + poff = 0; + p = p->next; + continue; + } + pcr = ip_frag_alloc_pbuf_custom_ref(); + if (pcr == NULL) { + pbuf_free(rambuf); + goto memerr; + } + /* Mirror this pbuf, although we might not need all of it. */ + newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, + (u8_t *)p->payload + poff, newpbuflen); + if (newpbuf == NULL) { + ip_frag_free_pbuf_custom_ref(pcr); + pbuf_free(rambuf); + goto memerr; + } + pbuf_ref(p); + pcr->original = p; + pcr->pc.custom_free_function = ipfrag_free_pbuf_custom; + + /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain + * so that it is removed when pbuf_dechain is later called on rambuf. 
+ */ + pbuf_cat(rambuf, newpbuf); + left_to_copy = (u16_t)(left_to_copy - newpbuflen); + if (left_to_copy) { + poff = 0; + p = p->next; + } + } + poff = (u16_t)(poff + newpbuflen); +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + /* Correct header */ + last = (left <= netif->mtu - IP_HLEN); + + /* Set new offset and MF flag */ + tmp = (IP_OFFMASK & (ofo)); + if (!last || mf_set) { + /* the last fragment has MF set if the input frame had it */ + tmp = tmp | IP_MF; + } + IPH_OFFSET_SET(iphdr, lwip_htons(tmp)); + IPH_LEN_SET(iphdr, lwip_htons((u16_t)(fragsize + IP_HLEN))); + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN)); + } +#endif /* CHECKSUM_GEN_IP */ + + /* No need for separate header pbuf - we allowed room for it in rambuf + * when allocated. + */ + netif->output(netif, rambuf, dest); + IPFRAG_STATS_INC(ip_frag.xmit); + + /* Unfortunately we can't reuse rambuf - the hardware may still be + * using the buffer. Instead we free it (and the ensuing chain) and + * recreate it next time round the loop. If we're lucky the hardware + * will have already sent the packet, the free will really free, and + * there will be zero memory penalty. + */ + + pbuf_free(rambuf); + left = (u16_t)(left - fragsize); + ofo = (u16_t)(ofo + nfb); + } + MIB2_STATS_INC(mib2.ipfragoks); + return ERR_OK; +memerr: + MIB2_STATS_INC(mib2.ipfragfails); + return ERR_MEM; +} +#endif /* IP_FRAG */ + +#endif /* LWIP_IPV4 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c new file mode 100644 index 00000000..7cf98a52 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c @@ -0,0 +1,812 @@ +/** + * @file + * + * @defgroup dhcp6 DHCPv6 + * @ingroup ip6 + * DHCPv6 client: IPv6 address autoconfiguration as per + * RFC 3315 (stateful DHCPv6) and + * RFC 3736 (stateless DHCPv6). + * + * For now, only stateless DHCPv6 is implemented! + * + * TODO: + * - enable/disable API to not always start when RA is received + * - stateful DHCPv6 (for now, only stateless DHCPv6 for DNS and NTP servers works) + * - create Client Identifier? + * - only start requests if a valid local address is available on the netif + * - only start information requests if required (not for every RA) + * + * dhcp6_enable_stateful() enables stateful DHCPv6 for a netif (stateless disabled)\n + * dhcp6_enable_stateless() enables stateless DHCPv6 for a netif (stateful disabled)\n + * dhcp6_disable() disable DHCPv6 for a netif + * + * When enabled, requests are only issued after receipt of RA with the + * corresponding bits set. + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/dhcp6.h" +#include "lwip/prot/dhcp6.h" +#include "lwip/def.h" +#include "lwip/udp.h" +#include "lwip/dns.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif +#ifndef LWIP_HOOK_DHCP6_APPEND_OPTIONS +#define LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len) +#endif +#ifndef LWIP_HOOK_DHCP6_PARSE_OPTION +#define LWIP_HOOK_DHCP6_PARSE_OPTION(netif, dhcp6, state, msg, msg_type, option, len, pbuf, offset) do { LWIP_UNUSED_ARG(msg); } while(0) +#endif + +#if LWIP_DNS && LWIP_DHCP6_MAX_DNS_SERVERS +#if DNS_MAX_SERVERS > LWIP_DHCP6_MAX_DNS_SERVERS +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS LWIP_DHCP6_MAX_DNS_SERVERS +#else +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS +#endif +#else +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS 0 +#endif + + +/** Option handling: options are parsed in dhcp6_parse_reply + * and saved in an array where other functions can load them from. + * This might be moved into the struct dhcp6 (not necessarily since + * lwIP is single-threaded and the array is only used while in recv + * callback). */ +enum dhcp6_option_idx { + DHCP6_OPTION_IDX_CLI_ID = 0, + DHCP6_OPTION_IDX_SERVER_ID, +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + DHCP6_OPTION_IDX_DNS_SERVER, + DHCP6_OPTION_IDX_DOMAIN_LIST, +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP6_GET_NTP_SRV + DHCP6_OPTION_IDX_NTP_SERVER, +#endif /* LWIP_DHCP_GET_NTP_SRV */ + DHCP6_OPTION_IDX_MAX +}; + +struct dhcp6_option_info { + u8_t option_given; + u16_t val_start; + u16_t val_length; +}; + +/** Holds the decoded option info, only valid while in dhcp6_recv. 
*/ +struct dhcp6_option_info dhcp6_rx_options[DHCP6_OPTION_IDX_MAX]; + +#define dhcp6_option_given(dhcp6, idx) (dhcp6_rx_options[idx].option_given != 0) +#define dhcp6_got_option(dhcp6, idx) (dhcp6_rx_options[idx].option_given = 1) +#define dhcp6_clear_option(dhcp6, idx) (dhcp6_rx_options[idx].option_given = 0) +#define dhcp6_clear_all_options(dhcp6) (memset(dhcp6_rx_options, 0, sizeof(dhcp6_rx_options))) +#define dhcp6_get_option_start(dhcp6, idx) (dhcp6_rx_options[idx].val_start) +#define dhcp6_get_option_length(dhcp6, idx) (dhcp6_rx_options[idx].val_length) +#define dhcp6_set_option(dhcp6, idx, start, len) do { dhcp6_rx_options[idx].val_start = (start); dhcp6_rx_options[idx].val_length = (len); }while(0) + + +const ip_addr_t dhcp6_All_DHCP6_Relay_Agents_and_Servers = IPADDR6_INIT_HOST(0xFF020000, 0, 0, 0x00010002); +const ip_addr_t dhcp6_All_DHCP6_Servers = IPADDR6_INIT_HOST(0xFF020000, 0, 0, 0x00010003); + +static struct udp_pcb *dhcp6_pcb; +static u8_t dhcp6_pcb_refcount; + + +/* receive, unfold, parse and free incoming messages */ +static void dhcp6_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); + +/** Ensure DHCP PCB is allocated and bound */ +static err_t +dhcp6_inc_pcb_refcount(void) +{ + if (dhcp6_pcb_refcount == 0) { + LWIP_ASSERT("dhcp6_inc_pcb_refcount(): memory leak", dhcp6_pcb == NULL); + + /* allocate UDP PCB */ + dhcp6_pcb = udp_new_ip6(); + + if (dhcp6_pcb == NULL) { + return ERR_MEM; + } + + ip_set_option(dhcp6_pcb, SOF_BROADCAST); + + /* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */ + udp_bind(dhcp6_pcb, IP6_ADDR_ANY, DHCP6_CLIENT_PORT); + udp_recv(dhcp6_pcb, dhcp6_recv, NULL); + } + + dhcp6_pcb_refcount++; + + return ERR_OK; +} + +/** Free DHCP PCB if the last netif stops using it */ +static void +dhcp6_dec_pcb_refcount(void) +{ + LWIP_ASSERT("dhcp6_pcb_refcount(): refcount error", (dhcp6_pcb_refcount > 0)); + dhcp6_pcb_refcount--; + + if (dhcp6_pcb_refcount == 0) { + udp_remove(dhcp6_pcb); + dhcp6_pcb = NULL; + } +} + +/** + * @ingroup dhcp6 + * Set a statically allocated struct dhcp6 to work with. + * Using this prevents dhcp6_start to allocate it using mem_malloc. + * + * @param netif the netif for which to set the struct dhcp + * @param dhcp6 (uninitialised) dhcp6 struct allocated by the application + */ +void +dhcp6_set_struct(struct netif *netif, struct dhcp6 *dhcp6) +{ + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("dhcp6 != NULL", dhcp6 != NULL); + LWIP_ASSERT("netif already has a struct dhcp6 set", netif_dhcp6_data(netif) == NULL); + + /* clear data structure */ + memset(dhcp6, 0, sizeof(struct dhcp6)); + /* dhcp6_set_state(&dhcp, DHCP6_STATE_OFF); */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, dhcp6); +} + +/** + * @ingroup dhcp6 + * Removes a struct dhcp6 from a netif. + * + * ATTENTION: Only use this when not using dhcp6_set_struct() to allocate the + * struct dhcp6 since the memory is passed back to the heap. 
+ * + * @param netif the netif from which to remove the struct dhcp + */ +void dhcp6_cleanup(struct netif *netif) +{ + LWIP_ASSERT("netif != NULL", netif != NULL); + + if (netif_dhcp6_data(netif) != NULL) { + mem_free(netif_dhcp6_data(netif)); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL); + } +} + +static struct dhcp6* +dhcp6_get_struct(struct netif *netif, const char *dbg_requester) +{ + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + if (dhcp6 == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: mallocing new DHCPv6 client\n", dbg_requester)); + dhcp6 = (struct dhcp6 *)mem_malloc(sizeof(struct dhcp6)); + if (dhcp6 == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: could not allocate dhcp6\n", dbg_requester)); + return NULL; + } + + /* clear data structure, this implies DHCP6_STATE_OFF */ + memset(dhcp6, 0, sizeof(struct dhcp6)); + /* store this dhcp6 client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, dhcp6); + } else { + /* already has DHCP6 client attached */ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("%s: using existing DHCPv6 client\n", dbg_requester)); + } + + if (!dhcp6->pcb_allocated) { + if (dhcp6_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP6 PCB is allocated */ + mem_free(dhcp6); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL); + return NULL; + } + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: allocated dhcp6", dbg_requester)); + dhcp6->pcb_allocated = 1; + } + return dhcp6; +} + +/* + * Set the DHCPv6 state + * If the state changed, reset the number of tries. + */ +static void +dhcp6_set_state(struct dhcp6 *dhcp6, u8_t new_state, const char *dbg_caller) +{ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("DHCPv6 state: %d -> %d (%s)\n", + dhcp6->state, new_state, dbg_caller)); + if (new_state != dhcp6->state) { + dhcp6->state = new_state; + dhcp6->tries = 0; + dhcp6->request_timeout = 0; + } +} + +static int +dhcp6_stateless_enabled(struct dhcp6 *dhcp6) +{ + if ((dhcp6->state == DHCP6_STATE_STATELESS_IDLE) || + (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG)) { + return 1; + } + return 0; +} + +/*static int +dhcp6_stateful_enabled(struct dhcp6 *dhcp6) +{ + if (dhcp6->state == DHCP6_STATE_OFF) { + return 0; + } + if (dhcp6_stateless_enabled(dhcp6)) { + return 0; + } + return 1; +}*/ + +/** + * @ingroup dhcp6 + * Enable stateful DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_MANAGED_ADDR_CONFIG flag set. + * + * A struct dhcp6 will be allocated for this netif if not + * set via @ref dhcp6_set_struct before. + * + * @todo: stateful DHCPv6 not supported, yet + */ +err_t +dhcp6_enable_stateful(struct netif *netif) +{ + LWIP_UNUSED_ARG(netif); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("stateful dhcp6 not implemented yet")); + return ERR_VAL; +} + +/** + * @ingroup dhcp6 + * Enable stateless DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_OTHER_CONFIG flag set. + * + * A struct dhcp6 will be allocated for this netif if not + * set via @ref dhcp6_set_struct before. 
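+ *
+ * A minimal usage sketch (hypothetical application code; assumes an IPv6-enabled
+ * netif 'gnetif' that has already been added and brought up):
+ *
+ *   netif_create_ip6_linklocal_address(&gnetif, 1);
+ *   netif_set_ip6_autoconfig_enabled(&gnetif, 1);
+ *   dhcp6_enable_stateless(&gnetif);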
+ */ +err_t +dhcp6_enable_stateless(struct netif *netif) +{ + struct dhcp6 *dhcp6; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_enable_stateless(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + dhcp6 = dhcp6_get_struct(netif, "dhcp6_enable_stateless()"); + if (dhcp6 == NULL) { + return ERR_MEM; + } + if (dhcp6_stateless_enabled(dhcp6)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): stateless DHCPv6 already enabled")); + return ERR_OK; + } else if (dhcp6->state != DHCP6_STATE_OFF) { + /* stateful running */ + /* @todo: stop stateful once it is implemented */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): switching from stateful to stateless DHCPv6")); + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): stateless DHCPv6 enabled\n")); + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_enable_stateless"); + return ERR_OK; +} + +/** + * @ingroup dhcp6 + * Disable stateful or stateless DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_OTHER_CONFIG flag set. + */ +void +dhcp6_disable(struct netif *netif) +{ + struct dhcp6 *dhcp6; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_disable(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + dhcp6 = netif_dhcp6_data(netif); + if (dhcp6 != NULL) { + if (dhcp6->state != DHCP6_STATE_OFF) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_disable(): DHCPv6 disabled (old state: %s)\n", + (dhcp6_stateless_enabled(dhcp6) ? "stateless" : "stateful"))); + dhcp6_set_state(dhcp6, DHCP6_STATE_OFF, "dhcp6_disable"); + if (dhcp6->pcb_allocated != 0) { + dhcp6_dec_pcb_refcount(); /* free DHCPv6 PCB if not needed any more */ + dhcp6->pcb_allocated = 0; + } + } + } +} + +/** + * Create a DHCPv6 request, fill in common headers + * + * @param netif the netif under DHCPv6 control + * @param dhcp6 dhcp6 control struct + * @param message_type message type of the request + * @param opt_len_alloc option length to allocate + * @param options_out_len option length on exit + * @return a pbuf for the message + */ +static struct pbuf * +dhcp6_create_msg(struct netif *netif, struct dhcp6 *dhcp6, u8_t message_type, + u16_t opt_len_alloc, u16_t *options_out_len) +{ + struct pbuf *p_out; + struct dhcp6_msg *msg_out; + + LWIP_ERROR("dhcp6_create_msg: netif != NULL", (netif != NULL), return NULL;); + LWIP_ERROR("dhcp6_create_msg: dhcp6 != NULL", (dhcp6 != NULL), return NULL;); + p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp6_msg) + opt_len_alloc, PBUF_RAM); + if (p_out == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp6_create_msg(): could not allocate pbuf\n")); + return NULL; + } + LWIP_ASSERT("dhcp6_create_msg: check that first pbuf can hold struct dhcp6_msg", + (p_out->len >= sizeof(struct dhcp6_msg) + opt_len_alloc)); + + /* @todo: limit new xid for certain message types? 
*/ + /* reuse transaction identifier in retransmissions */ + if (dhcp6->tries == 0) { + dhcp6->xid = LWIP_RAND() & 0xFFFFFF; + } + + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, + ("transaction id xid(%"X32_F")\n", dhcp6->xid)); + + msg_out = (struct dhcp6_msg *)p_out->payload; + memset(msg_out, 0, sizeof(struct dhcp6_msg) + opt_len_alloc); + + msg_out->msgtype = message_type; + msg_out->transaction_id[0] = (u8_t)(dhcp6->xid >> 16); + msg_out->transaction_id[1] = (u8_t)(dhcp6->xid >> 8); + msg_out->transaction_id[2] = (u8_t)dhcp6->xid; + *options_out_len = 0; + return p_out; +} + +static u16_t +dhcp6_option_short(u16_t options_out_len, u8_t *options, u16_t value) +{ + options[options_out_len++] = (u8_t)((value & 0xff00U) >> 8); + options[options_out_len++] = (u8_t) (value & 0x00ffU); + return options_out_len; +} + +static u16_t +dhcp6_option_optionrequest(u16_t options_out_len, u8_t *options, const u16_t *req_options, + u16_t num_req_options, u16_t max_len) +{ + size_t i; + u16_t ret; + + LWIP_ASSERT("dhcp6_option_optionrequest: options_out_len + sizeof(struct dhcp6_msg) + addlen <= max_len", + sizeof(struct dhcp6_msg) + options_out_len + 4U + (2U * num_req_options) <= max_len); + LWIP_UNUSED_ARG(max_len); + + ret = dhcp6_option_short(options_out_len, options, DHCP6_OPTION_ORO); + ret = dhcp6_option_short(ret, options, 2 * num_req_options); + for (i = 0; i < num_req_options; i++) { + ret = dhcp6_option_short(ret, options, req_options[i]); + } + return ret; +} + +/* All options are added, shrink the pbuf to the required size */ +static void +dhcp6_msg_finalize(u16_t options_out_len, struct pbuf *p_out) +{ + /* shrink the pbuf to the actual content length */ + pbuf_realloc(p_out, (u16_t)(sizeof(struct dhcp6_msg) + options_out_len)); +} + + +#if LWIP_IPV6_DHCP6_STATELESS +static void +dhcp6_information_request(struct netif *netif, struct dhcp6 *dhcp6) +{ + const u16_t requested_options[] = {DHCP6_OPTION_DNS_SERVERS, DHCP6_OPTION_DOMAIN_LIST, DHCP6_OPTION_SNTP_SERVERS}; + u16_t msecs; + struct pbuf *p_out; + u16_t options_out_len; + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_information_request()\n")); + /* create and initialize the DHCP message header */ + p_out = dhcp6_create_msg(netif, dhcp6, DHCP6_INFOREQUEST, 4 + sizeof(requested_options), &options_out_len); + if (p_out != NULL) { + err_t err; + struct dhcp6_msg *msg_out = (struct dhcp6_msg *)p_out->payload; + u8_t *options = (u8_t *)(msg_out + 1); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_information_request: making request\n")); + + options_out_len = dhcp6_option_optionrequest(options_out_len, options, requested_options, + LWIP_ARRAYSIZE(requested_options), p_out->len); + LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, DHCP6_STATE_REQUESTING_CONFIG, msg_out, + DHCP6_INFOREQUEST, options_out_len, p_out->len); + dhcp6_msg_finalize(options_out_len, p_out); + + err = udp_sendto_if(dhcp6_pcb, p_out, &dhcp6_All_DHCP6_Relay_Agents_and_Servers, DHCP6_SERVER_PORT, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_information_request: INFOREQUESTING -> %d\n", (int)err)); + LWIP_UNUSED_ARG(err); + } else { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp6_information_request: could not allocate DHCP6 request\n")); + } + dhcp6_set_state(dhcp6, DHCP6_STATE_REQUESTING_CONFIG, "dhcp6_information_request"); + if (dhcp6->tries < 255) { + dhcp6->tries++; + } + msecs = (u16_t)((dhcp6->tries < 6 ? 
1 << dhcp6->tries : 60) * 1000); + dhcp6->request_timeout = (u16_t)((msecs + DHCP6_TIMER_MSECS - 1) / DHCP6_TIMER_MSECS); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_information_request(): set request timeout %"U16_F" msecs\n", msecs)); +} + +static err_t +dhcp6_request_config(struct netif *netif, struct dhcp6 *dhcp6) +{ + /* stateless mode enabled and no request running? */ + if (dhcp6->state == DHCP6_STATE_STATELESS_IDLE) { + /* send Information-request and wait for answer; setup receive timeout */ + dhcp6_information_request(netif, dhcp6); + } + + return ERR_OK; +} + +static void +dhcp6_abort_config_request(struct dhcp6 *dhcp6) +{ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + /* abort running request */ + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_abort_config_request"); + } +} + +/* Handle a REPLY to INFOREQUEST + * This parses DNS and NTP server addresses from the reply. + */ +static void +dhcp6_handle_config_reply(struct netif *netif, struct pbuf *p_msg_in) +{ + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + + LWIP_UNUSED_ARG(dhcp6); + LWIP_UNUSED_ARG(p_msg_in); + +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER)) { + ip_addr_t dns_addr; + ip6_addr_t *dns_addr6; + u16_t op_start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + u16_t op_len = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + u16_t idx; + u8_t n; + + memset(&dns_addr, 0, sizeof(dns_addr)); + dns_addr6 = ip_2_ip6(&dns_addr); + for (n = 0, idx = op_start; (idx < op_start + op_len) && (n < LWIP_DHCP6_PROVIDE_DNS_SERVERS); + n++, idx += sizeof(struct ip6_addr_packed)) { + u16_t copied = pbuf_copy_partial(p_msg_in, dns_addr6, sizeof(struct ip6_addr_packed), idx); + if (copied != sizeof(struct ip6_addr_packed)) { + /* pbuf length mismatch */ + return; + } + ip6_addr_assign_zone(dns_addr6, IP6_UNKNOWN, netif); + /* @todo: do we need a different offset than DHCP(v4)? */ + dns_setserver(n, &dns_addr); + } + } + /* @ todo: parse and set Domain Search List */ +#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */ + +#if LWIP_DHCP6_GET_NTP_SRV + if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER)) { + ip_addr_t ntp_server_addrs[LWIP_DHCP6_MAX_NTP_SERVERS]; + u16_t op_start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + u16_t op_len = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + u16_t idx; + u8_t n; + + for (n = 0, idx = op_start; (idx < op_start + op_len) && (n < LWIP_DHCP6_MAX_NTP_SERVERS); + n++, idx += sizeof(struct ip6_addr_packed)) { + u16_t copied; + ip6_addr_t *ntp_addr6 = ip_2_ip6(&ntp_server_addrs[n]); + ip_addr_set_zero_ip6(&ntp_server_addrs[n]); + copied = pbuf_copy_partial(p_msg_in, ntp_addr6, sizeof(struct ip6_addr_packed), idx); + if (copied != sizeof(struct ip6_addr_packed)) { + /* pbuf length mismatch */ + return; + } + ip6_addr_assign_zone(ntp_addr6, IP6_UNKNOWN, netif); + } + dhcp6_set_ntp_servers(n, ntp_server_addrs); + } +#endif /* LWIP_DHCP6_GET_NTP_SRV */ +} +#endif /* LWIP_IPV6_DHCP6_STATELESS */ + +/** This function is called from nd6 module when an RA messsage is received + * It triggers DHCPv6 requests (if enabled). 
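+ *
+ * @param netif the netif on which the Router Advertisement was received
+ * @param managed_addr_config the RA "Managed address config" (M) flag;
+ *        currently unused, since stateful DHCPv6 is not implemented
+ * @param other_config the RA "Other config" (O) flag; when set and stateless
+ *        DHCPv6 is enabled, an Information-Request is (re)started, otherwise
+ *        a running request is aborted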
+ */ +void +dhcp6_nd6_ra_trigger(struct netif *netif, u8_t managed_addr_config, u8_t other_config) +{ + struct dhcp6 *dhcp6; + + LWIP_ASSERT("netif != NULL", netif != NULL); + dhcp6 = netif_dhcp6_data(netif); + + LWIP_UNUSED_ARG(managed_addr_config); + LWIP_UNUSED_ARG(other_config); + LWIP_UNUSED_ARG(dhcp6); + +#if LWIP_IPV6_DHCP6_STATELESS + if (dhcp6 != NULL) { + if (dhcp6_stateless_enabled(dhcp6)) { + if (other_config) { + dhcp6_request_config(netif, dhcp6); + } else { + dhcp6_abort_config_request(dhcp6); + } + } + } +#endif /* LWIP_IPV6_DHCP6_STATELESS */ +} + +/** + * Parse the DHCPv6 message and extract the DHCPv6 options. + * + * Extract the DHCPv6 options (offset + length) so that we can later easily + * check for them or extract the contents. + */ +static err_t +dhcp6_parse_reply(struct pbuf *p, struct dhcp6 *dhcp6) +{ + u16_t offset; + u16_t offset_max; + u16_t options_idx; + struct dhcp6_msg *msg_in; + + LWIP_UNUSED_ARG(dhcp6); + + /* clear received options */ + dhcp6_clear_all_options(dhcp6); + msg_in = (struct dhcp6_msg *)p->payload; + + /* parse options */ + + options_idx = sizeof(struct dhcp6_msg); + /* parse options to the end of the received packet */ + offset_max = p->tot_len; + + offset = options_idx; + /* at least 4 byte to read? */ + while ((offset + 4 <= offset_max)) { + u8_t op_len_buf[4]; + u8_t *op_len; + u16_t op; + u16_t len; + u16_t val_offset = (u16_t)(offset + 4); + if (val_offset < offset) { + /* overflow */ + return ERR_BUF; + } + /* copy option + length, might be split accross pbufs */ + op_len = (u8_t *)pbuf_get_contiguous(p, op_len_buf, 4, 4, offset); + if (op_len == NULL) { + /* failed to get option and length */ + return ERR_VAL; + } + op = (op_len[0] << 8) | op_len[1]; + len = (op_len[2] << 8) | op_len[3]; + offset = val_offset + len; + if (offset < val_offset) { + /* overflow */ + return ERR_BUF; + } + + switch (op) { + case (DHCP6_OPTION_CLIENTID): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_CLI_ID); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_CLI_ID, val_offset, len); + break; + case (DHCP6_OPTION_SERVERID): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_SERVER_ID); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_SERVER_ID, val_offset, len); + break; +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + case (DHCP6_OPTION_DNS_SERVERS): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER, val_offset, len); + break; + case (DHCP6_OPTION_DOMAIN_LIST): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_DOMAIN_LIST); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_DOMAIN_LIST, val_offset, len); + break; +#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP6_GET_NTP_SRV + case (DHCP6_OPTION_SNTP_SERVERS): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER, val_offset, len); + break; +#endif /* LWIP_DHCP6_GET_NTP_SRV*/ + default: + LWIP_DEBUGF(DHCP6_DEBUG, ("skipping option %"U16_F" in options\n", op)); + LWIP_HOOK_DHCP6_PARSE_OPTION(ip_current_netif(), dhcp6, dhcp6->state, msg_in, + msg_in->msgtype, op, len, q, val_offset); + break; + } + } + return ERR_OK; +} + +static void +dhcp6_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + struct netif *netif = ip_current_input_netif(); + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + struct dhcp6_msg *reply_msg = (struct dhcp6_msg *)p->payload; + u8_t msg_type; + u32_t xid; + + LWIP_UNUSED_ARG(arg); + + /* Caught DHCPv6 message from netif that does not have DHCPv6 enabled? 
-> not interested */ + if ((dhcp6 == NULL) || (dhcp6->pcb_allocated == 0)) { + goto free_pbuf_and_return; + } + + LWIP_ERROR("invalid server address type", IP_IS_V6(addr), goto free_pbuf_and_return;); + + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_recv(pbuf = %p) from DHCPv6 server %s port %"U16_F"\n", (void *)p, + ipaddr_ntoa(addr), port)); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len)); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len)); + /* prevent warnings about unused arguments */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + + if (p->len < sizeof(struct dhcp6_msg)) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCPv6 reply message or pbuf too short\n")); + goto free_pbuf_and_return; + } + + /* match transaction ID against what we expected */ + xid = reply_msg->transaction_id[0] << 16; + xid |= reply_msg->transaction_id[1] << 8; + xid |= reply_msg->transaction_id[2]; + if (xid != dhcp6->xid) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("transaction id mismatch reply_msg->xid(%"X32_F")!= dhcp6->xid(%"X32_F")\n", xid, dhcp6->xid)); + goto free_pbuf_and_return; + } + /* option fields could be unfold? */ + if (dhcp6_parse_reply(p, dhcp6) != ERR_OK) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("problem unfolding DHCPv6 message - too short on memory?\n")); + goto free_pbuf_and_return; + } + + /* read DHCP message type */ + msg_type = reply_msg->msgtype; + /* message type is DHCP6 REPLY? */ + if (msg_type == DHCP6_REPLY) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("DHCP6_REPLY received\n")); +#if LWIP_IPV6_DHCP6_STATELESS + /* in info-requesting state? */ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_recv"); + dhcp6_handle_config_reply(netif, p); + } else +#endif /* LWIP_IPV6_DHCP6_STATELESS */ + { + /* @todo: handle reply in other states? */ + } + } else { + /* @todo: handle other message types */ + } + +free_pbuf_and_return: + pbuf_free(p); +} + +/** + * A DHCPv6 request has timed out. + * + * The timer that was started with the DHCPv6 request has + * timed out, indicating no response was received in time. + */ +static void +dhcp6_timeout(struct netif *netif, struct dhcp6 *dhcp6) +{ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_timeout()\n")); + + LWIP_UNUSED_ARG(netif); + LWIP_UNUSED_ARG(dhcp6); + +#if LWIP_IPV6_DHCP6_STATELESS + /* back-off period has passed, or server selection timed out */ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_timeout(): retrying information request\n")); + dhcp6_information_request(netif, dhcp6); + } +#endif /* LWIP_IPV6_DHCP6_STATELESS */ +} + +/** + * DHCPv6 timeout handling (this function must be called every 500ms, + * see @ref DHCP6_TIMER_MSECS). + * + * A DHCPv6 server is expected to respond within a short period of time. + * This timer checks whether an outstanding DHCPv6 request is timed out. 
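+ *
+ * With the stock lwIP timeouts module this is normally wired up as a cyclic
+ * timer; a bare-metal main loop can also drive it directly, for example
+ * (sketch only, `last_dhcp6_ms` is a placeholder variable, not part of this
+ * patch):
+ * @code
+ *   u32_t now = sys_now();
+ *   if ((u32_t)(now - last_dhcp6_ms) >= DHCP6_TIMER_MSECS) {
+ *     last_dhcp6_ms = now;
+ *     dhcp6_tmr();
+ *   }
+ * @endcode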
+ */ +void +dhcp6_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + /* only act on DHCPv6 configured interfaces */ + if (dhcp6 != NULL) { + /* timer is active (non zero), and is about to trigger now */ + if (dhcp6->request_timeout > 1) { + dhcp6->request_timeout--; + } else if (dhcp6->request_timeout == 1) { + dhcp6->request_timeout--; + /* { dhcp6->request_timeout == 0 } */ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_tmr(): request timeout\n")); + /* this client's request timeout triggered */ + dhcp6_timeout(netif, dhcp6); + } + } + } +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_DHCP6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c new file mode 100644 index 00000000..fec8b28a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c @@ -0,0 +1,123 @@ +/** + * @file + * + * Ethernet output for IPv6. Uses ND tables for link-layer addressing. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_ETHERNET + +#include "lwip/ethip6.h" +#include "lwip/nd6.h" +#include "lwip/pbuf.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp6.h" +#include "lwip/prot/ethernet.h" +#include "netif/ethernet.h" + +#include + +/** + * Resolve and fill-in Ethernet address header for outgoing IPv6 packet. + * + * For IPv6 multicast, corresponding Ethernet addresses + * are selected and the packet is transmitted on the link. + * + * For unicast addresses, ask the ND6 module what to do. It will either let us + * send the the packet right away, or queue the packet for later itself, unless + * an error occurs. + * + * @todo anycast addresses + * + * @param netif The lwIP network interface which the IP packet will be sent on. 
+ * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The IP address of the packet destination. + * + * @return + * - ERR_OK or the return value of @ref nd6_get_next_hop_addr_or_queue. + */ +err_t +ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) +{ + struct eth_addr dest; + const u8_t *hwaddr; + err_t result; + + LWIP_ASSERT_CORE_LOCKED(); + + /* The destination IP address must be properly zoned from here on down. */ + IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif); + + /* multicast destination IP address? */ + if (ip6_addr_ismulticast(ip6addr)) { + /* Hash IP multicast address to MAC address.*/ + dest.addr[0] = 0x33; + dest.addr[1] = 0x33; + dest.addr[2] = ((const u8_t *)(&(ip6addr->addr[3])))[0]; + dest.addr[3] = ((const u8_t *)(&(ip6addr->addr[3])))[1]; + dest.addr[4] = ((const u8_t *)(&(ip6addr->addr[3])))[2]; + dest.addr[5] = ((const u8_t *)(&(ip6addr->addr[3])))[3]; + + /* Send out. */ + return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); + } + + /* We have a unicast destination IP address */ + /* @todo anycast? */ + + /* Ask ND6 what to do with the packet. */ + result = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr); + if (result != ERR_OK) { + return result; + } + + /* If no hardware address is returned, nd6 has queued the packet for later. */ + if (hwaddr == NULL) { + return ERR_OK; + } + + /* Send out the packet using the returned hardware address. */ + SMEMCPY(dest.addr, hwaddr, 6); + return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); +} + +#endif /* LWIP_IPV6 && LWIP_ETHERNET */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c new file mode 100644 index 00000000..167738a0 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c @@ -0,0 +1,425 @@ +/** + * @file + * + * IPv6 version of ICMP, as per RFC 4443. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/icmp6.h" +#include "lwip/prot/icmp6.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/ip.h" +#include "lwip/stats.h" + +#include + +#if LWIP_ICMP6_DATASIZE == 0 +#undef LWIP_ICMP6_DATASIZE +#define LWIP_ICMP6_DATASIZE 8 +#endif + +/* Forward declarations */ +static void icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type); +static void icmp6_send_response_with_addrs(struct pbuf *p, u8_t code, u32_t data, + u8_t type, const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr); +static void icmp6_send_response_with_addrs_and_netif(struct pbuf *p, u8_t code, u32_t data, + u8_t type, const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr, struct netif *netif); + + +/** + * Process an input ICMPv6 message. Called by ip6_input. + * + * Will generate a reply for echo requests. Other messages are forwarded + * to nd6_input, or mld6_input. + * + * @param p the mld packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +icmp6_input(struct pbuf *p, struct netif *inp) +{ + struct icmp6_hdr *icmp6hdr; + struct pbuf *r; + const ip6_addr_t *reply_src; + + ICMP6_STATS_INC(icmp6.recv); + + /* Check that ICMPv6 header fits in payload */ + if (p->len < sizeof(struct icmp6_hdr)) { + /* drop short packets */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.lenerr); + ICMP6_STATS_INC(icmp6.drop); + return; + } + + icmp6hdr = (struct icmp6_hdr *)p->payload; + +#if CHECKSUM_CHECK_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP6) { + if (ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->tot_len, ip6_current_src_addr(), + ip6_current_dest_addr()) != 0) { + /* Checksum failed */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.chkerr); + ICMP6_STATS_INC(icmp6.drop); + return; + } + } +#endif /* CHECKSUM_CHECK_ICMP6 */ + + switch (icmp6hdr->type) { + case ICMP6_TYPE_NA: /* Neighbor advertisement */ + case ICMP6_TYPE_NS: /* Neighbor solicitation */ + case ICMP6_TYPE_RA: /* Router advertisement */ + case ICMP6_TYPE_RD: /* Redirect */ + case ICMP6_TYPE_PTB: /* Packet too big */ + nd6_input(p, inp); + return; + case ICMP6_TYPE_RS: +#if LWIP_IPV6_FORWARD + /* @todo implement router functionality */ +#endif + break; +#if LWIP_IPV6_MLD + case ICMP6_TYPE_MLQ: + case ICMP6_TYPE_MLR: + case ICMP6_TYPE_MLD: + mld6_input(p, inp); + return; +#endif + case ICMP6_TYPE_EREQ: +#if !LWIP_MULTICAST_PING + /* multicast destination address? */ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* drop */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.drop); + return; + } +#endif /* LWIP_MULTICAST_PING */ + + /* Allocate reply. */ + r = pbuf_alloc(PBUF_IP, p->tot_len, PBUF_RAM); + if (r == NULL) { + /* drop */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.memerr); + return; + } + + /* Copy echo request. */ + if (pbuf_copy(r, p) != ERR_OK) { + /* drop */ + pbuf_free(p); + pbuf_free(r); + ICMP6_STATS_INC(icmp6.err); + return; + } + + /* Determine reply source IPv6 address. 
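+         For a unicast echo request the packet's own destination address is
+         reused as the reply source; for multicast pings (LWIP_MULTICAST_PING)
+         a source address is selected with ip6_select_source_address().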
*/ +#if LWIP_MULTICAST_PING + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + reply_src = ip_2_ip6(ip6_select_source_address(inp, ip6_current_src_addr())); + if (reply_src == NULL) { + /* drop */ + pbuf_free(p); + pbuf_free(r); + ICMP6_STATS_INC(icmp6.rterr); + return; + } + } + else +#endif /* LWIP_MULTICAST_PING */ + { + reply_src = ip6_current_dest_addr(); + } + + /* Set fields in reply. */ + ((struct icmp6_echo_hdr *)(r->payload))->type = ICMP6_TYPE_EREP; + ((struct icmp6_echo_hdr *)(r->payload))->chksum = 0; +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP6) { + ((struct icmp6_echo_hdr *)(r->payload))->chksum = ip6_chksum_pseudo(r, + IP6_NEXTH_ICMP6, r->tot_len, reply_src, ip6_current_src_addr()); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send reply. */ + ICMP6_STATS_INC(icmp6.xmit); + ip6_output_if(r, reply_src, ip6_current_src_addr(), + LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, inp); + pbuf_free(r); + + break; + default: + ICMP6_STATS_INC(icmp6.proterr); + ICMP6_STATS_INC(icmp6.drop); + break; + } + + pbuf_free(p); +} + + +/** + * Send an icmpv6 'destination unreachable' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the unreachable type + */ +void +icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c) +{ + icmp6_send_response(p, c, 0, ICMP6_TYPE_DUR); +} + +/** + * Send an icmpv6 'packet too big' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'packet too big' should be sent, + * p->payload pointing to the IPv6 header + * @param mtu the maximum mtu that we can accept + */ +void +icmp6_packet_too_big(struct pbuf *p, u32_t mtu) +{ + icmp6_send_response(p, 0, mtu, ICMP6_TYPE_PTB); +} + +/** + * Send an icmpv6 'time exceeded' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the time exceeded type + */ +void +icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c) +{ + icmp6_send_response(p, c, 0, ICMP6_TYPE_TE); +} + +/** + * Send an icmpv6 'time exceeded' packet, with explicit source and destination + * addresses. + * + * This function may be used to send a response sometime after receiving the + * packet for which this response is meant. The provided source and destination + * addresses are used primarily to retain their zone information. + * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the time exceeded type + * @param src_addr source address of the original packet, with zone information + * @param dest_addr destination address of the original packet, with zone + * information + */ +void +icmp6_time_exceeded_with_addrs(struct pbuf *p, enum icmp6_te_code c, + const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr) +{ + icmp6_send_response_with_addrs(p, c, 0, ICMP6_TYPE_TE, src_addr, dest_addr); +} + +/** + * Send an icmpv6 'parameter problem' packet. 
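+ * This is ICMPv6 message type 4; its 32-bit data field carries the offset of
+ * the offending byte within the original packet.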
+ * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost and the calculated + * offset would be wrong (calculated against ip6_current_header()). + * + * @param p the input packet for which the 'param problem' should be sent, + * p->payload pointing to the IP header + * @param c ICMPv6 code for the param problem type + * @param pointer the pointer to the byte where the parameter is found + */ +void +icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, const void *pointer) +{ + u32_t pointer_u32 = (u32_t)((const u8_t *)pointer - (const u8_t *)ip6_current_header()); + icmp6_send_response(p, c, pointer_u32, ICMP6_TYPE_PP); +} + +/** + * Send an ICMPv6 packet in response to an incoming packet. + * The packet is sent *to* ip_current_src_addr() on ip_current_netif(). + * + * @param p the input packet for which the response should be sent, + * p->payload pointing to the IPv6 header + * @param code Code of the ICMPv6 header + * @param data Additional 32-bit parameter in the ICMPv6 header + * @param type Type of the ICMPv6 header + */ +static void +icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type) +{ + const struct ip6_addr *reply_src, *reply_dest; + struct netif *netif = ip_current_netif(); + + LWIP_ASSERT("icmpv6 packet not a direct response", netif != NULL); + reply_dest = ip6_current_src_addr(); + + /* Select an address to use as source. */ + reply_src = ip_2_ip6(ip6_select_source_address(netif, reply_dest)); + if (reply_src == NULL) { + ICMP6_STATS_INC(icmp6.rterr); + return; + } + icmp6_send_response_with_addrs_and_netif(p, code, data, type, reply_src, reply_dest, netif); +} + +/** + * Send an ICMPv6 packet in response to an incoming packet. + * + * Call this function if the packet is NOT sent as a direct response to an + * incoming packet, but rather sometime later (e.g. for a fragment reassembly + * timeout). The caller must provide the zoned source and destination addresses + * from the original packet with the src_addr and dest_addr parameters. The + * reason for this approach is that while the addresses themselves are part of + * the original packet, their zone information is not, thus possibly resulting + * in a link-local response being sent over the wrong link. + * + * @param p the input packet for which the response should be sent, + * p->payload pointing to the IPv6 header + * @param code Code of the ICMPv6 header + * @param data Additional 32-bit parameter in the ICMPv6 header + * @param type Type of the ICMPv6 header + * @param src_addr original source address + * @param dest_addr original destination address + */ +static void +icmp6_send_response_with_addrs(struct pbuf *p, u8_t code, u32_t data, u8_t type, + const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr) +{ + const struct ip6_addr *reply_src, *reply_dest; + struct netif *netif; + + /* Get the destination address and netif for this ICMP message. */ + LWIP_ASSERT("must provide both source and destination", src_addr != NULL); + LWIP_ASSERT("must provide both source and destination", dest_addr != NULL); + + /* Special case, as ip6_current_xxx is either NULL, or points + to a different packet than the one that expired. */ + IP6_ADDR_ZONECHECK(src_addr); + IP6_ADDR_ZONECHECK(dest_addr); + /* Swap source and destination for the reply. 
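+     The error is sent back to the original sender, so the original source
+     becomes the reply destination and the original destination becomes the
+     reply source (keeping the zone information of both).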
*/ + reply_dest = src_addr; + reply_src = dest_addr; + netif = ip6_route(reply_src, reply_dest); + if (netif == NULL) { + ICMP6_STATS_INC(icmp6.rterr); + return; + } + icmp6_send_response_with_addrs_and_netif(p, code, data, type, reply_src, + reply_dest, netif); +} + +/** + * Send an ICMPv6 packet (with srd/dst address and netif given). + * + * @param p the input packet for which the response should be sent, + * p->payload pointing to the IPv6 header + * @param code Code of the ICMPv6 header + * @param data Additional 32-bit parameter in the ICMPv6 header + * @param type Type of the ICMPv6 header + * @param reply_src source address of the packet to send + * @param reply_dest destination address of the packet to send + * @param netif netif to send the packet + */ +static void +icmp6_send_response_with_addrs_and_netif(struct pbuf *p, u8_t code, u32_t data, u8_t type, + const ip6_addr_t *reply_src, const ip6_addr_t *reply_dest, struct netif *netif) +{ + struct pbuf *q; + struct icmp6_hdr *icmp6hdr; + + /* ICMPv6 header + IPv6 header + data */ + q = pbuf_alloc(PBUF_IP, sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE, + PBUF_RAM); + if (q == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMPv6 packet.\n")); + ICMP6_STATS_INC(icmp6.memerr); + return; + } + LWIP_ASSERT("check that first pbuf can hold icmp 6message", + (q->len >= (sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE))); + + icmp6hdr = (struct icmp6_hdr *)q->payload; + icmp6hdr->type = type; + icmp6hdr->code = code; + icmp6hdr->data = lwip_htonl(data); + + /* copy fields from original packet */ + SMEMCPY((u8_t *)q->payload + sizeof(struct icmp6_hdr), (u8_t *)p->payload, + IP6_HLEN + LWIP_ICMP6_DATASIZE); + + /* calculate checksum */ + icmp6hdr->chksum = 0; +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + icmp6hdr->chksum = ip6_chksum_pseudo(q, IP6_NEXTH_ICMP6, q->tot_len, + reply_src, reply_dest); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + ICMP6_STATS_INC(icmp6.xmit); + ip6_output_if(q, reply_src, reply_dest, LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(q); +} + +#endif /* LWIP_ICMP6 && LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c new file mode 100644 index 00000000..d9a992c2 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c @@ -0,0 +1,53 @@ +/** + * @file + * + * INET v6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/inet.h" + +/** This variable is initialized by the system to contain the wildcard IPv6 address. + */ +const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; + +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c new file mode 100644 index 00000000..eda11dc8 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c @@ -0,0 +1,1492 @@ +/** + * @file + * + * IPv6 layer. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/netif.h" +#include "lwip/ip.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip6_frag.h" +#include "lwip/icmp6.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/dhcp6.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/debug.h" +#include "lwip/stats.h" + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** + * Finds the appropriate network interface for a given IPv6 address. It tries to select + * a netif following a sequence of heuristics: + * 1) if there is only 1 netif, return it + * 2) if the destination is a zoned address, match its zone to a netif + * 3) if the either the source or destination address is a scoped address, + * match the source address's zone (if set) or address (if not) to a netif + * 4) tries to match the destination subnet to a configured address + * 5) tries to find a router-announced route + * 6) tries to match the (unscoped) source address to the netif + * 7) returns the default netif, if configured + * + * Note that each of the two given addresses may or may not be properly zoned. + * + * @param src the source IPv6 address, if known + * @param dest the destination IPv6 address for which to find the route + * @return the netif on which to send to reach dest + */ +struct netif * +ip6_route(const ip6_addr_t *src, const ip6_addr_t *dest) +{ +#if LWIP_SINGLE_NETIF + LWIP_UNUSED_ARG(src); + LWIP_UNUSED_ARG(dest); +#else /* LWIP_SINGLE_NETIF */ + struct netif *netif; + s8_t i; + + LWIP_ASSERT_CORE_LOCKED(); + + /* If single netif configuration, fast return. */ + if ((netif_list != NULL) && (netif_list->next == NULL)) { + if (!netif_is_up(netif_list) || !netif_is_link_up(netif_list) || + (ip6_addr_has_zone(dest) && !ip6_addr_test_zone(dest, netif_list))) { + return NULL; + } + return netif_list; + } + +#if LWIP_IPV6_SCOPES + /* Special processing for zoned destination addresses. This includes link- + * local unicast addresses and interface/link-local multicast addresses. Use + * the zone to find a matching netif. If the address is not zoned, then there + * is technically no "wrong" netif to choose, and we leave routing to other + * rules; in most cases this should be the scoped-source rule below. */ + if (ip6_addr_has_zone(dest)) { + IP6_ADDR_ZONECHECK(dest); + /* Find a netif based on the zone. For custom mappings, one zone may map + * to multiple netifs, so find one that can actually send a packet. */ + NETIF_FOREACH(netif) { + if (ip6_addr_test_zone(dest, netif) && + netif_is_up(netif) && netif_is_link_up(netif)) { + return netif; + } + } + /* No matching netif found. Do no try to route to a different netif, + * as that would be a zone violation, resulting in any packets sent to + * that netif being dropped on output. */ + return NULL; + } +#endif /* LWIP_IPV6_SCOPES */ + + /* Special processing for scoped source and destination addresses. If we get + * here, the destination address does not have a zone, so either way we need + * to look at the source address, which may or may not have a zone. If it + * does, the zone is restrictive: there is (typically) only one matching + * netif for it, and we should avoid routing to any other netif as that would + * result in guaranteed zone violations. 
For scoped source addresses that do + * not have a zone, use (only) a netif that has that source address locally + * assigned. This case also applies to the loopback source address, which has + * an implied link-local scope. If only the destination address is scoped + * (but, again, not zoned), we still want to use only the source address to + * determine its zone because that's most likely what the user/application + * wants, regardless of whether the source address is scoped. Finally, some + * of this story also applies if scoping is disabled altogether. */ +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_scope(dest, IP6_UNKNOWN) || + ip6_addr_has_scope(src, IP6_UNICAST) || +#else /* LWIP_IPV6_SCOPES */ + if (ip6_addr_islinklocal(dest) || ip6_addr_ismulticast_iflocal(dest) || + ip6_addr_ismulticast_linklocal(dest) || ip6_addr_islinklocal(src) || +#endif /* LWIP_IPV6_SCOPES */ + ip6_addr_isloopback(src)) { +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_zone(src)) { + /* Find a netif matching the source zone (relatively cheap). */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif) && netif_is_link_up(netif) && + ip6_addr_test_zone(src, netif)) { + return netif; + } + } + } else +#endif /* LWIP_IPV6_SCOPES */ + { + /* Find a netif matching the source address (relatively expensive). */ + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp_zoneless(src, netif_ip6_addr(netif, i))) { + return netif; + } + } + } + } + /* Again, do not use any other netif in this case, as that could result in + * zone boundary violations. */ + return NULL; + } + + /* We come here only if neither source nor destination is scoped. */ + IP6_ADDR_ZONECHECK(src); + +#ifdef LWIP_HOOK_IP6_ROUTE + netif = LWIP_HOOK_IP6_ROUTE(src, dest); + if (netif != NULL) { + return netif; + } +#endif + + /* See if the destination subnet matches a configured address. In accordance + * with RFC 5942, dynamically configured addresses do not have an implied + * local subnet, and thus should be considered /128 assignments. However, as + * such, the destination address may still match a local address, and so we + * still need to check for exact matches here. By (lwIP) policy, statically + * configured addresses do always have an implied local /64 subnet. */ + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_netcmp(dest, netif_ip6_addr(netif, i)) && + (netif_ip6_addr_isstatic(netif, i) || + ip6_addr_nethostcmp(dest, netif_ip6_addr(netif, i)))) { + return netif; + } + } + } + + /* Get the netif for a suitable router-announced route. */ + netif = nd6_find_route(dest); + if (netif != NULL) { + return netif; + } + + /* Try with the netif that matches the source address. Given the earlier rule + * for scoped source addresses, this applies to unscoped addresses only. 
*/ + if (!ip6_addr_isany(src)) { + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(src, netif_ip6_addr(netif, i))) { + return netif; + } + } + } + } + +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + /* loopif is disabled, loopback traffic is passed through any netif */ + if (ip6_addr_isloopback(dest)) { + /* don't check for link on loopback traffic */ + if (netif_default != NULL && netif_is_up(netif_default)) { + return netif_default; + } + /* default netif is not up, just use any netif for loopback traffic */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif)) { + return netif; + } + } + return NULL; + } +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ +#endif /* !LWIP_SINGLE_NETIF */ + + /* no matching netif found, use default netif, if up */ + if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default)) { + return NULL; + } + return netif_default; +} + +/** + * @ingroup ip6 + * Select the best IPv6 source address for a given destination IPv6 address. + * + * This implementation follows RFC 6724 Sec. 5 to the following extent: + * - Rules 1, 2, 3: fully implemented + * - Rules 4, 5, 5.5: not applicable + * - Rule 6: not implemented + * - Rule 7: not applicable + * - Rule 8: limited to "prefer /64 subnet match over non-match" + * + * For Rule 2, we deliberately deviate from RFC 6724 Sec. 3.1 by considering + * ULAs to be of smaller scope than global addresses, to avoid that a preferred + * ULA is picked over a deprecated global address when given a global address + * as destination, as that would likely result in broken two-way communication. + * + * As long as temporary addresses are not supported (as used in Rule 7), a + * proper implementation of Rule 8 would obviate the need to implement Rule 6. + * + * @param netif the netif on which to send a packet + * @param dest the destination we are trying to reach (possibly not properly + * zoned) + * @return the most suitable source address to use, or NULL if no suitable + * source address is found + */ +const ip_addr_t * +ip6_select_source_address(struct netif *netif, const ip6_addr_t *dest) +{ + const ip_addr_t *best_addr; + const ip6_addr_t *cand_addr; + s8_t dest_scope, cand_scope; + s8_t best_scope = IP6_MULTICAST_SCOPE_RESERVED; + u8_t i, cand_pref, cand_bits; + u8_t best_pref = 0; + u8_t best_bits = 0; + + /* Start by determining the scope of the given destination address. These + * tests are hopefully (roughly) in order of likeliness to match. */ + if (ip6_addr_isglobal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } else if (ip6_addr_islinklocal(dest) || ip6_addr_isloopback(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_LINK_LOCAL; + } else if (ip6_addr_isuniquelocal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL; + } else if (ip6_addr_ismulticast(dest)) { + dest_scope = ip6_addr_multicast_scope(dest); + } else if (ip6_addr_issitelocal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_SITE_LOCAL; + } else { + /* no match, consider scope global */ + dest_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } + + best_addr = NULL; + + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + /* Consider only valid (= preferred and deprecated) addresses. */ + if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { + continue; + } + /* Determine the scope of this candidate address. Same ordering idea. 
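+       Scopes are expressed as IP6_MULTICAST_SCOPE_* values so that candidate
+       and destination scope can be compared numerically in Rule 2 below.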
*/ + cand_addr = netif_ip6_addr(netif, i); + if (ip6_addr_isglobal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } else if (ip6_addr_islinklocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_LINK_LOCAL; + } else if (ip6_addr_isuniquelocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL; + } else if (ip6_addr_issitelocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_SITE_LOCAL; + } else { + /* no match, treat as low-priority global scope */ + cand_scope = IP6_MULTICAST_SCOPE_RESERVEDF; + } + cand_pref = ip6_addr_ispreferred(netif_ip6_addr_state(netif, i)); + /* @todo compute the actual common bits, for longest matching prefix. */ + /* We cannot count on the destination address having a proper zone + * assignment, so do not compare zones in this case. */ + cand_bits = ip6_addr_netcmp_zoneless(cand_addr, dest); /* just 1 or 0 for now */ + if (cand_bits && ip6_addr_nethostcmp(cand_addr, dest)) { + return netif_ip_addr6(netif, i); /* Rule 1 */ + } + if ((best_addr == NULL) || /* no alternative yet */ + ((cand_scope < best_scope) && (cand_scope >= dest_scope)) || + ((cand_scope > best_scope) && (best_scope < dest_scope)) || /* Rule 2 */ + ((cand_scope == best_scope) && ((cand_pref > best_pref) || /* Rule 3 */ + ((cand_pref == best_pref) && (cand_bits > best_bits))))) { /* Rule 8 */ + /* We found a new "winning" candidate. */ + best_addr = netif_ip_addr6(netif, i); + best_scope = cand_scope; + best_pref = cand_pref; + best_bits = cand_bits; + } + } + + return best_addr; /* may be NULL */ +} + +#if LWIP_IPV6_FORWARD +/** + * Forwards an IPv6 packet. It finds an appropriate route for the + * packet, decrements the HL value of the packet, and outputs + * the packet on the appropriate interface. + * + * @param p the packet to forward (p->payload points to IP header) + * @param iphdr the IPv6 header of the input packet + * @param inp the netif on which this packet was received + */ +static void +ip6_forward(struct pbuf *p, struct ip6_hdr *iphdr, struct netif *inp) +{ + struct netif *netif; + + /* do not forward link-local or loopback addresses */ + if (ip6_addr_islinklocal(ip6_current_dest_addr()) || + ip6_addr_isloopback(ip6_current_dest_addr())) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not forwarding link-local address.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } + + /* Find network interface where to forward this IP packet to. */ + netif = ip6_route(IP6_ADDR_ANY6, ip6_current_dest_addr()); + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(ip6_current_dest_addr()), + IP6_ADDR_BLOCK2(ip6_current_dest_addr()), + IP6_ADDR_BLOCK3(ip6_current_dest_addr()), + IP6_ADDR_BLOCK4(ip6_current_dest_addr()), + IP6_ADDR_BLOCK5(ip6_current_dest_addr()), + IP6_ADDR_BLOCK6(ip6_current_dest_addr()), + IP6_ADDR_BLOCK7(ip6_current_dest_addr()), + IP6_ADDR_BLOCK8(ip6_current_dest_addr()))); +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_dest_unreach(p, ICMP6_DUR_NO_ROUTE); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } +#if LWIP_IPV6_SCOPES + /* Do not forward packets with a zoned (e.g., link-local) source address + * outside of their zone. We determined the zone a bit earlier, so we know + * that the address is properly zoned here, so we can safely use has_zone. 
+ * Also skip packets with a loopback source address (link-local implied). */ + if ((ip6_addr_has_zone(ip6_current_src_addr()) && + !ip6_addr_test_zone(ip6_current_src_addr(), netif)) || + ip6_addr_isloopback(ip6_current_src_addr())) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not forwarding packet beyond its source address zone.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } +#endif /* LWIP_IPV6_SCOPES */ + /* Do not forward packets onto the same network interface on which + * they arrived. */ + if (netif == inp) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not bouncing packets back on incoming interface.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } + + /* decrement HL */ + IP6H_HOPLIM_SET(iphdr, IP6H_HOPLIM(iphdr) - 1); + /* send ICMP6 if HL == 0 */ + if (IP6H_HOPLIM(iphdr) == 0) { +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_time_exceeded(p, ICMP6_TE_HL); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.drop); + return; + } + + if (netif->mtu && (p->tot_len > netif->mtu)) { +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_packet_too_big(p, netif->mtu); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.drop); + return; + } + + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: forwarding packet to %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(ip6_current_dest_addr()), + IP6_ADDR_BLOCK2(ip6_current_dest_addr()), + IP6_ADDR_BLOCK3(ip6_current_dest_addr()), + IP6_ADDR_BLOCK4(ip6_current_dest_addr()), + IP6_ADDR_BLOCK5(ip6_current_dest_addr()), + IP6_ADDR_BLOCK6(ip6_current_dest_addr()), + IP6_ADDR_BLOCK7(ip6_current_dest_addr()), + IP6_ADDR_BLOCK8(ip6_current_dest_addr()))); + + /* transmit pbuf on chosen interface */ + netif->output_ip6(netif, p, ip6_current_dest_addr()); + IP6_STATS_INC(ip6.fw); + IP6_STATS_INC(ip6.xmit); + return; +} +#endif /* LWIP_IPV6_FORWARD */ + +/** Return true if the current input packet should be accepted on this netif */ +static int +ip6_input_accept(struct netif *netif) +{ + /* interface is up? */ + if (netif_is_up(netif)) { + u8_t i; + /* unicast to this interface address? address configured? */ + /* If custom scopes are used, the destination zone will be tested as + * part of the local-address comparison, but we need to test the source + * scope as well (e.g., is this interface on the same link?). */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(ip6_current_dest_addr(), netif_ip6_addr(netif, i)) +#if IPV6_CUSTOM_SCOPES + && (!ip6_addr_has_zone(ip6_current_src_addr()) || + ip6_addr_test_zone(ip6_current_src_addr(), netif)) +#endif /* IPV6_CUSTOM_SCOPES */ + ) { + /* accept on this netif */ + return 1; + } + } + } + return 0; +} + +/** + * This function is called by the network interface device driver when + * an IPv6 packet is received. The function does the basic checks of the + * IP header such as packet size being at least larger than the header + * size etc. If the packet was not destined for us, the packet is + * forwarded (using ip6_forward). + * + * Finally, the packet is sent to the upper layer protocol input function. 
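+ *
+ * On Ethernet interfaces this function is normally reached via
+ * ethernet_input(), which hands frames with EtherType 0x86DD to it; it is
+ * not intended to be called by application code.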
+ * + * @param p the received IPv6 packet (p->payload points to IPv6 header) + * @param inp the netif on which this packet was received + * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't + * processed, but currently always returns ERR_OK) + */ +err_t +ip6_input(struct pbuf *p, struct netif *inp) +{ + struct ip6_hdr *ip6hdr; + struct netif *netif; + const u8_t *nexth; + u16_t hlen, hlen_tot; /* the current header length */ +#if 0 /*IP_ACCEPT_LINK_LAYER_ADDRESSING*/ + @todo + int check_ip_src=1; +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ +#if LWIP_RAW + raw_input_state_t raw_status; +#endif /* LWIP_RAW */ + + LWIP_ASSERT_CORE_LOCKED(); + + IP6_STATS_INC(ip6.recv); + + /* identify the IP header */ + ip6hdr = (struct ip6_hdr *)p->payload; + if (IP6H_V(ip6hdr) != 6) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IPv6 packet dropped due to bad version number %"U32_F"\n", + IP6H_V(ip6hdr))); + pbuf_free(p); + IP6_STATS_INC(ip6.err); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + +#ifdef LWIP_HOOK_IP6_INPUT + if (LWIP_HOOK_IP6_INPUT(p, inp)) { + /* the packet has been eaten */ + return ERR_OK; + } +#endif + + /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */ + if ((IP6_HLEN > p->len) || (IP6H_PLEN(ip6hdr) > (p->tot_len - IP6_HLEN))) { + if (IP6_HLEN > p->len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", + (u16_t)IP6_HLEN, p->len)); + } + if ((IP6H_PLEN(ip6hdr) + IP6_HLEN) > p->tot_len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 (plen %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", + (u16_t)(IP6H_PLEN(ip6hdr) + IP6_HLEN), p->tot_len)); + } + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + + /* Trim pbuf. This should have been done at the netif layer, + * but we'll do it anyway just to be sure that its done. */ + pbuf_realloc(p, (u16_t)(IP6_HLEN + IP6H_PLEN(ip6hdr))); + + /* copy IP addresses to aligned ip6_addr_t */ + ip_addr_copy_from_ip6_packed(ip_data.current_iphdr_dest, ip6hdr->dest); + ip_addr_copy_from_ip6_packed(ip_data.current_iphdr_src, ip6hdr->src); + + /* Don't accept virtual IPv4 mapped IPv6 addresses. + * Don't accept multicast source addresses. */ + if (ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_dest)) || + ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_src)) || + ip6_addr_ismulticast(ip_2_ip6(&ip_data.current_iphdr_src))) { + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.err); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + + /* Set the appropriate zone identifier on the addresses. */ + ip6_addr_assign_zone(ip_2_ip6(&ip_data.current_iphdr_dest), IP6_UNKNOWN, inp); + ip6_addr_assign_zone(ip_2_ip6(&ip_data.current_iphdr_src), IP6_UNICAST, inp); + + /* current header pointer. */ + ip_data.current_ip6_header = ip6hdr; + + /* In netif, used in case we need to send ICMPv6 packets back. */ + ip_data.current_netif = inp; + ip_data.current_input_netif = inp; + + /* match packet against an interface, i.e. is this packet for us? */ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* Always joined to multicast if-local and link-local all-nodes group. 
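+       (ff01::1 and ff02::1 are accepted implicitly, without an MLD
+       membership entry.)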
*/ + if (ip6_addr_isallnodes_iflocal(ip6_current_dest_addr()) || + ip6_addr_isallnodes_linklocal(ip6_current_dest_addr())) { + netif = inp; + } +#if LWIP_IPV6_MLD + else if (mld6_lookfor_group(inp, ip6_current_dest_addr())) { + netif = inp; + } +#else /* LWIP_IPV6_MLD */ + else if (ip6_addr_issolicitednode(ip6_current_dest_addr())) { + u8_t i; + /* Filter solicited node packets when MLD is not enabled + * (for Neighbor discovery). */ + netif = NULL; + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp_solicitednode(ip6_current_dest_addr(), netif_ip6_addr(inp, i))) { + netif = inp; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: solicited node packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + break; + } + } + } +#endif /* LWIP_IPV6_MLD */ + else { + netif = NULL; + } + } else { + /* start trying with inp. if that's not acceptable, start walking the + list of configured netifs. */ + if (ip6_input_accept(inp)) { + netif = inp; + } else { + netif = NULL; +#if !IPV6_CUSTOM_SCOPES + /* Shortcut: stop looking for other interfaces if either the source or + * the destination has a scope constrained to this interface. Custom + * scopes may break the 1:1 link/interface mapping, however. */ + if (ip6_addr_islinklocal(ip6_current_dest_addr()) || + ip6_addr_islinklocal(ip6_current_src_addr())) { + goto netif_found; + } +#endif /* !IPV6_CUSTOM_SCOPES */ +#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF + /* The loopback address is to be considered link-local. Packets to it + * should be dropped on other interfaces, as per RFC 4291 Sec. 2.5.3. + * Its implied scope means packets *from* the loopback address should + * not be accepted on other interfaces, either. These requirements + * cannot be implemented in the case that loopback traffic is sent + * across a non-loopback interface, however. */ + if (ip6_addr_isloopback(ip6_current_dest_addr()) || + ip6_addr_isloopback(ip6_current_src_addr())) { + goto netif_found; + } +#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */ +#if !LWIP_SINGLE_NETIF + NETIF_FOREACH(netif) { + if (netif == inp) { + /* we checked that before already */ + continue; + } + if (ip6_input_accept(netif)) { + break; + } + } +#endif /* !LWIP_SINGLE_NETIF */ + } +netif_found: + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet accepted on interface %c%c\n", + netif ? netif->name[0] : 'X', netif? netif->name[1] : 'X')); + } + + /* "::" packet source address? (used in duplicate address detection) */ + if (ip6_addr_isany(ip6_current_src_addr()) && + (!ip6_addr_issolicitednode(ip6_current_dest_addr()))) { + /* packet source is not valid */ + /* free (drop) packet pbufs */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with src ANY_ADDRESS dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* packet not for us? */ + if (netif == NULL) { + /* packet not for us, route or discard */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_TRACE, ("ip6_input: packet not for us.\n")); +#if LWIP_IPV6_FORWARD + /* non-multicast packet? */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* try to forward IP packet on (other) interfaces */ + ip6_forward(p, ip6hdr, inp); + } +#endif /* LWIP_IPV6_FORWARD */ + pbuf_free(p); + goto ip6_input_cleanup; + } + + /* current netif pointer. */ + ip_data.current_netif = netif; + + /* Save next header type. */ + nexth = &IP6H_NEXTH(ip6hdr); + + /* Init header length. */ + hlen = hlen_tot = IP6_HLEN; + + /* Move to payload. 
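[Editor's note] When MLD is disabled, the loop above accepts solicited-node multicast traffic by comparing the destination against each configured unicast address. For reference, this is the mapping that ip6_addr_cmp_solicitednode() effectively checks; a small sketch that builds the solicited-node group ff02::1:ffXX:XXXX for a unicast address, using plain byte arrays rather than lwIP's packed types:

#include <stdint.h>
#include <string.h>

/* 'addr' is the 16-byte unicast address, 'out' receives the corresponding
 * solicited-node multicast address: fixed prefix plus the low 24 bits. */
static void make_solicited_node(const uint8_t addr[16], uint8_t out[16])
{
  static const uint8_t prefix[13] =
    { 0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0xff };
  memcpy(out, prefix, sizeof(prefix));
  out[13] = addr[13];
  out[14] = addr[14];
  out[15] = addr[15];
}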
*/ + pbuf_remove_header(p, IP6_HLEN); + + /* Process known option extension headers, if present. */ + while (*nexth != IP6_NEXTH_NONE) + { + switch (*nexth) { + case IP6_NEXTH_HOPBYHOP: + { + s32_t opt_offset; + struct ip6_hbh_hdr *hbh_hdr; + struct ip6_opt_hdr *opt_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Hop-by-Hop options header\n")); + + /* Get and check the header length, while staying in packet bounds. */ + hbh_hdr = (struct ip6_hbh_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_HBH_NEXTH(hbh_hdr); + + /* Get the header length. */ + hlen = (u16_t)(8 * (1 + hbh_hdr->_hlen)); + + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* The extended option header starts right after Hop-by-Hop header. */ + opt_offset = IP6_HBH_HLEN; + while (opt_offset < hlen) + { + s32_t opt_dlen = 0; + + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + opt_offset); + + switch (IP6_OPT_TYPE(opt_hdr)) { + /* @todo: process IPV6 Hop-by-Hop option data */ + case IP6_PAD1_OPTION: + /* PAD1 option doesn't have length and value field */ + opt_dlen = -1; + break; + case IP6_PADN_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_ROUTER_ALERT_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_JUMBO_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + default: + /* Check 2 MSB of Hop-by-Hop header type. */ + switch (IP6_OPT_TYPE_ACTION(opt_hdr)) { + case 1: + /* Discard the packet. */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 2: + /* Send ICMP Parameter Problem */ + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 3: + /* Send ICMP Parameter Problem if destination address is not a multicast address */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + } + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + default: + /* Skip over this option. */ + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + } + break; + } + + /* Adjust the offset to move to the next extended option header */ + opt_offset = opt_offset + IP6_OPT_HLEN + opt_dlen; + } + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_DESTOPTS: + { + s32_t opt_offset; + struct ip6_dest_hdr *dest_hdr; + struct ip6_opt_hdr *opt_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Destination options header\n")); + + dest_hdr = (struct ip6_dest_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_DEST_NEXTH(dest_hdr); + + /* Get the header length. 
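[Editor's note] The Hop-by-Hop case just shown and the Destination Options case being parsed here share the same TLV option encoding. A condensed, self-contained sketch of that walk, including the RFC 8200 rule that the two most significant bits of an unrecognized option type select the drop/ICMP behaviour (the option type values are the real protocol constants; the helper itself is only illustrative):

#include <stdint.h>
#include <stddef.h>

/* Pad1 (0x00) is a lone byte; every other option is type, length, data.
 * Returns 0 when all options were consumed, -1 on a truncated option, or the
 * action bits (1..3) of an unrecognized option that requires dropping. */
static int walk_ip6_options(const uint8_t *opts, size_t len)
{
  size_t off = 0;
  while (off < len) {
    uint8_t type = opts[off];
    if (type == 0x00) {                               /* Pad1 */
      off++;
      continue;
    }
    if ((off + 2 > len) || (off + 2 + opts[off + 1] > len)) {
      return -1;                                      /* option does not fit */
    }
    if ((type != 0x01) && (type != 0x05) && (type != 0xC2)) {
      /* PadN, Router Alert and Jumbo are known here; anything else is judged
       * by its top two bits: 0 = skip, 1 = drop silently,
       * 2 = drop + ICMP Parameter Problem, 3 = same unless multicast dest. */
      int action = (type >> 6) & 0x3;
      if (action != 0) {
        return action;
      }
    }
    off += (size_t)2 + opts[off + 1];                 /* skip type, len, data */
  }
  return 0;
}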
*/ + hlen = 8 * (1 + dest_hdr->_hlen); + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* The extended option header starts right after Destination header. */ + opt_offset = IP6_DEST_HLEN; + while (opt_offset < hlen) + { + s32_t opt_dlen = 0; + + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)dest_hdr + opt_offset); + + switch (IP6_OPT_TYPE(opt_hdr)) + { + /* @todo: process IPV6 Destination option data */ + case IP6_PAD1_OPTION: + /* PAD1 option deosn't have length and value field */ + opt_dlen = -1; + break; + case IP6_PADN_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_ROUTER_ALERT_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_JUMBO_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_HOME_ADDRESS_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + default: + /* Check 2 MSB of Destination header type. */ + switch (IP6_OPT_TYPE_ACTION(opt_hdr)) + { + case 1: + /* Discard the packet. */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 2: + /* Send ICMP Parameter Problem */ + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 3: + /* Send ICMP Parameter Problem if destination address is not a multicast address */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + } + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + default: + /* Skip over this option. */ + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + } + break; + } + + /* Adjust the offset to move to the next extended option header */ + opt_offset = opt_offset + IP6_OPT_HLEN + opt_dlen; + } + + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_ROUTING: + { + struct ip6_rout_hdr *rout_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Routing header\n")); + + rout_hdr = (struct ip6_rout_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_ROUT_NEXTH(rout_hdr); + + /* Get the header length. */ + hlen = 8 * (1 + rout_hdr->_hlen); + + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* Skip over this header. 
*/ + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* if segment left value is 0 in routing header, ignore the option */ + if (IP6_ROUT_SEG_LEFT(rout_hdr)) { + /* The length field of routing option header must be even */ + if (rout_hdr->_hlen & 0x1) { + /* Discard and send parameter field error */ + icmp6_param_problem(p, ICMP6_PP_FIELD, &rout_hdr->_hlen); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid routing type dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + switch (IP6_ROUT_TYPE(rout_hdr)) + { + /* TODO: process routing by the type */ + case IP6_ROUT_TYPE2: + break; + case IP6_ROUT_RPL: + break; + default: + /* Discard unrecognized routing type and send parameter field error */ + icmp6_param_problem(p, ICMP6_PP_FIELD, &IP6_ROUT_TYPE(rout_hdr)); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid routing type dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + } + + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_FRAGMENT: + { + struct ip6_frag_hdr *frag_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header\n")); + + frag_hdr = (struct ip6_frag_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_FRAG_NEXTH(frag_hdr); + + /* Fragment Header length. */ + hlen = 8; + + /* Make sure this header fits in current pbuf. */ + if (hlen > p->len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_FRAG_STATS_INC(ip6_frag.lenerr); + IP6_FRAG_STATS_INC(ip6_frag.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* check payload length is multiple of 8 octets when mbit is set */ + if (IP6_FRAG_MBIT(frag_hdr) && (IP6H_PLEN(ip6hdr) & 0x7)) { + /* ipv6 payload length is not multiple of 8 octets */ + icmp6_param_problem(p, ICMP6_PP_FIELD, LWIP_PACKED_CAST(const void *, &ip6hdr->_plen)); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid payload length dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* Offset == 0 and more_fragments == 0? */ + if ((frag_hdr->_fragment_offset & + PP_HTONS(IP6_FRAG_OFFSET_MASK | IP6_FRAG_MORE_FLAG)) == 0) { + /* This is a 1-fragment packet. Skip this header and continue. */ + pbuf_remove_header(p, hlen); + } else { +#if LWIP_IPV6_REASS + /* reassemble the packet */ + ip_data.current_ip_header_tot_len = hlen_tot; + p = ip6_reass(p); + /* packet not fully reassembled yet? */ + if (p == NULL) { + goto ip6_input_cleanup; + } + + /* Returned p point to IPv6 header. + * Update all our variables and pointers and continue. 
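[Editor's note] The `_fragment_offset` field of the Fragment header packs two values into 16 bits; the masks used above (IP6_FRAG_OFFSET_MASK, IP6_FRAG_MORE_FLAG) amount to the following decoding. Sketch only; the field is assumed to have been converted to host byte order already:

#include <stdint.h>

/* Upper 13 bits: fragment offset in 8-octet units, so masking with 0xFFF8
 * directly yields the byte offset; bit 0 is the "more fragments" flag. */
static void decode_frag_field(uint16_t field, uint16_t *byte_offset, int *more_fragments)
{
  *byte_offset = (uint16_t)(field & 0xFFF8);
  *more_fragments = (int)(field & 0x0001);
}

ip6_reass() relies on exactly this: the end offset of the fragment with the flag cleared becomes the reassembled payload length.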
*/ + ip6hdr = (struct ip6_hdr *)p->payload; + nexth = &IP6H_NEXTH(ip6hdr); + hlen = hlen_tot = IP6_HLEN; + pbuf_remove_header(p, IP6_HLEN); + +#else /* LWIP_IPV6_REASS */ + /* free (drop) packet pbufs */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header dropped (with LWIP_IPV6_REASS==0)\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.opterr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; +#endif /* LWIP_IPV6_REASS */ + } + break; + } + default: + goto options_done; + } + + if (*nexth == IP6_NEXTH_HOPBYHOP) { + /* Hop-by-Hop header comes only as a first option */ + icmp6_param_problem(p, ICMP6_PP_HEADER, nexth); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Hop-by-Hop options header dropped (only valid as a first option)\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + } + +options_done: + + /* send to upper layers */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: \n")); + ip6_debug_print(p); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len)); + + ip_data.current_ip_header_tot_len = hlen_tot; + +#if LWIP_RAW + /* p points to IPv6 header again for raw_input. */ + pbuf_add_header_force(p, hlen_tot); + /* raw input did not eat the packet? */ + raw_status = raw_input(p, inp); + if (raw_status != RAW_INPUT_EATEN) + { + /* Point to payload. */ + pbuf_remove_header(p, hlen_tot); +#else /* LWIP_RAW */ + { +#endif /* LWIP_RAW */ + switch (*nexth) { + case IP6_NEXTH_NONE: + pbuf_free(p); + break; +#if LWIP_UDP + case IP6_NEXTH_UDP: +#if LWIP_UDPLITE + case IP6_NEXTH_UDPLITE: +#endif /* LWIP_UDPLITE */ + udp_input(p, inp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case IP6_NEXTH_TCP: + tcp_input(p, inp); + break; +#endif /* LWIP_TCP */ +#if LWIP_ICMP6 + case IP6_NEXTH_ICMP6: + icmp6_input(p, inp); + break; +#endif /* LWIP_ICMP */ + default: +#if LWIP_RAW + if (raw_status == RAW_INPUT_DELIVERED) { + /* @todo: ipv6 mib in-delivers? */ + } else +#endif /* LWIP_RAW */ + { +#if LWIP_ICMP6 + /* p points to IPv6 header again for raw_input. */ + pbuf_add_header_force(p, hlen_tot); + /* send ICMP parameter problem unless it was a multicast or ICMPv6 */ + if ((!ip6_addr_ismulticast(ip6_current_dest_addr())) && + (IP6H_NEXTH(ip6hdr) != IP6_NEXTH_ICMP6)) { + icmp6_param_problem(p, ICMP6_PP_HEADER, nexth); + } +#endif /* LWIP_ICMP */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_input: Unsupported transport protocol %"U16_F"\n", (u16_t)IP6H_NEXTH(ip6hdr))); + IP6_STATS_INC(ip6.proterr); + IP6_STATS_INC(ip6.drop); + } + pbuf_free(p); + break; + } + } + +ip6_input_cleanup: + ip_data.current_netif = NULL; + ip_data.current_input_netif = NULL; + ip_data.current_ip6_header = NULL; + ip_data.current_ip_header_tot_len = 0; + ip6_addr_set_zero(ip6_current_src_addr()); + ip6_addr_set_zero(ip6_current_dest_addr()); + + return ERR_OK; +} + + +/** + * Sends an IPv6 packet on a network interface. This function constructs + * the IPv6 header. If the source IPv6 address is NULL, the IPv6 "ANY" address is + * used as source (usually during network startup). If the source IPv6 address it + * IP6_ADDR_ANY, the most appropriate IPv6 address of the outgoing network + * interface is filled in as source address. If the destination IPv6 address is + * LWIP_IP_HDRINCL, p is assumed to already include an IPv6 header and + * p->payload points to it instead of the data. + * + * @param p the packet to send (p->payload points to the data, e.g. 
next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. + * if src == NULL, IP6_ADDR_ANY is used as source) (src is possibly not + * properly zoned) + * @param dest the destination IPv6 address to send the packet to (possibly not + * properly zoned) + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IPv6/LINK headers + * returns errors returned by netif->output_ip6 + */ +err_t +ip6_output_if(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, + u8_t nexth, struct netif *netif) +{ + const ip6_addr_t *src_used = src; + if (dest != LWIP_IP_HDRINCL) { + if (src != NULL && ip6_addr_isany(src)) { + src_used = ip_2_ip6(ip6_select_source_address(netif, dest)); + if ((src_used == NULL) || ip6_addr_isany(src_used)) { + /* No appropriate source address was found for this packet. */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: No suitable source address for packet.\n")); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + } + } + return ip6_output_if_src(p, src_used, dest, hl, tc, nexth, netif); +} + +/** + * Same as ip6_output_if() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip6_output_if_src(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, + u8_t nexth, struct netif *netif) +{ + struct ip6_hdr *ip6hdr; + ip6_addr_t dest_addr; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + /* Should the IPv6 header be generated or is it already included in p? */ + if (dest != LWIP_IP_HDRINCL) { +#if LWIP_IPV6_SCOPES + /* If the destination address is scoped but lacks a zone, add a zone now, + * based on the outgoing interface. The lower layers (e.g., nd6) absolutely + * require addresses to be properly zoned for correctness. In some cases, + * earlier attempts will have been made to add a zone to the destination, + * but this function is the only one that is called in all (other) cases, + * so we must do this here. 
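[Editor's note] Callers can avoid depending on this late fix-up by zoning scoped destinations themselves. A sketch, assuming LWIP_IPV6_SCOPES is enabled and the application already knows the outgoing netif (the helper name is illustrative; with scopes disabled the zone macros compile to no-ops):

#include "lwip/ip6_addr.h"
#include "lwip/netif.h"

/* Parse a (possibly link-local) textual address and bind it to 'netif' so
 * that later routing and neighbor discovery see a properly zoned address. */
static int parse_and_zone(const char *text, ip6_addr_t *dst, struct netif *netif)
{
  if (!ip6addr_aton(text, dst)) {
    return 0;                                  /* not a valid IPv6 literal */
  }
  if (ip6_addr_lacks_zone(dst, IP6_UNKNOWN)) {
    ip6_addr_assign_zone(dst, IP6_UNKNOWN, netif);
  }
  return 1;
}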
*/ + if (ip6_addr_lacks_zone(dest, IP6_UNKNOWN)) { + ip6_addr_copy(dest_addr, *dest); + ip6_addr_assign_zone(&dest_addr, IP6_UNKNOWN, netif); + dest = &dest_addr; + } +#endif /* LWIP_IPV6_SCOPES */ + + /* generate IPv6 header */ + if (pbuf_add_header(p, IP6_HLEN)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: not enough room for IPv6 header in pbuf\n")); + IP6_STATS_INC(ip6.err); + return ERR_BUF; + } + + ip6hdr = (struct ip6_hdr *)p->payload; + LWIP_ASSERT("check that first pbuf can hold struct ip6_hdr", + (p->len >= sizeof(struct ip6_hdr))); + + IP6H_HOPLIM_SET(ip6hdr, hl); + IP6H_NEXTH_SET(ip6hdr, nexth); + + /* dest cannot be NULL here */ + ip6_addr_copy_to_packed(ip6hdr->dest, *dest); + + IP6H_VTCFL_SET(ip6hdr, 6, tc, 0); + IP6H_PLEN_SET(ip6hdr, (u16_t)(p->tot_len - IP6_HLEN)); + + if (src == NULL) { + src = IP6_ADDR_ANY6; + } + /* src cannot be NULL here */ + ip6_addr_copy_to_packed(ip6hdr->src, *src); + + } else { + /* IP header already included in p */ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + ip6_addr_assign_zone(&dest_addr, IP6_UNKNOWN, netif); + dest = &dest_addr; + } + + IP6_STATS_INC(ip6.xmit); + + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num)); + ip6_debug_print(p); + +#if ENABLE_LOOPBACK + { + int i; +#if !LWIP_HAVE_LOOPIF + if (ip6_addr_isloopback(dest)) { + return netif_loop_output(netif, p); + } +#endif /* !LWIP_HAVE_LOOPIF */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(dest, netif_ip6_addr(netif, i))) { + /* Packet to self, enqueue it for loopback */ + LWIP_DEBUGF(IP6_DEBUG, ("netif_loop_output()\n")); + return netif_loop_output(netif, p); + } + } + } +#if LWIP_MULTICAST_TX_OPTIONS + if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0) { + netif_loop_output(netif, p); + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ +#endif /* ENABLE_LOOPBACK */ +#if LWIP_IPV6_FRAG + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif_mtu6(netif) && (p->tot_len > nd6_get_destination_mtu(dest, netif))) { + return ip6_frag(p, netif, dest); + } +#endif /* LWIP_IPV6_FRAG */ + + LWIP_DEBUGF(IP6_DEBUG, ("netif->output_ip6()\n")); + return netif->output_ip6(netif, p, dest); +} + +/** + * Simple interface to ip6_output_if. It finds the outgoing network + * interface and calls upon ip6_output_if to do the actual work. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. 
+ * if src == NULL, IP6_ADDR_ANY is used as source) + * @param dest the destination IPv6 address to send the packet to + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip6_output(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth) +{ + struct netif *netif; + struct ip6_hdr *ip6hdr; + ip6_addr_t src_addr, dest_addr; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if (dest != LWIP_IP_HDRINCL) { + netif = ip6_route(src, dest); + } else { + /* IP header included in p, read addresses. */ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(src_addr, ip6hdr->src); + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + netif = ip6_route(&src_addr, &dest_addr); + } + + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(dest), + IP6_ADDR_BLOCK2(dest), + IP6_ADDR_BLOCK3(dest), + IP6_ADDR_BLOCK4(dest), + IP6_ADDR_BLOCK5(dest), + IP6_ADDR_BLOCK6(dest), + IP6_ADDR_BLOCK7(dest), + IP6_ADDR_BLOCK8(dest))); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + + return ip6_output_if(p, src, dest, hl, tc, nexth, netif); +} + + +#if LWIP_NETIF_USE_HINTS +/** Like ip6_output, but takes and addr_hint pointer that is passed on to netif->addr_hint + * before calling ip6_output_if. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. + * if src == NULL, IP6_ADDR_ANY is used as source) + * @param dest the destination IPv6 address to send the packet to + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * @param netif_hint netif output hint pointer set to netif->hint before + * calling ip_output_if() + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip6_output_hinted(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif_hint *netif_hint) +{ + struct netif *netif; + struct ip6_hdr *ip6hdr; + ip6_addr_t src_addr, dest_addr; + err_t err; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if (dest != LWIP_IP_HDRINCL) { + netif = ip6_route(src, dest); + } else { + /* IP header included in p, read addresses. 
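[Editor's note] A caller-level usage sketch for ip6_output() as defined above: the payload pbuf is prepared first, then ip6_output() prepends the IPv6 header and resolves the route itself. The function does not consume the pbuf, so the caller releases its own reference. The helper name is illustrative, and IP6_NEXTH_UDP is used only as a placeholder next-header value; real UDP traffic would normally go through udp_send()/udp_sendto():

#include "lwip/ip6.h"
#include "lwip/ip_addr.h"
#include "lwip/pbuf.h"

static err_t send_raw_ip6_example(const ip6_addr_t *dest, const void *data, u16_t len)
{
  err_t err;
  struct pbuf *p = pbuf_alloc(PBUF_IP, len, PBUF_RAM); /* reserves header room */
  if (p == NULL) {
    return ERR_MEM;
  }
  pbuf_take(p, data, len);
  /* 64 = hop limit, 0 = traffic class; IP6_ADDR_ANY6 lets the stack pick a
   * source address on the chosen netif. */
  err = ip6_output(p, IP6_ADDR_ANY6, dest, 64, 0, IP6_NEXTH_UDP);
  pbuf_free(p);        /* ip6_output() does not free the caller's reference */
  return err;
}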
*/ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(src_addr, ip6hdr->src); + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + netif = ip6_route(&src_addr, &dest_addr); + } + + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(dest), + IP6_ADDR_BLOCK2(dest), + IP6_ADDR_BLOCK3(dest), + IP6_ADDR_BLOCK4(dest), + IP6_ADDR_BLOCK5(dest), + IP6_ADDR_BLOCK6(dest), + IP6_ADDR_BLOCK7(dest), + IP6_ADDR_BLOCK8(dest))); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + + NETIF_SET_HINTS(netif, netif_hint); + err = ip6_output_if(p, src, dest, hl, tc, nexth, netif); + NETIF_RESET_HINTS(netif); + + return err; +} +#endif /* LWIP_NETIF_USE_HINTS*/ + +#if LWIP_IPV6_MLD +/** + * Add a hop-by-hop options header with a router alert option and padding. + * + * Used by MLD when sending a Multicast listener report/done message. + * + * @param p the packet to which we will prepend the options header + * @param nexth the next header protocol number (e.g. IP6_NEXTH_ICMP6) + * @param value the value of the router alert option data (e.g. IP6_ROUTER_ALERT_VALUE_MLD) + * @return ERR_OK if hop-by-hop header was added, ERR_* otherwise + */ +err_t +ip6_options_add_hbh_ra(struct pbuf *p, u8_t nexth, u8_t value) +{ + u8_t *opt_data; + u32_t offset = 0; + struct ip6_hbh_hdr *hbh_hdr; + struct ip6_opt_hdr *opt_hdr; + + /* fixed 4 bytes for router alert option and 2 bytes padding */ + const u8_t hlen = (sizeof(struct ip6_opt_hdr) * 2) + IP6_ROUTER_ALERT_DLEN; + /* Move pointer to make room for hop-by-hop options header. */ + if (pbuf_add_header(p, sizeof(struct ip6_hbh_hdr) + hlen)) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_options: no space for options header\n")); + IP6_STATS_INC(ip6.err); + return ERR_BUF; + } + + /* Set fields of Hop-by-Hop header */ + hbh_hdr = (struct ip6_hbh_hdr *)p->payload; + IP6_HBH_NEXTH(hbh_hdr) = nexth; + hbh_hdr->_hlen = 0; + offset = IP6_HBH_HLEN; + + /* Set router alert options to Hop-by-Hop extended option header */ + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + offset); + IP6_OPT_TYPE(opt_hdr) = IP6_ROUTER_ALERT_OPTION; + IP6_OPT_DLEN(opt_hdr) = IP6_ROUTER_ALERT_DLEN; + offset += IP6_OPT_HLEN; + + /* Set router alert option data */ + opt_data = (u8_t *)hbh_hdr + offset; + opt_data[0] = value; + opt_data[1] = 0; + offset += IP6_OPT_DLEN(opt_hdr); + + /* add 2 bytes padding to make 8 bytes Hop-by-Hop header length */ + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + offset); + IP6_OPT_TYPE(opt_hdr) = IP6_PADN_OPTION; + IP6_OPT_DLEN(opt_hdr) = 0; + + return ERR_OK; +} +#endif /* LWIP_IPV6_MLD */ + +#if IP6_DEBUG +/* Print an IPv6 header by using LWIP_DEBUGF + * @param p an IPv6 packet, p->payload pointing to the IPv6 header + */ +void +ip6_debug_print(struct pbuf *p) +{ + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload; + + LWIP_DEBUGF(IP6_DEBUG, ("IPv6 header:\n")); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %2"U16_F" | %3"U16_F" | %7"U32_F" | (ver, class, flow)\n", + IP6H_V(ip6hdr), + IP6H_TC(ip6hdr), + IP6H_FL(ip6hdr))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %5"U16_F" | %3"U16_F" | %3"U16_F" | (plen, nexth, hopl)\n", + IP6H_PLEN(ip6hdr), + IP6H_NEXTH(ip6hdr), + IP6H_HOPLIM(ip6hdr))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (src)\n", + 
IP6_ADDR_BLOCK1(&(ip6hdr->src)), + IP6_ADDR_BLOCK2(&(ip6hdr->src)), + IP6_ADDR_BLOCK3(&(ip6hdr->src)), + IP6_ADDR_BLOCK4(&(ip6hdr->src)))); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n", + IP6_ADDR_BLOCK5(&(ip6hdr->src)), + IP6_ADDR_BLOCK6(&(ip6hdr->src)), + IP6_ADDR_BLOCK7(&(ip6hdr->src)), + IP6_ADDR_BLOCK8(&(ip6hdr->src)))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (dest)\n", + IP6_ADDR_BLOCK1(&(ip6hdr->dest)), + IP6_ADDR_BLOCK2(&(ip6hdr->dest)), + IP6_ADDR_BLOCK3(&(ip6hdr->dest)), + IP6_ADDR_BLOCK4(&(ip6hdr->dest)))); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n", + IP6_ADDR_BLOCK5(&(ip6hdr->dest)), + IP6_ADDR_BLOCK6(&(ip6hdr->dest)), + IP6_ADDR_BLOCK7(&(ip6hdr->dest)), + IP6_ADDR_BLOCK8(&(ip6hdr->dest)))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); +} +#endif /* IP6_DEBUG */ + +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c new file mode 100644 index 00000000..687c02f7 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c @@ -0,0 +1,343 @@ +/** + * @file + * + * IPv6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * Functions for handling IPv6 addresses. + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/def.h" + +#include + +#if LWIP_IPV4 +#include "lwip/ip4_addr.h" /* for ip6addr_aton to handle IPv4-mapped addresses */ +#endif /* LWIP_IPV4 */ + +/* used by IP6_ADDR_ANY(6) in ip6_addr.h */ +const ip_addr_t ip6_addr_any = IPADDR6_INIT(0ul, 0ul, 0ul, 0ul); + +#define lwip_xchar(i) ((char)((i) < 10 ? 
'0' + (i) : 'A' + (i) - 10)) + +/** + * Check whether "cp" is a valid ascii representation + * of an IPv6 address and convert to a binary address. + * Returns 1 if the address is valid, 0 if not. + * + * @param cp IPv6 address in ascii representation (e.g. "FF01::1") + * @param addr pointer to which to save the ip address in network order + * @return 1 if cp could be converted to addr, 0 on failure + */ +int +ip6addr_aton(const char *cp, ip6_addr_t *addr) +{ + u32_t addr_index, zero_blocks, current_block_index, current_block_value; + const char *s; +#if LWIP_IPV4 + int check_ipv4_mapped = 0; +#endif /* LWIP_IPV4 */ + + /* Count the number of colons, to count the number of blocks in a "::" sequence + zero_blocks may be 1 even if there are no :: sequences */ + zero_blocks = 8; + for (s = cp; *s != 0; s++) { + if (*s == ':') { + zero_blocks--; +#if LWIP_IPV4 + } else if (*s == '.') { + if ((zero_blocks == 5) ||(zero_blocks == 2)) { + check_ipv4_mapped = 1; + /* last block could be the start of an IPv4 address */ + zero_blocks--; + } else { + /* invalid format */ + return 0; + } + break; +#endif /* LWIP_IPV4 */ + } else if (!lwip_isxdigit(*s)) { + break; + } + } + + /* parse each block */ + addr_index = 0; + current_block_index = 0; + current_block_value = 0; + for (s = cp; *s != 0; s++) { + if (*s == ':') { + if (addr) { + if (current_block_index & 0x1) { + addr->addr[addr_index++] |= current_block_value; + } + else { + addr->addr[addr_index] = current_block_value << 16; + } + } + current_block_index++; +#if LWIP_IPV4 + if (check_ipv4_mapped) { + if (current_block_index == 6) { + ip4_addr_t ip4; + int ret = ip4addr_aton(s + 1, &ip4); + if (ret) { + if (addr) { + addr->addr[3] = lwip_htonl(ip4.addr); + current_block_index++; + goto fix_byte_order_and_return; + } + return 1; + } + } + } +#endif /* LWIP_IPV4 */ + current_block_value = 0; + if (current_block_index > 7) { + /* address too long! */ + return 0; + } + if (s[1] == ':') { + if (s[2] == ':') { + /* invalid format: three successive colons */ + return 0; + } + s++; + /* "::" found, set zeros */ + while (zero_blocks > 0) { + zero_blocks--; + if (current_block_index & 0x1) { + addr_index++; + } else { + if (addr) { + addr->addr[addr_index] = 0; + } + } + current_block_index++; + if (current_block_index > 7) { + /* address too long! */ + return 0; + } + } + } + } else if (lwip_isxdigit(*s)) { + /* add current digit */ + current_block_value = (current_block_value << 4) + + (lwip_isdigit(*s) ? (u32_t)(*s - '0') : + (u32_t)(10 + (lwip_islower(*s) ? *s - 'a' : *s - 'A'))); + } else { + /* unexpected digit, space? CRLF? */ + break; + } + } + + if (addr) { + if (current_block_index & 0x1) { + addr->addr[addr_index++] |= current_block_value; + } + else { + addr->addr[addr_index] = current_block_value << 16; + } +#if LWIP_IPV4 +fix_byte_order_and_return: +#endif + /* convert to network byte order. */ + for (addr_index = 0; addr_index < 4; addr_index++) { + addr->addr[addr_index] = lwip_htonl(addr->addr[addr_index]); + } + + ip6_addr_clear_zone(addr); + } + + if (current_block_index != 7) { + return 0; + } + + return 1; +} + +/** + * Convert numeric IPv6 address into ASCII representation. + * returns ptr to static buffer; not reentrant! + * + * @param addr ip6 address in network order to convert + * @return pointer to a global static (!) 
buffer that holds the ASCII + * representation of addr + */ +char * +ip6addr_ntoa(const ip6_addr_t *addr) +{ + static char str[40]; + return ip6addr_ntoa_r(addr, str, 40); +} + +/** + * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip6 address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char * +ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen) +{ + u32_t current_block_index, current_block_value, next_block_value; + s32_t i; + u8_t zero_flag, empty_block_flag; + +#if LWIP_IPV4 + if (ip6_addr_isipv4mappedipv6(addr)) { + /* This is an IPv4 mapped address */ + ip4_addr_t addr4; + char *ret; +#define IP4MAPPED_HEADER "::FFFF:" + char *buf_ip4 = buf + sizeof(IP4MAPPED_HEADER) - 1; + int buflen_ip4 = buflen - sizeof(IP4MAPPED_HEADER) + 1; + if (buflen < (int)sizeof(IP4MAPPED_HEADER)) { + return NULL; + } + memcpy(buf, IP4MAPPED_HEADER, sizeof(IP4MAPPED_HEADER)); + addr4.addr = addr->addr[3]; + ret = ip4addr_ntoa_r(&addr4, buf_ip4, buflen_ip4); + if (ret != buf_ip4) { + return NULL; + } + return buf; + } +#endif /* LWIP_IPV4 */ + i = 0; + empty_block_flag = 0; /* used to indicate a zero chain for "::' */ + + for (current_block_index = 0; current_block_index < 8; current_block_index++) { + /* get the current 16-bit block */ + current_block_value = lwip_htonl(addr->addr[current_block_index >> 1]); + if ((current_block_index & 0x1) == 0) { + current_block_value = current_block_value >> 16; + } + current_block_value &= 0xffff; + + /* Check for empty block. */ + if (current_block_value == 0) { + if (current_block_index == 7 && empty_block_flag == 1) { + /* special case, we must render a ':' for the last block. */ + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + break; + } + if (empty_block_flag == 0) { + /* generate empty block "::", but only if more than one contiguous zero block, + * according to current formatting suggestions RFC 5952. */ + next_block_value = lwip_htonl(addr->addr[(current_block_index + 1) >> 1]); + if ((current_block_index & 0x1) == 0x01) { + next_block_value = next_block_value >> 16; + } + next_block_value &= 0xffff; + if (next_block_value == 0) { + empty_block_flag = 1; + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + continue; /* move on to next block. */ + } + } else if (empty_block_flag == 1) { + /* move on to next block. */ + continue; + } + } else if (empty_block_flag == 1) { + /* Set this flag value so we don't produce multiple empty blocks. 
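[Editor's note] A small round-trip usage sketch for the two conversion routines in this file. The literal is a documentation-prefix address; the output is lwIP's uppercase, zero-compressed form, so it matches the input here:

#include "lwip/ip6_addr.h"
#include <stdio.h>

static void addr_roundtrip_example(void)
{
  ip6_addr_t a;
  char buf[40];                        /* worst-case textual length + NUL */
  if (ip6addr_aton("2001:DB8::AB:1", &a) &&
      (ip6addr_ntoa_r(&a, buf, (int)sizeof(buf)) != NULL)) {
    printf("%s\n", buf);               /* prints "2001:DB8::AB:1" again */
  }
}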
*/ + empty_block_flag = 2; + } + + if (current_block_index > 0) { + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + } + + if ((current_block_value & 0xf000) == 0) { + zero_flag = 1; + } else { + buf[i++] = lwip_xchar(((current_block_value & 0xf000) >> 12)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + if (((current_block_value & 0xf00) == 0) && (zero_flag)) { + /* do nothing */ + } else { + buf[i++] = lwip_xchar(((current_block_value & 0xf00) >> 8)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + if (((current_block_value & 0xf0) == 0) && (zero_flag)) { + /* do nothing */ + } + else { + buf[i++] = lwip_xchar(((current_block_value & 0xf0) >> 4)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + buf[i++] = lwip_xchar((current_block_value & 0xf)); + if (i >= buflen) { + return NULL; + } + } + + buf[i] = 0; + + return buf; +} + +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c new file mode 100644 index 00000000..d6c5d223 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c @@ -0,0 +1,862 @@ +/** + * @file + * + * IPv6 fragmentation and reassembly. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" +#include "lwip/ip6_frag.h" +#include "lwip/ip6.h" +#include "lwip/icmp6.h" +#include "lwip/nd6.h" +#include "lwip/ip.h" + +#include "lwip/pbuf.h" +#include "lwip/memp.h" +#include "lwip/stats.h" + +#include + +#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */ + + +/** Setting this to 0, you can turn off checking the fragments for overlapping + * regions. The code gets a little smaller. Only use this if you know that + * overlapping won't occur on your network! 
*/ +#ifndef IP_REASS_CHECK_OVERLAP +#define IP_REASS_CHECK_OVERLAP 1 +#endif /* IP_REASS_CHECK_OVERLAP */ + +/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is + * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. + * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA + * is set to 1, so one datagram can be reassembled at a time, only. */ +#ifndef IP_REASS_FREE_OLDEST +#define IP_REASS_FREE_OLDEST 1 +#endif /* IP_REASS_FREE_OLDEST */ + +#if IPV6_FRAG_COPYHEADER +/* The number of bytes we need to "borrow" from (i.e., overwrite in) the header + * that precedes the fragment header for reassembly pruposes. */ +#define IPV6_FRAG_REQROOM ((s16_t)(sizeof(struct ip6_reass_helper) - IP6_FRAG_HLEN)) +#endif + +#define IP_REASS_FLAG_LASTFRAG 0x01 + +/** This is a helper struct which holds the starting + * offset and the ending offset of this fragment to + * easily chain the fragments. + * It has the same packing requirements as the IPv6 header, since it replaces + * the Fragment Header in memory in incoming fragments to keep + * track of the various fragments. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_reass_helper { + PACK_STRUCT_FIELD(struct pbuf *next_pbuf); + PACK_STRUCT_FIELD(u16_t start); + PACK_STRUCT_FIELD(u16_t end); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* static variables */ +static struct ip6_reassdata *reassdatagrams; +static u16_t ip6_reass_pbufcount; + +/* Forward declarations. */ +static void ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr); +#if IP_REASS_FREE_OLDEST +static void ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed); +#endif /* IP_REASS_FREE_OLDEST */ + +void +ip6_reass_tmr(void) +{ + struct ip6_reassdata *r, *tmp; + +#if !IPV6_FRAG_COPYHEADER + LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1", + sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN); +#endif /* !IPV6_FRAG_COPYHEADER */ + + r = reassdatagrams; + while (r != NULL) { + /* Decrement the timer. Once it reaches 0, + * clean up the incomplete fragment assembly */ + if (r->timer > 0) { + r->timer--; + r = r->next; + } else { + /* reassembly timed out */ + tmp = r; + /* get the next pointer before freeing */ + r = r->next; + /* free the helper struct and all enqueued pbufs */ + ip6_reass_free_complete_datagram(tmp); + } + } +} + +/** + * Free a datagram (struct ip6_reassdata) and all its pbufs. + * Updates the total count of enqueued pbufs (ip6_reass_pbufcount), + * sends an ICMP time exceeded packet. + * + * @param ipr datagram to free + */ +static void +ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr) +{ + struct ip6_reassdata *prev; + u16_t pbufs_freed = 0; + u16_t clen; + struct pbuf *p; + struct ip6_reass_helper *iprh; + +#if LWIP_ICMP6 + iprh = (struct ip6_reass_helper *)ipr->p->payload; + if (iprh->start == 0) { + /* The first fragment was received, send ICMP time exceeded. */ + /* First, de-queue the first pbuf from r->p. */ + p = ipr->p; + ipr->p = iprh->next_pbuf; + /* Restore the part that we've overwritten with our helper structure, or we + * might send garbage (and disclose a pointer) in the ICMPv6 reply. */ + MEMCPY(p->payload, ipr->orig_hdr, sizeof(iprh)); + /* Then, move back to the original ipv6 header (we are now pointing to Fragment header). 
+ This cannot fail since we already checked when receiving this fragment. */ + if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)ipr->iphdr))) { + LWIP_ASSERT("ip6_reass_free: moving p->payload to ip6 header failed\n", 0); + } + else { + /* Reconstruct the zoned source and destination addresses, so that we do + * not end up sending the ICMP response over the wrong link. */ + ip6_addr_t src_addr, dest_addr; + ip6_addr_copy_from_packed(src_addr, IPV6_FRAG_SRC(ipr)); + ip6_addr_set_zone(&src_addr, ipr->src_zone); + ip6_addr_copy_from_packed(dest_addr, IPV6_FRAG_DEST(ipr)); + ip6_addr_set_zone(&dest_addr, ipr->dest_zone); + /* Send the actual ICMP response. */ + icmp6_time_exceeded_with_addrs(p, ICMP6_TE_FRAG, &src_addr, &dest_addr); + } + clen = pbuf_clen(p); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(p); + } +#endif /* LWIP_ICMP6 */ + + /* First, free all received pbufs. The individual pbufs need to be released + separately as they have not yet been chained */ + p = ipr->p; + while (p != NULL) { + struct pbuf *pcur; + iprh = (struct ip6_reass_helper *)p->payload; + pcur = p; + /* get the next pointer before freeing */ + p = iprh->next_pbuf; + clen = pbuf_clen(pcur); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(pcur); + } + + /* Then, unchain the struct ip6_reassdata from the list and free it. */ + if (ipr == reassdatagrams) { + reassdatagrams = ipr->next; + } else { + prev = reassdatagrams; + while (prev != NULL) { + if (prev->next == ipr) { + break; + } + prev = prev->next; + } + if (prev != NULL) { + prev->next = ipr->next; + } + } + memp_free(MEMP_IP6_REASSDATA, ipr); + + /* Finally, update number of pbufs in reassembly queue */ + LWIP_ASSERT("ip_reass_pbufcount >= clen", ip6_reass_pbufcount >= pbufs_freed); + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount - pbufs_freed); +} + +#if IP_REASS_FREE_OLDEST +/** + * Free the oldest datagram to make room for enqueueing new fragments. + * The datagram ipr is not freed! + * + * @param ipr ip6_reassdata for the current fragment + * @param pbufs_needed number of pbufs needed to enqueue + * (used for freeing other datagrams if not enough space) + */ +static void +ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed) +{ + struct ip6_reassdata *r, *oldest; + + /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs, + * but don't free the current datagram! */ + do { + r = oldest = reassdatagrams; + while (r != NULL) { + if (r != ipr) { + if (r->timer <= oldest->timer) { + /* older than the previous oldest */ + oldest = r; + } + } + r = r->next; + } + if (oldest == ipr) { + /* nothing to free, ipr is the only element on the list */ + return; + } + if (oldest != NULL) { + ip6_reass_free_complete_datagram(oldest); + } + } while (((ip6_reass_pbufcount + pbufs_needed) > IP_REASS_MAX_PBUFS) && (reassdatagrams != NULL)); +} +#endif /* IP_REASS_FREE_OLDEST */ + +/** + * Reassembles incoming IPv6 fragments into an IPv6 datagram. 
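[Editor's note] Before the implementation of ip6_reass() below, it may help to see its bookkeeping reduced to the core idea: each queued fragment is described by a [start, end) byte interval (stored in a helper struct that overwrites the Fragment header), and the datagram is complete once the intervals run gap-free from zero up to the length learned from the last fragment. A simplified, self-contained model of that completeness test:

#include <stdint.h>

struct frag_iv { uint16_t start, end; };   /* one entry per queued fragment */

/* 'iv' is assumed sorted by start offset; 'total_len' is the end offset of
 * the fragment that had the "more fragments" flag cleared (0 if unseen). */
static int frags_complete(const struct frag_iv *iv, int n, uint16_t total_len)
{
  uint16_t expect = 0;
  int i;
  for (i = 0; i < n; i++) {
    if (iv[i].start != expect) {
      return 0;                            /* gap (or overlap) in coverage */
    }
    expect = iv[i].end;
  }
  return (total_len != 0) && (expect == total_len);
}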
+ * + * @param p points to the IPv6 Fragment Header + * @return NULL if reassembly is incomplete, pbuf pointing to + * IPv6 Header if reassembly is complete + */ +struct pbuf * +ip6_reass(struct pbuf *p) +{ + struct ip6_reassdata *ipr, *ipr_prev; + struct ip6_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL; + struct ip6_frag_hdr *frag_hdr; + u16_t offset, len, start, end; + ptrdiff_t hdrdiff; + u16_t clen; + u8_t valid = 1; + struct pbuf *q, *next_pbuf; + + IP6_FRAG_STATS_INC(ip6_frag.recv); + + /* ip6_frag_hdr must be in the first pbuf, not chained. Checked by caller. */ + LWIP_ASSERT("IPv6 fragment header does not fit in first pbuf", + p->len >= sizeof(struct ip6_frag_hdr)); + + frag_hdr = (struct ip6_frag_hdr *) p->payload; + + clen = pbuf_clen(p); + + offset = lwip_ntohs(frag_hdr->_fragment_offset); + + /* Calculate fragment length from IPv6 payload length. + * Adjust for headers before Fragment Header. + * And finally adjust by Fragment Header length. */ + len = lwip_ntohs(ip6_current_header()->_plen); + hdrdiff = (u8_t*)p->payload - (const u8_t*)ip6_current_header(); + LWIP_ASSERT("not a valid pbuf (ip6_input check missing?)", hdrdiff <= 0xFFFF); + LWIP_ASSERT("not a valid pbuf (ip6_input check missing?)", hdrdiff >= IP6_HLEN); + hdrdiff -= IP6_HLEN; + hdrdiff += IP6_FRAG_HLEN; + if (hdrdiff > len) { + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + len = (u16_t)(len - hdrdiff); + start = (offset & IP6_FRAG_OFFSET_MASK); + if (start > (0xFFFF - len)) { + /* u16_t overflow, cannot handle this */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + + /* Look for the datagram the fragment belongs to in the current datagram queue, + * remembering the previous in the queue for later dequeueing. */ + for (ipr = reassdatagrams, ipr_prev = NULL; ipr != NULL; ipr = ipr->next) { + /* Check if the incoming fragment matches the one currently present + in the reassembly buffer. If so, we proceed with copying the + fragment into the buffer. */ + if ((frag_hdr->_identification == ipr->identification) && + ip6_addr_cmp_packed(ip6_current_src_addr(), &(IPV6_FRAG_SRC(ipr)), ipr->src_zone) && + ip6_addr_cmp_packed(ip6_current_dest_addr(), &(IPV6_FRAG_DEST(ipr)), ipr->dest_zone)) { + IP6_FRAG_STATS_INC(ip6_frag.cachehit); + break; + } + ipr_prev = ipr; + } + + if (ipr == NULL) { + /* Enqueue a new datagram into the datagram queue */ + ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA); + if (ipr == NULL) { +#if IP_REASS_FREE_OLDEST + /* Make room and try again. */ + ip6_reass_remove_oldest_datagram(ipr, clen); + ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA); + if (ipr != NULL) { + /* re-search ipr_prev since it might have been removed */ + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } else +#endif /* IP_REASS_FREE_OLDEST */ + { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + goto nullreturn; + } + } + + memset(ipr, 0, sizeof(struct ip6_reassdata)); + ipr->timer = IPV6_REASS_MAXAGE; + + /* enqueue the new structure to the front of the list */ + ipr->next = reassdatagrams; + reassdatagrams = ipr; + + /* Use the current IPv6 header for src/dest address reference. + * Eventually, we will replace it when we get the first fragment + * (it might be this one, in any case, it is done later). 
*/ + /* need to use the none-const pointer here: */ + ipr->iphdr = ip_data.current_ip6_header; +#if IPV6_FRAG_COPYHEADER + MEMCPY(&ipr->src, &ip6_current_header()->src, sizeof(ipr->src)); + MEMCPY(&ipr->dest, &ip6_current_header()->dest, sizeof(ipr->dest)); +#endif /* IPV6_FRAG_COPYHEADER */ +#if LWIP_IPV6_SCOPES + /* Also store the address zone information. + * @todo It is possible that due to netif destruction and recreation, the + * stored zones end up resolving to a different interface. In that case, we + * risk sending a "time exceeded" ICMP response over the wrong link. + * Ideally, netif destruction would clean up matching pending reassembly + * structures, but custom zone mappings would make that non-trivial. */ + ipr->src_zone = ip6_addr_zone(ip6_current_src_addr()); + ipr->dest_zone = ip6_addr_zone(ip6_current_dest_addr()); +#endif /* LWIP_IPV6_SCOPES */ + /* copy the fragmented packet id. */ + ipr->identification = frag_hdr->_identification; + + /* copy the nexth field */ + ipr->nexth = frag_hdr->_nexth; + } + + /* Check if we are allowed to enqueue more datagrams. */ + if ((ip6_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) { +#if IP_REASS_FREE_OLDEST + ip6_reass_remove_oldest_datagram(ipr, clen); + if ((ip6_reass_pbufcount + clen) <= IP_REASS_MAX_PBUFS) { + /* re-search ipr_prev since it might have been removed */ + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } else +#endif /* IP_REASS_FREE_OLDEST */ + { + /* @todo: send ICMPv6 time exceeded here? */ + /* drop this pbuf */ + IP6_FRAG_STATS_INC(ip6_frag.memerr); + goto nullreturn; + } + } + + /* Overwrite Fragment Header with our own helper struct. */ +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + /* Make room for struct ip6_reass_helper (only required if sizeof(void*) > 4). + This cannot fail since we already checked when receiving this fragment. */ + u8_t hdrerr = pbuf_header_force(p, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#else /* IPV6_FRAG_COPYHEADER */ + LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1", + sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN); +#endif /* IPV6_FRAG_COPYHEADER */ + + /* Prepare the pointer to the helper structure, and its initial values. + * Do not yet write to the structure itself, as we still have to make a + * backup of the original data, and we should not do that until we know for + * sure that we are going to add this packet to the list. */ + iprh = (struct ip6_reass_helper *)p->payload; + next_pbuf = NULL; + end = (u16_t)(start + len); + + /* find the right place to insert this pbuf */ + /* Iterate through until we either get to the end of the list (append), + * or we find on with a larger offset (insert). 
*/ + for (q = ipr->p; q != NULL;) { + iprh_tmp = (struct ip6_reass_helper*)q->payload; + if (start < iprh_tmp->start) { +#if IP_REASS_CHECK_OVERLAP + if (end > iprh_tmp->start) { + /* fragment overlaps with following, throw away */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + if (iprh_prev != NULL) { + if (start < iprh_prev->end) { + /* fragment overlaps with previous, throw away */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + } +#endif /* IP_REASS_CHECK_OVERLAP */ + /* the new pbuf should be inserted before this */ + next_pbuf = q; + if (iprh_prev != NULL) { + /* not the fragment with the lowest offset */ + iprh_prev->next_pbuf = p; + } else { + /* fragment with the lowest offset */ + ipr->p = p; + } + break; + } else if (start == iprh_tmp->start) { + /* received the same datagram twice: no need to keep the datagram */ + goto nullreturn; +#if IP_REASS_CHECK_OVERLAP + } else if (start < iprh_tmp->end) { + /* overlap: no need to keep the new datagram */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; +#endif /* IP_REASS_CHECK_OVERLAP */ + } else { + /* Check if the fragments received so far have no gaps. */ + if (iprh_prev != NULL) { + if (iprh_prev->end != iprh_tmp->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } + } + q = iprh_tmp->next_pbuf; + iprh_prev = iprh_tmp; + } + + /* If q is NULL, then we made it to the end of the list. Determine what to do now */ + if (q == NULL) { + if (iprh_prev != NULL) { + /* this is (for now), the fragment with the highest offset: + * chain it to the last fragment */ +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= start); +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = p; + if (iprh_prev->end != start) { + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("no previous fragment, this must be the first fragment!", + ipr->p == NULL); +#endif /* IP_REASS_CHECK_OVERLAP */ + /* this is the first fragment we ever received for this ip datagram */ + ipr->p = p; + } + } + + /* Track the current number of pbufs current 'in-flight', in order to limit + the number of fragments that may be enqueued at any one time */ + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount + clen); + + /* Remember IPv6 header if this is the first fragment. */ + if (start == 0) { + /* need to use the none-const pointer here: */ + ipr->iphdr = ip_data.current_ip6_header; + /* Make a backup of the part of the packet data that we are about to + * overwrite, so that we can restore the original later. */ + MEMCPY(ipr->orig_hdr, p->payload, sizeof(*iprh)); + /* For IPV6_FRAG_COPYHEADER there is no need to copy src/dst again, as they + * will be the same as they were. With LWIP_IPV6_SCOPES, the same applies + * to the source/destination zones. */ + } + /* Only after the backup do we get to fill in the actual helper structure. */ + iprh->next_pbuf = next_pbuf; + iprh->start = start; + iprh->end = end; + + /* If this is the last fragment, calculate total packet length. */ + if ((offset & IP6_FRAG_MORE_FLAG) == 0) { + ipr->datagram_len = iprh->end; + } + + /* Additional validity tests: we have received first and last fragment. */ + iprh_tmp = (struct ip6_reass_helper*)ipr->p->payload; + if (iprh_tmp->start != 0) { + valid = 0; + } + if (ipr->datagram_len == 0) { + valid = 0; + } + + /* Final validity test: no gaps between current and last fragment. 
*/ + iprh_prev = iprh; + q = iprh->next_pbuf; + while ((q != NULL) && valid) { + iprh = (struct ip6_reass_helper*)q->payload; + if (iprh_prev->end != iprh->start) { + valid = 0; + break; + } + iprh_prev = iprh; + q = iprh->next_pbuf; + } + + if (valid) { + /* All fragments have been received */ + struct ip6_hdr* iphdr_ptr; + + /* chain together the pbufs contained within the ip6_reassdata list. */ + iprh = (struct ip6_reass_helper*) ipr->p->payload; + while (iprh != NULL) { + next_pbuf = iprh->next_pbuf; + if (next_pbuf != NULL) { + /* Save next helper struct (will be hidden in next step). */ + iprh_tmp = (struct ip6_reass_helper*)next_pbuf->payload; + + /* hide the fragment header for every succeeding fragment */ + pbuf_remove_header(next_pbuf, IP6_FRAG_HLEN); +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + /* hide the extra bytes borrowed from ip6_hdr for struct ip6_reass_helper */ + u8_t hdrerr = pbuf_remove_header(next_pbuf, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#endif + pbuf_cat(ipr->p, next_pbuf); + } + else { + iprh_tmp = NULL; + } + + iprh = iprh_tmp; + } + + /* Get the first pbuf. */ + p = ipr->p; + +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + u8_t hdrerr; + /* Restore (only) the bytes that we overwrote beyond the fragment header. + * Those bytes may belong to either the IPv6 header or an extension + * header placed before the fragment header. */ + MEMCPY(p->payload, ipr->orig_hdr, IPV6_FRAG_REQROOM); + /* get back room for struct ip6_reass_helper (only required if sizeof(void*) > 4) */ + hdrerr = pbuf_remove_header(p, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#endif + + /* We need to get rid of the fragment header itself, which is somewhere in + * the middle of the packet (but still in the first pbuf of the chain). + * Getting rid of the header is required by RFC 2460 Sec. 4.5 and necessary + * in order to be able to reassemble packets that are close to full size + * (i.e., around 65535 bytes). We simply move up all the headers before the + * fragment header, including the IPv6 header, and adjust the payload start + * accordingly. This works because all these headers are in the first pbuf + * of the chain, and because the caller adjusts all its pointers on + * successful reassembly. */ + MEMMOVE((u8_t*)ipr->iphdr + sizeof(struct ip6_frag_hdr), ipr->iphdr, + (size_t)((u8_t*)p->payload - (u8_t*)ipr->iphdr)); + + /* This is where the IPv6 header is now. */ + iphdr_ptr = (struct ip6_hdr*)((u8_t*)ipr->iphdr + + sizeof(struct ip6_frag_hdr)); + + /* Adjust datagram length by adding header lengths. */ + ipr->datagram_len = (u16_t)(ipr->datagram_len + ((u8_t*)p->payload - (u8_t*)iphdr_ptr) + - IP6_HLEN); + + /* Set payload length in ip header. */ + iphdr_ptr->_plen = lwip_htons(ipr->datagram_len); + + /* With the fragment header gone, we now need to adjust the next-header + * field of whatever header was originally before it. Since the packet made + * it through the original header processing routines at least up to the + * fragment header, we do not need any further sanity checks here. 
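+ * If the fragment header did not directly follow the IPv6 header, the loop below walks the extension header chain to find the header whose next-header field pointed at it, and patches that field instead.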
*/ + if (IP6H_NEXTH(iphdr_ptr) == IP6_NEXTH_FRAGMENT) { + iphdr_ptr->_nexth = ipr->nexth; + } else { + u8_t *ptr = (u8_t *)iphdr_ptr + IP6_HLEN; + while (*ptr != IP6_NEXTH_FRAGMENT) { + ptr += 8 * (1 + ptr[1]); + } + *ptr = ipr->nexth; + } + + /* release the resources allocated for the fragment queue entry */ + if (reassdatagrams == ipr) { + /* it was the first in the list */ + reassdatagrams = ipr->next; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", ipr_prev != NULL); + ipr_prev->next = ipr->next; + } + memp_free(MEMP_IP6_REASSDATA, ipr); + + /* adjust the number of pbufs currently queued for reassembly. */ + clen = pbuf_clen(p); + LWIP_ASSERT("ip6_reass_pbufcount >= clen", ip6_reass_pbufcount >= clen); + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount - clen); + + /* Move pbuf back to IPv6 header. This should never fail. */ + if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)iphdr_ptr))) { + LWIP_ASSERT("ip6_reass: moving p->payload to ip6 header failed\n", 0); + pbuf_free(p); + return NULL; + } + + /* Return the pbuf chain */ + return p; + } + /* the datagram is not (yet?) reassembled completely */ + return NULL; + +nullreturn: + IP6_FRAG_STATS_INC(ip6_frag.drop); + pbuf_free(p); + return NULL; +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_FRAG + +#if !LWIP_NETIF_TX_SINGLE_PBUF +/** Allocate a new struct pbuf_custom_ref */ +static struct pbuf_custom_ref* +ip6_frag_alloc_pbuf_custom_ref(void) +{ + return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF); +} + +/** Free a struct pbuf_custom_ref */ +static void +ip6_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p) +{ + LWIP_ASSERT("p != NULL", p != NULL); + memp_free(MEMP_FRAG_PBUF, p); +} + +/** Free-callback function to free a 'struct pbuf_custom_ref', called by + * pbuf_free. */ +static void +ip6_frag_free_pbuf_custom(struct pbuf *p) +{ + struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p; + LWIP_ASSERT("pcr != NULL", pcr != NULL); + LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p); + if (pcr->original != NULL) { + pbuf_free(pcr->original); + } + ip6_frag_free_pbuf_custom_ref(pcr); +} +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * Fragment an IPv6 datagram if too large for the netif or path MTU. + * + * Chop the datagram in MTU sized chunks and send them in order + * by pointing PBUF_REFs into p + * + * @param p ipv6 packet to send + * @param netif the netif on which to send + * @param dest destination ipv6 address to which to send + * + * @return ERR_OK if sent successfully, err_t otherwise + */ +err_t +ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest) +{ + struct ip6_hdr *original_ip6hdr; + struct ip6_hdr *ip6hdr; + struct ip6_frag_hdr *frag_hdr; + struct pbuf *rambuf; +#if !LWIP_NETIF_TX_SINGLE_PBUF + struct pbuf *newpbuf; + u16_t newpbuflen = 0; + u16_t left_to_copy; +#endif + static u32_t identification; + u16_t left, cop; + const u16_t mtu = nd6_get_destination_mtu(dest, netif); + const u16_t nfb = (u16_t)((mtu - (IP6_HLEN + IP6_FRAG_HLEN)) & IP6_FRAG_OFFSET_MASK); + u16_t fragment_offset = 0; + u16_t last; + u16_t poff = IP6_HLEN; + + identification++; + + original_ip6hdr = (struct ip6_hdr *)p->payload; + + /* @todo we assume there are no options in the unfragmentable part (IPv6 header). */ + LWIP_ASSERT("p->tot_len >= IP6_HLEN", p->tot_len >= IP6_HLEN); + left = (u16_t)(p->tot_len - IP6_HLEN); + + while (left) { + last = (left <= nfb); + + /* Fill this fragment */ + cop = last ? 
left : nfb; + +#if LWIP_NETIF_TX_SINGLE_PBUF + rambuf = pbuf_alloc(PBUF_IP, cop + IP6_FRAG_HLEN, PBUF_RAM); + if (rambuf == NULL) { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL)); + poff += pbuf_copy_partial(p, (u8_t*)rambuf->payload + IP6_FRAG_HLEN, cop, poff); + /* make room for the IP header */ + if (pbuf_add_header(rambuf, IP6_HLEN)) { + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + /* fill in the IP header */ + SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN); + ip6hdr = (struct ip6_hdr *)rambuf->payload; + frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN); +#else + /* When not using a static buffer, create a chain of pbufs. + * The first will be a PBUF_RAM holding the link, IPv6, and Fragment header. + * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged, + * but limited to the size of an mtu. + */ + rambuf = pbuf_alloc(PBUF_LINK, IP6_HLEN + IP6_FRAG_HLEN, PBUF_RAM); + if (rambuf == NULL) { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (p->len >= (IP6_HLEN))); + SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN); + ip6hdr = (struct ip6_hdr *)rambuf->payload; + frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN); + + /* Can just adjust p directly for needed offset. */ + p->payload = (u8_t *)p->payload + poff; + p->len = (u16_t)(p->len - poff); + p->tot_len = (u16_t)(p->tot_len - poff); + + left_to_copy = cop; + while (left_to_copy) { + struct pbuf_custom_ref *pcr; + newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len; + /* Is this pbuf already empty? */ + if (!newpbuflen) { + p = p->next; + continue; + } + pcr = ip6_frag_alloc_pbuf_custom_ref(); + if (pcr == NULL) { + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + /* Mirror this pbuf, although we might not need all of it. */ + newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen); + if (newpbuf == NULL) { + ip6_frag_free_pbuf_custom_ref(pcr); + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + pbuf_ref(p); + pcr->original = p; + pcr->pc.custom_free_function = ip6_frag_free_pbuf_custom; + + /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain + * so that it is removed when pbuf_dechain is later called on rambuf. + */ + pbuf_cat(rambuf, newpbuf); + left_to_copy = (u16_t)(left_to_copy - newpbuflen); + if (left_to_copy) { + p = p->next; + } + } + poff = newpbuflen; +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + /* Set headers */ + frag_hdr->_nexth = original_ip6hdr->_nexth; + frag_hdr->reserved = 0; + frag_hdr->_fragment_offset = lwip_htons((u16_t)((fragment_offset & IP6_FRAG_OFFSET_MASK) | (last ? 0 : IP6_FRAG_MORE_FLAG))); + frag_hdr->_identification = lwip_htonl(identification); + + IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_FRAGMENT); + IP6H_PLEN_SET(ip6hdr, (u16_t)(cop + IP6_FRAG_HLEN)); + + /* No need for separate header pbuf - we allowed room for it in rambuf + * when allocated. + */ + IP6_FRAG_STATS_INC(ip6_frag.xmit); + netif->output_ip6(netif, rambuf, dest); + + /* Unfortunately we can't reuse rambuf - the hardware may still be + * using the buffer. Instead we free it (and the ensuing chain) and + * recreate it next time round the loop. 
If we're lucky the hardware + * will have already sent the packet, the free will really free, and + * there will be zero memory penalty. + */ + + pbuf_free(rambuf); + left = (u16_t)(left - cop); + fragment_offset = (u16_t)(fragment_offset + cop); + } + return ERR_OK; +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c new file mode 100644 index 00000000..6387d468 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c @@ -0,0 +1,626 @@ +/** + * @file + * Multicast listener discovery + * + * @defgroup mld6 MLD6 + * @ingroup ip6 + * Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710. + * No support for MLDv2.\n + * Note: The allnodes (ff01::1, ff02::1) group is assumed be received by your + * netif since it must always be received for correct IPv6 operation (e.g. SLAAC). + * Ensure the netif filters are configured accordingly!\n + * The netif flags also need NETIF_FLAG_MLD6 flag set to enable MLD6 on a + * netif ("netif->flags |= NETIF_FLAG_MLD6;").\n + * To be called from TCPIP thread. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +/* Based on igmp.c implementation of igmp v2 protocol */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_IPV6_MLD /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/mld6.h" +#include "lwip/prot/mld6.h" +#include "lwip/icmp6.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip.h" +#include "lwip/inet_chksum.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/memp.h" +#include "lwip/stats.h" + +#include + + +/* + * MLD constants + */ +#define MLD6_HL 1 +#define MLD6_JOIN_DELAYING_MEMBER_TMR_MS (500) + +#define MLD6_GROUP_NON_MEMBER 0 +#define MLD6_GROUP_DELAYING_MEMBER 1 +#define MLD6_GROUP_IDLE_MEMBER 2 + +/* Forward declarations. 
*/ +static struct mld_group *mld6_new_group(struct netif *ifp, const ip6_addr_t *addr); +static err_t mld6_remove_group(struct netif *netif, struct mld_group *group); +static void mld6_delayed_report(struct mld_group *group, u16_t maxresp); +static void mld6_send(struct netif *netif, struct mld_group *group, u8_t type); + + +/** + * Stop MLD processing on interface + * + * @param netif network interface on which stop MLD processing + */ +err_t +mld6_stop(struct netif *netif) +{ + struct mld_group *group = netif_mld6_data(netif); + + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, NULL); + + while (group != NULL) { + struct mld_group *next = group->next; /* avoid use-after-free below */ + + /* disable the group at the MAC level */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER); + } + + /* free group */ + memp_free(MEMP_MLD6_GROUP, group); + + /* move to "next" */ + group = next; + } + return ERR_OK; +} + +/** + * Report MLD memberships for this interface + * + * @param netif network interface on which report MLD memberships + */ +void +mld6_report_groups(struct netif *netif) +{ + struct mld_group *group = netif_mld6_data(netif); + + while (group != NULL) { + mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS); + group = group->next; + } +} + +/** + * Search for a group that is joined on a netif + * + * @param ifp the network interface for which to look + * @param addr the group ipv6 address to search for + * @return a struct mld_group* if the group has been found, + * NULL if the group wasn't found. + */ +struct mld_group * +mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr) +{ + struct mld_group *group = netif_mld6_data(ifp); + + while (group != NULL) { + if (ip6_addr_cmp(&(group->group_address), addr)) { + return group; + } + group = group->next; + } + + return NULL; +} + + +/** + * create a new group + * + * @param ifp the network interface for which to create + * @param addr the new group ipv6 + * @return a struct mld_group*, + * NULL on memory error. + */ +static struct mld_group * +mld6_new_group(struct netif *ifp, const ip6_addr_t *addr) +{ + struct mld_group *group; + + group = (struct mld_group *)memp_malloc(MEMP_MLD6_GROUP); + if (group != NULL) { + ip6_addr_set(&(group->group_address), addr); + group->timer = 0; /* Not running */ + group->group_state = MLD6_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + group->use = 0; + group->next = netif_mld6_data(ifp); + + netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group); + } + + return group; +} + +/** + * Remove a group from the mld_group_list, but do not free it yet + * + * @param group the group to remove + * @return ERR_OK if group was removed from the list, an err_t otherwise + */ +static err_t +mld6_remove_group(struct netif *netif, struct mld_group *group) +{ + err_t err = ERR_OK; + + /* Is it the first group? */ + if (netif_mld6_data(netif) == group) { + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group->next); + } else { + /* look for group further down the list */ + struct mld_group *tmpGroup; + for (tmpGroup = netif_mld6_data(netif); tmpGroup != NULL; tmpGroup = tmpGroup->next) { + if (tmpGroup->next == group) { + tmpGroup->next = group->next; + break; + } + } + /* Group not find group */ + if (tmpGroup == NULL) { + err = ERR_ARG; + } + } + + return err; +} + + +/** + * Process an input MLD message. Called by icmp6_input. 
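+ * Queries schedule a delayed report for the matching group(s); a report heard from another listener cancels our own pending report for that group.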
+ * + * @param p the mld packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +mld6_input(struct pbuf *p, struct netif *inp) +{ + struct mld_header *mld_hdr; + struct mld_group *group; + + MLD6_STATS_INC(mld6.recv); + + /* Check that mld header fits in packet. */ + if (p->len < sizeof(struct mld_header)) { + /* @todo debug message */ + pbuf_free(p); + MLD6_STATS_INC(mld6.lenerr); + MLD6_STATS_INC(mld6.drop); + return; + } + + mld_hdr = (struct mld_header *)p->payload; + + switch (mld_hdr->type) { + case ICMP6_TYPE_MLQ: /* Multicast listener query. */ + /* Is it a general query? */ + if (ip6_addr_isallnodes_linklocal(ip6_current_dest_addr()) && + ip6_addr_isany(&(mld_hdr->multicast_address))) { + MLD6_STATS_INC(mld6.rx_general); + /* Report all groups, except all nodes group, and if-local groups. */ + group = netif_mld6_data(inp); + while (group != NULL) { + if ((!(ip6_addr_ismulticast_iflocal(&(group->group_address)))) && + (!(ip6_addr_isallnodes_linklocal(&(group->group_address))))) { + mld6_delayed_report(group, mld_hdr->max_resp_delay); + } + group = group->next; + } + } else { + /* Have we joined this group? + * We use IP6 destination address to have a memory aligned copy. + * mld_hdr->multicast_address should be the same. */ + MLD6_STATS_INC(mld6.rx_group); + group = mld6_lookfor_group(inp, ip6_current_dest_addr()); + if (group != NULL) { + /* Schedule a report. */ + mld6_delayed_report(group, mld_hdr->max_resp_delay); + } + } + break; /* ICMP6_TYPE_MLQ */ + case ICMP6_TYPE_MLR: /* Multicast listener report. */ + /* Have we joined this group? + * We use IP6 destination address to have a memory aligned copy. + * mld_hdr->multicast_address should be the same. */ + MLD6_STATS_INC(mld6.rx_report); + group = mld6_lookfor_group(inp, ip6_current_dest_addr()); + if (group != NULL) { + /* If we are waiting to report, cancel it. */ + if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) { + group->timer = 0; /* stopped */ + group->group_state = MLD6_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + } + } + break; /* ICMP6_TYPE_MLR */ + case ICMP6_TYPE_MLD: /* Multicast listener done. */ + /* Do nothing, router will query us. */ + break; /* ICMP6_TYPE_MLD */ + default: + MLD6_STATS_INC(mld6.proterr); + MLD6_STATS_INC(mld6.drop); + break; + } + + pbuf_free(p); +} + +/** + * @ingroup mld6 + * Join a group on one or all network interfaces. + * + * If the group is to be joined on all interfaces, the given group address must + * not have a zone set (i.e., it must have its zone index set to IP6_NO_ZONE). + * If the group is to be joined on one particular interface, the given group + * address may or may not have a zone set. + * + * @param srcaddr ipv6 address (zoned) of the network interface which should + * join a new group. If IP6_ADDR_ANY6, join on all netifs + * @param groupaddr the ipv6 address of the group to join (possibly but not + * necessarily zoned) + * @return ERR_OK if group was joined on the netif(s), an err_t otherwise + */ +err_t +mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we join this interface ? 
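+ * (yes if srcaddr is the ANY address, or if it matches one of this netif's configured addresses)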
*/ + if (ip6_addr_isany(srcaddr) || + netif_get_ip6_addr_match(netif, srcaddr) >= 0) { + err = mld6_joingroup_netif(netif, groupaddr); + if (err != ERR_OK) { + return err; + } + } + } + + return err; +} + +/** + * @ingroup mld6 + * Join a group on a network interface. + * + * @param netif the network interface which should join a new group. + * @param groupaddr the ipv6 address of the group to join (possibly but not + * necessarily zoned) + * @return ERR_OK if group was joined on the netif, an err_t otherwise + */ +err_t +mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr) +{ + struct mld_group *group; +#if LWIP_IPV6_SCOPES + ip6_addr_t ip6addr; + + /* If the address has a particular scope but no zone set, use the netif to + * set one now. Within the mld6 module, all addresses are properly zoned. */ + if (ip6_addr_lacks_zone(groupaddr, IP6_MULTICAST)) { + ip6_addr_set(&ip6addr, groupaddr); + ip6_addr_assign_zone(&ip6addr, IP6_MULTICAST, netif); + groupaddr = &ip6addr; + } + IP6_ADDR_ZONECHECK_NETIF(groupaddr, netif); +#endif /* LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + + /* find group or create a new one if not found */ + group = mld6_lookfor_group(netif, groupaddr); + + if (group == NULL) { + /* Joining a new group. Create a new group entry. */ + group = mld6_new_group(netif, groupaddr); + if (group == NULL) { + return ERR_MEM; + } + + /* Activate this address on the MAC layer. */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER); + } + + /* Report our membership. */ + MLD6_STATS_INC(mld6.tx_report); + mld6_send(netif, group, ICMP6_TYPE_MLR); + mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS); + } + + /* Increment group use */ + group->use++; + return ERR_OK; +} + +/** + * @ingroup mld6 + * Leave a group on a network interface. + * + * Zoning of address follows the same rules as @ref mld6_joingroup. + * + * @param srcaddr ipv6 address (zoned) of the network interface which should + * leave the group. If IP6_ADDR_ANY6, leave on all netifs + * @param groupaddr the ipv6 address of the group to leave (possibly, but not + * necessarily zoned) + * @return ERR_OK if group was left on the netif(s), an err_t otherwise + */ +err_t +mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we leave this interface ? */ + if (ip6_addr_isany(srcaddr) || + netif_get_ip6_addr_match(netif, srcaddr) >= 0) { + err_t res = mld6_leavegroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Store this result if we have not yet gotten a success */ + err = res; + } + } + } + + return err; +} + +/** + * @ingroup mld6 + * Leave a group on a network interface. + * + * @param netif the network interface which should leave the group. 
+ * @param groupaddr the ipv6 address of the group to leave (possibly, but not + * necessarily zoned) + * @return ERR_OK if group was left on the netif, an err_t otherwise + */ +err_t +mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr) +{ + struct mld_group *group; +#if LWIP_IPV6_SCOPES + ip6_addr_t ip6addr; + + if (ip6_addr_lacks_zone(groupaddr, IP6_MULTICAST)) { + ip6_addr_set(&ip6addr, groupaddr); + ip6_addr_assign_zone(&ip6addr, IP6_MULTICAST, netif); + groupaddr = &ip6addr; + } + IP6_ADDR_ZONECHECK_NETIF(groupaddr, netif); +#endif /* LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + + /* find group */ + group = mld6_lookfor_group(netif, groupaddr); + + if (group != NULL) { + /* Leave if there is no other use of the group */ + if (group->use <= 1) { + /* Remove the group from the list */ + mld6_remove_group(netif, group); + + /* If we are the last reporter for this group */ + if (group->last_reporter_flag) { + MLD6_STATS_INC(mld6.tx_leave); + mld6_send(netif, group, ICMP6_TYPE_MLD); + } + + /* Disable the group at the MAC level */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER); + } + + /* free group struct */ + memp_free(MEMP_MLD6_GROUP, group); + } else { + /* Decrement group use */ + group->use--; + } + + /* Left group */ + return ERR_OK; + } + + /* Group not found */ + return ERR_VAL; +} + + +/** + * Periodic timer for mld processing. Must be called every + * MLD6_TMR_INTERVAL milliseconds (100). + * + * When a delaying member expires, a membership report is sent. + */ +void +mld6_tmr(void) +{ + struct netif *netif; + + NETIF_FOREACH(netif) { + struct mld_group *group = netif_mld6_data(netif); + + while (group != NULL) { + if (group->timer > 0) { + group->timer--; + if (group->timer == 0) { + /* If the state is MLD6_GROUP_DELAYING_MEMBER then we send a report for this group */ + if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) { + MLD6_STATS_INC(mld6.tx_report); + mld6_send(netif, group, ICMP6_TYPE_MLR); + group->group_state = MLD6_GROUP_IDLE_MEMBER; + } + } + } + group = group->next; + } + } +} + +/** + * Schedule a delayed membership report for a group + * + * @param group the mld_group for which "delaying" membership report + * should be sent + * @param maxresp_in the max resp delay provided in the query + */ +static void +mld6_delayed_report(struct mld_group *group, u16_t maxresp_in) +{ + /* Convert maxresp from milliseconds to tmr ticks */ + u16_t maxresp = maxresp_in / MLD6_TMR_INTERVAL; + if (maxresp == 0) { + maxresp = 1; + } + +#ifdef LWIP_RAND + /* Randomize maxresp. (if LWIP_RAND is supported) */ + maxresp = (u16_t)(LWIP_RAND() % maxresp); + if (maxresp == 0) { + maxresp = 1; + } +#endif /* LWIP_RAND */ + + /* Apply timer value if no report has been scheduled already. */ + if ((group->group_state == MLD6_GROUP_IDLE_MEMBER) || + ((group->group_state == MLD6_GROUP_DELAYING_MEMBER) && + ((group->timer == 0) || (maxresp < group->timer)))) { + group->timer = maxresp; + group->group_state = MLD6_GROUP_DELAYING_MEMBER; + } +} + +/** + * Send a MLD message (report or done). + * + * An IPv6 hop-by-hop options header with a router alert option + * is prepended. + * + * @param group the group to report or quit + * @param type ICMP6_TYPE_MLR (report) or ICMP6_TYPE_MLD (done) + */ +static void +mld6_send(struct netif *netif, struct mld_group *group, u8_t type) +{ + struct mld_header *mld_hdr; + struct pbuf *p; + const ip6_addr_t *src_addr; + + /* Allocate a packet. 
Size is MLD header + IPv6 Hop-by-hop options header. */ + p = pbuf_alloc(PBUF_IP, sizeof(struct mld_header) + MLD6_HBH_HLEN, PBUF_RAM); + if (p == NULL) { + MLD6_STATS_INC(mld6.memerr); + return; + } + + /* Move to make room for Hop-by-hop options header. */ + if (pbuf_remove_header(p, MLD6_HBH_HLEN)) { + pbuf_free(p); + MLD6_STATS_INC(mld6.lenerr); + return; + } + + /* Select our source address. */ + if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { + /* This is a special case, when we are performing duplicate address detection. + * We must join the multicast group, but we don't have a valid address yet. */ + src_addr = IP6_ADDR_ANY6; + } else { + /* Use link-local address as source address. */ + src_addr = netif_ip6_addr(netif, 0); + } + + /* MLD message header pointer. */ + mld_hdr = (struct mld_header *)p->payload; + + /* Set fields. */ + mld_hdr->type = type; + mld_hdr->code = 0; + mld_hdr->chksum = 0; + mld_hdr->max_resp_delay = 0; + mld_hdr->reserved = 0; + ip6_addr_copy_to_packed(mld_hdr->multicast_address, group->group_address); + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + mld_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, + src_addr, &(group->group_address)); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Add hop-by-hop headers options: router alert with MLD value. */ + ip6_options_add_hbh_ra(p, IP6_NEXTH_ICMP6, IP6_ROUTER_ALERT_VALUE_MLD); + + if (type == ICMP6_TYPE_MLR) { + /* Remember we were the last to report */ + group->last_reporter_flag = 1; + } + + /* Send the packet out. */ + MLD6_STATS_INC(mld6.xmit); + ip6_output_if(p, (ip6_addr_isany(src_addr)) ? NULL : src_addr, &(group->group_address), + MLD6_HL, 0, IP6_NEXTH_HOPBYHOP, netif); + pbuf_free(p); +} + +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c b/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c new file mode 100644 index 00000000..db0c132e --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c @@ -0,0 +1,2434 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/nd6.h" +#include "lwip/priv/nd6_priv.h" +#include "lwip/prot/nd6.h" +#include "lwip/prot/icmp6.h" +#include "lwip/pbuf.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp6.h" +#include "lwip/mld6.h" +#include "lwip/dhcp6.h" +#include "lwip/ip.h" +#include "lwip/stats.h" +#include "lwip/dns.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK +#error LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK +#endif + +/* Router tables. */ +struct nd6_neighbor_cache_entry neighbor_cache[LWIP_ND6_NUM_NEIGHBORS]; +struct nd6_destination_cache_entry destination_cache[LWIP_ND6_NUM_DESTINATIONS]; +struct nd6_prefix_list_entry prefix_list[LWIP_ND6_NUM_PREFIXES]; +struct nd6_router_list_entry default_router_list[LWIP_ND6_NUM_ROUTERS]; + +/* Default values, can be updated by a RA message. */ +u32_t reachable_time = LWIP_ND6_REACHABLE_TIME; +u32_t retrans_timer = LWIP_ND6_RETRANS_TIMER; /* @todo implement this value in timer */ + +/* Index for cache entries. */ +static u8_t nd6_cached_neighbor_index; +static netif_addr_idx_t nd6_cached_destination_index; + +/* Multicast address holder. */ +static ip6_addr_t multicast_address; + +static u8_t nd6_tmr_rs_reduction; + +/* Static buffer to parse RA packet options */ +union ra_options { + struct lladdr_option lladdr; + struct mtu_option mtu; + struct prefix_option prefix; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + struct rdnss_option rdnss; +#endif +}; +static union ra_options nd6_ra_buffer; + +/* Forward declarations. 
*/ +static s8_t nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr); +static s8_t nd6_new_neighbor_cache_entry(void); +static void nd6_free_neighbor_cache_entry(s8_t i); +static s16_t nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr); +static s16_t nd6_new_destination_cache_entry(void); +static int nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif); +static s8_t nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif); +static s8_t nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif); +static s8_t nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif); +static s8_t nd6_get_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif); +static s8_t nd6_new_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif); +static s8_t nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif); +static err_t nd6_queue_packet(s8_t neighbor_index, struct pbuf *q); + +#define ND6_SEND_FLAG_MULTICAST_DEST 0x01 +#define ND6_SEND_FLAG_ALLNODES_DEST 0x02 +#define ND6_SEND_FLAG_ANY_SRC 0x04 +static void nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags); +static void nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags); +static void nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags); +#if LWIP_IPV6_SEND_ROUTER_SOLICIT +static err_t nd6_send_rs(struct netif *netif); +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +#if LWIP_ND6_QUEUEING +static void nd6_free_q(struct nd6_q_entry *q); +#else /* LWIP_ND6_QUEUEING */ +#define nd6_free_q(q) pbuf_free(q) +#endif /* LWIP_ND6_QUEUEING */ +static void nd6_send_q(s8_t i); + + +/** + * A local address has been determined to be a duplicate. Take the appropriate + * action(s) on the address and the interface as a whole. + * + * @param netif the netif that owns the address + * @param addr_idx the index of the address detected to be a duplicate + */ +static void +nd6_duplicate_addr_detected(struct netif *netif, s8_t addr_idx) +{ + + /* Mark the address as duplicate, but leave its lifetimes alone. If this was + * a manually assigned address, it will remain in existence as duplicate, and + * as such be unusable for any practical purposes until manual intervention. + * If this was an autogenerated address, the address will follow normal + * expiration rules, and thus disappear once its valid lifetime expires. */ + netif_ip6_addr_set_state(netif, addr_idx, IP6_ADDR_DUPLICATED); + +#if LWIP_IPV6_AUTOCONFIG + /* If the affected address was the link-local address that we use to generate + * all other addresses, then we should not continue to use those derived + * addresses either, so mark them as duplicate as well. For autoconfig-only + * setups, this will make the interface effectively unusable, approaching the + * intention of RFC 4862 Sec. 5.4.5. @todo implement the full requirements */ + if (addr_idx == 0) { + s8_t i; + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) && + !netif_ip6_addr_isstatic(netif, i)) { + netif_ip6_addr_set_state(netif, i, IP6_ADDR_DUPLICATED); + } + } + } +#endif /* LWIP_IPV6_AUTOCONFIG */ +} + +#if LWIP_IPV6_AUTOCONFIG +/** + * We received a router advertisement that contains a prefix with the + * autoconfiguration flag set. Add or update an associated autogenerated + * address. 
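+ * Lifetimes of an existing autogenerated address for this prefix are refreshed; otherwise a new address is formed from the prefix and the interface identifier of the link-local address, and starts out in tentative state.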
+ * + * @param netif the netif on which the router advertisement arrived + * @param prefix_opt a pointer to the prefix option data + * @param prefix_addr an aligned copy of the prefix address + */ +static void +nd6_process_autoconfig_prefix(struct netif *netif, + struct prefix_option *prefix_opt, const ip6_addr_t *prefix_addr) +{ + ip6_addr_t ip6addr; + u32_t valid_life, pref_life; + u8_t addr_state; + s8_t i, free_idx; + + /* The caller already checks RFC 4862 Sec. 5.5.3 points (a) and (b). We do + * the rest, starting with checks for (c) and (d) here. */ + valid_life = lwip_htonl(prefix_opt->valid_lifetime); + pref_life = lwip_htonl(prefix_opt->preferred_lifetime); + if (pref_life > valid_life || prefix_opt->prefix_length != 64) { + return; /* silently ignore this prefix for autoconfiguration purposes */ + } + + /* If an autogenerated address already exists for this prefix, update its + * lifetimes. An address is considered autogenerated if 1) it is not static + * (i.e., manually assigned), and 2) there is an advertised autoconfiguration + * prefix for it (the one we are processing here). This does not necessarily + * exclude the possibility that the address was actually assigned by, say, + * DHCPv6. If that distinction becomes important in the future, more state + * must be kept. As explained elsewhere we also update lifetimes of tentative + * and duplicate addresses. Skip address slot 0 (the link-local address). */ + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + addr_state = netif_ip6_addr_state(netif, i); + if (!ip6_addr_isinvalid(addr_state) && !netif_ip6_addr_isstatic(netif, i) && + ip6_addr_netcmp(prefix_addr, netif_ip6_addr(netif, i))) { + /* Update the valid lifetime, as per RFC 4862 Sec. 5.5.3 point (e). + * The valid lifetime will never drop to zero as a result of this. */ + u32_t remaining_life = netif_ip6_addr_valid_life(netif, i); + if (valid_life > ND6_2HRS || valid_life > remaining_life) { + netif_ip6_addr_set_valid_life(netif, i, valid_life); + } else if (remaining_life > ND6_2HRS) { + netif_ip6_addr_set_valid_life(netif, i, ND6_2HRS); + } + LWIP_ASSERT("bad valid lifetime", !netif_ip6_addr_isstatic(netif, i)); + /* Update the preferred lifetime. No bounds checks are needed here. In + * rare cases the advertisement may un-deprecate the address, though. + * Deprecation is left to the timer code where it is handled anyway. */ + if (pref_life > 0 && addr_state == IP6_ADDR_DEPRECATED) { + netif_ip6_addr_set_state(netif, i, IP6_ADDR_PREFERRED); + } + netif_ip6_addr_set_pref_life(netif, i, pref_life); + return; /* there should be at most one matching address */ + } + } + + /* No autogenerated address exists for this prefix yet. See if we can add a + * new one. However, if IPv6 autoconfiguration is administratively disabled, + * do not generate new addresses, but do keep updating lifetimes for existing + * addresses. Also, when adding new addresses, we must protect explicitly + * against a valid lifetime of zero, because again, we use that as a special + * value. The generated address would otherwise expire immediately anyway. + * Finally, the original link-local address must be usable at all. We start + * creating addresses even if the link-local address is still in tentative + * state though, and deal with the fallout of that upon DAD collision. 
*/ + addr_state = netif_ip6_addr_state(netif, 0); + if (!netif->ip6_autoconfig_enabled || valid_life == IP6_ADDR_LIFE_STATIC || + ip6_addr_isinvalid(addr_state) || ip6_addr_isduplicated(addr_state)) { + return; + } + + /* Construct the new address that we intend to use, and then see if that + * address really does not exist. It might have been added manually, after + * all. As a side effect, find a free slot. Note that we cannot use + * netif_add_ip6_address() here, as it would return ERR_OK if the address + * already did exist, resulting in that address being given lifetimes. */ + IP6_ADDR(&ip6addr, prefix_addr->addr[0], prefix_addr->addr[1], + netif_ip6_addr(netif, 0)->addr[2], netif_ip6_addr(netif, 0)->addr[3]); + ip6_addr_assign_zone(&ip6addr, IP6_UNICAST, netif); + + free_idx = 0; + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) { + if (ip6_addr_cmp(&ip6addr, netif_ip6_addr(netif, i))) { + return; /* formed address already exists */ + } + } else if (free_idx == 0) { + free_idx = i; + } + } + if (free_idx == 0) { + return; /* no address slots available, try again on next advertisement */ + } + + /* Assign the new address to the interface. */ + ip_addr_copy_from_ip6(netif->ip6_addr[free_idx], ip6addr); + netif_ip6_addr_set_valid_life(netif, free_idx, valid_life); + netif_ip6_addr_set_pref_life(netif, free_idx, pref_life); + netif_ip6_addr_set_state(netif, free_idx, IP6_ADDR_TENTATIVE); +} +#endif /* LWIP_IPV6_AUTOCONFIG */ + +/** + * Process an incoming neighbor discovery message + * + * @param p the nd packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +nd6_input(struct pbuf *p, struct netif *inp) +{ + u8_t msg_type; + s8_t i; + s16_t dest_idx; + + ND6_STATS_INC(nd6.recv); + + msg_type = *((u8_t *)p->payload); + switch (msg_type) { + case ICMP6_TYPE_NA: /* Neighbor Advertisement. */ + { + struct na_header *na_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t target_address; + + /* Check that na header fits in packet. */ + if (p->len < (sizeof(struct na_header))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + na_hdr = (struct na_header *)p->payload; + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, na_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 7.1.2 requirements. */ + if (IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || na_hdr->code != 0 || + ip6_addr_ismulticast(&target_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: if IP destination is multicast, Solicited flag is zero */ + /* @todo RFC MUST: all included options have a length greater than zero */ + + /* Unsolicited NA?*/ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* This is an unsolicited NA. + * link-layer changed? + * part of DAD mechanism? */ + +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS + /* If the target address matches this netif, it is a DAD response. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) && + !ip6_addr_isduplicated(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + /* We are using a duplicate address. 
*/ + nd6_duplicate_addr_detected(inp, i); + + pbuf_free(p); + return; + } + } +#endif /* LWIP_IPV6_DUP_DETECT_ATTEMPTS */ + + /* Check that link-layer address option also fits in packet. */ + if (p->len < (sizeof(struct na_header) + 2)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* This is an unsolicited NA, most likely there was a LLADDR change. */ + i = nd6_find_neighbor_cache_entry(&target_address); + if (i >= 0) { + if (na_hdr->flags & ND6_FLAG_OVERRIDE) { + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + } + } + } else { + /* This is a solicited NA. + * neighbor address resolution response? + * neighbor unreachability detection response? */ + + /* Find the cache entry corresponding to this na. */ + i = nd6_find_neighbor_cache_entry(&target_address); + if (i < 0) { + /* We no longer care about this target address. drop it. */ + pbuf_free(p); + return; + } + + /* Update cache entry. */ + if ((na_hdr->flags & ND6_FLAG_OVERRIDE) || + (neighbor_cache[i].state == ND6_INCOMPLETE)) { + /* Check that link-layer address option also fits in packet. */ + if (p->len < (sizeof(struct na_header) + 2)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + } + + neighbor_cache[i].netif = inp; + neighbor_cache[i].state = ND6_REACHABLE; + neighbor_cache[i].counter.reachable_time = reachable_time; + + /* Send queued packets, if any. */ + if (neighbor_cache[i].q != NULL) { + nd6_send_q(i); + } + } + + break; /* ICMP6_TYPE_NA */ + } + case ICMP6_TYPE_NS: /* Neighbor solicitation. */ + { + struct ns_header *ns_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t target_address; + u8_t accepted; + + /* Check that ns header fits in packet. */ + if (p->len < sizeof(struct ns_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + ns_hdr = (struct ns_header *)p->payload; + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, ns_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 7.1.1 requirements. */ + if (IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || ns_hdr->code != 0 || + ip6_addr_ismulticast(&target_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: all included options have a length greater than zero */ + /* @todo RFC MUST: if IP source is 'any', destination is solicited-node multicast address */ + /* @todo RFC MUST: if IP source is 'any', there is no source LL address option */ + + /* Check if there is a link-layer address provided. Only point to it if in this buffer. 
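+ * If the option is absent or truncated, lladdr_opt is left NULL and the code below treats the solicitation as carrying no source link-layer address.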
*/ + if (p->len >= (sizeof(struct ns_header) + 2)) { + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header)); + if (p->len < (sizeof(struct ns_header) + (lladdr_opt->length << 3))) { + lladdr_opt = NULL; + } + } else { + lladdr_opt = NULL; + } + + /* Check if the target address is configured on the receiving netif. */ + accepted = 0; + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + if ((ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) || + (ip6_addr_istentative(netif_ip6_addr_state(inp, i)) && + ip6_addr_isany(ip6_current_src_addr()))) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + accepted = 1; + break; + } + } + + /* NS not for us? */ + if (!accepted) { + pbuf_free(p); + return; + } + + /* Check for ANY address in src (DAD algorithm). */ + if (ip6_addr_isany(ip6_current_src_addr())) { + /* Sender is validating this address. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + /* Send a NA back so that the sender does not use this address. */ + nd6_send_na(inp, netif_ip6_addr(inp, i), ND6_FLAG_OVERRIDE | ND6_SEND_FLAG_ALLNODES_DEST); + if (ip6_addr_istentative(netif_ip6_addr_state(inp, i))) { + /* We shouldn't use this address either. */ + nd6_duplicate_addr_detected(inp, i); + } + } + } + } else { + /* Sender is trying to resolve our address. */ + /* Verify that they included their own link-layer address. */ + if (lladdr_opt == NULL) { + /* Not a valid message. */ + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + i = nd6_find_neighbor_cache_entry(ip6_current_src_addr()); + if (i>= 0) { + /* We already have a record for the solicitor. */ + if (neighbor_cache[i].state == ND6_INCOMPLETE) { + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + + /* Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } else { + /* Add their IPv6 address and link-layer address to neighbor cache. + * We will need it at least to send a unicast NA message, but most + * likely we will also be communicating with this node soon. */ + i = nd6_new_neighbor_cache_entry(); + if (i < 0) { + /* We couldn't assign a cache entry for this neighbor. + * we won't be able to reply. drop it. */ + pbuf_free(p); + ND6_STATS_INC(nd6.memerr); + return; + } + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + ip6_addr_set(&(neighbor_cache[i].next_hop_address), ip6_current_src_addr()); + + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + + /* Send back a NA for us. Allocate the reply pbuf. */ + nd6_send_na(inp, &target_address, ND6_FLAG_SOLICITED | ND6_FLAG_OVERRIDE); + } + + break; /* ICMP6_TYPE_NS */ + } + case ICMP6_TYPE_RA: /* Router Advertisement. */ + { + struct ra_header *ra_hdr; + u8_t *buffer; /* Used to copy options. 
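+ * It points either directly into the pbuf (contiguous case) or at the static nd6_ra_buffer copy.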
*/ + u16_t offset; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + /* There can be multiple RDNSS options per RA */ + u8_t rdnss_server_idx = 0; +#endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */ + + /* Check that RA header fits in packet. */ + if (p->len < sizeof(struct ra_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + ra_hdr = (struct ra_header *)p->payload; + + /* Check a subset of the other RFC 4861 Sec. 6.1.2 requirements. */ + if (!ip6_addr_islinklocal(ip6_current_src_addr()) || + IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || ra_hdr->code != 0) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: all included options have a length greater than zero */ + + /* If we are sending RS messages, stop. */ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* ensure at least one solicitation is sent (see RFC 4861, ch. 6.3.7) */ + if ((inp->rs_count < LWIP_ND6_MAX_MULTICAST_SOLICIT) || + (nd6_send_rs(inp) == ERR_OK)) { + inp->rs_count = 0; + } else { + inp->rs_count = 1; + } +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + + /* Get the matching default router entry. */ + i = nd6_get_router(ip6_current_src_addr(), inp); + if (i < 0) { + /* Create a new router entry. */ + i = nd6_new_router(ip6_current_src_addr(), inp); + } + + if (i < 0) { + /* Could not create a new router entry. */ + pbuf_free(p); + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Re-set invalidation timer. */ + default_router_list[i].invalidation_timer = lwip_htons(ra_hdr->router_lifetime); + + /* Re-set default timer values. */ +#if LWIP_ND6_ALLOW_RA_UPDATES + if (ra_hdr->retrans_timer > 0) { + retrans_timer = lwip_htonl(ra_hdr->retrans_timer); + } + if (ra_hdr->reachable_time > 0) { + reachable_time = lwip_htonl(ra_hdr->reachable_time); + } +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ + + /* @todo set default hop limit... */ + /* ra_hdr->current_hop_limit;*/ + + /* Update flags in local entry (incl. preference). */ + default_router_list[i].flags = ra_hdr->flags; + +#if LWIP_IPV6_DHCP6 + /* Trigger DHCPv6 if enabled */ + dhcp6_nd6_ra_trigger(inp, ra_hdr->flags & ND6_RA_FLAG_MANAGED_ADDR_CONFIG, + ra_hdr->flags & ND6_RA_FLAG_OTHER_CONFIG); +#endif + + /* Offset to options. */ + offset = sizeof(struct ra_header); + + /* Process each option. 
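+ * Each option starts with a type byte and a length byte given in units of 8 octets.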
*/ + while ((p->tot_len - offset) >= 2) { + u8_t option_type; + u16_t option_len; + int option_len8 = pbuf_try_get_at(p, offset + 1); + if (option_len8 <= 0) { + /* read beyond end or zero length */ + goto lenerr_drop_free_return; + } + option_len = ((u8_t)option_len8) << 3; + if (option_len > p->tot_len - offset) { + /* short packet (option does not fit in) */ + goto lenerr_drop_free_return; + } + if (p->len == p->tot_len) { + /* no need to copy from contiguous pbuf */ + buffer = &((u8_t*)p->payload)[offset]; + } else { + /* check if this option fits into our buffer */ + if (option_len > sizeof(nd6_ra_buffer)) { + option_type = pbuf_get_at(p, offset); + /* invalid option length */ + if (option_type != ND6_OPTION_TYPE_RDNSS) { + goto lenerr_drop_free_return; + } + /* we allow RDNSS option to be longer - we'll just drop some servers */ + option_len = sizeof(nd6_ra_buffer); + } + buffer = (u8_t*)&nd6_ra_buffer; + option_len = pbuf_copy_partial(p, &nd6_ra_buffer, option_len, offset); + } + option_type = buffer[0]; + switch (option_type) { + case ND6_OPTION_TYPE_SOURCE_LLADDR: + { + struct lladdr_option *lladdr_opt; + if (option_len < sizeof(struct lladdr_option)) { + goto lenerr_drop_free_return; + } + lladdr_opt = (struct lladdr_option *)buffer; + if ((default_router_list[i].neighbor_entry != NULL) && + (default_router_list[i].neighbor_entry->state == ND6_INCOMPLETE)) { + SMEMCPY(default_router_list[i].neighbor_entry->lladdr, lladdr_opt->addr, inp->hwaddr_len); + default_router_list[i].neighbor_entry->state = ND6_REACHABLE; + default_router_list[i].neighbor_entry->counter.reachable_time = reachable_time; + } + break; + } + case ND6_OPTION_TYPE_MTU: + { + struct mtu_option *mtu_opt; + u32_t mtu32; + if (option_len < sizeof(struct mtu_option)) { + goto lenerr_drop_free_return; + } + mtu_opt = (struct mtu_option *)buffer; + mtu32 = lwip_htonl(mtu_opt->mtu); + if ((mtu32 >= 1280) && (mtu32 <= 0xffff)) { +#if LWIP_ND6_ALLOW_RA_UPDATES + if (inp->mtu) { + /* don't set the mtu for IPv6 higher than the netif driver supports */ + inp->mtu6 = LWIP_MIN(inp->mtu, (u16_t)mtu32); + } else { + inp->mtu6 = (u16_t)mtu32; + } +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ + } + break; + } + case ND6_OPTION_TYPE_PREFIX_INFO: + { + struct prefix_option *prefix_opt; + ip6_addr_t prefix_addr; + if (option_len < sizeof(struct prefix_option)) { + goto lenerr_drop_free_return; + } + + prefix_opt = (struct prefix_option *)buffer; + + /* Get a memory-aligned copy of the prefix. */ + ip6_addr_copy_from_packed(prefix_addr, prefix_opt->prefix); + ip6_addr_assign_zone(&prefix_addr, IP6_UNICAST, inp); + + if (!ip6_addr_islinklocal(&prefix_addr)) { + if ((prefix_opt->flags & ND6_PREFIX_FLAG_ON_LINK) && + (prefix_opt->prefix_length == 64)) { + /* Add to on-link prefix list. */ + u32_t valid_life; + s8_t prefix; + + valid_life = lwip_htonl(prefix_opt->valid_lifetime); + + /* find cache entry for this prefix. */ + prefix = nd6_get_onlink_prefix(&prefix_addr, inp); + if (prefix < 0 && valid_life > 0) { + /* Create a new cache entry. */ + prefix = nd6_new_onlink_prefix(&prefix_addr, inp); + } + if (prefix >= 0) { + prefix_list[prefix].invalidation_timer = valid_life; + } + } +#if LWIP_IPV6_AUTOCONFIG + if (prefix_opt->flags & ND6_PREFIX_FLAG_AUTONOMOUS) { + /* Perform processing for autoconfiguration. */ + nd6_process_autoconfig_prefix(inp, prefix_opt, &prefix_addr); + } +#endif /* LWIP_IPV6_AUTOCONFIG */ + } + + break; + } + case ND6_OPTION_TYPE_ROUTE_INFO: + /* @todo implement preferred routes. 
+ struct route_option * route_opt; + route_opt = (struct route_option *)buffer;*/ + + break; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + case ND6_OPTION_TYPE_RDNSS: + { + u8_t num, n; + u16_t copy_offset = offset + SIZEOF_RDNSS_OPTION_BASE; + struct rdnss_option * rdnss_opt; + if (option_len < SIZEOF_RDNSS_OPTION_BASE) { + goto lenerr_drop_free_return; + } + + rdnss_opt = (struct rdnss_option *)buffer; + num = (rdnss_opt->length - 1) / 2; + for (n = 0; (rdnss_server_idx < DNS_MAX_SERVERS) && (n < num); n++) { + ip_addr_t rdnss_address; + + /* Copy directly from pbuf to get an aligned, zoned copy of the prefix. */ + if (pbuf_copy_partial(p, &rdnss_address, sizeof(ip6_addr_p_t), copy_offset) == sizeof(ip6_addr_p_t)) { + IP_SET_TYPE_VAL(rdnss_address, IPADDR_TYPE_V6); + ip6_addr_assign_zone(ip_2_ip6(&rdnss_address), IP6_UNKNOWN, inp); + + if (htonl(rdnss_opt->lifetime) > 0) { + /* TODO implement Lifetime > 0 */ + dns_setserver(rdnss_server_idx++, &rdnss_address); + } else { + /* TODO implement DNS removal in dns.c */ + u8_t s; + for (s = 0; s < DNS_MAX_SERVERS; s++) { + const ip_addr_t *addr = dns_getserver(s); + if(ip_addr_cmp(addr, &rdnss_address)) { + dns_setserver(s, NULL); + } + } + } + } + } + break; + } +#endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */ + default: + /* Unrecognized option, abort. */ + ND6_STATS_INC(nd6.proterr); + break; + } + /* option length is checked earlier to be non-zero to make sure loop ends */ + offset += 8 * (u8_t)option_len8; + } + + break; /* ICMP6_TYPE_RA */ + } + case ICMP6_TYPE_RD: /* Redirect */ + { + struct redirect_header *redir_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t destination_address, target_address; + + /* Check that Redir header fits in packet. */ + if (p->len < sizeof(struct redirect_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + redir_hdr = (struct redirect_header *)p->payload; + + /* Create an aligned, zoned copy of the destination address. */ + ip6_addr_copy_from_packed(destination_address, redir_hdr->destination_address); + ip6_addr_assign_zone(&destination_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 8.1 requirements. */ + if (!ip6_addr_islinklocal(ip6_current_src_addr()) || + IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || + redir_hdr->code != 0 || ip6_addr_ismulticast(&destination_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: IP source address equals first-hop router for destination_address */ + /* @todo RFC MUST: ICMP target address is either link-local address or same as destination_address */ + /* @todo RFC MUST: all included options have a length greater than zero */ + + if (p->len >= (sizeof(struct redirect_header) + 2)) { + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct redirect_header)); + if (p->len < (sizeof(struct redirect_header) + (lladdr_opt->length << 3))) { + lladdr_opt = NULL; + } + } else { + lladdr_opt = NULL; + } + + /* Find dest address in cache */ + dest_idx = nd6_find_destination_cache_entry(&destination_address); + if (dest_idx < 0) { + /* Destination not in cache, drop packet. */ + pbuf_free(p); + return; + } + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, redir_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Set the new target address. 
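+ * Subsequent packets for this destination are then sent via the redirect target as next hop.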
*/ + ip6_addr_copy(destination_cache[dest_idx].next_hop_addr, target_address); + + /* If Link-layer address of other router is given, try to add to neighbor cache. */ + if (lladdr_opt != NULL) { + if (lladdr_opt->type == ND6_OPTION_TYPE_TARGET_LLADDR) { + i = nd6_find_neighbor_cache_entry(&target_address); + if (i < 0) { + i = nd6_new_neighbor_cache_entry(); + if (i >= 0) { + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + ip6_addr_copy(neighbor_cache[i].next_hop_address, target_address); + + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } + if (i >= 0) { + if (neighbor_cache[i].state == ND6_INCOMPLETE) { + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } + } + } + break; /* ICMP6_TYPE_RD */ + } + case ICMP6_TYPE_PTB: /* Packet too big */ + { + struct icmp6_hdr *icmp6hdr; /* Packet too big message */ + struct ip6_hdr *ip6hdr; /* IPv6 header of the packet which caused the error */ + u32_t pmtu; + ip6_addr_t destination_address; + + /* Check that ICMPv6 header + IPv6 header fit in payload */ + if (p->len < (sizeof(struct icmp6_hdr) + IP6_HLEN)) { + /* drop short packets */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + icmp6hdr = (struct icmp6_hdr *)p->payload; + ip6hdr = (struct ip6_hdr *)((u8_t*)p->payload + sizeof(struct icmp6_hdr)); + + /* Create an aligned, zoned copy of the destination address. */ + ip6_addr_copy_from_packed(destination_address, ip6hdr->dest); + ip6_addr_assign_zone(&destination_address, IP6_UNKNOWN, inp); + + /* Look for entry in destination cache. */ + dest_idx = nd6_find_destination_cache_entry(&destination_address); + if (dest_idx < 0) { + /* Destination not in cache, drop packet. */ + pbuf_free(p); + return; + } + + /* Change the Path MTU. */ + pmtu = lwip_htonl(icmp6hdr->data); + destination_cache[dest_idx].pmtu = (u16_t)LWIP_MIN(pmtu, 0xFFFF); + + break; /* ICMP6_TYPE_PTB */ + } + + default: + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + break; /* default */ + } + + pbuf_free(p); + return; +lenerr_drop_free_return: + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + pbuf_free(p); +} + + +/** + * Periodic timer for Neighbor discovery functions: + * + * - Update neighbor reachability states + * - Update destination cache entries age + * - Update invalidation timers of default routers and on-link prefixes + * - Update lifetimes of our addresses + * - Perform duplicate address detection (DAD) for our addresses + * - Send router solicitations + */ +void +nd6_tmr(void) +{ + s8_t i; + struct netif *netif; + + /* Process neighbor entries. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + switch (neighbor_cache[i].state) { + case ND6_INCOMPLETE: + if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) && + (!neighbor_cache[i].isrouter)) { + /* Retries exceeded. */ + nd6_free_neighbor_cache_entry(i); + } else { + /* Send a NS for this entry. 
*/ + neighbor_cache[i].counter.probes_sent++; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST); + } + break; + case ND6_REACHABLE: + /* Send queued packets, if any are left. Should have been sent already. */ + if (neighbor_cache[i].q != NULL) { + nd6_send_q(i); + } + if (neighbor_cache[i].counter.reachable_time <= ND6_TMR_INTERVAL) { + /* Change to stale state. */ + neighbor_cache[i].state = ND6_STALE; + neighbor_cache[i].counter.stale_time = 0; + } else { + neighbor_cache[i].counter.reachable_time -= ND6_TMR_INTERVAL; + } + break; + case ND6_STALE: + neighbor_cache[i].counter.stale_time++; + break; + case ND6_DELAY: + if (neighbor_cache[i].counter.delay_time <= 1) { + /* Change to PROBE state. */ + neighbor_cache[i].state = ND6_PROBE; + neighbor_cache[i].counter.probes_sent = 0; + } else { + neighbor_cache[i].counter.delay_time--; + } + break; + case ND6_PROBE: + if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) && + (!neighbor_cache[i].isrouter)) { + /* Retries exceeded. */ + nd6_free_neighbor_cache_entry(i); + } else { + /* Send a NS for this entry. */ + neighbor_cache[i].counter.probes_sent++; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], 0); + } + break; + case ND6_NO_ENTRY: + default: + /* Do nothing. */ + break; + } + } + + /* Process destination entries. */ + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + destination_cache[i].age++; + } + + /* Process router entries. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if (default_router_list[i].neighbor_entry != NULL) { + /* Active entry. */ + if (default_router_list[i].invalidation_timer <= ND6_TMR_INTERVAL / 1000) { + /* No more than 1 second remaining. Clear this entry. Also clear any of + * its destination cache entries, as per RFC 4861 Sec. 5.3 and 6.3.5. */ + s8_t j; + for (j = 0; j < LWIP_ND6_NUM_DESTINATIONS; j++) { + if (ip6_addr_cmp(&destination_cache[j].next_hop_addr, + &default_router_list[i].neighbor_entry->next_hop_address)) { + ip6_addr_set_any(&destination_cache[j].destination_addr); + } + } + default_router_list[i].neighbor_entry->isrouter = 0; + default_router_list[i].neighbor_entry = NULL; + default_router_list[i].invalidation_timer = 0; + default_router_list[i].flags = 0; + } else { + default_router_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000; + } + } + } + + /* Process prefix entries. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if (prefix_list[i].netif != NULL) { + if (prefix_list[i].invalidation_timer <= ND6_TMR_INTERVAL / 1000) { + /* Entry timed out, remove it */ + prefix_list[i].invalidation_timer = 0; + prefix_list[i].netif = NULL; + } else { + prefix_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000; + } + } + } + + /* Process our own addresses, updating address lifetimes and/or DAD state. */ + NETIF_FOREACH(netif) { + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + u8_t addr_state; +#if LWIP_IPV6_ADDRESS_LIFETIMES + /* Step 1: update address lifetimes (valid and preferred). */ + addr_state = netif_ip6_addr_state(netif, i); + /* RFC 4862 is not entirely clear as to whether address lifetimes affect + * tentative addresses, and is even less clear as to what should happen + * with duplicate addresses. We choose to track and update lifetimes for + * both those types, although for different reasons: + * - for tentative addresses, the line of thought of Sec. 
5.7 combined + * with the potentially long period that an address may be in tentative + * state (due to the interface being down) suggests that lifetimes + * should be independent of external factors which would include DAD; + * - for duplicate addresses, retiring them early could result in a new + * but unwanted attempt at marking them as valid, while retiring them + * late/never could clog up address slots on the netif. + * As a result, we may end up expiring addresses of either type here. + */ + if (!ip6_addr_isinvalid(addr_state) && + !netif_ip6_addr_isstatic(netif, i)) { + u32_t life = netif_ip6_addr_valid_life(netif, i); + if (life <= ND6_TMR_INTERVAL / 1000) { + /* The address has expired. */ + netif_ip6_addr_set_valid_life(netif, i, 0); + netif_ip6_addr_set_pref_life(netif, i, 0); + netif_ip6_addr_set_state(netif, i, IP6_ADDR_INVALID); + } else { + if (!ip6_addr_life_isinfinite(life)) { + life -= ND6_TMR_INTERVAL / 1000; + LWIP_ASSERT("bad valid lifetime", life != IP6_ADDR_LIFE_STATIC); + netif_ip6_addr_set_valid_life(netif, i, life); + } + /* The address is still here. Update the preferred lifetime too. */ + life = netif_ip6_addr_pref_life(netif, i); + if (life <= ND6_TMR_INTERVAL / 1000) { + /* This case must also trigger if 'life' was already zero, so as to + * deal correctly with advertised preferred-lifetime reductions. */ + netif_ip6_addr_set_pref_life(netif, i, 0); + if (addr_state == IP6_ADDR_PREFERRED) + netif_ip6_addr_set_state(netif, i, IP6_ADDR_DEPRECATED); + } else if (!ip6_addr_life_isinfinite(life)) { + life -= ND6_TMR_INTERVAL / 1000; + netif_ip6_addr_set_pref_life(netif, i, life); + } + } + } + /* The address state may now have changed, so reobtain it next. */ +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + /* Step 2: update DAD state. */ + addr_state = netif_ip6_addr_state(netif, i); + if (ip6_addr_istentative(addr_state)) { + if ((addr_state & IP6_ADDR_TENTATIVE_COUNT_MASK) >= LWIP_IPV6_DUP_DETECT_ATTEMPTS) { + /* No NA received in response. Mark address as valid. For dynamic + * addresses with an expired preferred lifetime, the state is set to + * deprecated right away. That should almost never happen, though. */ + addr_state = IP6_ADDR_PREFERRED; +#if LWIP_IPV6_ADDRESS_LIFETIMES + if (!netif_ip6_addr_isstatic(netif, i) && + netif_ip6_addr_pref_life(netif, i) == 0) { + addr_state = IP6_ADDR_DEPRECATED; + } +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + netif_ip6_addr_set_state(netif, i, addr_state); + } else if (netif_is_up(netif) && netif_is_link_up(netif)) { + /* tentative: set next state by increasing by one */ + netif_ip6_addr_set_state(netif, i, addr_state + 1); + /* Send a NS for this address. Use the unspecified address as source + * address in all cases (RFC 4862 Sec. 5.4.2), not in the least + * because as it is, we only consider multicast replies for DAD. */ + nd6_send_ns(netif, netif_ip6_addr(netif, i), + ND6_SEND_FLAG_MULTICAST_DEST | ND6_SEND_FLAG_ANY_SRC); + } + } + } + } + +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* Send router solicitation messages, if necessary. 
*/ + if (!nd6_tmr_rs_reduction) { + nd6_tmr_rs_reduction = (ND6_RTR_SOLICITATION_INTERVAL / ND6_TMR_INTERVAL) - 1; + NETIF_FOREACH(netif) { + if ((netif->rs_count > 0) && netif_is_up(netif) && + netif_is_link_up(netif) && + !ip6_addr_isinvalid(netif_ip6_addr_state(netif, 0)) && + !ip6_addr_isduplicated(netif_ip6_addr_state(netif, 0))) { + if (nd6_send_rs(netif) == ERR_OK) { + netif->rs_count--; + } + } + } + } else { + nd6_tmr_rs_reduction--; + } +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +} + +/** Send a neighbor solicitation message for a specific neighbor cache entry + * + * @param entry the neighbor cache entry for which to send the message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags) +{ + nd6_send_ns(entry->netif, &entry->next_hop_address, flags); +} + +/** + * Send a neighbor solicitation message + * + * @param netif the netif on which to send the message + * @param target_addr the IPv6 target address for the ND message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags) +{ + struct ns_header *ns_hdr; + struct pbuf *p; + const ip6_addr_t *src_addr; + u16_t lladdr_opt_len; + + LWIP_ASSERT("target address is required", target_addr != NULL); + + if (!(flags & ND6_SEND_FLAG_ANY_SRC) && + ip6_addr_isvalid(netif_ip6_addr_state(netif,0))) { + /* Use link-local address as source address. */ + src_addr = netif_ip6_addr(netif, 0); + /* calculate option length (in 8-byte-blocks) */ + lladdr_opt_len = ((netif->hwaddr_len + 2) + 7) >> 3; + } else { + src_addr = IP6_ADDR_ANY6; + /* Option "MUST NOT be included when the source IP address is the unspecified address." */ + lladdr_opt_len = 0; + } + + /* Allocate a packet. */ + p = pbuf_alloc(PBUF_IP, sizeof(struct ns_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Set fields. */ + ns_hdr = (struct ns_header *)p->payload; + + ns_hdr->type = ICMP6_TYPE_NS; + ns_hdr->code = 0; + ns_hdr->chksum = 0; + ns_hdr->reserved = 0; + ip6_addr_copy_to_packed(ns_hdr->target_address, *target_addr); + + if (lladdr_opt_len != 0) { + struct lladdr_option *lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header)); + lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + } + + /* Generate the solicited node address for the target address. */ + if (flags & ND6_SEND_FLAG_MULTICAST_DEST) { + ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + target_addr = &multicast_address; + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + ns_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + target_addr); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? 
NULL : src_addr, target_addr, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); +} + +/** + * Send a neighbor advertisement message + * + * @param netif the netif on which to send the message + * @param target_addr the IPv6 target address for the ND message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags) +{ + struct na_header *na_hdr; + struct lladdr_option *lladdr_opt; + struct pbuf *p; + const ip6_addr_t *src_addr; + const ip6_addr_t *dest_addr; + u16_t lladdr_opt_len; + + LWIP_ASSERT("target address is required", target_addr != NULL); + + /* Use link-local address as source address. */ + /* src_addr = netif_ip6_addr(netif, 0); */ + /* Use target address as source address. */ + src_addr = target_addr; + + /* Allocate a packet. */ + lladdr_opt_len = ((netif->hwaddr_len + 2) >> 3) + (((netif->hwaddr_len + 2) & 0x07) ? 1 : 0); + p = pbuf_alloc(PBUF_IP, sizeof(struct na_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Set fields. */ + na_hdr = (struct na_header *)p->payload; + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + na_hdr->type = ICMP6_TYPE_NA; + na_hdr->code = 0; + na_hdr->chksum = 0; + na_hdr->flags = flags & 0xf0; + na_hdr->reserved[0] = 0; + na_hdr->reserved[1] = 0; + na_hdr->reserved[2] = 0; + ip6_addr_copy_to_packed(na_hdr->target_address, *target_addr); + + lladdr_opt->type = ND6_OPTION_TYPE_TARGET_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + + /* Generate the solicited node address for the target address. */ + if (flags & ND6_SEND_FLAG_MULTICAST_DEST) { + ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + dest_addr = &multicast_address; + } else if (flags & ND6_SEND_FLAG_ALLNODES_DEST) { + ip6_addr_set_allnodes_linklocal(&multicast_address); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + dest_addr = &multicast_address; + } else { + dest_addr = ip6_current_src_addr(); + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + na_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + dest_addr); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + ip6_output_if(p, src_addr, dest_addr, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); +} + +#if LWIP_IPV6_SEND_ROUTER_SOLICIT +/** + * Send a router solicitation message + * + * @param netif the netif on which to send the message + */ +static err_t +nd6_send_rs(struct netif *netif) +{ + struct rs_header *rs_hdr; + struct lladdr_option *lladdr_opt; + struct pbuf *p; + const ip6_addr_t *src_addr; + err_t err; + u16_t lladdr_opt_len = 0; + + /* Link-local source address, or unspecified address? */ + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { + src_addr = netif_ip6_addr(netif, 0); + } else { + src_addr = IP6_ADDR_ANY6; + } + + /* Generate the all routers target address. */ + ip6_addr_set_allrouters_linklocal(&multicast_address); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + + /* Allocate a packet. */ + if (src_addr != IP6_ADDR_ANY6) { + lladdr_opt_len = ((netif->hwaddr_len + 2) >> 3) + (((netif->hwaddr_len + 2) & 0x07) ? 
1 : 0); + } + p = pbuf_alloc(PBUF_IP, sizeof(struct rs_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return ERR_BUF; + } + + /* Set fields. */ + rs_hdr = (struct rs_header *)p->payload; + + rs_hdr->type = ICMP6_TYPE_RS; + rs_hdr->code = 0; + rs_hdr->chksum = 0; + rs_hdr->reserved = 0; + + if (src_addr != IP6_ADDR_ANY6) { + /* Include our hw address. */ + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct rs_header)); + lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + rs_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + &multicast_address); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + + err = ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? NULL : src_addr, &multicast_address, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); + + return err; +} +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +/** + * Search for a neighbor cache entry + * + * @param ip6addr the IPv6 address of the neighbor + * @return The neighbor cache entry index that matched, -1 if no + * entry is found + */ +static s8_t +nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr) +{ + s8_t i; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (ip6_addr_cmp(ip6addr, &(neighbor_cache[i].next_hop_address))) { + return i; + } + } + return -1; +} + +/** + * Create a new neighbor cache entry. + * + * If no unused entry is found, will try to recycle an old entry + * according to ad-hoc "age" heuristic. + * + * @return The neighbor cache entry index that was created, -1 if no + * entry could be created + */ +static s8_t +nd6_new_neighbor_cache_entry(void) +{ + s8_t i; + s8_t j; + u32_t time; + + + /* First, try to find an empty entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (neighbor_cache[i].state == ND6_NO_ENTRY) { + return i; + } + } + + /* We need to recycle an entry. in general, do not recycle if it is a router. */ + + /* Next, try to find a Stale entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_STALE) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find a Probe entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_PROBE) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find a Delayed entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_DELAY) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find the oldest reachable entry. */ + time = 0xfffffffful; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_REACHABLE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.reachable_time < time) { + j = i; + time = neighbor_cache[i].counter.reachable_time; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* Next, find oldest incomplete entry without queued packets. 
*/ + time = 0; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ( + (neighbor_cache[i].q == NULL) && + (neighbor_cache[i].state == ND6_INCOMPLETE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.probes_sent >= time) { + j = i; + time = neighbor_cache[i].counter.probes_sent; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* Next, find oldest incomplete entry with queued packets. */ + time = 0; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_INCOMPLETE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.probes_sent >= time) { + j = i; + time = neighbor_cache[i].counter.probes_sent; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* No more entries to try. */ + return -1; +} + +/** + * Will free any resources associated with a neighbor cache + * entry, and will mark it as unused. + * + * @param i the neighbor cache entry index to free + */ +static void +nd6_free_neighbor_cache_entry(s8_t i) +{ + if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) { + return; + } + if (neighbor_cache[i].isrouter) { + /* isrouter needs to be cleared before deleting a neighbor cache entry */ + return; + } + + /* Free any queued packets. */ + if (neighbor_cache[i].q != NULL) { + nd6_free_q(neighbor_cache[i].q); + neighbor_cache[i].q = NULL; + } + + neighbor_cache[i].state = ND6_NO_ENTRY; + neighbor_cache[i].isrouter = 0; + neighbor_cache[i].netif = NULL; + neighbor_cache[i].counter.reachable_time = 0; + ip6_addr_set_zero(&(neighbor_cache[i].next_hop_address)); +} + +/** + * Search for a destination cache entry + * + * @param ip6addr the IPv6 address of the destination + * @return The destination cache entry index that matched, -1 if no + * entry is found + */ +static s16_t +nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr) +{ + s16_t i; + + IP6_ADDR_ZONECHECK(ip6addr); + + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (ip6_addr_cmp(ip6addr, &(destination_cache[i].destination_addr))) { + return i; + } + } + return -1; +} + +/** + * Create a new destination cache entry. If no unused entry is found, + * will recycle oldest entry. + * + * @return The destination cache entry index that was created, -1 if no + * entry was created + */ +static s16_t +nd6_new_destination_cache_entry(void) +{ + s16_t i, j; + u32_t age; + + /* Find an empty entry. */ + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (ip6_addr_isany(&(destination_cache[i].destination_addr))) { + return i; + } + } + + /* Find oldest entry. */ + age = 0; + j = LWIP_ND6_NUM_DESTINATIONS - 1; + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (destination_cache[i].age > age) { + j = i; + } + } + + return j; +} + +/** + * Clear the destination cache. + * + * This operation may be necessary for consistency in the light of changing + * local addresses and/or use of the gateway hook. + */ +void +nd6_clear_destination_cache(void) +{ + int i; + + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + ip6_addr_set_any(&destination_cache[i].destination_addr); + } +} + +/** + * Determine whether an address matches an on-link prefix or the subnet of a + * statically assigned address. + * + * @param ip6addr the IPv6 address to match + * @return 1 if the address is on-link, 0 otherwise + */ +static int +nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif) +{ + s8_t i; + + /* Check to see if the address matches an on-link prefix. 
*/ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if ((prefix_list[i].netif == netif) && + (prefix_list[i].invalidation_timer > 0) && + ip6_addr_netcmp(ip6addr, &(prefix_list[i].prefix))) { + return 1; + } + } + /* Check to see if address prefix matches a manually configured (= static) + * address. Static addresses have an implied /64 subnet assignment. Dynamic + * addresses (from autoconfiguration) have no implied subnet assignment, and + * are thus effectively /128 assignments. See RFC 5942 for more on this. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + netif_ip6_addr_isstatic(netif, i) && + ip6_addr_netcmp(ip6addr, netif_ip6_addr(netif, i))) { + return 1; + } + } + return 0; +} + +/** + * Select a default router for a destination. + * + * This function is used both for routing and for finding a next-hop target for + * a packet. In the former case, the given netif is NULL, and the returned + * router entry must be for a netif suitable for sending packets (up, link up). + * In the latter case, the given netif is not NULL and restricts router choice. + * + * @param ip6addr the destination address + * @param netif the netif for the outgoing packet, if known + * @return the default router entry index, or -1 if no suitable + * router is found + */ +static s8_t +nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif) +{ + struct netif *router_netif; + s8_t i, j, valid_router; + static s8_t last_router; + + LWIP_UNUSED_ARG(ip6addr); /* @todo match preferred routes!! (must implement ND6_OPTION_TYPE_ROUTE_INFO) */ + + /* @todo: implement default router preference */ + + /* Look for valid routers. A reachable router is preferred. */ + valid_router = -1; + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + /* Is the router netif both set and appropriate? */ + if (default_router_list[i].neighbor_entry != NULL) { + router_netif = default_router_list[i].neighbor_entry->netif; + if ((router_netif != NULL) && (netif != NULL ? netif == router_netif : + (netif_is_up(router_netif) && netif_is_link_up(router_netif)))) { + /* Is the router valid, i.e., reachable or probably reachable as per + * RFC 4861 Sec. 6.3.6? Note that we will never return a router that + * has no neighbor cache entry, due to the netif association tests. */ + if (default_router_list[i].neighbor_entry->state != ND6_INCOMPLETE) { + /* Is the router known to be reachable? */ + if (default_router_list[i].neighbor_entry->state == ND6_REACHABLE) { + return i; /* valid and reachable - done! */ + } else if (valid_router < 0) { + valid_router = i; /* valid but not known to be reachable */ + } + } + } + } + } + if (valid_router >= 0) { + return valid_router; + } + + /* Look for any router for which we have any information at all. */ + /* last_router is used for round-robin selection of incomplete routers, as + * recommended in RFC 4861 Sec. 6.3.6 point (2). Advance only when picking a + * route, to select the same router as next-hop target in the common case. */ + if ((netif == NULL) && (++last_router >= LWIP_ND6_NUM_ROUTERS)) { + last_router = 0; + } + i = last_router; + for (j = 0; j < LWIP_ND6_NUM_ROUTERS; j++) { + if (default_router_list[i].neighbor_entry != NULL) { + router_netif = default_router_list[i].neighbor_entry->netif; + if ((router_netif != NULL) && (netif != NULL ? 
netif == router_netif : + (netif_is_up(router_netif) && netif_is_link_up(router_netif)))) { + return i; + } + } + if (++i >= LWIP_ND6_NUM_ROUTERS) { + i = 0; + } + } + + /* no suitable router found. */ + return -1; +} + +/** + * Find a router-announced route to the given destination. This route may be + * based on an on-link prefix or a default router. + * + * If a suitable route is found, the returned netif is guaranteed to be in a + * suitable state (up, link up) to be used for packet transmission. + * + * @param ip6addr the destination IPv6 address + * @return the netif to use for the destination, or NULL if none found + */ +struct netif * +nd6_find_route(const ip6_addr_t *ip6addr) +{ + struct netif *netif; + s8_t i; + + /* @todo decide if it makes sense to check the destination cache first */ + + /* Check if there is a matching on-link prefix. There may be multiple + * matches. Pick the first one that is associated with a suitable netif. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + netif = prefix_list[i].netif; + if ((netif != NULL) && ip6_addr_netcmp(&prefix_list[i].prefix, ip6addr) && + netif_is_up(netif) && netif_is_link_up(netif)) { + return netif; + } + } + + /* No on-link prefix match. Find a router that can forward the packet. */ + i = nd6_select_router(ip6addr, NULL); + if (i >= 0) { + LWIP_ASSERT("selected router must have a neighbor entry", + default_router_list[i].neighbor_entry != NULL); + return default_router_list[i].neighbor_entry->netif; + } + + return NULL; +} + +/** + * Find an entry for a default router. + * + * @param router_addr the IPv6 address of the router + * @param netif the netif on which the router is found, if known + * @return the index of the router entry, or -1 if not found + */ +static s8_t +nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif) +{ + s8_t i; + + IP6_ADDR_ZONECHECK_NETIF(router_addr, netif); + + /* Look for router. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if ((default_router_list[i].neighbor_entry != NULL) && + ((netif != NULL) ? netif == default_router_list[i].neighbor_entry->netif : 1) && + ip6_addr_cmp(router_addr, &(default_router_list[i].neighbor_entry->next_hop_address))) { + return i; + } + } + + /* router not found. */ + return -1; +} + +/** + * Create a new entry for a default router. + * + * @param router_addr the IPv6 address of the router + * @param netif the netif on which the router is connected, if known + * @return the index on the router table, or -1 if could not be created + */ +static s8_t +nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif) +{ + s8_t router_index; + s8_t free_router_index; + s8_t neighbor_index; + + IP6_ADDR_ZONECHECK_NETIF(router_addr, netif); + + /* Do we have a neighbor entry for this router? */ + neighbor_index = nd6_find_neighbor_cache_entry(router_addr); + if (neighbor_index < 0) { + /* Create a neighbor entry for this router. */ + neighbor_index = nd6_new_neighbor_cache_entry(); + if (neighbor_index < 0) { + /* Could not create neighbor entry for this router. */ + return -1; + } + ip6_addr_set(&(neighbor_cache[neighbor_index].next_hop_address), router_addr); + neighbor_cache[neighbor_index].netif = netif; + neighbor_cache[neighbor_index].q = NULL; + neighbor_cache[neighbor_index].state = ND6_INCOMPLETE; + neighbor_cache[neighbor_index].counter.probes_sent = 1; + nd6_send_neighbor_cache_probe(&neighbor_cache[neighbor_index], ND6_SEND_FLAG_MULTICAST_DEST); + } + + /* Mark neighbor as router. 
*/ + neighbor_cache[neighbor_index].isrouter = 1; + + /* Look for empty entry. */ + free_router_index = LWIP_ND6_NUM_ROUTERS; + for (router_index = LWIP_ND6_NUM_ROUTERS - 1; router_index >= 0; router_index--) { + /* check if router already exists (this is a special case for 2 netifs on the same subnet + - e.g. wifi and cable) */ + if(default_router_list[router_index].neighbor_entry == &(neighbor_cache[neighbor_index])){ + return router_index; + } + if (default_router_list[router_index].neighbor_entry == NULL) { + /* remember lowest free index to create a new entry */ + free_router_index = router_index; + } + } + if (free_router_index < LWIP_ND6_NUM_ROUTERS) { + default_router_list[free_router_index].neighbor_entry = &(neighbor_cache[neighbor_index]); + return free_router_index; + } + + /* Could not create a router entry. */ + + /* Mark neighbor entry as not-router. Entry might be useful as neighbor still. */ + neighbor_cache[neighbor_index].isrouter = 0; + + /* router not found. */ + return -1; +} + +/** + * Find the cached entry for an on-link prefix. + * + * @param prefix the IPv6 prefix that is on-link + * @param netif the netif on which the prefix is on-link + * @return the index on the prefix table, or -1 if not found + */ +static s8_t +nd6_get_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif) +{ + s8_t i; + + /* Look for prefix in list. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + if ((ip6_addr_netcmp(&(prefix_list[i].prefix), prefix)) && + (prefix_list[i].netif == netif)) { + return i; + } + } + + /* Entry not available. */ + return -1; +} + +/** + * Creates a new entry for an on-link prefix. + * + * @param prefix the IPv6 prefix that is on-link + * @param netif the netif on which the prefix is on-link + * @return the index on the prefix table, or -1 if not created + */ +static s8_t +nd6_new_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif) +{ + s8_t i; + + /* Create new entry. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + if ((prefix_list[i].netif == NULL) || + (prefix_list[i].invalidation_timer == 0)) { + /* Found empty prefix entry. */ + prefix_list[i].netif = netif; + ip6_addr_set(&(prefix_list[i].prefix), prefix); + return i; + } + } + + /* Entry not available. */ + return -1; +} + +/** + * Determine the next hop for a destination. Will determine if the + * destination is on-link, else a suitable on-link router is selected. + * + * The last entry index is cached for fast entry search. + * + * @param ip6addr the destination address + * @param netif the netif on which the packet will be sent + * @return the neighbor cache entry for the next hop, ERR_RTE if no + * suitable next hop was found, ERR_MEM if no cache entry + * could be created + */ +static s8_t +nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif) +{ +#ifdef LWIP_HOOK_ND6_GET_GW + const ip6_addr_t *next_hop_addr; +#endif /* LWIP_HOOK_ND6_GET_GW */ + s8_t i; + s16_t dst_idx; + + IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif); + +#if LWIP_NETIF_HWADDRHINT + if (netif->hints != NULL) { + /* per-pcb cached entry was given */ + netif_addr_idx_t addr_hint = netif->hints->addr_hint; + if (addr_hint < LWIP_ND6_NUM_DESTINATIONS) { + nd6_cached_destination_index = addr_hint; + } + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* Look for ip6addr in destination cache. */ + if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) { + /* the cached entry index is the right one! */ + /* do nothing. 
*/ + ND6_STATS_INC(nd6.cachehit); + } else { + /* Search destination cache. */ + dst_idx = nd6_find_destination_cache_entry(ip6addr); + if (dst_idx >= 0) { + /* found destination entry. make it our new cached index. */ + LWIP_ASSERT("type overflow", (size_t)dst_idx < NETIF_ADDR_IDX_MAX); + nd6_cached_destination_index = (netif_addr_idx_t)dst_idx; + } else { + /* Not found. Create a new destination entry. */ + dst_idx = nd6_new_destination_cache_entry(); + if (dst_idx >= 0) { + /* got new destination entry. make it our new cached index. */ + LWIP_ASSERT("type overflow", (size_t)dst_idx < NETIF_ADDR_IDX_MAX); + nd6_cached_destination_index = (netif_addr_idx_t)dst_idx; + } else { + /* Could not create a destination cache entry. */ + return ERR_MEM; + } + + /* Copy dest address to destination cache. */ + ip6_addr_set(&(destination_cache[nd6_cached_destination_index].destination_addr), ip6addr); + + /* Now find the next hop. is it a neighbor? */ + if (ip6_addr_islinklocal(ip6addr) || + nd6_is_prefix_in_netif(ip6addr, netif)) { + /* Destination in local link. */ + destination_cache[nd6_cached_destination_index].pmtu = netif_mtu6(netif); + ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, destination_cache[nd6_cached_destination_index].destination_addr); +#ifdef LWIP_HOOK_ND6_GET_GW + } else if ((next_hop_addr = LWIP_HOOK_ND6_GET_GW(netif, ip6addr)) != NULL) { + /* Next hop for destination provided by hook function. */ + destination_cache[nd6_cached_destination_index].pmtu = netif->mtu; + ip6_addr_set(&destination_cache[nd6_cached_destination_index].next_hop_addr, next_hop_addr); +#endif /* LWIP_HOOK_ND6_GET_GW */ + } else { + /* We need to select a router. */ + i = nd6_select_router(ip6addr, netif); + if (i < 0) { + /* No router found. */ + ip6_addr_set_any(&(destination_cache[nd6_cached_destination_index].destination_addr)); + return ERR_RTE; + } + destination_cache[nd6_cached_destination_index].pmtu = netif_mtu6(netif); /* Start with netif mtu, correct through ICMPv6 if necessary */ + ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, default_router_list[i].neighbor_entry->next_hop_address); + } + } + } + +#if LWIP_NETIF_HWADDRHINT + if (netif->hints != NULL) { + /* per-pcb cached entry was given */ + netif->hints->addr_hint = nd6_cached_destination_index; + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* Look in neighbor cache for the next-hop address. */ + if (ip6_addr_cmp(&(destination_cache[nd6_cached_destination_index].next_hop_addr), + &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) { + /* Cache hit. */ + /* Do nothing. */ + ND6_STATS_INC(nd6.cachehit); + } else { + i = nd6_find_neighbor_cache_entry(&(destination_cache[nd6_cached_destination_index].next_hop_addr)); + if (i >= 0) { + /* Found a matching record, make it new cached entry. */ + nd6_cached_neighbor_index = i; + } else { + /* Neighbor not in cache. Make a new entry. */ + i = nd6_new_neighbor_cache_entry(); + if (i >= 0) { + /* got new neighbor entry. make it our new cached index. */ + nd6_cached_neighbor_index = i; + } else { + /* Could not create a neighbor cache entry. */ + return ERR_MEM; + } + + /* Initialize fields. 
*/ + ip6_addr_copy(neighbor_cache[i].next_hop_address, + destination_cache[nd6_cached_destination_index].next_hop_addr); + neighbor_cache[i].isrouter = 0; + neighbor_cache[i].netif = netif; + neighbor_cache[i].state = ND6_INCOMPLETE; + neighbor_cache[i].counter.probes_sent = 1; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST); + } + } + + /* Reset this destination's age. */ + destination_cache[nd6_cached_destination_index].age = 0; + + return nd6_cached_neighbor_index; +} + +/** + * Queue a packet for a neighbor. + * + * @param neighbor_index the index in the neighbor cache table + * @param q packet to be queued + * @return ERR_OK if succeeded, ERR_MEM if out of memory + */ +static err_t +nd6_queue_packet(s8_t neighbor_index, struct pbuf *q) +{ + err_t result = ERR_MEM; + struct pbuf *p; + int copy_needed = 0; +#if LWIP_ND6_QUEUEING + struct nd6_q_entry *new_entry, *r; +#endif /* LWIP_ND6_QUEUEING */ + + if ((neighbor_index < 0) || (neighbor_index >= LWIP_ND6_NUM_NEIGHBORS)) { + return ERR_ARG; + } + + /* IF q includes a pbuf that must be copied, we have to copy the whole chain + * into a new PBUF_RAM. See the definition of PBUF_NEEDS_COPY for details. */ + p = q; + while (p) { + if (PBUF_NEEDS_COPY(p)) { + copy_needed = 1; + break; + } + p = p->next; + } + if (copy_needed) { + /* copy the whole packet into new pbufs */ + p = pbuf_clone(PBUF_LINK, PBUF_RAM, q); + while ((p == NULL) && (neighbor_cache[neighbor_index].q != NULL)) { + /* Free oldest packet (as per RFC recommendation) */ +#if LWIP_ND6_QUEUEING + r = neighbor_cache[neighbor_index].q; + neighbor_cache[neighbor_index].q = r->next; + r->next = NULL; + nd6_free_q(r); +#else /* LWIP_ND6_QUEUEING */ + pbuf_free(neighbor_cache[neighbor_index].q); + neighbor_cache[neighbor_index].q = NULL; +#endif /* LWIP_ND6_QUEUEING */ + p = pbuf_clone(PBUF_LINK, PBUF_RAM, q); + } + } else { + /* referencing the old pbuf is enough */ + p = q; + pbuf_ref(p); + } + /* packet was copied/ref'd? */ + if (p != NULL) { + /* queue packet ... */ +#if LWIP_ND6_QUEUEING + /* allocate a new nd6 queue entry */ + new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE); + if ((new_entry == NULL) && (neighbor_cache[neighbor_index].q != NULL)) { + /* Free oldest packet (as per RFC recommendation) */ + r = neighbor_cache[neighbor_index].q; + neighbor_cache[neighbor_index].q = r->next; + r->next = NULL; + nd6_free_q(r); + new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE); + } + if (new_entry != NULL) { + new_entry->next = NULL; + new_entry->p = p; + if (neighbor_cache[neighbor_index].q != NULL) { + /* queue was already existent, append the new entry to the end */ + r = neighbor_cache[neighbor_index].q; + while (r->next != NULL) { + r = r->next; + } + r->next = new_entry; + } else { + /* queue did not exist, first item in queue */ + neighbor_cache[neighbor_index].q = new_entry; + } + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index)); + result = ERR_OK; + } else { + /* the pool MEMP_ND6_QUEUE is empty */ + pbuf_free(p); + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)p)); + /* { result == ERR_MEM } through initialization */ + } +#else /* LWIP_ND6_QUEUEING */ + /* Queue a single packet. If an older packet is already queued, free it as per RFC. 
*/ + if (neighbor_cache[neighbor_index].q != NULL) { + pbuf_free(neighbor_cache[neighbor_index].q); + } + neighbor_cache[neighbor_index].q = p; + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index)); + result = ERR_OK; +#endif /* LWIP_ND6_QUEUEING */ + } else { + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)q)); + /* { result == ERR_MEM } through initialization */ + } + + return result; +} + +#if LWIP_ND6_QUEUEING +/** + * Free a complete queue of nd6 q entries + * + * @param q a queue of nd6_q_entry to free + */ +static void +nd6_free_q(struct nd6_q_entry *q) +{ + struct nd6_q_entry *r; + LWIP_ASSERT("q != NULL", q != NULL); + LWIP_ASSERT("q->p != NULL", q->p != NULL); + while (q) { + r = q; + q = q->next; + LWIP_ASSERT("r->p != NULL", (r->p != NULL)); + pbuf_free(r->p); + memp_free(MEMP_ND6_QUEUE, r); + } +} +#endif /* LWIP_ND6_QUEUEING */ + +/** + * Send queued packets for a neighbor + * + * @param i the neighbor to send packets to + */ +static void +nd6_send_q(s8_t i) +{ + struct ip6_hdr *ip6hdr; + ip6_addr_t dest; +#if LWIP_ND6_QUEUEING + struct nd6_q_entry *q; +#endif /* LWIP_ND6_QUEUEING */ + + if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) { + return; + } + +#if LWIP_ND6_QUEUEING + while (neighbor_cache[i].q != NULL) { + /* remember first in queue */ + q = neighbor_cache[i].q; + /* pop first item off the queue */ + neighbor_cache[i].q = q->next; + /* Get ipv6 header. */ + ip6hdr = (struct ip6_hdr *)(q->p->payload); + /* Create an aligned copy. */ + ip6_addr_copy_from_packed(dest, ip6hdr->dest); + /* Restore the zone, if applicable. */ + ip6_addr_assign_zone(&dest, IP6_UNKNOWN, neighbor_cache[i].netif); + /* send the queued IPv6 packet */ + (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, q->p, &dest); + /* free the queued IP packet */ + pbuf_free(q->p); + /* now queue entry can be freed */ + memp_free(MEMP_ND6_QUEUE, q); + } +#else /* LWIP_ND6_QUEUEING */ + if (neighbor_cache[i].q != NULL) { + /* Get ipv6 header. */ + ip6hdr = (struct ip6_hdr *)(neighbor_cache[i].q->payload); + /* Create an aligned copy. */ + ip6_addr_copy_from_packed(dest, ip6hdr->dest); + /* Restore the zone, if applicable. */ + ip6_addr_assign_zone(&dest, IP6_UNKNOWN, neighbor_cache[i].netif); + /* send the queued IPv6 packet */ + (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, neighbor_cache[i].q, &dest); + /* free the queued IP packet */ + pbuf_free(neighbor_cache[i].q); + neighbor_cache[i].q = NULL; + } +#endif /* LWIP_ND6_QUEUEING */ +} + +/** + * A packet is to be transmitted to a specific IPv6 destination on a specific + * interface. Check if we can find the hardware address of the next hop to use + * for the packet. If so, give the hardware address to the caller, which should + * use it to send the packet right away. Otherwise, enqueue the packet for + * later transmission while looking up the hardware address, if possible. + * + * As such, this function returns one of three different possible results: + * + * - ERR_OK with a non-NULL 'hwaddrp': the caller should send the packet now. + * - ERR_OK with a NULL 'hwaddrp': the packet has been enqueued for later. + * - not ERR_OK: something went wrong; forward the error upward in the stack. + * + * @param netif The lwIP network interface on which the IP packet will be sent. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The destination IPv6 address of the packet. 
+ * @param hwaddrp On success, filled with a pointer to a HW address or NULL (meaning + * the packet has been queued). + * @return + * - ERR_OK on success, ERR_RTE if no route was found for the packet, + * or ERR_MEM if low memory conditions prohibit sending the packet at all. + */ +err_t +nd6_get_next_hop_addr_or_queue(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr, const u8_t **hwaddrp) +{ + s8_t i; + + /* Get next hop record. */ + i = nd6_get_next_hop_entry(ip6addr, netif); + if (i < 0) { + /* failed to get a next hop neighbor record. */ + return i; + } + + /* Now that we have a destination record, send or queue the packet. */ + if (neighbor_cache[i].state == ND6_STALE) { + /* Switch to delay state. */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + /* @todo should we send or queue if PROBE? send for now, to let unicast NS pass. */ + if ((neighbor_cache[i].state == ND6_REACHABLE) || + (neighbor_cache[i].state == ND6_DELAY) || + (neighbor_cache[i].state == ND6_PROBE)) { + + /* Tell the caller to send out the packet now. */ + *hwaddrp = neighbor_cache[i].lladdr; + return ERR_OK; + } + + /* We should queue packet on this interface. */ + *hwaddrp = NULL; + return nd6_queue_packet(i, q); +} + + +/** + * Get the Path MTU for a destination. + * + * @param ip6addr the destination address + * @param netif the netif on which the packet will be sent + * @return the Path MTU, if known, or the netif default MTU + */ +u16_t +nd6_get_destination_mtu(const ip6_addr_t *ip6addr, struct netif *netif) +{ + s16_t i; + + i = nd6_find_destination_cache_entry(ip6addr); + if (i >= 0) { + if (destination_cache[i].pmtu > 0) { + return destination_cache[i].pmtu; + } + } + + if (netif != NULL) { + return netif_mtu6(netif); + } + + return 1280; /* Minimum MTU */ +} + + +#if LWIP_ND6_TCP_REACHABILITY_HINTS +/** + * Provide the Neighbor discovery process with a hint that a + * destination is reachable. Called by tcp_receive when ACKs are + * received or sent (as per RFC). This is useful to avoid sending + * NS messages every 30 seconds. + * + * @param ip6addr the destination address which is known to be reachable + * by an upper layer protocol (TCP) + */ +void +nd6_reachability_hint(const ip6_addr_t *ip6addr) +{ + s8_t i; + s16_t dst_idx; + + /* Find destination in cache. */ + if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) { + dst_idx = nd6_cached_destination_index; + ND6_STATS_INC(nd6.cachehit); + } else { + dst_idx = nd6_find_destination_cache_entry(ip6addr); + } + if (dst_idx < 0) { + return; + } + + /* Find next hop neighbor in cache. */ + if (ip6_addr_cmp(&(destination_cache[dst_idx].next_hop_addr), &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) { + i = nd6_cached_neighbor_index; + ND6_STATS_INC(nd6.cachehit); + } else { + i = nd6_find_neighbor_cache_entry(&(destination_cache[dst_idx].next_hop_addr)); + } + if (i < 0) { + return; + } + + /* For safety: don't set as reachable if we don't have a LL address yet. Misuse protection. */ + if (neighbor_cache[i].state == ND6_INCOMPLETE || neighbor_cache[i].state == ND6_NO_ENTRY) { + return; + } + + /* Set reachability state. */ + neighbor_cache[i].state = ND6_REACHABLE; + neighbor_cache[i].counter.reachable_time = reachable_time; +} +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ + +/** + * Remove all prefix, neighbor_cache and router entries of the specified netif. 
+ * + * @param netif points to a network interface + */ +void +nd6_cleanup_netif(struct netif *netif) +{ + u8_t i; + s8_t router_index; + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if (prefix_list[i].netif == netif) { + prefix_list[i].netif = NULL; + } + } + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (neighbor_cache[i].netif == netif) { + for (router_index = 0; router_index < LWIP_ND6_NUM_ROUTERS; router_index++) { + if (default_router_list[router_index].neighbor_entry == &neighbor_cache[i]) { + default_router_list[router_index].neighbor_entry = NULL; + default_router_list[router_index].flags = 0; + } + } + neighbor_cache[i].isrouter = 0; + nd6_free_neighbor_cache_entry(i); + } + } + /* Clear the destination cache, since many entries may now have become + * invalid for one of several reasons. As destination cache entries have no + * netif association, use a sledgehammer approach (this can be improved). */ + nd6_clear_destination_cache(); +} + +#if LWIP_IPV6_MLD +/** + * The state of a local IPv6 address entry is about to change. If needed, join + * or leave the solicited-node multicast group for the address. + * + * @param netif The netif that owns the address. + * @param addr_idx The index of the address. + * @param new_state The new (IP6_ADDR_) state for the address. + */ +void +nd6_adjust_mld_membership(struct netif *netif, s8_t addr_idx, u8_t new_state) +{ + u8_t old_state, old_member, new_member; + + old_state = netif_ip6_addr_state(netif, addr_idx); + + /* Determine whether we were, and should be, a member of the solicited-node + * multicast group for this address. For tentative addresses, the group is + * not joined until the address enters the TENTATIVE_1 (or VALID) state. */ + old_member = (old_state != IP6_ADDR_INVALID && old_state != IP6_ADDR_DUPLICATED && old_state != IP6_ADDR_TENTATIVE); + new_member = (new_state != IP6_ADDR_INVALID && new_state != IP6_ADDR_DUPLICATED && new_state != IP6_ADDR_TENTATIVE); + + if (old_member != new_member) { + ip6_addr_set_solicitednode(&multicast_address, netif_ip6_addr(netif, addr_idx)->addr[3]); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + + if (new_member) { + mld6_joingroup_netif(netif, &multicast_address); + } else { + mld6_leavegroup_netif(netif, &multicast_address); + } + } +} +#endif /* LWIP_IPV6_MLD */ + +/** Netif was added, set up, or reconnected (link up) */ +void +nd6_restart_netif(struct netif *netif) +{ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* Send Router Solicitation messages (see RFC 4861, ch. 6.3.7). */ + netif->rs_count = LWIP_ND6_MAX_MULTICAST_SOLICIT; +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ +} + +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/core/mem.c b/Middlewares/Third_Party/LwIP/src/core/mem.c new file mode 100644 index 00000000..315fb3c5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/mem.c @@ -0,0 +1,1017 @@ +/** + * @file + * Dynamic memory manager + * + * This is a lightweight replacement for the standard C library malloc(). 
+ * + * If you want to use the standard C library malloc() instead, define + * MEM_LIBC_MALLOC to 1 in your lwipopts.h + * + * To let mem_malloc() use pools (prevents fragmentation and is much faster than + * a heap but might waste some memory), define MEM_USE_POOLS to 1, define + * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list + * of pools like this (more pools can be added between _START and _END): + * + * Define three pools with sizes 256, 512, and 1512 bytes + * LWIP_MALLOC_MEMPOOL_START + * LWIP_MALLOC_MEMPOOL(20, 256) + * LWIP_MALLOC_MEMPOOL(10, 512) + * LWIP_MALLOC_MEMPOOL(5, 1512) + * LWIP_MALLOC_MEMPOOL_END + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ + +#include "lwip/opt.h" +#include "lwip/mem.h" +#include "lwip/def.h" +#include "lwip/sys.h" +#include "lwip/stats.h" +#include "lwip/err.h" + +#include + +#if MEM_LIBC_MALLOC +#include /* for malloc()/free() */ +#endif + +/* This is overridable for tests only... */ +#ifndef LWIP_MEM_ILLEGAL_FREE +#define LWIP_MEM_ILLEGAL_FREE(msg) LWIP_ASSERT(msg, 0) +#endif + +#define MEM_STATS_INC_LOCKED(x) SYS_ARCH_LOCKED(MEM_STATS_INC(x)) +#define MEM_STATS_INC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_INC_USED(x, y)) +#define MEM_STATS_DEC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_DEC_USED(x, y)) + +#if MEM_OVERFLOW_CHECK +#define MEM_SANITY_OFFSET MEM_SANITY_REGION_BEFORE_ALIGNED +#define MEM_SANITY_OVERHEAD (MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED) +#else +#define MEM_SANITY_OFFSET 0 +#define MEM_SANITY_OVERHEAD 0 +#endif + +#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK +/** + * Check if a mep element was victim of an overflow or underflow + * (e.g. 
the restricted area after/before it has been altered) + * + * @param p the mem element to check + * @param size allocated size of the element + * @param descr1 description of the element source shown on error + * @param descr2 description of the element source shown on error + */ +void +mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2) +{ +#if MEM_SANITY_REGION_AFTER_ALIGNED || MEM_SANITY_REGION_BEFORE_ALIGNED + u16_t k; + u8_t *m; + +#if MEM_SANITY_REGION_AFTER_ALIGNED > 0 + m = (u8_t *)p + size; + for (k = 0; k < MEM_SANITY_REGION_AFTER_ALIGNED; k++) { + if (m[k] != 0xcd) { + char errstr[128]; + snprintf(errstr, sizeof(errstr), "detected mem overflow in %s%s", descr1, descr2); + LWIP_ASSERT(errstr, 0); + } + } +#endif /* MEM_SANITY_REGION_AFTER_ALIGNED > 0 */ + +#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 + m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED; + for (k = 0; k < MEM_SANITY_REGION_BEFORE_ALIGNED; k++) { + if (m[k] != 0xcd) { + char errstr[128]; + snprintf(errstr, sizeof(errstr), "detected mem underflow in %s%s", descr1, descr2); + LWIP_ASSERT(errstr, 0); + } + } +#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 */ +#else + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(desc); + LWIP_UNUSED_ARG(descr); +#endif +} + +/** + * Initialize the restricted area of a mem element. + */ +void +mem_overflow_init_raw(void *p, size_t size) +{ +#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 + u8_t *m; +#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 + m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED; + memset(m, 0xcd, MEM_SANITY_REGION_BEFORE_ALIGNED); +#endif +#if MEM_SANITY_REGION_AFTER_ALIGNED > 0 + m = (u8_t *)p + size; + memset(m, 0xcd, MEM_SANITY_REGION_AFTER_ALIGNED); +#endif +#else /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */ + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(desc); +#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */ +} +#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */ + +#if MEM_LIBC_MALLOC || MEM_USE_POOLS + +/** mem_init is not used when using pools instead of a heap or using + * C library malloc(). + */ +void +mem_init(void) +{ +} + +/** mem_trim is not used when using pools instead of a heap or using + * C library malloc(): we can't free part of a pool element and the stack + * support mem_trim() to return a different pointer + */ +void * +mem_trim(void *mem, mem_size_t size) +{ + LWIP_UNUSED_ARG(size); + return mem; +} +#endif /* MEM_LIBC_MALLOC || MEM_USE_POOLS */ + +#if MEM_LIBC_MALLOC +/* lwIP heap implemented using C library malloc() */ + +/* in case C library malloc() needs extra protection, + * allow these defines to be overridden. + */ +#ifndef mem_clib_free +#define mem_clib_free free +#endif +#ifndef mem_clib_malloc +#define mem_clib_malloc malloc +#endif +#ifndef mem_clib_calloc +#define mem_clib_calloc calloc +#endif + +#if LWIP_STATS && MEM_STATS +#define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t)) +#else +#define MEM_LIBC_STATSHELPER_SIZE 0 +#endif + +/** + * Allocate a block of memory with a minimum of 'size' bytes. + * + * @param size is the minimum size of the requested block in bytes. + * @return pointer to allocated memory or NULL if no free memory was found. + * + * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT). 
+ */ +void * +mem_malloc(mem_size_t size) +{ + void *ret = mem_clib_malloc(size + MEM_LIBC_STATSHELPER_SIZE); + if (ret == NULL) { + MEM_STATS_INC_LOCKED(err); + } else { + LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret); +#if LWIP_STATS && MEM_STATS + *(mem_size_t *)ret = size; + ret = (u8_t *)ret + MEM_LIBC_STATSHELPER_SIZE; + MEM_STATS_INC_USED_LOCKED(used, size); +#endif + } + return ret; +} + +/** Put memory back on the heap + * + * @param rmem is the pointer as returned by a previous call to mem_malloc() + */ +void +mem_free(void *rmem) +{ + LWIP_ASSERT("rmem != NULL", (rmem != NULL)); + LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); +#if LWIP_STATS && MEM_STATS + rmem = (u8_t *)rmem - MEM_LIBC_STATSHELPER_SIZE; + MEM_STATS_DEC_USED_LOCKED(used, *(mem_size_t *)rmem); +#endif + mem_clib_free(rmem); +} + +#elif MEM_USE_POOLS + +/* lwIP heap implemented with different sized pools */ + +/** + * Allocate memory: determine the smallest pool that is big enough + * to contain an element of 'size' and get an element from that pool. + * + * @param size the size in bytes of the memory needed + * @return a pointer to the allocated memory or NULL if the pool is empty + */ +void * +mem_malloc(mem_size_t size) +{ + void *ret; + struct memp_malloc_helper *element = NULL; + memp_t poolnr; + mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); + + for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) { + /* is this pool big enough to hold an element of the required size + plus a struct memp_malloc_helper that saves the pool this element came from? */ + if (required_size <= memp_pools[poolnr]->size) { + element = (struct memp_malloc_helper *)memp_malloc(poolnr); + if (element == NULL) { + /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */ +#if MEM_USE_POOLS_TRY_BIGGER_POOL + /** Try a bigger pool if this one is empty! */ + if (poolnr < MEMP_POOL_LAST) { + continue; + } +#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */ + MEM_STATS_INC_LOCKED(err); + return NULL; + } + break; + } + } + if (poolnr > MEMP_POOL_LAST) { + LWIP_ASSERT("mem_malloc(): no pool is that big!", 0); + MEM_STATS_INC_LOCKED(err); + return NULL; + } + + /* save the pool number this element came from */ + element->poolnr = poolnr; + /* and return a pointer to the memory directly after the struct memp_malloc_helper */ + ret = (u8_t *)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); + +#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) + /* truncating to u16_t is safe because struct memp_desc::size is u16_t */ + element->size = (u16_t)size; + MEM_STATS_INC_USED_LOCKED(used, element->size); +#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */ +#if MEMP_OVERFLOW_CHECK + /* initialize unused memory (diff between requested size and selected pool's size) */ + memset((u8_t *)ret + size, 0xcd, memp_pools[poolnr]->size - size); +#endif /* MEMP_OVERFLOW_CHECK */ + return ret; +} + +/** + * Free memory previously allocated by mem_malloc. 
Loads the pool number + * and calls memp_free with that pool number to put the element back into + * its pool + * + * @param rmem the memory element to free + */ +void +mem_free(void *rmem) +{ + struct memp_malloc_helper *hmem; + + LWIP_ASSERT("rmem != NULL", (rmem != NULL)); + LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); + + /* get the original struct memp_malloc_helper */ + /* cast through void* to get rid of alignment warnings */ + hmem = (struct memp_malloc_helper *)(void *)((u8_t *)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper))); + + LWIP_ASSERT("hmem != NULL", (hmem != NULL)); + LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem))); + LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX)); + + MEM_STATS_DEC_USED_LOCKED(used, hmem->size); +#if MEMP_OVERFLOW_CHECK + { + u16_t i; + LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size", + hmem->size <= memp_pools[hmem->poolnr]->size); + /* check that unused memory remained untouched (diff between requested size and selected pool's size) */ + for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) { + u8_t data = *((u8_t *)rmem + i); + LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd); + } + } +#endif /* MEMP_OVERFLOW_CHECK */ + + /* and put it in the pool we saved earlier */ + memp_free(hmem->poolnr, hmem); +} + +#else /* MEM_USE_POOLS */ +/* lwIP replacement for your libc malloc() */ + +/** + * The heap is made up as a list of structs of this type. + * This does not have to be aligned since for getting its size, + * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns. + */ +struct mem { + /** index (-> ram[next]) of the next struct */ + mem_size_t next; + /** index (-> ram[prev]) of the previous struct */ + mem_size_t prev; + /** 1: this area is used; 0: this area is unused */ + u8_t used; +#if MEM_OVERFLOW_CHECK + /** this keeps track of the user allocation size for guard checks */ + mem_size_t user_size; +#endif +}; + +/** All allocated blocks will be MIN_SIZE bytes big, at least! + * MIN_SIZE can be overridden to suit your needs. Smaller values save space, + * larger values could prevent too small blocks to fragment the RAM too much. */ +#ifndef MIN_SIZE +#define MIN_SIZE 12 +#endif /* MIN_SIZE */ +/* some alignment macros: we define them here for better source code layout */ +#define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE) +#define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem)) +#define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE) + +/** If you want to relocate the heap to external memory, simply define + * LWIP_RAM_HEAP_POINTER as a void-pointer to that location. + * If so, make sure the memory at that location is big enough (see below on + * how that space is calculated). */ +#ifndef LWIP_RAM_HEAP_POINTER +/** the heap. we need one struct mem at the end and some room for alignment */ +LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U * SIZEOF_STRUCT_MEM)); +#define LWIP_RAM_HEAP_POINTER ram_heap +#endif /* LWIP_RAM_HEAP_POINTER */ + +/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */ +static u8_t *ram; +/** the last entry, always unused! */ +static struct mem *ram_end; + +/** concurrent access protection */ +#if !NO_SYS +static sys_mutex_t mem_mutex; +#endif + +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + +static volatile u8_t mem_free_count; + +/* Allow mem_free from other (e.g. 
interrupt) context */ +#define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free) +#define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free) +#define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free) +#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc) +#define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc) +#define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc) +#define LWIP_MEM_LFREE_VOLATILE volatile + +#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + +/* Protect the heap only by using a mutex */ +#define LWIP_MEM_FREE_DECL_PROTECT() +#define LWIP_MEM_FREE_PROTECT() sys_mutex_lock(&mem_mutex) +#define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex) +/* mem_malloc is protected using mutex AND LWIP_MEM_ALLOC_PROTECT */ +#define LWIP_MEM_ALLOC_DECL_PROTECT() +#define LWIP_MEM_ALLOC_PROTECT() +#define LWIP_MEM_ALLOC_UNPROTECT() +#define LWIP_MEM_LFREE_VOLATILE + +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + +/** pointer to the lowest free block, this is used for faster search */ +static struct mem * LWIP_MEM_LFREE_VOLATILE lfree; + +#if MEM_SANITY_CHECK +static void mem_sanity(void); +#define MEM_SANITY() mem_sanity() +#else +#define MEM_SANITY() +#endif + +#if MEM_OVERFLOW_CHECK +static void +mem_overflow_init_element(struct mem *mem, mem_size_t user_size) +{ + void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + mem->user_size = user_size; + mem_overflow_init_raw(p, user_size); +} + +static void +mem_overflow_check_element(struct mem *mem) +{ + void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + mem_overflow_check_raw(p, mem->user_size, "heap", ""); +} +#else /* MEM_OVERFLOW_CHECK */ +#define mem_overflow_init_element(mem, size) +#define mem_overflow_check_element(mem) +#endif /* MEM_OVERFLOW_CHECK */ + +static struct mem * +ptr_to_mem(mem_size_t ptr) +{ + return (struct mem *)(void *)&ram[ptr]; +} + +static mem_size_t +mem_to_ptr(void *mem) +{ + return (mem_size_t)((u8_t *)mem - ram); +} + +/** + * "Plug holes" by combining adjacent empty struct mems. + * After this function is through, there should not exist + * one empty struct mem pointing to another empty struct mem. + * + * @param mem this points to a struct mem which just has been freed + * @internal this function is only called by mem_free() and mem_trim() + * + * This assumes access to the heap is protected by the calling function + * already. 
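+ *
+ * Illustration (block layout is hypothetical): given three adjacent regions
+ *   [A: used][B: just freed][C: unused]
+ * the forward pass merges B with C into a single unused region, and the
+ * backward pass leaves A untouched because it is still used, giving
+ *   [A: used][B+C: unused]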
+ */ +static void +plug_holes(struct mem *mem) +{ + struct mem *nmem; + struct mem *pmem; + + LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram); + LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end); + LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0); + + /* plug hole forward */ + LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED); + + nmem = ptr_to_mem(mem->next); + if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) { + /* if mem->next is unused and not end of ram, combine mem and mem->next */ + if (lfree == nmem) { + lfree = mem; + } + mem->next = nmem->next; + if (nmem->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(nmem->next)->prev = mem_to_ptr(mem); + } + } + + /* plug hole backward */ + pmem = ptr_to_mem(mem->prev); + if (pmem != mem && pmem->used == 0) { + /* if mem->prev is unused, combine mem and mem->prev */ + if (lfree == mem) { + lfree = pmem; + } + pmem->next = mem->next; + if (mem->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem->next)->prev = mem_to_ptr(pmem); + } + } +} + +/** + * Zero the heap and initialize start, end and lowest-free + */ +void +mem_init(void) +{ + struct mem *mem; + + LWIP_ASSERT("Sanity check alignment", + (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT - 1)) == 0); + + /* align the heap */ + ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER); + /* initialize the start of the heap */ + mem = (struct mem *)(void *)ram; + mem->next = MEM_SIZE_ALIGNED; + mem->prev = 0; + mem->used = 0; + /* initialize the end of the heap */ + ram_end = ptr_to_mem(MEM_SIZE_ALIGNED); + ram_end->used = 1; + ram_end->next = MEM_SIZE_ALIGNED; + ram_end->prev = MEM_SIZE_ALIGNED; + MEM_SANITY(); + + /* initialize the lowest-free pointer to the start of the heap */ + lfree = (struct mem *)(void *)ram; + + MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED); + + if (sys_mutex_new(&mem_mutex) != ERR_OK) { + LWIP_ASSERT("failed to create mem_mutex", 0); + } +} + +/* Check if a struct mem is correctly linked. + * If not, double-free is a possible reason. 
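+ * An element is treated as correctly linked when its next/prev indices stay
+ * within the heap and its neighbouring elements still point back at it (the
+ * first heap element and ram_end are handled as special cases).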
+ */ +static int +mem_link_valid(struct mem *mem) +{ + struct mem *nmem, *pmem; + mem_size_t rmem_idx; + rmem_idx = mem_to_ptr(mem); + nmem = ptr_to_mem(mem->next); + pmem = ptr_to_mem(mem->prev); + if ((mem->next > MEM_SIZE_ALIGNED) || (mem->prev > MEM_SIZE_ALIGNED) || + ((mem->prev != rmem_idx) && (pmem->next != rmem_idx)) || + ((nmem != ram_end) && (nmem->prev != rmem_idx))) { + return 0; + } + return 1; +} + +#if MEM_SANITY_CHECK +static void +mem_sanity(void) +{ + struct mem *mem; + u8_t last_used; + + /* begin with first element here */ + mem = (struct mem *)ram; + LWIP_ASSERT("heap element used valid", (mem->used == 0) || (mem->used == 1)); + last_used = mem->used; + LWIP_ASSERT("heap element prev ptr valid", mem->prev == 0); + LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next) == ptr_to_mem(mem->next))); + + /* check all elements before the end of the heap */ + for (mem = ptr_to_mem(mem->next); + ((u8_t *)mem > ram) && (mem < ram_end); + mem = ptr_to_mem(mem->next)) { + LWIP_ASSERT("heap element aligned", LWIP_MEM_ALIGN(mem) == mem); + LWIP_ASSERT("heap element prev ptr valid", mem->prev <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element prev ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->prev) == ptr_to_mem(mem->prev))); + LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next) == ptr_to_mem(mem->next))); + + if (last_used == 0) { + /* 2 unused elements in a row? */ + LWIP_ASSERT("heap element unused?", mem->used == 1); + } else { + LWIP_ASSERT("heap element unused member", (mem->used == 0) || (mem->used == 1)); + } + + LWIP_ASSERT("heap element link valid", mem_link_valid(mem)); + + /* used/unused altering */ + last_used = mem->used; + } + LWIP_ASSERT("heap end ptr sanity", mem == ptr_to_mem(MEM_SIZE_ALIGNED)); + LWIP_ASSERT("heap element used valid", mem->used == 1); + LWIP_ASSERT("heap element prev ptr valid", mem->prev == MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr valid", mem->next == MEM_SIZE_ALIGNED); +} +#endif /* MEM_SANITY_CHECK */ + +/** + * Put a struct mem back on the heap + * + * @param rmem is the data portion of a struct mem as returned by a previous + * call to mem_malloc() + */ +void +mem_free(void *rmem) +{ + struct mem *mem; + LWIP_MEM_FREE_DECL_PROTECT(); + + if (rmem == NULL) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n")); + return; + } + if ((((mem_ptr_t)rmem) & (MEM_ALIGNMENT - 1)) != 0) { + LWIP_MEM_ILLEGAL_FREE("mem_free: sanity check alignment"); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: sanity check alignment\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } + + /* Get the corresponding struct mem: */ + /* cast through void* to get rid of alignment warnings */ + mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET)); + + if ((u8_t *)mem < ram || (u8_t *)rmem + MIN_SIZE_ALIGNED > (u8_t *)ram_end) { + LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory"); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } +#if MEM_OVERFLOW_CHECK + mem_overflow_check_element(mem); +#endif + /* protect the heap from concurrent access */ + LWIP_MEM_FREE_PROTECT(); + /* mem has to 
be in a used state */ + if (!mem->used) { + LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: double free"); + LWIP_MEM_FREE_UNPROTECT(); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: double free?\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } + + if (!mem_link_valid(mem)) { + LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: non-linked: double free"); + LWIP_MEM_FREE_UNPROTECT(); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: non-linked: double free?\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } + + /* mem is now unused. */ + mem->used = 0; + + if (mem < lfree) { + /* the newly freed struct is now the lowest */ + lfree = mem; + } + + MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram))); + + /* finally, see if prev or next are free also */ + plug_holes(mem); + MEM_SANITY(); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 1; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_FREE_UNPROTECT(); +} + +/** + * Shrink memory returned by mem_malloc(). + * + * @param rmem pointer to memory allocated by mem_malloc the is to be shrinked + * @param new_size required size after shrinking (needs to be smaller than or + * equal to the previous size) + * @return for compatibility reasons: is always == rmem, at the moment + * or NULL if newsize is > old size, in which case rmem is NOT touched + * or freed! + */ +void * +mem_trim(void *rmem, mem_size_t new_size) +{ + mem_size_t size, newsize; + mem_size_t ptr, ptr2; + struct mem *mem, *mem2; + /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */ + LWIP_MEM_FREE_DECL_PROTECT(); + + /* Expand the size of the allocated memory region so that we can + adjust for alignment. */ + newsize = (mem_size_t)LWIP_MEM_ALIGN_SIZE(new_size); + if (newsize < MIN_SIZE_ALIGNED) { + /* every data block must be at least MIN_SIZE_ALIGNED long */ + newsize = MIN_SIZE_ALIGNED; + } +#if MEM_OVERFLOW_CHECK + newsize += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED; +#endif + if ((newsize > MEM_SIZE_ALIGNED) || (newsize < new_size)) { + return NULL; + } + + LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram && + (u8_t *)rmem < (u8_t *)ram_end); + + if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return rmem; + } + /* Get the corresponding struct mem ... */ + /* cast through void* to get rid of alignment warnings */ + mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET)); +#if MEM_OVERFLOW_CHECK + mem_overflow_check_element(mem); +#endif + /* ... 
and its offset pointer */ + ptr = mem_to_ptr(mem); + + size = (mem_size_t)((mem_size_t)(mem->next - ptr) - (SIZEOF_STRUCT_MEM + MEM_SANITY_OVERHEAD)); + LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size); + if (newsize > size) { + /* not supported */ + return NULL; + } + if (newsize == size) { + /* No change in size, simply return */ + return rmem; + } + + /* protect the heap from concurrent access */ + LWIP_MEM_FREE_PROTECT(); + + mem2 = ptr_to_mem(mem->next); + if (mem2->used == 0) { + /* The next struct is unused, we can simply move it at little */ + mem_size_t next; + LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED); + /* remember the old next pointer */ + next = mem2->next; + /* create new struct mem which is moved directly after the shrinked mem */ + ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize); + if (lfree == mem2) { + lfree = ptr_to_mem(ptr2); + } + mem2 = ptr_to_mem(ptr2); + mem2->used = 0; + /* restore the next pointer */ + mem2->next = next; + /* link it back to mem */ + mem2->prev = ptr; + /* link mem to it */ + mem->next = ptr2; + /* last thing to restore linked list: as we have moved mem2, + * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not + * the end of the heap */ + if (mem2->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem2->next)->prev = ptr2; + } + MEM_STATS_DEC_USED(used, (size - newsize)); + /* no need to plug holes, we've already done that */ + } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) { + /* Next struct is used but there's room for another struct mem with + * at least MIN_SIZE_ALIGNED of data. + * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem + * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED'). + * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty + * region that couldn't hold data, but when mem->next gets freed, + * the 2 regions would be combined, resulting in more free memory */ + ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize); + LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED); + mem2 = ptr_to_mem(ptr2); + if (mem2 < lfree) { + lfree = mem2; + } + mem2->used = 0; + mem2->next = mem->next; + mem2->prev = ptr; + mem->next = ptr2; + if (mem2->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem2->next)->prev = ptr2; + } + MEM_STATS_DEC_USED(used, (size - newsize)); + /* the original mem->next is used, so no need to plug holes! */ + } + /* else { + next struct mem is used but size between mem and mem2 is not big enough + to create another struct mem + -> don't do anyhting. + -> the remaining space stays unused since it is too small + } */ +#if MEM_OVERFLOW_CHECK + mem_overflow_init_element(mem, new_size); +#endif + MEM_SANITY(); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 1; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_FREE_UNPROTECT(); + return rmem; +} + +/** + * Allocate a block of memory with a minimum of 'size' bytes. + * + * @param size_in is the minimum size of the requested block in bytes. + * @return pointer to allocated memory or NULL if no free memory was found. + * + * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT). 
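+ *
+ * Worked example (assuming MEM_ALIGNMENT == 4 and the default MIN_SIZE of 12):
+ * a request for 10 bytes is rounded up to 12 by LWIP_MEM_ALIGN_SIZE(), which
+ * already satisfies MIN_SIZE_ALIGNED, so 12 data bytes plus one struct mem
+ * header are carved out of the heap (plus guard regions when
+ * MEM_OVERFLOW_CHECK is enabled).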
+ */ +void * +mem_malloc(mem_size_t size_in) +{ + mem_size_t ptr, ptr2, size; + struct mem *mem, *mem2; +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + u8_t local_mem_free_count = 0; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_ALLOC_DECL_PROTECT(); + + if (size_in == 0) { + return NULL; + } + + /* Expand the size of the allocated memory region so that we can + adjust for alignment. */ + size = (mem_size_t)LWIP_MEM_ALIGN_SIZE(size_in); + if (size < MIN_SIZE_ALIGNED) { + /* every data block must be at least MIN_SIZE_ALIGNED long */ + size = MIN_SIZE_ALIGNED; + } +#if MEM_OVERFLOW_CHECK + size += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED; +#endif + if ((size > MEM_SIZE_ALIGNED) || (size < size_in)) { + return NULL; + } + + /* protect the heap from concurrent access */ + sys_mutex_lock(&mem_mutex); + LWIP_MEM_ALLOC_PROTECT(); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + /* run as long as a mem_free disturbed mem_malloc or mem_trim */ + do { + local_mem_free_count = 0; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + + /* Scan through the heap searching for a free block that is big enough, + * beginning with the lowest free block. + */ + for (ptr = mem_to_ptr(lfree); ptr < MEM_SIZE_ALIGNED - size; + ptr = ptr_to_mem(ptr)->next) { + mem = ptr_to_mem(ptr); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 0; + LWIP_MEM_ALLOC_UNPROTECT(); + /* allow mem_free or mem_trim to run */ + LWIP_MEM_ALLOC_PROTECT(); + if (mem_free_count != 0) { + /* If mem_free or mem_trim have run, we have to restart since they + could have altered our current struct mem. */ + local_mem_free_count = 1; + break; + } +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + + if ((!mem->used) && + (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) { + /* mem is not used and at least perfect fit is possible: + * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */ + + if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) { + /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing + * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem') + * -> split large block, create empty remainder, + * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if + * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size, + * struct mem would fit in but no data between mem2 and mem2->next + * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty + * region that couldn't hold data, but when mem->next gets freed, + * the 2 regions would be combined, resulting in more free memory + */ + ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + size); + LWIP_ASSERT("invalid next ptr",ptr2 != MEM_SIZE_ALIGNED); + /* create mem2 struct */ + mem2 = ptr_to_mem(ptr2); + mem2->used = 0; + mem2->next = mem->next; + mem2->prev = ptr; + /* and insert it between mem and mem->next */ + mem->next = ptr2; + mem->used = 1; + + if (mem2->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem2->next)->prev = ptr2; + } + MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM)); + } else { + /* (a mem2 struct does no fit into the user data space of mem and mem->next will always + * be used at this point: if not we have 2 unused structs in a row, plug_holes should have + * take care of this). + * -> near fit or exact fit: do not split, no mem2 creation + * also can't move mem->next directly behind mem, since mem->next + * will always be used at this point! 
+ */ + mem->used = 1; + MEM_STATS_INC_USED(used, mem->next - mem_to_ptr(mem)); + } +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT +mem_malloc_adjust_lfree: +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + if (mem == lfree) { + struct mem *cur = lfree; + /* Find next free block after mem and update lowest free pointer */ + while (cur->used && cur != ram_end) { +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 0; + LWIP_MEM_ALLOC_UNPROTECT(); + /* prevent high interrupt latency... */ + LWIP_MEM_ALLOC_PROTECT(); + if (mem_free_count != 0) { + /* If mem_free or mem_trim have run, we have to restart since they + could have altered our current struct mem or lfree. */ + goto mem_malloc_adjust_lfree; + } +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + cur = ptr_to_mem(cur->next); + } + lfree = cur; + LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used))); + } + LWIP_MEM_ALLOC_UNPROTECT(); + sys_mutex_unlock(&mem_mutex); + LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.", + (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end); + LWIP_ASSERT("mem_malloc: allocated memory properly aligned.", + ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0); + LWIP_ASSERT("mem_malloc: sanity check alignment", + (((mem_ptr_t)mem) & (MEM_ALIGNMENT - 1)) == 0); + +#if MEM_OVERFLOW_CHECK + mem_overflow_init_element(mem, size_in); +#endif + MEM_SANITY(); + return (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + } + } +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + /* if we got interrupted by a mem_free, try again */ + } while (local_mem_free_count != 0); +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + MEM_STATS_INC(err); + LWIP_MEM_ALLOC_UNPROTECT(); + sys_mutex_unlock(&mem_mutex); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size)); + return NULL; +} + +#endif /* MEM_USE_POOLS */ + +#if MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) +void * +mem_calloc(mem_size_t count, mem_size_t size) +{ + return mem_clib_calloc(count, size); +} + +#else /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */ +/** + * Contiguously allocates enough space for count objects that are size bytes + * of memory each and returns a pointer to the allocated memory. + * + * The allocated memory is filled with bytes of value zero. + * + * @param count number of objects to allocate + * @param size size of the objects to allocate + * @return pointer to allocated memory / NULL pointer if there is an error + */ +void * +mem_calloc(mem_size_t count, mem_size_t size) +{ + void *p; + size_t alloc_size = (size_t)count * (size_t)size; + + if ((size_t)(mem_size_t)alloc_size != alloc_size) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_calloc: could not allocate %"SZT_F" bytes\n", alloc_size)); + return NULL; + } + + /* allocate 'count' objects of size 'size' */ + p = mem_malloc((mem_size_t)alloc_size); + if (p) { + /* zero the memory */ + memset(p, 0, alloc_size); + } + return p; +} +#endif /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */ diff --git a/Middlewares/Third_Party/LwIP/src/core/memp.c b/Middlewares/Third_Party/LwIP/src/core/memp.c new file mode 100644 index 00000000..352ce5a5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/memp.c @@ -0,0 +1,447 @@ +/** + * @file + * Dynamic pool memory manager + * + * lwIP has dedicated pools for many structures (netconn, protocol control blocks, + * packet buffers, ...). All these pools are managed here. 
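+ * The number of elements in each built-in pool is configured from lwipopts.h
+ * (e.g. MEMP_NUM_TCP_PCB, MEMP_NUM_UDP_PCB, PBUF_POOL_SIZE), while the element
+ * sizes are derived from the structures listed in lwip/priv/memp_std.h.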
+ * + * @defgroup mempool Memory pools + * @ingroup infrastructure + * Custom memory pools + + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/memp.h" +#include "lwip/sys.h" +#include "lwip/stats.h" + +#include + +/* Make sure we include everything we need for size calculation required by memp_std.h */ +#include "lwip/pbuf.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/tcp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/altcp.h" +#include "lwip/ip4_frag.h" +#include "lwip/netbuf.h" +#include "lwip/api.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/priv/api_msg.h" +#include "lwip/priv/sockets_priv.h" +#include "lwip/etharp.h" +#include "lwip/igmp.h" +#include "lwip/timeouts.h" +/* needed by default MEMP_NUM_SYS_TIMEOUT */ +#include "netif/ppp/ppp_opts.h" +#include "lwip/netdb.h" +#include "lwip/dns.h" +#include "lwip/priv/nd6_priv.h" +#include "lwip/ip6_frag.h" +#include "lwip/mld6.h" + +#define LWIP_MEMPOOL(name,num,size,desc) LWIP_MEMPOOL_DECLARE(name,num,size,desc) +#include "lwip/priv/memp_std.h" + +const struct memp_desc *const memp_pools[MEMP_MAX] = { +#define LWIP_MEMPOOL(name,num,size,desc) &memp_ ## name, +#include "lwip/priv/memp_std.h" +}; + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#if MEMP_MEM_MALLOC && MEMP_OVERFLOW_CHECK >= 2 +#undef MEMP_OVERFLOW_CHECK +/* MEMP_OVERFLOW_CHECK >= 2 does not work with MEMP_MEM_MALLOC, use 1 instead */ +#define MEMP_OVERFLOW_CHECK 1 +#endif + +#if MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC +/** + * Check that memp-lists don't form a circle, using "Floyd's cycle-finding algorithm". + */ +static int +memp_sanity(const struct memp_desc *desc) +{ + struct memp *t, *h; + + t = *desc->tab; + if (t != NULL) { + for (h = t->next; (t != NULL) && (h != NULL); t = t->next, + h = ((h->next != NULL) ? 
h->next->next : NULL)) { + if (t == h) { + return 0; + } + } + } + + return 1; +} +#endif /* MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC */ + +#if MEMP_OVERFLOW_CHECK +/** + * Check if a memp element was victim of an overflow or underflow + * (e.g. the restricted area after/before it has been altered) + * + * @param p the memp element to check + * @param desc the pool p comes from + */ +static void +memp_overflow_check_element(struct memp *p, const struct memp_desc *desc) +{ + mem_overflow_check_raw((u8_t *)p + MEMP_SIZE, desc->size, "pool ", desc->desc); +} + +/** + * Initialize the restricted area of on memp element. + */ +static void +memp_overflow_init_element(struct memp *p, const struct memp_desc *desc) +{ + mem_overflow_init_raw((u8_t *)p + MEMP_SIZE, desc->size); +} + +#if MEMP_OVERFLOW_CHECK >= 2 +/** + * Do an overflow check for all elements in every pool. + * + * @see memp_overflow_check_element for a description of the check + */ +static void +memp_overflow_check_all(void) +{ + u16_t i, j; + struct memp *p; + SYS_ARCH_DECL_PROTECT(old_level); + SYS_ARCH_PROTECT(old_level); + + for (i = 0; i < MEMP_MAX; ++i) { + p = (struct memp *)LWIP_MEM_ALIGN(memp_pools[i]->base); + for (j = 0; j < memp_pools[i]->num; ++j) { + memp_overflow_check_element(p, memp_pools[i]); + p = LWIP_ALIGNMENT_CAST(struct memp *, ((u8_t *)p + MEMP_SIZE + memp_pools[i]->size + MEM_SANITY_REGION_AFTER_ALIGNED)); + } + } + SYS_ARCH_UNPROTECT(old_level); +} +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ +#endif /* MEMP_OVERFLOW_CHECK */ + +/** + * Initialize custom memory pool. + * Related functions: memp_malloc_pool, memp_free_pool + * + * @param desc pool to initialize + */ +void +memp_init_pool(const struct memp_desc *desc) +{ +#if MEMP_MEM_MALLOC + LWIP_UNUSED_ARG(desc); +#else + int i; + struct memp *memp; + + *desc->tab = NULL; + memp = (struct memp *)LWIP_MEM_ALIGN(desc->base); +#if MEMP_MEM_INIT + /* force memset on pool memory */ + memset(memp, 0, (size_t)desc->num * (MEMP_SIZE + desc->size +#if MEMP_OVERFLOW_CHECK + + MEM_SANITY_REGION_AFTER_ALIGNED +#endif + )); +#endif + /* create a linked list of memp elements */ + for (i = 0; i < desc->num; ++i) { + memp->next = *desc->tab; + *desc->tab = memp; +#if MEMP_OVERFLOW_CHECK + memp_overflow_init_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + /* cast through void* to get rid of alignment warnings */ + memp = (struct memp *)(void *)((u8_t *)memp + MEMP_SIZE + desc->size +#if MEMP_OVERFLOW_CHECK + + MEM_SANITY_REGION_AFTER_ALIGNED +#endif + ); + } +#if MEMP_STATS + desc->stats->avail = desc->num; +#endif /* MEMP_STATS */ +#endif /* !MEMP_MEM_MALLOC */ + +#if MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) + desc->stats->name = desc->desc; +#endif /* MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) */ +} + +/** + * Initializes lwIP built-in pools. + * Related functions: memp_malloc, memp_free + * + * Carves out memp_memory into linked lists for each pool-type. 
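+ *
+ * memp_init() is called once during stack initialization (from lwip_init());
+ * application code does not normally call it directly.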
+ */ +void +memp_init(void) +{ + u16_t i; + + /* for every pool: */ + for (i = 0; i < LWIP_ARRAYSIZE(memp_pools); i++) { + memp_init_pool(memp_pools[i]); + +#if LWIP_STATS && MEMP_STATS + lwip_stats.memp[i] = memp_pools[i]->stats; +#endif + } + +#if MEMP_OVERFLOW_CHECK >= 2 + /* check everything a first time to see if it worked */ + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ +} + +static void * +#if !MEMP_OVERFLOW_CHECK +do_memp_malloc_pool(const struct memp_desc *desc) +#else +do_memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int line) +#endif +{ + struct memp *memp; + SYS_ARCH_DECL_PROTECT(old_level); + +#if MEMP_MEM_MALLOC + memp = (struct memp *)mem_malloc(MEMP_SIZE + MEMP_ALIGN_SIZE(desc->size)); + SYS_ARCH_PROTECT(old_level); +#else /* MEMP_MEM_MALLOC */ + SYS_ARCH_PROTECT(old_level); + + memp = *desc->tab; +#endif /* MEMP_MEM_MALLOC */ + + if (memp != NULL) { +#if !MEMP_MEM_MALLOC +#if MEMP_OVERFLOW_CHECK == 1 + memp_overflow_check_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + + *desc->tab = memp->next; +#if MEMP_OVERFLOW_CHECK + memp->next = NULL; +#endif /* MEMP_OVERFLOW_CHECK */ +#endif /* !MEMP_MEM_MALLOC */ +#if MEMP_OVERFLOW_CHECK + memp->file = file; + memp->line = line; +#if MEMP_MEM_MALLOC + memp_overflow_init_element(memp, desc); +#endif /* MEMP_MEM_MALLOC */ +#endif /* MEMP_OVERFLOW_CHECK */ + LWIP_ASSERT("memp_malloc: memp properly aligned", + ((mem_ptr_t)memp % MEM_ALIGNMENT) == 0); +#if MEMP_STATS + desc->stats->used++; + if (desc->stats->used > desc->stats->max) { + desc->stats->max = desc->stats->used; + } +#endif + SYS_ARCH_UNPROTECT(old_level); + /* cast through u8_t* to get rid of alignment warnings */ + return ((u8_t *)memp + MEMP_SIZE); + } else { +#if MEMP_STATS + desc->stats->err++; +#endif + SYS_ARCH_UNPROTECT(old_level); + LWIP_DEBUGF(MEMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("memp_malloc: out of memory in pool %s\n", desc->desc)); + } + + return NULL; +} + +/** + * Get an element from a custom pool. + * + * @param desc the pool to get an element from + * + * @return a pointer to the allocated memory or a NULL pointer on error + */ +void * +#if !MEMP_OVERFLOW_CHECK +memp_malloc_pool(const struct memp_desc *desc) +#else +memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int line) +#endif +{ + LWIP_ASSERT("invalid pool desc", desc != NULL); + if (desc == NULL) { + return NULL; + } + +#if !MEMP_OVERFLOW_CHECK + return do_memp_malloc_pool(desc); +#else + return do_memp_malloc_pool_fn(desc, file, line); +#endif +} + +/** + * Get an element from a specific pool. 
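+ *
+ * Usage sketch (MEMP_SYS_TIMEOUT is one of the built-in pool types declared in
+ * lwip/priv/memp_std.h; the variable name is illustrative):
+ *
+ *   struct sys_timeo *t = (struct sys_timeo *)memp_malloc(MEMP_SYS_TIMEOUT);
+ *   if (t != NULL) {
+ *     memp_free(MEMP_SYS_TIMEOUT, t);
+ *   }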
+ * + * @param type the pool to get an element from + * + * @return a pointer to the allocated memory or a NULL pointer on error + */ +void * +#if !MEMP_OVERFLOW_CHECK +memp_malloc(memp_t type) +#else +memp_malloc_fn(memp_t type, const char *file, const int line) +#endif +{ + void *memp; + LWIP_ERROR("memp_malloc: type < MEMP_MAX", (type < MEMP_MAX), return NULL;); + +#if MEMP_OVERFLOW_CHECK >= 2 + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ + +#if !MEMP_OVERFLOW_CHECK + memp = do_memp_malloc_pool(memp_pools[type]); +#else + memp = do_memp_malloc_pool_fn(memp_pools[type], file, line); +#endif + + return memp; +} + +static void +do_memp_free_pool(const struct memp_desc *desc, void *mem) +{ + struct memp *memp; + SYS_ARCH_DECL_PROTECT(old_level); + + LWIP_ASSERT("memp_free: mem properly aligned", + ((mem_ptr_t)mem % MEM_ALIGNMENT) == 0); + + /* cast through void* to get rid of alignment warnings */ + memp = (struct memp *)(void *)((u8_t *)mem - MEMP_SIZE); + + SYS_ARCH_PROTECT(old_level); + +#if MEMP_OVERFLOW_CHECK == 1 + memp_overflow_check_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + +#if MEMP_STATS + desc->stats->used--; +#endif + +#if MEMP_MEM_MALLOC + LWIP_UNUSED_ARG(desc); + SYS_ARCH_UNPROTECT(old_level); + mem_free(memp); +#else /* MEMP_MEM_MALLOC */ + memp->next = *desc->tab; + *desc->tab = memp; + +#if MEMP_SANITY_CHECK + LWIP_ASSERT("memp sanity", memp_sanity(desc)); +#endif /* MEMP_SANITY_CHECK */ + + SYS_ARCH_UNPROTECT(old_level); +#endif /* !MEMP_MEM_MALLOC */ +} + +/** + * Put a custom pool element back into its pool. + * + * @param desc the pool where to put mem + * @param mem the memp element to free + */ +void +memp_free_pool(const struct memp_desc *desc, void *mem) +{ + LWIP_ASSERT("invalid pool desc", desc != NULL); + if ((desc == NULL) || (mem == NULL)) { + return; + } + + do_memp_free_pool(desc, mem); +} + +/** + * Put an element back into its pool. + * + * @param type the pool where to put mem + * @param mem the memp element to free + */ +void +memp_free(memp_t type, void *mem) +{ +#ifdef LWIP_HOOK_MEMP_AVAILABLE + struct memp *old_first; +#endif + + LWIP_ERROR("memp_free: type < MEMP_MAX", (type < MEMP_MAX), return;); + + if (mem == NULL) { + return; + } + +#if MEMP_OVERFLOW_CHECK >= 2 + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ + +#ifdef LWIP_HOOK_MEMP_AVAILABLE + old_first = *memp_pools[type]->tab; +#endif + + do_memp_free_pool(memp_pools[type], mem); + +#ifdef LWIP_HOOK_MEMP_AVAILABLE + if (old_first == NULL) { + LWIP_HOOK_MEMP_AVAILABLE(type); + } +#endif +} diff --git a/Middlewares/Third_Party/LwIP/src/core/netif.c b/Middlewares/Third_Party/LwIP/src/core/netif.c new file mode 100644 index 00000000..15200a27 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/netif.c @@ -0,0 +1,1795 @@ +/** + * @file + * lwIP network interface abstraction + * + * @defgroup netif Network interface (NETIF) + * @ingroup callbackstyle_api + * + * @defgroup netif_ip4 IPv4 address handling + * @ingroup netif + * + * @defgroup netif_ip6 IPv6 address handling + * @ingroup netif + * + * @defgroup netif_cd Client data handling + * Store data (void*) on a netif for application usage. + * @see @ref LWIP_NUM_NETIF_CLIENT_DATA + * @ingroup netif + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +#include "lwip/opt.h" + +#include /* memset */ +#include /* atoi */ + +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/udp.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/snmp.h" +#include "lwip/igmp.h" +#include "lwip/etharp.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/ip.h" +#if ENABLE_LOOPBACK +#if LWIP_NETIF_LOOPBACK_MULTITHREADING +#include "lwip/tcpip.h" +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +#include "netif/ethernet.h" + +#if LWIP_AUTOIP +#include "lwip/autoip.h" +#endif /* LWIP_AUTOIP */ +#if LWIP_DHCP +#include "lwip/dhcp.h" +#endif /* LWIP_DHCP */ +#if LWIP_IPV6_DHCP6 +#include "lwip/dhcp6.h" +#endif /* LWIP_IPV6_DHCP6 */ +#if LWIP_IPV6_MLD +#include "lwip/mld6.h" +#endif /* LWIP_IPV6_MLD */ +#if LWIP_IPV6 +#include "lwip/nd6.h" +#endif + +#if LWIP_NETIF_STATUS_CALLBACK +#define NETIF_STATUS_CALLBACK(n) do{ if (n->status_callback) { (n->status_callback)(n); }}while(0) +#else +#define NETIF_STATUS_CALLBACK(n) +#endif /* LWIP_NETIF_STATUS_CALLBACK */ + +#if LWIP_NETIF_LINK_CALLBACK +#define NETIF_LINK_CALLBACK(n) do{ if (n->link_callback) { (n->link_callback)(n); }}while(0) +#else +#define NETIF_LINK_CALLBACK(n) +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if LWIP_NETIF_EXT_STATUS_CALLBACK +static netif_ext_callback_t *ext_callback; +#endif + +#if !LWIP_SINGLE_NETIF +struct netif *netif_list; +#endif /* !LWIP_SINGLE_NETIF */ +struct netif *netif_default; + +#define netif_index_to_num(index) ((index) - 1) +static u8_t netif_num; + +#if LWIP_NUM_NETIF_CLIENT_DATA > 0 +static u8_t netif_client_id; +#endif + +#define NETIF_REPORT_TYPE_IPV4 0x01 +#define NETIF_REPORT_TYPE_IPV6 0x02 +static void netif_issue_reports(struct netif *netif, u8_t report_type); + +#if LWIP_IPV6 +static err_t netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr); +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 +static err_t netif_null_output_ip4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr); +#endif /* LWIP_IPV4 */ + +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +static err_t netif_loop_output_ipv4(struct 
netif *netif, struct pbuf *p, const ip4_addr_t *addr); +#endif +#if LWIP_IPV6 +static err_t netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t *addr); +#endif + + +static struct netif loop_netif; + +/** + * Initialize a lwip network interface structure for a loopback interface + * + * @param netif the lwip network interface structure for this loopif + * @return ERR_OK if the loopif is initialized + * ERR_MEM if private data couldn't be allocated + */ +static err_t +netif_loopif_init(struct netif *netif) +{ + LWIP_ASSERT("netif_loopif_init: invalid netif", netif != NULL); + + /* initialize the snmp variables and counters inside the struct netif + * ifSpeed: no assumption can be made! + */ + MIB2_INIT_NETIF(netif, snmp_ifType_softwareLoopback, 0); + + netif->name[0] = 'l'; + netif->name[1] = 'o'; +#if LWIP_IPV4 + netif->output = netif_loop_output_ipv4; +#endif +#if LWIP_IPV6 + netif->output_ip6 = netif_loop_output_ipv6; +#endif +#if LWIP_LOOPIF_MULTICAST + netif_set_flags(netif, NETIF_FLAG_IGMP); +#endif + NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_DISABLE_ALL); + return ERR_OK; +} +#endif /* LWIP_HAVE_LOOPIF */ + +void +netif_init(void) +{ +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +#define LOOPIF_ADDRINIT &loop_ipaddr, &loop_netmask, &loop_gw, + ip4_addr_t loop_ipaddr, loop_netmask, loop_gw; + IP4_ADDR(&loop_gw, 127, 0, 0, 1); + IP4_ADDR(&loop_ipaddr, 127, 0, 0, 1); + IP4_ADDR(&loop_netmask, 255, 0, 0, 0); +#else /* LWIP_IPV4 */ +#define LOOPIF_ADDRINIT +#endif /* LWIP_IPV4 */ + +#if NO_SYS + netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, ip_input); +#else /* NO_SYS */ + netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, tcpip_input); +#endif /* NO_SYS */ + +#if LWIP_IPV6 + IP_ADDR6_HOST(loop_netif.ip6_addr, 0, 0, 0, 0x00000001UL); + loop_netif.ip6_addr_state[0] = IP6_ADDR_VALID; +#endif /* LWIP_IPV6 */ + + netif_set_link_up(&loop_netif); + netif_set_up(&loop_netif); + +#endif /* LWIP_HAVE_LOOPIF */ +} + +/** + * @ingroup lwip_nosys + * Forwards a received packet for input processing with + * ethernet_input() or ip_input() depending on netif flags. + * Don't call directly, pass to netif_add() and call + * netif->input(). + * Only works if the netif driver correctly sets + * NETIF_FLAG_ETHARP and/or NETIF_FLAG_ETHERNET flag! + */ +err_t +netif_input(struct pbuf *p, struct netif *inp) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_input: invalid pbuf", p != NULL); + LWIP_ASSERT("netif_input: invalid netif", inp != NULL); + +#if LWIP_ETHERNET + if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { + return ethernet_input(p, inp); + } else +#endif /* LWIP_ETHERNET */ + return ip_input(p, inp); +} + +/** + * @ingroup netif + * Add a network interface to the list of lwIP netifs. + * + * Same as @ref netif_add but without IPv4 addresses + */ +struct netif * +netif_add_noaddr(struct netif *netif, void *state, netif_init_fn init, netif_input_fn input) +{ + return netif_add(netif, +#if LWIP_IPV4 + NULL, NULL, NULL, +#endif /* LWIP_IPV4*/ + state, init, input); +} + +/** + * @ingroup netif + * Add a network interface to the list of lwIP netifs. 
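+ *
+ * Typical call from port/startup code with LWIP_IPV4 enabled (sketch: "gnetif",
+ * "ethernetif_init" and the address variables are assumed to be provided by the
+ * application/port, they are not defined in this file):
+ *
+ *   netif_add(&gnetif, &ipaddr, &netmask, &gw, NULL, ethernetif_init, tcpip_input);
+ *   netif_set_default(&gnetif);
+ *   netif_set_up(&gnetif);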
+ * + * @param netif a pre-allocated netif structure + * @param ipaddr IP address for the new netif + * @param netmask network mask for the new netif + * @param gw default gateway IP address for the new netif + * @param state opaque data passed to the new netif + * @param init callback function that initializes the interface + * @param input callback function that is called to pass + * ingress packets up in the protocol layer stack.\n + * It is recommended to use a function that passes the input directly + * to the stack (netif_input(), NO_SYS=1 mode) or via sending a + * message to TCPIP thread (tcpip_input(), NO_SYS=0 mode).\n + * These functions use netif flags NETIF_FLAG_ETHARP and NETIF_FLAG_ETHERNET + * to decide whether to forward to ethernet_input() or ip_input(). + * In other words, the functions only work when the netif + * driver is implemented correctly!\n + * Most members of struct netif should be be initialized by the + * netif init function = netif driver (init parameter of this function).\n + * IPv6: Don't forget to call netif_create_ip6_linklocal_address() after + * setting the MAC address in struct netif.hwaddr + * (IPv6 requires a link-local address). + * + * @return netif, or NULL if failed. + */ +struct netif * +netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input) +{ +#if LWIP_IPV6 + s8_t i; +#endif + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_SINGLE_NETIF + if (netif_default != NULL) { + LWIP_ASSERT("single netif already set", 0); + return NULL; + } +#endif + + LWIP_ERROR("netif_add: invalid netif", netif != NULL, return NULL); + LWIP_ERROR("netif_add: No init function given", init != NULL, return NULL); + +#if LWIP_IPV4 + if (ipaddr == NULL) { + ipaddr = ip_2_ip4(IP4_ADDR_ANY); + } + if (netmask == NULL) { + netmask = ip_2_ip4(IP4_ADDR_ANY); + } + if (gw == NULL) { + gw = ip_2_ip4(IP4_ADDR_ANY); + } + + /* reset new interface configuration state */ + ip_addr_set_zero_ip4(&netif->ip_addr); + ip_addr_set_zero_ip4(&netif->netmask); + ip_addr_set_zero_ip4(&netif->gw); + netif->output = netif_null_output_ip4; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + ip_addr_set_zero_ip6(&netif->ip6_addr[i]); + netif->ip6_addr_state[i] = IP6_ADDR_INVALID; +#if LWIP_IPV6_ADDRESS_LIFETIMES + netif->ip6_addr_valid_life[i] = IP6_ADDR_LIFE_STATIC; + netif->ip6_addr_pref_life[i] = IP6_ADDR_LIFE_STATIC; +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + } + netif->output_ip6 = netif_null_output_ip6; +#endif /* LWIP_IPV6 */ + NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_ENABLE_ALL); + netif->mtu = 0; + netif->flags = 0; +#ifdef netif_get_client_data + memset(netif->client_data, 0, sizeof(netif->client_data)); +#endif /* LWIP_NUM_NETIF_CLIENT_DATA */ +#if LWIP_IPV6 +#if LWIP_IPV6_AUTOCONFIG + /* IPv6 address autoconfiguration not enabled by default */ + netif->ip6_autoconfig_enabled = 0; +#endif /* LWIP_IPV6_AUTOCONFIG */ + nd6_restart_netif(netif); +#endif /* LWIP_IPV6 */ +#if LWIP_NETIF_STATUS_CALLBACK + netif->status_callback = NULL; +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_LINK_CALLBACK + netif->link_callback = NULL; +#endif /* LWIP_NETIF_LINK_CALLBACK */ +#if LWIP_IGMP + netif->igmp_mac_filter = NULL; +#endif /* LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD + netif->mld_mac_filter = NULL; +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ +#if ENABLE_LOOPBACK + netif->loop_first = NULL; + 
netif->loop_last = NULL; +#endif /* ENABLE_LOOPBACK */ + + /* remember netif specific state information data */ + netif->state = state; + netif->num = netif_num; + netif->input = input; + + NETIF_RESET_HINTS(netif); +#if ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS + netif->loop_cnt_current = 0; +#endif /* ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS */ + +#if LWIP_IPV4 + netif_set_addr(netif, ipaddr, netmask, gw); +#endif /* LWIP_IPV4 */ + + /* call user specified initialization function for netif */ + if (init(netif) != ERR_OK) { + return NULL; + } +#if LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES + /* Initialize the MTU for IPv6 to the one set by the netif driver. + This can be updated later by RA. */ + netif->mtu6 = netif->mtu; +#endif /* LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES */ + +#if !LWIP_SINGLE_NETIF + /* Assign a unique netif number in the range [0..254], so that (num+1) can + serve as an interface index that fits in a u8_t. + We assume that the new netif has not yet been added to the list here. + This algorithm is O(n^2), but that should be OK for lwIP. + */ + { + struct netif *netif2; + int num_netifs; + do { + if (netif->num == 255) { + netif->num = 0; + } + num_netifs = 0; + for (netif2 = netif_list; netif2 != NULL; netif2 = netif2->next) { + LWIP_ASSERT("netif already added", netif2 != netif); + num_netifs++; + LWIP_ASSERT("too many netifs, max. supported number is 255", num_netifs <= 255); + if (netif2->num == netif->num) { + netif->num++; + break; + } + } + } while (netif2 != NULL); + } + if (netif->num == 254) { + netif_num = 0; + } else { + netif_num = (u8_t)(netif->num + 1); + } + + /* add this netif to the list */ + netif->next = netif_list; + netif_list = netif; +#endif /* "LWIP_SINGLE_NETIF */ + mib2_netif_added(netif); + +#if LWIP_IGMP + /* start IGMP processing */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_start(netif); + } +#endif /* LWIP_IGMP */ + + LWIP_DEBUGF(NETIF_DEBUG, ("netif: added interface %c%c IP", + netif->name[0], netif->name[1])); +#if LWIP_IPV4 + LWIP_DEBUGF(NETIF_DEBUG, (" addr ")); + ip4_addr_debug_print(NETIF_DEBUG, ipaddr); + LWIP_DEBUGF(NETIF_DEBUG, (" netmask ")); + ip4_addr_debug_print(NETIF_DEBUG, netmask); + LWIP_DEBUGF(NETIF_DEBUG, (" gw ")); + ip4_addr_debug_print(NETIF_DEBUG, gw); +#endif /* LWIP_IPV4 */ + LWIP_DEBUGF(NETIF_DEBUG, ("\n")); + + netif_invoke_ext_callback(netif, LWIP_NSC_NETIF_ADDED, NULL); + + return netif; +} + +static void +netif_do_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ +#if LWIP_TCP + tcp_netif_ip_addr_changed(old_addr, new_addr); +#endif /* LWIP_TCP */ +#if LWIP_UDP + udp_netif_ip_addr_changed(old_addr, new_addr); +#endif /* LWIP_UDP */ +#if LWIP_RAW + raw_netif_ip_addr_changed(old_addr, new_addr); +#endif /* LWIP_RAW */ +} + +#if LWIP_IPV4 +static int +netif_do_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr, ip_addr_t *old_addr) +{ + LWIP_ASSERT("invalid pointer", ipaddr != NULL); + LWIP_ASSERT("invalid pointer", old_addr != NULL); + + /* address is actually being changed? 
*/ + if (ip4_addr_cmp(ipaddr, netif_ip4_addr(netif)) == 0) { + ip_addr_t new_addr; + *ip_2_ip4(&new_addr) = *ipaddr; + IP_SET_TYPE_VAL(new_addr, IPADDR_TYPE_V4); + + ip_addr_copy(*old_addr, *netif_ip_addr4(netif)); + + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: netif address being changed\n")); + netif_do_ip_addr_changed(old_addr, &new_addr); + + mib2_remove_ip4(netif); + mib2_remove_route_ip4(0, netif); + /* set new IP address to netif */ + ip4_addr_set(ip_2_ip4(&netif->ip_addr), ipaddr); + IP_SET_TYPE_VAL(netif->ip_addr, IPADDR_TYPE_V4); + mib2_add_ip4(netif); + mib2_add_route_ip4(0, netif); + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4); + + NETIF_STATUS_CALLBACK(netif); + return 1; /* address changed */ + } + return 0; /* address unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the IP address of a network interface + * + * @param netif the network interface to change + * @param ipaddr the new IP address + * + * @note call netif_set_addr() if you also want to change netmask and + * default gateway + */ +void +netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr) +{ + ip_addr_t old_addr; + + LWIP_ERROR("netif_set_ipaddr: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + + LWIP_ASSERT_CORE_LOCKED(); + + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_address = &old_addr; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_ADDRESS_CHANGED, &args); +#endif + } +} + +static int +netif_do_set_netmask(struct netif *netif, const ip4_addr_t *netmask, ip_addr_t *old_nm) +{ + /* address is actually being changed? 
*/ + if (ip4_addr_cmp(netmask, netif_ip4_netmask(netif)) == 0) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + LWIP_ASSERT("invalid pointer", old_nm != NULL); + ip_addr_copy(*old_nm, *netif_ip_netmask4(netif)); +#else + LWIP_UNUSED_ARG(old_nm); +#endif + mib2_remove_route_ip4(0, netif); + /* set new netmask to netif */ + ip4_addr_set(ip_2_ip4(&netif->netmask), netmask); + IP_SET_TYPE_VAL(netif->netmask, IPADDR_TYPE_V4); + mib2_add_route_ip4(0, netif); + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: netmask of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + netif->name[0], netif->name[1], + ip4_addr1_16(netif_ip4_netmask(netif)), + ip4_addr2_16(netif_ip4_netmask(netif)), + ip4_addr3_16(netif_ip4_netmask(netif)), + ip4_addr4_16(netif_ip4_netmask(netif)))); + return 1; /* netmask changed */ + } + return 0; /* netmask unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the netmask of a network interface + * + * @param netif the network interface to change + * @param netmask the new netmask + * + * @note call netif_set_addr() if you also want to change ip address and + * default gateway + */ +void +netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + ip_addr_t old_nm_val; + ip_addr_t *old_nm = &old_nm_val; +#else + ip_addr_t *old_nm = NULL; +#endif + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_netmask: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + + if (netif_do_set_netmask(netif, netmask, old_nm)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_netmask = old_nm; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_NETMASK_CHANGED, &args); +#endif + } +} + +static int +netif_do_set_gw(struct netif *netif, const ip4_addr_t *gw, ip_addr_t *old_gw) +{ + /* address is actually being changed? 
*/ + if (ip4_addr_cmp(gw, netif_ip4_gw(netif)) == 0) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + LWIP_ASSERT("invalid pointer", old_gw != NULL); + ip_addr_copy(*old_gw, *netif_ip_gw4(netif)); +#else + LWIP_UNUSED_ARG(old_gw); +#endif + + ip4_addr_set(ip_2_ip4(&netif->gw), gw); + IP_SET_TYPE_VAL(netif->gw, IPADDR_TYPE_V4); + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: GW address of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + netif->name[0], netif->name[1], + ip4_addr1_16(netif_ip4_gw(netif)), + ip4_addr2_16(netif_ip4_gw(netif)), + ip4_addr3_16(netif_ip4_gw(netif)), + ip4_addr4_16(netif_ip4_gw(netif)))); + return 1; /* gateway changed */ + } + return 0; /* gateway unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the default gateway for a network interface + * + * @param netif the network interface to change + * @param gw the new default gateway + * + * @note call netif_set_addr() if you also want to change ip address and netmask + */ +void +netif_set_gw(struct netif *netif, const ip4_addr_t *gw) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + ip_addr_t old_gw_val; + ip_addr_t *old_gw = &old_gw_val; +#else + ip_addr_t *old_gw = NULL; +#endif + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_gw: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + if (netif_do_set_gw(netif, gw, old_gw)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_gw = old_gw; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_GATEWAY_CHANGED, &args); +#endif + } +} + +/** + * @ingroup netif_ip4 + * Change IP address configuration for a network interface (including netmask + * and default gateway). 
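The per-field setters above (netif_set_ipaddr(), netif_set_netmask(), netif_set_gw()) are normally combined through netif_set_addr(), documented here. A minimal usage sketch, assuming application code outside this patch and purely illustrative addresses and netif name:

#include "lwip/netif.h"
#include "lwip/ip4_addr.h"

/* Apply a static IPv4 configuration in one call; netif_set_addr() updates
   address, netmask and gateway together and invokes the status/ext callbacks
   only for the fields that actually changed. */
static void apply_static_ip4(struct netif *netif)
{
  ip4_addr_t ipaddr, netmask, gw;

  IP4_ADDR(&ipaddr,  192, 168,   1,  10);
  IP4_ADDR(&netmask, 255, 255, 255,   0);
  IP4_ADDR(&gw,      192, 168,   1,   1);

  /* Must be called in the lwIP core context (tcpip thread) or with the
     core lock held, as enforced by LWIP_ASSERT_CORE_LOCKED(). */
  netif_set_addr(netif, &ipaddr, &netmask, &gw);
}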
+ * + * @param netif the network interface to change + * @param ipaddr the new IP address + * @param netmask the new netmask + * @param gw the new default gateway + */ +void +netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, + const ip4_addr_t *gw) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_nsc_reason_t change_reason = LWIP_NSC_NONE; + netif_ext_callback_args_t cb_args; + ip_addr_t old_nm_val; + ip_addr_t old_gw_val; + ip_addr_t *old_nm = &old_nm_val; + ip_addr_t *old_gw = &old_gw_val; +#else + ip_addr_t *old_nm = NULL; + ip_addr_t *old_gw = NULL; +#endif + ip_addr_t old_addr; + int remove; + + LWIP_ASSERT_CORE_LOCKED(); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + remove = ip4_addr_isany(ipaddr); + if (remove) { + /* when removing an address, we have to remove it *before* changing netmask/gw + to ensure that tcp RST segment can be sent correctly */ + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_ADDRESS_CHANGED; + cb_args.ipv4_changed.old_address = &old_addr; +#endif + } + } + if (netif_do_set_netmask(netif, netmask, old_nm)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_NETMASK_CHANGED; + cb_args.ipv4_changed.old_netmask = old_nm; +#endif + } + if (netif_do_set_gw(netif, gw, old_gw)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_GATEWAY_CHANGED; + cb_args.ipv4_changed.old_gw = old_gw; +#endif + } + if (!remove) { + /* set ipaddr last to ensure netmask/gw have been set when status callback is called */ + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_ADDRESS_CHANGED; + cb_args.ipv4_changed.old_address = &old_addr; +#endif + } + } + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + if (change_reason != LWIP_NSC_NONE) { + change_reason |= LWIP_NSC_IPV4_SETTINGS_CHANGED; + netif_invoke_ext_callback(netif, change_reason, &cb_args); + } +#endif +} +#endif /* LWIP_IPV4*/ + +/** + * @ingroup netif + * Remove a network interface from the list of lwIP netifs. + * + * @param netif the network interface to remove + */ +void +netif_remove(struct netif *netif) +{ +#if LWIP_IPV6 + int i; +#endif + + LWIP_ASSERT_CORE_LOCKED(); + + if (netif == NULL) { + return; + } + + netif_invoke_ext_callback(netif, LWIP_NSC_NETIF_REMOVED, NULL); + +#if LWIP_IPV4 + if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) { + netif_do_ip_addr_changed(netif_ip_addr4(netif), NULL); + } + +#if LWIP_IGMP + /* stop IGMP processing */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_stop(netif); + } +#endif /* LWIP_IGMP */ +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { + netif_do_ip_addr_changed(netif_ip_addr6(netif, i), NULL); + } + } +#if LWIP_IPV6_MLD + /* stop MLD processing */ + mld6_stop(netif); +#endif /* LWIP_IPV6_MLD */ +#endif /* LWIP_IPV6 */ + if (netif_is_up(netif)) { + /* set netif down before removing (call callback function) */ + netif_set_down(netif); + } + + mib2_remove_ip4(netif); + + /* this netif is default? */ + if (netif_default == netif) { + /* reset default netif */ + netif_set_default(NULL); + } +#if !LWIP_SINGLE_NETIF + /* is it the first netif? 
*/ + if (netif_list == netif) { + netif_list = netif->next; + } else { + /* look for netif further down the list */ + struct netif *tmp_netif; + NETIF_FOREACH(tmp_netif) { + if (tmp_netif->next == netif) { + tmp_netif->next = netif->next; + break; + } + } + if (tmp_netif == NULL) { + return; /* netif is not on the list */ + } + } +#endif /* !LWIP_SINGLE_NETIF */ + mib2_netif_removed(netif); +#if LWIP_NETIF_REMOVE_CALLBACK + if (netif->remove_callback) { + netif->remove_callback(netif); + } +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + LWIP_DEBUGF( NETIF_DEBUG, ("netif_remove: removed netif\n") ); +} + +/** + * @ingroup netif + * Set a network interface as the default network interface + * (used to output all packets for which no specific route is found) + * + * @param netif the default network interface + */ +void +netif_set_default(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif == NULL) { + /* remove default route */ + mib2_remove_route_ip4(1, netif); + } else { + /* install default route */ + mib2_add_route_ip4(1, netif); + } + netif_default = netif; + LWIP_DEBUGF(NETIF_DEBUG, ("netif: setting default interface %c%c\n", + netif ? netif->name[0] : '\'', netif ? netif->name[1] : '\'')); +} + +/** + * @ingroup netif + * Bring an interface up, available for processing + * traffic. + */ +void +netif_set_up(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_up: invalid netif", netif != NULL, return); + + if (!(netif->flags & NETIF_FLAG_UP)) { + netif_set_flags(netif, NETIF_FLAG_UP); + + MIB2_COPY_SYSUPTIME_TO(&netif->ts); + + NETIF_STATUS_CALLBACK(netif); + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.status_changed.state = 1; + netif_invoke_ext_callback(netif, LWIP_NSC_STATUS_CHANGED, &args); + } +#endif + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4 | NETIF_REPORT_TYPE_IPV6); +#if LWIP_IPV6 + nd6_restart_netif(netif); +#endif /* LWIP_IPV6 */ + } +} + +/** Send ARP/IGMP/MLD/RS events, e.g. on link-up/netif-up or addr-change + */ +static void +netif_issue_reports(struct netif *netif, u8_t report_type) +{ + LWIP_ASSERT("netif_issue_reports: invalid netif", netif != NULL); + + /* Only send reports when both link and admin states are up */ + if (!(netif->flags & NETIF_FLAG_LINK_UP) || + !(netif->flags & NETIF_FLAG_UP)) { + return; + } + +#if LWIP_IPV4 + if ((report_type & NETIF_REPORT_TYPE_IPV4) && + !ip4_addr_isany_val(*netif_ip4_addr(netif))) { +#if LWIP_ARP + /* For Ethernet network interfaces, we would like to send a "gratuitous ARP" */ + if (netif->flags & (NETIF_FLAG_ETHARP)) { + etharp_gratuitous(netif); + } +#endif /* LWIP_ARP */ + +#if LWIP_IGMP + /* resend IGMP memberships */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_report_groups(netif); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 + if (report_type & NETIF_REPORT_TYPE_IPV6) { +#if LWIP_IPV6_MLD + /* send mld memberships */ + mld6_report_groups(netif); +#endif /* LWIP_IPV6_MLD */ + } +#endif /* LWIP_IPV6 */ +} + +/** + * @ingroup netif + * Bring an interface down, disabling any traffic processing. 
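As a usage note for netif_set_default() and netif_set_up() above: after an interface has been created with netif_add() (earlier in this file), the application typically selects it as the default route and brings it up before starting an address-assignment service. A hedged sketch; the gnetif variable and the use of DHCP are assumptions, not part of this patch:

#include "lwip/netif.h"
#include "lwip/dhcp.h"

extern struct netif gnetif;   /* assumed to have been configured via netif_add() */

static void bring_up_default_netif(void)
{
  /* Route traffic without a more specific match through this interface. */
  netif_set_default(&gnetif);

  /* Administratively enable the interface; per netif_issue_reports(), the
     gratuitous ARP / IGMP / MLD / RS reports go out only once the link is
     also up. */
  netif_set_up(&gnetif);

  /* Optionally obtain an address via DHCP once the interface is up. */
  (void)dhcp_start(&gnetif);
}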
+ */ +void +netif_set_down(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_down: invalid netif", netif != NULL, return); + + if (netif->flags & NETIF_FLAG_UP) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.status_changed.state = 0; + netif_invoke_ext_callback(netif, LWIP_NSC_STATUS_CHANGED, &args); + } +#endif + + netif_clear_flags(netif, NETIF_FLAG_UP); + MIB2_COPY_SYSUPTIME_TO(&netif->ts); + +#if LWIP_IPV4 && LWIP_ARP + if (netif->flags & NETIF_FLAG_ETHARP) { + etharp_cleanup_netif(netif); + } +#endif /* LWIP_IPV4 && LWIP_ARP */ + +#if LWIP_IPV6 + nd6_cleanup_netif(netif); +#endif /* LWIP_IPV6 */ + + NETIF_STATUS_CALLBACK(netif); + } +} + +#if LWIP_NETIF_STATUS_CALLBACK +/** + * @ingroup netif + * Set callback to be called when interface is brought up/down or address is changed while up + */ +void +netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->status_callback = status_callback; + } +} +#endif /* LWIP_NETIF_STATUS_CALLBACK */ + +#if LWIP_NETIF_REMOVE_CALLBACK +/** + * @ingroup netif + * Set callback to be called when the interface has been removed + */ +void +netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->remove_callback = remove_callback; + } +} +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + +/** + * @ingroup netif + * Called by a driver when its link goes up + */ +void +netif_set_link_up(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_link_up: invalid netif", netif != NULL, return); + + if (!(netif->flags & NETIF_FLAG_LINK_UP)) { + netif_set_flags(netif, NETIF_FLAG_LINK_UP); + +#if LWIP_DHCP + dhcp_network_changed(netif); +#endif /* LWIP_DHCP */ + +#if LWIP_AUTOIP + autoip_network_changed(netif); +#endif /* LWIP_AUTOIP */ + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4 | NETIF_REPORT_TYPE_IPV6); +#if LWIP_IPV6 + nd6_restart_netif(netif); +#endif /* LWIP_IPV6 */ + + NETIF_LINK_CALLBACK(netif); +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.link_changed.state = 1; + netif_invoke_ext_callback(netif, LWIP_NSC_LINK_CHANGED, &args); + } +#endif + } +} + +/** + * @ingroup netif + * Called by a driver when its link goes down + */ +void +netif_set_link_down(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_link_down: invalid netif", netif != NULL, return); + + if (netif->flags & NETIF_FLAG_LINK_UP) { + netif_clear_flags(netif, NETIF_FLAG_LINK_UP); + NETIF_LINK_CALLBACK(netif); +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.link_changed.state = 0; + netif_invoke_ext_callback(netif, LWIP_NSC_LINK_CHANGED, &args); + } +#endif + } +} + +#if LWIP_NETIF_LINK_CALLBACK +/** + * @ingroup netif + * Set callback to be called when link is brought up/down + */ +void +netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->link_callback = link_callback; + } +} +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if ENABLE_LOOPBACK +/** + * @ingroup netif + * Send an IP packet to be received on the same netif (loopif-like). + * The pbuf is simply copied and handed back to netif->input. + * In multithreaded mode, this is done directly since netif->input must put + * the packet on a queue. 
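netif_set_link_up() and netif_set_link_down() shown above are intended to be driven by the link-layer driver, for example from a task that polls the PHY. A sketch under stated assumptions: read_phy_link_up() is a hypothetical helper, and the function runs in lwIP's core context (e.g. scheduled via tcpip_callback()) so the core-lock assertion holds:

#include "lwip/netif.h"

/* Hypothetical helper: returns nonzero when the PHY reports an established link. */
extern int read_phy_link_up(void);

/* Mirror the PHY link state into the netif; netif_set_link_up() notifies
   DHCP/AUTOIP of the network change and re-issues ARP/IGMP/MLD/RS reports. */
static void ethernet_link_check(struct netif *netif)
{
  int link_up = read_phy_link_up();

  if (link_up && !netif_is_link_up(netif)) {
    netif_set_link_up(netif);
  } else if (!link_up && netif_is_link_up(netif)) {
    netif_set_link_down(netif);
  }
}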
+ * In callback mode, the packet is put on an internal queue and is fed to + * netif->input by netif_poll(). + * + * @param netif the lwip network interface structure + * @param p the (IP) packet to 'send' + * @return ERR_OK if the packet has been sent + * ERR_MEM if the pbuf used to copy the packet couldn't be allocated + */ +err_t +netif_loop_output(struct netif *netif, struct pbuf *p) +{ + struct pbuf *r; + err_t err; + struct pbuf *last; +#if LWIP_LOOPBACK_MAX_PBUFS + u16_t clen = 0; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + /* If we have a loopif, SNMP counters are adjusted for it, + * if not they are adjusted for 'netif'. */ +#if MIB2_STATS +#if LWIP_HAVE_LOOPIF + struct netif *stats_if = &loop_netif; +#else /* LWIP_HAVE_LOOPIF */ + struct netif *stats_if = netif; +#endif /* LWIP_HAVE_LOOPIF */ +#endif /* MIB2_STATS */ +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + u8_t schedule_poll = 0; +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("netif_loop_output: invalid netif", netif != NULL); + LWIP_ASSERT("netif_loop_output: invalid pbuf", p != NULL); + + /* Allocate a new pbuf */ + r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM); + if (r == NULL) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return ERR_MEM; + } +#if LWIP_LOOPBACK_MAX_PBUFS + clen = pbuf_clen(r); + /* check for overflow or too many pbuf on queue */ + if (((netif->loop_cnt_current + clen) < netif->loop_cnt_current) || + ((netif->loop_cnt_current + clen) > LWIP_MIN(LWIP_LOOPBACK_MAX_PBUFS, 0xFFFF))) { + pbuf_free(r); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return ERR_MEM; + } + netif->loop_cnt_current = (u16_t)(netif->loop_cnt_current + clen); +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + /* Copy the whole pbuf queue p into the single pbuf r */ + if ((err = pbuf_copy(r, p)) != ERR_OK) { + pbuf_free(r); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return err; + } + + /* Put the packet on a linked list which gets emptied through calling + netif_poll(). 
*/ + + /* let last point to the last pbuf in chain r */ + for (last = r; last->next != NULL; last = last->next) { + /* nothing to do here, just get to the last pbuf */ + } + + SYS_ARCH_PROTECT(lev); + if (netif->loop_first != NULL) { + LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL); + netif->loop_last->next = r; + netif->loop_last = last; + } else { + netif->loop_first = r; + netif->loop_last = last; +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + /* No existing packets queued, schedule poll */ + schedule_poll = 1; +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + } + SYS_ARCH_UNPROTECT(lev); + + LINK_STATS_INC(link.xmit); + MIB2_STATS_NETIF_ADD(stats_if, ifoutoctets, p->tot_len); + MIB2_STATS_NETIF_INC(stats_if, ifoutucastpkts); + +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + /* For multithreading environment, schedule a call to netif_poll */ + if (schedule_poll) { + tcpip_try_callback((tcpip_callback_fn)netif_poll, netif); + } +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + + return ERR_OK; +} + +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +static err_t +netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t *addr) +{ + LWIP_UNUSED_ARG(addr); + return netif_loop_output(netif, p); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +static err_t +netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t *addr) +{ + LWIP_UNUSED_ARG(addr); + return netif_loop_output(netif, p); +} +#endif /* LWIP_IPV6 */ +#endif /* LWIP_HAVE_LOOPIF */ + + +/** + * Call netif_poll() in the main loop of your application. This is to prevent + * reentering non-reentrant functions like tcp_input(). Packets passed to + * netif_loop_output() are put on a list that is passed to netif->input() by + * netif_poll(). + */ +void +netif_poll(struct netif *netif) +{ + /* If we have a loopif, SNMP counters are adjusted for it, + * if not they are adjusted for 'netif'. */ +#if MIB2_STATS +#if LWIP_HAVE_LOOPIF + struct netif *stats_if = &loop_netif; +#else /* LWIP_HAVE_LOOPIF */ + struct netif *stats_if = netif; +#endif /* LWIP_HAVE_LOOPIF */ +#endif /* MIB2_STATS */ + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("netif_poll: invalid netif", netif != NULL); + + /* Get a packet from the list. With SYS_LIGHTWEIGHT_PROT=1, this is protected */ + SYS_ARCH_PROTECT(lev); + while (netif->loop_first != NULL) { + struct pbuf *in, *in_end; +#if LWIP_LOOPBACK_MAX_PBUFS + u8_t clen = 1; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + in = in_end = netif->loop_first; + while (in_end->len != in_end->tot_len) { + LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL); + in_end = in_end->next; +#if LWIP_LOOPBACK_MAX_PBUFS + clen++; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + } +#if LWIP_LOOPBACK_MAX_PBUFS + /* adjust the number of pbufs on queue */ + LWIP_ASSERT("netif->loop_cnt_current underflow", + ((netif->loop_cnt_current - clen) < netif->loop_cnt_current)); + netif->loop_cnt_current = (u16_t)(netif->loop_cnt_current - clen); +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + /* 'in_end' now points to the last pbuf from 'in' */ + if (in_end == netif->loop_last) { + /* this was the last pbuf in the list */ + netif->loop_first = netif->loop_last = NULL; + } else { + /* pop the pbuf off the list */ + netif->loop_first = in_end->next; + LWIP_ASSERT("should not be null since first != last!", netif->loop_first != NULL); + } + /* De-queue the pbuf from its successors on the 'loop_' list. 
*/ + in_end->next = NULL; + SYS_ARCH_UNPROTECT(lev); + + in->if_idx = netif_get_index(netif); + + LINK_STATS_INC(link.recv); + MIB2_STATS_NETIF_ADD(stats_if, ifinoctets, in->tot_len); + MIB2_STATS_NETIF_INC(stats_if, ifinucastpkts); + /* loopback packets are always IP packets! */ + if (ip_input(in, netif) != ERR_OK) { + pbuf_free(in); + } + SYS_ARCH_PROTECT(lev); + } + SYS_ARCH_UNPROTECT(lev); +} + +#if !LWIP_NETIF_LOOPBACK_MULTITHREADING +/** + * Calls netif_poll() for every netif on the netif_list. + */ +void +netif_poll_all(void) +{ + struct netif *netif; + /* loop through netifs */ + NETIF_FOREACH(netif) { + netif_poll(netif); + } +} +#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +#if LWIP_NUM_NETIF_CLIENT_DATA > 0 +/** + * @ingroup netif_cd + * Allocate an index to store data in client_data member of struct netif. + * Returned value is an index in mentioned array. + * @see LWIP_NUM_NETIF_CLIENT_DATA + */ +u8_t +netif_alloc_client_data_id(void) +{ + u8_t result = netif_client_id; + netif_client_id++; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_NUM_NETIF_CLIENT_DATA > 256 +#error LWIP_NUM_NETIF_CLIENT_DATA must be <= 256 +#endif + LWIP_ASSERT("Increase LWIP_NUM_NETIF_CLIENT_DATA in lwipopts.h", result < LWIP_NUM_NETIF_CLIENT_DATA); + return (u8_t)(result + LWIP_NETIF_CLIENT_DATA_INDEX_MAX); +} +#endif + +#if LWIP_IPV6 +/** + * @ingroup netif_ip6 + * Change an IPv6 address of a network interface + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param addr6 the new IPv6 address + * + * @note call netif_ip6_addr_set_state() to set the address valid/temptative + */ +void +netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_ip6_addr_set: invalid netif", netif != NULL); + LWIP_ASSERT("netif_ip6_addr_set: invalid addr6", addr6 != NULL); + + netif_ip6_addr_set_parts(netif, addr_idx, addr6->addr[0], addr6->addr[1], + addr6->addr[2], addr6->addr[3]); +} + +/* + * Change an IPv6 address of a network interface (internal version taking 4 * u32_t) + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param i0 word0 of the new IPv6 address + * @param i1 word1 of the new IPv6 address + * @param i2 word2 of the new IPv6 address + * @param i3 word3 of the new IPv6 address + */ +void +netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3) +{ + ip_addr_t old_addr; + ip_addr_t new_ipaddr; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); + + ip6_addr_copy(*ip_2_ip6(&old_addr), *netif_ip6_addr(netif, addr_idx)); + IP_SET_TYPE_VAL(old_addr, IPADDR_TYPE_V6); + + /* address is actually being changed? */ + if ((ip_2_ip6(&old_addr)->addr[0] != i0) || (ip_2_ip6(&old_addr)->addr[1] != i1) || + (ip_2_ip6(&old_addr)->addr[2] != i2) || (ip_2_ip6(&old_addr)->addr[3] != i3)) { + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set: netif address being changed\n")); + + IP_ADDR6(&new_ipaddr, i0, i1, i2, i3); + ip6_addr_assign_zone(ip_2_ip6(&new_ipaddr), IP6_UNICAST, netif); + + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, addr_idx))) { + netif_do_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); + } + /* @todo: remove/readd mib2 ip6 entries? 
*/ + + ip_addr_copy(netif->ip6_addr[addr_idx], new_ipaddr); + + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, addr_idx))) { + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); + NETIF_STATUS_CALLBACK(netif); + } + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.ipv6_set.addr_index = addr_idx; + args.ipv6_set.old_address = &old_addr; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV6_SET, &args); + } +#endif + } + + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", + addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), + netif_ip6_addr_state(netif, addr_idx))); +} + +/** + * @ingroup netif_ip6 + * Change the state of an IPv6 address of a network interface + * (INVALID, TEMPTATIVE, PREFERRED, DEPRECATED, where TEMPTATIVE + * includes the number of checks done, see ip6_addr.h) + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param state the new IPv6 address state + */ +void +netif_ip6_addr_set_state(struct netif *netif, s8_t addr_idx, u8_t state) +{ + u8_t old_state; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); + + old_state = netif_ip6_addr_state(netif, addr_idx); + /* state is actually being changed? */ + if (old_state != state) { + u8_t old_valid = old_state & IP6_ADDR_VALID; + u8_t new_valid = state & IP6_ADDR_VALID; + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set_state: netif address state being changed\n")); + +#if LWIP_IPV6_MLD + /* Reevaluate solicited-node multicast group membership. */ + if (netif->flags & NETIF_FLAG_MLD6) { + nd6_adjust_mld_membership(netif, addr_idx, state); + } +#endif /* LWIP_IPV6_MLD */ + + if (old_valid && !new_valid) { + /* address about to be removed by setting invalid */ + netif_do_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); + /* @todo: remove mib2 ip6 entries? */ + } + netif->ip6_addr_state[addr_idx] = state; + + if (!old_valid && new_valid) { + /* address added by setting valid */ + /* This is a good moment to check that the address is properly zoned. */ + IP6_ADDR_ZONECHECK_NETIF(netif_ip6_addr(netif, addr_idx), netif); + /* @todo: add mib2 ip6 entries? */ + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); + } + if ((old_state & ~IP6_ADDR_TENTATIVE_COUNT_MASK) != + (state & ~IP6_ADDR_TENTATIVE_COUNT_MASK)) { + /* address state has changed -> call the callback function */ + NETIF_STATUS_CALLBACK(netif); + } + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.ipv6_addr_state_changed.addr_index = addr_idx; + args.ipv6_addr_state_changed.old_state = old_state; + args.ipv6_addr_state_changed.address = netif_ip_addr6(netif, addr_idx); + netif_invoke_ext_callback(netif, LWIP_NSC_IPV6_ADDR_STATE_CHANGED, &args); + } +#endif + } + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", + addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), + netif_ip6_addr_state(netif, addr_idx))); +} + +/** + * Checks if a specific local address is present on the netif and returns its + * index. Depending on its state, it may or may not be assigned to the + * interface (as per RFC terminology). + * + * The given address may or may not be zoned (i.e., have a zone index other + * than IP6_NO_ZONE). 
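A short sketch for netif_ip6_addr_set() and netif_ip6_addr_set_state() above: manually placing an IPv6 address into a slot and marking it tentative so duplicate address detection can run. Application code; the address string, slot index and helper name are illustrative assumptions:

#include "lwip/netif.h"
#include "lwip/ip6_addr.h"

static void add_manual_ip6(struct netif *netif)
{
  ip6_addr_t ip6;

  if (ip6addr_aton("2001:db8::1", &ip6)) {
    /* Slot 0 is conventionally reserved for the link-local address. */
    netif_ip6_addr_set(netif, 1, &ip6);
    netif_ip6_addr_set_state(netif, 1, IP6_ADDR_TENTATIVE);
  }
}

The netif_add_ip6_address() helper further down wraps this pattern: it finds a free slot itself and sets the tentative state.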
If the address is zoned, it must have the correct zone + * for the given netif, or no match will be found. + * + * @param netif the netif to check + * @param ip6addr the IPv6 address to find + * @return >= 0: address found, this is its index + * -1: address not found on this netif + */ +s8_t +netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr) +{ + s8_t i; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_get_ip6_addr_match: invalid netif", netif != NULL); + LWIP_ASSERT("netif_get_ip6_addr_match: invalid ip6addr", ip6addr != NULL); + +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_zone(ip6addr) && !ip6_addr_test_zone(ip6addr, netif)) { + return -1; /* wrong zone, no match */ + } +#endif /* LWIP_IPV6_SCOPES */ + + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp_zoneless(netif_ip6_addr(netif, i), ip6addr)) { + return i; + } + } + return -1; +} + +/** + * @ingroup netif_ip6 + * Create a link-local IPv6 address on a netif (stored in slot 0) + * + * @param netif the netif to create the address on + * @param from_mac_48bit if != 0, assume hwadr is a 48-bit MAC address (std conversion) + * if == 0, use hwaddr directly as interface ID + */ +void +netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit) +{ + u8_t i, addr_index; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_create_ip6_linklocal_address: invalid netif", netif != NULL); + + /* Link-local prefix. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[0] = PP_HTONL(0xfe800000ul); + ip_2_ip6(&netif->ip6_addr[0])->addr[1] = 0; + + /* Generate interface ID. */ + if (from_mac_48bit) { + /* Assume hwaddr is a 48-bit IEEE 802 MAC. Convert to EUI-64 address. Complement Group bit. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[2] = lwip_htonl((((u32_t)(netif->hwaddr[0] ^ 0x02)) << 24) | + ((u32_t)(netif->hwaddr[1]) << 16) | + ((u32_t)(netif->hwaddr[2]) << 8) | + (0xff)); + ip_2_ip6(&netif->ip6_addr[0])->addr[3] = lwip_htonl((u32_t)(0xfeul << 24) | + ((u32_t)(netif->hwaddr[3]) << 16) | + ((u32_t)(netif->hwaddr[4]) << 8) | + (netif->hwaddr[5])); + } else { + /* Use hwaddr directly as interface ID. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[2] = 0; + ip_2_ip6(&netif->ip6_addr[0])->addr[3] = 0; + + addr_index = 3; + for (i = 0; (i < 8) && (i < netif->hwaddr_len); i++) { + if (i == 4) { + addr_index--; + } + ip_2_ip6(&netif->ip6_addr[0])->addr[addr_index] |= lwip_htonl(((u32_t)(netif->hwaddr[netif->hwaddr_len - i - 1])) << (8 * (i & 0x03))); + } + } + + /* Set a link-local zone. Even though the zone is implied by the owning + * netif, setting the zone anyway has two important conceptual advantages: + * 1) it avoids the need for a ton of exceptions in internal code, allowing + * e.g. ip6_addr_cmp() to be used on local addresses; + * 2) the properly zoned address is visible externally, e.g. when any outside + * code enumerates available addresses or uses one to bind a socket. + * Any external code unaware of address scoping is likely to just ignore the + * zone field, so this should not create any compatibility problems. */ + ip6_addr_assign_zone(ip_2_ip6(&netif->ip6_addr[0]), IP6_UNICAST, netif); + + /* Set address state. */ +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS + /* Will perform duplicate address detection (DAD). */ + netif_ip6_addr_set_state(netif, 0, IP6_ADDR_TENTATIVE); +#else + /* Consider address valid. 
*/ + netif_ip6_addr_set_state(netif, 0, IP6_ADDR_PREFERRED); +#endif /* LWIP_IPV6_AUTOCONFIG */ +} + +/** + * @ingroup netif_ip6 + * This function allows for the easy addition of a new IPv6 address to an interface. + * It takes care of finding an empty slot and then sets the address tentative + * (to make sure that all the subsequent processing happens). + * + * @param netif netif to add the address on + * @param ip6addr address to add + * @param chosen_idx if != NULL, the chosen IPv6 address index will be stored here + */ +err_t +netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx) +{ + s8_t i; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_add_ip6_address: invalid netif", netif != NULL); + LWIP_ASSERT("netif_add_ip6_address: invalid ip6addr", ip6addr != NULL); + + i = netif_get_ip6_addr_match(netif, ip6addr); + if (i >= 0) { + /* Address already added */ + if (chosen_idx != NULL) { + *chosen_idx = i; + } + return ERR_OK; + } + + /* Find a free slot. The first one is reserved for link-local addresses. */ + for (i = ip6_addr_islinklocal(ip6addr) ? 0 : 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) { + ip_addr_copy_from_ip6(netif->ip6_addr[i], *ip6addr); + ip6_addr_assign_zone(ip_2_ip6(&netif->ip6_addr[i]), IP6_UNICAST, netif); + netif_ip6_addr_set_state(netif, i, IP6_ADDR_TENTATIVE); + if (chosen_idx != NULL) { + *chosen_idx = i; + } + return ERR_OK; + } + } + + if (chosen_idx != NULL) { + *chosen_idx = -1; + } + return ERR_VAL; +} + +/** Dummy IPv6 output function for netifs not supporting IPv6 + */ +static err_t +netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(netif); + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(ipaddr); + + return ERR_IF; +} +#endif /* LWIP_IPV6 */ + +#if LWIP_IPV4 +/** Dummy IPv4 output function for netifs not supporting IPv4 + */ +static err_t +netif_null_output_ip4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(netif); + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(ipaddr); + + return ERR_IF; +} +#endif /* LWIP_IPV4 */ + +/** +* @ingroup netif +* Return the interface index for the netif with name +* or NETIF_NO_INDEX if not found/on error +* +* @param name the name of the netif +*/ +u8_t +netif_name_to_index(const char *name) +{ + struct netif *netif = netif_find(name); + if (netif != NULL) { + return netif_get_index(netif); + } + /* No name found, return invalid index */ + return NETIF_NO_INDEX; +} + +/** +* @ingroup netif +* Return the interface name for the netif matching index +* or NULL if not found/on error +* +* @param idx the interface index of the netif +* @param name char buffer of at least NETIF_NAMESIZE bytes +*/ +char * +netif_index_to_name(u8_t idx, char *name) +{ + struct netif *netif = netif_get_by_index(idx); + + if (netif != NULL) { + name[0] = netif->name[0]; + name[1] = netif->name[1]; + lwip_itoa(&name[2], NETIF_NAMESIZE - 2, netif_index_to_num(idx)); + return name; + } + return NULL; +} + +/** +* @ingroup netif +* Return the interface for the netif index +* +* @param idx index of netif to find +*/ +struct netif * +netif_get_by_index(u8_t idx) +{ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + if (idx != NETIF_NO_INDEX) { + NETIF_FOREACH(netif) { + if (idx == netif_get_index(netif)) { + return netif; /* found! 
*/ + } + } + } + + return NULL; +} + +/** + * @ingroup netif + * Find a network interface by searching for its name + * + * @param name the name of the netif (like netif->name) plus concatenated number + * in ascii representation (e.g. 'en0') + */ +struct netif * +netif_find(const char *name) +{ + struct netif *netif; + u8_t num; + + LWIP_ASSERT_CORE_LOCKED(); + + if (name == NULL) { + return NULL; + } + + num = (u8_t)atoi(&name[2]); + + NETIF_FOREACH(netif) { + if (num == netif->num && + name[0] == netif->name[0] && + name[1] == netif->name[1]) { + LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: found %c%c\n", name[0], name[1])); + return netif; + } + } + LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: didn't find %c%c\n", name[0], name[1])); + return NULL; +} + +#if LWIP_NETIF_EXT_STATUS_CALLBACK +/** + * @ingroup netif + * Add extended netif events listener + * @param callback pointer to listener structure + * @param fn callback function + */ +void +netif_add_ext_callback(netif_ext_callback_t *callback, netif_ext_callback_fn fn) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("callback must be != NULL", callback != NULL); + LWIP_ASSERT("fn must be != NULL", fn != NULL); + + callback->callback_fn = fn; + callback->next = ext_callback; + ext_callback = callback; +} + +/** + * @ingroup netif + * Remove extended netif events listener + * @param callback pointer to listener structure + */ +void +netif_remove_ext_callback(netif_ext_callback_t* callback) +{ + netif_ext_callback_t *last, *iter; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("callback must be != NULL", callback != NULL); + + if (ext_callback == NULL) { + return; + } + + if (callback == ext_callback) { + ext_callback = ext_callback->next; + } else { + last = ext_callback; + for (iter = ext_callback->next; iter != NULL; last = iter, iter = iter->next) { + if (iter == callback) { + LWIP_ASSERT("last != NULL", last != NULL); + last->next = callback->next; + callback->next = NULL; + return; + } + } + } +} + +/** + * Invoke extended netif status event + * @param netif netif that is affected by change + * @param reason change reason + * @param args depends on reason, see reason description + */ +void +netif_invoke_ext_callback(struct netif *netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t *args) +{ + netif_ext_callback_t *callback = ext_callback; + + LWIP_ASSERT("netif must be != NULL", netif != NULL); + + while (callback != NULL) { + callback->callback_fn(netif, reason, args); + callback = callback->next; + } +} +#endif /* LWIP_NETIF_EXT_STATUS_CALLBACK */ diff --git a/Middlewares/Third_Party/LwIP/src/core/pbuf.c b/Middlewares/Third_Party/LwIP/src/core/pbuf.c new file mode 100644 index 00000000..a209e0ca --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/pbuf.c @@ -0,0 +1,1514 @@ +/** + * @file + * Packet buffer management + */ + +/** + * @defgroup pbuf Packet buffers (PBUF) + * @ingroup infrastructure + * + * Packets are built from the pbuf data structure. It supports dynamic + * memory allocation for packet contents or can reference externally + * managed packet contents both in RAM and ROM. Quick allocation for + * incoming packets is provided through pools with fixed sized pbufs. + * + * A packet may span over multiple pbufs, chained as a singly linked + * list. This is called a "pbuf chain". + * + * Multiple packets may be queued, also using this singly linked list. + * This is called a "packet queue". + * + * So, a packet queue consists of one or more pbuf chains, each of + * which consist of one or more pbufs. 
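To illustrate the listener API just shown: an extended-status callback receives a bitmask of LWIP_NSC_* reasons plus reason-specific arguments. A minimal sketch, assuming LWIP_NETIF_EXT_STATUS_CALLBACK is enabled in lwipopts.h and using illustrative names:

#include "lwip/netif.h"

#if LWIP_NETIF_EXT_STATUS_CALLBACK
static netif_ext_callback_t my_netif_listener;

static void on_netif_event(struct netif *netif, netif_nsc_reason_t reason,
                           const netif_ext_callback_args_t *args)
{
  LWIP_UNUSED_ARG(netif);
  LWIP_UNUSED_ARG(args);

  if (reason & LWIP_NSC_IPV4_ADDRESS_CHANGED) {
    /* e.g. re-announce services now that the IPv4 address changed */
  }
  if (reason & LWIP_NSC_LINK_CHANGED) {
    /* args->link_changed.state is 1 on link-up, 0 on link-down */
  }
}

static void register_netif_listener(void)
{
  netif_add_ext_callback(&my_netif_listener, on_netif_event);
}
#endif /* LWIP_NETIF_EXT_STATUS_CALLBACK */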
CURRENTLY, PACKET QUEUES ARE + * NOT SUPPORTED!!! Use helper structs to queue multiple packets. + * + * The differences between a pbuf chain and a packet queue are very + * precise but subtle. + * + * The last pbuf of a packet has a ->tot_len field that equals the + * ->len field. It can be found by traversing the list. If the last + * pbuf of a packet has a ->next field other than NULL, more packets + * are on the queue. + * + * Therefore, looping through a pbuf of a single packet, has an + * loop end condition (tot_len == p->len), NOT (next == NULL). + * + * Example of custom pbuf usage: @ref zerocopyrx + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/stats.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/sys.h" +#include "lwip/netif.h" +#if LWIP_TCP && TCP_QUEUE_OOSEQ +#include "lwip/priv/tcp_priv.h" +#endif +#if LWIP_CHECKSUM_ON_COPY +#include "lwip/inet_chksum.h" +#endif + +#include + +#define SIZEOF_STRUCT_PBUF LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf)) +/* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically + aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. 
*/ +#define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE) + +static const struct pbuf * +pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset); + +#if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ +#define PBUF_POOL_IS_EMPTY() +#else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ + +#if !NO_SYS +#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL +#include "lwip/tcpip.h" +#define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL() do { \ + if (tcpip_try_callback(pbuf_free_ooseq_callback, NULL) != ERR_OK) { \ + SYS_ARCH_PROTECT(old_level); \ + pbuf_free_ooseq_pending = 0; \ + SYS_ARCH_UNPROTECT(old_level); \ + } } while(0) +#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ +#endif /* !NO_SYS */ + +volatile u8_t pbuf_free_ooseq_pending; +#define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty() + +/** + * Attempt to reclaim some memory from queued out-of-sequence TCP segments + * if we run out of pool pbufs. It's better to give priority to new packets + * if we're running out. + * + * This must be done in the correct thread context therefore this function + * can only be used with NO_SYS=0 and through tcpip_callback. + */ +#if !NO_SYS +static +#endif /* !NO_SYS */ +void +pbuf_free_ooseq(void) +{ + struct tcp_pcb *pcb; + SYS_ARCH_SET(pbuf_free_ooseq_pending, 0); + + for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) { + if (pcb->ooseq != NULL) { + /** Free the ooseq pbufs of one PCB only */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n")); + tcp_free_ooseq(pcb); + return; + } + } +} + +#if !NO_SYS +/** + * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq(). + */ +static void +pbuf_free_ooseq_callback(void *arg) +{ + LWIP_UNUSED_ARG(arg); + pbuf_free_ooseq(); +} +#endif /* !NO_SYS */ + +/** Queue a call to pbuf_free_ooseq if not already queued. */ +static void +pbuf_pool_is_empty(void) +{ +#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL + SYS_ARCH_SET(pbuf_free_ooseq_pending, 1); +#else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ + u8_t queued; + SYS_ARCH_DECL_PROTECT(old_level); + SYS_ARCH_PROTECT(old_level); + queued = pbuf_free_ooseq_pending; + pbuf_free_ooseq_pending = 1; + SYS_ARCH_UNPROTECT(old_level); + + if (!queued) { + /* queue a call to pbuf_free_ooseq if not already queued */ + PBUF_POOL_FREE_OOSEQ_QUEUE_CALL(); + } +#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ +} +#endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ + +/* Initialize members of struct pbuf after allocation */ +static void +pbuf_init_alloced_pbuf(struct pbuf *p, void *payload, u16_t tot_len, u16_t len, pbuf_type type, u8_t flags) +{ + p->next = NULL; + p->payload = payload; + p->tot_len = tot_len; + p->len = len; + p->type_internal = (u8_t)type; + p->flags = flags; + p->ref = 1; + p->if_idx = NETIF_NO_INDEX; +} + +/** + * @ingroup pbuf + * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type). + * + * The actual memory allocated for the pbuf is determined by the + * layer at which the pbuf is allocated and the requested size + * (from the size parameter). + * + * @param layer header size + * @param length size of the pbuf's payload + * @param type this parameter decides how and where the pbuf + * should be allocated as follows: + * + * - PBUF_RAM: buffer memory for pbuf is allocated as one large + * chunk. This includes protocol headers as well. + * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for + * protocol headers. 
Additional headers must be prepended + * by allocating another pbuf and chain in to the front of + * the ROM pbuf. It is assumed that the memory used is really + * similar to ROM in that it is immutable and will not be + * changed. Memory which is dynamic should generally not + * be attached to PBUF_ROM pbufs. Use PBUF_REF instead. + * - PBUF_REF: no buffer memory is allocated for the pbuf, even for + * protocol headers. It is assumed that the pbuf is only + * being used in a single thread. If the pbuf gets queued, + * then pbuf_take should be called to copy the buffer. + * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from + * the pbuf pool that is allocated during pbuf_init(). + * + * @return the allocated pbuf. If multiple pbufs where allocated, this + * is the first pbuf of a pbuf chain. + */ +struct pbuf * +pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type) +{ + struct pbuf *p; + u16_t offset = (u16_t)layer; + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length)); + + switch (type) { + case PBUF_REF: /* fall through */ + case PBUF_ROM: + p = pbuf_alloc_reference(NULL, length, type); + break; + case PBUF_POOL: { + struct pbuf *q, *last; + u16_t rem_len; /* remaining length */ + p = NULL; + last = NULL; + rem_len = length; + do { + u16_t qlen; + q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL); + if (q == NULL) { + PBUF_POOL_IS_EMPTY(); + /* free chain so far allocated */ + if (p) { + pbuf_free(p); + } + /* bail out unsuccessfully */ + return NULL; + } + qlen = LWIP_MIN(rem_len, (u16_t)(PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset))); + pbuf_init_alloced_pbuf(q, LWIP_MEM_ALIGN((void *)((u8_t *)q + SIZEOF_STRUCT_PBUF + offset)), + rem_len, qlen, type, 0); + LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned", + ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0); + LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT", + (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 ); + if (p == NULL) { + /* allocated head of pbuf chain (into p) */ + p = q; + } else { + /* make previous pbuf point to this pbuf */ + last->next = q; + } + last = q; + rem_len = (u16_t)(rem_len - qlen); + offset = 0; + } while (rem_len > 0); + break; + } + case PBUF_RAM: { + u16_t payload_len = (u16_t)(LWIP_MEM_ALIGN_SIZE(offset) + LWIP_MEM_ALIGN_SIZE(length)); + mem_size_t alloc_len = (mem_size_t)(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF) + payload_len); + + /* bug #50040: Check for integer overflow when calculating alloc_len */ + if ((payload_len < LWIP_MEM_ALIGN_SIZE(length)) || + (alloc_len < LWIP_MEM_ALIGN_SIZE(length))) { + return NULL; + } + + /* If pbuf is to be allocated in RAM, allocate memory for it. */ + p = (struct pbuf *)mem_malloc(alloc_len); + if (p == NULL) { + return NULL; + } + pbuf_init_alloced_pbuf(p, LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset)), + length, length, type, 0); + LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned", + ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0); + break; + } + default: + LWIP_ASSERT("pbuf_alloc: erroneous type", 0); + return NULL; + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p)); + return p; +} + +/** + * @ingroup pbuf + * Allocates a pbuf for referenced data. + * Referenced data can be volatile (PBUF_REF) or long-lived (PBUF_ROM). + * + * The actual memory allocated for the pbuf is determined by the + * layer at which the pbuf is allocated and the requested size + * (from the size parameter). 
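A small usage sketch for pbuf_alloc() as documented above: allocate a PBUF_RAM pbuf at the transport layer, copy application data into it with lwIP's pbuf_take(), and release it with pbuf_free() on error. Application-side code; the helper name is illustrative:

#include "lwip/pbuf.h"

static struct pbuf *make_payload_pbuf(const void *data, u16_t len)
{
  /* PBUF_TRANSPORT reserves headroom so the stack can later prepend
     transport/IP/link headers via pbuf_add_header(). */
  struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM);

  if (p != NULL) {
    if (pbuf_take(p, data, len) != ERR_OK) {
      pbuf_free(p);
      p = NULL;
    }
  }
  return p;
}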
+ * + * @param payload referenced payload + * @param length size of the pbuf's payload + * @param type this parameter decides how and where the pbuf + * should be allocated as follows: + * + * - PBUF_ROM: It is assumed that the memory used is really + * similar to ROM in that it is immutable and will not be + * changed. Memory which is dynamic should generally not + * be attached to PBUF_ROM pbufs. Use PBUF_REF instead. + * - PBUF_REF: It is assumed that the pbuf is only + * being used in a single thread. If the pbuf gets queued, + * then pbuf_take should be called to copy the buffer. + * + * @return the allocated pbuf. + */ +struct pbuf * +pbuf_alloc_reference(void *payload, u16_t length, pbuf_type type) +{ + struct pbuf *p; + LWIP_ASSERT("invalid pbuf_type", (type == PBUF_REF) || (type == PBUF_ROM)); + /* only allocate memory for the pbuf structure */ + p = (struct pbuf *)memp_malloc(MEMP_PBUF); + if (p == NULL) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("pbuf_alloc_reference: Could not allocate MEMP_PBUF for PBUF_%s.\n", + (type == PBUF_ROM) ? "ROM" : "REF")); + return NULL; + } + pbuf_init_alloced_pbuf(p, payload, length, length, type, 0); + return p; +} + + +#if LWIP_SUPPORT_CUSTOM_PBUF +/** + * @ingroup pbuf + * Initialize a custom pbuf (already allocated). + * Example of custom pbuf usage: @ref zerocopyrx + * + * @param l header size + * @param length size of the pbuf's payload + * @param type type of the pbuf (only used to treat the pbuf accordingly, as + * this function allocates no memory) + * @param p pointer to the custom pbuf to initialize (already allocated) + * @param payload_mem pointer to the buffer that is used for payload and headers, + * must be at least big enough to hold 'length' plus the header size, + * may be NULL if set later. + * ATTENTION: The caller is responsible for correct alignment of this buffer!! + * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least + * big enough to hold 'length' plus the header size + */ +struct pbuf * +pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p, + void *payload_mem, u16_t payload_mem_len) +{ + u16_t offset = (u16_t)l; + void *payload; + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length)); + + if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length)); + return NULL; + } + + if (payload_mem != NULL) { + payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset); + } else { + payload = NULL; + } + pbuf_init_alloced_pbuf(&p->pbuf, payload, length, length, type, PBUF_FLAG_IS_CUSTOM); + return &p->pbuf; +} +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + +/** + * @ingroup pbuf + * Shrink a pbuf chain to a desired length. + * + * @param p pbuf to shrink. + * @param new_len desired new length of pbuf chain + * + * Depending on the desired length, the first few pbufs in a chain might + * be skipped and left unchanged. The new last pbuf in the chain will be + * resized, and any remaining pbufs will be freed. + * + * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted. + * @note May not be called on a packet queue. + * + * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain). 
+ */ +void +pbuf_realloc(struct pbuf *p, u16_t new_len) +{ + struct pbuf *q; + u16_t rem_len; /* remaining length */ + u16_t shrink; + + LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL); + + /* desired length larger than current length? */ + if (new_len >= p->tot_len) { + /* enlarging not yet supported */ + return; + } + + /* the pbuf chain grows by (new_len - p->tot_len) bytes + * (which may be negative in case of shrinking) */ + shrink = (u16_t)(p->tot_len - new_len); + + /* first, step over any pbufs that should remain in the chain */ + rem_len = new_len; + q = p; + /* should this pbuf be kept? */ + while (rem_len > q->len) { + /* decrease remaining length by pbuf length */ + rem_len = (u16_t)(rem_len - q->len); + /* decrease total length indicator */ + q->tot_len = (u16_t)(q->tot_len - shrink); + /* proceed to next pbuf in chain */ + q = q->next; + LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL); + } + /* we have now reached the new last pbuf (in q) */ + /* rem_len == desired length for pbuf q */ + + /* shrink allocated memory for PBUF_RAM */ + /* (other types merely adjust their length fields */ + if (pbuf_match_allocsrc(q, PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) && (rem_len != q->len) +#if LWIP_SUPPORT_CUSTOM_PBUF + && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0) +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + ) { + /* reallocate and adjust the length of the pbuf that will be split */ + q = (struct pbuf *)mem_trim(q, (mem_size_t)(((u8_t *)q->payload - (u8_t *)q) + rem_len)); + LWIP_ASSERT("mem_trim returned q == NULL", q != NULL); + } + /* adjust length fields for new last pbuf */ + q->len = rem_len; + q->tot_len = q->len; + + /* any remaining pbufs in chain? */ + if (q->next != NULL) { + /* free remaining pbufs in chain */ + pbuf_free(q->next); + } + /* q is last packet in chain */ + q->next = NULL; + +} + +/** + * Adjusts the payload pointer to reveal headers in the payload. + * @see pbuf_add_header. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size. + * @param force Allow 'header_size_increment > 0' for PBUF_REF/PBUF_ROM types + * + * @return non-zero on failure, zero on success. + * + */ +static u8_t +pbuf_add_header_impl(struct pbuf *p, size_t header_size_increment, u8_t force) +{ + u16_t type_internal; + void *payload; + u16_t increment_magnitude; + + LWIP_ASSERT("p != NULL", p != NULL); + if ((p == NULL) || (header_size_increment > 0xFFFF)) { + return 1; + } + if (header_size_increment == 0) { + return 0; + } + + increment_magnitude = (u16_t)header_size_increment; + /* Do not allow tot_len to wrap as a result. */ + if ((u16_t)(increment_magnitude + p->tot_len) < increment_magnitude) { + return 1; + } + + type_internal = p->type_internal; + + /* pbuf types containing payloads? */ + if (type_internal & PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS) { + /* set new payload pointer */ + payload = (u8_t *)p->payload - header_size_increment; + /* boundary check fails? */ + if ((u8_t *)payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) { + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, + ("pbuf_add_header: failed as %p < %p (not enough space for new header size)\n", + (void *)payload, (void *)((u8_t *)p + SIZEOF_STRUCT_PBUF))); + /* bail out unsuccessfully */ + return 1; + } + /* pbuf types referring to external payloads? */ + } else { + /* hide a header in the payload? */ + if (force) { + payload = (u8_t *)p->payload - header_size_increment; + } else { + /* cannot expand payload to front (yet!) 
+ * bail out unsuccessfully */ + return 1; + } + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_add_header: old %p new %p (%"U16_F")\n", + (void *)p->payload, (void *)payload, increment_magnitude)); + + /* modify pbuf fields */ + p->payload = payload; + p->len = (u16_t)(p->len + increment_magnitude); + p->tot_len = (u16_t)(p->tot_len + increment_magnitude); + + + return 0; +} + +/** + * Adjusts the payload pointer to reveal headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * appears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size which + * increases the size of the pbuf. New space is on the front. + * If header_size_increment is 0, this function does nothing and returns successful. + * + * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so + * the call will fail. A check is made that the increase in header size does + * not move the payload pointer in front of the start of the buffer. + * + * @return non-zero on failure, zero on success. + * + */ +u8_t +pbuf_add_header(struct pbuf *p, size_t header_size_increment) +{ + return pbuf_add_header_impl(p, header_size_increment, 0); +} + +/** + * Same as @ref pbuf_add_header but does not check if 'header_size > 0' is allowed. + * This is used internally only, to allow PBUF_REF for RX. + */ +u8_t +pbuf_add_header_force(struct pbuf *p, size_t header_size_increment) +{ + return pbuf_add_header_impl(p, header_size_increment, 1); +} + +/** + * Adjusts the payload pointer to hide headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * disappears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_decrement Number of bytes to decrement header size which + * decreases the size of the pbuf. + * If header_size_decrement is 0, this function does nothing and returns successful. + * @return non-zero on failure, zero on success. 
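To make the pbuf_add_header()/pbuf_remove_header() pair concrete, a hedged sketch that temporarily hides an application-level tag at the front of a pbuf and reveals it again before returning; APP_HDR_LEN and the processing step are illustrative assumptions:

#include "lwip/pbuf.h"

#define APP_HDR_LEN 4  /* illustrative application header size */

static void process_without_app_header(struct pbuf *p)
{
  /* Move the payload pointer past the application header. */
  if (pbuf_remove_header(p, APP_HDR_LEN) != 0) {
    return;  /* pbuf shorter than the header */
  }

  /* ... process the remaining payload here ... */

  /* Restore the original payload pointer. For contiguous pbufs this simply
     moves back into the space freed above; PBUF_REF/PBUF_ROM pbufs would
     refuse the non-forced growth. */
  (void)pbuf_add_header(p, APP_HDR_LEN);
}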
+ * + */ +u8_t +pbuf_remove_header(struct pbuf *p, size_t header_size_decrement) +{ + void *payload; + u16_t increment_magnitude; + + LWIP_ASSERT("p != NULL", p != NULL); + if ((p == NULL) || (header_size_decrement > 0xFFFF)) { + return 1; + } + if (header_size_decrement == 0) { + return 0; + } + + increment_magnitude = (u16_t)header_size_decrement; + /* Check that we aren't going to move off the end of the pbuf */ + LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;); + + /* remember current payload pointer */ + payload = p->payload; + LWIP_UNUSED_ARG(payload); /* only used in LWIP_DEBUGF below */ + + /* increase payload pointer (guarded by length check above) */ + p->payload = (u8_t *)p->payload + header_size_decrement; + /* modify pbuf length fields */ + p->len = (u16_t)(p->len - increment_magnitude); + p->tot_len = (u16_t)(p->tot_len - increment_magnitude); + + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_remove_header: old %p new %p (%"U16_F")\n", + (void *)payload, (void *)p->payload, increment_magnitude)); + + return 0; +} + +static u8_t +pbuf_header_impl(struct pbuf *p, s16_t header_size_increment, u8_t force) +{ + if (header_size_increment < 0) { + return pbuf_remove_header(p, (size_t) - header_size_increment); + } else { + return pbuf_add_header_impl(p, (size_t)header_size_increment, force); + } +} + +/** + * Adjusts the payload pointer to hide or reveal headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * (dis)appears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size which + * increases the size of the pbuf. New space is on the front. + * (Using a negative value decreases the header size.) + * If header_size_increment is 0, this function does nothing and returns successful. + * + * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so + * the call will fail. A check is made that the increase in header size does + * not move the payload pointer in front of the start of the buffer. + * @return non-zero on failure, zero on success. + * + */ +u8_t +pbuf_header(struct pbuf *p, s16_t header_size_increment) +{ + return pbuf_header_impl(p, header_size_increment, 0); +} + +/** + * Same as pbuf_header but does not check if 'header_size > 0' is allowed. + * This is used internally only, to allow PBUF_REF for RX. + */ +u8_t +pbuf_header_force(struct pbuf *p, s16_t header_size_increment) +{ + return pbuf_header_impl(p, header_size_increment, 1); +} + +/** Similar to pbuf_header(-size) but de-refs header pbufs for (size >= p->len) + * + * @param q pbufs to operate on + * @param size The number of bytes to remove from the beginning of the pbuf list. + * While size >= p->len, pbufs are freed. + * ATTENTION: this is the opposite direction as @ref pbuf_header, but + * takes an u16_t not s16_t! + * @return the new head pbuf + */ +struct pbuf * +pbuf_free_header(struct pbuf *q, u16_t size) +{ + struct pbuf *p = q; + u16_t free_left = size; + while (free_left && p) { + if (free_left >= p->len) { + struct pbuf *f = p; + free_left = (u16_t)(free_left - p->len); + p = p->next; + f->next = 0; + pbuf_free(f); + } else { + pbuf_remove_header(p, free_left); + free_left = 0; + } + } + return p; +} + +/** + * @ingroup pbuf + * Dereference a pbuf chain or queue and deallocate any no-longer-used + * pbufs at the head of this chain or queue. 
+ * + * Decrements the pbuf reference count. If it reaches zero, the pbuf is + * deallocated. + * + * For a pbuf chain, this is repeated for each pbuf in the chain, + * up to the first pbuf which has a non-zero reference count after + * decrementing. So, when all reference counts are one, the whole + * chain is free'd. + * + * @param p The pbuf (chain) to be dereferenced. + * + * @return the number of pbufs that were de-allocated + * from the head of the chain. + * + * @note MUST NOT be called on a packet queue (Not verified to work yet). + * @note the reference counter of a pbuf equals the number of pointers + * that refer to the pbuf (or into the pbuf). + * + * @internal examples: + * + * Assuming existing chains a->b->c with the following reference + * counts, calling pbuf_free(a) results in: + * + * 1->2->3 becomes ...1->3 + * 3->3->3 becomes 2->3->3 + * 1->1->2 becomes ......1 + * 2->1->1 becomes 1->1->1 + * 1->1->1 becomes ....... + * + */ +u8_t +pbuf_free(struct pbuf *p) +{ + u8_t alloc_src; + struct pbuf *q; + u8_t count; + + if (p == NULL) { + LWIP_ASSERT("p != NULL", p != NULL); + /* if assertions are disabled, proceed with debug output */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("pbuf_free(p == NULL) was called.\n")); + return 0; + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p)); + + PERF_START; + + count = 0; + /* de-allocate all consecutive pbufs from the head of the chain that + * obtain a zero reference count after decrementing*/ + while (p != NULL) { + LWIP_PBUF_REF_T ref; + SYS_ARCH_DECL_PROTECT(old_level); + /* Since decrementing ref cannot be guaranteed to be a single machine operation + * we must protect it. We put the new ref into a local variable to prevent + * further protection. */ + SYS_ARCH_PROTECT(old_level); + /* all pbufs in a chain are referenced at least once */ + LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0); + /* decrease reference count (number of pointers to pbuf) */ + ref = --(p->ref); + SYS_ARCH_UNPROTECT(old_level); + /* this pbuf is no longer referenced to? */ + if (ref == 0) { + /* remember next pbuf in chain for next iteration */ + q = p->next; + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p)); + alloc_src = pbuf_get_allocsrc(p); +#if LWIP_SUPPORT_CUSTOM_PBUF + /* is this a custom pbuf? */ + if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) { + struct pbuf_custom *pc = (struct pbuf_custom *)p; + LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL); + pc->custom_free_function(p); + } else +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + { + /* is this a pbuf from the pool? */ + if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL) { + memp_free(MEMP_PBUF_POOL, p); + /* is this a ROM or RAM referencing pbuf? 
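+ * (PBUF_ROM/PBUF_REF pbufs keep their payload in external memory;
+ * only the struct pbuf itself comes from the MEMP_PBUF pool)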
*/ + } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF) { + memp_free(MEMP_PBUF, p); + /* type == PBUF_RAM */ + } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) { + mem_free(p); + } else { + /* @todo: support freeing other types */ + LWIP_ASSERT("invalid pbuf type", 0); + } + } + count++; + /* proceed to next pbuf */ + p = q; + /* p->ref > 0, this pbuf is still referenced to */ + /* (and so the remaining pbufs in chain as well) */ + } else { + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, (u16_t)ref)); + /* stop walking through the chain */ + p = NULL; + } + } + PERF_STOP("pbuf_free"); + /* return number of de-allocated pbufs */ + return count; +} + +/** + * Count number of pbufs in a chain + * + * @param p first pbuf of chain + * @return the number of pbufs in a chain + */ +u16_t +pbuf_clen(const struct pbuf *p) +{ + u16_t len; + + len = 0; + while (p != NULL) { + ++len; + p = p->next; + } + return len; +} + +/** + * @ingroup pbuf + * Increment the reference count of the pbuf. + * + * @param p pbuf to increase reference counter of + * + */ +void +pbuf_ref(struct pbuf *p) +{ + /* pbuf given? */ + if (p != NULL) { + SYS_ARCH_SET(p->ref, (LWIP_PBUF_REF_T)(p->ref + 1)); + LWIP_ASSERT("pbuf ref overflow", p->ref > 0); + } +} + +/** + * @ingroup pbuf + * Concatenate two pbufs (each may be a pbuf chain) and take over + * the caller's reference of the tail pbuf. + * + * @note The caller MAY NOT reference the tail pbuf afterwards. + * Use pbuf_chain() for that purpose. + * + * This function explicitly does not check for tot_len overflow to prevent + * failing to queue too long pbufs. This can produce invalid pbufs, so + * handle with care! + * + * @see pbuf_chain() + */ +void +pbuf_cat(struct pbuf *h, struct pbuf *t) +{ + struct pbuf *p; + + LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)", + ((h != NULL) && (t != NULL)), return;); + + /* proceed to last pbuf of chain */ + for (p = h; p->next != NULL; p = p->next) { + /* add total length of second chain to all totals of first chain */ + p->tot_len = (u16_t)(p->tot_len + t->tot_len); + } + /* { p is last pbuf of first h chain, p->next == NULL } */ + LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len); + LWIP_ASSERT("p->next == NULL", p->next == NULL); + /* add total length of second chain to last pbuf total of first chain */ + p->tot_len = (u16_t)(p->tot_len + t->tot_len); + /* chain last pbuf of head (p) with first of tail (t) */ + p->next = t; + /* p->next now references t, but the caller will drop its reference to t, + * so netto there is no change to the reference count of t. + */ +} + +/** + * @ingroup pbuf + * Chain two pbufs (or pbuf chains) together. + * + * The caller MUST call pbuf_free(t) once it has stopped + * using it. Use pbuf_cat() instead if you no longer use t. + * + * @param h head pbuf (chain) + * @param t tail pbuf (chain) + * @note The pbufs MUST belong to the same packet. + * @note MAY NOT be called on a packet queue. + * + * The ->tot_len fields of all pbufs of the head chain are adjusted. + * The ->next field of the last pbuf of the head chain is adjusted. + * The ->ref field of the first pbuf of the tail chain is adjusted. 
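+ *
+ * Illustrative usage sketch (hypothetical application code; 'h' and 't' are
+ * assumed to be separately allocated pbufs belonging to the same packet):
+ *
+ *   pbuf_chain(h, t);   // h now references t, t->ref was incremented
+ *   pbuf_free(t);       // caller drops its own reference to t
+ *   pbuf_free(h);       // frees the whole chain, including t
+ *
+ * With pbuf_cat(h, t) the caller's reference to t is taken over instead,
+ * so the pbuf_free(t) call must be omitted.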
+ * + */ +void +pbuf_chain(struct pbuf *h, struct pbuf *t) +{ + pbuf_cat(h, t); + /* t is now referenced by h */ + pbuf_ref(t); + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t)); +} + +/** + * Dechains the first pbuf from its succeeding pbufs in the chain. + * + * Makes p->tot_len field equal to p->len. + * @param p pbuf to dechain + * @return remainder of the pbuf chain, or NULL if it was de-allocated. + * @note May not be called on a packet queue. + */ +struct pbuf * +pbuf_dechain(struct pbuf *p) +{ + struct pbuf *q; + u8_t tail_gone = 1; + /* tail */ + q = p->next; + /* pbuf has successor in chain? */ + if (q != NULL) { + /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ + LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len); + /* enforce invariant if assertion is disabled */ + q->tot_len = (u16_t)(p->tot_len - p->len); + /* decouple pbuf from remainder */ + p->next = NULL; + /* total length of pbuf p is its own length only */ + p->tot_len = p->len; + /* q is no longer referenced by p, free it */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q)); + tail_gone = pbuf_free(q); + if (tail_gone > 0) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, + ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q)); + } + /* return remaining tail or NULL if deallocated */ + } + /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ + LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len); + return ((tail_gone > 0) ? NULL : q); +} + +/** + * @ingroup pbuf + * Create PBUF_RAM copies of pbufs. + * + * Used to queue packets on behalf of the lwIP stack, such as + * ARP based queueing. + * + * @note You MUST explicitly use p = pbuf_take(p); + * + * @note Only one packet is copied, no packet queue! + * + * @param p_to pbuf destination of the copy + * @param p_from pbuf source of the copy + * + * @return ERR_OK if pbuf was copied + * ERR_ARG if one of the pbufs is NULL or p_to is not big + * enough to hold p_from + */ +err_t +pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from) +{ + size_t offset_to = 0, offset_from = 0, len; + + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n", + (const void *)p_to, (const void *)p_from)); + + /* is the target big enough to hold the source? 
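+ * (pbuf_copy() never grows the destination chain, so p_to->tot_len must
+ * already be at least p_from->tot_len)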
*/ + LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) && + (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;); + + /* iterate through pbuf chain */ + do { + /* copy one part of the original chain */ + if ((p_to->len - offset_to) >= (p_from->len - offset_from)) { + /* complete current p_from fits into current p_to */ + len = p_from->len - offset_from; + } else { + /* current p_from does not fit into current p_to */ + len = p_to->len - offset_to; + } + MEMCPY((u8_t *)p_to->payload + offset_to, (u8_t *)p_from->payload + offset_from, len); + offset_to += len; + offset_from += len; + LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len); + LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len); + if (offset_from >= p_from->len) { + /* on to next p_from (if any) */ + offset_from = 0; + p_from = p_from->next; + } + if (offset_to == p_to->len) { + /* on to next p_to (if any) */ + offset_to = 0; + p_to = p_to->next; + LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL), return ERR_ARG;); + } + + if ((p_from != NULL) && (p_from->len == p_from->tot_len)) { + /* don't copy more than one packet! */ + LWIP_ERROR("pbuf_copy() does not allow packet queues!", + (p_from->next == NULL), return ERR_VAL;); + } + if ((p_to != NULL) && (p_to->len == p_to->tot_len)) { + /* don't copy more than one packet! */ + LWIP_ERROR("pbuf_copy() does not allow packet queues!", + (p_to->next == NULL), return ERR_VAL;); + } + } while (p_from); + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n")); + return ERR_OK; +} + +/** + * @ingroup pbuf + * Copy (part of) the contents of a packet buffer + * to an application supplied buffer. + * + * @param buf the pbuf from which to copy data + * @param dataptr the application supplied buffer + * @param len length of data to copy (dataptr must be big enough). No more + * than buf->tot_len will be copied, irrespective of len + * @param offset offset into the packet buffer from where to begin copying len bytes + * @return the number of bytes copied, or 0 on failure + */ +u16_t +pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset) +{ + const struct pbuf *p; + u16_t left = 0; + u16_t buf_copy_len; + u16_t copied_total = 0; + + LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;); + LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;); + + /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */ + for (p = buf; len != 0 && p != NULL; p = p->next) { + if ((offset != 0) && (offset >= p->len)) { + /* don't copy from this buffer -> on to the next */ + offset = (u16_t)(offset - p->len); + } else { + /* copy from this buffer. maybe only partially. */ + buf_copy_len = (u16_t)(p->len - offset); + if (buf_copy_len > len) { + buf_copy_len = len; + } + /* copy the necessary parts of the buffer */ + MEMCPY(&((char *)dataptr)[left], &((char *)p->payload)[offset], buf_copy_len); + copied_total = (u16_t)(copied_total + buf_copy_len); + left = (u16_t)(left + buf_copy_len); + len = (u16_t)(len - buf_copy_len); + offset = 0; + } + } + return copied_total; +} + +/** + * @ingroup pbuf + * Get part of a pbuf's payload as contiguous memory. The returned memory is + * either a pointer into the pbuf's payload or, if split over multiple pbufs, + * a copy into the user-supplied buffer. 
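+ *
+ * Illustrative usage sketch (hypothetical application code; reads a 4-byte
+ * field starting at offset 'off' without caring whether it crosses a pbuf
+ * boundary):
+ *
+ *   u8_t scratch[4];
+ *   const u8_t *field = (const u8_t *)pbuf_get_contiguous(p, scratch,
+ *                                                         sizeof(scratch), 4, off);
+ *   if (field != NULL) {
+ *     // 'field' points either into the pbuf payload or into 'scratch'
+ *   }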
+ * + * @param p the pbuf from which to copy data + * @param buffer the application supplied buffer + * @param bufsize size of the application supplied buffer + * @param len length of data to copy (dataptr must be big enough). No more + * than buf->tot_len will be copied, irrespective of len + * @param offset offset into the packet buffer from where to begin copying len bytes + * @return the number of bytes copied, or 0 on failure + */ +void * +pbuf_get_contiguous(const struct pbuf *p, void *buffer, size_t bufsize, u16_t len, u16_t offset) +{ + const struct pbuf *q; + u16_t out_offset; + + LWIP_ERROR("pbuf_get_contiguous: invalid buf", (p != NULL), return NULL;); + LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (buffer != NULL), return NULL;); + LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (bufsize >= len), return NULL;); + + q = pbuf_skip_const(p, offset, &out_offset); + if (q != NULL) { + if (q->len >= (out_offset + len)) { + /* all data in this pbuf, return zero-copy */ + return (u8_t *)q->payload + out_offset; + } + /* need to copy */ + if (pbuf_copy_partial(q, buffer, len, out_offset) != len) { + /* copying failed: pbuf is too short */ + return NULL; + } + return buffer; + } + /* pbuf is too short (offset does not fit in) */ + return NULL; +} + +#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE +/** + * This method modifies a 'pbuf chain', so that its total length is + * smaller than 64K. The remainder of the original pbuf chain is stored + * in *rest. + * This function never creates new pbufs, but splits an existing chain + * in two parts. The tot_len of the modified packet queue will likely be + * smaller than 64K. + * 'packet queues' are not supported by this function. + * + * @param p the pbuf queue to be split + * @param rest pointer to store the remainder (after the first 64K) + */ +void pbuf_split_64k(struct pbuf *p, struct pbuf **rest) +{ + *rest = NULL; + if ((p != NULL) && (p->next != NULL)) { + u16_t tot_len_front = p->len; + struct pbuf *i = p; + struct pbuf *r = p->next; + + /* continue until the total length (summed up as u16_t) overflows */ + while ((r != NULL) && ((u16_t)(tot_len_front + r->len) >= tot_len_front)) { + tot_len_front = (u16_t)(tot_len_front + r->len); + i = r; + r = r->next; + } + /* i now points to last packet of the first segment. Set next + pointer to NULL */ + i->next = NULL; + + if (r != NULL) { + /* Update the tot_len field in the first part */ + for (i = p; i != NULL; i = i->next) { + i->tot_len = (u16_t)(i->tot_len - r->tot_len); + LWIP_ASSERT("tot_len/len mismatch in last pbuf", + (i->next != NULL) || (i->tot_len == i->len)); + } + if (p->flags & PBUF_FLAG_TCP_FIN) { + r->flags |= PBUF_FLAG_TCP_FIN; + } + + /* tot_len field in rest does not need modifications */ + /* reference counters do not need modifications */ + *rest = r; + } + } +} +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + +/* Actual implementation of pbuf_skip() but returning const pointer... 
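+ * (shared by pbuf_skip() and by const-correct callers such as
+ * pbuf_get_contiguous() and pbuf_try_get_at())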
*/ +static const struct pbuf * +pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset) +{ + u16_t offset_left = in_offset; + const struct pbuf *q = in; + + /* get the correct pbuf */ + while ((q != NULL) && (q->len <= offset_left)) { + offset_left = (u16_t)(offset_left - q->len); + q = q->next; + } + if (out_offset != NULL) { + *out_offset = offset_left; + } + return q; +} + +/** + * @ingroup pbuf + * Skip a number of bytes at the start of a pbuf + * + * @param in input pbuf + * @param in_offset offset to skip + * @param out_offset resulting offset in the returned pbuf + * @return the pbuf in the queue where the offset is + */ +struct pbuf * +pbuf_skip(struct pbuf *in, u16_t in_offset, u16_t *out_offset) +{ + const struct pbuf *out = pbuf_skip_const(in, in_offset, out_offset); + return LWIP_CONST_CAST(struct pbuf *, out); +} + +/** + * @ingroup pbuf + * Copy application supplied data into a pbuf. + * This function can only be used to copy the equivalent of buf->tot_len data. + * + * @param buf pbuf to fill with data + * @param dataptr application supplied data buffer + * @param len length of the application supplied data buffer + * + * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough + */ +err_t +pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len) +{ + struct pbuf *p; + size_t buf_copy_len; + size_t total_copy_len = len; + size_t copied_total = 0; + + LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;); + LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;); + LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;); + + if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) { + return ERR_ARG; + } + + /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. 
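+ * (MEMCPY is an lwIP macro that defaults to memcpy() and can be overridden
+ * by the port for such systems)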
*/ + for (p = buf; total_copy_len != 0; p = p->next) { + LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL); + buf_copy_len = total_copy_len; + if (buf_copy_len > p->len) { + /* this pbuf cannot hold all remaining data */ + buf_copy_len = p->len; + } + /* copy the necessary parts of the buffer */ + MEMCPY(p->payload, &((const char *)dataptr)[copied_total], buf_copy_len); + total_copy_len -= buf_copy_len; + copied_total += buf_copy_len; + } + LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len); + return ERR_OK; +} + +/** + * @ingroup pbuf + * Same as pbuf_take() but puts data at an offset + * + * @param buf pbuf to fill with data + * @param dataptr application supplied data buffer + * @param len length of the application supplied data buffer + * @param offset offset in pbuf where to copy dataptr to + * + * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough + */ +err_t +pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset) +{ + u16_t target_offset; + struct pbuf *q = pbuf_skip(buf, offset, &target_offset); + + /* return requested data if pbuf is OK */ + if ((q != NULL) && (q->tot_len >= target_offset + len)) { + u16_t remaining_len = len; + const u8_t *src_ptr = (const u8_t *)dataptr; + /* copy the part that goes into the first pbuf */ + u16_t first_copy_len; + LWIP_ASSERT("check pbuf_skip result", target_offset < q->len); + first_copy_len = (u16_t)LWIP_MIN(q->len - target_offset, len); + MEMCPY(((u8_t *)q->payload) + target_offset, dataptr, first_copy_len); + remaining_len = (u16_t)(remaining_len - first_copy_len); + src_ptr += first_copy_len; + if (remaining_len > 0) { + return pbuf_take(q->next, src_ptr, remaining_len); + } + return ERR_OK; + } + return ERR_MEM; +} + +/** + * @ingroup pbuf + * Creates a single pbuf out of a queue of pbufs. + * + * @remark: Either the source pbuf 'p' is freed by this function or the original + * pbuf 'p' is returned, therefore the caller has to check the result! + * + * @param p the source pbuf + * @param layer pbuf_layer of the new pbuf + * + * @return a new, single pbuf (p->next is NULL) + * or the old pbuf if allocation fails + */ +struct pbuf * +pbuf_coalesce(struct pbuf *p, pbuf_layer layer) +{ + struct pbuf *q; + if (p->next == NULL) { + return p; + } + q = pbuf_clone(layer, PBUF_RAM, p); + if (q == NULL) { + /* @todo: what do we do now? */ + return p; + } + pbuf_free(p); + return q; +} + +/** + * @ingroup pbuf + * Allocates a new pbuf of same length (via pbuf_alloc()) and copies the source + * pbuf into this new pbuf (using pbuf_copy()). + * + * @param layer pbuf_layer of the new pbuf + * @param type this parameter decides how and where the pbuf should be allocated + * (@see pbuf_alloc()) + * @param p the source pbuf + * + * @return a new pbuf or NULL if allocation fails + */ +struct pbuf * +pbuf_clone(pbuf_layer layer, pbuf_type type, struct pbuf *p) +{ + struct pbuf *q; + err_t err; + q = pbuf_alloc(layer, p->tot_len, type); + if (q == NULL) { + return NULL; + } + err = pbuf_copy(q, p); + LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("pbuf_copy failed", err == ERR_OK); + return q; +} + +#if LWIP_CHECKSUM_ON_COPY +/** + * Copies data into a single pbuf (*not* into a pbuf queue!) 
and updates + * the checksum while copying + * + * @param p the pbuf to copy data into + * @param start_offset offset of p->payload where to copy the data to + * @param dataptr data to copy into the pbuf + * @param len length of data to copy into the pbuf + * @param chksum pointer to the checksum which is updated + * @return ERR_OK if successful, another error if the data does not fit + * within the (first) pbuf (no pbuf queues!) + */ +err_t +pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr, + u16_t len, u16_t *chksum) +{ + u32_t acc; + u16_t copy_chksum; + char *dst_ptr; + LWIP_ASSERT("p != NULL", p != NULL); + LWIP_ASSERT("dataptr != NULL", dataptr != NULL); + LWIP_ASSERT("chksum != NULL", chksum != NULL); + LWIP_ASSERT("len != 0", len != 0); + + if ((start_offset >= p->len) || (start_offset + len > p->len)) { + return ERR_ARG; + } + + dst_ptr = ((char *)p->payload) + start_offset; + copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len); + if ((start_offset & 1) != 0) { + copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum); + } + acc = *chksum; + acc += copy_chksum; + *chksum = FOLD_U32T(acc); + return ERR_OK; +} +#endif /* LWIP_CHECKSUM_ON_COPY */ + +/** + * @ingroup pbuf + * Get one byte from the specified position in a pbuf + * WARNING: returns zero for offset >= p->tot_len + * + * @param p pbuf to parse + * @param offset offset into p of the byte to return + * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len + */ +u8_t +pbuf_get_at(const struct pbuf *p, u16_t offset) +{ + int ret = pbuf_try_get_at(p, offset); + if (ret >= 0) { + return (u8_t)ret; + } + return 0; +} + +/** + * @ingroup pbuf + * Get one byte from the specified position in a pbuf + * + * @param p pbuf to parse + * @param offset offset into p of the byte to return + * @return byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len + */ +int +pbuf_try_get_at(const struct pbuf *p, u16_t offset) +{ + u16_t q_idx; + const struct pbuf *q = pbuf_skip_const(p, offset, &q_idx); + + /* return requested data if pbuf is OK */ + if ((q != NULL) && (q->len > q_idx)) { + return ((u8_t *)q->payload)[q_idx]; + } + return -1; +} + +/** + * @ingroup pbuf + * Put one byte to the specified position in a pbuf + * WARNING: silently ignores offset >= p->tot_len + * + * @param p pbuf to fill + * @param offset offset into p of the byte to write + * @param data byte to write at an offset into p + */ +void +pbuf_put_at(struct pbuf *p, u16_t offset, u8_t data) +{ + u16_t q_idx; + struct pbuf *q = pbuf_skip(p, offset, &q_idx); + + /* write requested data if pbuf is OK */ + if ((q != NULL) && (q->len > q_idx)) { + ((u8_t *)q->payload)[q_idx] = data; + } +} + +/** + * @ingroup pbuf + * Compare pbuf contents at specified offset with memory s2, both of length n + * + * @param p pbuf to compare + * @param offset offset into p at which to start comparing + * @param s2 buffer to compare + * @param n length of buffer to compare + * @return zero if equal, nonzero otherwise + * (0xffff if p is too short, diffoffset+1 otherwise) + */ +u16_t +pbuf_memcmp(const struct pbuf *p, u16_t offset, const void *s2, u16_t n) +{ + u16_t start = offset; + const struct pbuf *q = p; + u16_t i; + + /* pbuf long enough to perform check? */ + if (p->tot_len < (offset + n)) { + return 0xffff; + } + + /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above. 
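+ * (same walk as pbuf_skip_const(): 'start' is reduced by each skipped pbuf's
+ * len so that it ends up as an offset into 'q')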
*/ + while ((q != NULL) && (q->len <= start)) { + start = (u16_t)(start - q->len); + q = q->next; + } + + /* return requested data if pbuf is OK */ + for (i = 0; i < n; i++) { + /* We know pbuf_get_at() succeeds because of p->tot_len check above. */ + u8_t a = pbuf_get_at(q, (u16_t)(start + i)); + u8_t b = ((const u8_t *)s2)[i]; + if (a != b) { + return (u16_t)LWIP_MIN(i + 1, 0xFFFF); + } + } + return 0; +} + +/** + * @ingroup pbuf + * Find occurrence of mem (with length mem_len) in pbuf p, starting at offset + * start_offset. + * + * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as + * return value 'not found' + * @param mem search for the contents of this buffer + * @param mem_len length of 'mem' + * @param start_offset offset into p at which to start searching + * @return 0xFFFF if substr was not found in p or the index where it was found + */ +u16_t +pbuf_memfind(const struct pbuf *p, const void *mem, u16_t mem_len, u16_t start_offset) +{ + u16_t i; + u16_t max_cmp_start = (u16_t)(p->tot_len - mem_len); + if (p->tot_len >= mem_len + start_offset) { + for (i = start_offset; i <= max_cmp_start; i++) { + u16_t plus = pbuf_memcmp(p, i, mem, mem_len); + if (plus == 0) { + return i; + } + } + } + return 0xFFFF; +} + +/** + * Find occurrence of substr with length substr_len in pbuf p, start at offset + * start_offset + * WARNING: in contrast to strstr(), this one does not stop at the first \0 in + * the pbuf/source string! + * + * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as + * return value 'not found' + * @param substr string to search for in p, maximum length is 0xFFFE + * @return 0xFFFF if substr was not found in p or the index where it was found + */ +u16_t +pbuf_strstr(const struct pbuf *p, const char *substr) +{ + size_t substr_len; + if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) { + return 0xFFFF; + } + substr_len = strlen(substr); + if (substr_len >= 0xFFFF) { + return 0xFFFF; + } + return pbuf_memfind(p, substr, (u16_t)substr_len, 0); +} diff --git a/Middlewares/Third_Party/LwIP/src/core/raw.c b/Middlewares/Third_Party/LwIP/src/core/raw.c new file mode 100644 index 00000000..3b34544b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/raw.c @@ -0,0 +1,671 @@ +/** + * @file + * Implementation of raw protocol PCBs for low-level handling of + * different types of protocols besides (or overriding) those + * already available in lwIP.\n + * See also @ref raw_raw + * + * @defgroup raw_raw RAW + * @ingroup callbackstyle_api + * Implementation of raw protocol PCBs for low-level handling of + * different types of protocols besides (or overriding) those + * already available in lwIP.\n + * @see @ref api + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/raw.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" + +#include + +/** The list of RAW PCBs */ +static struct raw_pcb *raw_pcbs; + +static u8_t +raw_input_local_match(struct raw_pcb *pcb, u8_t broadcast) +{ + LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */ + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + return 0; + } + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: PCBs listening to any IP type also listen to any IP address */ + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { +#if IP_SOF_BROADCAST_RECV + if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) { + return 0; + } +#endif /* IP_SOF_BROADCAST_RECV */ + return 1; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* Only need to check PCB if incoming IP version matches PCB IP version */ + if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) { +#if LWIP_IPV4 + /* Special case: IPv4 broadcast: receive all broadcasts + * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */ + if (broadcast != 0) { +#if IP_SOF_BROADCAST_RECV + if (ip_get_option(pcb, SOF_BROADCAST)) +#endif /* IP_SOF_BROADCAST_RECV */ + { + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip))) { + return 1; + } + } + } else +#endif /* LWIP_IPV4 */ + /* Handle IPv4 and IPv6: catch all or exact match */ + if (ip_addr_isany(&pcb->local_ip) || + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + return 1; + } + } + + return 0; +} + +/** + * Determine if in incoming IP packet is covered by a RAW PCB + * and if so, pass it to a user-provided receive callback function. + * + * Given an incoming IP datagram (as a chain of pbufs) this function + * finds a corresponding RAW PCB and calls the corresponding receive + * callback function. + * + * @param p pbuf to be demultiplexed to a RAW PCB. + * @param inp network interface on which the datagram was received. + * @return - 1 if the packet has been eaten by a RAW PCB receive + * callback function. The caller MAY NOT not reference the + * packet any longer, and MAY NOT call pbuf_free(). + * @return - 0 if packet is not eaten (pbuf is still referenced by the + * caller). 
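+ *
+ * Note: the eaten/not-eaten wording above maps onto the raw_input_state_t
+ * return values used below: RAW_INPUT_EATEN means the pbuf was consumed by a
+ * receive callback, while RAW_INPUT_NONE and RAW_INPUT_DELIVERED leave the
+ * pbuf with the caller.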
+ * + */ +raw_input_state_t +raw_input(struct pbuf *p, struct netif *inp) +{ + struct raw_pcb *pcb, *prev; + s16_t proto; + raw_input_state_t ret = RAW_INPUT_NONE; + u8_t broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()); + + LWIP_UNUSED_ARG(inp); + +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_HDR_GET_VERSION(p->payload) == 6) +#endif /* LWIP_IPV4 */ + { + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload; + proto = IP6H_NEXTH(ip6hdr); + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + proto = IPH_PROTO((struct ip_hdr *)p->payload); + } +#endif /* LWIP_IPV4 */ + + prev = NULL; + pcb = raw_pcbs; + /* loop through all raw pcbs until the packet is eaten by one */ + /* this allows multiple pcbs to match against the packet by design */ + while (pcb != NULL) { + if ((pcb->protocol == proto) && raw_input_local_match(pcb, broadcast) && + (((pcb->flags & RAW_FLAGS_CONNECTED) == 0) || + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) { + /* receive callback function available? */ + if (pcb->recv != NULL) { + u8_t eaten; +#ifndef LWIP_NOASSERT + void *old_payload = p->payload; +#endif + ret = RAW_INPUT_DELIVERED; + /* the receive callback function did not eat the packet? */ + eaten = pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr()); + if (eaten != 0) { + /* receive function ate the packet */ + p = NULL; + if (prev != NULL) { + /* move the pcb to the front of raw_pcbs so that is + found faster next time */ + prev->next = pcb->next; + pcb->next = raw_pcbs; + raw_pcbs = pcb; + } + return RAW_INPUT_EATEN; + } else { + /* sanity-check that the receive callback did not alter the pbuf */ + LWIP_ASSERT("raw pcb recv callback altered pbuf payload pointer without eating packet", + p->payload == old_payload); + } + } + /* no receive callback function was set for this raw PCB */ + } + /* drop the packet */ + prev = pcb; + pcb = pcb->next; + } + return ret; +} + +/** + * @ingroup raw_raw + * Bind a RAW PCB. + * + * @param pcb RAW PCB to be bound with a local address ipaddr. + * @param ipaddr local IP address to bind with. Use IP4_ADDR_ANY to + * bind to all local interfaces. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_USE. The specified IP address is already bound to by + * another RAW PCB. + * + * @see raw_disconnect() + */ +err_t +raw_bind(struct raw_pcb *pcb, const ip_addr_t *ipaddr) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + ip_addr_set_ipaddr(&pcb->local_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. */ + if (IP_IS_V6(&pcb->local_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->local_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->local_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + return ERR_OK; +} + +/** + * @ingroup raw_raw + * Bind an RAW PCB to a specific netif. + * After calling this function, all packets received via this PCB + * are guaranteed to have come in via the specified netif, and all + * outgoing packets will go out via the specified netif. + * + * @param pcb RAW PCB to be bound with netif. + * @param netif netif to bind to. Can be NULL. 
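+ *
+ * Illustrative usage sketch (hypothetical application code; 'my_netif' is
+ * assumed to be an interface that has already been added and brought up,
+ * 'my_recv_fn' is a hypothetical raw_recv_fn callback):
+ *
+ *   struct raw_pcb *pcb = raw_new(IP_PROTO_ICMP);
+ *   if (pcb != NULL) {
+ *     raw_bind(pcb, IP4_ADDR_ANY);       // any local address
+ *     raw_bind_netif(pcb, &my_netif);    // but only via this netif
+ *     raw_recv(pcb, my_recv_fn, NULL);
+ *   }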
+ * + * @see raw_disconnect() + */ +void +raw_bind_netif(struct raw_pcb *pcb, const struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (netif != NULL) { + pcb->netif_idx = netif_get_index(netif); + } else { + pcb->netif_idx = NETIF_NO_INDEX; + } +} + +/** + * @ingroup raw_raw + * Connect an RAW PCB. This function is required by upper layers + * of lwip. Using the raw api you could use raw_sendto() instead + * + * This will associate the RAW PCB with the remote address. + * + * @param pcb RAW PCB to be connected with remote address ipaddr and port. + * @param ipaddr remote IP address to connect with. + * + * @return lwIP error code + * + * @see raw_disconnect() and raw_sendto() + */ +err_t +raw_connect(struct raw_pcb *pcb, const ip_addr_t *ipaddr) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now, + * using the bound address to make a more informed decision when possible. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->remote_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + raw_set_flags(pcb, RAW_FLAGS_CONNECTED); + return ERR_OK; +} + +/** + * @ingroup raw_raw + * Disconnect a RAW PCB. + * + * @param pcb the raw pcb to disconnect. + */ +void +raw_disconnect(struct raw_pcb *pcb) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* reset remote address association */ +#if LWIP_IPV4 && LWIP_IPV6 + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { + ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE); + } else { +#endif + ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip); +#if LWIP_IPV4 && LWIP_IPV6 + } +#endif + pcb->netif_idx = NETIF_NO_INDEX; + /* mark PCB as unconnected */ + raw_clear_flags(pcb, RAW_FLAGS_CONNECTED); +} + +/** + * @ingroup raw_raw + * Set the callback function for received packets that match the + * raw PCB's protocol and binding. + * + * The callback function MUST either + * - eat the packet by calling pbuf_free() and returning non-zero. The + * packet will not be passed to other raw PCBs or other protocol layers. + * - not free the packet, and return zero. The packet will be matched + * against further PCBs and/or forwarded to another protocol layers. + */ +void +raw_recv(struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* remember recv() callback and user data */ + pcb->recv = recv; + pcb->recv_arg = recv_arg; +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the given address. An IP header will be prepended + * to the packet, unless the RAW_FLAGS_HDRINCL flag is set on the PCB. In that + * case, the packet must include an IP header, which will then be sent as is. 
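+ *
+ * Illustrative usage sketch (hypothetical application code; assumes an IPv4
+ * build, a pcb created as above and a valid destination address 'dest'):
+ *
+ *   struct pbuf *p = pbuf_alloc(PBUF_IP, sizeof(struct icmp_echo_hdr), PBUF_RAM);
+ *   if (p != NULL) {
+ *     // ... fill in the ICMP echo header (type, id, seqno, checksum) ...
+ *     raw_sendto(pcb, p, dest);   // prepends the IP header and transmits
+ *     pbuf_free(p);               // the pcb does not take ownership of p
+ *   }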
+ * + * @param pcb the raw pcb which to send + * @param p the IP payload to send + * @param ipaddr the destination address of the IP packet + * + */ +err_t +raw_sendto(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr) +{ + struct netif *netif; + const ip_addr_t *src_ip; + + if ((pcb == NULL) || (ipaddr == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr)) { + return ERR_VAL; + } + + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_sendto\n")); + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { +#if LWIP_MULTICAST_TX_OPTIONS + netif = NULL; + if (ip_addr_ismulticast(ipaddr)) { + /* For multicast-destined packets, use the user-provided interface index to + * determine the outgoing interface, if an interface index is set and a + * matching netif can be found. Otherwise, fall back to regular routing. */ + netif = netif_get_by_index(pcb->mcast_ifindex); + } + + if (netif == NULL) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + { + netif = ip_route(&pcb->local_ip, ipaddr); + } + } + + if (netif == NULL) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: No route to ")); + ip_addr_debug_print(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ipaddr); + return ERR_RTE; + } + + if (ip_addr_isany(&pcb->local_ip) || ip_addr_ismulticast(&pcb->local_ip)) { + /* use outgoing network interface IP address as source address */ + src_ip = ip_netif_get_local_ip(netif, ipaddr); +#if LWIP_IPV6 + if (src_ip == NULL) { + return ERR_RTE; + } +#endif /* LWIP_IPV6 */ + } else { + /* use RAW PCB local IP address as source address */ + src_ip = &pcb->local_ip; + } + + return raw_sendto_if_src(pcb, p, ipaddr, netif, src_ip); +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the given address, using a particular outgoing + * netif and source IP address. An IP header will be prepended to the packet, + * unless the RAW_FLAGS_HDRINCL flag is set on the PCB. In that case, the + * packet must include an IP header, which will then be sent as is. + * + * @param pcb RAW PCB used to send the data + * @param p chain of pbufs to be sent + * @param dst_ip destination IP address + * @param netif the netif used for sending + * @param src_ip source IP address + */ +err_t +raw_sendto_if_src(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + struct netif *netif, const ip_addr_t *src_ip) +{ + err_t err; + struct pbuf *q; /* q will be sent down the stack */ + u16_t header_size; + u8_t ttl; + + LWIP_ASSERT_CORE_LOCKED(); + + if ((pcb == NULL) || (dst_ip == NULL) || (netif == NULL) || (src_ip == NULL) || + !IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) || !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + header_size = ( +#if LWIP_IPV4 && LWIP_IPV6 + IP_IS_V6(dst_ip) ? IP6_HLEN : IP_HLEN); +#elif LWIP_IPV4 + IP_HLEN); +#else + IP6_HLEN); +#endif + + /* Handle the HDRINCL option as an exception: none of the code below applies + * to this case, and sending the packet needs to be done differently too. */ + if (pcb->flags & RAW_FLAGS_HDRINCL) { + /* A full header *must* be present in the first pbuf of the chain, as the + * output routines may access its fields directly. */ + if (p->len < header_size) { + return ERR_VAL; + } + /* @todo multicast loop support, if at all desired for this scenario.. */ + NETIF_SET_HINTS(netif, &pcb->netif_hints); + err = ip_output_if_hdrincl(p, src_ip, dst_ip, netif); + NETIF_RESET_HINTS(netif); + return err; + } + + /* packet too large to add an IP header without causing an overflow? 
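+ * (p->tot_len is a u16_t: if adding header_size wraps around, the sum is
+ * smaller than the original tot_len and the send is rejected with ERR_MEM)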
*/ + if ((u16_t)(p->tot_len + header_size) < p->tot_len) { + return ERR_MEM; + } + /* not enough space to add an IP header to first pbuf in given p chain? */ + if (pbuf_add_header(p, header_size)) { + /* allocate header in new pbuf */ + q = pbuf_alloc(PBUF_IP, 0, PBUF_RAM); + /* new header pbuf could not be allocated? */ + if (q == NULL) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("raw_sendto: could not allocate header\n")); + return ERR_MEM; + } + if (p->tot_len != 0) { + /* chain header q in front of given pbuf p */ + pbuf_chain(q, p); + } + /* { first pbuf q points to header pbuf } */ + LWIP_DEBUGF(RAW_DEBUG, ("raw_sendto: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p)); + } else { + /* first pbuf q equals given pbuf */ + q = p; + if (pbuf_remove_header(q, header_size)) { + LWIP_ASSERT("Can't restore header we just removed!", 0); + return ERR_MEM; + } + } + +#if IP_SOF_BROADCAST + if (IP_IS_V4(dst_ip)) { + /* broadcast filter? */ + if (!ip_get_option(pcb, SOF_BROADCAST) && ip_addr_isbroadcast(dst_ip, netif)) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb)); + /* free any temporary header pbuf allocated by pbuf_header() */ + if (q != p) { + pbuf_free(q); + } + return ERR_VAL; + } + } +#endif /* IP_SOF_BROADCAST */ + + /* Multicast Loop? */ +#if LWIP_MULTICAST_TX_OPTIONS + if (((pcb->flags & RAW_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) { + q->flags |= PBUF_FLAG_MCASTLOOP; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_IPV6 + /* If requested, based on the IPV6_CHECKSUM socket option per RFC3542, + compute the checksum and update the checksum in the payload. */ + if (IP_IS_V6(dst_ip) && pcb->chksum_reqd) { + u16_t chksum = ip6_chksum_pseudo(p, pcb->protocol, p->tot_len, ip_2_ip6(src_ip), ip_2_ip6(dst_ip)); + LWIP_ASSERT("Checksum must fit into first pbuf", p->len >= (pcb->chksum_offset + 2)); + SMEMCPY(((u8_t *)p->payload) + pcb->chksum_offset, &chksum, sizeof(u16_t)); + } +#endif + + /* Determine TTL to use */ +#if LWIP_MULTICAST_TX_OPTIONS + ttl = (ip_addr_ismulticast(dst_ip) ? raw_get_multicast_ttl(pcb) : pcb->ttl); +#else /* LWIP_MULTICAST_TX_OPTIONS */ + ttl = pcb->ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + NETIF_SET_HINTS(netif, &pcb->netif_hints); + err = ip_output_if(q, src_ip, dst_ip, ttl, pcb->tos, pcb->protocol, netif); + NETIF_RESET_HINTS(netif); + + /* did we chain a header earlier? */ + if (q != p) { + /* free the header */ + pbuf_free(q); + } + return err; +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the address given by raw_connect() + * + * @param pcb the raw pcb which to send + * @param p the IP payload to send + * + */ +err_t +raw_send(struct raw_pcb *pcb, struct pbuf *p) +{ + return raw_sendto(pcb, p, &pcb->remote_ip); +} + +/** + * @ingroup raw_raw + * Remove an RAW PCB. + * + * @param pcb RAW PCB to be removed. The PCB is removed from the list of + * RAW PCB's and the data structure is freed from memory. + * + * @see raw_new() + */ +void +raw_remove(struct raw_pcb *pcb) +{ + struct raw_pcb *pcb2; + LWIP_ASSERT_CORE_LOCKED(); + /* pcb to be removed is first in list? 
*/ + if (raw_pcbs == pcb) { + /* make list start at 2nd pcb */ + raw_pcbs = raw_pcbs->next; + /* pcb not 1st in list */ + } else { + for (pcb2 = raw_pcbs; pcb2 != NULL; pcb2 = pcb2->next) { + /* find pcb in raw_pcbs list */ + if (pcb2->next != NULL && pcb2->next == pcb) { + /* remove pcb from list */ + pcb2->next = pcb->next; + break; + } + } + } + memp_free(MEMP_RAW_PCB, pcb); +} + +/** + * @ingroup raw_raw + * Create a RAW PCB. + * + * @return The RAW PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @param proto the protocol number of the IPs payload (e.g. IP_PROTO_ICMP) + * + * @see raw_remove() + */ +struct raw_pcb * +raw_new(u8_t proto) +{ + struct raw_pcb *pcb; + + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_new\n")); + LWIP_ASSERT_CORE_LOCKED(); + + pcb = (struct raw_pcb *)memp_malloc(MEMP_RAW_PCB); + /* could allocate RAW PCB? */ + if (pcb != NULL) { + /* initialize PCB to all zeroes */ + memset(pcb, 0, sizeof(struct raw_pcb)); + pcb->protocol = proto; + pcb->ttl = RAW_TTL; +#if LWIP_MULTICAST_TX_OPTIONS + raw_set_multicast_ttl(pcb, RAW_TTL); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + pcb->next = raw_pcbs; + raw_pcbs = pcb; + } + return pcb; +} + +/** + * @ingroup raw_raw + * Create a RAW PCB for specific IP type. + * + * @return The RAW PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) packets, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. + * @param proto the protocol number (next header) of the IPv6 packet payload + * (e.g. IP6_NEXTH_ICMP6) + * + * @see raw_remove() + */ +struct raw_pcb * +raw_new_ip_type(u8_t type, u8_t proto) +{ + struct raw_pcb *pcb; + LWIP_ASSERT_CORE_LOCKED(); + pcb = raw_new(proto); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** This function is called from netif.c when address is changed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change + */ +void raw_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ + struct raw_pcb *rpcb; + + if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) { + for (rpcb = raw_pcbs; rpcb != NULL; rpcb = rpcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&rpcb->local_ip, old_addr)) { + /* The PCB is bound to the old ipaddr and + * is set to bound to the new one instead */ + ip_addr_copy(rpcb->local_ip, *new_addr); + } + } + } +} + +#endif /* LWIP_RAW */ diff --git a/Middlewares/Third_Party/LwIP/src/core/stats.c b/Middlewares/Third_Party/LwIP/src/core/stats.c new file mode 100644 index 00000000..34e9b270 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/stats.c @@ -0,0 +1,169 @@ +/** + * @file + * Statistics module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_STATS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/debug.h" + +#include + +struct stats_ lwip_stats; + +void +stats_init(void) +{ +#ifdef LWIP_DEBUG +#if MEM_STATS + lwip_stats.mem.name = "MEM"; +#endif /* MEM_STATS */ +#endif /* LWIP_DEBUG */ +} + +#if LWIP_STATS_DISPLAY +void +stats_display_proto(struct stats_proto *proto, const char *name) +{ + LWIP_PLATFORM_DIAG(("\n%s\n\t", name)); + LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", proto->xmit)); + LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", proto->recv)); + LWIP_PLATFORM_DIAG(("fw: %"STAT_COUNTER_F"\n\t", proto->fw)); + LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", proto->drop)); + LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", proto->chkerr)); + LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", proto->lenerr)); + LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", proto->memerr)); + LWIP_PLATFORM_DIAG(("rterr: %"STAT_COUNTER_F"\n\t", proto->rterr)); + LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", proto->proterr)); + LWIP_PLATFORM_DIAG(("opterr: %"STAT_COUNTER_F"\n\t", proto->opterr)); + LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n\t", proto->err)); + LWIP_PLATFORM_DIAG(("cachehit: %"STAT_COUNTER_F"\n", proto->cachehit)); +} + +#if IGMP_STATS || MLD6_STATS +void +stats_display_igmp(struct stats_igmp *igmp, const char *name) +{ + LWIP_PLATFORM_DIAG(("\n%s\n\t", name)); + LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", igmp->xmit)); + LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", igmp->recv)); + LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", igmp->drop)); + LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", igmp->chkerr)); + LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", igmp->lenerr)); + LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", igmp->memerr)); + LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", igmp->proterr)); + LWIP_PLATFORM_DIAG(("rx_v1: %"STAT_COUNTER_F"\n\t", igmp->rx_v1)); + LWIP_PLATFORM_DIAG(("rx_group: %"STAT_COUNTER_F"\n\t", igmp->rx_group)); + LWIP_PLATFORM_DIAG(("rx_general: %"STAT_COUNTER_F"\n\t", igmp->rx_general)); + LWIP_PLATFORM_DIAG(("rx_report: %"STAT_COUNTER_F"\n\t", igmp->rx_report)); + LWIP_PLATFORM_DIAG(("tx_join: 
%"STAT_COUNTER_F"\n\t", igmp->tx_join)); + LWIP_PLATFORM_DIAG(("tx_leave: %"STAT_COUNTER_F"\n\t", igmp->tx_leave)); + LWIP_PLATFORM_DIAG(("tx_report: %"STAT_COUNTER_F"\n", igmp->tx_report)); +} +#endif /* IGMP_STATS || MLD6_STATS */ + +#if MEM_STATS || MEMP_STATS +void +stats_display_mem(struct stats_mem *mem, const char *name) +{ + LWIP_PLATFORM_DIAG(("\nMEM %s\n\t", name)); + LWIP_PLATFORM_DIAG(("avail: %"MEM_SIZE_F"\n\t", mem->avail)); + LWIP_PLATFORM_DIAG(("used: %"MEM_SIZE_F"\n\t", mem->used)); + LWIP_PLATFORM_DIAG(("max: %"MEM_SIZE_F"\n\t", mem->max)); + LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n", mem->err)); +} + +#if MEMP_STATS +void +stats_display_memp(struct stats_mem *mem, int idx) +{ + if (idx < MEMP_MAX) { + stats_display_mem(mem, mem->name); + } +} +#endif /* MEMP_STATS */ +#endif /* MEM_STATS || MEMP_STATS */ + +#if SYS_STATS +void +stats_display_sys(struct stats_sys *sys) +{ + LWIP_PLATFORM_DIAG(("\nSYS\n\t")); + LWIP_PLATFORM_DIAG(("sem.used: %"STAT_COUNTER_F"\n\t", sys->sem.used)); + LWIP_PLATFORM_DIAG(("sem.max: %"STAT_COUNTER_F"\n\t", sys->sem.max)); + LWIP_PLATFORM_DIAG(("sem.err: %"STAT_COUNTER_F"\n\t", sys->sem.err)); + LWIP_PLATFORM_DIAG(("mutex.used: %"STAT_COUNTER_F"\n\t", sys->mutex.used)); + LWIP_PLATFORM_DIAG(("mutex.max: %"STAT_COUNTER_F"\n\t", sys->mutex.max)); + LWIP_PLATFORM_DIAG(("mutex.err: %"STAT_COUNTER_F"\n\t", sys->mutex.err)); + LWIP_PLATFORM_DIAG(("mbox.used: %"STAT_COUNTER_F"\n\t", sys->mbox.used)); + LWIP_PLATFORM_DIAG(("mbox.max: %"STAT_COUNTER_F"\n\t", sys->mbox.max)); + LWIP_PLATFORM_DIAG(("mbox.err: %"STAT_COUNTER_F"\n", sys->mbox.err)); +} +#endif /* SYS_STATS */ + +void +stats_display(void) +{ + s16_t i; + + LINK_STATS_DISPLAY(); + ETHARP_STATS_DISPLAY(); + IPFRAG_STATS_DISPLAY(); + IP6_FRAG_STATS_DISPLAY(); + IP_STATS_DISPLAY(); + ND6_STATS_DISPLAY(); + IP6_STATS_DISPLAY(); + IGMP_STATS_DISPLAY(); + MLD6_STATS_DISPLAY(); + ICMP_STATS_DISPLAY(); + ICMP6_STATS_DISPLAY(); + UDP_STATS_DISPLAY(); + TCP_STATS_DISPLAY(); + MEM_STATS_DISPLAY(); + for (i = 0; i < MEMP_MAX; i++) { + MEMP_STATS_DISPLAY(i); + } + SYS_STATS_DISPLAY(); +} +#endif /* LWIP_STATS_DISPLAY */ + +#endif /* LWIP_STATS */ + diff --git a/Middlewares/Third_Party/LwIP/src/core/sys.c b/Middlewares/Third_Party/LwIP/src/core/sys.c new file mode 100644 index 00000000..5f08352b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/sys.c @@ -0,0 +1,148 @@ +/** + * @file + * lwIP Operating System abstraction + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/** + * @defgroup sys_layer Porting (system abstraction layer) + * @ingroup lwip + * + * @defgroup sys_os OS abstraction layer + * @ingroup sys_layer + * No need to implement functions in this section in NO_SYS mode. + * The OS-specific code should be implemented in arch/sys_arch.h + * and sys_arch.c of your port. + * + * The operating system emulation layer provides a common interface + * between the lwIP code and the underlying operating system kernel. The + * general idea is that porting lwIP to new architectures requires only + * small changes to a few header files and a new sys_arch + * implementation. It is also possible to do a sys_arch implementation + * that does not rely on any underlying operating system. + * + * The sys_arch provides semaphores, mailboxes and mutexes to lwIP. For the full + * lwIP functionality, multiple threads support can be implemented in the + * sys_arch, but this is not required for the basic lwIP + * functionality. Timer scheduling is implemented in lwIP, but can be implemented + * by the sys_arch port (LWIP_TIMERS_CUSTOM==1). + * + * In addition to the source file providing the functionality of sys_arch, + * the OS emulation layer must provide several header files defining + * macros used throughout lwip. The files required and the macros they + * must define are listed below the sys_arch description. + * + * Since lwIP 1.4.0, semaphore, mutexes and mailbox functions are prototyped in a way that + * allows both using pointers or actual OS structures to be used. This way, memory + * required for such types can be either allocated in place (globally or on the + * stack) or on the heap (allocated internally in the "*_new()" functions). + * + * Note: + * ----- + * Be careful with using mem_malloc() in sys_arch. When malloc() refers to + * mem_malloc() you can run into a circular function call problem. In mem.c + * mem_init() tries to allocate a semaphore using mem_malloc, which of course + * can't be performed when sys_arch uses mem_malloc. + * + * @defgroup sys_sem Semaphores + * @ingroup sys_os + * Semaphores can be either counting or binary - lwIP works with both + * kinds. + * Semaphores are represented by the type "sys_sem_t" which is typedef'd + * in the sys_arch.h file. Mailboxes are equivalently represented by the + * type "sys_mbox_t". Mutexes are represented by the type "sys_mutex_t". + * lwIP does not place any restrictions on how these types are represented + * internally. + * + * @defgroup sys_mutex Mutexes + * @ingroup sys_os + * Mutexes are recommended to correctly handle priority inversion, + * especially if you use LWIP_CORE_LOCKING . + * + * @defgroup sys_mbox Mailboxes + * @ingroup sys_os + * Mailboxes should be implemented as a queue which allows multiple messages + * to be posted (implementing as a rendez-vous point where only one message can be + * posted at a time can have a highly negative impact on performance). 
A message + * in a mailbox is just a pointer, nothing more. + * + * @defgroup sys_time Time + * @ingroup sys_layer + * + * @defgroup sys_prot Critical sections + * @ingroup sys_layer + * Used to protect short regions of code against concurrent access. + * - Your system is a bare-metal system (probably with an RTOS) + * and interrupts are under your control: + * Implement this as LockInterrupts() / UnlockInterrupts() + * - Your system uses an RTOS with deferred interrupt handling from a + * worker thread: Implement as a global mutex or lock/unlock scheduler + * - Your system uses a high-level OS with e.g. POSIX signals: + * Implement as a global mutex + * + * @defgroup sys_misc Misc + * @ingroup sys_os + */ + +#include "lwip/opt.h" + +#include "lwip/sys.h" + +/* Most of the functions defined in sys.h must be implemented in the + * architecture-dependent file sys_arch.c */ + +#if !NO_SYS + +#ifndef sys_msleep +/** + * Sleep for some ms. Timeouts are NOT processed while sleeping. + * + * @param ms number of milliseconds to sleep + */ +void +sys_msleep(u32_t ms) +{ + if (ms > 0) { + sys_sem_t delaysem; + err_t err = sys_sem_new(&delaysem, 0); + if (err == ERR_OK) { + sys_arch_sem_wait(&delaysem, ms); + sys_sem_free(&delaysem); + } + } +} +#endif /* sys_msleep */ + +#endif /* !NO_SYS */ diff --git a/Middlewares/Third_Party/LwIP/src/core/tcp.c b/Middlewares/Third_Party/LwIP/src/core/tcp.c new file mode 100644 index 00000000..bd7d64ec --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/tcp.c @@ -0,0 +1,2686 @@ +/** + * @file + * Transmission Control Protocol for IP + * See also @ref tcp_raw + * + * @defgroup tcp_raw TCP + * @ingroup callbackstyle_api + * Transmission Control Protocol for IP\n + * @see @ref api + * + * Common functions for the TCP implementation, such as functions + * for manipulating the data structures and the TCP timer functions. TCP functions + * related to input and output is found in tcp_in.c and tcp_out.c respectively.\n + * + * TCP connection setup + * -------------------- + * The functions used for setting up connections is similar to that of + * the sequential API and of the BSD socket API. A new TCP connection + * identifier (i.e., a protocol control block - PCB) is created with the + * tcp_new() function. This PCB can then be either set to listen for new + * incoming connections or be explicitly connected to another host. + * - tcp_new() + * - tcp_bind() + * - tcp_listen() and tcp_listen_with_backlog() + * - tcp_accept() + * - tcp_connect() + * + * Sending TCP data + * ---------------- + * TCP data is sent by enqueueing the data with a call to tcp_write() and + * triggering to send by calling tcp_output(). When the data is successfully + * transmitted to the remote host, the application will be notified with a + * call to a specified callback function. + * - tcp_write() + * - tcp_output() + * - tcp_sent() + * + * Receiving TCP data + * ------------------ + * TCP data reception is callback based - an application specified + * callback function is called when new data arrives. When the + * application has taken the data, it has to call the tcp_recved() + * function to indicate that TCP can advertise increase the receive + * window. + * - tcp_recv() + * - tcp_recved() + * + * Application polling + * ------------------- + * When a connection is idle (i.e., no data is either transmitted or + * received), lwIP will repeatedly poll the application by calling a + * specified callback function. 
This can be used either as a watchdog + * timer for killing connections that have stayed idle for too long, or + * as a method of waiting for memory to become available. For instance, + * if a call to tcp_write() has failed because memory wasn't available, + * the application may use the polling functionality to call tcp_write() + * again when the connection has been idle for a while. + * - tcp_poll() + * + * Closing and aborting connections + * -------------------------------- + * - tcp_close() + * - tcp_abort() + * - tcp_err() + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/tcp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/debug.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/nd6.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#ifndef TCP_LOCAL_PORT_RANGE_START +/* From http://www.iana.org/assignments/port-numbers: + "The Dynamic and/or Private Ports are those from 49152 through 65535" */ +#define TCP_LOCAL_PORT_RANGE_START 0xc000 +#define TCP_LOCAL_PORT_RANGE_END 0xffff +#define TCP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & (u16_t)~TCP_LOCAL_PORT_RANGE_START) + TCP_LOCAL_PORT_RANGE_START)) +#endif + +#if LWIP_TCP_KEEPALIVE +#define TCP_KEEP_DUR(pcb) ((pcb)->keep_cnt * (pcb)->keep_intvl) +#define TCP_KEEP_INTVL(pcb) ((pcb)->keep_intvl) +#else /* LWIP_TCP_KEEPALIVE */ +#define TCP_KEEP_DUR(pcb) TCP_MAXIDLE +#define TCP_KEEP_INTVL(pcb) TCP_KEEPINTVL_DEFAULT +#endif /* LWIP_TCP_KEEPALIVE */ + +/* As initial send MSS, we use TCP_MSS but limit it to 536. 
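+   The value 536 follows RFC 879 / RFC 1122: without an MSS option from the
+   peer, the effective send MSS defaults to the 576-byte minimum datagram
+   size minus 40 bytes of IP and TCP headers.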
*/ +#if TCP_MSS > 536 +#define INITIAL_MSS 536 +#else +#define INITIAL_MSS TCP_MSS +#endif + +static const char *const tcp_state_str[] = { + "CLOSED", + "LISTEN", + "SYN_SENT", + "SYN_RCVD", + "ESTABLISHED", + "FIN_WAIT_1", + "FIN_WAIT_2", + "CLOSE_WAIT", + "CLOSING", + "LAST_ACK", + "TIME_WAIT" +}; + +/* last local TCP port */ +static u16_t tcp_port = TCP_LOCAL_PORT_RANGE_START; + +/* Incremented every coarse grained timer shot (typically every 500 ms). */ +u32_t tcp_ticks; +static const u8_t tcp_backoff[13] = +{ 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7}; +/* Times per slowtmr hits */ +static const u8_t tcp_persist_backoff[7] = { 3, 6, 12, 24, 48, 96, 120 }; + +/* The TCP PCB lists. */ + +/** List of all TCP PCBs bound but not yet (connected || listening) */ +struct tcp_pcb *tcp_bound_pcbs; +/** List of all TCP PCBs in LISTEN state */ +union tcp_listen_pcbs_t tcp_listen_pcbs; +/** List of all TCP PCBs that are in a state in which + * they accept or send data. */ +struct tcp_pcb *tcp_active_pcbs; +/** List of all TCP PCBs in TIME-WAIT state */ +struct tcp_pcb *tcp_tw_pcbs; + +/** An array with all (non-temporary) PCB lists, mainly used for smaller code size */ +struct tcp_pcb **const tcp_pcb_lists[] = {&tcp_listen_pcbs.pcbs, &tcp_bound_pcbs, + &tcp_active_pcbs, &tcp_tw_pcbs +}; + +u8_t tcp_active_pcbs_changed; + +/** Timer counter to handle calling slow-timer from tcp_tmr() */ +static u8_t tcp_timer; +static u8_t tcp_timer_ctr; +static u16_t tcp_new_port(void); + +static err_t tcp_close_shutdown_fin(struct tcp_pcb *pcb); +#if LWIP_TCP_PCB_NUM_EXT_ARGS +static void tcp_ext_arg_invoke_callbacks_destroyed(struct tcp_pcb_ext_args *ext_args); +#endif + +/** + * Initialize this module. + */ +void +tcp_init(void) +{ +#ifdef LWIP_RAND + tcp_port = TCP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND()); +#endif /* LWIP_RAND */ +} + +/** Free a tcp pcb */ +void +tcp_free(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_free: LISTEN", pcb->state != LISTEN); +#if LWIP_TCP_PCB_NUM_EXT_ARGS + tcp_ext_arg_invoke_callbacks_destroyed(pcb->ext_args); +#endif + memp_free(MEMP_TCP_PCB, pcb); +} + +/** Free a tcp listen pcb */ +static void +tcp_free_listen(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_free_listen: !LISTEN", pcb->state != LISTEN); +#if LWIP_TCP_PCB_NUM_EXT_ARGS + tcp_ext_arg_invoke_callbacks_destroyed(pcb->ext_args); +#endif + memp_free(MEMP_TCP_PCB_LISTEN, pcb); +} + +/** + * Called periodically to dispatch TCP timers. + */ +void +tcp_tmr(void) +{ + /* Call tcp_fasttmr() every 250 ms */ + tcp_fasttmr(); + + if (++tcp_timer & 1) { + /* Call tcp_slowtmr() every 500 ms, i.e., every other timer + tcp_tmr() is called. */ + tcp_slowtmr(); + } +} + +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG +/** Called when a listen pcb is closed. Iterates one pcb list and removes the + * closed listener pcb from pcb->listener if matching. + */ +static void +tcp_remove_listener(struct tcp_pcb *list, struct tcp_pcb_listen *lpcb) +{ + struct tcp_pcb *pcb; + + LWIP_ASSERT("tcp_remove_listener: invalid listener", lpcb != NULL); + + for (pcb = list; pcb != NULL; pcb = pcb->next) { + if (pcb->listener == lpcb) { + pcb->listener = NULL; + } + } +} +#endif + +/** Called when a listen pcb is closed. Iterates all pcb lists and removes the + * closed listener pcb from pcb->listener if matching. 
+ */ +static void +tcp_listen_closed(struct tcp_pcb *pcb) +{ +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + size_t i; + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("pcb->state == LISTEN", pcb->state == LISTEN); + for (i = 1; i < LWIP_ARRAYSIZE(tcp_pcb_lists); i++) { + tcp_remove_listener(*tcp_pcb_lists[i], (struct tcp_pcb_listen *)pcb); + } +#endif + LWIP_UNUSED_ARG(pcb); +} + +#if TCP_LISTEN_BACKLOG +/** @ingroup tcp_raw + * Delay accepting a connection in respect to the listen backlog: + * the number of outstanding connections is increased until + * tcp_backlog_accepted() is called. + * + * ATTENTION: the caller is responsible for calling tcp_backlog_accepted() + * or else the backlog feature will get out of sync! + * + * @param pcb the connection pcb which is not fully accepted yet + */ +void +tcp_backlog_delayed(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb->flags & TF_BACKLOGPEND) == 0) { + if (pcb->listener != NULL) { + pcb->listener->accepts_pending++; + LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0); + tcp_set_flags(pcb, TF_BACKLOGPEND); + } + } +} + +/** @ingroup tcp_raw + * A delayed-accept a connection is accepted (or closed/aborted): decreases + * the number of outstanding connections after calling tcp_backlog_delayed(). + * + * ATTENTION: the caller is responsible for calling tcp_backlog_accepted() + * or else the backlog feature will get out of sync! + * + * @param pcb the connection pcb which is now fully accepted (or closed/aborted) + */ +void +tcp_backlog_accepted(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb->flags & TF_BACKLOGPEND) != 0) { + if (pcb->listener != NULL) { + LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0); + pcb->listener->accepts_pending--; + tcp_clear_flags(pcb, TF_BACKLOGPEND); + } + } +} +#endif /* TCP_LISTEN_BACKLOG */ + +/** + * Closes the TX side of a connection held by the PCB. + * For tcp_close(), a RST is sent if the application didn't receive all data + * (tcp_recved() not called for all data passed to recv callback). + * + * Listening pcbs are freed and may not be referenced any more. + * Connection pcbs are freed if not yet connected and may not be referenced + * any more. If a connection is established (at least SYN received or in + * a closing state), the connection is closed, and put in a closing state. + * The pcb is then automatically freed in tcp_slowtmr(). It is therefore + * unsafe to reference it. + * + * @param pcb the tcp_pcb to close + * @return ERR_OK if connection has been closed + * another err_t if closing failed and pcb is not freed + */ +static err_t +tcp_close_shutdown(struct tcp_pcb *pcb, u8_t rst_on_unacked_data) +{ + LWIP_ASSERT("tcp_close_shutdown: invalid pcb", pcb != NULL); + + if (rst_on_unacked_data && ((pcb->state == ESTABLISHED) || (pcb->state == CLOSE_WAIT))) { + if ((pcb->refused_data != NULL) || (pcb->rcv_wnd != TCP_WND_MAX(pcb))) { + /* Not all data received by application, send RST to tell the remote + side about this. 
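+         (refused_data != NULL, or a receive window below TCP_WND_MAX, means
+         the application has not yet acknowledged all delivered data via
+         tcp_recved().)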
*/ + LWIP_ASSERT("pcb->flags & TF_RXCLOSED", pcb->flags & TF_RXCLOSED); + + /* don't call tcp_abort here: we must not deallocate the pcb since + that might not be expected when calling tcp_close */ + tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, + pcb->local_port, pcb->remote_port); + + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + /* Deallocate the pcb since we already sent a RST for it */ + if (tcp_input_pcb == pcb) { + /* prevent using a deallocated pcb: free it from tcp_input later */ + tcp_trigger_input_pcb_close(); + } else { + tcp_free(pcb); + } + return ERR_OK; + } + } + + /* - states which free the pcb are handled here, + - states which send FIN and change state are handled in tcp_close_shutdown_fin() */ + switch (pcb->state) { + case CLOSED: + /* Closing a pcb in the CLOSED state might seem erroneous, + * however, it is in this state once allocated and as yet unused + * and the user needs some way to free it should the need arise. + * Calling tcp_close() with a pcb that has already been closed, (i.e. twice) + * or for a pcb that has been used and then entered the CLOSED state + * is erroneous, but this should never happen as the pcb has in those cases + * been freed, and so any remaining handles are bogus. */ + if (pcb->local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } + tcp_free(pcb); + break; + case LISTEN: + tcp_listen_closed(pcb); + tcp_pcb_remove(&tcp_listen_pcbs.pcbs, pcb); + tcp_free_listen(pcb); + break; + case SYN_SENT: + TCP_PCB_REMOVE_ACTIVE(pcb); + tcp_free(pcb); + MIB2_STATS_INC(mib2.tcpattemptfails); + break; + default: + return tcp_close_shutdown_fin(pcb); + } + return ERR_OK; +} + +static err_t +tcp_close_shutdown_fin(struct tcp_pcb *pcb) +{ + err_t err; + LWIP_ASSERT("pcb != NULL", pcb != NULL); + + switch (pcb->state) { + case SYN_RCVD: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + tcp_backlog_accepted(pcb); + MIB2_STATS_INC(mib2.tcpattemptfails); + pcb->state = FIN_WAIT_1; + } + break; + case ESTABLISHED: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + MIB2_STATS_INC(mib2.tcpestabresets); + pcb->state = FIN_WAIT_1; + } + break; + case CLOSE_WAIT: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + MIB2_STATS_INC(mib2.tcpestabresets); + pcb->state = LAST_ACK; + } + break; + default: + /* Has already been closed, do nothing. */ + return ERR_OK; + } + + if (err == ERR_OK) { + /* To ensure all data has been sent when tcp_close returns, we have + to make sure tcp_output doesn't fail. + Since we don't really have to ensure all data has been sent when tcp_close + returns (unsent data is sent from tcp timer functions, also), we don't care + for the return value of tcp_output for now. */ + tcp_output(pcb); + } else if (err == ERR_MEM) { + /* Mark this pcb for closing. Closing is retried from tcp_tmr. */ + tcp_set_flags(pcb, TF_CLOSEPEND); + /* We have to return ERR_OK from here to indicate to the callers that this + pcb should not be used any more as it will be freed soon via tcp_tmr. + This is OK here since sending FIN does not guarantee a time frime for + actually freeing the pcb, either (it is left in closure states for + remote ACK or timeout) */ + return ERR_OK; + } + return err; +} + +/** + * @ingroup tcp_raw + * Closes the connection held by the PCB. + * + * Listening pcbs are freed and may not be referenced any more. + * Connection pcbs are freed if not yet connected and may not be referenced + * any more. 
If a connection is established (at least SYN received or in + * a closing state), the connection is closed, and put in a closing state. + * The pcb is then automatically freed in tcp_slowtmr(). It is therefore + * unsafe to reference it (unless an error is returned). + * + * The function may return ERR_MEM if no memory + * was available for closing the connection. If so, the application + * should wait and try again either by using the acknowledgment + * callback or the polling functionality. If the close succeeds, the + * function returns ERR_OK. + * + * @param pcb the tcp_pcb to close + * @return ERR_OK if connection has been closed + * another err_t if closing failed and pcb is not freed + */ +err_t +tcp_close(struct tcp_pcb *pcb) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_close: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_close: closing in ")); + + tcp_debug_print_state(pcb->state); + + if (pcb->state != LISTEN) { + /* Set a flag not to receive any more data... */ + tcp_set_flags(pcb, TF_RXCLOSED); + } + /* ... and close */ + return tcp_close_shutdown(pcb, 1); +} + +/** + * @ingroup tcp_raw + * Causes all or part of a full-duplex connection of this PCB to be shut down. + * This doesn't deallocate the PCB unless shutting down both sides! + * Shutting down both sides is the same as calling tcp_close, so if it succeds + * (i.e. returns ER_OK), the PCB must not be referenced any more! + * + * @param pcb PCB to shutdown + * @param shut_rx shut down receive side if this is != 0 + * @param shut_tx shut down send side if this is != 0 + * @return ERR_OK if shutdown succeeded (or the PCB has already been shut down) + * another err_t on error. + */ +err_t +tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_shutdown: invalid pcb", pcb != NULL, return ERR_ARG); + + if (pcb->state == LISTEN) { + return ERR_CONN; + } + if (shut_rx) { + /* shut down the receive side: set a flag not to receive any more data... */ + tcp_set_flags(pcb, TF_RXCLOSED); + if (shut_tx) { + /* shutting down the tx AND rx side is the same as closing for the raw API */ + return tcp_close_shutdown(pcb, 1); + } + /* ... and free buffered data */ + if (pcb->refused_data != NULL) { + pbuf_free(pcb->refused_data); + pcb->refused_data = NULL; + } + } + if (shut_tx) { + /* This can't happen twice since if it succeeds, the pcb's state is changed. + Only close in these states as the others directly deallocate the PCB */ + switch (pcb->state) { + case SYN_RCVD: + case ESTABLISHED: + case CLOSE_WAIT: + return tcp_close_shutdown(pcb, (u8_t)shut_rx); + default: + /* Not (yet?) connected, cannot shutdown the TX side as that would bring us + into CLOSED state, where the PCB is deallocated. */ + return ERR_CONN; + } + } + return ERR_OK; +} + +/** + * Abandons a connection and optionally sends a RST to the remote + * host. Deletes the local protocol control block. This is done when + * a connection is killed because of shortage of memory. 
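+ * No RST is sent for pcbs that are in TIME_WAIT or that were never opened
+ * (still CLOSED); in those cases the pcb is simply unlinked and freed.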
+ * + * @param pcb the tcp_pcb to abort + * @param reset boolean to indicate whether a reset should be sent + */ +void +tcp_abandon(struct tcp_pcb *pcb, int reset) +{ + u32_t seqno, ackno; +#if LWIP_CALLBACK_API + tcp_err_fn errf; +#endif /* LWIP_CALLBACK_API */ + void *errf_arg; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_abandon: invalid pcb", pcb != NULL, return); + + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_abort/tcp_abandon for listen-pcbs", + pcb->state != LISTEN); + /* Figure out on which TCP PCB list we are, and remove us. If we + are in an active state, call the receive function associated with + the PCB with a NULL argument, and send an RST to the remote end. */ + if (pcb->state == TIME_WAIT) { + tcp_pcb_remove(&tcp_tw_pcbs, pcb); + tcp_free(pcb); + } else { + int send_rst = 0; + u16_t local_port = 0; + enum tcp_state last_state; + seqno = pcb->snd_nxt; + ackno = pcb->rcv_nxt; +#if LWIP_CALLBACK_API + errf = pcb->errf; +#endif /* LWIP_CALLBACK_API */ + errf_arg = pcb->callback_arg; + if (pcb->state == CLOSED) { + if (pcb->local_port != 0) { + /* bound, not yet opened */ + TCP_RMV(&tcp_bound_pcbs, pcb); + } + } else { + send_rst = reset; + local_port = pcb->local_port; + TCP_PCB_REMOVE_ACTIVE(pcb); + } + if (pcb->unacked != NULL) { + tcp_segs_free(pcb->unacked); + } + if (pcb->unsent != NULL) { + tcp_segs_free(pcb->unsent); + } +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL) { + tcp_segs_free(pcb->ooseq); + } +#endif /* TCP_QUEUE_OOSEQ */ + tcp_backlog_accepted(pcb); + if (send_rst) { + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_abandon: sending RST\n")); + tcp_rst(pcb, seqno, ackno, &pcb->local_ip, &pcb->remote_ip, local_port, pcb->remote_port); + } + last_state = pcb->state; + tcp_free(pcb); + TCP_EVENT_ERR(last_state, errf, errf_arg, ERR_ABRT); + } +} + +/** + * @ingroup tcp_raw + * Aborts the connection by sending a RST (reset) segment to the remote + * host. The pcb is deallocated. This function never fails. + * + * ATTENTION: When calling this from one of the TCP callbacks, make + * sure you always return ERR_ABRT (and never return ERR_ABRT otherwise + * or you will risk accessing deallocated memory or memory leaks! + * + * @param pcb the tcp pcb to abort + */ +void +tcp_abort(struct tcp_pcb *pcb) +{ + tcp_abandon(pcb, 1); +} + +/** + * @ingroup tcp_raw + * Binds the connection to a local port number and IP address. If the + * IP address is not given (i.e., ipaddr == IP_ANY_TYPE), the connection is + * bound to all local IP addresses. + * If another connection is bound to the same port, the function will + * return ERR_USE, otherwise ERR_OK is returned. + * + * @param pcb the tcp_pcb to bind (no check is done whether this pcb is + * already bound!) 
+ * @param ipaddr the local ip address to bind to (use IPx_ADDR_ANY to bind + * to any local address + * @param port the local port to bind to + * @return ERR_USE if the port is already in use + * ERR_VAL if bind failed because the PCB is not in a valid state + * ERR_OK if bound + */ +err_t +tcp_bind(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + int i; + int max_pcb_list = NUM_TCP_PCB_LISTS; + struct tcp_pcb *cpcb; +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + ip_addr_t zoned_ipaddr; +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY; + } +#else /* LWIP_IPV4 */ + LWIP_ERROR("tcp_bind: invalid ipaddr", ipaddr != NULL, return ERR_ARG); +#endif /* LWIP_IPV4 */ + + LWIP_ERROR("tcp_bind: invalid pcb", pcb != NULL, return ERR_ARG); + + LWIP_ERROR("tcp_bind: can only bind in state CLOSED", pcb->state == CLOSED, return ERR_VAL); + +#if SO_REUSE + /* Unless the REUSEADDR flag is set, + we have to check the pcbs in TIME-WAIT state, also. + We do not dump TIME_WAIT pcb's; they can still be matched by incoming + packets using both local and remote IP addresses and ports to distinguish. + */ + if (ip_get_option(pcb, SOF_REUSEADDR)) { + max_pcb_list = NUM_TCP_PCB_LISTS_NO_TIME_WAIT; + } +#endif /* SO_REUSE */ + +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. Do the zone selection before the address-in-use + * check below; as such we have to make a temporary copy of the address. */ + if (IP_IS_V6(ipaddr) && ip6_addr_lacks_zone(ip_2_ip6(ipaddr), IP6_UNICAST)) { + ip_addr_copy(zoned_ipaddr, *ipaddr); + ip6_addr_select_zone(ip_2_ip6(&zoned_ipaddr), ip_2_ip6(&zoned_ipaddr)); + ipaddr = &zoned_ipaddr; + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + if (port == 0) { + port = tcp_new_port(); + if (port == 0) { + return ERR_BUF; + } + } else { + /* Check if the address already is in use (on all lists) */ + for (i = 0; i < max_pcb_list; i++) { + for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) { + if (cpcb->local_port == port) { +#if SO_REUSE + /* Omit checking for the same port if both pcbs have REUSEADDR set. + For SO_REUSEADDR, the duplicate-check for a 5-tuple is done in + tcp_connect. */ + if (!ip_get_option(pcb, SOF_REUSEADDR) || + !ip_get_option(cpcb, SOF_REUSEADDR)) +#endif /* SO_REUSE */ + { + /* @todo: check accept_any_ip_version */ + if ((IP_IS_V6(ipaddr) == IP_IS_V6_VAL(cpcb->local_ip)) && + (ip_addr_isany(&cpcb->local_ip) || + ip_addr_isany(ipaddr) || + ip_addr_cmp(&cpcb->local_ip, ipaddr))) { + return ERR_USE; + } + } + } + } + } + } + + if (!ip_addr_isany(ipaddr) +#if LWIP_IPV4 && LWIP_IPV6 + || (IP_GET_TYPE(ipaddr) != IP_GET_TYPE(&pcb->local_ip)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + ) { + ip_addr_set(&pcb->local_ip, ipaddr); + } + pcb->local_port = port; + TCP_REG(&tcp_bound_pcbs, pcb); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_bind: bind to port %"U16_F"\n", port)); + return ERR_OK; +} + +/** + * @ingroup tcp_raw + * Binds the connection to a netif and IP address. + * After calling this function, all packets received via this PCB + * are guaranteed to have come in via the specified netif, and all + * outgoing packets will go out via the specified netif. + * + * @param pcb the tcp_pcb to bind. + * @param netif the netif to bind to. Can be NULL. 
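+ *        Passing NULL removes an existing binding again (the pcb's
+ *        netif_idx is reset to NETIF_NO_INDEX).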
+ */ +void +tcp_bind_netif(struct tcp_pcb *pcb, const struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (netif != NULL) { + pcb->netif_idx = netif_get_index(netif); + } else { + pcb->netif_idx = NETIF_NO_INDEX; + } +} + +#if LWIP_CALLBACK_API +/** + * Default accept callback if no accept callback is specified by the user. + */ +static err_t +tcp_accept_null(void *arg, struct tcp_pcb *pcb, err_t err) +{ + LWIP_UNUSED_ARG(arg); + LWIP_UNUSED_ARG(err); + + LWIP_ASSERT("tcp_accept_null: invalid pcb", pcb != NULL); + + tcp_abort(pcb); + + return ERR_ABRT; +} +#endif /* LWIP_CALLBACK_API */ + +/** + * @ingroup tcp_raw + * Set the state of the connection to be LISTEN, which means that it + * is able to accept incoming connections. The protocol control block + * is reallocated in order to consume less memory. Setting the + * connection to LISTEN is an irreversible process. + * When an incoming connection is accepted, the function specified with + * the tcp_accept() function will be called. The pcb has to be bound + * to a local port with the tcp_bind() function. + * + * The tcp_listen() function returns a new connection identifier, and + * the one passed as an argument to the function will be + * deallocated. The reason for this behavior is that less memory is + * needed for a connection that is listening, so tcp_listen() will + * reclaim the memory needed for the original connection and allocate a + * new smaller memory block for the listening connection. + * + * tcp_listen() may return NULL if no memory was available for the + * listening connection. If so, the memory associated with the pcb + * passed as an argument to tcp_listen() will not be deallocated. + * + * The backlog limits the number of outstanding connections + * in the listen queue to the value specified by the backlog argument. + * To use it, your need to set TCP_LISTEN_BACKLOG=1 in your lwipopts.h. + * + * @param pcb the original tcp_pcb + * @param backlog the incoming connections queue limit + * @return tcp_pcb used for listening, consumes less memory. + * + * @note The original tcp_pcb is freed. This function therefore has to be + * called like this: + * tpcb = tcp_listen_with_backlog(tpcb, backlog); + */ +struct tcp_pcb * +tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog) +{ + LWIP_ASSERT_CORE_LOCKED(); + return tcp_listen_with_backlog_and_err(pcb, backlog, NULL); +} + +/** + * @ingroup tcp_raw + * Set the state of the connection to be LISTEN, which means that it + * is able to accept incoming connections. The protocol control block + * is reallocated in order to consume less memory. Setting the + * connection to LISTEN is an irreversible process. + * + * @param pcb the original tcp_pcb + * @param backlog the incoming connections queue limit + * @param err when NULL is returned, this contains the error reason + * @return tcp_pcb used for listening, consumes less memory. + * + * @note The original tcp_pcb is freed. This function therefore has to be + * called like this: + * tpcb = tcp_listen_with_backlog_and_err(tpcb, backlog, &err); + */ +struct tcp_pcb * +tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err) +{ + struct tcp_pcb_listen *lpcb = NULL; + err_t res; + + LWIP_UNUSED_ARG(backlog); + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_listen_with_backlog_and_err: invalid pcb", pcb != NULL, res = ERR_ARG; goto done); + LWIP_ERROR("tcp_listen_with_backlog_and_err: pcb already connected", pcb->state == CLOSED, res = ERR_CLSD; goto done); + + /* already listening? 
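+      If so, the pcb is returned unchanged and ERR_ALREADY is reported.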
*/ + if (pcb->state == LISTEN) { + lpcb = (struct tcp_pcb_listen *)pcb; + res = ERR_ALREADY; + goto done; + } +#if SO_REUSE + if (ip_get_option(pcb, SOF_REUSEADDR)) { + /* Since SOF_REUSEADDR allows reusing a local address before the pcb's usage + is declared (listen-/connection-pcb), we have to make sure now that + this port is only used once for every local IP. */ + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + if ((lpcb->local_port == pcb->local_port) && + ip_addr_cmp(&lpcb->local_ip, &pcb->local_ip)) { + /* this address/port is already used */ + lpcb = NULL; + res = ERR_USE; + goto done; + } + } + } +#endif /* SO_REUSE */ + lpcb = (struct tcp_pcb_listen *)memp_malloc(MEMP_TCP_PCB_LISTEN); + if (lpcb == NULL) { + res = ERR_MEM; + goto done; + } + lpcb->callback_arg = pcb->callback_arg; + lpcb->local_port = pcb->local_port; + lpcb->state = LISTEN; + lpcb->prio = pcb->prio; + lpcb->so_options = pcb->so_options; + lpcb->netif_idx = NETIF_NO_INDEX; + lpcb->ttl = pcb->ttl; + lpcb->tos = pcb->tos; +#if LWIP_IPV4 && LWIP_IPV6 + IP_SET_TYPE_VAL(lpcb->remote_ip, pcb->local_ip.type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + ip_addr_copy(lpcb->local_ip, pcb->local_ip); + if (pcb->local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } +#if LWIP_TCP_PCB_NUM_EXT_ARGS + /* copy over ext_args to listening pcb */ + memcpy(&lpcb->ext_args, &pcb->ext_args, sizeof(pcb->ext_args)); +#endif + tcp_free(pcb); +#if LWIP_CALLBACK_API + lpcb->accept = tcp_accept_null; +#endif /* LWIP_CALLBACK_API */ +#if TCP_LISTEN_BACKLOG + lpcb->accepts_pending = 0; + tcp_backlog_set(lpcb, backlog); +#endif /* TCP_LISTEN_BACKLOG */ + TCP_REG(&tcp_listen_pcbs.pcbs, (struct tcp_pcb *)lpcb); + res = ERR_OK; +done: + if (err != NULL) { + *err = res; + } + return (struct tcp_pcb *)lpcb; +} + +/** + * Update the state that tracks the available window space to advertise. + * + * Returns how much extra window would be advertised if we sent an + * update now. + */ +u32_t +tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb) +{ + u32_t new_right_edge; + + LWIP_ASSERT("tcp_update_rcv_ann_wnd: invalid pcb", pcb != NULL); + new_right_edge = pcb->rcv_nxt + pcb->rcv_wnd; + + if (TCP_SEQ_GEQ(new_right_edge, pcb->rcv_ann_right_edge + LWIP_MIN((TCP_WND / 2), pcb->mss))) { + /* we can advertise more window */ + pcb->rcv_ann_wnd = pcb->rcv_wnd; + return new_right_edge - pcb->rcv_ann_right_edge; + } else { + if (TCP_SEQ_GT(pcb->rcv_nxt, pcb->rcv_ann_right_edge)) { + /* Can happen due to other end sending out of advertised window, + * but within actual available (but not yet advertised) window */ + pcb->rcv_ann_wnd = 0; + } else { + /* keep the right edge of window constant */ + u32_t new_rcv_ann_wnd = pcb->rcv_ann_right_edge - pcb->rcv_nxt; +#if !LWIP_WND_SCALE + LWIP_ASSERT("new_rcv_ann_wnd <= 0xffff", new_rcv_ann_wnd <= 0xffff); +#endif + pcb->rcv_ann_wnd = (tcpwnd_size_t)new_rcv_ann_wnd; + } + return 0; + } +} + +/** + * @ingroup tcp_raw + * This function should be called by the application when it has + * processed the data. The purpose is to advertise a larger window + * when the data has been processed. 
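+ *
+ * A typical recv callback consumes the pbuf, re-opens the receive window and
+ * then frees the pbuf. A minimal sketch (my_recv and consume_payload are
+ * hypothetical application functions, error handling omitted):
+ * @code{.c}
+ * static err_t my_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
+ * {
+ *   if (p == NULL) {
+ *     return tcp_close(tpcb);      // remote host closed the connection
+ *   }
+ *   consume_payload(p);            // application-specific processing
+ *   tcp_recved(tpcb, p->tot_len);  // advertise the window again
+ *   pbuf_free(p);
+ *   return ERR_OK;
+ * }
+ * @endcode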
+ * + * @param pcb the tcp_pcb for which data is read + * @param len the amount of bytes that have been read by the application + */ +void +tcp_recved(struct tcp_pcb *pcb, u16_t len) +{ + u32_t wnd_inflation; + tcpwnd_size_t rcv_wnd; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_recved: invalid pcb", pcb != NULL, return); + + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_recved for listen-pcbs", + pcb->state != LISTEN); + + rcv_wnd = (tcpwnd_size_t)(pcb->rcv_wnd + len); + if ((rcv_wnd > TCP_WND_MAX(pcb)) || (rcv_wnd < pcb->rcv_wnd)) { + /* window got too big or tcpwnd_size_t overflow */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: window got too big or tcpwnd_size_t overflow\n")); + pcb->rcv_wnd = TCP_WND_MAX(pcb); + } else { + pcb->rcv_wnd = rcv_wnd; + } + + wnd_inflation = tcp_update_rcv_ann_wnd(pcb); + + /* If the change in the right edge of window is significant (default + * watermark is TCP_WND/4), then send an explicit update now. + * Otherwise wait for a packet to be sent in the normal course of + * events (or more window to be available later) */ + if (wnd_inflation >= TCP_WND_UPDATE_THRESHOLD) { + tcp_ack_now(pcb); + tcp_output(pcb); + } + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: received %"U16_F" bytes, wnd %"TCPWNDSIZE_F" (%"TCPWNDSIZE_F").\n", + len, pcb->rcv_wnd, (u16_t)(TCP_WND_MAX(pcb) - pcb->rcv_wnd))); +} + +/** + * Allocate a new local TCP port. + * + * @return a new (free) local TCP port number + */ +static u16_t +tcp_new_port(void) +{ + u8_t i; + u16_t n = 0; + struct tcp_pcb *pcb; + +again: + tcp_port++; + if (tcp_port == TCP_LOCAL_PORT_RANGE_END) { + tcp_port = TCP_LOCAL_PORT_RANGE_START; + } + /* Check all PCB lists. */ + for (i = 0; i < NUM_TCP_PCB_LISTS; i++) { + for (pcb = *tcp_pcb_lists[i]; pcb != NULL; pcb = pcb->next) { + if (pcb->local_port == tcp_port) { + n++; + if (n > (TCP_LOCAL_PORT_RANGE_END - TCP_LOCAL_PORT_RANGE_START)) { + return 0; + } + goto again; + } + } + } + return tcp_port; +} + +/** + * @ingroup tcp_raw + * Connects to another host. The function given as the "connected" + * argument will be called when the connection has been established. + * Sets up the pcb to connect to the remote host and sends the + * initial SYN segment which opens the connection. + * + * The tcp_connect() function returns immediately; it does not wait for + * the connection to be properly setup. Instead, it will call the + * function specified as the fourth argument (the "connected" argument) + * when the connection is established. If the connection could not be + * properly established, either because the other host refused the + * connection or because the other host didn't answer, the "err" + * callback function of this pcb (registered with tcp_err, see below) + * will be called. + * + * The tcp_connect() function can return ERR_MEM if no memory is + * available for enqueueing the SYN segment. If the SYN indeed was + * enqueued successfully, the tcp_connect() function returns ERR_OK. 
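+ *
+ * A minimal active open looks roughly like this (sketch only; the address,
+ * port and my_connected_cb callback are hypothetical, error handling omitted):
+ * @code{.c}
+ * ip_addr_t dst;
+ * struct tcp_pcb *tpcb = tcp_new();
+ * IP_ADDR4(&dst, 192, 168, 1, 10);
+ * tcp_connect(tpcb, &dst, 80, my_connected_cb);
+ * @endcode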
+ * + * @param pcb the tcp_pcb used to establish the connection + * @param ipaddr the remote ip address to connect to + * @param port the remote tcp port to connect to + * @param connected callback function to call when connected (on error, + the err calback will be called) + * @return ERR_VAL if invalid arguments are given + * ERR_OK if connect request has been sent + * other err_t values if connect request couldn't be sent + */ +err_t +tcp_connect(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port, + tcp_connected_fn connected) +{ + struct netif *netif = NULL; + err_t ret; + u32_t iss; + u16_t old_local_port; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_connect: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("tcp_connect: invalid ipaddr", ipaddr != NULL, return ERR_ARG); + + LWIP_ERROR("tcp_connect: can only connect from state CLOSED", pcb->state == CLOSED, return ERR_ISCONN); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_connect to port %"U16_F"\n", port)); + ip_addr_set(&pcb->remote_ip, ipaddr); + pcb->remote_port = port; + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { + /* check if we have a route to the remote host */ + netif = ip_route(&pcb->local_ip, &pcb->remote_ip); + } + if (netif == NULL) { + /* Don't even try to send a SYN packet if we have no route since that will fail. */ + return ERR_RTE; + } + + /* check if local IP has been assigned to pcb, if not, get one */ + if (ip_addr_isany(&pcb->local_ip)) { + const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, ipaddr); + if (local_ip == NULL) { + return ERR_RTE; + } + ip_addr_copy(pcb->local_ip, *local_ip); + } + +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * Given that we already have the target netif, this is easy and cheap. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNICAST)) { + ip6_addr_assign_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNICAST, netif); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + old_local_port = pcb->local_port; + if (pcb->local_port == 0) { + pcb->local_port = tcp_new_port(); + if (pcb->local_port == 0) { + return ERR_BUF; + } + } else { +#if SO_REUSE + if (ip_get_option(pcb, SOF_REUSEADDR)) { + /* Since SOF_REUSEADDR allows reusing a local address, we have to make sure + now that the 5-tuple is unique. */ + struct tcp_pcb *cpcb; + int i; + /* Don't check listen- and bound-PCBs, check active- and TIME-WAIT PCBs. */ + for (i = 2; i < NUM_TCP_PCB_LISTS; i++) { + for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) { + if ((cpcb->local_port == pcb->local_port) && + (cpcb->remote_port == port) && + ip_addr_cmp(&cpcb->local_ip, &pcb->local_ip) && + ip_addr_cmp(&cpcb->remote_ip, ipaddr)) { + /* linux returns EISCONN here, but ERR_USE should be OK for us */ + return ERR_USE; + } + } + } + } +#endif /* SO_REUSE */ + } + + iss = tcp_next_iss(pcb); + pcb->rcv_nxt = 0; + pcb->snd_nxt = iss; + pcb->lastack = iss - 1; + pcb->snd_wl2 = iss - 1; + pcb->snd_lbb = iss - 1; + /* Start with a window that does not need scaling. When window scaling is + enabled and used, the window is enlarged when both sides agree on scaling. */ + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); + pcb->rcv_ann_right_edge = pcb->rcv_nxt; + pcb->snd_wnd = TCP_WND; + /* As initial send MSS, we use TCP_MSS but limit it to 536. + The send MSS is updated when an MSS option is received. 
*/ + pcb->mss = INITIAL_MSS; +#if TCP_CALCULATE_EFF_SEND_MSS + pcb->mss = tcp_eff_send_mss_netif(pcb->mss, netif, &pcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + pcb->cwnd = 1; +#if LWIP_CALLBACK_API + pcb->connected = connected; +#else /* LWIP_CALLBACK_API */ + LWIP_UNUSED_ARG(connected); +#endif /* LWIP_CALLBACK_API */ + + /* Send a SYN together with the MSS option. */ + ret = tcp_enqueue_flags(pcb, TCP_SYN); + if (ret == ERR_OK) { + /* SYN segment was enqueued, changed the pcbs state now */ + pcb->state = SYN_SENT; + if (old_local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } + TCP_REG_ACTIVE(pcb); + MIB2_STATS_INC(mib2.tcpactiveopens); + + tcp_output(pcb); + } + return ret; +} + +/** + * Called every 500 ms and implements the retransmission timer and the timer that + * removes PCBs that have been in TIME-WAIT for enough time. It also increments + * various timers such as the inactivity timer in each PCB. + * + * Automatically called from tcp_tmr(). + */ +void +tcp_slowtmr(void) +{ + struct tcp_pcb *pcb, *prev; + tcpwnd_size_t eff_wnd; + u8_t pcb_remove; /* flag if a PCB should be removed */ + u8_t pcb_reset; /* flag if a RST should be sent when removing */ + err_t err; + + err = ERR_OK; + + ++tcp_ticks; + ++tcp_timer_ctr; + +tcp_slowtmr_start: + /* Steps through all of the active PCBs. */ + prev = NULL; + pcb = tcp_active_pcbs; + if (pcb == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: no active pcbs\n")); + } + while (pcb != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: processing active pcb\n")); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != CLOSED\n", pcb->state != CLOSED); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != LISTEN\n", pcb->state != LISTEN); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != TIME-WAIT\n", pcb->state != TIME_WAIT); + if (pcb->last_timer == tcp_timer_ctr) { + /* skip this pcb, we have already processed it */ + prev = pcb; + pcb = pcb->next; + continue; + } + pcb->last_timer = tcp_timer_ctr; + + pcb_remove = 0; + pcb_reset = 0; + + if (pcb->state == SYN_SENT && pcb->nrtx >= TCP_SYNMAXRTX) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max SYN retries reached\n")); + } else if (pcb->nrtx >= TCP_MAXRTX) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max DATA retries reached\n")); + } else { + if (pcb->persist_backoff > 0) { + LWIP_ASSERT("tcp_slowtimr: persist ticking with in-flight data", pcb->unacked == NULL); + LWIP_ASSERT("tcp_slowtimr: persist ticking with empty send buffer", pcb->unsent != NULL); + if (pcb->persist_probe >= TCP_MAXRTX) { + ++pcb_remove; /* max probes reached */ + } else { + u8_t backoff_cnt = tcp_persist_backoff[pcb->persist_backoff - 1]; + if (pcb->persist_cnt < backoff_cnt) { + pcb->persist_cnt++; + } + if (pcb->persist_cnt >= backoff_cnt) { + int next_slot = 1; /* increment timer to next slot */ + /* If snd_wnd is zero, send 1 byte probes */ + if (pcb->snd_wnd == 0) { + if (tcp_zero_window_probe(pcb) != ERR_OK) { + next_slot = 0; /* try probe again with current slot */ + } + /* snd_wnd not fully closed, split unsent head and fill window */ + } else { + if (tcp_split_unsent_seg(pcb, (u16_t)pcb->snd_wnd) == ERR_OK) { + if (tcp_output(pcb) == ERR_OK) { + /* sending will cancel persist timer, else retry with current slot */ + next_slot = 0; + } + } + } + if (next_slot) { + pcb->persist_cnt = 0; + if (pcb->persist_backoff < sizeof(tcp_persist_backoff)) { + pcb->persist_backoff++; + } + } + } + } + } else { + /* Increase the retransmission timer if it is running */ + if ((pcb->rtime >= 0) && 
(pcb->rtime < 0x7FFF)) { + ++pcb->rtime; + } + + if (pcb->rtime >= pcb->rto) { + /* Time for a retransmission. */ + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_slowtmr: rtime %"S16_F + " pcb->rto %"S16_F"\n", + pcb->rtime, pcb->rto)); + /* If prepare phase fails but we have unsent data but no unacked data, + still execute the backoff calculations below, as this means we somehow + failed to send segment. */ + if ((tcp_rexmit_rto_prepare(pcb) == ERR_OK) || ((pcb->unacked == NULL) && (pcb->unsent != NULL))) { + /* Double retransmission time-out unless we are trying to + * connect to somebody (i.e., we are in SYN_SENT). */ + if (pcb->state != SYN_SENT) { + u8_t backoff_idx = LWIP_MIN(pcb->nrtx, sizeof(tcp_backoff) - 1); + int calc_rto = ((pcb->sa >> 3) + pcb->sv) << tcp_backoff[backoff_idx]; + pcb->rto = (s16_t)LWIP_MIN(calc_rto, 0x7FFF); + } + + /* Reset the retransmission timer. */ + pcb->rtime = 0; + + /* Reduce congestion window and ssthresh. */ + eff_wnd = LWIP_MIN(pcb->cwnd, pcb->snd_wnd); + pcb->ssthresh = eff_wnd >> 1; + if (pcb->ssthresh < (tcpwnd_size_t)(pcb->mss << 1)) { + pcb->ssthresh = (tcpwnd_size_t)(pcb->mss << 1); + } + pcb->cwnd = pcb->mss; + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + pcb->bytes_acked = 0; + + /* The following needs to be called AFTER cwnd is set to one + mss - STJ */ + tcp_rexmit_rto_commit(pcb); + } + } + } + } + /* Check if this PCB has stayed too long in FIN-WAIT-2 */ + if (pcb->state == FIN_WAIT_2) { + /* If this PCB is in FIN_WAIT_2 because of SHUT_WR don't let it time out. */ + if (pcb->flags & TF_RXCLOSED) { + /* PCB was fully closed (either through close() or SHUT_RDWR): + normal FIN-WAIT timeout handling. */ + if ((u32_t)(tcp_ticks - pcb->tmr) > + TCP_FIN_WAIT_TIMEOUT / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in FIN-WAIT-2\n")); + } + } + } + + /* Check if KEEPALIVE should be sent */ + if (ip_get_option(pcb, SOF_KEEPALIVE) && + ((pcb->state == ESTABLISHED) || + (pcb->state == CLOSE_WAIT))) { + if ((u32_t)(tcp_ticks - pcb->tmr) > + (pcb->keep_idle + TCP_KEEP_DUR(pcb)) / TCP_SLOW_INTERVAL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: KEEPALIVE timeout. Aborting connection to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + ++pcb_remove; + ++pcb_reset; + } else if ((u32_t)(tcp_ticks - pcb->tmr) > + (pcb->keep_idle + pcb->keep_cnt_sent * TCP_KEEP_INTVL(pcb)) + / TCP_SLOW_INTERVAL) { + err = tcp_keepalive(pcb); + if (err == ERR_OK) { + pcb->keep_cnt_sent++; + } + } + } + + /* If this PCB has queued out of sequence data, but has been + inactive for too long, will drop the data (it will eventually + be retransmitted). 
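+     "Too long" here means TCP_OOSEQ_TIMEOUT retransmission timeouts
+     (pcb->rto) without any activity on the pcb.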
*/ +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL && + (tcp_ticks - pcb->tmr >= (u32_t)pcb->rto * TCP_OOSEQ_TIMEOUT)) { + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: dropping OOSEQ queued data\n")); + tcp_free_ooseq(pcb); + } +#endif /* TCP_QUEUE_OOSEQ */ + + /* Check if this PCB has stayed too long in SYN-RCVD */ + if (pcb->state == SYN_RCVD) { + if ((u32_t)(tcp_ticks - pcb->tmr) > + TCP_SYN_RCVD_TIMEOUT / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in SYN-RCVD\n")); + } + } + + /* Check if this PCB has stayed too long in LAST-ACK */ + if (pcb->state == LAST_ACK) { + if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in LAST-ACK\n")); + } + } + + /* If the PCB should be removed, do it. */ + if (pcb_remove) { + struct tcp_pcb *pcb2; +#if LWIP_CALLBACK_API + tcp_err_fn err_fn = pcb->errf; +#endif /* LWIP_CALLBACK_API */ + void *err_arg; + enum tcp_state last_state; + tcp_pcb_purge(pcb); + /* Remove PCB from tcp_active_pcbs list. */ + if (prev != NULL) { + LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_active_pcbs", pcb != tcp_active_pcbs); + prev->next = pcb->next; + } else { + /* This PCB was the first. */ + LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_active_pcbs", tcp_active_pcbs == pcb); + tcp_active_pcbs = pcb->next; + } + + if (pcb_reset) { + tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, + pcb->local_port, pcb->remote_port); + } + + err_arg = pcb->callback_arg; + last_state = pcb->state; + pcb2 = pcb; + pcb = pcb->next; + tcp_free(pcb2); + + tcp_active_pcbs_changed = 0; + TCP_EVENT_ERR(last_state, err_fn, err_arg, ERR_ABRT); + if (tcp_active_pcbs_changed) { + goto tcp_slowtmr_start; + } + } else { + /* get the 'next' element now and work with 'prev' below (in case of abort) */ + prev = pcb; + pcb = pcb->next; + + /* We check if we should poll the connection. */ + ++prev->polltmr; + if (prev->polltmr >= prev->pollinterval) { + prev->polltmr = 0; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: polling application\n")); + tcp_active_pcbs_changed = 0; + TCP_EVENT_POLL(prev, err); + if (tcp_active_pcbs_changed) { + goto tcp_slowtmr_start; + } + /* if err == ERR_ABRT, 'prev' is already deallocated */ + if (err == ERR_OK) { + tcp_output(prev); + } + } + } + } + + + /* Steps through all of the TIME-WAIT PCBs. */ + prev = NULL; + pcb = tcp_tw_pcbs; + while (pcb != NULL) { + LWIP_ASSERT("tcp_slowtmr: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + pcb_remove = 0; + + /* Check if this PCB has stayed long enough in TIME-WAIT */ + if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) { + ++pcb_remove; + } + + /* If the PCB should be removed, do it. */ + if (pcb_remove) { + struct tcp_pcb *pcb2; + tcp_pcb_purge(pcb); + /* Remove PCB from tcp_tw_pcbs list. */ + if (prev != NULL) { + LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_tw_pcbs", pcb != tcp_tw_pcbs); + prev->next = pcb->next; + } else { + /* This PCB was the first. */ + LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_tw_pcbs", tcp_tw_pcbs == pcb); + tcp_tw_pcbs = pcb->next; + } + pcb2 = pcb; + pcb = pcb->next; + tcp_free(pcb2); + } else { + prev = pcb; + pcb = pcb->next; + } + } +} + +/** + * Is called every TCP_FAST_INTERVAL (250 ms) and process data previously + * "refused" by upper layer (application) and sends delayed ACKs or pending FINs. + * + * Automatically called from tcp_tmr(). 
+ */ +void +tcp_fasttmr(void) +{ + struct tcp_pcb *pcb; + + ++tcp_timer_ctr; + +tcp_fasttmr_start: + pcb = tcp_active_pcbs; + + while (pcb != NULL) { + if (pcb->last_timer != tcp_timer_ctr) { + struct tcp_pcb *next; + pcb->last_timer = tcp_timer_ctr; + /* send delayed ACKs */ + if (pcb->flags & TF_ACK_DELAY) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: delayed ACK\n")); + tcp_ack_now(pcb); + tcp_output(pcb); + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + /* send pending FIN */ + if (pcb->flags & TF_CLOSEPEND) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: pending FIN\n")); + tcp_clear_flags(pcb, TF_CLOSEPEND); + tcp_close_shutdown_fin(pcb); + } + + next = pcb->next; + + /* If there is data which was previously "refused" by upper layer */ + if (pcb->refused_data != NULL) { + tcp_active_pcbs_changed = 0; + tcp_process_refused_data(pcb); + if (tcp_active_pcbs_changed) { + /* application callback has changed the pcb list: restart the loop */ + goto tcp_fasttmr_start; + } + } + pcb = next; + } else { + pcb = pcb->next; + } + } +} + +/** Call tcp_output for all active pcbs that have TF_NAGLEMEMERR set */ +void +tcp_txnow(void) +{ + struct tcp_pcb *pcb; + + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->flags & TF_NAGLEMEMERR) { + tcp_output(pcb); + } + } +} + +/** Pass pcb->refused_data to the recv callback */ +err_t +tcp_process_refused_data(struct tcp_pcb *pcb) +{ +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + struct pbuf *rest; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + + LWIP_ERROR("tcp_process_refused_data: invalid pcb", pcb != NULL, return ERR_ARG); + +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + while (pcb->refused_data != NULL) +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + { + err_t err; + u8_t refused_flags = pcb->refused_data->flags; + /* set pcb->refused_data to NULL in case the callback frees it and then + closes the pcb */ + struct pbuf *refused_data = pcb->refused_data; +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + pbuf_split_64k(refused_data, &rest); + pcb->refused_data = rest; +#else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = NULL; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + /* Notify again application with data previously received. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: notify kept packet\n")); + TCP_EVENT_RECV(pcb, refused_data, ERR_OK, err); + if (err == ERR_OK) { + /* did refused_data include a FIN? */ + if ((refused_flags & PBUF_FLAG_TCP_FIN) +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + && (rest == NULL) +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + ) { + /* correct rcv_wnd as the application won't call tcp_recved() + for the FIN's seqno */ + if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) { + pcb->rcv_wnd++; + } + TCP_EVENT_CLOSED(pcb, err); + if (err == ERR_ABRT) { + return ERR_ABRT; + } + } + } else if (err == ERR_ABRT) { + /* if err == ERR_ABRT, 'pcb' is already deallocated */ + /* Drop incoming packets because pcb is "full" (only if the incoming + segment contains data). */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: drop incoming packets, because pcb is \"full\"\n")); + return ERR_ABRT; + } else { + /* data is still refused, pbuf is still valid (go on for ACK-only packets) */ +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_cat(refused_data, rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = refused_data; + return ERR_INPROGRESS; + } + } + return ERR_OK; +} + +/** + * Deallocates a list of TCP segments (tcp_seg structures). 
+ * + * @param seg tcp_seg list of TCP segments to free + */ +void +tcp_segs_free(struct tcp_seg *seg) +{ + while (seg != NULL) { + struct tcp_seg *next = seg->next; + tcp_seg_free(seg); + seg = next; + } +} + +/** + * Frees a TCP segment (tcp_seg structure). + * + * @param seg single tcp_seg to free + */ +void +tcp_seg_free(struct tcp_seg *seg) +{ + if (seg != NULL) { + if (seg->p != NULL) { + pbuf_free(seg->p); +#if TCP_DEBUG + seg->p = NULL; +#endif /* TCP_DEBUG */ + } + memp_free(MEMP_TCP_SEG, seg); + } +} + +/** + * @ingroup tcp + * Sets the priority of a connection. + * + * @param pcb the tcp_pcb to manipulate + * @param prio new priority + */ +void +tcp_setprio(struct tcp_pcb *pcb, u8_t prio) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_setprio: invalid pcb", pcb != NULL, return); + + pcb->prio = prio; +} + +#if TCP_QUEUE_OOSEQ +/** + * Returns a copy of the given TCP segment. + * The pbuf and data are not copied, only the pointers + * + * @param seg the old tcp_seg + * @return a copy of seg + */ +struct tcp_seg * +tcp_seg_copy(struct tcp_seg *seg) +{ + struct tcp_seg *cseg; + + LWIP_ASSERT("tcp_seg_copy: invalid seg", seg != NULL); + + cseg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG); + if (cseg == NULL) { + return NULL; + } + SMEMCPY((u8_t *)cseg, (const u8_t *)seg, sizeof(struct tcp_seg)); + pbuf_ref(cseg->p); + return cseg; +} +#endif /* TCP_QUEUE_OOSEQ */ + +#if LWIP_CALLBACK_API +/** + * Default receive callback that is called if the user didn't register + * a recv callback for the pcb. + */ +err_t +tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) +{ + LWIP_UNUSED_ARG(arg); + + LWIP_ERROR("tcp_recv_null: invalid pcb", pcb != NULL, return ERR_ARG); + + if (p != NULL) { + tcp_recved(pcb, p->tot_len); + pbuf_free(p); + } else if (err == ERR_OK) { + return tcp_close(pcb); + } + return ERR_OK; +} +#endif /* LWIP_CALLBACK_API */ + +/** + * Kills the oldest active connection that has a lower priority than 'prio'. + * + * @param prio minimum priority + */ +static void +tcp_kill_prio(u8_t prio) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + u8_t mprio; + + mprio = LWIP_MIN(TCP_PRIO_MAX, prio); + + /* We want to kill connections with a lower prio, so bail out if + * supplied prio is 0 - there can never be a lower prio + */ + if (mprio == 0) { + return; + } + + /* We only want kill connections with a lower prio, so decrement prio by one + * and start searching for oldest connection with same or lower priority than mprio. + * We want to find the connections with the lowest possible prio, and among + * these the one with the longest inactivity time. + */ + mprio--; + + inactivity = 0; + inactive = NULL; + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + /* lower prio is always a kill candidate */ + if ((pcb->prio < mprio) || + /* longer inactivity is also a kill candidate */ + ((pcb->prio == mprio) && ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity))) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + mprio = pcb->prio; + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_prio: killing oldest PCB %p (%"S32_F")\n", + (void *)inactive, inactivity)); + tcp_abort(inactive); + } +} + +/** + * Kills the oldest connection that is in specific state. + * Called from tcp_alloc() for LAST_ACK and CLOSING if no more connections are available. 
+ */ +static void +tcp_kill_state(enum tcp_state state) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + + LWIP_ASSERT("invalid state", (state == CLOSING) || (state == LAST_ACK)); + + inactivity = 0; + inactive = NULL; + /* Go through the list of active pcbs and get the oldest pcb that is in state + CLOSING/LAST_ACK. */ + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->state == state) { + if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + } + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_closing: killing oldest %s PCB %p (%"S32_F")\n", + tcp_state_str[state], (void *)inactive, inactivity)); + /* Don't send a RST, since no data is lost. */ + tcp_abandon(inactive, 0); + } +} + +/** + * Kills the oldest connection that is in TIME_WAIT state. + * Called from tcp_alloc() if no more connections are available. + */ +static void +tcp_kill_timewait(void) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + + inactivity = 0; + inactive = NULL; + /* Go through the list of TIME_WAIT pcbs and get the oldest pcb. */ + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_timewait: killing oldest TIME-WAIT PCB %p (%"S32_F")\n", + (void *)inactive, inactivity)); + tcp_abort(inactive); + } +} + +/* Called when allocating a pcb fails. + * In this case, we want to handle all pcbs that want to close first: if we can + * now send the FIN (which failed before), the pcb might be in a state that is + * OK for us to now free it. + */ +static void +tcp_handle_closepend(void) +{ + struct tcp_pcb *pcb = tcp_active_pcbs; + + while (pcb != NULL) { + struct tcp_pcb *next = pcb->next; + /* send pending FIN */ + if (pcb->flags & TF_CLOSEPEND) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_handle_closepend: pending FIN\n")); + tcp_clear_flags(pcb, TF_CLOSEPEND); + tcp_close_shutdown_fin(pcb); + } + pcb = next; + } +} + +/** + * Allocate a new tcp_pcb structure. + * + * @param prio priority for the new pcb + * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_alloc(u8_t prio) +{ + struct tcp_pcb *pcb; + + LWIP_ASSERT_CORE_LOCKED(); + + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try to send FIN for all pcbs stuck in TF_CLOSEPEND first */ + tcp_handle_closepend(); + + /* Try killing oldest connection in TIME-WAIT. */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest TIME-WAIT connection\n")); + tcp_kill_timewait(); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest connection in LAST-ACK (these wouldn't go to TIME-WAIT). */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest LAST-ACK connection\n")); + tcp_kill_state(LAST_ACK); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest connection in CLOSING. */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest CLOSING connection\n")); + tcp_kill_state(CLOSING); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest active connection with lower priority than the new one. 
*/ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing oldest connection with prio lower than %d\n", prio)); + tcp_kill_prio(prio); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed above */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* zero out the whole pcb, so there is no need to initialize members to zero */ + memset(pcb, 0, sizeof(struct tcp_pcb)); + pcb->prio = prio; + pcb->snd_buf = TCP_SND_BUF; + /* Start with a window that does not need scaling. When window scaling is + enabled and used, the window is enlarged when both sides agree on scaling. */ + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); + pcb->ttl = TCP_TTL; + /* As initial send MSS, we use TCP_MSS but limit it to 536. + The send MSS is updated when an MSS option is received. */ + pcb->mss = INITIAL_MSS; + pcb->rto = 3000 / TCP_SLOW_INTERVAL; + pcb->sv = 3000 / TCP_SLOW_INTERVAL; + pcb->rtime = -1; + pcb->cwnd = 1; + pcb->tmr = tcp_ticks; + pcb->last_timer = tcp_timer_ctr; + + /* RFC 5681 recommends setting ssthresh abritrarily high and gives an example + of using the largest advertised receive window. We've seen complications with + receiving TCPs that use window scaling and/or window auto-tuning where the + initial advertised window is very small and then grows rapidly once the + connection is established. To avoid these complications, we set ssthresh to the + largest effective cwnd (amount of in-flight data) that the sender can have. */ + pcb->ssthresh = TCP_SND_BUF; + +#if LWIP_CALLBACK_API + pcb->recv = tcp_recv_null; +#endif /* LWIP_CALLBACK_API */ + + /* Init KEEPALIVE timer */ + pcb->keep_idle = TCP_KEEPIDLE_DEFAULT; + +#if LWIP_TCP_KEEPALIVE + pcb->keep_intvl = TCP_KEEPINTVL_DEFAULT; + pcb->keep_cnt = TCP_KEEPCNT_DEFAULT; +#endif /* LWIP_TCP_KEEPALIVE */ + } + return pcb; +} + +/** + * @ingroup tcp_raw + * Creates a new TCP protocol control block but doesn't place it on + * any of the TCP PCB lists. + * The pcb is not put on any list until binding using tcp_bind(). + * If memory is not available for creating the new pcb, NULL is returned. + * + * @internal: Maybe there should be a idle TCP PCB list where these + * PCBs are put on. Port reservation using tcp_bind() is implemented but + * allocated pcbs that are not bound can't be killed automatically if wanting + * to allocate a pcb with higher prio (@see tcp_kill_prio()) + * + * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_new(void) +{ + return tcp_alloc(TCP_PRIO_NORMAL); +} + +/** + * @ingroup tcp_raw + * Creates a new TCP protocol control block but doesn't + * place it on any of the TCP PCB lists. + * The pcb is not put on any list until binding using tcp_bind(). + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) connections, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. 
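+ *
+ * A minimal dual-stack sketch (hypothetical application code; the port number
+ * is illustrative and error handling is omitted):
+ * @code{.c}
+ * struct tcp_pcb *pcb = tcp_new_ip_type(IPADDR_TYPE_ANY);
+ * if (pcb != NULL) {
+ *   tcp_bind(pcb, IP_ANY_TYPE, 80);   // bind to any IPv4/IPv6 address, port 80
+ *   pcb = tcp_listen(pcb);            // moves the pcb onto the listening list
+ * }
+ * @endcode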
+ * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_new_ip_type(u8_t type) +{ + struct tcp_pcb *pcb; + pcb = tcp_alloc(TCP_PRIO_NORMAL); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** + * @ingroup tcp_raw + * Specifies the program specific state that should be passed to all + * other callback functions. The "pcb" argument is the current TCP + * connection control block, and the "arg" argument is the argument + * that will be passed to the callbacks. + * + * @param pcb tcp_pcb to set the callback argument + * @param arg void pointer argument to pass to callback functions + */ +void +tcp_arg(struct tcp_pcb *pcb, void *arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* This function is allowed to be called for both listen pcbs and + connection pcbs. */ + if (pcb != NULL) { + pcb->callback_arg = arg; + } +} +#if LWIP_CALLBACK_API + +/** + * @ingroup tcp_raw + * Sets the callback function that will be called when new data + * arrives. The callback function will be passed a NULL pbuf to + * indicate that the remote host has closed the connection. If the + * callback function returns ERR_OK or ERR_ABRT it must have + * freed the pbuf, otherwise it must not have freed it. + * + * @param pcb tcp_pcb to set the recv callback + * @param recv callback function to call for this pcb when data is received + */ +void +tcp_recv(struct tcp_pcb *pcb, tcp_recv_fn recv) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for recv callback", pcb->state != LISTEN); + pcb->recv = recv; + } +} + +/** + * @ingroup tcp_raw + * Specifies the callback function that should be called when data has + * successfully been received (i.e., acknowledged) by the remote + * host. The len argument passed to the callback function gives the + * amount bytes that was acknowledged by the last acknowledgment. + * + * @param pcb tcp_pcb to set the sent callback + * @param sent callback function to call for this pcb when data is successfully sent + */ +void +tcp_sent(struct tcp_pcb *pcb, tcp_sent_fn sent) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for sent callback", pcb->state != LISTEN); + pcb->sent = sent; + } +} + +/** + * @ingroup tcp_raw + * Used to specify the function that should be called when a fatal error + * has occurred on the connection. + * + * If a connection is aborted because of an error, the application is + * alerted of this event by the err callback. Errors that might abort a + * connection are when there is a shortage of memory. The callback + * function to be called is set using the tcp_err() function. + * + * @note The corresponding pcb is already freed when this callback is called! + * + * @param pcb tcp_pcb to set the err callback + * @param err callback function to call for this pcb when a fatal error + * has occurred on the connection + */ +void +tcp_err(struct tcp_pcb *pcb, tcp_err_fn err) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for err callback", pcb->state != LISTEN); + pcb->errf = err; + } +} + +/** + * @ingroup tcp_raw + * Used for specifying the function that should be called when a + * LISTENing connection has been connected to another host. 
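+ *
+ * A minimal sketch of wiring up a listener with the callback setters above
+ * (hypothetical application code; my_accept, my_recv and the port number are
+ * illustrative, error handling is omitted):
+ * @code{.c}
+ * static err_t my_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
+ * {
+ *   LWIP_UNUSED_ARG(arg); LWIP_UNUSED_ARG(err);
+ *   tcp_arg(newpcb, NULL);       // argument handed to the callbacks below
+ *   tcp_recv(newpcb, my_recv);   // data/FIN notifications for this connection
+ *   return ERR_OK;
+ * }
+ *
+ * struct tcp_pcb *lpcb = tcp_new();
+ * tcp_bind(lpcb, IP_ADDR_ANY, 7);
+ * lpcb = tcp_listen(lpcb);
+ * tcp_accept(lpcb, my_accept);
+ * @endcode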
+ * + * @param pcb tcp_pcb to set the accept callback + * @param accept callback function to call for this pcb when LISTENing + * connection has been connected to another host + */ +void +tcp_accept(struct tcp_pcb *pcb, tcp_accept_fn accept) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb != NULL) && (pcb->state == LISTEN)) { + struct tcp_pcb_listen *lpcb = (struct tcp_pcb_listen *)pcb; + lpcb->accept = accept; + } +} +#endif /* LWIP_CALLBACK_API */ + + +/** + * @ingroup tcp_raw + * Specifies the polling interval and the callback function that should + * be called to poll the application. The interval is specified in + * number of TCP coarse grained timer shots, which typically occurs + * twice a second. An interval of 10 means that the application would + * be polled every 5 seconds. + * + * When a connection is idle (i.e., no data is either transmitted or + * received), lwIP will repeatedly poll the application by calling a + * specified callback function. This can be used either as a watchdog + * timer for killing connections that have stayed idle for too long, or + * as a method of waiting for memory to become available. For instance, + * if a call to tcp_write() has failed because memory wasn't available, + * the application may use the polling functionality to call tcp_write() + * again when the connection has been idle for a while. + */ +void +tcp_poll(struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_poll: invalid pcb", pcb != NULL, return); + LWIP_ASSERT("invalid socket state for poll", pcb->state != LISTEN); + +#if LWIP_CALLBACK_API + pcb->poll = poll; +#else /* LWIP_CALLBACK_API */ + LWIP_UNUSED_ARG(poll); +#endif /* LWIP_CALLBACK_API */ + pcb->pollinterval = interval; +} + +/** + * Purges a TCP PCB. Removes any buffered data and frees the buffer memory + * (pcb->ooseq, pcb->unsent and pcb->unacked are freed). + * + * @param pcb tcp_pcb to purge. The pcb itself is not deallocated! + */ +void +tcp_pcb_purge(struct tcp_pcb *pcb) +{ + LWIP_ERROR("tcp_pcb_purge: invalid pcb", pcb != NULL, return); + + if (pcb->state != CLOSED && + pcb->state != TIME_WAIT && + pcb->state != LISTEN) { + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge\n")); + + tcp_backlog_accepted(pcb); + + if (pcb->refused_data != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->refused_data\n")); + pbuf_free(pcb->refused_data); + pcb->refused_data = NULL; + } + if (pcb->unsent != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: not all data sent\n")); + } + if (pcb->unacked != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->unacked\n")); + } +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->ooseq\n")); + tcp_free_ooseq(pcb); + } +#endif /* TCP_QUEUE_OOSEQ */ + + /* Stop the retransmission timer as it will expect data on unacked + queue if it fires */ + pcb->rtime = -1; + + tcp_segs_free(pcb->unsent); + tcp_segs_free(pcb->unacked); + pcb->unacked = pcb->unsent = NULL; +#if TCP_OVERSIZE + pcb->unsent_oversize = 0; +#endif /* TCP_OVERSIZE */ + } +} + +/** + * Purges the PCB and removes it from a PCB list. Any delayed ACKs are sent first. + * + * @param pcblist PCB list to purge. + * @param pcb tcp_pcb to purge. The pcb itself is NOT deallocated! 
+ */ +void +tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_pcb_remove: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_pcb_remove: invalid pcblist", pcblist != NULL); + + TCP_RMV(pcblist, pcb); + + tcp_pcb_purge(pcb); + + /* if there is an outstanding delayed ACKs, send it */ + if ((pcb->state != TIME_WAIT) && + (pcb->state != LISTEN) && + (pcb->flags & TF_ACK_DELAY)) { + tcp_ack_now(pcb); + tcp_output(pcb); + } + + if (pcb->state != LISTEN) { + LWIP_ASSERT("unsent segments leaking", pcb->unsent == NULL); + LWIP_ASSERT("unacked segments leaking", pcb->unacked == NULL); +#if TCP_QUEUE_OOSEQ + LWIP_ASSERT("ooseq segments leaking", pcb->ooseq == NULL); +#endif /* TCP_QUEUE_OOSEQ */ + } + + pcb->state = CLOSED; + /* reset the local port to prevent the pcb from being 'bound' */ + pcb->local_port = 0; + + LWIP_ASSERT("tcp_pcb_remove: tcp_pcbs_sane()", tcp_pcbs_sane()); +} + +/** + * Calculates a new initial sequence number for new connections. + * + * @return u32_t pseudo random sequence number + */ +u32_t +tcp_next_iss(struct tcp_pcb *pcb) +{ +#ifdef LWIP_HOOK_TCP_ISN + LWIP_ASSERT("tcp_next_iss: invalid pcb", pcb != NULL); + return LWIP_HOOK_TCP_ISN(&pcb->local_ip, pcb->local_port, &pcb->remote_ip, pcb->remote_port); +#else /* LWIP_HOOK_TCP_ISN */ + static u32_t iss = 6510; + + LWIP_ASSERT("tcp_next_iss: invalid pcb", pcb != NULL); + LWIP_UNUSED_ARG(pcb); + + iss += tcp_ticks; /* XXX */ + return iss; +#endif /* LWIP_HOOK_TCP_ISN */ +} + +#if TCP_CALCULATE_EFF_SEND_MSS +/** + * Calculates the effective send mss that can be used for a specific IP address + * by calculating the minimum of TCP_MSS and the mtu (if set) of the target + * netif (if not NULL). + */ +u16_t +tcp_eff_send_mss_netif(u16_t sendmss, struct netif *outif, const ip_addr_t *dest) +{ + u16_t mss_s; + u16_t mtu; + + LWIP_UNUSED_ARG(dest); /* in case IPv6 is disabled */ + + LWIP_ASSERT("tcp_eff_send_mss_netif: invalid dst_ip", dest != NULL); + +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_IS_V6(dest)) +#endif /* LWIP_IPV4 */ + { + /* First look in destination cache, to see if there is a Path MTU. */ + mtu = nd6_get_destination_mtu(ip_2_ip6(dest), outif); + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + if (outif == NULL) { + return sendmss; + } + mtu = outif->mtu; + } +#endif /* LWIP_IPV4 */ + + if (mtu != 0) { + u16_t offset; +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_IS_V6(dest)) +#endif /* LWIP_IPV4 */ + { + offset = IP6_HLEN + TCP_HLEN; + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + offset = IP_HLEN + TCP_HLEN; + } +#endif /* LWIP_IPV4 */ + mss_s = (mtu > offset) ? (u16_t)(mtu - offset) : 0; + /* RFC 1122, chap 4.2.2.6: + * Eff.snd.MSS = min(SendMSS+20, MMS_S) - TCPhdrsize - IPoptionsize + * We correct for TCP options in tcp_write(), and don't support IP options. + */ + sendmss = LWIP_MIN(sendmss, mss_s); + } + return sendmss; +} +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + +/** Helper function for tcp_netif_ip_addr_changed() that iterates a pcb list */ +static void +tcp_netif_ip_addr_changed_pcblist(const ip_addr_t *old_addr, struct tcp_pcb *pcb_list) +{ + struct tcp_pcb *pcb; + pcb = pcb_list; + + LWIP_ASSERT("tcp_netif_ip_addr_changed_pcblist: invalid old_addr", old_addr != NULL); + + while (pcb != NULL) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&pcb->local_ip, old_addr) +#if LWIP_AUTOIP + /* connections to link-local addresses must persist (RFC3927 ch. 
1.9) */ + && (!IP_IS_V4_VAL(pcb->local_ip) || !ip4_addr_islinklocal(ip_2_ip4(&pcb->local_ip))) +#endif /* LWIP_AUTOIP */ + ) { + /* this connection must be aborted */ + struct tcp_pcb *next = pcb->next; + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: aborting TCP pcb %p\n", (void *)pcb)); + tcp_abort(pcb); + pcb = next; + } else { + pcb = pcb->next; + } + } +} + +/** This function is called from netif.c when address is changed or netif is removed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change or NULL if netif has been removed + */ +void +tcp_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ + struct tcp_pcb_listen *lpcb; + + if (!ip_addr_isany(old_addr)) { + tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_active_pcbs); + tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_bound_pcbs); + + if (!ip_addr_isany(new_addr)) { + /* PCB bound to current local interface address? */ + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&lpcb->local_ip, old_addr)) { + /* The PCB is listening to the old ipaddr and + * is set to listen to the new one instead */ + ip_addr_copy(lpcb->local_ip, *new_addr); + } + } + } + } +} + +const char * +tcp_debug_state_str(enum tcp_state s) +{ + return tcp_state_str[s]; +} + +err_t +tcp_tcp_get_tcp_addrinfo(struct tcp_pcb *pcb, int local, ip_addr_t *addr, u16_t *port) +{ + if (pcb) { + if (local) { + if (addr) { + *addr = pcb->local_ip; + } + if (port) { + *port = pcb->local_port; + } + } else { + if (addr) { + *addr = pcb->remote_ip; + } + if (port) { + *port = pcb->remote_port; + } + } + return ERR_OK; + } + return ERR_VAL; +} + +#if TCP_QUEUE_OOSEQ +/* Free all ooseq pbufs (and possibly reset SACK state) */ +void +tcp_free_ooseq(struct tcp_pcb *pcb) +{ + if (pcb->ooseq) { + tcp_segs_free(pcb->ooseq); + pcb->ooseq = NULL; +#if LWIP_TCP_SACK_OUT + memset(pcb->rcv_sacks, 0, sizeof(pcb->rcv_sacks)); +#endif /* LWIP_TCP_SACK_OUT */ + } +} +#endif /* TCP_QUEUE_OOSEQ */ + +#if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG +/** + * Print a tcp header for debugging purposes. 
+ * + * @param tcphdr pointer to a struct tcp_hdr + */ +void +tcp_debug_print(struct tcp_hdr *tcphdr) +{ + LWIP_DEBUGF(TCP_DEBUG, ("TCP header:\n")); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n", + lwip_ntohs(tcphdr->src), lwip_ntohs(tcphdr->dest))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (seq no)\n", + lwip_ntohl(tcphdr->seqno))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (ack no)\n", + lwip_ntohl(tcphdr->ackno))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %2"U16_F" | |%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"| %5"U16_F" | (hdrlen, flags (", + TCPH_HDRLEN(tcphdr), + (u16_t)(TCPH_FLAGS(tcphdr) >> 5 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 4 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 3 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 2 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 1 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) & 1), + lwip_ntohs(tcphdr->wnd))); + tcp_debug_print_flags(TCPH_FLAGS(tcphdr)); + LWIP_DEBUGF(TCP_DEBUG, ("), win)\n")); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| 0x%04"X16_F" | %5"U16_F" | (chksum, urgp)\n", + lwip_ntohs(tcphdr->chksum), lwip_ntohs(tcphdr->urgp))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); +} + +/** + * Print a tcp state for debugging purposes. + * + * @param s enum tcp_state to print + */ +void +tcp_debug_print_state(enum tcp_state s) +{ + LWIP_DEBUGF(TCP_DEBUG, ("State: %s\n", tcp_state_str[s])); +} + +/** + * Print tcp flags for debugging purposes. + * + * @param flags tcp flags, all active flags are printed + */ +void +tcp_debug_print_flags(u8_t flags) +{ + if (flags & TCP_FIN) { + LWIP_DEBUGF(TCP_DEBUG, ("FIN ")); + } + if (flags & TCP_SYN) { + LWIP_DEBUGF(TCP_DEBUG, ("SYN ")); + } + if (flags & TCP_RST) { + LWIP_DEBUGF(TCP_DEBUG, ("RST ")); + } + if (flags & TCP_PSH) { + LWIP_DEBUGF(TCP_DEBUG, ("PSH ")); + } + if (flags & TCP_ACK) { + LWIP_DEBUGF(TCP_DEBUG, ("ACK ")); + } + if (flags & TCP_URG) { + LWIP_DEBUGF(TCP_DEBUG, ("URG ")); + } + if (flags & TCP_ECE) { + LWIP_DEBUGF(TCP_DEBUG, ("ECE ")); + } + if (flags & TCP_CWR) { + LWIP_DEBUGF(TCP_DEBUG, ("CWR ")); + } + LWIP_DEBUGF(TCP_DEBUG, ("\n")); +} + +/** + * Print all tcp_pcbs in every list for debugging purposes. 
+ */ +void +tcp_debug_print_pcbs(void) +{ + struct tcp_pcb *pcb; + struct tcp_pcb_listen *pcbl; + + LWIP_DEBUGF(TCP_DEBUG, ("Active PCB states:\n")); + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ", + pcb->local_port, pcb->remote_port, + pcb->snd_nxt, pcb->rcv_nxt)); + tcp_debug_print_state(pcb->state); + } + + LWIP_DEBUGF(TCP_DEBUG, ("Listen PCB states:\n")); + for (pcbl = tcp_listen_pcbs.listen_pcbs; pcbl != NULL; pcbl = pcbl->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F" ", pcbl->local_port)); + tcp_debug_print_state(pcbl->state); + } + + LWIP_DEBUGF(TCP_DEBUG, ("TIME-WAIT PCB states:\n")); + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ", + pcb->local_port, pcb->remote_port, + pcb->snd_nxt, pcb->rcv_nxt)); + tcp_debug_print_state(pcb->state); + } +} + +/** + * Check state consistency of the tcp_pcb lists. + */ +s16_t +tcp_pcbs_sane(void) +{ + struct tcp_pcb *pcb; + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != CLOSED", pcb->state != CLOSED); + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != LISTEN", pcb->state != LISTEN); + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT); + } + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_pcbs_sane: tw pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + } + return 1; +} +#endif /* TCP_DEBUG */ + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +/** + * @defgroup tcp_raw_extargs ext arguments + * @ingroup tcp_raw + * Additional data storage per tcp pcb\n + * @see @ref tcp_raw + * + * When LWIP_TCP_PCB_NUM_EXT_ARGS is > 0, every tcp pcb (including listen pcb) + * includes a number of additional argument entries in an array. + * + * To support memory management, in addition to a 'void *', callbacks can be + * provided to manage transition from listening pcbs to connections and to + * deallocate memory when a pcb is deallocated (see struct @ref tcp_ext_arg_callbacks). + * + * After allocating this index, use @ref tcp_ext_arg_set and @ref tcp_ext_arg_get + * to store and load arguments from this index for a given pcb. + */ + +static u8_t tcp_ext_arg_id; + +/** + * @ingroup tcp_raw_extargs + * Allocate an index to store data in ext_args member of struct tcp_pcb. + * Returned value is an index in mentioned array. + * The index is *global* over all pcbs! + * + * When @ref LWIP_TCP_PCB_NUM_EXT_ARGS is > 0, every tcp pcb (including listen pcb) + * includes a number of additional argument entries in an array. + * + * To support memory management, in addition to a 'void *', callbacks can be + * provided to manage transition from listening pcbs to connections and to + * deallocate memory when a pcb is deallocated (see struct @ref tcp_ext_arg_callbacks). + * + * After allocating this index, use @ref tcp_ext_arg_set and @ref tcp_ext_arg_get + * to store and load arguments from this index for a given pcb. 
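+ *
+ * A minimal usage sketch (hypothetical; 'my_id' and 'my_state' are illustrative
+ * names, and the id is allocated once globally, not per pcb):
+ * @code{.c}
+ * static u8_t my_id;
+ * my_id = tcp_ext_arg_alloc_id();             // once, e.g. during initialization
+ * tcp_ext_arg_set(pcb, my_id, my_state);      // attach per-connection data
+ * void *state = tcp_ext_arg_get(pcb, my_id);  // read it back later
+ * @endcode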
+ * + * @return a unique index into struct tcp_pcb.ext_args + */ +u8_t +tcp_ext_arg_alloc_id(void) +{ + u8_t result = tcp_ext_arg_id; + tcp_ext_arg_id++; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_TCP_PCB_NUM_EXT_ARGS >= 255 +#error LWIP_TCP_PCB_NUM_EXT_ARGS +#endif + LWIP_ASSERT("Increase LWIP_TCP_PCB_NUM_EXT_ARGS in lwipopts.h", result < LWIP_TCP_PCB_NUM_EXT_ARGS); + return result; +} + +/** + * @ingroup tcp_raw_extargs + * Set callbacks for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the callback + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @param callbacks callback table (const since it is referenced, not copied!) + */ +void +tcp_ext_arg_set_callbacks(struct tcp_pcb *pcb, uint8_t id, const struct tcp_ext_arg_callbacks * const callbacks) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + LWIP_ASSERT("callbacks != NULL", callbacks != NULL); + + LWIP_ASSERT_CORE_LOCKED(); + + pcb->ext_args[id].callbacks = callbacks; +} + +/** + * @ingroup tcp_raw_extargs + * Set data for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the data + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @param arg data pointer to set + */ +void tcp_ext_arg_set(struct tcp_pcb *pcb, uint8_t id, void *arg) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + + LWIP_ASSERT_CORE_LOCKED(); + + pcb->ext_args[id].data = arg; +} + +/** + * @ingroup tcp_raw_extargs + * Set data for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the data + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @return data pointer at the given index + */ +void *tcp_ext_arg_get(const struct tcp_pcb *pcb, uint8_t id) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + + LWIP_ASSERT_CORE_LOCKED(); + + return pcb->ext_args[id].data; +} + +/** This function calls the "destroy" callback for all ext_args once a pcb is + * freed. + */ +static void +tcp_ext_arg_invoke_callbacks_destroyed(struct tcp_pcb_ext_args *ext_args) +{ + int i; + LWIP_ASSERT("ext_args != NULL", ext_args != NULL); + + for (i = 0; i < LWIP_TCP_PCB_NUM_EXT_ARGS; i++) { + if (ext_args[i].callbacks != NULL) { + if (ext_args[i].callbacks->destroy != NULL) { + ext_args[i].callbacks->destroy((u8_t)i, ext_args[i].data); + } + } + } +} + +/** This function calls the "passive_open" callback for all ext_args if a connection + * is in the process of being accepted. This is called just after the SYN is + * received and before a SYN/ACK is sent, to allow to modify the very first + * segment sent even on passive open. Naturally, the "accepted" callback of the + * pcb has not been called yet! 
+ */ +err_t +tcp_ext_arg_invoke_callbacks_passive_open(struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb) +{ + int i; + LWIP_ASSERT("lpcb != NULL", lpcb != NULL); + LWIP_ASSERT("cpcb != NULL", cpcb != NULL); + + for (i = 0; i < LWIP_TCP_PCB_NUM_EXT_ARGS; i++) { + if (lpcb->ext_args[i].callbacks != NULL) { + if (lpcb->ext_args[i].callbacks->passive_open != NULL) { + err_t err = lpcb->ext_args[i].callbacks->passive_open((u8_t)i, lpcb, cpcb); + if (err != ERR_OK) { + return err; + } + } + } + } + return ERR_OK; +} +#endif /* LWIP_TCP_PCB_NUM_EXT_ARGS */ + +#endif /* LWIP_TCP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/tcp_in.c b/Middlewares/Third_Party/LwIP/src/core/tcp_in.c new file mode 100644 index 00000000..428a6f48 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/tcp_in.c @@ -0,0 +1,2178 @@ +/** + * @file + * Transmission Control Protocol, incoming traffic + * + * The input processing functions of the TCP layer. + * + * These functions are generally called in the order (ip_input() ->) + * tcp_input() -> * tcp_process() -> tcp_receive() (-> application). + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcp_priv.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/inet_chksum.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#if LWIP_ND6_TCP_REACHABILITY_HINTS +#include "lwip/nd6.h" +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Initial CWND calculation as defined RFC 2581 */ +#define LWIP_TCP_CALC_INITIAL_CWND(mss) ((tcpwnd_size_t)LWIP_MIN((4U * (mss)), LWIP_MAX((2U * (mss)), 4380U))) + +/* These variables are global to all functions involved in the input + processing of TCP segments. 
They are set by the tcp_input() + function. */ +static struct tcp_seg inseg; +static struct tcp_hdr *tcphdr; +static u16_t tcphdr_optlen; +static u16_t tcphdr_opt1len; +static u8_t *tcphdr_opt2; +static u16_t tcp_optidx; +static u32_t seqno, ackno; +static tcpwnd_size_t recv_acked; +static u16_t tcplen; +static u8_t flags; + +static u8_t recv_flags; +static struct pbuf *recv_data; + +struct tcp_pcb *tcp_input_pcb; + +/* Forward declarations. */ +static err_t tcp_process(struct tcp_pcb *pcb); +static void tcp_receive(struct tcp_pcb *pcb); +static void tcp_parseopt(struct tcp_pcb *pcb); + +static void tcp_listen_input(struct tcp_pcb_listen *pcb); +static void tcp_timewait_input(struct tcp_pcb *pcb); + +static int tcp_input_delayed_close(struct tcp_pcb *pcb); + +#if LWIP_TCP_SACK_OUT +static void tcp_add_sack(struct tcp_pcb *pcb, u32_t left, u32_t right); +static void tcp_remove_sacks_lt(struct tcp_pcb *pcb, u32_t seq); +#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT) +static void tcp_remove_sacks_gt(struct tcp_pcb *pcb, u32_t seq); +#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */ +#endif /* LWIP_TCP_SACK_OUT */ + +/** + * The initial input processing of TCP. It verifies the TCP header, demultiplexes + * the segment between the PCBs and passes it on to tcp_process(), which implements + * the TCP finite state machine. This function is called by the IP layer (in + * ip_input()). + * + * @param p received TCP segment to process (p->payload pointing to the TCP header) + * @param inp network interface on which this segment was received + */ +void +tcp_input(struct pbuf *p, struct netif *inp) +{ + struct tcp_pcb *pcb, *prev; + struct tcp_pcb_listen *lpcb; +#if SO_REUSE + struct tcp_pcb *lpcb_prev = NULL; + struct tcp_pcb_listen *lpcb_any = NULL; +#endif /* SO_REUSE */ + u8_t hdrlen_bytes; + err_t err; + + LWIP_UNUSED_ARG(inp); + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("tcp_input: invalid pbuf", p != NULL); + + PERF_START; + + TCP_STATS_INC(tcp.recv); + MIB2_STATS_INC(mib2.tcpinsegs); + + tcphdr = (struct tcp_hdr *)p->payload; + +#if TCP_INPUT_DEBUG + tcp_debug_print(tcphdr); +#endif + + /* Check that TCP header fits in payload */ + if (p->len < TCP_HLEN) { + /* drop short packets */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: short packet (%"U16_F" bytes) discarded\n", p->tot_len)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* Don't even process incoming broadcasts/multicasts. */ + if (ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()) || + ip_addr_ismulticast(ip_current_dest_addr())) { + TCP_STATS_INC(tcp.proterr); + goto dropped; + } + +#if CHECKSUM_CHECK_TCP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_TCP) { + /* Verify TCP checksum. */ + u16_t chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + ip_current_src_addr(), ip_current_dest_addr()); + if (chksum != 0) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packet discarded due to failing checksum 0x%04"X16_F"\n", + chksum)); + tcp_debug_print(tcphdr); + TCP_STATS_INC(tcp.chkerr); + goto dropped; + } + } +#endif /* CHECKSUM_CHECK_TCP */ + + /* sanity-check header length */ + hdrlen_bytes = TCPH_HDRLEN_BYTES(tcphdr); + if ((hdrlen_bytes < TCP_HLEN) || (hdrlen_bytes > p->tot_len)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: invalid header length (%"U16_F")\n", (u16_t)hdrlen_bytes)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* Move the payload pointer in the pbuf so that it points to the + TCP data instead of the TCP header. 
*/ + tcphdr_optlen = (u16_t)(hdrlen_bytes - TCP_HLEN); + tcphdr_opt2 = NULL; + if (p->len >= hdrlen_bytes) { + /* all options are in the first pbuf */ + tcphdr_opt1len = tcphdr_optlen; + pbuf_remove_header(p, hdrlen_bytes); /* cannot fail */ + } else { + u16_t opt2len; + /* TCP header fits into first pbuf, options don't - data is in the next pbuf */ + /* there must be a next pbuf, due to hdrlen_bytes sanity check above */ + LWIP_ASSERT("p->next != NULL", p->next != NULL); + + /* advance over the TCP header (cannot fail) */ + pbuf_remove_header(p, TCP_HLEN); + + /* determine how long the first and second parts of the options are */ + tcphdr_opt1len = p->len; + opt2len = (u16_t)(tcphdr_optlen - tcphdr_opt1len); + + /* options continue in the next pbuf: set p to zero length and hide the + options in the next pbuf (adjusting p->tot_len) */ + pbuf_remove_header(p, tcphdr_opt1len); + + /* check that the options fit in the second pbuf */ + if (opt2len > p->next->len) { + /* drop short packets */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: options overflow second pbuf (%"U16_F" bytes)\n", p->next->len)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* remember the pointer to the second part of the options */ + tcphdr_opt2 = (u8_t *)p->next->payload; + + /* advance p->next to point after the options, and manually + adjust p->tot_len to keep it consistent with the changed p->next */ + pbuf_remove_header(p->next, opt2len); + p->tot_len = (u16_t)(p->tot_len - opt2len); + + LWIP_ASSERT("p->len == 0", p->len == 0); + LWIP_ASSERT("p->tot_len == p->next->tot_len", p->tot_len == p->next->tot_len); + } + + /* Convert fields in TCP header to host byte order. */ + tcphdr->src = lwip_ntohs(tcphdr->src); + tcphdr->dest = lwip_ntohs(tcphdr->dest); + seqno = tcphdr->seqno = lwip_ntohl(tcphdr->seqno); + ackno = tcphdr->ackno = lwip_ntohl(tcphdr->ackno); + tcphdr->wnd = lwip_ntohs(tcphdr->wnd); + + flags = TCPH_FLAGS(tcphdr); + tcplen = p->tot_len; + if (flags & (TCP_FIN | TCP_SYN)) { + tcplen++; + if (tcplen < p->tot_len) { + /* u16_t overflow, cannot handle this */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: length u16_t overflow, cannot handle this\n")); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + } + + /* Demultiplex an incoming segment. First, we check if it is destined + for an active connection. */ + prev = NULL; + + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_input: active pcb->state != CLOSED", pcb->state != CLOSED); + LWIP_ASSERT("tcp_input: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT); + LWIP_ASSERT("tcp_input: active pcb->state != LISTEN", pcb->state != LISTEN); + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + prev = pcb; + continue; + } + + if (pcb->remote_port == tcphdr->src && + pcb->local_port == tcphdr->dest && + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) && + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + /* Move this PCB to the front of the list so that subsequent + lookups will be faster (we exploit locality in TCP segment + arrivals). 
*/ + LWIP_ASSERT("tcp_input: pcb->next != pcb (before cache)", pcb->next != pcb); + if (prev != NULL) { + prev->next = pcb->next; + pcb->next = tcp_active_pcbs; + tcp_active_pcbs = pcb; + } else { + TCP_STATS_INC(tcp.cachehit); + } + LWIP_ASSERT("tcp_input: pcb->next != pcb (after cache)", pcb->next != pcb); + break; + } + prev = pcb; + } + + if (pcb == NULL) { + /* If it did not go to an active connection, we check the connections + in the TIME-WAIT state. */ + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_input: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + continue; + } + + if (pcb->remote_port == tcphdr->src && + pcb->local_port == tcphdr->dest && + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) && + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + /* We don't really care enough to move this PCB to the front + of the list since we are not very likely to receive that + many segments for connections in TIME-WAIT. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for TIME_WAITing connection.\n")); +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if (LWIP_HOOK_TCP_INPACKET_PCB(pcb, tcphdr, tcphdr_optlen, tcphdr_opt1len, + tcphdr_opt2, p) == ERR_OK) +#endif + { + tcp_timewait_input(pcb); + } + pbuf_free(p); + return; + } + } + + /* Finally, if we still did not get a match, we check all PCBs that + are LISTENing for incoming connections. */ + prev = NULL; + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + /* check if PCB is bound to specific netif */ + if ((lpcb->netif_idx != NETIF_NO_INDEX) && + (lpcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + prev = (struct tcp_pcb *)lpcb; + continue; + } + + if (lpcb->local_port == tcphdr->dest) { + if (IP_IS_ANY_TYPE_VAL(lpcb->local_ip)) { + /* found an ANY TYPE (IPv4/IPv6) match */ +#if SO_REUSE + lpcb_any = lpcb; + lpcb_prev = prev; +#else /* SO_REUSE */ + break; +#endif /* SO_REUSE */ + } else if (IP_ADDR_PCB_VERSION_MATCH_EXACT(lpcb, ip_current_dest_addr())) { + if (ip_addr_cmp(&lpcb->local_ip, ip_current_dest_addr())) { + /* found an exact match */ + break; + } else if (ip_addr_isany(&lpcb->local_ip)) { + /* found an ANY-match */ +#if SO_REUSE + lpcb_any = lpcb; + lpcb_prev = prev; +#else /* SO_REUSE */ + break; +#endif /* SO_REUSE */ + } + } + } + prev = (struct tcp_pcb *)lpcb; + } +#if SO_REUSE + /* first try specific local IP */ + if (lpcb == NULL) { + /* only pass to ANY if no specific local IP has been found */ + lpcb = lpcb_any; + prev = lpcb_prev; + } +#endif /* SO_REUSE */ + if (lpcb != NULL) { + /* Move this PCB to the front of the list so that subsequent + lookups will be faster (we exploit locality in TCP segment + arrivals). 
*/ + if (prev != NULL) { + ((struct tcp_pcb_listen *)prev)->next = lpcb->next; + /* our successor is the remainder of the listening list */ + lpcb->next = tcp_listen_pcbs.listen_pcbs; + /* put this listening pcb at the head of the listening list */ + tcp_listen_pcbs.listen_pcbs = lpcb; + } else { + TCP_STATS_INC(tcp.cachehit); + } + + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for LISTENing connection.\n")); +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if (LWIP_HOOK_TCP_INPACKET_PCB((struct tcp_pcb *)lpcb, tcphdr, tcphdr_optlen, + tcphdr_opt1len, tcphdr_opt2, p) == ERR_OK) +#endif + { + tcp_listen_input(lpcb); + } + pbuf_free(p); + return; + } + } + +#if TCP_INPUT_DEBUG + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("+-+-+-+-+-+-+-+-+-+-+-+-+-+- tcp_input: flags ")); + tcp_debug_print_flags(TCPH_FLAGS(tcphdr)); + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n")); +#endif /* TCP_INPUT_DEBUG */ + + +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if ((pcb != NULL) && LWIP_HOOK_TCP_INPACKET_PCB(pcb, tcphdr, tcphdr_optlen, + tcphdr_opt1len, tcphdr_opt2, p) != ERR_OK) { + pbuf_free(p); + return; + } +#endif + if (pcb != NULL) { + /* The incoming segment belongs to a connection. */ +#if TCP_INPUT_DEBUG + tcp_debug_print_state(pcb->state); +#endif /* TCP_INPUT_DEBUG */ + + /* Set up a tcp_seg structure. */ + inseg.next = NULL; + inseg.len = p->tot_len; + inseg.p = p; + inseg.tcphdr = tcphdr; + + recv_data = NULL; + recv_flags = 0; + recv_acked = 0; + + if (flags & TCP_PSH) { + p->flags |= PBUF_FLAG_PUSH; + } + + /* If there is data which was previously "refused" by upper layer */ + if (pcb->refused_data != NULL) { + if ((tcp_process_refused_data(pcb) == ERR_ABRT) || + ((pcb->refused_data != NULL) && (tcplen > 0))) { + /* pcb has been aborted or refused data is still refused and the new + segment contains data */ + if (pcb->rcv_ann_wnd == 0) { + /* this is a zero-window probe, we respond to it with current RCV.NXT + and drop the data segment */ + tcp_send_empty_ack(pcb); + } + TCP_STATS_INC(tcp.drop); + MIB2_STATS_INC(mib2.tcpinerrs); + goto aborted; + } + } + tcp_input_pcb = pcb; + err = tcp_process(pcb); + /* A return value of ERR_ABRT means that tcp_abort() was called + and that the pcb has been freed. If so, we don't do anything. */ + if (err != ERR_ABRT) { + if (recv_flags & TF_RESET) { + /* TF_RESET means that the connection was reset by the other + end. We then call the error callback to inform the + application that the connection is dead before we + deallocate the PCB. */ + TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_RST); + tcp_pcb_remove(&tcp_active_pcbs, pcb); + tcp_free(pcb); + } else { + err = ERR_OK; + /* If the application has registered a "sent" function to be + called when new send buffer space is available, we call it + now. */ + if (recv_acked > 0) { + u16_t acked16; +#if LWIP_WND_SCALE + /* recv_acked is u32_t but the sent callback only takes a u16_t, + so we might have to call it multiple times. 
*/ + u32_t acked = recv_acked; + while (acked > 0) { + acked16 = (u16_t)LWIP_MIN(acked, 0xffffu); + acked -= acked16; +#else + { + acked16 = recv_acked; +#endif + TCP_EVENT_SENT(pcb, (u16_t)acked16, err); + if (err == ERR_ABRT) { + goto aborted; + } + } + recv_acked = 0; + } + if (tcp_input_delayed_close(pcb)) { + goto aborted; + } +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + while (recv_data != NULL) { + struct pbuf *rest = NULL; + pbuf_split_64k(recv_data, &rest); +#else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + if (recv_data != NULL) { +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + + LWIP_ASSERT("pcb->refused_data == NULL", pcb->refused_data == NULL); + if (pcb->flags & TF_RXCLOSED) { + /* received data although already closed -> abort (send RST) to + notify the remote host that not all data has been processed */ + pbuf_free(recv_data); +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_free(rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + tcp_abort(pcb); + goto aborted; + } + + /* Notify application that data has been received. */ + TCP_EVENT_RECV(pcb, recv_data, ERR_OK, err); + if (err == ERR_ABRT) { +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_free(rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + goto aborted; + } + + /* If the upper layer can't receive this data, store it */ + if (err != ERR_OK) { +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_cat(recv_data, rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = recv_data; + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: keep incoming packet, because pcb is \"full\"\n")); +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + break; + } else { + /* Upper layer received the data, go on with the rest if > 64K */ + recv_data = rest; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + } + } + + /* If a FIN segment was received, we call the callback + function with a NULL buffer to indicate EOF. */ + if (recv_flags & TF_GOT_FIN) { + if (pcb->refused_data != NULL) { + /* Delay this if we have refused data. */ + pcb->refused_data->flags |= PBUF_FLAG_TCP_FIN; + } else { + /* correct rcv_wnd as the application won't call tcp_recved() + for the FIN's seqno */ + if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) { + pcb->rcv_wnd++; + } + TCP_EVENT_CLOSED(pcb, err); + if (err == ERR_ABRT) { + goto aborted; + } + } + } + + tcp_input_pcb = NULL; + if (tcp_input_delayed_close(pcb)) { + goto aborted; + } + /* Try to send something out. */ + tcp_output(pcb); +#if TCP_INPUT_DEBUG +#if TCP_DEBUG + tcp_debug_print_state(pcb->state); +#endif /* TCP_DEBUG */ +#endif /* TCP_INPUT_DEBUG */ + } + } + /* Jump target if pcb has been aborted in a callback (by calling tcp_abort()). + Below this line, 'pcb' may not be dereferenced! */ +aborted: + tcp_input_pcb = NULL; + recv_data = NULL; + + /* give up our reference to inseg.p */ + if (inseg.p != NULL) { + pbuf_free(inseg.p); + inseg.p = NULL; + } + } else { + /* If no matching PCB was found, send a TCP RST (reset) to the + sender. 
*/ + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_input: no PCB match found, resetting.\n")); + if (!(TCPH_FLAGS(tcphdr) & TCP_RST)) { + TCP_STATS_INC(tcp.proterr); + TCP_STATS_INC(tcp.drop); + tcp_rst(NULL, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } + pbuf_free(p); + } + + LWIP_ASSERT("tcp_input: tcp_pcbs_sane()", tcp_pcbs_sane()); + PERF_STOP("tcp_input"); + return; +dropped: + TCP_STATS_INC(tcp.drop); + MIB2_STATS_INC(mib2.tcpinerrs); + pbuf_free(p); +} + +/** Called from tcp_input to check for TF_CLOSED flag. This results in closing + * and deallocating a pcb at the correct place to ensure noone references it + * any more. + * @returns 1 if the pcb has been closed and deallocated, 0 otherwise + */ +static int +tcp_input_delayed_close(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_input_delayed_close: invalid pcb", pcb != NULL); + + if (recv_flags & TF_CLOSED) { + /* The connection has been closed and we will deallocate the + PCB. */ + if (!(pcb->flags & TF_RXCLOSED)) { + /* Connection closed although the application has only shut down the + tx side: call the PCB's err callback and indicate the closure to + ensure the application doesn't continue using the PCB. */ + TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_CLSD); + } + tcp_pcb_remove(&tcp_active_pcbs, pcb); + tcp_free(pcb); + return 1; + } + return 0; +} + +/** + * Called by tcp_input() when a segment arrives for a listening + * connection (from tcp_input()). + * + * @param pcb the tcp_pcb_listen for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static void +tcp_listen_input(struct tcp_pcb_listen *pcb) +{ + struct tcp_pcb *npcb; + u32_t iss; + err_t rc; + + if (flags & TCP_RST) { + /* An incoming RST should be ignored. Return. */ + return; + } + + LWIP_ASSERT("tcp_listen_input: invalid pcb", pcb != NULL); + + /* In the LISTEN state, we check for incoming SYN segments, + creates a new PCB, and responds with a SYN|ACK. */ + if (flags & TCP_ACK) { + /* For incoming segments with the ACK flag set, respond with a + RST. */ + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_listen_input: ACK in LISTEN, sending reset\n")); + tcp_rst((const struct tcp_pcb *)pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } else if (flags & TCP_SYN) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection request %"U16_F" -> %"U16_F".\n", tcphdr->src, tcphdr->dest)); +#if TCP_LISTEN_BACKLOG + if (pcb->accepts_pending >= pcb->backlog) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: listen backlog exceeded for port %"U16_F"\n", tcphdr->dest)); + return; + } +#endif /* TCP_LISTEN_BACKLOG */ + npcb = tcp_alloc(pcb->prio); + /* If a new PCB could not be created (probably due to lack of memory), + we don't do anything, but rely on the sender will retransmit the + SYN at a time when we have more memory available. */ + if (npcb == NULL) { + err_t err; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: could not allocate PCB\n")); + TCP_STATS_INC(tcp.memerr); + TCP_EVENT_ACCEPT(pcb, NULL, pcb->callback_arg, ERR_MEM, err); + LWIP_UNUSED_ARG(err); /* err not useful here */ + return; + } +#if TCP_LISTEN_BACKLOG + pcb->accepts_pending++; + tcp_set_flags(npcb, TF_BACKLOGPEND); +#endif /* TCP_LISTEN_BACKLOG */ + /* Set up the new PCB. 
*/ + ip_addr_copy(npcb->local_ip, *ip_current_dest_addr()); + ip_addr_copy(npcb->remote_ip, *ip_current_src_addr()); + npcb->local_port = pcb->local_port; + npcb->remote_port = tcphdr->src; + npcb->state = SYN_RCVD; + npcb->rcv_nxt = seqno + 1; + npcb->rcv_ann_right_edge = npcb->rcv_nxt; + iss = tcp_next_iss(npcb); + npcb->snd_wl2 = iss; + npcb->snd_nxt = iss; + npcb->lastack = iss; + npcb->snd_lbb = iss; + npcb->snd_wl1 = seqno - 1;/* initialise to seqno-1 to force window update */ + npcb->callback_arg = pcb->callback_arg; +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + npcb->listener = pcb; +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + /* inherit socket options */ + npcb->so_options = pcb->so_options & SOF_INHERITED; + npcb->netif_idx = pcb->netif_idx; + /* Register the new PCB so that we can begin receiving segments + for it. */ + TCP_REG_ACTIVE(npcb); + + /* Parse any options in the SYN. */ + tcp_parseopt(npcb); + npcb->snd_wnd = tcphdr->wnd; + npcb->snd_wnd_max = npcb->snd_wnd; + +#if TCP_CALCULATE_EFF_SEND_MSS + npcb->mss = tcp_eff_send_mss(npcb->mss, &npcb->local_ip, &npcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + + MIB2_STATS_INC(mib2.tcppassiveopens); + +#if LWIP_TCP_PCB_NUM_EXT_ARGS + if (tcp_ext_arg_invoke_callbacks_passive_open(pcb, npcb) != ERR_OK) { + tcp_abandon(npcb, 0); + return; + } +#endif + + /* Send a SYN|ACK together with the MSS option. */ + rc = tcp_enqueue_flags(npcb, TCP_SYN | TCP_ACK); + if (rc != ERR_OK) { + tcp_abandon(npcb, 0); + return; + } + tcp_output(npcb); + } + return; +} + +/** + * Called by tcp_input() when a segment arrives for a connection in + * TIME_WAIT. + * + * @param pcb the tcp_pcb for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static void +tcp_timewait_input(struct tcp_pcb *pcb) +{ + /* RFC 1337: in TIME_WAIT, ignore RST and ACK FINs + any 'acceptable' segments */ + /* RFC 793 3.9 Event Processing - Segment Arrives: + * - first check sequence number - we skip that one in TIME_WAIT (always + * acceptable since we only send ACKs) + * - second check the RST bit (... return) */ + if (flags & TCP_RST) { + return; + } + + LWIP_ASSERT("tcp_timewait_input: invalid pcb", pcb != NULL); + + /* - fourth, check the SYN bit, */ + if (flags & TCP_SYN) { + /* If an incoming segment is not acceptable, an acknowledgment + should be sent in reply */ + if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd)) { + /* If the SYN is in the window it is an error, send a reset */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + return; + } + } else if (flags & TCP_FIN) { + /* - eighth, check the FIN bit: Remain in the TIME-WAIT state. + Restart the 2 MSL time-wait timeout.*/ + pcb->tmr = tcp_ticks; + } + + if ((tcplen > 0)) { + /* Acknowledge data, FIN or out-of-window SYN */ + tcp_ack_now(pcb); + tcp_output(pcb); + } + return; +} + +/** + * Implements the TCP state machine. Called by tcp_input. In some + * states tcp_receive() is called to receive data. The tcp_seg + * argument will be freed by the caller (tcp_input()) unless the + * recv_data pointer in the pcb is set. 
+ * + * @param pcb the tcp_pcb for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static err_t +tcp_process(struct tcp_pcb *pcb) +{ + struct tcp_seg *rseg; + u8_t acceptable = 0; + err_t err; + + err = ERR_OK; + + LWIP_ASSERT("tcp_process: invalid pcb", pcb != NULL); + + /* Process incoming RST segments. */ + if (flags & TCP_RST) { + /* First, determine if the reset is acceptable. */ + if (pcb->state == SYN_SENT) { + /* "In the SYN-SENT state (a RST received in response to an initial SYN), + the RST is acceptable if the ACK field acknowledges the SYN." */ + if (ackno == pcb->snd_nxt) { + acceptable = 1; + } + } else { + /* "In all states except SYN-SENT, all reset (RST) segments are validated + by checking their SEQ-fields." */ + if (seqno == pcb->rcv_nxt) { + acceptable = 1; + } else if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, + pcb->rcv_nxt + pcb->rcv_wnd)) { + /* If the sequence number is inside the window, we send a challenge ACK + and wait for a re-send with matching sequence number. + This follows RFC 5961 section 3.2 and addresses CVE-2004-0230 + (RST spoofing attack), which is present in RFC 793 RST handling. */ + tcp_ack_now(pcb); + } + } + + if (acceptable) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: Connection RESET\n")); + LWIP_ASSERT("tcp_input: pcb->state != CLOSED", pcb->state != CLOSED); + recv_flags |= TF_RESET; + tcp_clear_flags(pcb, TF_ACK_DELAY); + return ERR_RST; + } else { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n", + seqno, pcb->rcv_nxt)); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n", + seqno, pcb->rcv_nxt)); + return ERR_OK; + } + } + + if ((flags & TCP_SYN) && (pcb->state != SYN_SENT && pcb->state != SYN_RCVD)) { + /* Cope with new connection attempt after remote end crashed */ + tcp_ack_now(pcb); + return ERR_OK; + } + + if ((pcb->flags & TF_RXCLOSED) == 0) { + /* Update the PCB (in)activity timer unless rx is closed (see tcp_shutdown) */ + pcb->tmr = tcp_ticks; + } + pcb->keep_cnt_sent = 0; + pcb->persist_probe = 0; + + tcp_parseopt(pcb); + + /* Do different things depending on the TCP state. */ + switch (pcb->state) { + case SYN_SENT: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("SYN-SENT: ackno %"U32_F" pcb->snd_nxt %"U32_F" unacked %"U32_F"\n", ackno, + pcb->snd_nxt, lwip_ntohl(pcb->unacked->tcphdr->seqno))); + /* received SYN ACK with expected sequence number? 
*/ + if ((flags & TCP_ACK) && (flags & TCP_SYN) + && (ackno == pcb->lastack + 1)) { + pcb->rcv_nxt = seqno + 1; + pcb->rcv_ann_right_edge = pcb->rcv_nxt; + pcb->lastack = ackno; + pcb->snd_wnd = tcphdr->wnd; + pcb->snd_wnd_max = pcb->snd_wnd; + pcb->snd_wl1 = seqno - 1; /* initialise to seqno - 1 to force window update */ + pcb->state = ESTABLISHED; + +#if TCP_CALCULATE_EFF_SEND_MSS + pcb->mss = tcp_eff_send_mss(pcb->mss, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + + pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SENT): cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + LWIP_ASSERT("pcb->snd_queuelen > 0", (pcb->snd_queuelen > 0)); + --pcb->snd_queuelen; + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_process: SYN-SENT --queuelen %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen)); + rseg = pcb->unacked; + if (rseg == NULL) { + /* might happen if tcp_output fails in tcp_rexmit_rto() + in which case the segment is on the unsent list */ + rseg = pcb->unsent; + LWIP_ASSERT("no segment to free", rseg != NULL); + pcb->unsent = rseg->next; + } else { + pcb->unacked = rseg->next; + } + tcp_seg_free(rseg); + + /* If there's nothing left to acknowledge, stop the retransmit + timer, otherwise reset it to start again */ + if (pcb->unacked == NULL) { + pcb->rtime = -1; + } else { + pcb->rtime = 0; + pcb->nrtx = 0; + } + + /* Call the user specified function to call when successfully + * connected. */ + TCP_EVENT_CONNECTED(pcb, ERR_OK, err); + if (err == ERR_ABRT) { + return ERR_ABRT; + } + tcp_ack_now(pcb); + } + /* received ACK? possibly a half-open connection */ + else if (flags & TCP_ACK) { + /* send a RST to bring the other side in a non-synchronized state. */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + /* Resend SYN immediately (don't wait for rto timeout) to establish + connection faster, but do not send more SYNs than we otherwise would + have, or we might get caught in a loop on loopback interfaces. */ + if (pcb->nrtx < TCP_SYNMAXRTX) { + pcb->rtime = 0; + tcp_rexmit_rto(pcb); + } + } + break; + case SYN_RCVD: + if (flags & TCP_ACK) { + /* expected ACK number? */ + if (TCP_SEQ_BETWEEN(ackno, pcb->lastack + 1, pcb->snd_nxt)) { + pcb->state = ESTABLISHED; + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection established %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + if (pcb->listener == NULL) { + /* listen pcb might be closed by now */ + err = ERR_VAL; + } else +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + { +#if LWIP_CALLBACK_API + LWIP_ASSERT("pcb->listener->accept != NULL", pcb->listener->accept != NULL); +#endif + tcp_backlog_accepted(pcb); + /* Call the accept function. */ + TCP_EVENT_ACCEPT(pcb->listener, pcb, pcb->callback_arg, ERR_OK, err); + } + if (err != ERR_OK) { + /* If the accept function returns with an error, we abort + * the connection. */ + /* Already aborted? */ + if (err != ERR_ABRT) { + tcp_abort(pcb); + } + return ERR_ABRT; + } + /* If there was any data contained within this ACK, + * we'd better pass it on to the application as well. 
*/ + tcp_receive(pcb); + + /* Prevent ACK for SYN to generate a sent event */ + if (recv_acked != 0) { + recv_acked--; + } + + pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SYN_RCVD): cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + + if (recv_flags & TF_GOT_FIN) { + tcp_ack_now(pcb); + pcb->state = CLOSE_WAIT; + } + } else { + /* incorrect ACK number, send RST */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } + } else if ((flags & TCP_SYN) && (seqno == pcb->rcv_nxt - 1)) { + /* Looks like another copy of the SYN - retransmit our SYN-ACK */ + tcp_rexmit(pcb); + } + break; + case CLOSE_WAIT: + /* FALLTHROUGH */ + case ESTABLISHED: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { /* passive close */ + tcp_ack_now(pcb); + pcb->state = CLOSE_WAIT; + } + break; + case FIN_WAIT_1: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { + if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) && + pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, + ("TCP connection closed: FIN_WAIT_1 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_ack_now(pcb); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } else { + tcp_ack_now(pcb); + pcb->state = CLOSING; + } + } else if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) && + pcb->unsent == NULL) { + pcb->state = FIN_WAIT_2; + } + break; + case FIN_WAIT_2: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: FIN_WAIT_2 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_ack_now(pcb); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } + break; + case CLOSING: + tcp_receive(pcb); + if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: CLOSING %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } + break; + case LAST_ACK: + tcp_receive(pcb); + if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: LAST_ACK %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + /* bugfix #21699: don't set pcb->state to CLOSED here or we risk leaking segments */ + recv_flags |= TF_CLOSED; + } + break; + default: + break; + } + return ERR_OK; +} + +#if TCP_QUEUE_OOSEQ +/** + * Insert segment into the list (segments covered with new one will be deleted) + * + * Called from tcp_receive() + */ +static void +tcp_oos_insert_segment(struct tcp_seg *cseg, struct tcp_seg *next) +{ + struct tcp_seg *old_seg; + + LWIP_ASSERT("tcp_oos_insert_segment: invalid cseg", cseg != NULL); + + if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) { + /* received segment overlaps all following segments */ + tcp_segs_free(next); + next = NULL; + } else { + /* delete some following segments + oos queue may have segments with FIN flag */ + while (next && + TCP_SEQ_GEQ((seqno + cseg->len), + (next->tcphdr->seqno + next->len))) { + /* cseg with FIN already processed */ + if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) { + TCPH_SET_FLAG(cseg->tcphdr, TCP_FIN); + } + old_seg = next; + next = next->next; + tcp_seg_free(old_seg); + } + if (next && + TCP_SEQ_GT(seqno + cseg->len, next->tcphdr->seqno)) { + /* We need to trim 
the incoming segment. */ + cseg->len = (u16_t)(next->tcphdr->seqno - seqno); + pbuf_realloc(cseg->p, cseg->len); + } + } + cseg->next = next; +} +#endif /* TCP_QUEUE_OOSEQ */ + +/** Remove segments from a list if the incoming ACK acknowledges them */ +static struct tcp_seg * +tcp_free_acked_segments(struct tcp_pcb *pcb, struct tcp_seg *seg_list, const char *dbg_list_name, + struct tcp_seg *dbg_other_seg_list) +{ + struct tcp_seg *next; + u16_t clen; + + LWIP_UNUSED_ARG(dbg_list_name); + LWIP_UNUSED_ARG(dbg_other_seg_list); + + while (seg_list != NULL && + TCP_SEQ_LEQ(lwip_ntohl(seg_list->tcphdr->seqno) + + TCP_TCPLEN(seg_list), ackno)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: removing %"U32_F":%"U32_F" from pcb->%s\n", + lwip_ntohl(seg_list->tcphdr->seqno), + lwip_ntohl(seg_list->tcphdr->seqno) + TCP_TCPLEN(seg_list), + dbg_list_name)); + + next = seg_list; + seg_list = seg_list->next; + + clen = pbuf_clen(next->p); + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_receive: queuelen %"TCPWNDSIZE_F" ... ", + (tcpwnd_size_t)pcb->snd_queuelen)); + LWIP_ASSERT("pcb->snd_queuelen >= pbuf_clen(next->p)", (pcb->snd_queuelen >= clen)); + + pcb->snd_queuelen = (u16_t)(pcb->snd_queuelen - clen); + recv_acked = (tcpwnd_size_t)(recv_acked + next->len); + tcp_seg_free(next); + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("%"TCPWNDSIZE_F" (after freeing %s)\n", + (tcpwnd_size_t)pcb->snd_queuelen, + dbg_list_name)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_receive: valid queue length", + seg_list != NULL || dbg_other_seg_list != NULL); + } + } + return seg_list; +} + +/** + * Called by tcp_process. Checks if the given segment is an ACK for outstanding + * data, and if so frees the memory of the buffered data. Next, it places the + * segment on any of the receive queues (pcb->recved or pcb->ooseq). If the segment + * is buffered, the pbuf is referenced by pbuf_ref so that it will not be freed until + * it has been removed from the buffer. + * + * If the incoming segment constitutes an ACK for a segment that was used for RTT + * estimation, the RTT is estimated here as well. + * + * Called from tcp_process(). + */ +static void +tcp_receive(struct tcp_pcb *pcb) +{ + s16_t m; + u32_t right_wnd_edge; + int found_dupack = 0; + + LWIP_ASSERT("tcp_receive: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_receive: wrong state", pcb->state >= ESTABLISHED); + + if (flags & TCP_ACK) { + right_wnd_edge = pcb->snd_wnd + pcb->snd_wl2; + + /* Update window. */ + if (TCP_SEQ_LT(pcb->snd_wl1, seqno) || + (pcb->snd_wl1 == seqno && TCP_SEQ_LT(pcb->snd_wl2, ackno)) || + (pcb->snd_wl2 == ackno && (u32_t)SND_WND_SCALE(pcb, tcphdr->wnd) > pcb->snd_wnd)) { + pcb->snd_wnd = SND_WND_SCALE(pcb, tcphdr->wnd); + /* keep track of the biggest window announced by the remote host to calculate + the maximum segment size */ + if (pcb->snd_wnd_max < pcb->snd_wnd) { + pcb->snd_wnd_max = pcb->snd_wnd; + } + pcb->snd_wl1 = seqno; + pcb->snd_wl2 = ackno; + LWIP_DEBUGF(TCP_WND_DEBUG, ("tcp_receive: window update %"TCPWNDSIZE_F"\n", pcb->snd_wnd)); +#if TCP_WND_DEBUG + } else { + if (pcb->snd_wnd != (tcpwnd_size_t)SND_WND_SCALE(pcb, tcphdr->wnd)) { + LWIP_DEBUGF(TCP_WND_DEBUG, + ("tcp_receive: no window update lastack %"U32_F" ackno %" + U32_F" wl1 %"U32_F" seqno %"U32_F" wl2 %"U32_F"\n", + pcb->lastack, ackno, pcb->snd_wl1, seqno, pcb->snd_wl2)); + } +#endif /* TCP_WND_DEBUG */ + } + + /* (From Stevens TCP/IP Illustrated Vol II, p970.) Its only a + * duplicate ack if: + * 1) It doesn't ACK new data + * 2) length of received packet is zero (i.e. 
no payload) + * 3) the advertised window hasn't changed + * 4) There is outstanding unacknowledged data (retransmission timer running) + * 5) The ACK is == biggest ACK sequence number so far seen (snd_una) + * + * If it passes all five, should process as a dupack: + * a) dupacks < 3: do nothing + * b) dupacks == 3: fast retransmit + * c) dupacks > 3: increase cwnd + * + * If it only passes 1-3, should reset dupack counter (and add to + * stats, which we don't do in lwIP) + * + * If it only passes 1, should reset dupack counter + * + */ + + /* Clause 1 */ + if (TCP_SEQ_LEQ(ackno, pcb->lastack)) { + /* Clause 2 */ + if (tcplen == 0) { + /* Clause 3 */ + if (pcb->snd_wl2 + pcb->snd_wnd == right_wnd_edge) { + /* Clause 4 */ + if (pcb->rtime >= 0) { + /* Clause 5 */ + if (pcb->lastack == ackno) { + found_dupack = 1; + if ((u8_t)(pcb->dupacks + 1) > pcb->dupacks) { + ++pcb->dupacks; + } + if (pcb->dupacks > 3) { + /* Inflate the congestion window */ + TCP_WND_INC(pcb->cwnd, pcb->mss); + } + if (pcb->dupacks >= 3) { + /* Do fast retransmit (checked via TF_INFR, not via dupacks count) */ + tcp_rexmit_fast(pcb); + } + } + } + } + } + /* If Clause (1) or more is true, but not a duplicate ack, reset + * count of consecutive duplicate acks */ + if (!found_dupack) { + pcb->dupacks = 0; + } + } else if (TCP_SEQ_BETWEEN(ackno, pcb->lastack + 1, pcb->snd_nxt)) { + /* We come here when the ACK acknowledges new data. */ + tcpwnd_size_t acked; + + /* Reset the "IN Fast Retransmit" flag, since we are no longer + in fast retransmit. Also reset the congestion window to the + slow start threshold. */ + if (pcb->flags & TF_INFR) { + tcp_clear_flags(pcb, TF_INFR); + pcb->cwnd = pcb->ssthresh; + pcb->bytes_acked = 0; + } + + /* Reset the number of retransmissions. */ + pcb->nrtx = 0; + + /* Reset the retransmission time-out. */ + pcb->rto = (s16_t)((pcb->sa >> 3) + pcb->sv); + + /* Record how much data this ACK acks */ + acked = (tcpwnd_size_t)(ackno - pcb->lastack); + + /* Reset the fast retransmit variables. */ + pcb->dupacks = 0; + pcb->lastack = ackno; + + /* Update the congestion control variables (cwnd and + ssthresh). */ + if (pcb->state >= ESTABLISHED) { + if (pcb->cwnd < pcb->ssthresh) { + tcpwnd_size_t increase; + /* limit to 1 SMSS segment during period following RTO */ + u8_t num_seg = (pcb->flags & TF_RTO) ? 1 : 2; + /* RFC 3465, section 2.2 Slow Start */ + increase = LWIP_MIN(acked, (tcpwnd_size_t)(num_seg * pcb->mss)); + TCP_WND_INC(pcb->cwnd, increase); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: slow start cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd)); + } else { + /* RFC 3465, section 2.1 Congestion Avoidance */ + TCP_WND_INC(pcb->bytes_acked, acked); + if (pcb->bytes_acked >= pcb->cwnd) { + pcb->bytes_acked = (tcpwnd_size_t)(pcb->bytes_acked - pcb->cwnd); + TCP_WND_INC(pcb->cwnd, pcb->mss); + } + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: congestion avoidance cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd)); + } + } + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: ACK for %"U32_F", unacked->seqno %"U32_F":%"U32_F"\n", + ackno, + pcb->unacked != NULL ? + lwip_ntohl(pcb->unacked->tcphdr->seqno) : 0, + pcb->unacked != NULL ? + lwip_ntohl(pcb->unacked->tcphdr->seqno) + TCP_TCPLEN(pcb->unacked) : 0)); + + /* Remove segment from the unacknowledged list if the incoming + ACK acknowledges them. */ + pcb->unacked = tcp_free_acked_segments(pcb, pcb->unacked, "unacked", pcb->unsent); + /* We go through the ->unsent list to see if any of the segments + on the list are acknowledged by the ACK. 
This may seem + strange since an "unsent" segment shouldn't be acked. The + rationale is that lwIP puts all outstanding segments on the + ->unsent list after a retransmission, so these segments may + in fact have been sent once. */ + pcb->unsent = tcp_free_acked_segments(pcb, pcb->unsent, "unsent", pcb->unacked); + + /* If there's nothing left to acknowledge, stop the retransmit + timer, otherwise reset it to start again */ + if (pcb->unacked == NULL) { + pcb->rtime = -1; + } else { + pcb->rtime = 0; + } + + pcb->polltmr = 0; + +#if TCP_OVERSIZE + if (pcb->unsent == NULL) { + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + +#if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS + if (ip_current_is_v6()) { + /* Inform neighbor reachability of forward progress. */ + nd6_reachability_hint(ip6_current_src_addr()); + } +#endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/ + + pcb->snd_buf = (tcpwnd_size_t)(pcb->snd_buf + recv_acked); + /* check if this ACK ends our retransmission of in-flight data */ + if (pcb->flags & TF_RTO) { + /* RTO is done if + 1) both queues are empty or + 2) unacked is empty and unsent head contains data not part of RTO or + 3) unacked head contains data not part of RTO */ + if (pcb->unacked == NULL) { + if ((pcb->unsent == NULL) || + (TCP_SEQ_LEQ(pcb->rto_end, lwip_ntohl(pcb->unsent->tcphdr->seqno)))) { + tcp_clear_flags(pcb, TF_RTO); + } + } else if (TCP_SEQ_LEQ(pcb->rto_end, lwip_ntohl(pcb->unacked->tcphdr->seqno))) { + tcp_clear_flags(pcb, TF_RTO); + } + } + /* End of ACK for new data processing. */ + } else { + /* Out of sequence ACK, didn't really ack anything */ + tcp_send_empty_ack(pcb); + } + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: pcb->rttest %"U32_F" rtseq %"U32_F" ackno %"U32_F"\n", + pcb->rttest, pcb->rtseq, ackno)); + + /* RTT estimation calculations. This is done by checking if the + incoming segment acknowledges the segment we use to take a + round-trip time measurement. */ + if (pcb->rttest && TCP_SEQ_LT(pcb->rtseq, ackno)) { + /* diff between this shouldn't exceed 32K since this are tcp timer ticks + and a round-trip shouldn't be that long... */ + m = (s16_t)(tcp_ticks - pcb->rttest); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: experienced rtt %"U16_F" ticks (%"U16_F" msec).\n", + m, (u16_t)(m * TCP_SLOW_INTERVAL))); + + /* This is taken directly from VJs original code in his paper */ + m = (s16_t)(m - (pcb->sa >> 3)); + pcb->sa = (s16_t)(pcb->sa + m); + if (m < 0) { + m = (s16_t) - m; + } + m = (s16_t)(m - (pcb->sv >> 2)); + pcb->sv = (s16_t)(pcb->sv + m); + pcb->rto = (s16_t)((pcb->sa >> 3) + pcb->sv); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: RTO %"U16_F" (%"U16_F" milliseconds)\n", + pcb->rto, (u16_t)(pcb->rto * TCP_SLOW_INTERVAL))); + + pcb->rttest = 0; + } + } + + /* If the incoming segment contains data, we must process it + further unless the pcb already received a FIN. + (RFC 793, chapter 3.9, "SEGMENT ARRIVES" in states CLOSE-WAIT, CLOSING, + LAST-ACK and TIME-WAIT: "Ignore the segment text.") */ + if ((tcplen > 0) && (pcb->state < CLOSE_WAIT)) { + /* This code basically does three things: + + +) If the incoming segment contains data that is the next + in-sequence data, this data is passed to the application. This + might involve trimming the first edge of the data. The rcv_nxt + variable and the advertised window are adjusted. + + +) If the incoming segment has data that is above the next + sequence number expected (->rcv_nxt), the segment is placed on + the ->ooseq queue. 
This is done by finding the appropriate + place in the ->ooseq queue (which is ordered by sequence + number) and trim the segment in both ends if needed. An + immediate ACK is sent to indicate that we received an + out-of-sequence segment. + + +) Finally, we check if the first segment on the ->ooseq queue + now is in sequence (i.e., if rcv_nxt >= ooseq->seqno). If + rcv_nxt > ooseq->seqno, we must trim the first edge of the + segment on ->ooseq before we adjust rcv_nxt. The data in the + segments that are now on sequence are chained onto the + incoming segment so that we only need to call the application + once. + */ + + /* First, we check if we must trim the first edge. We have to do + this if the sequence number of the incoming segment is less + than rcv_nxt, and the sequence number plus the length of the + segment is larger than rcv_nxt. */ + /* if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) { + if (TCP_SEQ_LT(pcb->rcv_nxt, seqno + tcplen)) {*/ + if (TCP_SEQ_BETWEEN(pcb->rcv_nxt, seqno + 1, seqno + tcplen - 1)) { + /* Trimming the first edge is done by pushing the payload + pointer in the pbuf downwards. This is somewhat tricky since + we do not want to discard the full contents of the pbuf up to + the new starting point of the data since we have to keep the + TCP header which is present in the first pbuf in the chain. + + What is done is really quite a nasty hack: the first pbuf in + the pbuf chain is pointed to by inseg.p. Since we need to be + able to deallocate the whole pbuf, we cannot change this + inseg.p pointer to point to any of the later pbufs in the + chain. Instead, we point the ->payload pointer in the first + pbuf to data in one of the later pbufs. We also set the + inseg.data pointer to point to the right place. This way, the + ->p pointer will still point to the first pbuf, but the + ->p->payload pointer will point to data in another pbuf. + + After we are done with adjusting the pbuf pointers we must + adjust the ->data pointer in the seg and the segment + length.*/ + + struct pbuf *p = inseg.p; + u32_t off32 = pcb->rcv_nxt - seqno; + u16_t new_tot_len, off; + LWIP_ASSERT("inseg.p != NULL", inseg.p); + LWIP_ASSERT("insane offset!", (off32 < 0xffff)); + off = (u16_t)off32; + LWIP_ASSERT("pbuf too short!", (((s32_t)inseg.p->tot_len) >= off)); + inseg.len -= off; + new_tot_len = (u16_t)(inseg.p->tot_len - off); + while (p->len < off) { + off -= p->len; + /* all pbufs up to and including this one have len==0, so tot_len is equal */ + p->tot_len = new_tot_len; + p->len = 0; + p = p->next; + } + /* cannot fail... */ + pbuf_remove_header(p, off); + inseg.tcphdr->seqno = seqno = pcb->rcv_nxt; + } else { + if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) { + /* the whole segment is < rcv_nxt */ + /* must be a duplicate of a packet that has already been correctly handled */ + + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: duplicate seqno %"U32_F"\n", seqno)); + tcp_ack_now(pcb); + } + } + + /* The sequence number must be within the window (above rcv_nxt + and below rcv_nxt + rcv_wnd) in order to be further + processed. */ + if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, + pcb->rcv_nxt + pcb->rcv_wnd - 1)) { + if (pcb->rcv_nxt == seqno) { + /* The incoming segment is the next in sequence. We check if + we have to trim the end of the segment and update rcv_nxt + and pass the data to the application. 
*/ + tcplen = TCP_TCPLEN(&inseg); + + if (tcplen > pcb->rcv_wnd) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: other end overran receive window" + "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n", + seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd)); + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + /* Must remove the FIN from the header as we're trimming + * that byte of sequence-space from the packet */ + TCPH_FLAGS_SET(inseg.tcphdr, TCPH_FLAGS(inseg.tcphdr) & ~(unsigned int)TCP_FIN); + } + /* Adjust length of segment to fit in the window. */ + TCPWND_CHECK16(pcb->rcv_wnd); + inseg.len = (u16_t)pcb->rcv_wnd; + if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) { + inseg.len -= 1; + } + pbuf_realloc(inseg.p, inseg.len); + tcplen = TCP_TCPLEN(&inseg); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n", + (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd)); + } +#if TCP_QUEUE_OOSEQ + /* Received in-sequence data, adjust ooseq data if: + - FIN has been received or + - inseq overlaps with ooseq */ + if (pcb->ooseq != NULL) { + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: received in-order FIN, binning ooseq queue\n")); + /* Received in-order FIN means anything that was received + * out of order must now have been received in-order, so + * bin the ooseq queue */ + while (pcb->ooseq != NULL) { + struct tcp_seg *old_ooseq = pcb->ooseq; + pcb->ooseq = pcb->ooseq->next; + tcp_seg_free(old_ooseq); + } + } else { + struct tcp_seg *next = pcb->ooseq; + /* Remove all segments on ooseq that are covered by inseg already. + * FIN is copied from ooseq to inseg if present. */ + while (next && + TCP_SEQ_GEQ(seqno + tcplen, + next->tcphdr->seqno + next->len)) { + struct tcp_seg *tmp; + /* inseg cannot have FIN here (already processed above) */ + if ((TCPH_FLAGS(next->tcphdr) & TCP_FIN) != 0 && + (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) == 0) { + TCPH_SET_FLAG(inseg.tcphdr, TCP_FIN); + tcplen = TCP_TCPLEN(&inseg); + } + tmp = next; + next = next->next; + tcp_seg_free(tmp); + } + /* Now trim right side of inseg if it overlaps with the first + * segment on ooseq */ + if (next && + TCP_SEQ_GT(seqno + tcplen, + next->tcphdr->seqno)) { + /* inseg cannot have FIN here (already processed above) */ + inseg.len = (u16_t)(next->tcphdr->seqno - seqno); + if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) { + inseg.len -= 1; + } + pbuf_realloc(inseg.p, inseg.len); + tcplen = TCP_TCPLEN(&inseg); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to ooseq queue\n", + (seqno + tcplen) == next->tcphdr->seqno); + } + pcb->ooseq = next; + } + } +#endif /* TCP_QUEUE_OOSEQ */ + + pcb->rcv_nxt = seqno + tcplen; + + /* Update the receiver's (our) window. */ + LWIP_ASSERT("tcp_receive: tcplen > rcv_wnd\n", pcb->rcv_wnd >= tcplen); + pcb->rcv_wnd -= tcplen; + + tcp_update_rcv_ann_wnd(pcb); + + /* If there is data in the segment, we make preparations to + pass this up to the application. The ->recv_data variable + is used for holding the pbuf that goes to the + application. The code for reassembling out-of-sequence data + chains its data on this pbuf as well. + + If the segment was a FIN, we set the TF_GOT_FIN flag that will + be used to indicate to the application that the remote side has + closed its end of the connection. */ + if (inseg.p->tot_len > 0) { + recv_data = inseg.p; + /* Since this pbuf now is the responsibility of the + application, we delete our reference to it so that we won't + (mistakingly) deallocate it. 
*/ + inseg.p = NULL; + } + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: received FIN.\n")); + recv_flags |= TF_GOT_FIN; + } + +#if TCP_QUEUE_OOSEQ + /* We now check if we have segments on the ->ooseq queue that + are now in sequence. */ + while (pcb->ooseq != NULL && + pcb->ooseq->tcphdr->seqno == pcb->rcv_nxt) { + + struct tcp_seg *cseg = pcb->ooseq; + seqno = pcb->ooseq->tcphdr->seqno; + + pcb->rcv_nxt += TCP_TCPLEN(cseg); + LWIP_ASSERT("tcp_receive: ooseq tcplen > rcv_wnd\n", + pcb->rcv_wnd >= TCP_TCPLEN(cseg)); + pcb->rcv_wnd -= TCP_TCPLEN(cseg); + + tcp_update_rcv_ann_wnd(pcb); + + if (cseg->p->tot_len > 0) { + /* Chain this pbuf onto the pbuf that we will pass to + the application. */ + /* With window scaling, this can overflow recv_data->tot_len, but + that's not a problem since we explicitly fix that before passing + recv_data to the application. */ + if (recv_data) { + pbuf_cat(recv_data, cseg->p); + } else { + recv_data = cseg->p; + } + cseg->p = NULL; + } + if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: dequeued FIN.\n")); + recv_flags |= TF_GOT_FIN; + if (pcb->state == ESTABLISHED) { /* force passive close or we can move to active close */ + pcb->state = CLOSE_WAIT; + } + } + + pcb->ooseq = cseg->next; + tcp_seg_free(cseg); + } +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + if (pcb->ooseq != NULL) { + /* Some segments may have been removed from ooseq, let's remove all SACKs that + describe anything before the new beginning of that list. */ + tcp_remove_sacks_lt(pcb, pcb->ooseq->tcphdr->seqno); + } else if (LWIP_TCP_SACK_VALID(pcb, 0)) { + /* ooseq has been cleared. Nothing to SACK */ + memset(pcb->rcv_sacks, 0, sizeof(pcb->rcv_sacks)); + } + } +#endif /* LWIP_TCP_SACK_OUT */ +#endif /* TCP_QUEUE_OOSEQ */ + + + /* Acknowledge the segment(s). */ + tcp_ack(pcb); + +#if LWIP_TCP_SACK_OUT + if (LWIP_TCP_SACK_VALID(pcb, 0)) { + /* Normally the ACK for the data received could be piggy-backed on a data packet, + but lwIP currently does not support including SACKs in data packets. So we force + it to respond with an empty ACK packet (only if there is at least one SACK to be sent). + NOTE: tcp_send_empty_ack() on success clears the ACK flags (set by tcp_ack()) */ + tcp_send_empty_ack(pcb); + } +#endif /* LWIP_TCP_SACK_OUT */ + +#if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS + if (ip_current_is_v6()) { + /* Inform neighbor reachability of forward progress. */ + nd6_reachability_hint(ip6_current_src_addr()); + } +#endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/ + + } else { + /* We get here if the incoming segment is out-of-sequence. */ + +#if TCP_QUEUE_OOSEQ + /* We queue the segment on the ->ooseq queue. */ + if (pcb->ooseq == NULL) { + pcb->ooseq = tcp_seg_copy(&inseg); +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + /* All the SACKs should be invalid, so we can simply store the most recent one: */ + pcb->rcv_sacks[0].left = seqno; + pcb->rcv_sacks[0].right = seqno + inseg.len; + } +#endif /* LWIP_TCP_SACK_OUT */ + } else { + /* If the queue is not empty, we walk through the queue and + try to find a place where the sequence number of the + incoming segment is between the sequence numbers of the + previous and the next segment on the ->ooseq queue. That is + the place where we put the incoming segment. If needed, we + trim the second edges of the previous and the incoming + segment so that it will fit into the sequence. 
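+           Note that the ->ooseq queue holds copies: tcp_seg_copy() duplicates
+           the working segment 'inseg', which is reused for every incoming
+           segment, so the queue never points into 'inseg' directly.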
+ + If the incoming segment has the same sequence number as a + segment on the ->ooseq queue, we discard the segment that + contains less data. */ + +#if LWIP_TCP_SACK_OUT + /* This is the left edge of the lowest possible SACK range. + It may start before the newly received segment (possibly adjusted below). */ + u32_t sackbeg = TCP_SEQ_LT(seqno, pcb->ooseq->tcphdr->seqno) ? seqno : pcb->ooseq->tcphdr->seqno; +#endif /* LWIP_TCP_SACK_OUT */ + struct tcp_seg *next, *prev = NULL; + for (next = pcb->ooseq; next != NULL; next = next->next) { + if (seqno == next->tcphdr->seqno) { + /* The sequence number of the incoming segment is the + same as the sequence number of the segment on + ->ooseq. We check the lengths to see which one to + discard. */ + if (inseg.len > next->len) { + /* The incoming segment is larger than the old + segment. We replace some segments with the new + one. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + if (prev != NULL) { + prev->next = cseg; + } else { + pcb->ooseq = cseg; + } + tcp_oos_insert_segment(cseg, next); + } + break; + } else { + /* Either the lengths are the same or the incoming + segment was smaller than the old one; in either + case, we ditch the incoming segment. */ + break; + } + } else { + if (prev == NULL) { + if (TCP_SEQ_LT(seqno, next->tcphdr->seqno)) { + /* The sequence number of the incoming segment is lower + than the sequence number of the first segment on the + queue. We put the incoming segment first on the + queue. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + pcb->ooseq = cseg; + tcp_oos_insert_segment(cseg, next); + } + break; + } + } else { + /*if (TCP_SEQ_LT(prev->tcphdr->seqno, seqno) && + TCP_SEQ_LT(seqno, next->tcphdr->seqno)) {*/ + if (TCP_SEQ_BETWEEN(seqno, prev->tcphdr->seqno + 1, next->tcphdr->seqno - 1)) { + /* The sequence number of the incoming segment is in + between the sequence numbers of the previous and + the next segment on ->ooseq. We trim trim the previous + segment, delete next segments that included in received segment + and trim received, if needed. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + if (TCP_SEQ_GT(prev->tcphdr->seqno + prev->len, seqno)) { + /* We need to trim the prev segment. */ + prev->len = (u16_t)(seqno - prev->tcphdr->seqno); + pbuf_realloc(prev->p, prev->len); + } + prev->next = cseg; + tcp_oos_insert_segment(cseg, next); + } + break; + } + } + +#if LWIP_TCP_SACK_OUT + /* The new segment goes after the 'next' one. If there is a "hole" in sequence numbers + between 'prev' and the beginning of 'next', we want to move sackbeg. */ + if (prev != NULL && prev->tcphdr->seqno + prev->len != next->tcphdr->seqno) { + sackbeg = next->tcphdr->seqno; + } +#endif /* LWIP_TCP_SACK_OUT */ + + /* We don't use 'prev' below, so let's set it to current 'next'. + This way even if we break the loop below, 'prev' will be pointing + at the segment right in front of the newly added one. */ + prev = next; + + /* If the "next" segment is the last segment on the + ooseq queue, we add the incoming segment to the end + of the list. */ + if (next->next == NULL && + TCP_SEQ_GT(seqno, next->tcphdr->seqno)) { + if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) { + /* segment "next" already contains all data */ + break; + } + next->next = tcp_seg_copy(&inseg); + if (next->next != NULL) { + if (TCP_SEQ_GT(next->tcphdr->seqno + next->len, seqno)) { + /* We need to trim the last segment. 
*/ + next->len = (u16_t)(seqno - next->tcphdr->seqno); + pbuf_realloc(next->p, next->len); + } + /* check if the remote side overruns our receive window */ + if (TCP_SEQ_GT((u32_t)tcplen + seqno, pcb->rcv_nxt + (u32_t)pcb->rcv_wnd)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: other end overran receive window" + "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n", + seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd)); + if (TCPH_FLAGS(next->next->tcphdr) & TCP_FIN) { + /* Must remove the FIN from the header as we're trimming + * that byte of sequence-space from the packet */ + TCPH_FLAGS_SET(next->next->tcphdr, TCPH_FLAGS(next->next->tcphdr) & ~TCP_FIN); + } + /* Adjust length of segment to fit in the window. */ + next->next->len = (u16_t)(pcb->rcv_nxt + pcb->rcv_wnd - seqno); + pbuf_realloc(next->next->p, next->next->len); + tcplen = TCP_TCPLEN(next->next); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n", + (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd)); + } + } + break; + } + } + } + +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + if (prev == NULL) { + /* The new segment is at the beginning. sackbeg should already be set properly. + We need to find the right edge. */ + next = pcb->ooseq; + } else if (prev->next != NULL) { + /* The new segment was added after 'prev'. If there is a "hole" between 'prev' and 'prev->next', + we need to move sackbeg. After that we should find the right edge. */ + next = prev->next; + if (prev->tcphdr->seqno + prev->len != next->tcphdr->seqno) { + sackbeg = next->tcphdr->seqno; + } + } else { + next = NULL; + } + if (next != NULL) { + u32_t sackend = next->tcphdr->seqno; + for ( ; (next != NULL) && (sackend == next->tcphdr->seqno); next = next->next) { + sackend += next->len; + } + tcp_add_sack(pcb, sackbeg, sackend); + } + } +#endif /* LWIP_TCP_SACK_OUT */ + } +#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT) + { + /* Check that the data on ooseq doesn't exceed one of the limits + and throw away everything above that limit. */ +#ifdef TCP_OOSEQ_BYTES_LIMIT + const u32_t ooseq_max_blen = TCP_OOSEQ_BYTES_LIMIT(pcb); + u32_t ooseq_blen = 0; +#endif +#ifdef TCP_OOSEQ_PBUFS_LIMIT + const u16_t ooseq_max_qlen = TCP_OOSEQ_PBUFS_LIMIT(pcb); + u16_t ooseq_qlen = 0; +#endif + struct tcp_seg *next, *prev = NULL; + for (next = pcb->ooseq; next != NULL; prev = next, next = next->next) { + struct pbuf *p = next->p; + int stop_here = 0; +#ifdef TCP_OOSEQ_BYTES_LIMIT + ooseq_blen += p->tot_len; + if (ooseq_blen > ooseq_max_blen) { + stop_here = 1; + } +#endif +#ifdef TCP_OOSEQ_PBUFS_LIMIT + ooseq_qlen += pbuf_clen(p); + if (ooseq_qlen > ooseq_max_qlen) { + stop_here = 1; + } +#endif + if (stop_here) { +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + /* Let's remove all SACKs from next's seqno up. */ + tcp_remove_sacks_gt(pcb, next->tcphdr->seqno); + } +#endif /* LWIP_TCP_SACK_OUT */ + /* too much ooseq data, dump this and everything after it */ + tcp_segs_free(next); + if (prev == NULL) { + /* first ooseq segment is too much, dump the whole queue */ + pcb->ooseq = NULL; + } else { + /* just dump 'next' and everything after it */ + prev->next = NULL; + } + break; + } + } + } +#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */ +#endif /* TCP_QUEUE_OOSEQ */ + + /* We send the ACK packet after we've (potentially) dealt with SACKs, + so they can be included in the acknowledgment. */ + tcp_send_empty_ack(pcb); + } + } else { + /* The incoming segment is not within the window. 
*/ + tcp_send_empty_ack(pcb); + } + } else { + /* Segments with length 0 is taken care of here. Segments that + fall out of the window are ACKed. */ + if (!TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd - 1)) { + tcp_ack_now(pcb); + } + } +} + +static u8_t +tcp_get_next_optbyte(void) +{ + u16_t optidx = tcp_optidx++; + if ((tcphdr_opt2 == NULL) || (optidx < tcphdr_opt1len)) { + u8_t *opts = (u8_t *)tcphdr + TCP_HLEN; + return opts[optidx]; + } else { + u8_t idx = (u8_t)(optidx - tcphdr_opt1len); + return tcphdr_opt2[idx]; + } +} + +/** + * Parses the options contained in the incoming segment. + * + * Called from tcp_listen_input() and tcp_process(). + * Currently, only the MSS option is supported! + * + * @param pcb the tcp_pcb for which a segment arrived + */ +static void +tcp_parseopt(struct tcp_pcb *pcb) +{ + u8_t data; + u16_t mss; +#if LWIP_TCP_TIMESTAMPS + u32_t tsval; +#endif + + LWIP_ASSERT("tcp_parseopt: invalid pcb", pcb != NULL); + + /* Parse the TCP MSS option, if present. */ + if (tcphdr_optlen != 0) { + for (tcp_optidx = 0; tcp_optidx < tcphdr_optlen; ) { + u8_t opt = tcp_get_next_optbyte(); + switch (opt) { + case LWIP_TCP_OPT_EOL: + /* End of options. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: EOL\n")); + return; + case LWIP_TCP_OPT_NOP: + /* NOP option. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: NOP\n")); + break; + case LWIP_TCP_OPT_MSS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: MSS\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_MSS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_MSS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* An MSS option with the right option length. */ + mss = (u16_t)(tcp_get_next_optbyte() << 8); + mss |= tcp_get_next_optbyte(); + /* Limit the mss to the configured TCP_MSS and prevent division by zero */ + pcb->mss = ((mss > TCP_MSS) || (mss == 0)) ? TCP_MSS : mss; + break; +#if LWIP_WND_SCALE + case LWIP_TCP_OPT_WS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: WND_SCALE\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_WS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_WS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* An WND_SCALE option with the right option length. 
*/ + data = tcp_get_next_optbyte(); + /* If syn was received with wnd scale option, + activate wnd scale opt, but only if this is not a retransmission */ + if ((flags & TCP_SYN) && !(pcb->flags & TF_WND_SCALE)) { + pcb->snd_scale = data; + if (pcb->snd_scale > 14U) { + pcb->snd_scale = 14U; + } + pcb->rcv_scale = TCP_RCV_SCALE; + tcp_set_flags(pcb, TF_WND_SCALE); + /* window scaling is enabled, we can use the full receive window */ + LWIP_ASSERT("window not at default value", pcb->rcv_wnd == TCPWND_MIN16(TCP_WND)); + LWIP_ASSERT("window not at default value", pcb->rcv_ann_wnd == TCPWND_MIN16(TCP_WND)); + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCP_WND; + } + break; +#endif /* LWIP_WND_SCALE */ +#if LWIP_TCP_TIMESTAMPS + case LWIP_TCP_OPT_TS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: TS\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_TS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_TS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* TCP timestamp option with valid length */ + tsval = tcp_get_next_optbyte(); + tsval |= (tcp_get_next_optbyte() << 8); + tsval |= (tcp_get_next_optbyte() << 16); + tsval |= (tcp_get_next_optbyte() << 24); + if (flags & TCP_SYN) { + pcb->ts_recent = lwip_ntohl(tsval); + /* Enable sending timestamps in every segment now that we know + the remote host supports it. */ + tcp_set_flags(pcb, TF_TIMESTAMP); + } else if (TCP_SEQ_BETWEEN(pcb->ts_lastacksent, seqno, seqno + tcplen)) { + pcb->ts_recent = lwip_ntohl(tsval); + } + /* Advance to next option (6 bytes already read) */ + tcp_optidx += LWIP_TCP_OPT_LEN_TS - 6; + break; +#endif /* LWIP_TCP_TIMESTAMPS */ +#if LWIP_TCP_SACK_OUT + case LWIP_TCP_OPT_SACK_PERM: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: SACK_PERM\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_SACK_PERM || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_SACK_PERM) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* TCP SACK_PERM option with valid length */ + if (flags & TCP_SYN) { + /* We only set it if we receive it in a SYN (or SYN+ACK) packet */ + tcp_set_flags(pcb, TF_SACK); + } + break; +#endif /* LWIP_TCP_SACK_OUT */ + default: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: other\n")); + data = tcp_get_next_optbyte(); + if (data < 2) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + /* If the length field is zero, the options are malformed + and we don't process them further. */ + return; + } + /* All other options have a length field, so that we easily + can skip past them. */ + tcp_optidx += data - 2; + } + } + } +} + +void +tcp_trigger_input_pcb_close(void) +{ + recv_flags |= TF_CLOSED; +} + +#if LWIP_TCP_SACK_OUT +/** + * Called by tcp_receive() to add new SACK entry. + * + * The new SACK entry will be placed at the beginning of rcv_sacks[], as the newest one. + * Existing SACK entries will be "pushed back", to preserve their order. + * This is the behavior described in RFC 2018, section 4. 
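+ * At most LWIP_TCP_MAX_SACK_NUM entries are kept; any stored entry that
+ * overlaps the new range is dropped, and the remaining valid entries are
+ * shifted back by one position before the new entry is stored at index 0.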
+ * + * @param pcb the tcp_pcb for which a segment arrived + * @param left the left side of the SACK (the first sequence number) + * @param right the right side of the SACK (the first sequence number past this SACK) + */ +static void +tcp_add_sack(struct tcp_pcb *pcb, u32_t left, u32_t right) +{ + u8_t i; + u8_t unused_idx; + + if ((pcb->flags & TF_SACK) == 0 || !TCP_SEQ_LT(left, right)) { + return; + } + + /* First, let's remove all SACKs that are no longer needed (because they overlap with the newest one), + while moving all other SACKs forward. + We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at [i] if it doesn't overlap with left:right range. + It does not overlap if its right side is before the newly added SACK, + or if its left side is after the newly added SACK. + NOTE: The equality should not really happen, but it doesn't hurt. */ + if (TCP_SEQ_LEQ(pcb->rcv_sacks[i].right, left) || TCP_SEQ_LEQ(right, pcb->rcv_sacks[i].left)) { + if (unused_idx != i) { + /* We don't need to copy if it's already in the right spot */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + ++unused_idx; + } + } + + /* Now 'unused_idx' is the index of the first invalid SACK entry, + anywhere between 0 (no valid entries) and LWIP_TCP_MAX_SACK_NUM (all entries are valid). + We want to clear this and all following SACKs. + However, we will be adding another one in the front (and shifting everything else back). + So let's just iterate from the back, and set each entry to the one to the left if it's valid, + or to 0 if it is not. */ + for (i = LWIP_TCP_MAX_SACK_NUM - 1; i > 0; --i) { + /* [i] is the index we are setting, and the value should be at index [i-1], + or 0 if that index is unused (>= unused_idx). */ + if (i - 1 >= unused_idx) { + /* [i-1] is unused. Let's clear [i]. */ + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } else { + pcb->rcv_sacks[i] = pcb->rcv_sacks[i - 1]; + } + } + + /* And now we can store the newest SACK */ + pcb->rcv_sacks[0].left = left; + pcb->rcv_sacks[0].right = right; +} + +/** + * Called to remove a range of SACKs. + * + * SACK entries will be removed or adjusted to not acknowledge any sequence + * numbers that are less than 'seq' passed. It not only invalidates entries, + * but also moves all entries that are still valid to the beginning. + * + * @param pcb the tcp_pcb to modify + * @param seq the lowest sequence number to keep in SACK entries + */ +static void +tcp_remove_sacks_lt(struct tcp_pcb *pcb, u32_t seq) +{ + u8_t i; + u8_t unused_idx; + + /* We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at index [i] if its right side is > 'seq'. */ + if (TCP_SEQ_GT(pcb->rcv_sacks[i].right, seq)) { + if (unused_idx != i) { + /* We only copy it if it's not in the right spot already. */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + /* NOTE: It is possible that its left side is < 'seq', in which case we should adjust it. 
*/ + if (TCP_SEQ_LT(pcb->rcv_sacks[unused_idx].left, seq)) { + pcb->rcv_sacks[unused_idx].left = seq; + } + ++unused_idx; + } + } + + /* We also need to invalidate everything from 'unused_idx' till the end */ + for (i = unused_idx; i < LWIP_TCP_MAX_SACK_NUM; ++i) { + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } +} + +#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT) +/** + * Called to remove a range of SACKs. + * + * SACK entries will be removed or adjusted to not acknowledge any sequence + * numbers that are greater than (or equal to) 'seq' passed. It not only invalidates entries, + * but also moves all entries that are still valid to the beginning. + * + * @param pcb the tcp_pcb to modify + * @param seq the highest sequence number to keep in SACK entries + */ +static void +tcp_remove_sacks_gt(struct tcp_pcb *pcb, u32_t seq) +{ + u8_t i; + u8_t unused_idx; + + /* We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at index [i] if its left side is < 'seq'. */ + if (TCP_SEQ_LT(pcb->rcv_sacks[i].left, seq)) { + if (unused_idx != i) { + /* We only copy it if it's not in the right spot already. */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + /* NOTE: It is possible that its right side is > 'seq', in which case we should adjust it. */ + if (TCP_SEQ_GT(pcb->rcv_sacks[unused_idx].right, seq)) { + pcb->rcv_sacks[unused_idx].right = seq; + } + ++unused_idx; + } + } + + /* We also need to invalidate everything from 'unused_idx' till the end */ + for (i = unused_idx; i < LWIP_TCP_MAX_SACK_NUM; ++i) { + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } +} +#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */ + +#endif /* LWIP_TCP_SACK_OUT */ + +#endif /* LWIP_TCP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/tcp_out.c b/Middlewares/Third_Party/LwIP/src/core/tcp_out.c new file mode 100644 index 00000000..724df109 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/tcp_out.c @@ -0,0 +1,2190 @@ +/** + * @file + * Transmission Control Protocol, outgoing traffic + * + * The output functions of TCP. + * + * There are two distinct ways for TCP segments to get sent: + * - queued data: these are segments transferring data or segments containing + * SYN or FIN (which both count as one sequence number). They are created as + * struct @ref pbuf together with a struct tcp_seg and enqueue to the + * unsent list of the pcb. They are sent by tcp_output: + * - @ref tcp_write : creates data segments + * - @ref tcp_split_unsent_seg : splits a data segment + * - @ref tcp_enqueue_flags : creates SYN-only or FIN-only segments + * - @ref tcp_output / tcp_output_segment : finalize the tcp header + * (e.g. sequence numbers, options, checksum) and output to IP + * - the various tcp_rexmit functions shuffle around segments between the + * unsent an unacked lists to retransmit them + * - tcp_create_segment and tcp_pbuf_prealloc allocate pbuf and + * segment for these functions + * - direct send: these segments don't contain data but control the connection + * behaviour. 
They are created as pbuf only and sent directly without + * enqueueing them: + * - @ref tcp_send_empty_ack sends an ACK-only segment + * - @ref tcp_rst sends a RST segment + * - @ref tcp_keepalive sends a keepalive segment + * - @ref tcp_zero_window_probe sends a window probe segment + * - tcp_output_alloc_header allocates a header-only pbuf for these functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcp_priv.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/inet_chksum.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#if LWIP_TCP_TIMESTAMPS +#include "lwip/sys.h" +#endif + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/* Allow to add custom TCP header options by defining this hook */ +#ifdef LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH +#define LWIP_TCP_OPT_LENGTH_SEGMENT(flags, pcb) LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH(pcb, LWIP_TCP_OPT_LENGTH(flags)) +#else +#define LWIP_TCP_OPT_LENGTH_SEGMENT(flags, pcb) LWIP_TCP_OPT_LENGTH(flags) +#endif + +/* Define some copy-macros for checksum-on-copy so that the code looks + nicer by preventing too many ifdef's. 
*/ +#if TCP_CHECKSUM_ON_COPY +#define TCP_DATA_COPY(dst, src, len, seg) do { \ + tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), \ + len, &seg->chksum, &seg->chksum_swapped); \ + seg->flags |= TF_SEG_DATA_CHECKSUMMED; } while(0) +#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) \ + tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), len, chksum, chksum_swapped); +#else /* TCP_CHECKSUM_ON_COPY*/ +#define TCP_DATA_COPY(dst, src, len, seg) MEMCPY(dst, src, len) +#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) MEMCPY(dst, src, len) +#endif /* TCP_CHECKSUM_ON_COPY*/ + +/** Define this to 1 for an extra check that the output checksum is valid + * (usefule when the checksum is generated by the application, not the stack) */ +#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK +#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK 0 +#endif +/* Allow to override the failure of sanity check from warning to e.g. hard failure */ +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK +#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL +#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL(msg) LWIP_DEBUGF(TCP_DEBUG | LWIP_DBG_LEVEL_WARNING, msg) +#endif +#endif + +#if TCP_OVERSIZE +/** The size of segment pbufs created when TCP_OVERSIZE is enabled */ +#ifndef TCP_OVERSIZE_CALC_LENGTH +#define TCP_OVERSIZE_CALC_LENGTH(length) ((length) + TCP_OVERSIZE) +#endif +#endif + +/* Forward declarations.*/ +static err_t tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif); + +/* tcp_route: common code that returns a fixed bound netif or calls ip_route */ +static struct netif * +tcp_route(const struct tcp_pcb *pcb, const ip_addr_t *src, const ip_addr_t *dst) +{ + LWIP_UNUSED_ARG(src); /* in case IPv4-only and source-based routing is disabled */ + + if ((pcb != NULL) && (pcb->netif_idx != NETIF_NO_INDEX)) { + return netif_get_by_index(pcb->netif_idx); + } else { + return ip_route(src, dst); + } +} + +/** + * Create a TCP segment with prefilled header. + * + * Called by @ref tcp_write, @ref tcp_enqueue_flags and @ref tcp_split_unsent_seg + * + * @param pcb Protocol control block for the TCP connection. + * @param p pbuf that is used to hold the TCP header. + * @param hdrflags TCP flags for header. + * @param seqno TCP sequence number of this packet + * @param optflags options to include in TCP header + * @return a new tcp_seg pointing to p, or NULL. + * The TCP header is filled in except ackno and wnd. + * p is freed on failure. 
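+ *         The segment's len field counts payload bytes only (the optlen
+ *         option bytes are excluded), and TCP_HLEN header bytes are
+ *         prepended to p.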
+ */ +static struct tcp_seg * +tcp_create_segment(const struct tcp_pcb *pcb, struct pbuf *p, u8_t hdrflags, u32_t seqno, u8_t optflags) +{ + struct tcp_seg *seg; + u8_t optlen; + + LWIP_ASSERT("tcp_create_segment: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_create_segment: invalid pbuf", p != NULL); + + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + + if ((seg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no memory.\n")); + pbuf_free(p); + return NULL; + } + seg->flags = optflags; + seg->next = NULL; + seg->p = p; + LWIP_ASSERT("p->tot_len >= optlen", p->tot_len >= optlen); + seg->len = p->tot_len - optlen; +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + seg->chksum = 0; + seg->chksum_swapped = 0; + /* check optflags */ + LWIP_ASSERT("invalid optflags passed: TF_SEG_DATA_CHECKSUMMED", + (optflags & TF_SEG_DATA_CHECKSUMMED) == 0); +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* build TCP header */ + if (pbuf_add_header(p, TCP_HLEN)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no room for TCP header in pbuf.\n")); + TCP_STATS_INC(tcp.err); + tcp_seg_free(seg); + return NULL; + } + seg->tcphdr = (struct tcp_hdr *)seg->p->payload; + seg->tcphdr->src = lwip_htons(pcb->local_port); + seg->tcphdr->dest = lwip_htons(pcb->remote_port); + seg->tcphdr->seqno = lwip_htonl(seqno); + /* ackno is set in tcp_output */ + TCPH_HDRLEN_FLAGS_SET(seg->tcphdr, (5 + optlen / 4), hdrflags); + /* wnd and chksum are set in tcp_output */ + seg->tcphdr->urgp = 0; + return seg; +} + +/** + * Allocate a PBUF_RAM pbuf, perhaps with extra space at the end. + * + * This function is like pbuf_alloc(layer, length, PBUF_RAM) except + * there may be extra bytes available at the end. + * + * Called by @ref tcp_write + * + * @param layer flag to define header size. + * @param length size of the pbuf's payload. + * @param max_length maximum usable size of payload+oversize. + * @param oversize pointer to a u16_t that will receive the number of usable tail bytes. + * @param pcb The TCP connection that will enqueue the pbuf. + * @param apiflags API flags given to tcp_write. + * @param first_seg true when this pbuf will be used in the first enqueued segment. + */ +#if TCP_OVERSIZE +static struct pbuf * +tcp_pbuf_prealloc(pbuf_layer layer, u16_t length, u16_t max_length, + u16_t *oversize, const struct tcp_pcb *pcb, u8_t apiflags, + u8_t first_seg) +{ + struct pbuf *p; + u16_t alloc = length; + + LWIP_ASSERT("tcp_pbuf_prealloc: invalid oversize", oversize != NULL); + LWIP_ASSERT("tcp_pbuf_prealloc: invalid pcb", pcb != NULL); + +#if LWIP_NETIF_TX_SINGLE_PBUF + LWIP_UNUSED_ARG(max_length); + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(apiflags); + LWIP_UNUSED_ARG(first_seg); + alloc = max_length; +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + if (length < max_length) { + /* Should we allocate an oversized pbuf, or just the minimum + * length required? If tcp_write is going to be called again + * before this segment is transmitted, we want the oversized + * buffer. If the segment will be transmitted immediately, we can + * save memory by allocating only length. We use a simple + * heuristic based on the following information: + * + * Did the user set TCP_WRITE_FLAG_MORE? + * + * Will the Nagle algorithm defer transmission of this segment? 
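+       * If either answer is yes, more data is likely to be enqueued before
+       * this segment is transmitted, so the oversized allocation is
+       * worthwhile; otherwise only 'length' bytes are allocated.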
+ */ + if ((apiflags & TCP_WRITE_FLAG_MORE) || + (!(pcb->flags & TF_NODELAY) && + (!first_seg || + pcb->unsent != NULL || + pcb->unacked != NULL))) { + alloc = LWIP_MIN(max_length, LWIP_MEM_ALIGN_SIZE(TCP_OVERSIZE_CALC_LENGTH(length))); + } + } +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + p = pbuf_alloc(layer, alloc, PBUF_RAM); + if (p == NULL) { + return NULL; + } + LWIP_ASSERT("need unchained pbuf", p->next == NULL); + *oversize = p->len - length; + /* trim p->len to the currently used size */ + p->len = p->tot_len = length; + return p; +} +#else /* TCP_OVERSIZE */ +#define tcp_pbuf_prealloc(layer, length, mx, os, pcb, api, fst) pbuf_alloc((layer), (length), PBUF_RAM) +#endif /* TCP_OVERSIZE */ + +#if TCP_CHECKSUM_ON_COPY +/** Add a checksum of newly added data to the segment. + * + * Called by tcp_write and tcp_split_unsent_seg. + */ +static void +tcp_seg_add_chksum(u16_t chksum, u16_t len, u16_t *seg_chksum, + u8_t *seg_chksum_swapped) +{ + u32_t helper; + /* add chksum to old chksum and fold to u16_t */ + helper = chksum + *seg_chksum; + chksum = FOLD_U32T(helper); + if ((len & 1) != 0) { + *seg_chksum_swapped = 1 - *seg_chksum_swapped; + chksum = SWAP_BYTES_IN_WORD(chksum); + } + *seg_chksum = chksum; +} +#endif /* TCP_CHECKSUM_ON_COPY */ + +/** Checks if tcp_write is allowed or not (checks state, snd_buf and snd_queuelen). + * + * @param pcb the tcp pcb to check for + * @param len length of data to send (checked agains snd_buf) + * @return ERR_OK if tcp_write is allowed to proceed, another err_t otherwise + */ +static err_t +tcp_write_checks(struct tcp_pcb *pcb, u16_t len) +{ + LWIP_ASSERT("tcp_write_checks: invalid pcb", pcb != NULL); + + /* connection is in invalid state for data transmission? */ + if ((pcb->state != ESTABLISHED) && + (pcb->state != CLOSE_WAIT) && + (pcb->state != SYN_SENT) && + (pcb->state != SYN_RCVD)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_STATE | LWIP_DBG_LEVEL_SEVERE, ("tcp_write() called in invalid state\n")); + return ERR_CONN; + } else if (len == 0) { + return ERR_OK; + } + + /* fail on too much data */ + if (len > pcb->snd_buf) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too much data (len=%"U16_F" > snd_buf=%"TCPWNDSIZE_F")\n", + len, pcb->snd_buf)); + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return ERR_MEM; + } + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: queuelen: %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen)); + + /* If total number of pbufs on the unsent/unacked queues exceeds the + * configured maximum, return an error */ + /* check for configured max queuelen and possible overflow */ + if (pcb->snd_queuelen >= LWIP_MIN(TCP_SND_QUEUELEN, (TCP_SNDQUEUELEN_OVERFLOW + 1))) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too long queue %"U16_F" (max %"U16_F")\n", + pcb->snd_queuelen, (u16_t)TCP_SND_QUEUELEN)); + TCP_STATS_INC(tcp.memerr); + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return ERR_MEM; + } + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: pbufs on queue => at least one queue non-empty", + pcb->unacked != NULL || pcb->unsent != NULL); + } else { + LWIP_ASSERT("tcp_write: no pbufs on queue => both queues empty", + pcb->unacked == NULL && pcb->unsent == NULL); + } + return ERR_OK; +} + +/** + * @ingroup tcp_raw + * Write data for sending (but does not send it immediately). + * + * It waits in the expectation of more data being sent soon (as + * it can send them more efficiently by combining them together). 
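+ * (This coalescing behaviour is related to the Nagle algorithm; a
+ * connection can opt out with tcp_nagle_disable(), which sets the
+ * TF_NODELAY flag on the pcb.)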
+ * To prompt the system to send data now, call tcp_output() after + * calling tcp_write(). + * + * This function enqueues the data pointed to by the argument dataptr. The length of + * the data is passed as the len parameter. The apiflags can be one or more of: + * - TCP_WRITE_FLAG_COPY: indicates whether the new memory should be allocated + * for the data to be copied into. If this flag is not given, no new memory + * should be allocated and the data should only be referenced by pointer. This + * also means that the memory behind dataptr must not change until the data is + * ACKed by the remote host + * - TCP_WRITE_FLAG_MORE: indicates that more data follows. If this is omitted, + * the PSH flag is set in the last segment created by this call to tcp_write. + * If this flag is given, the PSH flag is not set. + * + * The tcp_write() function will fail and return ERR_MEM if the length + * of the data exceeds the current send buffer size or if the length of + * the queue of outgoing segment is larger than the upper limit defined + * in lwipopts.h. The number of bytes available in the output queue can + * be retrieved with the tcp_sndbuf() function. + * + * The proper way to use this function is to call the function with at + * most tcp_sndbuf() bytes of data. If the function returns ERR_MEM, + * the application should wait until some of the currently enqueued + * data has been successfully received by the other host and try again. + * + * @param pcb Protocol control block for the TCP connection to enqueue data for. + * @param arg Pointer to the data to be enqueued for sending. + * @param len Data length in bytes + * @param apiflags combination of following flags : + * - TCP_WRITE_FLAG_COPY (0x01) data will be copied into memory belonging to the stack + * - TCP_WRITE_FLAG_MORE (0x02) for TCP connection, PSH flag will not be set on last segment sent, + * @return ERR_OK if enqueued, another err_t on error + */ +err_t +tcp_write(struct tcp_pcb *pcb, const void *arg, u16_t len, u8_t apiflags) +{ + struct pbuf *concat_p = NULL; + struct tcp_seg *last_unsent = NULL, *seg = NULL, *prev_seg = NULL, *queue = NULL; + u16_t pos = 0; /* position in 'arg' data */ + u16_t queuelen; + u8_t optlen; + u8_t optflags = 0; +#if TCP_OVERSIZE + u16_t oversize = 0; + u16_t oversize_used = 0; +#if TCP_OVERSIZE_DBGCHECK + u16_t oversize_add = 0; +#endif /* TCP_OVERSIZE_DBGCHECK*/ +#endif /* TCP_OVERSIZE */ + u16_t extendlen = 0; +#if TCP_CHECKSUM_ON_COPY + u16_t concat_chksum = 0; + u8_t concat_chksum_swapped = 0; + u16_t concat_chksummed = 0; +#endif /* TCP_CHECKSUM_ON_COPY */ + err_t err; + u16_t mss_local; + + LWIP_ERROR("tcp_write: invalid pcb", pcb != NULL, return ERR_ARG); + + /* don't allocate segments bigger than half the maximum window we ever received */ + mss_local = LWIP_MIN(pcb->mss, TCPWND_MIN16(pcb->snd_wnd_max / 2)); + mss_local = mss_local ? 
mss_local : pcb->mss; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_NETIF_TX_SINGLE_PBUF + /* Always copy to try to create single pbufs for TX */ + apiflags |= TCP_WRITE_FLAG_COPY; +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_write(pcb=%p, data=%p, len=%"U16_F", apiflags=%"U16_F")\n", + (void *)pcb, arg, len, (u16_t)apiflags)); + LWIP_ERROR("tcp_write: arg == NULL (programmer violates API)", + arg != NULL, return ERR_ARG;); + + err = tcp_write_checks(pcb, len); + if (err != ERR_OK) { + return err; + } + queuelen = pcb->snd_queuelen; + +#if LWIP_TCP_TIMESTAMPS + if ((pcb->flags & TF_TIMESTAMP)) { + /* Make sure the timestamp option is only included in data segments if we + agreed about it with the remote host. */ + optflags = TF_SEG_OPTS_TS; + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(TF_SEG_OPTS_TS, pcb); + /* ensure that segments can hold at least one data byte... */ + mss_local = LWIP_MAX(mss_local, LWIP_TCP_OPT_LEN_TS + 1); + } else +#endif /* LWIP_TCP_TIMESTAMPS */ + { + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + } + + + /* + * TCP segmentation is done in three phases with increasing complexity: + * + * 1. Copy data directly into an oversized pbuf. + * 2. Chain a new pbuf to the end of pcb->unsent. + * 3. Create new segments. + * + * We may run out of memory at any point. In that case we must + * return ERR_MEM and not change anything in pcb. Therefore, all + * changes are recorded in local variables and committed at the end + * of the function. Some pcb fields are maintained in local copies: + * + * queuelen = pcb->snd_queuelen + * oversize = pcb->unsent_oversize + * + * These variables are set consistently by the phases: + * + * seg points to the last segment tampered with. + * + * pos records progress as data is segmented. + */ + + /* Find the tail of the unsent queue. */ + if (pcb->unsent != NULL) { + u16_t space; + u16_t unsent_optlen; + + /* @todo: this could be sped up by keeping last_unsent in the pcb */ + for (last_unsent = pcb->unsent; last_unsent->next != NULL; + last_unsent = last_unsent->next); + + /* Usable space at the end of the last unsent segment */ + unsent_optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(last_unsent->flags, pcb); + LWIP_ASSERT("mss_local is too small", mss_local >= last_unsent->len + unsent_optlen); + space = mss_local - (last_unsent->len + unsent_optlen); + + /* + * Phase 1: Copy data directly into an oversized pbuf. + * + * The number of bytes copied is recorded in the oversize_used + * variable. The actual copying is done at the bottom of the + * function. + */ +#if TCP_OVERSIZE +#if TCP_OVERSIZE_DBGCHECK + /* check that pcb->unsent_oversize matches last_unsent->oversize_left */ + LWIP_ASSERT("unsent_oversize mismatch (pcb vs. last_unsent)", + pcb->unsent_oversize == last_unsent->oversize_left); +#endif /* TCP_OVERSIZE_DBGCHECK */ + oversize = pcb->unsent_oversize; + if (oversize > 0) { + LWIP_ASSERT("inconsistent oversize vs. space", oversize <= space); + seg = last_unsent; + oversize_used = LWIP_MIN(space, LWIP_MIN(oversize, len)); + pos += oversize_used; + oversize -= oversize_used; + space -= oversize_used; + } + /* now we are either finished or oversize is zero */ + LWIP_ASSERT("inconsistent oversize vs. len", (oversize == 0) || (pos == len)); +#endif /* TCP_OVERSIZE */ + +#if !LWIP_NETIF_TX_SINGLE_PBUF + /* + * Phase 2: Chain a new pbuf to the end of pcb->unsent. 
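+   * This phase only tops up the room left in the last unsent segment and
+   * never creates a new tcp_seg; any data that still remains afterwards is
+   * handled by phase 3.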
+ * + * As an exception when NOT copying the data, if the given data buffer + * directly follows the last unsent data buffer in memory, extend the last + * ROM pbuf reference to the buffer, thus saving a ROM pbuf allocation. + * + * We don't extend segments containing SYN/FIN flags or options + * (len==0). The new pbuf is kept in concat_p and pbuf_cat'ed at + * the end. + * + * This phase is skipped for LWIP_NETIF_TX_SINGLE_PBUF as we could only execute + * it after rexmit puts a segment from unacked to unsent and at this point, + * oversize info is lost. + */ + if ((pos < len) && (space > 0) && (last_unsent->len > 0)) { + u16_t seglen = LWIP_MIN(space, len - pos); + seg = last_unsent; + + /* Create a pbuf with a copy or reference to seglen bytes. We + * can use PBUF_RAW here since the data appears in the middle of + * a segment. A header will never be prepended. */ + if (apiflags & TCP_WRITE_FLAG_COPY) { + /* Data is copied */ + if ((concat_p = tcp_pbuf_prealloc(PBUF_RAW, seglen, space, &oversize, pcb, apiflags, 1)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", + seglen)); + goto memerr; + } +#if TCP_OVERSIZE_DBGCHECK + oversize_add = oversize; +#endif /* TCP_OVERSIZE_DBGCHECK */ + TCP_DATA_COPY2(concat_p->payload, (const u8_t *)arg + pos, seglen, &concat_chksum, &concat_chksum_swapped); +#if TCP_CHECKSUM_ON_COPY + concat_chksummed += seglen; +#endif /* TCP_CHECKSUM_ON_COPY */ + queuelen += pbuf_clen(concat_p); + } else { + /* Data is not copied */ + /* If the last unsent pbuf is of type PBUF_ROM, try to extend it. */ + struct pbuf *p; + for (p = last_unsent->p; p->next != NULL; p = p->next); + if (((p->type_internal & (PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_FLAG_DATA_VOLATILE)) == 0) && + (const u8_t *)p->payload + p->len == (const u8_t *)arg) { + LWIP_ASSERT("tcp_write: ROM pbufs cannot be oversized", pos == 0); + extendlen = seglen; + } else { + if ((concat_p = pbuf_alloc(PBUF_RAW, seglen, PBUF_ROM)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_write: could not allocate memory for zero-copy pbuf\n")); + goto memerr; + } + /* reference the non-volatile payload data */ + ((struct pbuf_rom *)concat_p)->payload = (const u8_t *)arg + pos; + queuelen += pbuf_clen(concat_p); + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum of nocopy-data */ + tcp_seg_add_chksum(~inet_chksum((const u8_t *)arg + pos, seglen), seglen, + &concat_chksum, &concat_chksum_swapped); + concat_chksummed += seglen; +#endif /* TCP_CHECKSUM_ON_COPY */ + } + + pos += seglen; + } +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + } else { +#if TCP_OVERSIZE + LWIP_ASSERT("unsent_oversize mismatch (pcb->unsent is NULL)", + pcb->unsent_oversize == 0); +#endif /* TCP_OVERSIZE */ + } + + /* + * Phase 3: Create new segments. + * + * The new segments are chained together in the local 'queue' + * variable, ready to be appended to pcb->unsent. 
+ */ + while (pos < len) { + struct pbuf *p; + u16_t left = len - pos; + u16_t max_len = mss_local - optlen; + u16_t seglen = LWIP_MIN(left, max_len); +#if TCP_CHECKSUM_ON_COPY + u16_t chksum = 0; + u8_t chksum_swapped = 0; +#endif /* TCP_CHECKSUM_ON_COPY */ + + if (apiflags & TCP_WRITE_FLAG_COPY) { + /* If copy is set, memory should be allocated and data copied + * into pbuf */ + if ((p = tcp_pbuf_prealloc(PBUF_TRANSPORT, seglen + optlen, mss_local, &oversize, pcb, apiflags, queue == NULL)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", seglen)); + goto memerr; + } + LWIP_ASSERT("tcp_write: check that first pbuf can hold the complete seglen", + (p->len >= seglen)); + TCP_DATA_COPY2((char *)p->payload + optlen, (const u8_t *)arg + pos, seglen, &chksum, &chksum_swapped); + } else { + /* Copy is not set: First allocate a pbuf for holding the data. + * Since the referenced data is available at least until it is + * sent out on the link (as it has to be ACKed by the remote + * party) we can safely use PBUF_ROM instead of PBUF_REF here. + */ + struct pbuf *p2; +#if TCP_OVERSIZE + LWIP_ASSERT("oversize == 0", oversize == 0); +#endif /* TCP_OVERSIZE */ + if ((p2 = pbuf_alloc(PBUF_TRANSPORT, seglen, PBUF_ROM)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for zero-copy pbuf\n")); + goto memerr; + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum of nocopy-data */ + chksum = ~inet_chksum((const u8_t *)arg + pos, seglen); + if (seglen & 1) { + chksum_swapped = 1; + chksum = SWAP_BYTES_IN_WORD(chksum); + } +#endif /* TCP_CHECKSUM_ON_COPY */ + /* reference the non-volatile payload data */ + ((struct pbuf_rom *)p2)->payload = (const u8_t *)arg + pos; + + /* Second, allocate a pbuf for the headers. */ + if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) { + /* If allocation fails, we have to deallocate the data pbuf as + * well. */ + pbuf_free(p2); + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for header pbuf\n")); + goto memerr; + } + /* Concatenate the headers and data pbufs together. */ + pbuf_cat(p/*header*/, p2/*data*/); + } + + queuelen += pbuf_clen(p); + + /* Now that there are more segments queued, we check again if the + * length of the queue exceeds the configured maximum or + * overflows. */ + if (queuelen > LWIP_MIN(TCP_SND_QUEUELEN, TCP_SNDQUEUELEN_OVERFLOW)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: queue too long %"U16_F" (%d)\n", + queuelen, (int)TCP_SND_QUEUELEN)); + pbuf_free(p); + goto memerr; + } + + if ((seg = tcp_create_segment(pcb, p, 0, pcb->snd_lbb + pos, optflags)) == NULL) { + goto memerr; + } +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = oversize; +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + seg->chksum = chksum; + seg->chksum_swapped = chksum_swapped; + seg->flags |= TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* first segment of to-be-queued data? 
*/ + if (queue == NULL) { + queue = seg; + } else { + /* Attach the segment to the end of the queued segments */ + LWIP_ASSERT("prev_seg != NULL", prev_seg != NULL); + prev_seg->next = seg; + } + /* remember last segment of to-be-queued data for next iteration */ + prev_seg = seg; + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, ("tcp_write: queueing %"U32_F":%"U32_F"\n", + lwip_ntohl(seg->tcphdr->seqno), + lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg))); + + pos += seglen; + } + + /* + * All three segmentation phases were successful. We can commit the + * transaction. + */ +#if TCP_OVERSIZE_DBGCHECK + if ((last_unsent != NULL) && (oversize_add != 0)) { + last_unsent->oversize_left += oversize_add; + } +#endif /* TCP_OVERSIZE_DBGCHECK */ + + /* + * Phase 1: If data has been added to the preallocated tail of + * last_unsent, we update the length fields of the pbuf chain. + */ +#if TCP_OVERSIZE + if (oversize_used > 0) { + struct pbuf *p; + /* Bump tot_len of whole chain, len of tail */ + for (p = last_unsent->p; p; p = p->next) { + p->tot_len += oversize_used; + if (p->next == NULL) { + TCP_DATA_COPY((char *)p->payload + p->len, arg, oversize_used, last_unsent); + p->len += oversize_used; + } + } + last_unsent->len += oversize_used; +#if TCP_OVERSIZE_DBGCHECK + LWIP_ASSERT("last_unsent->oversize_left >= oversize_used", + last_unsent->oversize_left >= oversize_used); + last_unsent->oversize_left -= oversize_used; +#endif /* TCP_OVERSIZE_DBGCHECK */ + } + pcb->unsent_oversize = oversize; +#endif /* TCP_OVERSIZE */ + + /* + * Phase 2: concat_p can be concatenated onto last_unsent->p, unless we + * determined that the last ROM pbuf can be extended to include the new data. + */ + if (concat_p != NULL) { + LWIP_ASSERT("tcp_write: cannot concatenate when pcb->unsent is empty", + (last_unsent != NULL)); + pbuf_cat(last_unsent->p, concat_p); + last_unsent->len += concat_p->tot_len; + } else if (extendlen > 0) { + struct pbuf *p; + LWIP_ASSERT("tcp_write: extension of reference requires reference", + last_unsent != NULL && last_unsent->p != NULL); + for (p = last_unsent->p; p->next != NULL; p = p->next) { + p->tot_len += extendlen; + } + p->tot_len += extendlen; + p->len += extendlen; + last_unsent->len += extendlen; + } + +#if TCP_CHECKSUM_ON_COPY + if (concat_chksummed) { + LWIP_ASSERT("tcp_write: concat checksum needs concatenated data", + concat_p != NULL || extendlen > 0); + /*if concat checksumm swapped - swap it back */ + if (concat_chksum_swapped) { + concat_chksum = SWAP_BYTES_IN_WORD(concat_chksum); + } + tcp_seg_add_chksum(concat_chksum, concat_chksummed, &last_unsent->chksum, + &last_unsent->chksum_swapped); + last_unsent->flags |= TF_SEG_DATA_CHECKSUMMED; + } +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* + * Phase 3: Append queue to pcb->unsent. Queue may be NULL, but that + * is harmless + */ + if (last_unsent == NULL) { + pcb->unsent = queue; + } else { + last_unsent->next = queue; + } + + /* + * Finally update the pcb state. + */ + pcb->snd_lbb += len; + pcb->snd_buf -= len; + pcb->snd_queuelen = queuelen; + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: %"S16_F" (after enqueued)\n", + pcb->snd_queuelen)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: valid queue length", + pcb->unacked != NULL || pcb->unsent != NULL); + } + + /* Set the PSH flag in the last segment that we enqueued. 
*/ + if (seg != NULL && seg->tcphdr != NULL && ((apiflags & TCP_WRITE_FLAG_MORE) == 0)) { + TCPH_SET_FLAG(seg->tcphdr, TCP_PSH); + } + + return ERR_OK; +memerr: + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + + if (concat_p != NULL) { + pbuf_free(concat_p); + } + if (queue != NULL) { + tcp_segs_free(queue); + } + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: valid queue length", pcb->unacked != NULL || + pcb->unsent != NULL); + } + LWIP_DEBUGF(TCP_QLEN_DEBUG | LWIP_DBG_STATE, ("tcp_write: %"S16_F" (with mem err)\n", pcb->snd_queuelen)); + return ERR_MEM; +} + +/** + * Split segment on the head of the unsent queue. If return is not + * ERR_OK, existing head remains intact + * + * The split is accomplished by creating a new TCP segment and pbuf + * which holds the remainder payload after the split. The original + * pbuf is trimmed to new length. This allows splitting of read-only + * pbufs + * + * @param pcb the tcp_pcb for which to split the unsent head + * @param split the amount of payload to remain in the head + */ +err_t +tcp_split_unsent_seg(struct tcp_pcb *pcb, u16_t split) +{ + struct tcp_seg *seg = NULL, *useg = NULL; + struct pbuf *p = NULL; + u8_t optlen; + u8_t optflags; + u8_t split_flags; + u8_t remainder_flags; + u16_t remainder; + u16_t offset; +#if TCP_CHECKSUM_ON_COPY + u16_t chksum = 0; + u8_t chksum_swapped = 0; + struct pbuf *q; +#endif /* TCP_CHECKSUM_ON_COPY */ + + LWIP_ASSERT("tcp_split_unsent_seg: invalid pcb", pcb != NULL); + + useg = pcb->unsent; + if (useg == NULL) { + return ERR_MEM; + } + + if (split == 0) { + LWIP_ASSERT("Can't split segment into length 0", 0); + return ERR_VAL; + } + + if (useg->len <= split) { + return ERR_OK; + } + + LWIP_ASSERT("split <= mss", split <= pcb->mss); + LWIP_ASSERT("useg->len > 0", useg->len > 0); + + /* We should check that we don't exceed TCP_SND_QUEUELEN but we need + * to split this packet so we may actually exceed the max value by + * one! 
+ */ + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue: split_unsent_seg: %u\n", (unsigned int)pcb->snd_queuelen)); + + optflags = useg->flags; +#if TCP_CHECKSUM_ON_COPY + /* Remove since checksum is not stored until after tcp_create_segment() */ + optflags &= ~TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + optlen = LWIP_TCP_OPT_LENGTH(optflags); + remainder = useg->len - split; + + /* Create new pbuf for the remainder of the split */ + p = pbuf_alloc(PBUF_TRANSPORT, remainder + optlen, PBUF_RAM); + if (p == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not allocate memory for pbuf remainder %u\n", remainder)); + goto memerr; + } + + /* Offset into the original pbuf is past TCP/IP headers, options, and split amount */ + offset = useg->p->tot_len - useg->len + split; + /* Copy remainder into new pbuf, headers and options will not be filled out */ + if (pbuf_copy_partial(useg->p, (u8_t *)p->payload + optlen, remainder, offset ) != remainder) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not copy pbuf remainder %u\n", remainder)); + goto memerr; + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum on remainder data */ + tcp_seg_add_chksum(~inet_chksum((const u8_t *)p->payload + optlen, remainder), remainder, + &chksum, &chksum_swapped); +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Options are created when calling tcp_output() */ + + /* Migrate flags from original segment */ + split_flags = TCPH_FLAGS(useg->tcphdr); + remainder_flags = 0; /* ACK added in tcp_output() */ + + if (split_flags & TCP_PSH) { + split_flags &= ~TCP_PSH; + remainder_flags |= TCP_PSH; + } + if (split_flags & TCP_FIN) { + split_flags &= ~TCP_FIN; + remainder_flags |= TCP_FIN; + } + /* SYN should be left on split, RST should not be present with data */ + + seg = tcp_create_segment(pcb, p, remainder_flags, lwip_ntohl(useg->tcphdr->seqno) + split, optflags); + if (seg == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not create new TCP segment\n")); + goto memerr; + } + +#if TCP_CHECKSUM_ON_COPY + seg->chksum = chksum; + seg->chksum_swapped = chksum_swapped; + seg->flags |= TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Remove this segment from the queue since trimming it may free pbufs */ + pcb->snd_queuelen -= pbuf_clen(useg->p); + + /* Trim the original pbuf into our split size. At this point our remainder segment must be setup + successfully because we are modifying the original segment */ + pbuf_realloc(useg->p, useg->p->tot_len - remainder); + useg->len -= remainder; + TCPH_SET_FLAG(useg->tcphdr, split_flags); +#if TCP_OVERSIZE_DBGCHECK + /* By trimming, realloc may have actually shrunk the pbuf, so clear oversize_left */ + useg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ + + /* Add back to the queue with new trimmed pbuf */ + pcb->snd_queuelen += pbuf_clen(useg->p); + +#if TCP_CHECKSUM_ON_COPY + /* The checksum on the split segment is now incorrect. 
We need to re-run it over the split */ + useg->chksum = 0; + useg->chksum_swapped = 0; + q = useg->p; + offset = q->tot_len - useg->len; /* Offset due to exposed headers */ + + /* Advance to the pbuf where the offset ends */ + while (q != NULL && offset > q->len) { + offset -= q->len; + q = q->next; + } + LWIP_ASSERT("Found start of payload pbuf", q != NULL); + /* Checksum the first payload pbuf accounting for offset, then other pbufs are all payload */ + for (; q != NULL; offset = 0, q = q->next) { + tcp_seg_add_chksum(~inet_chksum((const u8_t *)q->payload + offset, q->len - offset), q->len - offset, + &useg->chksum, &useg->chksum_swapped); + } +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Update number of segments on the queues. Note that length now may + * exceed TCP_SND_QUEUELEN! We don't have to touch pcb->snd_buf + * because the total amount of data is constant when packet is split */ + pcb->snd_queuelen += pbuf_clen(seg->p); + + /* Finally insert remainder into queue after split (which stays head) */ + seg->next = useg->next; + useg->next = seg; + +#if TCP_OVERSIZE + /* If remainder is last segment on the unsent, ensure we clear the oversize amount + * because the remainder is always sized to the exact remaining amount */ + if (seg->next == NULL) { + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + + return ERR_OK; +memerr: + TCP_STATS_INC(tcp.memerr); + + LWIP_ASSERT("seg == NULL", seg == NULL); + if (p != NULL) { + pbuf_free(p); + } + + return ERR_MEM; +} + +/** + * Called by tcp_close() to send a segment including FIN flag but not data. + * This FIN may be added to an existing segment or a new, otherwise empty + * segment is enqueued. + * + * @param pcb the tcp_pcb over which to send a segment + * @return ERR_OK if sent, another err_t otherwise + */ +err_t +tcp_send_fin(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_send_fin: invalid pcb", pcb != NULL); + + /* first, try to add the fin to the last unsent segment */ + if (pcb->unsent != NULL) { + struct tcp_seg *last_unsent; + for (last_unsent = pcb->unsent; last_unsent->next != NULL; + last_unsent = last_unsent->next); + + if ((TCPH_FLAGS(last_unsent->tcphdr) & (TCP_SYN | TCP_FIN | TCP_RST)) == 0) { + /* no SYN/FIN/RST flag in the header, we can add the FIN flag */ + TCPH_SET_FLAG(last_unsent->tcphdr, TCP_FIN); + tcp_set_flags(pcb, TF_FIN); + return ERR_OK; + } + } + /* no data, no length, flags, copy=1, no optdata */ + return tcp_enqueue_flags(pcb, TCP_FIN); +} + +/** + * Enqueue SYN or FIN for transmission. + * + * Called by @ref tcp_connect, tcp_listen_input, and @ref tcp_close + * (via @ref tcp_send_fin) + * + * @param pcb Protocol control block for the TCP connection. + * @param flags TCP header flags to set in the outgoing segment. + */ +err_t +tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags) +{ + struct pbuf *p; + struct tcp_seg *seg; + u8_t optflags = 0; + u8_t optlen = 0; + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen)); + + LWIP_ASSERT("tcp_enqueue_flags: need either TCP_SYN or TCP_FIN in flags (programmer violates API)", + (flags & (TCP_SYN | TCP_FIN)) != 0); + LWIP_ASSERT("tcp_enqueue_flags: invalid pcb", pcb != NULL); + + /* No need to check pcb->snd_queuelen if only SYN or FIN are allowed! */ + + /* Get options for this segment. This is a special case since this is the + only place where a SYN can be sent. 
*/ + if (flags & TCP_SYN) { + optflags = TF_SEG_OPTS_MSS; +#if LWIP_WND_SCALE + if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_WND_SCALE)) { + /* In a (sent in state SYN_RCVD), the window scale option may only + be sent if we received a window scale option from the remote host. */ + optflags |= TF_SEG_OPTS_WND_SCALE; + } +#endif /* LWIP_WND_SCALE */ +#if LWIP_TCP_SACK_OUT + if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_SACK)) { + /* In a (sent in state SYN_RCVD), the SACK_PERM option may only + be sent if we received a SACK_PERM option from the remote host. */ + optflags |= TF_SEG_OPTS_SACK_PERM; + } +#endif /* LWIP_TCP_SACK_OUT */ + } +#if LWIP_TCP_TIMESTAMPS + if ((pcb->flags & TF_TIMESTAMP) || ((flags & TCP_SYN) && (pcb->state != SYN_RCVD))) { + /* Make sure the timestamp option is only included in data segments if we + agreed about it with the remote host (and in active open SYN segments). */ + optflags |= TF_SEG_OPTS_TS; + } +#endif /* LWIP_TCP_TIMESTAMPS */ + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + + /* Allocate pbuf with room for TCP header + options */ + if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) { + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("tcp_enqueue_flags: check that first pbuf can hold optlen", + (p->len >= optlen)); + + /* Allocate memory for tcp_seg, and fill in fields. */ + if ((seg = tcp_create_segment(pcb, p, flags, pcb->snd_lbb, optflags)) == NULL) { + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("seg->tcphdr not aligned", ((mem_ptr_t)seg->tcphdr % LWIP_MIN(MEM_ALIGNMENT, 4)) == 0); + LWIP_ASSERT("tcp_enqueue_flags: invalid segment length", seg->len == 0); + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, + ("tcp_enqueue_flags: queueing %"U32_F":%"U32_F" (0x%"X16_F")\n", + lwip_ntohl(seg->tcphdr->seqno), + lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg), + (u16_t)flags)); + + /* Now append seg to pcb->unsent queue */ + if (pcb->unsent == NULL) { + pcb->unsent = seg; + } else { + struct tcp_seg *useg; + for (useg = pcb->unsent; useg->next != NULL; useg = useg->next); + useg->next = seg; + } +#if TCP_OVERSIZE + /* The new unsent tail has no space */ + pcb->unsent_oversize = 0; +#endif /* TCP_OVERSIZE */ + + /* SYN and FIN bump the sequence number */ + if ((flags & TCP_SYN) || (flags & TCP_FIN)) { + pcb->snd_lbb++; + /* optlen does not influence snd_buf */ + } + if (flags & TCP_FIN) { + tcp_set_flags(pcb, TF_FIN); + } + + /* update number of segments on the queues */ + pcb->snd_queuelen += pbuf_clen(seg->p); + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: %"S16_F" (after enqueued)\n", pcb->snd_queuelen)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_enqueue_flags: invalid queue length", + pcb->unacked != NULL || pcb->unsent != NULL); + } + + return ERR_OK; +} + +#if LWIP_TCP_TIMESTAMPS +/* Build a timestamp option (12 bytes long) at the specified options pointer) + * + * @param pcb tcp_pcb + * @param opts option pointer where to store the timestamp option + */ +static void +tcp_build_timestamp_option(const struct tcp_pcb *pcb, u32_t *opts) +{ + LWIP_ASSERT("tcp_build_timestamp_option: invalid pcb", pcb != NULL); + + /* Pad with two NOP options to make everything nicely aligned */ + opts[0] = PP_HTONL(0x0101080A); + opts[1] = lwip_htonl(sys_now()); + opts[2] = lwip_htonl(pcb->ts_recent); +} +#endif + +#if LWIP_TCP_SACK_OUT +/** + * Calculates the number of SACK entries that should be generated. 
+ * It takes into account whether TF_SACK flag is set, + * the number of SACK entries in tcp_pcb that are valid, + * as well as the available options size. + * + * @param pcb tcp_pcb + * @param optlen the length of other TCP options (in bytes) + * @return the number of SACK ranges that can be used + */ +static u8_t +tcp_get_num_sacks(const struct tcp_pcb *pcb, u8_t optlen) +{ + u8_t num_sacks = 0; + + LWIP_ASSERT("tcp_get_num_sacks: invalid pcb", pcb != NULL); + + if (pcb->flags & TF_SACK) { + u8_t i; + + /* The first SACK takes up 12 bytes (it includes SACK header and two NOP options), + each additional one - 8 bytes. */ + optlen += 12; + + /* Max options size = 40, number of SACK array entries = LWIP_TCP_MAX_SACK_NUM */ + for (i = 0; (i < LWIP_TCP_MAX_SACK_NUM) && (optlen <= TCP_MAX_OPTION_BYTES) && + LWIP_TCP_SACK_VALID(pcb, i); ++i) { + ++num_sacks; + optlen += 8; + } + } + + return num_sacks; +} + +/** Build a SACK option (12 or more bytes long) at the specified options pointer) + * + * @param pcb tcp_pcb + * @param opts option pointer where to store the SACK option + * @param num_sacks the number of SACKs to store + */ +static void +tcp_build_sack_option(const struct tcp_pcb *pcb, u32_t *opts, u8_t num_sacks) +{ + u8_t i; + + LWIP_ASSERT("tcp_build_sack_option: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_build_sack_option: invalid opts", opts != NULL); + + /* Pad with two NOP options to make everything nicely aligned. + We add the length (of just the SACK option, not the NOPs in front of it), + which is 2B of header, plus 8B for each SACK. */ + *(opts++) = PP_HTONL(0x01010500 + 2 + num_sacks * 8); + + for (i = 0; i < num_sacks; ++i) { + *(opts++) = lwip_htonl(pcb->rcv_sacks[i].left); + *(opts++) = lwip_htonl(pcb->rcv_sacks[i].right); + } +} + +#endif + +#if LWIP_WND_SCALE +/** Build a window scale option (3 bytes long) at the specified options pointer) + * + * @param opts option pointer where to store the window scale option + */ +static void +tcp_build_wnd_scale_option(u32_t *opts) +{ + LWIP_ASSERT("tcp_build_wnd_scale_option: invalid opts", opts != NULL); + + /* Pad with one NOP option to make everything nicely aligned */ + opts[0] = PP_HTONL(0x01030300 | TCP_RCV_SCALE); +} +#endif + +/** + * @ingroup tcp_raw + * Find out what we can send and send it + * + * @param pcb Protocol control block for the TCP connection to send data + * @return ERR_OK if data has been sent or nothing to send + * another err_t on error + */ +err_t +tcp_output(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg, *useg; + u32_t wnd, snd_nxt; + err_t err; + struct netif *netif; +#if TCP_CWND_DEBUG + s16_t i = 0; +#endif /* TCP_CWND_DEBUG */ + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("tcp_output: invalid pcb", pcb != NULL); + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_output for listen-pcbs", + pcb->state != LISTEN); + + /* First, check if we are invoked by the TCP input processing + code. If so, we do not output anything. Instead, we rely on the + input processing code to call us when input processing is done + with. 
*/ + if (tcp_input_pcb == pcb) { + return ERR_OK; + } + + wnd = LWIP_MIN(pcb->snd_wnd, pcb->cwnd); + + seg = pcb->unsent; + + if (seg == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: nothing to send (%p)\n", + (void *)pcb->unsent)); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F + ", cwnd %"TCPWNDSIZE_F", wnd %"U32_F + ", seg == NULL, ack %"U32_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, pcb->lastack)); + + /* If the TF_ACK_NOW flag is set and the ->unsent queue is empty, construct + * an empty ACK segment and send it. */ + if (pcb->flags & TF_ACK_NOW) { + return tcp_send_empty_ack(pcb); + } + /* nothing to send: shortcut out of here */ + goto output_done; + } else { + LWIP_DEBUGF(TCP_CWND_DEBUG, + ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F + ", effwnd %"U32_F", seq %"U32_F", ack %"U32_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, + lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len, + lwip_ntohl(seg->tcphdr->seqno), pcb->lastack)); + } + + netif = tcp_route(pcb, &pcb->local_ip, &pcb->remote_ip); + if (netif == NULL) { + return ERR_RTE; + } + + /* If we don't have a local IP address, we get one from netif */ + if (ip_addr_isany(&pcb->local_ip)) { + const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, &pcb->remote_ip); + if (local_ip == NULL) { + return ERR_RTE; + } + ip_addr_copy(pcb->local_ip, *local_ip); + } + + /* Handle the current segment not fitting within the window */ + if (lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd) { + /* We need to start the persistent timer when the next unsent segment does not fit + * within the remaining (could be 0) send window and RTO timer is not running (we + * have no in-flight data). If window is still too small after persist timer fires, + * then we split the segment. We don't consider the congestion window since a cwnd + * smaller than 1 SMSS implies in-flight data + */ + if (wnd == pcb->snd_wnd && pcb->unacked == NULL && pcb->persist_backoff == 0) { + pcb->persist_cnt = 0; + pcb->persist_backoff = 1; + pcb->persist_probe = 0; + } + /* We need an ACK, but can't send data now, so send an empty ACK */ + if (pcb->flags & TF_ACK_NOW) { + return tcp_send_empty_ack(pcb); + } + goto output_done; + } + /* Stop persist timer, above conditions are not active */ + pcb->persist_backoff = 0; + + /* useg should point to last segment on unacked queue */ + useg = pcb->unacked; + if (useg != NULL) { + for (; useg->next != NULL; useg = useg->next); + } + /* data available and window allows it to be sent? */ + while (seg != NULL && + lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len <= wnd) { + LWIP_ASSERT("RST not expected here!", + (TCPH_FLAGS(seg->tcphdr) & TCP_RST) == 0); + /* Stop sending if the nagle algorithm would prevent it + * Don't stop: + * - if tcp_write had a memory error before (prevent delayed ACK timeout) or + * - if FIN was already enqueued for this PCB (SYN is always alone in a segment - + * either seg->next != NULL or pcb->unacked == NULL; + * RST is no sent using tcp_write/tcp_output. 
+ */ + if ((tcp_do_output_nagle(pcb) == 0) && + ((pcb->flags & (TF_NAGLEMEMERR | TF_FIN)) == 0)) { + break; + } +#if TCP_CWND_DEBUG + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F", effwnd %"U32_F", seq %"U32_F", ack %"U32_F", i %"S16_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, + lwip_ntohl(seg->tcphdr->seqno) + seg->len - + pcb->lastack, + lwip_ntohl(seg->tcphdr->seqno), pcb->lastack, i)); + ++i; +#endif /* TCP_CWND_DEBUG */ + + if (pcb->state != SYN_SENT) { + TCPH_SET_FLAG(seg->tcphdr, TCP_ACK); + } + + err = tcp_output_segment(seg, pcb, netif); + if (err != ERR_OK) { + /* segment could not be sent, for whatever reason */ + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return err; + } +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ + pcb->unsent = seg->next; + if (pcb->state != SYN_SENT) { + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg); + if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) { + pcb->snd_nxt = snd_nxt; + } + /* put segment on unacknowledged list if length > 0 */ + if (TCP_TCPLEN(seg) > 0) { + seg->next = NULL; + /* unacked list is empty? */ + if (pcb->unacked == NULL) { + pcb->unacked = seg; + useg = seg; + /* unacked list is not empty? */ + } else { + /* In the case of fast retransmit, the packet should not go to the tail + * of the unacked queue, but rather somewhere before it. We need to check for + * this case. -STJ Jul 27, 2004 */ + if (TCP_SEQ_LT(lwip_ntohl(seg->tcphdr->seqno), lwip_ntohl(useg->tcphdr->seqno))) { + /* add segment to before tail of unacked list, keeping the list sorted */ + struct tcp_seg **cur_seg = &(pcb->unacked); + while (*cur_seg && + TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) { + cur_seg = &((*cur_seg)->next ); + } + seg->next = (*cur_seg); + (*cur_seg) = seg; + } else { + /* add segment to tail of unacked list */ + useg->next = seg; + useg = useg->next; + } + } + /* do not queue empty segments on the unacked list */ + } else { + tcp_seg_free(seg); + } + seg = pcb->unsent; + } +#if TCP_OVERSIZE + if (pcb->unsent == NULL) { + /* last unsent has been removed, reset unsent_oversize */ + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + +output_done: + tcp_clear_flags(pcb, TF_NAGLEMEMERR); + return ERR_OK; +} + +/** Check if a segment's pbufs are used by someone else than TCP. + * This can happen on retransmission if the pbuf of this segment is still + * referenced by the netif driver due to deferred transmission. + * This is the case (only!) if someone down the TX call path called + * pbuf_ref() on one of the pbufs! + * + * @arg seg the tcp segment to check + * @return 1 if ref != 1, 0 if ref == 1 + */ +static int +tcp_output_segment_busy(const struct tcp_seg *seg) +{ + LWIP_ASSERT("tcp_output_segment_busy: invalid seg", seg != NULL); + + /* We only need to check the first pbuf here: + If a pbuf is queued for transmission, a driver calls pbuf_ref(), + which only changes the ref count of the first pbuf */ + if (seg->p->ref != 1) { + /* other reference found */ + return 1; + } + /* no other references found */ + return 0; +} + +/** + * Called by tcp_output() to actually send a TCP segment over IP. 
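+ *
+ * The calling pattern in tcp_output() above is roughly this (simplified
+ * sketch for orientation only; the real loop also updates snd_nxt and the
+ * unacked queue):
+ *
+ *   err = tcp_output_segment(seg, pcb, netif);
+ *   if (err != ERR_OK) {
+ *     tcp_set_flags(pcb, TF_NAGLEMEMERR);  // keep the segment queued, retry later
+ *     return err;
+ *   }
+ *   pcb->unsent = seg->next;               // segment leaves the unsent queue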
+ * + * @param seg the tcp_seg to send + * @param pcb the tcp_pcb for the TCP connection used to send the segment + * @param netif the netif used to send the segment + */ +static err_t +tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif) +{ + err_t err; + u16_t len; + u32_t *opts; +#if TCP_CHECKSUM_ON_COPY + int seg_chksum_was_swapped = 0; +#endif + + LWIP_ASSERT("tcp_output_segment: invalid seg", seg != NULL); + LWIP_ASSERT("tcp_output_segment: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_output_segment: invalid netif", netif != NULL); + + if (tcp_output_segment_busy(seg)) { + /* This should not happen: rexmit functions should have checked this. + However, since this function modifies p->len, we must not continue in this case. */ + LWIP_DEBUGF(TCP_RTO_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_output_segment: segment busy\n")); + return ERR_OK; + } + + /* The TCP header has already been constructed, but the ackno and + wnd fields remain. */ + seg->tcphdr->ackno = lwip_htonl(pcb->rcv_nxt); + + /* advertise our receive window size in this TCP segment */ +#if LWIP_WND_SCALE + if (seg->flags & TF_SEG_OPTS_WND_SCALE) { + /* The Window field in a SYN segment itself (the only type where we send + the window scale option) is never scaled. */ + seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(pcb->rcv_ann_wnd)); + } else +#endif /* LWIP_WND_SCALE */ + { + seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd))); + } + + pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd; + + /* Add any requested options. NB MSS option is only set on SYN + packets, so ignore it here */ + /* cast through void* to get rid of alignment warnings */ + opts = (u32_t *)(void *)(seg->tcphdr + 1); + if (seg->flags & TF_SEG_OPTS_MSS) { + u16_t mss; +#if TCP_CALCULATE_EFF_SEND_MSS + mss = tcp_eff_send_mss_netif(TCP_MSS, netif, &pcb->remote_ip); +#else /* TCP_CALCULATE_EFF_SEND_MSS */ + mss = TCP_MSS; +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + *opts = TCP_BUILD_MSS_OPTION(mss); + opts += 1; + } +#if LWIP_TCP_TIMESTAMPS + pcb->ts_lastacksent = pcb->rcv_nxt; + + if (seg->flags & TF_SEG_OPTS_TS) { + tcp_build_timestamp_option(pcb, opts); + opts += 3; + } +#endif +#if LWIP_WND_SCALE + if (seg->flags & TF_SEG_OPTS_WND_SCALE) { + tcp_build_wnd_scale_option(opts); + opts += 1; + } +#endif +#if LWIP_TCP_SACK_OUT + if (seg->flags & TF_SEG_OPTS_SACK_PERM) { + /* Pad with two NOP options to make everything nicely aligned + * NOTE: When we send both timestamp and SACK_PERM options, + * we could use the first two NOPs before the timestamp to store SACK_PERM option, + * but that would complicate the code. + */ + *(opts++) = PP_HTONL(0x01010402); + } +#endif + + /* Set retransmission timer running if it is not currently enabled + This must be set before checking the route. */ + if (pcb->rtime < 0) { + pcb->rtime = 0; + } + + if (pcb->rttest == 0) { + pcb->rttest = tcp_ticks; + pcb->rtseq = lwip_ntohl(seg->tcphdr->seqno); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_output_segment: rtseq %"U32_F"\n", pcb->rtseq)); + } + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output_segment: %"U32_F":%"U32_F"\n", + lwip_htonl(seg->tcphdr->seqno), lwip_htonl(seg->tcphdr->seqno) + + seg->len)); + + len = (u16_t)((u8_t *)seg->tcphdr - (u8_t *)seg->p->payload); + if (len == 0) { + /** Exclude retransmitted segments from this count. 
*/ + MIB2_STATS_INC(mib2.tcpoutsegs); + } + + seg->p->len -= len; + seg->p->tot_len -= len; + + seg->p->payload = seg->tcphdr; + + seg->tcphdr->chksum = 0; + +#ifdef LWIP_HOOK_TCP_OUT_ADD_TCPOPTS + opts = LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(seg->p, seg->tcphdr, pcb, opts); +#endif + LWIP_ASSERT("options not filled", (u8_t *)opts == ((u8_t *)(seg->tcphdr + 1)) + LWIP_TCP_OPT_LENGTH_SEGMENT(seg->flags, pcb)); + +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { +#if TCP_CHECKSUM_ON_COPY + u32_t acc; +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK + u16_t chksum_slow = ip_chksum_pseudo(seg->p, IP_PROTO_TCP, + seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */ + if ((seg->flags & TF_SEG_DATA_CHECKSUMMED) == 0) { + LWIP_ASSERT("data included but not checksummed", + seg->p->tot_len == TCPH_HDRLEN_BYTES(seg->tcphdr)); + } + + /* rebuild TCP header checksum (TCP header changes for retransmissions!) */ + acc = ip_chksum_pseudo_partial(seg->p, IP_PROTO_TCP, + seg->p->tot_len, TCPH_HDRLEN_BYTES(seg->tcphdr), &pcb->local_ip, &pcb->remote_ip); + /* add payload checksum */ + if (seg->chksum_swapped) { + seg_chksum_was_swapped = 1; + seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum); + seg->chksum_swapped = 0; + } + acc = (u16_t)~acc + seg->chksum; + seg->tcphdr->chksum = (u16_t)~FOLD_U32T(acc); +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK + if (chksum_slow != seg->tcphdr->chksum) { + TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL( + ("tcp_output_segment: calculated checksum is %"X16_F" instead of %"X16_F"\n", + seg->tcphdr->chksum, chksum_slow)); + seg->tcphdr->chksum = chksum_slow; + } +#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */ +#else /* TCP_CHECKSUM_ON_COPY */ + seg->tcphdr->chksum = ip_chksum_pseudo(seg->p, IP_PROTO_TCP, + seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CHECKSUM_ON_COPY */ + } +#endif /* CHECKSUM_GEN_TCP */ + TCP_STATS_INC(tcp.xmit); + + NETIF_SET_HINTS(netif, &(pcb->netif_hints)); + err = ip_output_if(seg->p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, + pcb->tos, IP_PROTO_TCP, netif); + NETIF_RESET_HINTS(netif); + +#if TCP_CHECKSUM_ON_COPY + if (seg_chksum_was_swapped) { + /* if data is added to this segment later, chksum needs to be swapped, + so restore this now */ + seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum); + seg->chksum_swapped = 1; + } +#endif + + return err; +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_slowtmr() for slow retransmission. + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +err_t +tcp_rexmit_rto_prepare(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg; + + LWIP_ASSERT("tcp_rexmit_rto_prepare: invalid pcb", pcb != NULL); + + if (pcb->unacked == NULL) { + return ERR_VAL; + } + + /* Move all unacked segments to the head of the unsent queue. + However, give up if any of the unsent pbufs are still referenced by the + netif driver due to deferred transmission. No point loading the link further + if it is struggling to flush its buffered writes. 
*/ + for (seg = pcb->unacked; seg->next != NULL; seg = seg->next) { + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit_rto: segment busy\n")); + return ERR_VAL; + } + } + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit_rto: segment busy\n")); + return ERR_VAL; + } + /* concatenate unsent queue after unacked queue */ + seg->next = pcb->unsent; +#if TCP_OVERSIZE_DBGCHECK + /* if last unsent changed, we need to update unsent_oversize */ + if (pcb->unsent == NULL) { + pcb->unsent_oversize = seg->oversize_left; + } +#endif /* TCP_OVERSIZE_DBGCHECK */ + /* unsent queue is the concatenated queue (of unacked, unsent) */ + pcb->unsent = pcb->unacked; + /* unacked queue is now empty */ + pcb->unacked = NULL; + + /* Mark RTO in-progress */ + tcp_set_flags(pcb, TF_RTO); + /* Record the next byte following retransmit */ + pcb->rto_end = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg); + /* Don't take any RTT measurements after retransmitting. */ + pcb->rttest = 0; + + return ERR_OK; +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_slowtmr() for slow retransmission. + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +void +tcp_rexmit_rto_commit(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_rto_commit: invalid pcb", pcb != NULL); + + /* increment number of retransmissions */ + if (pcb->nrtx < 0xFF) { + ++pcb->nrtx; + } + /* Do the actual retransmission */ + tcp_output(pcb); +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_process() only, tcp_slowtmr() needs to do some things between + * "prepare" and "commit". + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +void +tcp_rexmit_rto(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_rto: invalid pcb", pcb != NULL); + + if (tcp_rexmit_rto_prepare(pcb) == ERR_OK) { + tcp_rexmit_rto_commit(pcb); + } +} + +/** + * Requeue the first unacked segment for retransmission + * + * Called by tcp_receive() for fast retransmit. + * + * @param pcb the tcp_pcb for which to retransmit the first unacked segment + */ +err_t +tcp_rexmit(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg; + struct tcp_seg **cur_seg; + + LWIP_ASSERT("tcp_rexmit: invalid pcb", pcb != NULL); + + if (pcb->unacked == NULL) { + return ERR_VAL; + } + + seg = pcb->unacked; + + /* Give up if the segment is still referenced by the netif driver + due to deferred transmission. */ + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit busy\n")); + return ERR_VAL; + } + + /* Move the first unacked segment to the unsent queue */ + /* Keep the unsent queue sorted. */ + pcb->unacked = seg->next; + + cur_seg = &(pcb->unsent); + while (*cur_seg && + TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) { + cur_seg = &((*cur_seg)->next ); + } + seg->next = *cur_seg; + *cur_seg = seg; +#if TCP_OVERSIZE + if (seg->next == NULL) { + /* the retransmitted segment is last in unsent, so reset unsent_oversize */ + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + + if (pcb->nrtx < 0xFF) { + ++pcb->nrtx; + } + + /* Don't take any rtt measurements after retransmitting. */ + pcb->rttest = 0; + + /* Do the actual retransmission. */ + MIB2_STATS_INC(mib2.tcpretranssegs); + /* No need to call tcp_output: we are always called from tcp_input() + and thus tcp_output directly returns. 
*/ + return ERR_OK; +} + + +/** + * Handle retransmission after three dupacks received + * + * @param pcb the tcp_pcb for which to retransmit the first unacked segment + */ +void +tcp_rexmit_fast(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_fast: invalid pcb", pcb != NULL); + + if (pcb->unacked != NULL && !(pcb->flags & TF_INFR)) { + /* This is fast retransmit. Retransmit the first unacked segment. */ + LWIP_DEBUGF(TCP_FR_DEBUG, + ("tcp_receive: dupacks %"U16_F" (%"U32_F + "), fast retransmit %"U32_F"\n", + (u16_t)pcb->dupacks, pcb->lastack, + lwip_ntohl(pcb->unacked->tcphdr->seqno))); + if (tcp_rexmit(pcb) == ERR_OK) { + /* Set ssthresh to half of the minimum of the current + * cwnd and the advertised window */ + pcb->ssthresh = LWIP_MIN(pcb->cwnd, pcb->snd_wnd) / 2; + + /* The minimum value for ssthresh should be 2 MSS */ + if (pcb->ssthresh < (2U * pcb->mss)) { + LWIP_DEBUGF(TCP_FR_DEBUG, + ("tcp_receive: The minimum value for ssthresh %"TCPWNDSIZE_F + " should be min 2 mss %"U16_F"...\n", + pcb->ssthresh, (u16_t)(2 * pcb->mss))); + pcb->ssthresh = 2 * pcb->mss; + } + + pcb->cwnd = pcb->ssthresh + 3 * pcb->mss; + tcp_set_flags(pcb, TF_INFR); + + /* Reset the retransmission timer to prevent immediate rto retransmissions */ + pcb->rtime = 0; + } + } +} + +static struct pbuf * +tcp_output_alloc_header_common(u32_t ackno, u16_t optlen, u16_t datalen, + u32_t seqno_be /* already in network byte order */, + u16_t src_port, u16_t dst_port, u8_t flags, u16_t wnd) +{ + struct tcp_hdr *tcphdr; + struct pbuf *p; + + p = pbuf_alloc(PBUF_IP, TCP_HLEN + optlen + datalen, PBUF_RAM); + if (p != NULL) { + LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr", + (p->len >= TCP_HLEN + optlen)); + tcphdr = (struct tcp_hdr *)p->payload; + tcphdr->src = lwip_htons(src_port); + tcphdr->dest = lwip_htons(dst_port); + tcphdr->seqno = seqno_be; + tcphdr->ackno = lwip_htonl(ackno); + TCPH_HDRLEN_FLAGS_SET(tcphdr, (5 + optlen / 4), flags); + tcphdr->wnd = lwip_htons(wnd); + tcphdr->chksum = 0; + tcphdr->urgp = 0; + } + return p; +} + +/** Allocate a pbuf and create a tcphdr at p->payload, used for output + * functions other than the default tcp_output -> tcp_output_segment + * (e.g. tcp_send_empty_ack, etc.) + * + * @param pcb tcp pcb for which to send a packet (used to initialize tcp_hdr) + * @param optlen length of header-options + * @param datalen length of tcp data to reserve in pbuf + * @param seqno_be seqno in network byte order (big-endian) + * @return pbuf with p->payload being the tcp_hdr + */ +static struct pbuf * +tcp_output_alloc_header(struct tcp_pcb *pcb, u16_t optlen, u16_t datalen, + u32_t seqno_be /* already in network byte order */) +{ + struct pbuf *p; + + LWIP_ASSERT("tcp_output_alloc_header: invalid pcb", pcb != NULL); + + p = tcp_output_alloc_header_common(pcb->rcv_nxt, optlen, datalen, + seqno_be, pcb->local_port, pcb->remote_port, TCP_ACK, + TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd))); + if (p != NULL) { + /* If we're sending a packet, update the announced right window edge */ + pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd; + } + return p; +} + +/* Fill in options for control segments */ +static void +tcp_output_fill_options(const struct tcp_pcb *pcb, struct pbuf *p, u8_t optflags, u8_t num_sacks) +{ + struct tcp_hdr *tcphdr; + u32_t *opts; + u16_t sacks_len = 0; + + LWIP_ASSERT("tcp_output_fill_options: invalid pbuf", p != NULL); + + tcphdr = (struct tcp_hdr *)p->payload; + opts = (u32_t *)(void *)(tcphdr + 1); + + /* NB. 
MSS and window scale options are only sent on SYNs, so ignore them here */ + +#if LWIP_TCP_TIMESTAMPS + if (optflags & TF_SEG_OPTS_TS) { + tcp_build_timestamp_option(pcb, opts); + opts += 3; + } +#endif + +#if LWIP_TCP_SACK_OUT + if (pcb && (num_sacks > 0)) { + tcp_build_sack_option(pcb, opts, num_sacks); + /* 1 word for SACKs header (including 2xNOP), and 2 words for each SACK */ + sacks_len = 1 + num_sacks * 2; + opts += sacks_len; + } +#else + LWIP_UNUSED_ARG(num_sacks); +#endif + +#ifdef LWIP_HOOK_TCP_OUT_ADD_TCPOPTS + opts = LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(p, tcphdr, pcb, opts); +#endif + + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(sacks_len); + LWIP_ASSERT("options not filled", (u8_t *)opts == ((u8_t *)(tcphdr + 1)) + sacks_len * 4 + LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb)); + LWIP_UNUSED_ARG(optflags); /* for LWIP_NOASSERT */ + LWIP_UNUSED_ARG(opts); /* for LWIP_NOASSERT */ +} + +/** Output a control segment pbuf to IP. + * + * Called from tcp_rst, tcp_send_empty_ack, tcp_keepalive and tcp_zero_window_probe, + * this function combines selecting a netif for transmission, generating the tcp + * header checksum and calling ip_output_if while handling netif hints and stats. + */ +static err_t +tcp_output_control_segment(const struct tcp_pcb *pcb, struct pbuf *p, + const ip_addr_t *src, const ip_addr_t *dst) +{ + err_t err; + struct netif *netif; + + LWIP_ASSERT("tcp_output_control_segment: invalid pbuf", p != NULL); + + netif = tcp_route(pcb, src, dst); + if (netif == NULL) { + err = ERR_RTE; + } else { + u8_t ttl, tos; +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { + struct tcp_hdr *tcphdr = (struct tcp_hdr *)p->payload; + tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + src, dst); + } +#endif + if (pcb != NULL) { + NETIF_SET_HINTS(netif, LWIP_CONST_CAST(struct netif_hint*, &(pcb->netif_hints))); + ttl = pcb->ttl; + tos = pcb->tos; + } else { + /* Send output with hardcoded TTL/HL since we have no access to the pcb */ + ttl = TCP_TTL; + tos = 0; + } + TCP_STATS_INC(tcp.xmit); + err = ip_output_if(p, src, dst, ttl, tos, IP_PROTO_TCP, netif); + NETIF_RESET_HINTS(netif); + } + pbuf_free(p); + return err; +} + +/** + * Send a TCP RESET packet (empty segment with RST flag set) either to + * abort a connection or to show that there is no matching local connection + * for a received segment. + * + * Called by tcp_abort() (to abort a local connection), tcp_input() (if no + * matching local pcb was found), tcp_listen_input() (if incoming segment + * has ACK flag set) and tcp_process() (received segment in the wrong state) + * + * Since a RST segment is in most cases not sent for an active connection, + * tcp_rst() has a number of arguments that are taken from a tcp_pcb for + * most other segment output functions. 
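+ *
+ * A typical call site looks like this (hedged sketch modelled on the
+ * "no matching pcb" path in tcp_input(); the variable names are
+ * illustrative, not part of this file):
+ *
+ *   tcp_rst(NULL, ackno, seqno + tcplen,
+ *           ip_current_dest_addr(), ip_current_src_addr(),
+ *           tcphdr->dest, tcphdr->src);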
+ * + * @param pcb TCP pcb (may be NULL if no pcb is available) + * @param seqno the sequence number to use for the outgoing segment + * @param ackno the acknowledge number to use for the outgoing segment + * @param local_ip the local IP address to send the segment from + * @param remote_ip the remote IP address to send the segment to + * @param local_port the local TCP port to send the segment from + * @param remote_port the remote TCP port to send the segment to + */ +void +tcp_rst(const struct tcp_pcb *pcb, u32_t seqno, u32_t ackno, + const ip_addr_t *local_ip, const ip_addr_t *remote_ip, + u16_t local_port, u16_t remote_port) +{ + struct pbuf *p; + u16_t wnd; + u8_t optlen; + + LWIP_ASSERT("tcp_rst: invalid local_ip", local_ip != NULL); + LWIP_ASSERT("tcp_rst: invalid remote_ip", remote_ip != NULL); + + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + +#if LWIP_WND_SCALE + wnd = PP_HTONS(((TCP_WND >> TCP_RCV_SCALE) & 0xFFFF)); +#else + wnd = PP_HTONS(TCP_WND); +#endif + + p = tcp_output_alloc_header_common(ackno, optlen, 0, lwip_htonl(seqno), local_port, + remote_port, TCP_RST | TCP_ACK, wnd); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_rst: could not allocate memory for pbuf\n")); + return; + } + tcp_output_fill_options(pcb, p, 0, optlen); + + MIB2_STATS_INC(mib2.tcpoutrsts); + + tcp_output_control_segment(pcb, p, local_ip, remote_ip); + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_rst: seqno %"U32_F" ackno %"U32_F".\n", seqno, ackno)); +} + +/** + * Send an ACK without data. + * + * @param pcb Protocol control block for the TCP connection to send the ACK + */ +err_t +tcp_send_empty_ack(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + u8_t optlen, optflags = 0; + u8_t num_sacks = 0; + + LWIP_ASSERT("tcp_send_empty_ack: invalid pcb", pcb != NULL); + +#if LWIP_TCP_TIMESTAMPS + if (pcb->flags & TF_TIMESTAMP) { + optflags = TF_SEG_OPTS_TS; + } +#endif + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + +#if LWIP_TCP_SACK_OUT + /* For now, SACKs are only sent with empty ACKs */ + if ((num_sacks = tcp_get_num_sacks(pcb, optlen)) > 0) { + optlen += 4 + num_sacks * 8; /* 4 bytes for header (including 2*NOP), plus 8B for each SACK */ + } +#endif + + p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt)); + if (p == NULL) { + /* let tcp_fasttmr retry sending this ACK */ + tcp_set_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: (ACK) could not allocate pbuf\n")); + return ERR_BUF; + } + tcp_output_fill_options(pcb, p, optflags, num_sacks); + +#if LWIP_TCP_TIMESTAMPS + pcb->ts_lastacksent = pcb->rcv_nxt; +#endif + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, + ("tcp_output: sending ACK for %"U32_F"\n", pcb->rcv_nxt)); + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + if (err != ERR_OK) { + /* let tcp_fasttmr retry sending this ACK */ + tcp_set_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } else { + /* remove ACK flags from the PCB, as we sent an empty ACK now */ + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + + return err; +} + +/** + * Send keepalive packets to keep a connection active although + * no data is sent over it. 
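+ *
+ * Applications merely opt in; a hedged sketch of enabling keepalive on a
+ * raw-API pcb (keep_intvl/keep_cnt are only available with
+ * LWIP_TCP_KEEPALIVE; the values below are examples, not defaults):
+ *
+ *   ip_set_option(pcb, SOF_KEEPALIVE);  // enable keepalive probes on this pcb
+ *   pcb->keep_idle  = 60000;            // ms of idle time before the first probe
+ *   pcb->keep_intvl = 10000;            // ms between probes
+ *   pcb->keep_cnt   = 4;                // unanswered probes before the pcb is aborted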
+ * + * Called by tcp_slowtmr() + * + * @param pcb the tcp_pcb for which to send a keepalive packet + */ +err_t +tcp_keepalive(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + u8_t optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + + LWIP_ASSERT("tcp_keepalive: invalid pcb", pcb != NULL); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: sending KEEPALIVE probe to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: tcp_ticks %"U32_F" pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n", + tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent)); + + p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt - 1)); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, + ("tcp_keepalive: could not allocate memory for pbuf\n")); + return ERR_MEM; + } + tcp_output_fill_options(pcb, p, 0, optlen); + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: seqno %"U32_F" ackno %"U32_F" err %d.\n", + pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err)); + return err; +} + +/** + * Send persist timer zero-window probes to keep a connection active + * when a window update is lost. + * + * Called by tcp_slowtmr() + * + * @param pcb the tcp_pcb for which to send a zero-window probe packet + */ +err_t +tcp_zero_window_probe(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + struct tcp_hdr *tcphdr; + struct tcp_seg *seg; + u16_t len; + u8_t is_fin; + u32_t snd_nxt; + u8_t optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + + LWIP_ASSERT("tcp_zero_window_probe: invalid pcb", pcb != NULL); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: sending ZERO WINDOW probe to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + LWIP_DEBUGF(TCP_DEBUG, + ("tcp_zero_window_probe: tcp_ticks %"U32_F + " pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n", + tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent)); + + /* Only consider unsent, persist timer should be off when there is data in-flight */ + seg = pcb->unsent; + if (seg == NULL) { + /* Not expected, persist timer should be off when the send buffer is empty */ + return ERR_OK; + } + + /* increment probe count. NOTE: we record probe even if it fails + to actually transmit due to an error. This ensures memory exhaustion/ + routing problem doesn't leave a zero-window pcb as an indefinite zombie. + RTO mechanism has similar behavior, see pcb->nrtx */ + if (pcb->persist_probe < 0xFF) { + ++pcb->persist_probe; + } + + is_fin = ((TCPH_FLAGS(seg->tcphdr) & TCP_FIN) != 0) && (seg->len == 0); + /* we want to send one seqno: either FIN or data (no options) */ + len = is_fin ? 0 : 1; + + p = tcp_output_alloc_header(pcb, optlen, len, seg->tcphdr->seqno); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: no memory for pbuf\n")); + return ERR_MEM; + } + tcphdr = (struct tcp_hdr *)p->payload; + + if (is_fin) { + /* FIN segment, no data */ + TCPH_FLAGS_SET(tcphdr, TCP_ACK | TCP_FIN); + } else { + /* Data segment, copy in one byte from the head of the unacked queue */ + char *d = ((char *)p->payload + TCP_HLEN); + /* Depending on whether the segment has already been sent (unacked) or not + (unsent), seg->p->payload points to the IP header or TCP header. + Ensure we copy the first TCP data byte: */ + pbuf_copy_partial(seg->p, d, 1, seg->p->tot_len - seg->len); + } + + /* The byte may be acknowledged without the window being opened. 
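+     Advancing pcb->snd_nxt just below keeps the send-sequence bookkeeping
+     consistent in case the peer does ACK the probe byte.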
*/ + snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + 1; + if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) { + pcb->snd_nxt = snd_nxt; + } + tcp_output_fill_options(pcb, p, 0, optlen); + + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: seqno %"U32_F + " ackno %"U32_F" err %d.\n", + pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err)); + return err; +} +#endif /* LWIP_TCP */ diff --git a/Middlewares/Third_Party/LwIP/src/core/timeouts.c b/Middlewares/Third_Party/LwIP/src/core/timeouts.c new file mode 100644 index 00000000..f37acfec --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/timeouts.c @@ -0,0 +1,451 @@ +/** + * @file + * Stack-internal timers implementation. + * This file includes timer callbacks for stack-internal timers as well as + * functions to set up or stop timers and check for expired timers. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#include "lwip/timeouts.h" +#include "lwip/priv/tcp_priv.h" + +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/priv/tcpip_priv.h" + +#include "lwip/ip4_frag.h" +#include "lwip/etharp.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/nd6.h" +#include "lwip/ip6_frag.h" +#include "lwip/mld6.h" +#include "lwip/dhcp6.h" +#include "lwip/sys.h" +#include "lwip/pbuf.h" + +#if LWIP_DEBUG_TIMERNAMES +#define HANDLER(x) x, #x +#else /* LWIP_DEBUG_TIMERNAMES */ +#define HANDLER(x) x +#endif /* LWIP_DEBUG_TIMERNAMES */ + +#define LWIP_MAX_TIMEOUT 0x7fffffff + +/* Check if timer's expiry time is greater than time and care about u32_t wraparounds */ +#define TIME_LESS_THAN(t, compare_to) ( (((u32_t)((t)-(compare_to))) > LWIP_MAX_TIMEOUT) ? 1 : 0 ) + +/** This array contains all stack-internal cyclic timers. 
To get the number of + * timers, use LWIP_ARRAYSIZE() */ +const struct lwip_cyclic_timer lwip_cyclic_timers[] = { +#if LWIP_TCP + /* The TCP timer is a special case: it does not have to run always and + is triggered to start from TCP using tcp_timer_needed() */ + {TCP_TMR_INTERVAL, HANDLER(tcp_tmr)}, +#endif /* LWIP_TCP */ +#if LWIP_IPV4 +#if IP_REASSEMBLY + {IP_TMR_INTERVAL, HANDLER(ip_reass_tmr)}, +#endif /* IP_REASSEMBLY */ +#if LWIP_ARP + {ARP_TMR_INTERVAL, HANDLER(etharp_tmr)}, +#endif /* LWIP_ARP */ +#if LWIP_DHCP + {DHCP_COARSE_TIMER_MSECS, HANDLER(dhcp_coarse_tmr)}, + {DHCP_FINE_TIMER_MSECS, HANDLER(dhcp_fine_tmr)}, +#endif /* LWIP_DHCP */ +#if LWIP_AUTOIP + {AUTOIP_TMR_INTERVAL, HANDLER(autoip_tmr)}, +#endif /* LWIP_AUTOIP */ +#if LWIP_IGMP + {IGMP_TMR_INTERVAL, HANDLER(igmp_tmr)}, +#endif /* LWIP_IGMP */ +#endif /* LWIP_IPV4 */ +#if LWIP_DNS + {DNS_TMR_INTERVAL, HANDLER(dns_tmr)}, +#endif /* LWIP_DNS */ +#if LWIP_IPV6 + {ND6_TMR_INTERVAL, HANDLER(nd6_tmr)}, +#if LWIP_IPV6_REASS + {IP6_REASS_TMR_INTERVAL, HANDLER(ip6_reass_tmr)}, +#endif /* LWIP_IPV6_REASS */ +#if LWIP_IPV6_MLD + {MLD6_TMR_INTERVAL, HANDLER(mld6_tmr)}, +#endif /* LWIP_IPV6_MLD */ +#if LWIP_IPV6_DHCP6 + {DHCP6_TIMER_MSECS, HANDLER(dhcp6_tmr)}, +#endif /* LWIP_IPV6_DHCP6 */ +#endif /* LWIP_IPV6 */ +}; +const int lwip_num_cyclic_timers = LWIP_ARRAYSIZE(lwip_cyclic_timers); + +#if LWIP_TIMERS && !LWIP_TIMERS_CUSTOM + +/** The one and only timeout list */ +static struct sys_timeo *next_timeout; + +static u32_t current_timeout_due_time; + +#if LWIP_TESTMODE +struct sys_timeo** +sys_timeouts_get_next_timeout(void) +{ + return &next_timeout; +} +#endif + +#if LWIP_TCP +/** global variable that shows if the tcp timer is currently scheduled or not */ +static int tcpip_tcp_timer_active; + +/** + * Timer callback function that calls tcp_tmr() and reschedules itself. + * + * @param arg unused argument + */ +static void +tcpip_tcp_timer(void *arg) +{ + LWIP_UNUSED_ARG(arg); + + /* call TCP timer handler */ + tcp_tmr(); + /* timer still needed? */ + if (tcp_active_pcbs || tcp_tw_pcbs) { + /* restart timer */ + sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL); + } else { + /* disable timer */ + tcpip_tcp_timer_active = 0; + } +} + +/** + * Called from TCP_REG when registering a new PCB: + * the reason is to have the TCP timer only running when + * there are active (or time-wait) PCBs. + */ +void +tcp_timer_needed(void) +{ + LWIP_ASSERT_CORE_LOCKED(); + + /* timer is off but needed again? 
*/ + if (!tcpip_tcp_timer_active && (tcp_active_pcbs || tcp_tw_pcbs)) { + /* enable and start timer */ + tcpip_tcp_timer_active = 1; + sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL); + } +} +#endif /* LWIP_TCP */ + +static void +#if LWIP_DEBUG_TIMERNAMES +sys_timeout_abs(u32_t abs_time, sys_timeout_handler handler, void *arg, const char *handler_name) +#else /* LWIP_DEBUG_TIMERNAMES */ +sys_timeout_abs(u32_t abs_time, sys_timeout_handler handler, void *arg) +#endif +{ + struct sys_timeo *timeout, *t; + + timeout = (struct sys_timeo *)memp_malloc(MEMP_SYS_TIMEOUT); + if (timeout == NULL) { + LWIP_ASSERT("sys_timeout: timeout != NULL, pool MEMP_SYS_TIMEOUT is empty", timeout != NULL); + return; + } + + timeout->next = NULL; + timeout->h = handler; + timeout->arg = arg; + timeout->time = abs_time; + +#if LWIP_DEBUG_TIMERNAMES + timeout->handler_name = handler_name; + LWIP_DEBUGF(TIMERS_DEBUG, ("sys_timeout: %p abs_time=%"U32_F" handler=%s arg=%p\n", + (void *)timeout, abs_time, handler_name, (void *)arg)); +#endif /* LWIP_DEBUG_TIMERNAMES */ + + if (next_timeout == NULL) { + next_timeout = timeout; + return; + } + if (TIME_LESS_THAN(timeout->time, next_timeout->time)) { + timeout->next = next_timeout; + next_timeout = timeout; + } else { + for (t = next_timeout; t != NULL; t = t->next) { + if ((t->next == NULL) || TIME_LESS_THAN(timeout->time, t->next->time)) { + timeout->next = t->next; + t->next = timeout; + break; + } + } + } +} + +/** + * Timer callback function that calls cyclic->handler() and reschedules itself. + * + * @param arg unused argument + */ +#if !LWIP_TESTMODE +static +#endif +void +lwip_cyclic_timer(void *arg) +{ + u32_t now; + u32_t next_timeout_time; + const struct lwip_cyclic_timer *cyclic = (const struct lwip_cyclic_timer *)arg; + +#if LWIP_DEBUG_TIMERNAMES + LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: %s()\n", cyclic->handler_name)); +#endif + cyclic->handler(); + + now = sys_now(); + next_timeout_time = (u32_t)(current_timeout_due_time + cyclic->interval_ms); /* overflow handled by TIME_LESS_THAN macro */ + if (TIME_LESS_THAN(next_timeout_time, now)) { + /* timer would immediately expire again -> "overload" -> restart without any correction */ +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs((u32_t)(now + cyclic->interval_ms), lwip_cyclic_timer, arg, cyclic->handler_name); +#else + sys_timeout_abs((u32_t)(now + cyclic->interval_ms), lwip_cyclic_timer, arg); +#endif + + } else { + /* correct cyclic interval with handler execution delay and sys_check_timeouts jitter */ +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs(next_timeout_time, lwip_cyclic_timer, arg, cyclic->handler_name); +#else + sys_timeout_abs(next_timeout_time, lwip_cyclic_timer, arg); +#endif + } +} + +/** Initialize this module */ +void sys_timeouts_init(void) +{ + size_t i; + /* tcp_tmr() at index 0 is started on demand */ + for (i = (LWIP_TCP ? 1 : 0); i < LWIP_ARRAYSIZE(lwip_cyclic_timers); i++) { + /* we have to cast via size_t to get rid of const warning + (this is OK as cyclic_timer() casts back to const* */ + sys_timeout(lwip_cyclic_timers[i].interval_ms, lwip_cyclic_timer, LWIP_CONST_CAST(void *, &lwip_cyclic_timers[i])); + } +} + +/** + * Create a one-shot timer (aka timeout). 
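+ * A minimal usage sketch (my_app_start, my_timeout_handler, do_periodic_work
+ * and the 1000 ms interval are illustrative names and values, not lwIP API;
+ * since timeouts are one-shot, a periodic job re-arms itself, and a pending
+ * timeout can be cancelled again with sys_untimeout(handler, arg)):
+ * @code
+ *   static void my_timeout_handler(void *arg)
+ *   {
+ *     LWIP_UNUSED_ARG(arg);
+ *     do_periodic_work();
+ *     sys_timeout(1000, my_timeout_handler, NULL);
+ *   }
+ *
+ *   static void my_app_start(void)
+ *   {
+ *     sys_timeout(1000, my_timeout_handler, NULL);
+ *   }
+ * @endcode
+ * Both calls must be made from the tcpip thread (or with the lwIP core locked).
+ *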
Timeouts are processed in the + * following cases: + * - while waiting for a message using sys_timeouts_mbox_fetch() + * - by calling sys_check_timeouts() (NO_SYS==1 only) + * + * @param msecs time in milliseconds after that the timer should expire + * @param handler callback function to call when msecs have elapsed + * @param arg argument to pass to the callback function + */ +#if LWIP_DEBUG_TIMERNAMES +void +sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char *handler_name) +#else /* LWIP_DEBUG_TIMERNAMES */ +void +sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg) +#endif /* LWIP_DEBUG_TIMERNAMES */ +{ + u32_t next_timeout_time; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("Timeout time too long, max is LWIP_UINT32_MAX/4 msecs", msecs <= (LWIP_UINT32_MAX / 4)); + + next_timeout_time = (u32_t)(sys_now() + msecs); /* overflow handled by TIME_LESS_THAN macro */ + +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs(next_timeout_time, handler, arg, handler_name); +#else + sys_timeout_abs(next_timeout_time, handler, arg); +#endif +} + +/** + * Go through timeout list (for this task only) and remove the first matching + * entry (subsequent entries remain untouched), even though the timeout has not + * triggered yet. + * + * @param handler callback function that would be called by the timeout + * @param arg callback argument that would be passed to handler +*/ +void +sys_untimeout(sys_timeout_handler handler, void *arg) +{ + struct sys_timeo *prev_t, *t; + + LWIP_ASSERT_CORE_LOCKED(); + + if (next_timeout == NULL) { + return; + } + + for (t = next_timeout, prev_t = NULL; t != NULL; prev_t = t, t = t->next) { + if ((t->h == handler) && (t->arg == arg)) { + /* We have a match */ + /* Unlink from previous in list */ + if (prev_t == NULL) { + next_timeout = t->next; + } else { + prev_t->next = t->next; + } + memp_free(MEMP_SYS_TIMEOUT, t); + return; + } + } + return; +} + +/** + * @ingroup lwip_nosys + * Handle timeouts for NO_SYS==1 (i.e. without using + * tcpip_thread/sys_timeouts_mbox_fetch(). Uses sys_now() to call timeout + * handler functions when timeouts expire. + * + * Must be called periodically from your main loop. + */ +void +sys_check_timeouts(void) +{ + u32_t now; + + LWIP_ASSERT_CORE_LOCKED(); + + /* Process only timers expired at the start of the function. */ + now = sys_now(); + + do { + struct sys_timeo *tmptimeout; + sys_timeout_handler handler; + void *arg; + + PBUF_CHECK_FREE_OOSEQ(); + + tmptimeout = next_timeout; + if (tmptimeout == NULL) { + return; + } + + if (TIME_LESS_THAN(now, tmptimeout->time)) { + return; + } + + /* Timeout has expired */ + next_timeout = tmptimeout->next; + handler = tmptimeout->h; + arg = tmptimeout->arg; + current_timeout_due_time = tmptimeout->time; +#if LWIP_DEBUG_TIMERNAMES + if (handler != NULL) { + LWIP_DEBUGF(TIMERS_DEBUG, ("sct calling h=%s t=%"U32_F" arg=%p\n", + tmptimeout->handler_name, sys_now() - tmptimeout->time, arg)); + } +#endif /* LWIP_DEBUG_TIMERNAMES */ + memp_free(MEMP_SYS_TIMEOUT, tmptimeout); + if (handler != NULL) { + handler(arg); + } + LWIP_TCPIP_THREAD_ALIVE(); + + /* Repeat until all expired timers have been called */ + } while (1); +} + +/** Rebase the timeout times to the current time. + * This is necessary if sys_check_timeouts() hasn't been called for a long + * time (e.g. while saving energy) to prevent all timer functions of that + * period being called. 
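+ *
+ * A hypothetical NO_SYS==1 low-power polling loop illustrating when this may
+ * be called (board_sleep_ms() and board_overslept() are assumed port-specific
+ * hooks, not lwIP API; board_overslept() stands for "the device slept far
+ * longer than the requested wait, e.g. after deep sleep"):
+ * @code
+ *   for (;;) {
+ *     u32_t wait_ms = sys_timeouts_sleeptime();
+ *     if (wait_ms != SYS_TIMEOUTS_SLEEPTIME_INFINITE) {
+ *       board_sleep_ms(wait_ms);
+ *     }
+ *     if (board_overslept()) {
+ *       sys_restart_timeouts();
+ *     }
+ *     sys_check_timeouts();
+ *   }
+ * @endcode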
+ */ +void +sys_restart_timeouts(void) +{ + u32_t now; + u32_t base; + struct sys_timeo *t; + + if (next_timeout == NULL) { + return; + } + + now = sys_now(); + base = next_timeout->time; + + for (t = next_timeout; t != NULL; t = t->next) { + t->time = (t->time - base) + now; + } +} + +/** Return the time left before the next timeout is due. If no timeouts are + * enqueued, returns 0xffffffff + */ +u32_t +sys_timeouts_sleeptime(void) +{ + u32_t now; + + LWIP_ASSERT_CORE_LOCKED(); + + if (next_timeout == NULL) { + return SYS_TIMEOUTS_SLEEPTIME_INFINITE; + } + now = sys_now(); + if (TIME_LESS_THAN(next_timeout->time, now)) { + return 0; + } else { + u32_t ret = (u32_t)(next_timeout->time - now); + LWIP_ASSERT("invalid sleeptime", ret <= LWIP_MAX_TIMEOUT); + return ret; + } +} + +#else /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ +/* Satisfy the TCP code which calls this function */ +void +tcp_timer_needed(void) +{ +} +#endif /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ diff --git a/Middlewares/Third_Party/LwIP/src/core/udp.c b/Middlewares/Third_Party/LwIP/src/core/udp.c new file mode 100644 index 00000000..9d2cb4af --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/core/udp.c @@ -0,0 +1,1314 @@ +/** + * @file + * User Datagram Protocol module\n + * The code for the User Datagram Protocol UDP & UDPLite (RFC 3828).\n + * See also @ref udp_raw + * + * @defgroup udp_raw UDP + * @ingroup callbackstyle_api + * User Datagram Protocol module\n + * @see @ref api + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* @todo Check the use of '(struct udp_pcb).chksum_len_rx'! 
+ */ + +#include "lwip/opt.h" + +#if LWIP_UDP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/udp.h" +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/inet_chksum.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/icmp.h" +#include "lwip/icmp6.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" +#include "lwip/dhcp.h" + +#include + +#ifndef UDP_LOCAL_PORT_RANGE_START +/* From http://www.iana.org/assignments/port-numbers: + "The Dynamic and/or Private Ports are those from 49152 through 65535" */ +#define UDP_LOCAL_PORT_RANGE_START 0xc000 +#define UDP_LOCAL_PORT_RANGE_END 0xffff +#define UDP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & (u16_t)~UDP_LOCAL_PORT_RANGE_START) + UDP_LOCAL_PORT_RANGE_START)) +#endif + +/* last local UDP port */ +static u16_t udp_port = UDP_LOCAL_PORT_RANGE_START; + +/* The list of UDP PCBs */ +/* exported in udp.h (was static) */ +struct udp_pcb *udp_pcbs; + +/** + * Initialize this module. + */ +void +udp_init(void) +{ +#ifdef LWIP_RAND + udp_port = UDP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND()); +#endif /* LWIP_RAND */ +} + +/** + * Allocate a new local UDP port. + * + * @return a new (free) local UDP port number + */ +static u16_t +udp_new_port(void) +{ + u16_t n = 0; + struct udp_pcb *pcb; + +again: + if (udp_port++ == UDP_LOCAL_PORT_RANGE_END) { + udp_port = UDP_LOCAL_PORT_RANGE_START; + } + /* Check all PCBs. */ + for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->local_port == udp_port) { + if (++n > (UDP_LOCAL_PORT_RANGE_END - UDP_LOCAL_PORT_RANGE_START)) { + return 0; + } + goto again; + } + } + return udp_port; +} + +/** Common code to see if the current input packet matches the pcb + * (current input packet is accessed via ip(4/6)_current_* macros) + * + * @param pcb pcb to check + * @param inp network interface on which the datagram was received (only used for IPv4) + * @param broadcast 1 if his is an IPv4 broadcast (global or subnet-only), 0 otherwise (only used for IPv4) + * @return 1 on match, 0 otherwise + */ +static u8_t +udp_input_local_match(struct udp_pcb *pcb, struct netif *inp, u8_t broadcast) +{ + LWIP_UNUSED_ARG(inp); /* in IPv6 only case */ + LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */ + + LWIP_ASSERT("udp_input_local_match: invalid pcb", pcb != NULL); + LWIP_ASSERT("udp_input_local_match: invalid netif", inp != NULL); + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + return 0; + } + + /* Dual-stack: PCBs listening to any IP type also listen to any IP address */ + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { +#if LWIP_IPV4 && IP_SOF_BROADCAST_RECV + if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) { + return 0; + } +#endif /* LWIP_IPV4 && IP_SOF_BROADCAST_RECV */ + return 1; + } + + /* Only need to check PCB if incoming IP version matches PCB IP version */ + if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) { +#if LWIP_IPV4 + /* Special case: IPv4 broadcast: all or broadcasts in my subnet + * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */ + if (broadcast != 0) { +#if IP_SOF_BROADCAST_RECV + if (ip_get_option(pcb, SOF_BROADCAST)) +#endif /* IP_SOF_BROADCAST_RECV */ + { + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) || + ((ip4_current_dest_addr()->addr == IPADDR_BROADCAST)) || + ip4_addr_netcmp(ip_2_ip4(&pcb->local_ip), ip4_current_dest_addr(), 
netif_ip4_netmask(inp))) { + return 1; + } + } + } else +#endif /* LWIP_IPV4 */ + /* Handle IPv4 and IPv6: all or exact match */ + if (ip_addr_isany(&pcb->local_ip) || ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + return 1; + } + } + + return 0; +} + +/** + * Process an incoming UDP datagram. + * + * Given an incoming UDP datagram (as a chain of pbufs) this function + * finds a corresponding UDP PCB and hands over the pbuf to the pcbs + * recv function. If no pcb is found or the datagram is incorrect, the + * pbuf is freed. + * + * @param p pbuf to be demultiplexed to a UDP PCB (p->payload pointing to the UDP header) + * @param inp network interface on which the datagram was received. + * + */ +void +udp_input(struct pbuf *p, struct netif *inp) +{ + struct udp_hdr *udphdr; + struct udp_pcb *pcb, *prev; + struct udp_pcb *uncon_pcb; + u16_t src, dest; + u8_t broadcast; + u8_t for_us = 0; + + LWIP_UNUSED_ARG(inp); + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("udp_input: invalid pbuf", p != NULL); + LWIP_ASSERT("udp_input: invalid netif", inp != NULL); + + PERF_START; + + UDP_STATS_INC(udp.recv); + + /* Check minimum length (UDP header) */ + if (p->len < UDP_HLEN) { + /* drop short packets */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_input: short UDP datagram (%"U16_F" bytes) discarded\n", p->tot_len)); + UDP_STATS_INC(udp.lenerr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + goto end; + } + + udphdr = (struct udp_hdr *)p->payload; + + /* is broadcast packet ? */ + broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()); + + LWIP_DEBUGF(UDP_DEBUG, ("udp_input: received datagram of length %"U16_F"\n", p->tot_len)); + + /* convert src and dest ports to host byte order */ + src = lwip_ntohs(udphdr->src); + dest = lwip_ntohs(udphdr->dest); + + udp_debug_print(udphdr); + + /* print the UDP source and destination */ + LWIP_DEBUGF(UDP_DEBUG, ("udp (")); + ip_addr_debug_print_val(UDP_DEBUG, *ip_current_dest_addr()); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", lwip_ntohs(udphdr->dest))); + ip_addr_debug_print_val(UDP_DEBUG, *ip_current_src_addr()); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", lwip_ntohs(udphdr->src))); + + pcb = NULL; + prev = NULL; + uncon_pcb = NULL; + /* Iterate through the UDP pcb list for a matching pcb. + * 'Perfect match' pcbs (connected to the remote port & ip address) are + * preferred. If no perfect match is found, the first unconnected pcb that + * matches the local port and ip address gets the datagram. 
*/ + for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) { + /* print the PCB local and remote address */ + LWIP_DEBUGF(UDP_DEBUG, ("pcb (")); + ip_addr_debug_print_val(UDP_DEBUG, pcb->local_ip); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", pcb->local_port)); + ip_addr_debug_print_val(UDP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", pcb->remote_port)); + + /* compare PCB local addr+port to UDP destination addr+port */ + if ((pcb->local_port == dest) && + (udp_input_local_match(pcb, inp, broadcast) != 0)) { + if ((pcb->flags & UDP_FLAGS_CONNECTED) == 0) { + if (uncon_pcb == NULL) { + /* the first unconnected matching PCB */ + uncon_pcb = pcb; +#if LWIP_IPV4 + } else if (broadcast && ip4_current_dest_addr()->addr == IPADDR_BROADCAST) { + /* global broadcast address (only valid for IPv4; match was checked before) */ + if (!IP_IS_V4_VAL(uncon_pcb->local_ip) || !ip4_addr_cmp(ip_2_ip4(&uncon_pcb->local_ip), netif_ip4_addr(inp))) { + /* uncon_pcb does not match the input netif, check this pcb */ + if (IP_IS_V4_VAL(pcb->local_ip) && ip4_addr_cmp(ip_2_ip4(&pcb->local_ip), netif_ip4_addr(inp))) { + /* better match */ + uncon_pcb = pcb; + } + } +#endif /* LWIP_IPV4 */ + } +#if SO_REUSE + else if (!ip_addr_isany(&pcb->local_ip)) { + /* prefer specific IPs over catch-all */ + uncon_pcb = pcb; + } +#endif /* SO_REUSE */ + } + + /* compare PCB remote addr+port to UDP source addr+port */ + if ((pcb->remote_port == src) && + (ip_addr_isany_val(pcb->remote_ip) || + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) { + /* the first fully matching PCB */ + if (prev != NULL) { + /* move the pcb to the front of udp_pcbs so that is + found faster next time */ + prev->next = pcb->next; + pcb->next = udp_pcbs; + udp_pcbs = pcb; + } else { + UDP_STATS_INC(udp.cachehit); + } + break; + } + } + + prev = pcb; + } + /* no fully matching pcb found? then look for an unconnected pcb */ + if (pcb == NULL) { + pcb = uncon_pcb; + } + + /* Check checksum if this is a match or if it was directed at us. */ + if (pcb != NULL) { + for_us = 1; + } else { +#if LWIP_IPV6 + if (ip_current_is_v6()) { + for_us = netif_get_ip6_addr_match(inp, ip6_current_dest_addr()) >= 0; + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + if (!ip_current_is_v6()) { + for_us = ip4_addr_cmp(netif_ip4_addr(inp), ip4_current_dest_addr()); + } +#endif /* LWIP_IPV4 */ + } + + if (for_us) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: calculating checksum\n")); +#if CHECKSUM_CHECK_UDP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_UDP) { +#if LWIP_UDPLITE + if (ip_current_header_proto() == IP_PROTO_UDPLITE) { + /* Do the UDP Lite checksum */ + u16_t chklen = lwip_ntohs(udphdr->len); + if (chklen < sizeof(struct udp_hdr)) { + if (chklen == 0) { + /* For UDP-Lite, checksum length of 0 means checksum + over the complete packet (See RFC 3828 chap. 3.1) */ + chklen = p->tot_len; + } else { + /* At least the UDP-Lite header must be covered by the + checksum! (Again, see RFC 3828 chap. 3.1) */ + goto chkerr; + } + } + if (ip_chksum_pseudo_partial(p, IP_PROTO_UDPLITE, + p->tot_len, chklen, + ip_current_src_addr(), ip_current_dest_addr()) != 0) { + goto chkerr; + } + } else +#endif /* LWIP_UDPLITE */ + { + if (udphdr->chksum != 0) { + if (ip_chksum_pseudo(p, IP_PROTO_UDP, p->tot_len, + ip_current_src_addr(), + ip_current_dest_addr()) != 0) { + goto chkerr; + } + } + } + } +#endif /* CHECKSUM_CHECK_UDP */ + if (pbuf_remove_header(p, UDP_HLEN)) { + /* Can we cope with this failing? 
Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + goto end; + } + + if (pcb != NULL) { + MIB2_STATS_INC(mib2.udpindatagrams); +#if SO_REUSE && SO_REUSE_RXTOALL + if (ip_get_option(pcb, SOF_REUSEADDR) && + (broadcast || ip_addr_ismulticast(ip_current_dest_addr()))) { + /* pass broadcast- or multicast packets to all multicast pcbs + if SOF_REUSEADDR is set on the first match */ + struct udp_pcb *mpcb; + for (mpcb = udp_pcbs; mpcb != NULL; mpcb = mpcb->next) { + if (mpcb != pcb) { + /* compare PCB local addr+port to UDP destination addr+port */ + if ((mpcb->local_port == dest) && + (udp_input_local_match(mpcb, inp, broadcast) != 0)) { + /* pass a copy of the packet to all local matches */ + if (mpcb->recv != NULL) { + struct pbuf *q; + q = pbuf_clone(PBUF_RAW, PBUF_POOL, p); + if (q != NULL) { + mpcb->recv(mpcb->recv_arg, mpcb, q, ip_current_src_addr(), src); + } + } + } + } + } + } +#endif /* SO_REUSE && SO_REUSE_RXTOALL */ + /* callback */ + if (pcb->recv != NULL) { + /* now the recv function is responsible for freeing p */ + pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr(), src); + } else { + /* no recv function registered? then we have to free the pbuf! */ + pbuf_free(p); + goto end; + } + } else { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: not for us.\n")); + +#if LWIP_ICMP || LWIP_ICMP6 + /* No match was found, send ICMP destination port unreachable unless + destination address was broadcast/multicast. */ + if (!broadcast && !ip_addr_ismulticast(ip_current_dest_addr())) { + /* move payload pointer back to ip header */ + pbuf_header_force(p, (s16_t)(ip_current_header_tot_len() + UDP_HLEN)); + icmp_port_unreach(ip_current_is_v6(), p); + } +#endif /* LWIP_ICMP || LWIP_ICMP6 */ + UDP_STATS_INC(udp.proterr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpnoports); + pbuf_free(p); + } + } else { + pbuf_free(p); + } +end: + PERF_STOP("udp_input"); + return; +#if CHECKSUM_CHECK_UDP +chkerr: + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("udp_input: UDP (or UDP Lite) datagram discarded due to failing checksum\n")); + UDP_STATS_INC(udp.chkerr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + PERF_STOP("udp_input"); +#endif /* CHECKSUM_CHECK_UDP */ +} + +/** + * @ingroup udp_raw + * Sends the pbuf p using UDP. The pbuf is not deallocated. + * + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * + * The datagram will be sent to the current remote_ip & remote_port + * stored in pcb. If the pcb is not bound to a port, it will + * automatically be bound to a random port. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_MEM. Out of memory. + * - ERR_RTE. Could not find route to destination address. + * - ERR_VAL. No PCB or PCB is dual-stack + * - More errors could be returned by lower protocol layers. 
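+ *
+ * A minimal send sketch (raw API, to be called from the tcpip thread; the
+ * peer address, port and payload are illustrative):
+ * @code
+ *   struct udp_pcb *pcb = udp_new();
+ *   if (pcb != NULL) {
+ *     ip_addr_t dst;
+ *     IP_ADDR4(&dst, 192, 168, 1, 10);
+ *     if (udp_connect(pcb, &dst, 7) == ERR_OK) {
+ *       struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, 5, PBUF_RAM);
+ *       if (p != NULL) {
+ *         pbuf_take(p, "hello", 5);
+ *         udp_send(pcb, p);
+ *         pbuf_free(p);
+ *       }
+ *     }
+ *     udp_remove(pcb);
+ *   }
+ * @endcode
+ * PBUF_TRANSPORT leaves room for the UDP/IP headers to be prepended in place,
+ * and the pbuf is freed by the caller because udp_send() does not deallocate it.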
+ * + * @see udp_disconnect() udp_sendto() + */ +err_t +udp_send(struct udp_pcb *pcb, struct pbuf *p) +{ + LWIP_ERROR("udp_send: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_send: invalid pbuf", p != NULL, return ERR_ARG); + + if (IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) { + return ERR_VAL; + } + + /* send to the packet using remote ip and port stored in the pcb */ + return udp_sendto(pcb, p, &pcb->remote_ip, pcb->remote_port); +} + +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP +/** @ingroup udp_raw + * Same as udp_send() but with checksum + */ +err_t +udp_send_chksum(struct udp_pcb *pcb, struct pbuf *p, + u8_t have_chksum, u16_t chksum) +{ + LWIP_ERROR("udp_send_chksum: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_send_chksum: invalid pbuf", p != NULL, return ERR_ARG); + + if (IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) { + return ERR_VAL; + } + + /* send to the packet using remote ip and port stored in the pcb */ + return udp_sendto_chksum(pcb, p, &pcb->remote_ip, pcb->remote_port, + have_chksum, chksum); +} +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + +/** + * @ingroup udp_raw + * Send data to a specified address using UDP. + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * @param dst_ip Destination IP address. + * @param dst_port Destination UDP port. + * + * dst_ip & dst_port are expected to be in the same byte order as in the pcb. + * + * If the PCB already has a remote address association, it will + * be restored after the data is sent. + * + * @return lwIP error code (@see udp_send for possible error codes) + * + * @see udp_disconnect() udp_send() + */ +err_t +udp_sendto(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_chksum(pcb, p, dst_ip, dst_port, 0, 0); +} + +/** @ingroup udp_raw + * Same as udp_sendto(), but with checksum */ +err_t +udp_sendto_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, u8_t have_chksum, u16_t chksum) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + struct netif *netif; + + LWIP_ERROR("udp_sendto: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send\n")); + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { +#if LWIP_MULTICAST_TX_OPTIONS + netif = NULL; + if (ip_addr_ismulticast(dst_ip)) { + /* For IPv6, the interface to use for packets with a multicast destination + * is specified using an interface index. The same approach may be used for + * IPv4 as well, in which case it overrides the IPv4 multicast override + * address below. Here we have to look up the netif by going through the + * list, but by doing so we skip a route lookup. If the interface index has + * gone stale, we fall through and do the regular route lookup after all. */ + if (pcb->mcast_ifindex != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->mcast_ifindex); + } +#if LWIP_IPV4 + else +#if LWIP_IPV6 + if (IP_IS_V4(dst_ip)) +#endif /* LWIP_IPV6 */ + { + /* IPv4 does not use source-based routing by default, so we use an + administratively selected interface for multicast by default. 
+ However, this can be overridden by setting an interface address + in pcb->mcast_ip4 that is used for routing. If this routing lookup + fails, we try regular routing as though no override was set. */ + if (!ip4_addr_isany_val(pcb->mcast_ip4) && + !ip4_addr_cmp(&pcb->mcast_ip4, IP4_ADDR_BROADCAST)) { + netif = ip4_route_src(ip_2_ip4(&pcb->local_ip), &pcb->mcast_ip4); + } + } +#endif /* LWIP_IPV4 */ + } + + if (netif == NULL) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + { + /* find the outgoing network interface for this packet */ + netif = ip_route(&pcb->local_ip, dst_ip); + } + } + + /* no outgoing network interface could be found? */ + if (netif == NULL) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: No route to ")); + ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, dst_ip); + LWIP_DEBUGF(UDP_DEBUG, ("\n")); + UDP_STATS_INC(udp.rterr); + return ERR_RTE; + } +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum); +#else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + return udp_sendto_if(pcb, p, dst_ip, dst_port, netif); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ +} + +/** + * @ingroup udp_raw + * Send data to a specified address using UDP. + * The netif used for sending can be specified. + * + * This function exists mainly for DHCP, to be able to send UDP packets + * on a netif that is still down. + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * @param dst_ip Destination IP address. + * @param dst_port Destination UDP port. + * @param netif the netif used for sending. + * + * dst_ip & dst_port are expected to be in the same byte order as in the pcb. + * + * @return lwIP error code (@see udp_send for possible error codes) + * + * @see udp_disconnect() udp_send() + */ +err_t +udp_sendto_if(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0); +} + +/** Same as udp_sendto_if(), but with checksum */ +err_t +udp_sendto_if_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, struct netif *netif, u8_t have_chksum, + u16_t chksum) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + const ip_addr_t *src_ip; + + LWIP_ERROR("udp_sendto_if: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid netif", netif != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + /* PCB local address is IP_ANY_ADDR or multicast? */ +#if LWIP_IPV6 + if (IP_IS_V6(dst_ip)) { + if (ip6_addr_isany(ip_2_ip6(&pcb->local_ip)) || + ip6_addr_ismulticast(ip_2_ip6(&pcb->local_ip))) { + src_ip = ip6_select_source_address(netif, ip_2_ip6(dst_ip)); + if (src_ip == NULL) { + /* No suitable source address was found. */ + return ERR_RTE; + } + } else { + /* use UDP PCB local IPv6 address as source address, if still valid. */ + if (netif_get_ip6_addr_match(netif, ip_2_ip6(&pcb->local_ip)) < 0) { + /* Address isn't valid anymore. 
*/ + return ERR_RTE; + } + src_ip = &pcb->local_ip; + } + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) || + ip4_addr_ismulticast(ip_2_ip4(&pcb->local_ip))) { + /* if the local_ip is any or multicast + * use the outgoing network interface IP address as source address */ + src_ip = netif_ip_addr4(netif); + } else { + /* check if UDP PCB local IP address is correct + * this could be an old address if netif->ip_addr has changed */ + if (!ip4_addr_cmp(ip_2_ip4(&(pcb->local_ip)), netif_ip4_addr(netif))) { + /* local_ip doesn't match, drop the packet */ + return ERR_RTE; + } + /* use UDP PCB local IP address as source address */ + src_ip = &pcb->local_ip; + } +#endif /* LWIP_IPV4 */ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum, src_ip); +#else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + return udp_sendto_if_src(pcb, p, dst_ip, dst_port, netif, src_ip); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ +} + +/** @ingroup udp_raw + * Same as @ref udp_sendto_if, but with source address */ +err_t +udp_sendto_if_src(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, const ip_addr_t *src_ip) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0, src_ip); +} + +/** Same as udp_sendto_if_src(), but with checksum */ +err_t +udp_sendto_if_src_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, struct netif *netif, u8_t have_chksum, + u16_t chksum, const ip_addr_t *src_ip) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + struct udp_hdr *udphdr; + err_t err; + struct pbuf *q; /* q will be sent down the stack */ + u8_t ip_proto; + u8_t ttl; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_sendto_if_src: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid src_ip", src_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid netif", netif != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) || + !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + +#if LWIP_IPV4 && IP_SOF_BROADCAST + /* broadcast filter? */ + if (!ip_get_option(pcb, SOF_BROADCAST) && +#if LWIP_IPV6 + IP_IS_V4(dst_ip) && +#endif /* LWIP_IPV6 */ + ip_addr_isbroadcast(dst_ip, netif)) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("udp_sendto_if: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb)); + return ERR_VAL; + } +#endif /* LWIP_IPV4 && IP_SOF_BROADCAST */ + + /* if the PCB is not yet bound to a port, bind it here */ + if (pcb->local_port == 0) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send: not yet bound to a port, binding now\n")); + err = udp_bind(pcb, &pcb->local_ip, pcb->local_port); + if (err != ERR_OK) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: forced port bind failed\n")); + return err; + } + } + + /* packet too large to add a UDP header without causing an overflow? */ + if ((u16_t)(p->tot_len + UDP_HLEN) < p->tot_len) { + return ERR_MEM; + } + /* not enough space to add an UDP header to first pbuf in given p chain? 
*/ + if (pbuf_add_header(p, UDP_HLEN)) { + /* allocate header in a separate new pbuf */ + q = pbuf_alloc(PBUF_IP, UDP_HLEN, PBUF_RAM); + /* new header pbuf could not be allocated? */ + if (q == NULL) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: could not allocate header\n")); + return ERR_MEM; + } + if (p->tot_len != 0) { + /* chain header q in front of given pbuf p (only if p contains data) */ + pbuf_chain(q, p); + } + /* first pbuf q points to header pbuf */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_send: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p)); + } else { + /* adding space for header within p succeeded */ + /* first pbuf q equals given pbuf */ + q = p; + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: added header in given pbuf %p\n", (void *)p)); + } + LWIP_ASSERT("check that first pbuf can hold struct udp_hdr", + (q->len >= sizeof(struct udp_hdr))); + /* q now represents the packet to be sent */ + udphdr = (struct udp_hdr *)q->payload; + udphdr->src = lwip_htons(pcb->local_port); + udphdr->dest = lwip_htons(dst_port); + /* in UDP, 0 checksum means 'no checksum' */ + udphdr->chksum = 0x0000; + + /* Multicast Loop? */ +#if LWIP_MULTICAST_TX_OPTIONS + if (((pcb->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) { + q->flags |= PBUF_FLAG_MCASTLOOP; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: sending datagram of length %"U16_F"\n", q->tot_len)); + +#if LWIP_UDPLITE + /* UDP Lite protocol? */ + if (pcb->flags & UDP_FLAGS_UDPLITE) { + u16_t chklen, chklen_hdr; + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE packet length %"U16_F"\n", q->tot_len)); + /* set UDP message length in UDP header */ + chklen_hdr = chklen = pcb->chksum_len_tx; + if ((chklen < sizeof(struct udp_hdr)) || (chklen > q->tot_len)) { + if (chklen != 0) { + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE pcb->chksum_len is illegal: %"U16_F"\n", chklen)); + } + /* For UDP-Lite, checksum length of 0 means checksum + over the complete packet. (See RFC 3828 chap. 3.1) + At least the UDP-Lite header must be covered by the + checksum, therefore, if chksum_len has an illegal + value, we generate the checksum over the complete + packet to be safe. */ + chklen_hdr = 0; + chklen = q->tot_len; + } + udphdr->len = lwip_htons(chklen_hdr); + /* calculate checksum */ +#if CHECKSUM_GEN_UDP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) { +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + chklen = UDP_HLEN; + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + udphdr->chksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDPLITE, + q->tot_len, chklen, src_ip, dst_ip); +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + u32_t acc; + acc = udphdr->chksum + (u16_t)~(chksum); + udphdr->chksum = FOLD_U32T(acc); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + + /* chksum zero must become 0xffff, as zero means 'no checksum' */ + if (udphdr->chksum == 0x0000) { + udphdr->chksum = 0xffff; + } + } +#endif /* CHECKSUM_GEN_UDP */ + + ip_proto = IP_PROTO_UDPLITE; + } else +#endif /* LWIP_UDPLITE */ + { /* UDP */ + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP packet length %"U16_F"\n", q->tot_len)); + udphdr->len = lwip_htons(q->tot_len); + /* calculate checksum */ +#if CHECKSUM_GEN_UDP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) { + /* Checksum is mandatory over IPv6. 
*/ + if (IP_IS_V6(dst_ip) || (pcb->flags & UDP_FLAGS_NOCHKSUM) == 0) { + u16_t udpchksum; +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + u32_t acc; + udpchksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDP, + q->tot_len, UDP_HLEN, src_ip, dst_ip); + acc = udpchksum + (u16_t)~(chksum); + udpchksum = FOLD_U32T(acc); + } else +#endif /* LWIP_CHECKSUM_ON_COPY */ + { + udpchksum = ip_chksum_pseudo(q, IP_PROTO_UDP, q->tot_len, + src_ip, dst_ip); + } + + /* chksum zero must become 0xffff, as zero means 'no checksum' */ + if (udpchksum == 0x0000) { + udpchksum = 0xffff; + } + udphdr->chksum = udpchksum; + } + } +#endif /* CHECKSUM_GEN_UDP */ + ip_proto = IP_PROTO_UDP; + } + + /* Determine TTL to use */ +#if LWIP_MULTICAST_TX_OPTIONS + ttl = (ip_addr_ismulticast(dst_ip) ? udp_get_multicast_ttl(pcb) : pcb->ttl); +#else /* LWIP_MULTICAST_TX_OPTIONS */ + ttl = pcb->ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP checksum 0x%04"X16_F"\n", udphdr->chksum)); + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: ip_output_if (,,,,0x%02"X16_F",)\n", (u16_t)ip_proto)); + /* output to IP */ + NETIF_SET_HINTS(netif, &(pcb->netif_hints)); + err = ip_output_if_src(q, src_ip, dst_ip, ttl, pcb->tos, ip_proto, netif); + NETIF_RESET_HINTS(netif); + + /* @todo: must this be increased even if error occurred? */ + MIB2_STATS_INC(mib2.udpoutdatagrams); + + /* did we chain a separate header pbuf earlier? */ + if (q != p) { + /* free the header pbuf */ + pbuf_free(q); + q = NULL; + /* p is still referenced by the caller, and will live on */ + } + + UDP_STATS_INC(udp.xmit); + return err; +} + +/** + * @ingroup udp_raw + * Bind an UDP PCB. + * + * @param pcb UDP PCB to be bound with a local address ipaddr and port. + * @param ipaddr local IP address to bind with. Use IP_ANY_TYPE to + * bind to all local interfaces. + * @param port local UDP port to bind with. Use 0 to automatically bind + * to a random port between UDP_LOCAL_PORT_RANGE_START and + * UDP_LOCAL_PORT_RANGE_END. + * + * ipaddr & port are expected to be in the same byte order as in the pcb. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_USE. The specified ipaddr and port are already bound to by + * another UDP PCB. + * + * @see udp_disconnect() + */ +err_t +udp_bind(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + struct udp_pcb *ipcb; + u8_t rebind; +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + ip_addr_t zoned_ipaddr; +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY; + } +#else /* LWIP_IPV4 */ + LWIP_ERROR("udp_bind: invalid ipaddr", ipaddr != NULL, return ERR_ARG); +#endif /* LWIP_IPV4 */ + + LWIP_ERROR("udp_bind: invalid pcb", pcb != NULL, return ERR_ARG); + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_bind(ipaddr = ")); + ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_TRACE, ipaddr); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, (", port = %"U16_F")\n", port)); + + rebind = 0; + /* Check for double bind and rebind of the same pcb */ + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + /* is this UDP PCB already on active list? */ + if (pcb == ipcb) { + rebind = 1; + break; + } + } + +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. 
Do the zone selection before the address-in-use + * check below; as such we have to make a temporary copy of the address. */ + if (IP_IS_V6(ipaddr) && ip6_addr_lacks_zone(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { + ip_addr_copy(zoned_ipaddr, *ipaddr); + ip6_addr_select_zone(ip_2_ip6(&zoned_ipaddr), ip_2_ip6(&zoned_ipaddr)); + ipaddr = &zoned_ipaddr; + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + /* no port specified? */ + if (port == 0) { + port = udp_new_port(); + if (port == 0) { + /* no more ports available in local range */ + LWIP_DEBUGF(UDP_DEBUG, ("udp_bind: out of free UDP ports\n")); + return ERR_USE; + } + } else { + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + if (pcb != ipcb) { + /* By default, we don't allow to bind to a port that any other udp + PCB is already bound to, unless *all* PCBs with that port have tha + REUSEADDR flag set. */ +#if SO_REUSE + if (!ip_get_option(pcb, SOF_REUSEADDR) || + !ip_get_option(ipcb, SOF_REUSEADDR)) +#endif /* SO_REUSE */ + { + /* port matches that of PCB in list and REUSEADDR not set -> reject */ + if ((ipcb->local_port == port) && + /* IP address matches or any IP used? */ + (ip_addr_cmp(&ipcb->local_ip, ipaddr) || ip_addr_isany(ipaddr) || + ip_addr_isany(&ipcb->local_ip))) { + /* other PCB already binds to this local IP and port */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_bind: local port %"U16_F" already bound by another pcb\n", port)); + return ERR_USE; + } + } + } + } + } + + ip_addr_set_ipaddr(&pcb->local_ip, ipaddr); + + pcb->local_port = port; + mib2_udp_bind(pcb); + /* pcb not active yet? */ + if (rebind == 0) { + /* place the PCB on the active list if not already there */ + pcb->next = udp_pcbs; + udp_pcbs = pcb; + } + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_bind: bound to ")); + ip_addr_debug_print_val(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, pcb->local_ip); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->local_port)); + return ERR_OK; +} + +/** + * @ingroup udp_raw + * Bind an UDP PCB to a specific netif. + * After calling this function, all packets received via this PCB + * are guaranteed to have come in via the specified netif, and all + * outgoing packets will go out via the specified netif. + * + * @param pcb UDP PCB to be bound. + * @param netif netif to bind udp pcb to. Can be NULL. + * + * @see udp_disconnect() + */ +void +udp_bind_netif(struct udp_pcb *pcb, const struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif != NULL) { + pcb->netif_idx = netif_get_index(netif); + } else { + pcb->netif_idx = NETIF_NO_INDEX; + } +} + +/** + * @ingroup udp_raw + * Sets the remote end of the pcb. This function does not generate any + * network traffic, but only sets the remote address of the pcb. + * + * @param pcb UDP PCB to be connected with remote address ipaddr and port. + * @param ipaddr remote IP address to connect with. + * @param port remote UDP port to connect with. + * + * @return lwIP error code + * + * ipaddr & port are expected to be in the same byte order as in the pcb. + * + * The udp pcb is bound to a random local port if not already bound. 
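+ *
+ * A hypothetical connected-receive sketch (my_udp_recv, my_app_setup, the
+ * addresses and the port numbers are illustrative; after udp_connect() only
+ * datagrams from that remote address and port reach the callback):
+ * @code
+ *   static void my_udp_recv(void *arg, struct udp_pcb *upcb, struct pbuf *p,
+ *                           const ip_addr_t *addr, u16_t port)
+ *   {
+ *     LWIP_UNUSED_ARG(arg);
+ *     LWIP_UNUSED_ARG(upcb);
+ *     LWIP_UNUSED_ARG(addr);
+ *     LWIP_UNUSED_ARG(port);
+ *     if (p != NULL) {
+ *       pbuf_free(p);
+ *     }
+ *   }
+ *
+ *   static void my_app_setup(void)
+ *   {
+ *     ip_addr_t peer;
+ *     struct udp_pcb *pcb = udp_new();
+ *     IP_ADDR4(&peer, 192, 168, 1, 10);
+ *     if (pcb != NULL) {
+ *       udp_bind(pcb, IP_ADDR_ANY, 5000);
+ *       udp_connect(pcb, &peer, 5000);
+ *       udp_recv(pcb, my_udp_recv, NULL);
+ *     }
+ *   }
+ * @endcode
+ * The receive callback owns the pbuf and must free it (here the payload is
+ * simply discarded).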
+ * + * @see udp_disconnect() + */ +err_t +udp_connect(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + struct udp_pcb *ipcb; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_connect: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_connect: invalid ipaddr", ipaddr != NULL, return ERR_ARG); + + if (pcb->local_port == 0) { + err_t err = udp_bind(pcb, &pcb->local_ip, pcb->local_port); + if (err != ERR_OK) { + return err; + } + } + + ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now, + * using the bound address to make a more informed decision when possible. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->remote_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + pcb->remote_port = port; + pcb->flags |= UDP_FLAGS_CONNECTED; + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_connect: connected to ")); + ip_addr_debug_print_val(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + pcb->remote_ip); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->remote_port)); + + /* Insert UDP PCB into the list of active UDP PCBs. */ + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + if (pcb == ipcb) { + /* already on the list, just return */ + return ERR_OK; + } + } + /* PCB not yet on the list, add PCB now */ + pcb->next = udp_pcbs; + udp_pcbs = pcb; + return ERR_OK; +} + +/** + * @ingroup udp_raw + * Remove the remote end of the pcb. This function does not generate + * any network traffic, but only removes the remote address of the pcb. + * + * @param pcb the udp pcb to disconnect. + */ +void +udp_disconnect(struct udp_pcb *pcb) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_disconnect: invalid pcb", pcb != NULL, return); + + /* reset remote address association */ +#if LWIP_IPV4 && LWIP_IPV6 + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { + ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE); + } else { +#endif + ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip); +#if LWIP_IPV4 && LWIP_IPV6 + } +#endif + pcb->remote_port = 0; + pcb->netif_idx = NETIF_NO_INDEX; + /* mark PCB as unconnected */ + udp_clear_flags(pcb, UDP_FLAGS_CONNECTED); +} + +/** + * @ingroup udp_raw + * Set a receive callback for a UDP PCB. + * This callback will be called when receiving a datagram for the pcb. + * + * @param pcb the pcb for which to set the recv callback + * @param recv function pointer of the callback function + * @param recv_arg additional argument to pass to the callback function + */ +void +udp_recv(struct udp_pcb *pcb, udp_recv_fn recv, void *recv_arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_recv: invalid pcb", pcb != NULL, return); + + /* remember recv() callback and user data */ + pcb->recv = recv; + pcb->recv_arg = recv_arg; +} + +/** + * @ingroup udp_raw + * Removes and deallocates the pcb. + * + * @param pcb UDP PCB to be removed. The PCB is removed from the list of + * UDP PCB's and the data structure is freed from memory. + * + * @see udp_new() + */ +void +udp_remove(struct udp_pcb *pcb) +{ + struct udp_pcb *pcb2; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_remove: invalid pcb", pcb != NULL, return); + + mib2_udp_unbind(pcb); + /* pcb to be removed is first in list? 
*/ + if (udp_pcbs == pcb) { + /* make list start at 2nd pcb */ + udp_pcbs = udp_pcbs->next; + /* pcb not 1st in list */ + } else { + for (pcb2 = udp_pcbs; pcb2 != NULL; pcb2 = pcb2->next) { + /* find pcb in udp_pcbs list */ + if (pcb2->next != NULL && pcb2->next == pcb) { + /* remove pcb from list */ + pcb2->next = pcb->next; + break; + } + } + } + memp_free(MEMP_UDP_PCB, pcb); +} + +/** + * @ingroup udp_raw + * Creates a new UDP pcb which can be used for UDP communication. The + * pcb is not active until it has either been bound to a local address + * or connected to a remote address. + * + * @return The UDP PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @see udp_remove() + */ +struct udp_pcb * +udp_new(void) +{ + struct udp_pcb *pcb; + + LWIP_ASSERT_CORE_LOCKED(); + + pcb = (struct udp_pcb *)memp_malloc(MEMP_UDP_PCB); + /* could allocate UDP PCB? */ + if (pcb != NULL) { + /* UDP Lite: by initializing to all zeroes, chksum_len is set to 0 + * which means checksum is generated over the whole datagram per default + * (recommended as default by RFC 3828). */ + /* initialize PCB to all zeroes */ + memset(pcb, 0, sizeof(struct udp_pcb)); + pcb->ttl = UDP_TTL; +#if LWIP_MULTICAST_TX_OPTIONS + udp_set_multicast_ttl(pcb, UDP_TTL); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + } + return pcb; +} + +/** + * @ingroup udp_raw + * Create a UDP PCB for specific IP type. + * The pcb is not active until it has either been bound to a local address + * or connected to a remote address. + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) packets, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. + * @return The UDP PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @see udp_remove() + */ +struct udp_pcb * +udp_new_ip_type(u8_t type) +{ + struct udp_pcb *pcb; + + LWIP_ASSERT_CORE_LOCKED(); + + pcb = udp_new(); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** This function is called from netif.c when address is changed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change + */ +void udp_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ + struct udp_pcb *upcb; + + if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) { + for (upcb = udp_pcbs; upcb != NULL; upcb = upcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&upcb->local_ip, old_addr)) { + /* The PCB is bound to the old ipaddr and + * is set to bound to the new one instead */ + ip_addr_copy(upcb->local_ip, *new_addr); + } + } + } +} + +#if UDP_DEBUG +/** + * Print UDP header information for debug purposes. + * + * @param udphdr pointer to the udp header in memory. 
+ */ +void +udp_debug_print(struct udp_hdr *udphdr) +{ + LWIP_DEBUGF(UDP_DEBUG, ("UDP header:\n")); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n", + lwip_ntohs(udphdr->src), lwip_ntohs(udphdr->dest))); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | 0x%04"X16_F" | (len, chksum)\n", + lwip_ntohs(udphdr->len), lwip_ntohs(udphdr->chksum))); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); +} +#endif /* UDP_DEBUG */ + +#endif /* LWIP_UDP */ diff --git a/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h b/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h new file mode 100644 index 00000000..0ed9baf3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/sockets.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/sockets.h" diff --git a/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h b/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h new file mode 100644 index 00000000..6b8e63a5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h @@ -0,0 +1,36 @@ +/** + * @file + * This file is a posix wrapper for lwip/if_api.h. + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/if_api.h" diff --git a/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h b/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h new file mode 100644 index 00000000..12d4c7f5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/netdb.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/netdb.h" diff --git a/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h b/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h new file mode 100644 index 00000000..0ed9baf3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/sockets.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/sockets.h" diff --git a/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h b/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h new file mode 100644 index 00000000..98a9aec9 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix/stdc wrapper for lwip/errno.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/errno.h" diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h new file mode 100644 index 00000000..97abc54d --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h @@ -0,0 +1,201 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * + * This file contains the generic API. + * For more details see @ref altcp_api. 
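The compat/posix wrappers added above exist so portable application code can keep including the familiar POSIX header names; each wrapper simply forwards to the corresponding lwIP header. A minimal sketch of what that enables, assuming LWIP_SOCKET is enabled, the compat/posix directory is on the include path, and 192.0.2.1:80 is a placeholder peer:

/* Illustrative only: a blocking TCP client written against the POSIX-style
 * headers wrapped above. */
#include <sys/socket.h>   /* resolves to lwip/sockets.h via the compat wrapper */
#include <arpa/inet.h>    /* resolves to lwip/sockets.h via the compat wrapper */
#include <string.h>

static void example_socket_client(void)
{
  struct sockaddr_in dst;
  int s = lwip_socket(AF_INET, SOCK_STREAM, 0);
  if (s < 0) {
    return;
  }
  memset(&dst, 0, sizeof(dst));
  dst.sin_family = AF_INET;
  dst.sin_port = lwip_htons(80);
  dst.sin_addr.s_addr = inet_addr("192.0.2.1");
  if (lwip_connect(s, (struct sockaddr *)&dst, sizeof(dst)) == 0) {
    const char req[] = "GET / HTTP/1.0\r\n\r\n";
    lwip_send(s, req, sizeof(req) - 1, 0);
  }
  lwip_close(s);
}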
+ */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_H +#define LWIP_HDR_ALTCP_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcpbase.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_pcb; +struct altcp_functions; + +typedef err_t (*altcp_accept_fn)(void *arg, struct altcp_pcb *new_conn, err_t err); +typedef err_t (*altcp_connected_fn)(void *arg, struct altcp_pcb *conn, err_t err); +typedef err_t (*altcp_recv_fn)(void *arg, struct altcp_pcb *conn, struct pbuf *p, err_t err); +typedef err_t (*altcp_sent_fn)(void *arg, struct altcp_pcb *conn, u16_t len); +typedef err_t (*altcp_poll_fn)(void *arg, struct altcp_pcb *conn); +typedef void (*altcp_err_fn)(void *arg, err_t err); + +typedef struct altcp_pcb* (*altcp_new_fn)(void *arg, u8_t ip_type); + +struct altcp_pcb { + const struct altcp_functions *fns; + struct altcp_pcb *inner_conn; + void *arg; + void *state; + /* application callbacks */ + altcp_accept_fn accept; + altcp_connected_fn connected; + altcp_recv_fn recv; + altcp_sent_fn sent; + altcp_poll_fn poll; + altcp_err_fn err; + u8_t pollinterval; +}; + +/** @ingroup altcp */ +typedef struct altcp_allocator_s { + /** Allocator function */ + altcp_new_fn alloc; + /** Argument to allocator function */ + void *arg; +} altcp_allocator_t; + +struct altcp_pcb *altcp_new(altcp_allocator_t *allocator); +struct altcp_pcb *altcp_new_ip6(altcp_allocator_t *allocator); +struct altcp_pcb *altcp_new_ip_type(altcp_allocator_t *allocator, u8_t ip_type); + +void altcp_arg(struct altcp_pcb *conn, void *arg); +void altcp_accept(struct altcp_pcb *conn, altcp_accept_fn accept); +void altcp_recv(struct altcp_pcb *conn, altcp_recv_fn recv); +void altcp_sent(struct altcp_pcb *conn, altcp_sent_fn sent); +void altcp_poll(struct altcp_pcb *conn, altcp_poll_fn poll, u8_t interval); +void altcp_err(struct altcp_pcb *conn, altcp_err_fn 
err); + +void altcp_recved(struct altcp_pcb *conn, u16_t len); +err_t altcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port); +err_t altcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected); + +/* return conn for source code compatibility to tcp callback API only */ +struct altcp_pcb *altcp_listen_with_backlog_and_err(struct altcp_pcb *conn, u8_t backlog, err_t *err); +#define altcp_listen_with_backlog(conn, backlog) altcp_listen_with_backlog_and_err(conn, backlog, NULL) +/** @ingroup altcp */ +#define altcp_listen(conn) altcp_listen_with_backlog_and_err(conn, TCP_DEFAULT_LISTEN_BACKLOG, NULL) + +void altcp_abort(struct altcp_pcb *conn); +err_t altcp_close(struct altcp_pcb *conn); +err_t altcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx); + +err_t altcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags); +err_t altcp_output(struct altcp_pcb *conn); + +u16_t altcp_mss(struct altcp_pcb *conn); +u16_t altcp_sndbuf(struct altcp_pcb *conn); +u16_t altcp_sndqueuelen(struct altcp_pcb *conn); +void altcp_nagle_disable(struct altcp_pcb *conn); +void altcp_nagle_enable(struct altcp_pcb *conn); +int altcp_nagle_disabled(struct altcp_pcb *conn); + +void altcp_setprio(struct altcp_pcb *conn, u8_t prio); + +err_t altcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port); +ip_addr_t *altcp_get_ip(struct altcp_pcb *conn, int local); +u16_t altcp_get_port(struct altcp_pcb *conn, int local); + +#ifdef LWIP_DEBUG +enum tcp_state altcp_dbg_get_tcp_state(struct altcp_pcb *conn); +#endif + +#ifdef __cplusplus +} +#endif + +#else /* LWIP_ALTCP */ + +/* ALTCP disabled, define everything to link against tcp callback API (e.g. to get a small non-ssl httpd) */ + +#include "lwip/tcp.h" + +#define altcp_accept_fn tcp_accept_fn +#define altcp_connected_fn tcp_connected_fn +#define altcp_recv_fn tcp_recv_fn +#define altcp_sent_fn tcp_sent_fn +#define altcp_poll_fn tcp_poll_fn +#define altcp_err_fn tcp_err_fn + +#define altcp_pcb tcp_pcb +#define altcp_tcp_new_ip_type tcp_new_ip_type +#define altcp_tcp_new tcp_new +#define altcp_tcp_new_ip6 tcp_new_ip6 + +#define altcp_new(allocator) tcp_new() +#define altcp_new_ip6(allocator) tcp_new_ip6() +#define altcp_new_ip_type(allocator, ip_type) tcp_new_ip_type(ip_type) + +#define altcp_arg tcp_arg +#define altcp_accept tcp_accept +#define altcp_recv tcp_recv +#define altcp_sent tcp_sent +#define altcp_poll tcp_poll +#define altcp_err tcp_err + +#define altcp_recved tcp_recved +#define altcp_bind tcp_bind +#define altcp_connect tcp_connect + +#define altcp_listen_with_backlog_and_err tcp_listen_with_backlog_and_err +#define altcp_listen_with_backlog tcp_listen_with_backlog +#define altcp_listen tcp_listen + +#define altcp_abort tcp_abort +#define altcp_close tcp_close +#define altcp_shutdown tcp_shutdown + +#define altcp_write tcp_write +#define altcp_output tcp_output + +#define altcp_mss tcp_mss +#define altcp_sndbuf tcp_sndbuf +#define altcp_sndqueuelen tcp_sndqueuelen +#define altcp_nagle_disable tcp_nagle_disable +#define altcp_nagle_enable tcp_nagle_enable +#define altcp_nagle_disabled tcp_nagle_disabled +#define altcp_setprio tcp_setprio + +#define altcp_get_tcp_addrinfo tcp_get_tcp_addrinfo +#define altcp_get_ip(pcb, local) ((local) ? 
(&(pcb)->local_ip) : (&(pcb)->remote_ip)) + +#ifdef LWIP_DEBUG +#define altcp_dbg_get_tcp_state tcp_dbg_get_tcp_state +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h new file mode 100644 index 00000000..dbde5846 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h @@ -0,0 +1,72 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * + * This file contains the base implementation calling into tcp. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
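Taken together, the altcp declarations above mirror the raw TCP callback API. A minimal connect/receive sketch against that API, meant to run in the tcpip thread and assuming the plain-TCP allocator altcp_tcp_alloc declared in altcp_tcp.h below; the callback names and port are placeholders:

#include "lwip/altcp.h"
#include "lwip/altcp_tcp.h"
#include "lwip/pbuf.h"

static err_t example_recv(void *arg, struct altcp_pcb *conn, struct pbuf *p, err_t err)
{
  (void)arg; (void)err;
  if (p == NULL) {                 /* remote side closed the connection */
    return altcp_close(conn);
  }
  altcp_recved(conn, p->tot_len);  /* update the receive window */
  pbuf_free(p);
  return ERR_OK;
}

static err_t example_connected(void *arg, struct altcp_pcb *conn, err_t err)
{
  /* static storage, so it may be sent without copying (apiflags 0) */
  static const char req[] = "GET / HTTP/1.0\r\n\r\n";
  (void)arg;
  if (err != ERR_OK) {
    return err;
  }
  return altcp_write(conn, req, sizeof(req) - 1, 0);
}

/* Call from the tcpip thread (or with the core locked). */
static void example_altcp_connect(const ip_addr_t *addr)
{
  altcp_allocator_t tcp_alloc = { altcp_tcp_alloc, NULL };
  struct altcp_pcb *conn = altcp_new(&tcp_alloc);
  if (conn != NULL) {
    altcp_recv(conn, example_recv);
    altcp_connect(conn, addr, 80, example_connected);
  }
}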
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TCP_H +#define LWIP_HDR_ALTCP_TCP_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_pcb *altcp_tcp_new_ip_type(u8_t ip_type); + +#define altcp_tcp_new() altcp_tcp_new_ip_type(IPADDR_TYPE_V4) +#define altcp_tcp_new_ip6() altcp_tcp_new_ip_type(IPADDR_TYPE_V6) + +struct altcp_pcb *altcp_tcp_alloc(void *arg, u8_t ip_type); + +struct tcp_pcb; +struct altcp_pcb *altcp_tcp_wrap(struct tcp_pcb *tpcb); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_TCP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h b/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h new file mode 100644 index 00000000..7b17c608 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h @@ -0,0 +1,117 @@ +/** + * @file + * Application layered TCP/TLS connection API (to be used from TCPIP thread) + * + * @defgroup altcp_tls TLS layer + * @ingroup altcp + * This file contains function prototypes for a TLS layer. + * A port to ARM mbedtls is provided in the apps/ tree + * (LWIP_ALTCP_TLS_MBEDTLS option). + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TLS_H +#define LWIP_HDR_ALTCP_TLS_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#if LWIP_ALTCP_TLS + +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup altcp_tls + * ALTCP_TLS configuration handle, content depends on port (e.g. 
mbedtls) + */ +struct altcp_tls_config; + +/** @ingroup altcp_tls + * Create an ALTCP_TLS server configuration handle + */ +struct altcp_tls_config *altcp_tls_create_config_server_privkey_cert(const u8_t *privkey, size_t privkey_len, + const u8_t *privkey_pass, size_t privkey_pass_len, + const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Create an ALTCP_TLS client configuration handle + */ +struct altcp_tls_config *altcp_tls_create_config_client(const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Create an ALTCP_TLS client configuration handle with two-way server/client authentication + */ +struct altcp_tls_config *altcp_tls_create_config_client_2wayauth(const u8_t *ca, size_t ca_len, const u8_t *privkey, size_t privkey_len, + const u8_t *privkey_pass, size_t privkey_pass_len, + const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Free an ALTCP_TLS configuration handle + */ +void altcp_tls_free_config(struct altcp_tls_config *conf); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS layer wrapping an existing pcb as inner connection (e.g. TLS over TCP) + */ +struct altcp_pcb *altcp_tls_wrap(struct altcp_tls_config *config, struct altcp_pcb *inner_pcb); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS pcb and its inner tcp pcb + */ +struct altcp_pcb *altcp_tls_new(struct altcp_tls_config *config, u8_t ip_type); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS layer pcb and its inner tcp pcb. + * Same as @ref altcp_tls_new but this allocator function fits to + * @ref altcp_allocator_t / @ref altcp_new.\n + 'arg' must contain a struct altcp_tls_config *. + */ +struct altcp_pcb *altcp_tls_alloc(void *arg, u8_t ip_type); + +/** @ingroup altcp_tls + * Return pointer to internal TLS context so application can tweak it. + * Real type depends on port (e.g. mbedtls) + */ +void *altcp_tls_context(struct altcp_pcb *conn); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP_TLS */ +#endif /* LWIP_ALTCP */ +#endif /* LWIP_HDR_ALTCP_TLS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/api.h b/Middlewares/Third_Party/LwIP/src/include/lwip/api.h new file mode 100644 index 00000000..c2afaf26 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/api.h @@ -0,0 +1,431 @@ +/** + * @file + * netconn API (to be used from non-TCPIP threads) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
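A short client-side sketch of the TLS functions declared above, assuming LWIP_ALTCP_TLS is enabled and a TLS port (e.g. the mbedTLS one) is compiled in; the certificate buffer is a placeholder supplied by the application:

#include "lwip/altcp.h"
#include "lwip/altcp_tls.h"

/* ca_cert/ca_cert_len stand in for a CA certificate supplied by the
 * application; the expected encoding depends on the TLS port. */
static struct altcp_pcb *example_tls_client_pcb(const u8_t *ca_cert, size_t ca_cert_len)
{
  struct altcp_tls_config *cfg = altcp_tls_create_config_client(ca_cert, ca_cert_len);
  if (cfg == NULL) {
    return NULL;
  }
  /* Creates the TLS pcb together with its inner TCP pcb; alternatively the
   * config can be passed as 'arg' of an altcp_allocator_t using altcp_tls_alloc. */
  return altcp_tls_new(cfg, IPADDR_TYPE_V4);
}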
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_API_H +#define LWIP_HDR_API_H + +#include "lwip/opt.h" + +#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ +/* Note: Netconn API is always available when sockets are enabled - + * sockets are implemented on top of them */ + +#include "lwip/arch.h" +#include "lwip/netbuf.h" +#include "lwip/sys.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Throughout this file, IP addresses and port numbers are expected to be in + * the same byte order as in the corresponding pcb. + */ + +/* Flags for netconn_write (u8_t) */ +#define NETCONN_NOFLAG 0x00 +#define NETCONN_NOCOPY 0x00 /* Only for source code compatibility */ +#define NETCONN_COPY 0x01 +#define NETCONN_MORE 0x02 +#define NETCONN_DONTBLOCK 0x04 +#define NETCONN_NOAUTORCVD 0x08 /* prevent netconn_recv_data_tcp() from updating the tcp window - must be done manually via netconn_tcp_recvd() */ +#define NETCONN_NOFIN 0x10 /* upper layer already received data, leave FIN in queue until called again */ + +/* Flags for struct netconn.flags (u8_t) */ +/** This netconn had an error, don't block on recvmbox/acceptmbox any more */ +#define NETCONN_FLAG_MBOXCLOSED 0x01 +/** Should this netconn avoid blocking? */ +#define NETCONN_FLAG_NON_BLOCKING 0x02 +/** Was the last connect action a non-blocking one? */ +#define NETCONN_FLAG_IN_NONBLOCKING_CONNECT 0x04 +#if LWIP_NETCONN_FULLDUPLEX + /** The mbox of this netconn is being deallocated, don't use it anymore */ +#define NETCONN_FLAG_MBOXINVALID 0x08 +#endif /* LWIP_NETCONN_FULLDUPLEX */ +/** If a nonblocking write has been rejected before, poll_tcp needs to + check if the netconn is writable again */ +#define NETCONN_FLAG_CHECK_WRITESPACE 0x10 +#if LWIP_IPV6 +/** If this flag is set then only IPv6 communication is allowed on the + netconn. As per RFC#3493 this features defaults to OFF allowing + dual-stack usage by default. 
*/ +#define NETCONN_FLAG_IPV6_V6ONLY 0x20 +#endif /* LWIP_IPV6 */ +#if LWIP_NETBUF_RECVINFO +/** Received packet info will be recorded for this netconn */ +#define NETCONN_FLAG_PKTINFO 0x40 +#endif /* LWIP_NETBUF_RECVINFO */ +/** A FIN has been received but not passed to the application yet */ +#define NETCONN_FIN_RX_PENDING 0x80 + +/* Helpers to process several netconn_types by the same code */ +#define NETCONNTYPE_GROUP(t) ((t)&0xF0) +#define NETCONNTYPE_DATAGRAM(t) ((t)&0xE0) +#if LWIP_IPV6 +#define NETCONN_TYPE_IPV6 0x08 +#define NETCONNTYPE_ISIPV6(t) (((t)&NETCONN_TYPE_IPV6) != 0) +#define NETCONNTYPE_ISUDPLITE(t) (((t)&0xF3) == NETCONN_UDPLITE) +#define NETCONNTYPE_ISUDPNOCHKSUM(t) (((t)&0xF3) == NETCONN_UDPNOCHKSUM) +#else /* LWIP_IPV6 */ +#define NETCONNTYPE_ISIPV6(t) (0) +#define NETCONNTYPE_ISUDPLITE(t) ((t) == NETCONN_UDPLITE) +#define NETCONNTYPE_ISUDPNOCHKSUM(t) ((t) == NETCONN_UDPNOCHKSUM) +#endif /* LWIP_IPV6 */ + +/** @ingroup netconn_common + * Protocol family and type of the netconn + */ +enum netconn_type { + NETCONN_INVALID = 0, + /** TCP IPv4 */ + NETCONN_TCP = 0x10, +#if LWIP_IPV6 + /** TCP IPv6 */ + NETCONN_TCP_IPV6 = NETCONN_TCP | NETCONN_TYPE_IPV6 /* 0x18 */, +#endif /* LWIP_IPV6 */ + /** UDP IPv4 */ + NETCONN_UDP = 0x20, + /** UDP IPv4 lite */ + NETCONN_UDPLITE = 0x21, + /** UDP IPv4 no checksum */ + NETCONN_UDPNOCHKSUM = 0x22, + +#if LWIP_IPV6 + /** UDP IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + NETCONN_UDP_IPV6 = NETCONN_UDP | NETCONN_TYPE_IPV6 /* 0x28 */, + /** UDP IPv6 lite (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + NETCONN_UDPLITE_IPV6 = NETCONN_UDPLITE | NETCONN_TYPE_IPV6 /* 0x29 */, + /** UDP IPv6 no checksum (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + NETCONN_UDPNOCHKSUM_IPV6 = NETCONN_UDPNOCHKSUM | NETCONN_TYPE_IPV6 /* 0x2a */, +#endif /* LWIP_IPV6 */ + + /** Raw connection IPv4 */ + NETCONN_RAW = 0x40 +#if LWIP_IPV6 + /** Raw connection IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + , NETCONN_RAW_IPV6 = NETCONN_RAW | NETCONN_TYPE_IPV6 /* 0x48 */ +#endif /* LWIP_IPV6 */ +}; + +/** Current state of the netconn. Non-TCP netconns are always + * in state NETCONN_NONE! */ +enum netconn_state { + NETCONN_NONE, + NETCONN_WRITE, + NETCONN_LISTEN, + NETCONN_CONNECT, + NETCONN_CLOSE +}; + +/** Used to inform the callback function about changes + * + * Event explanation: + * + * In the netconn implementation, there are three ways to block a client: + * + * - accept mbox (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0); in netconn_accept()) + * - receive mbox (sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0); in netconn_recv_data()) + * - send queue is full (sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); in lwip_netconn_do_write()) + * + * The events have to be seen as events signaling the state of these mboxes/semaphores. For non-blocking + * connections, you need to know in advance whether a call to a netconn function call would block or not, + * and these events tell you about that. + * + * RCVPLUS events say: Safe to perform a potentially blocking call call once more. + * They are counted in sockets - three RCVPLUS events for accept mbox means you are safe + * to call netconn_accept 3 times without being blocked. + * Same thing for receive mbox. + * + * RCVMINUS events say: Your call to to a possibly blocking function is "acknowledged". + * Socket implementation decrements the counter. 
+ * + * For TX, there is no need to count, its merely a flag. SENDPLUS means you may send something. + * SENDPLUS occurs when enough data was delivered to peer so netconn_send() can be called again. + * A SENDMINUS event occurs when the next call to a netconn_send() would be blocking. + */ +enum netconn_evt { + NETCONN_EVT_RCVPLUS, + NETCONN_EVT_RCVMINUS, + NETCONN_EVT_SENDPLUS, + NETCONN_EVT_SENDMINUS, + NETCONN_EVT_ERROR +}; + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** Used for netconn_join_leave_group() */ +enum netconn_igmp { + NETCONN_JOIN, + NETCONN_LEAVE +}; +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/* Used for netconn_gethostbyname_addrtype(), these should match the DNS_ADDRTYPE defines in dns.h */ +#define NETCONN_DNS_DEFAULT NETCONN_DNS_IPV4_IPV6 +#define NETCONN_DNS_IPV4 0 +#define NETCONN_DNS_IPV6 1 +#define NETCONN_DNS_IPV4_IPV6 2 /* try to resolve IPv4 first, try IPv6 if IPv4 fails only */ +#define NETCONN_DNS_IPV6_IPV4 3 /* try to resolve IPv6 first, try IPv4 if IPv6 fails only */ +#endif /* LWIP_DNS */ + +/* forward-declare some structs to avoid to include their headers */ +struct ip_pcb; +struct tcp_pcb; +struct udp_pcb; +struct raw_pcb; +struct netconn; +struct api_msg; + +/** A callback prototype to inform about events for a netconn */ +typedef void (* netconn_callback)(struct netconn *, enum netconn_evt, u16_t len); + +/** A netconn descriptor */ +struct netconn { + /** type of the netconn (TCP, UDP or RAW) */ + enum netconn_type type; + /** current state of the netconn */ + enum netconn_state state; + /** the lwIP internal protocol control block */ + union { + struct ip_pcb *ip; + struct tcp_pcb *tcp; + struct udp_pcb *udp; + struct raw_pcb *raw; + } pcb; + /** the last asynchronous unreported error this netconn had */ + err_t pending_err; +#if !LWIP_NETCONN_SEM_PER_THREAD + /** sem that is used to synchronously execute functions in the core context */ + sys_sem_t op_completed; +#endif + /** mbox where received packets are stored until they are fetched + by the netconn application thread (can grow quite big) */ + sys_mbox_t recvmbox; +#if LWIP_TCP + /** mbox where new connections are stored until processed + by the application thread */ + sys_mbox_t acceptmbox; +#endif /* LWIP_TCP */ +#if LWIP_NETCONN_FULLDUPLEX + /** number of threads waiting on an mbox. This is required to unblock + all threads when closing while threads are waiting. */ + int mbox_threads_waiting; +#endif + /** only used for socket layer */ +#if LWIP_SOCKET + int socket; +#endif /* LWIP_SOCKET */ +#if LWIP_SO_SNDTIMEO + /** timeout to wait for sending data (which means enqueueing data for sending + in internal buffers) in milliseconds */ + s32_t send_timeout; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVTIMEO + /** timeout in milliseconds to wait for new data to be received + (or connections to arrive for listening netconns) */ + u32_t recv_timeout; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + /** maximum amount of bytes queued in recvmbox + not used for TCP: adjust TCP_WND instead! 
*/ + int recv_bufsize; + /** number of bytes currently in recvmbox to be received, + tested against recv_bufsize to limit bytes on recvmbox + for UDP and RAW, used for FIONREAD */ + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + /** values <0 mean linger is disabled, values > 0 are seconds to linger */ + s16_t linger; +#endif /* LWIP_SO_LINGER */ + /** flags holding more netconn-internal state, see NETCONN_FLAG_* defines */ + u8_t flags; +#if LWIP_TCP + /** TCP: when data passed to netconn_write doesn't fit into the send buffer, + this temporarily stores the message. + Also used during connect and close. */ + struct api_msg *current_msg; +#endif /* LWIP_TCP */ + /** A callback function that is informed about events for this netconn */ + netconn_callback callback; +}; + +/** This vector type is passed to @ref netconn_write_vectors_partly to send + * multiple buffers at once. + * ATTENTION: This type has to directly map struct iovec since one is casted + * into the other! + */ +struct netvector { + /** pointer to the application buffer that contains the data to send */ + const void *ptr; + /** size of the application data to send */ + size_t len; +}; + +/** Register an Network connection event */ +#define API_EVENT(c,e,l) if (c->callback) { \ + (*c->callback)(c, e, l); \ + } + +/* Network connection functions: */ + +/** @ingroup netconn_common + * Create new netconn connection + * @param t @ref netconn_type */ +#define netconn_new(t) netconn_new_with_proto_and_callback(t, 0, NULL) +#define netconn_new_with_callback(t, c) netconn_new_with_proto_and_callback(t, 0, c) +struct netconn *netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto, + netconn_callback callback); +err_t netconn_prepare_delete(struct netconn *conn); +err_t netconn_delete(struct netconn *conn); +/** Get the type of a netconn (as enum netconn_type). 
*/ +#define netconn_type(conn) (conn->type) + +err_t netconn_getaddr(struct netconn *conn, ip_addr_t *addr, + u16_t *port, u8_t local); +/** @ingroup netconn_common */ +#define netconn_peer(c,i,p) netconn_getaddr(c,i,p,0) +/** @ingroup netconn_common */ +#define netconn_addr(c,i,p) netconn_getaddr(c,i,p,1) + +err_t netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port); +err_t netconn_bind_if(struct netconn *conn, u8_t if_idx); +err_t netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port); +err_t netconn_disconnect (struct netconn *conn); +err_t netconn_listen_with_backlog(struct netconn *conn, u8_t backlog); +/** @ingroup netconn_tcp */ +#define netconn_listen(conn) netconn_listen_with_backlog(conn, TCP_DEFAULT_LISTEN_BACKLOG) +err_t netconn_accept(struct netconn *conn, struct netconn **new_conn); +err_t netconn_recv(struct netconn *conn, struct netbuf **new_buf); +err_t netconn_recv_udp_raw_netbuf(struct netconn *conn, struct netbuf **new_buf); +err_t netconn_recv_udp_raw_netbuf_flags(struct netconn *conn, struct netbuf **new_buf, u8_t apiflags); +err_t netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf); +err_t netconn_recv_tcp_pbuf_flags(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags); +err_t netconn_tcp_recvd(struct netconn *conn, size_t len); +err_t netconn_sendto(struct netconn *conn, struct netbuf *buf, + const ip_addr_t *addr, u16_t port); +err_t netconn_send(struct netconn *conn, struct netbuf *buf); +err_t netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size, + u8_t apiflags, size_t *bytes_written); +err_t netconn_write_vectors_partly(struct netconn *conn, struct netvector *vectors, u16_t vectorcnt, + u8_t apiflags, size_t *bytes_written); +/** @ingroup netconn_tcp */ +#define netconn_write(conn, dataptr, size, apiflags) \ + netconn_write_partly(conn, dataptr, size, apiflags, NULL) +err_t netconn_close(struct netconn *conn); +err_t netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx); + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +err_t netconn_join_leave_group(struct netconn *conn, const ip_addr_t *multiaddr, + const ip_addr_t *netif_addr, enum netconn_igmp join_or_leave); +err_t netconn_join_leave_group_netif(struct netconn *conn, const ip_addr_t *multiaddr, + u8_t if_idx, enum netconn_igmp join_or_leave); +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ +#if LWIP_DNS +#if LWIP_IPV4 && LWIP_IPV6 +err_t netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype); +#define netconn_gethostbyname(name, addr) netconn_gethostbyname_addrtype(name, addr, NETCONN_DNS_DEFAULT) +#else /* LWIP_IPV4 && LWIP_IPV6 */ +err_t netconn_gethostbyname(const char *name, ip_addr_t *addr); +#define netconn_gethostbyname_addrtype(name, addr, dns_addrtype) netconn_gethostbyname(name, addr) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#endif /* LWIP_DNS */ + +err_t netconn_err(struct netconn *conn); +#define netconn_recv_bufsize(conn) ((conn)->recv_bufsize) + +#define netconn_set_flags(conn, set_flags) do { (conn)->flags = (u8_t)((conn)->flags | (set_flags)); } while(0) +#define netconn_clear_flags(conn, clr_flags) do { (conn)->flags = (u8_t)((conn)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define netconn_is_flag_set(conn, flag) (((conn)->flags & (flag)) != 0) + +/** Set the blocking status of netconn calls (@todo: write/send is missing) */ +#define netconn_set_nonblocking(conn, val) do { if(val) { \ + netconn_set_flags(conn, NETCONN_FLAG_NON_BLOCKING); \ +} else { \ + 
netconn_clear_flags(conn, NETCONN_FLAG_NON_BLOCKING); }} while(0) +/** Get the blocking status of netconn calls (@todo: write/send is missing) */ +#define netconn_is_nonblocking(conn) (((conn)->flags & NETCONN_FLAG_NON_BLOCKING) != 0) + +#if LWIP_IPV6 +/** @ingroup netconn_common + * TCP: Set the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY) + */ +#define netconn_set_ipv6only(conn, val) do { if(val) { \ + netconn_set_flags(conn, NETCONN_FLAG_IPV6_V6ONLY); \ +} else { \ + netconn_clear_flags(conn, NETCONN_FLAG_IPV6_V6ONLY); }} while(0) +/** @ingroup netconn_common + * TCP: Get the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY) + */ +#define netconn_get_ipv6only(conn) (((conn)->flags & NETCONN_FLAG_IPV6_V6ONLY) != 0) +#endif /* LWIP_IPV6 */ + +#if LWIP_SO_SNDTIMEO +/** Set the send timeout in milliseconds */ +#define netconn_set_sendtimeout(conn, timeout) ((conn)->send_timeout = (timeout)) +/** Get the send timeout in milliseconds */ +#define netconn_get_sendtimeout(conn) ((conn)->send_timeout) +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO +/** Set the receive timeout in milliseconds */ +#define netconn_set_recvtimeout(conn, timeout) ((conn)->recv_timeout = (timeout)) +/** Get the receive timeout in milliseconds */ +#define netconn_get_recvtimeout(conn) ((conn)->recv_timeout) +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF +/** Set the receive buffer in bytes */ +#define netconn_set_recvbufsize(conn, recvbufsize) ((conn)->recv_bufsize = (recvbufsize)) +/** Get the receive buffer in bytes */ +#define netconn_get_recvbufsize(conn) ((conn)->recv_bufsize) +#endif /* LWIP_SO_RCVBUF*/ + +#if LWIP_NETCONN_SEM_PER_THREAD +void netconn_thread_init(void); +void netconn_thread_cleanup(void); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define netconn_thread_init() +#define netconn_thread_cleanup() +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#endif /* LWIP_HDR_API_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h new file mode 100644 index 00000000..65d31279 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h @@ -0,0 +1,79 @@ +/** + * @file + * Application layered TCP connection API that executes a proxy-connect. + * + * This file provides a starting layer that executes a proxy-connect e.g. to + * set up TLS connections through a http proxy. + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
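A blocking netconn TCP client sketch using the functions and macros declared above; the peer and request are placeholders, and the receive-timeout setter needs LWIP_SO_RCVTIMEO:

#include "lwip/api.h"

/* Optional event callback: RCVPLUS/RCVMINUS etc. signal the mbox state as
 * described in the netconn_evt comment above; a socket-style layer would count them. */
static void example_netconn_evt(struct netconn *conn, enum netconn_evt evt, u16_t len)
{
  (void)conn; (void)evt; (void)len;
}

static err_t example_netconn_client(const ip_addr_t *addr)
{
  struct netbuf *buf;
  void *data;
  u16_t len;
  const char req[] = "GET / HTTP/1.0\r\n\r\n";
  struct netconn *conn = netconn_new_with_callback(NETCONN_TCP, example_netconn_evt);
  err_t err;

  if (conn == NULL) {
    return ERR_MEM;
  }
  netconn_set_recvtimeout(conn, 5000);   /* only if LWIP_SO_RCVTIMEO is enabled */
  err = netconn_connect(conn, addr, 80);
  if (err == ERR_OK) {
    err = netconn_write(conn, req, sizeof(req) - 1, NETCONN_COPY);
  }
  while (err == ERR_OK && netconn_recv(conn, &buf) == ERR_OK) {
    do {
      netbuf_data(buf, &data, &len);     /* consume one chunk of the netbuf */
    } while (netbuf_next(buf) >= 0);
    netbuf_delete(buf);
  }
  netconn_close(conn);
  netconn_delete(conn);
  return err;
}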
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H +#define LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_proxyconnect_config { + ip_addr_t proxy_addr; + u16_t proxy_port; +}; + + +struct altcp_pcb *altcp_proxyconnect_new(struct altcp_proxyconnect_config *config, struct altcp_pcb *inner_pcb); +struct altcp_pcb *altcp_proxyconnect_new_tcp(struct altcp_proxyconnect_config *config, u8_t ip_type); + +struct altcp_pcb *altcp_proxyconnect_alloc(void *arg, u8_t ip_type); + +#if LWIP_ALTCP_TLS +struct altcp_proxyconnect_tls_config { + struct altcp_proxyconnect_config proxy; + struct altcp_tls_config *tls_config; +}; + +struct altcp_pcb *altcp_proxyconnect_tls_alloc(void *arg, u8_t ip_type); +#endif /* LWIP_ALTCP_TLS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP */ +#endif /* LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h new file mode 100644 index 00000000..36cddd93 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h @@ -0,0 +1,67 @@ +/** + * @file + * Application layered TCP/TLS connection API (to be used from TCPIP thread) + * + * This file contains options for an mbedtls port of the TLS layer. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
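A sketch of wiring the proxy-connect layer declared above in front of a plain TCP pcb; the proxy address and port are placeholders, and the config object must outlive the connection:

#include "lwip/apps/altcp_proxyconnect.h"

static struct altcp_proxyconnect_config proxy_cfg;

static struct altcp_pcb *example_via_proxy(void)
{
  IP_ADDR4(&proxy_cfg.proxy_addr, 192, 0, 2, 10);   /* placeholder HTTP proxy */
  proxy_cfg.proxy_port = 8080;
  /* Allocates a new TCP pcb and layers the proxy CONNECT handshake on top. */
  return altcp_proxyconnect_new_tcp(&proxy_cfg, IPADDR_TYPE_V4);
}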
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TLS_OPTS_H +#define LWIP_HDR_ALTCP_TLS_OPTS_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +/** LWIP_ALTCP_TLS_MBEDTLS==1: use mbedTLS for TLS support for altcp API + * mbedtls include directory must be reachable via include search path + */ +#ifndef LWIP_ALTCP_TLS_MBEDTLS +#define LWIP_ALTCP_TLS_MBEDTLS 0 +#endif + +/** Configure debug level of this file */ +#ifndef ALTCP_MBEDTLS_DEBUG +#define ALTCP_MBEDTLS_DEBUG LWIP_DBG_OFF +#endif + +/** Set a session timeout in seconds for the basic session cache + * ATTENTION: Using a session cache can lower security by reusing keys! + */ +#ifndef ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS +#define ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS 0 +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_TLS_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h new file mode 100644 index 00000000..67b9a60a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
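The options above are normally set from lwipopts.h; an illustrative fragment enabling the mbedTLS-backed TLS layer (the mbedTLS sources and include path still have to be added to the build separately):

/* lwipopts.h fragment (illustrative); ALTCP_MBEDTLS_DEBUG and the
 * session-cache timeout keep their defaults here. */
#define LWIP_ALTCP              1
#define LWIP_ALTCP_TLS          1
#define LWIP_ALTCP_TLS_MBEDTLS  1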
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_APPS_FS_H +#define LWIP_HDR_APPS_FS_H + +#include "httpd_opts.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define FS_READ_EOF -1 +#define FS_READ_DELAYED -2 + +#if HTTPD_PRECALCULATED_CHECKSUM +struct fsdata_chksum { + u32_t offset; + u16_t chksum; + u16_t len; +}; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ + +#define FS_FILE_FLAGS_HEADER_INCLUDED 0x01 +#define FS_FILE_FLAGS_HEADER_PERSISTENT 0x02 +#define FS_FILE_FLAGS_HEADER_HTTPVER_1_1 0x04 +#define FS_FILE_FLAGS_SSI 0x08 + +/** Define FS_FILE_EXTENSION_T_DEFINED if you have typedef'ed to your private + * pointer type (defaults to 'void' so the default usage is 'void*') + */ +#ifndef FS_FILE_EXTENSION_T_DEFINED +typedef void fs_file_extension; +#endif + +struct fs_file { + const char *data; + int len; + int index; + /* pextension is free for implementations to hold private (extensional) + arbitrary data, e.g. holding some file state or file system handle */ + fs_file_extension *pextension; +#if HTTPD_PRECALCULATED_CHECKSUM + const struct fsdata_chksum *chksum; + u16_t chksum_count; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ + u8_t flags; +#if LWIP_HTTPD_CUSTOM_FILES + u8_t is_custom_file; +#endif /* LWIP_HTTPD_CUSTOM_FILES */ +#if LWIP_HTTPD_FILE_STATE + void *state; +#endif /* LWIP_HTTPD_FILE_STATE */ +}; + +#if LWIP_HTTPD_FS_ASYNC_READ +typedef void (*fs_wait_cb)(void *arg); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ + +err_t fs_open(struct fs_file *file, const char *name); +void fs_close(struct fs_file *file); +#if LWIP_HTTPD_DYNAMIC_FILE_READ +#if LWIP_HTTPD_FS_ASYNC_READ +int fs_read_async(struct fs_file *file, char *buffer, int count, fs_wait_cb callback_fn, void *callback_arg); +#else /* LWIP_HTTPD_FS_ASYNC_READ */ +int fs_read(struct fs_file *file, char *buffer, int count); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ +#endif /* LWIP_HTTPD_DYNAMIC_FILE_READ */ +#if LWIP_HTTPD_FS_ASYNC_READ +int fs_is_file_ready(struct fs_file *file, fs_wait_cb callback_fn, void *callback_arg); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ +int fs_bytes_left(struct fs_file *file); + +#if LWIP_HTTPD_FILE_STATE +/** This user-defined function is called when a file is opened. */ +void *fs_state_init(struct fs_file *file, const char *name); +/** This user-defined function is called when a file is closed. */ +void fs_state_free(struct fs_file *file, void *state); +#endif /* #if LWIP_HTTPD_FILE_STATE */ + +struct fsdata_file { + const struct fsdata_file *next; + const unsigned char *name; + const unsigned char *data; + int len; + u8_t flags; +#if HTTPD_PRECALCULATED_CHECKSUM + u16_t chksum_count; + const struct fsdata_chksum *chksum; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_FS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h new file mode 100644 index 00000000..8a063083 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h @@ -0,0 +1,160 @@ +/** + * @file + * HTTP client + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_APPS_HTTP_CLIENT_H +#define LWIP_HDR_APPS_HTTP_CLIENT_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/altcp.h" +#include "lwip/prot/iana.h" +#include "lwip/pbuf.h" + +#if LWIP_TCP && LWIP_CALLBACK_API + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @ingroup httpc + * HTTPC_HAVE_FILE_IO: define this to 1 to have functions dowloading directly + * to disk via fopen/fwrite. + * These functions are example implementations of the interface only. + */ +#ifndef LWIP_HTTPC_HAVE_FILE_IO +#define LWIP_HTTPC_HAVE_FILE_IO 0 +#endif + +/** + * @ingroup httpc + * The default TCP port used for HTTP + */ +#define HTTP_DEFAULT_PORT LWIP_IANA_PORT_HTTP + +/** + * @ingroup httpc + * HTTP client result codes + */ +typedef enum ehttpc_result { + /** File successfully received */ + HTTPC_RESULT_OK = 0, + /** Unknown error */ + HTTPC_RESULT_ERR_UNKNOWN = 1, + /** Connection to server failed */ + HTTPC_RESULT_ERR_CONNECT = 2, + /** Failed to resolve server hostname */ + HTTPC_RESULT_ERR_HOSTNAME = 3, + /** Connection unexpectedly closed by remote server */ + HTTPC_RESULT_ERR_CLOSED = 4, + /** Connection timed out (server didn't respond in time) */ + HTTPC_RESULT_ERR_TIMEOUT = 5, + /** Server responded with an error code */ + HTTPC_RESULT_ERR_SVR_RESP = 6, + /** Local memory error */ + HTTPC_RESULT_ERR_MEM = 7, + /** Local abort */ + HTTPC_RESULT_LOCAL_ABORT = 8, + /** Content length mismatch */ + HTTPC_RESULT_ERR_CONTENT_LEN = 9 +} httpc_result_t; + +typedef struct _httpc_state httpc_state_t; + +/** + * @ingroup httpc + * Prototype of a http client callback function + * + * @param arg argument specified when initiating the request + * @param httpc_result result of the http transfer (see enum httpc_result_t) + * @param rx_content_len number of bytes received (without headers) + * @param srv_res this contains the http status code received (if any) + * @param err an error returned by internal lwip functions, can help to specify + * the source of the error but must not necessarily be != ERR_OK + */ +typedef void (*httpc_result_fn)(void *arg, httpc_result_t httpc_result, u32_t rx_content_len, u32_t srv_res, err_t err); + +/** + * @ingroup httpc + * Prototype of http client callback: called when the headers are received + * + * @param connection http client connection 
+ * @param arg argument specified when initiating the request + * @param hdr header pbuf(s) (may contain data also) + * @param hdr_len length of the heders in 'hdr' + * @param content_len content length as received in the headers (-1 if not received) + * @return if != ERR_OK is returned, the connection is aborted + */ +typedef err_t (*httpc_headers_done_fn)(httpc_state_t *connection, void *arg, struct pbuf *hdr, u16_t hdr_len, u32_t content_len); + +typedef struct _httpc_connection { + ip_addr_t proxy_addr; + u16_t proxy_port; + u8_t use_proxy; + /* @todo: add username:pass? */ + +#if LWIP_ALTCP + altcp_allocator_t *altcp_allocator; +#endif + + /* this callback is called when the transfer is finished (or aborted) */ + httpc_result_fn result_fn; + /* this callback is called after receiving the http headers + It can abort the connection by returning != ERR_OK */ + httpc_headers_done_fn headers_done_fn; +} httpc_connection_t; + +err_t httpc_get_file(const ip_addr_t* server_addr, u16_t port, const char* uri, const httpc_connection_t *settings, + altcp_recv_fn recv_fn, void* callback_arg, httpc_state_t **connection); +err_t httpc_get_file_dns(const char* server_name, u16_t port, const char* uri, const httpc_connection_t *settings, + altcp_recv_fn recv_fn, void* callback_arg, httpc_state_t **connection); + +#if LWIP_HTTPC_HAVE_FILE_IO +err_t httpc_get_file_to_disk(const ip_addr_t* server_addr, u16_t port, const char* uri, const httpc_connection_t *settings, + void* callback_arg, const char* local_file_name, httpc_state_t **connection); +err_t httpc_get_file_dns_to_disk(const char* server_name, u16_t port, const char* uri, const httpc_connection_t *settings, + void* callback_arg, const char* local_file_name, httpc_state_t **connection); +#endif /* LWIP_HTTPC_HAVE_FILE_IO */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP && LWIP_CALLBACK_API */ + +#endif /* LWIP_HDR_APPS_HTTP_CLIENT_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h new file mode 100644 index 00000000..e872429f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h @@ -0,0 +1,255 @@ +/** + * @file + * HTTP server + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
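A sketch of starting a transfer with the client API declared above, assuming LWIP_DNS is enabled for the hostname variant; the host name, URI and callback names are placeholders:

#include "lwip/apps/http_client.h"

static void example_httpc_result(void *arg, httpc_result_t res, u32_t rx_len,
                                 u32_t srv_res, err_t err)
{
  (void)arg; (void)res; (void)rx_len; (void)srv_res; (void)err;
  /* res reports whether the transfer completed, timed out, was aborted, ... */
}

static err_t example_httpc_body(void *arg, struct altcp_pcb *conn,
                                struct pbuf *p, err_t err)
{
  (void)arg; (void)err;
  if (p != NULL) {
    altcp_recved(conn, p->tot_len);   /* acknowledge the received data */
    pbuf_free(p);                     /* the callback owns the pbuf */
  }
  return ERR_OK;
}

static httpc_connection_t httpc_settings;   /* zero-initialized: no proxy */
static httpc_state_t *httpc_conn;

static void example_httpc_start(void)
{
  err_t err;
  httpc_settings.result_fn = example_httpc_result;
  err = httpc_get_file_dns("example.com", HTTP_DEFAULT_PORT, "/index.html",
                           &httpc_settings, example_httpc_body, NULL, &httpc_conn);
  (void)err;   /* != ERR_OK means the request could not even be started */
}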
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + * This version of the file has been modified by Texas Instruments to offer + * simple server-side-include (SSI) and Common Gateway Interface (CGI) + * capability. + */ + +#ifndef LWIP_HDR_APPS_HTTPD_H +#define LWIP_HDR_APPS_HTTPD_H + +#include "httpd_opts.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_HTTPD_CGI + +/** + * @ingroup httpd + * Function pointer for a CGI script handler. + * + * This function is called each time the HTTPD server is asked for a file + * whose name was previously registered as a CGI function using a call to + * http_set_cgi_handlers. The iIndex parameter provides the index of the + * CGI within the cgis array passed to http_set_cgi_handlers. Parameters + * pcParam and pcValue provide access to the parameters provided along with + * the URI. iNumParams provides a count of the entries in the pcParam and + * pcValue arrays. Each entry in the pcParam array contains the name of a + * parameter with the corresponding entry in the pcValue array containing the + * value for that parameter. Note that pcParam may contain multiple elements + * with the same name if, for example, a multi-selection list control is used + * in the form generating the data. + * + * The function should return a pointer to a character string which is the + * path and filename of the response that is to be sent to the connected + * browser, for example "/thanks.htm" or "/response/error.ssi". + * + * The maximum number of parameters that will be passed to this function via + * iNumParams is defined by LWIP_HTTPD_MAX_CGI_PARAMETERS. Any parameters in + * the incoming HTTP request above this number will be discarded. + * + * Requests intended for use by this CGI mechanism must be sent using the GET + * method (which encodes all parameters within the URI rather than in a block + * later in the request). Attempts to use the POST method will result in the + * request being ignored. + * + */ +typedef const char *(*tCGIHandler)(int iIndex, int iNumParams, char *pcParam[], + char *pcValue[]); + +/** + * @ingroup httpd + * Structure defining the base filename (URL) of a CGI and the associated + * function which is to be called when that URL is requested. + */ +typedef struct +{ + const char *pcCGIName; + tCGIHandler pfnCGIHandler; +} tCGI; + +void http_set_cgi_handlers(const tCGI *pCGIs, int iNumHandlers); + +#endif /* LWIP_HTTPD_CGI */ + +#if LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI + +#if LWIP_HTTPD_CGI_SSI +/* we have to prototype this struct here to make it available for the handler */ +struct fs_file; + +/** Define this generic CGI handler in your application. + * It is called once for every URI with parameters. + * The parameters can be stored to the object passed as connection_state, which + * is allocated to file->state via fs_state_init() from fs_open() or fs_open_custom(). 
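An illustrative CGI registration for the handler type described above (requires LWIP_HTTPD_CGI==1); "/led.cgi" and the parameter name are placeholders, and the returned path must exist in the httpd file system image:

#include <string.h>
#include "lwip/apps/httpd.h"

static const char *led_cgi_handler(int iIndex, int iNumParams,
                                   char *pcParam[], char *pcValue[])
{
  int i;
  (void)iIndex;
  for (i = 0; i < iNumParams; i++) {
    if (strcmp(pcParam[i], "state") == 0) {
      /* application-specific: drive the LED from pcValue[i] ("on"/"off") */
    }
  }
  return "/index.html";   /* page sent back to the browser */
}

static const tCGI cgi_handlers[] = {
  { "/led.cgi", led_cgi_handler },
};

static void example_register_cgi(void)
{
  http_set_cgi_handlers(cgi_handlers,
                        (int)(sizeof(cgi_handlers) / sizeof(cgi_handlers[0])));
}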
+ * Content creation via SSI or complete dynamic files can retrieve the CGI params from there. + */ +extern void httpd_cgi_handler(struct fs_file *file, const char* uri, int iNumParams, + char **pcParam, char **pcValue +#if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE + , void *connection_state +#endif /* LWIP_HTTPD_FILE_STATE */ + ); +#endif /* LWIP_HTTPD_CGI_SSI */ + +#endif /* LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI */ + +#if LWIP_HTTPD_SSI + +/** + * @ingroup httpd + * Function pointer for the SSI tag handler callback. + * + * This function will be called each time the HTTPD server detects a tag of the + * form in files with extensions mentioned in the g_pcSSIExtensions + * array (currently .shtml, .shtm, .ssi, .xml, .json) where "name" appears as + * one of the tags supplied to http_set_ssi_handler in the tags array. The + * returned insert string, which will be appended after the the string + * "" in file sent back to the client, should be written to pointer + * pcInsert. iInsertLen contains the size of the buffer pointed to by + * pcInsert. The iIndex parameter provides the zero-based index of the tag as + * found in the tags array and identifies the tag that is to be processed. + * + * The handler returns the number of characters written to pcInsert excluding + * any terminating NULL or HTTPD_SSI_TAG_UNKNOWN when tag is not recognized. + * + * Note that the behavior of this SSI mechanism is somewhat different from the + * "normal" SSI processing as found in, for example, the Apache web server. In + * this case, the inserted text is appended following the SSI tag rather than + * replacing the tag entirely. This allows for an implementation that does not + * require significant additional buffering of output data yet which will still + * offer usable SSI functionality. One downside to this approach is when + * attempting to use SSI within JavaScript. The SSI tag is structured to + * resemble an HTML comment but this syntax does not constitute a comment + * within JavaScript and, hence, leaving the tag in place will result in + * problems in these cases. In order to avoid these problems, define + * LWIP_HTTPD_SSI_INCLUDE_TAG as zero in your lwip options file, or use JavaScript + * style block comments in the form / * # name * / (without the spaces). + */ +typedef u16_t (*tSSIHandler)( +#if LWIP_HTTPD_SSI_RAW + const char* ssi_tag_name, +#else /* LWIP_HTTPD_SSI_RAW */ + int iIndex, +#endif /* LWIP_HTTPD_SSI_RAW */ + char *pcInsert, int iInsertLen +#if LWIP_HTTPD_SSI_MULTIPART + , u16_t current_tag_part, u16_t *next_tag_part +#endif /* LWIP_HTTPD_SSI_MULTIPART */ +#if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE + , void *connection_state +#endif /* LWIP_HTTPD_FILE_STATE */ + ); + +/** Set the SSI handler function + * (if LWIP_HTTPD_SSI_RAW==1, only the first argument is used) + */ +void http_set_ssi_handler(tSSIHandler pfnSSIHandler, + const char **ppcTags, int iNumTags); + +/** For LWIP_HTTPD_SSI_RAW==1, return this to indicate the tag is unknown. + * In this case, the webserver writes a warning into the page. + * You can also just return 0 to write nothing for unknown tags. + */ +#define HTTPD_SSI_TAG_UNKNOWN 0xFFFF + +#endif /* LWIP_HTTPD_SSI */ + +#if LWIP_HTTPD_SUPPORT_POST + +/* These functions must be implemented by the application */ + +/** + * @ingroup httpd + * Called when a POST request has been received. The application can decide + * whether to accept it or not. 
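For illustration, a compact sketch of wiring up the CGI and SSI hooks declared above. Handler names, the "/led.cgi" URL, the "uptime" tag and the returned "/index.shtml" page are all invented; the tSSIHandler signature shown assumes LWIP_HTTPD_SSI_RAW, LWIP_HTTPD_SSI_MULTIPART and LWIP_HTTPD_FILE_STATE are all 0.

#include <stdio.h>
#include "lwip/apps/httpd.h"
#include "lwip/sys.h"

/* CGI handler: act on the request parameters, then name the page to send back. */
static const char *led_cgi_handler(int iIndex, int iNumParams,
                                   char *pcParam[], char *pcValue[])
{
  LWIP_UNUSED_ARG(iIndex);
  LWIP_UNUSED_ARG(iNumParams);
  LWIP_UNUSED_ARG(pcParam);
  LWIP_UNUSED_ARG(pcValue);
  return "/index.shtml";
}

static const tCGI cgi_table[] = {
  { "/led.cgi", led_cgi_handler },
};

/* SSI handler: write the insert text for ssi_tags[iIndex] into pcInsert. */
static const char *ssi_tags[] = { "uptime" };

static u16_t my_ssi_handler(int iIndex, char *pcInsert, int iInsertLen)
{
  LWIP_UNUSED_ARG(iIndex);
  return (u16_t)snprintf(pcInsert, (size_t)iInsertLen, "%lu", (unsigned long)sys_now());
}

static void my_httpd_setup(void)
{
  httpd_init();
  http_set_cgi_handlers(cgi_table, 1);
  http_set_ssi_handler(my_ssi_handler, ssi_tags, 1);
}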
+ * + * @param connection Unique connection identifier, valid until httpd_post_end + * is called. + * @param uri The HTTP header URI receiving the POST request. + * @param http_request The raw HTTP request (the first packet, normally). + * @param http_request_len Size of 'http_request'. + * @param content_len Content-Length from HTTP header. + * @param response_uri Filename of response file, to be filled when denying the + * request + * @param response_uri_len Size of the 'response_uri' buffer. + * @param post_auto_wnd Set this to 0 to let the callback code handle window + * updates by calling 'httpd_post_data_recved' (to throttle rx speed) + * default is 1 (httpd handles window updates automatically) + * @return ERR_OK: Accept the POST request, data may be passed in + * another err_t: Deny the POST request, send back 'bad request'. + */ +err_t httpd_post_begin(void *connection, const char *uri, const char *http_request, + u16_t http_request_len, int content_len, char *response_uri, + u16_t response_uri_len, u8_t *post_auto_wnd); + +/** + * @ingroup httpd + * Called for each pbuf of data that has been received for a POST. + * ATTENTION: The application is responsible for freeing the pbufs passed in! + * + * @param connection Unique connection identifier. + * @param p Received data. + * @return ERR_OK: Data accepted. + * another err_t: Data denied, http_post_get_response_uri will be called. + */ +err_t httpd_post_receive_data(void *connection, struct pbuf *p); + +/** + * @ingroup httpd + * Called when all data is received or when the connection is closed. + * The application must return the filename/URI of a file to send in response + * to this POST request. If the response_uri buffer is untouched, a 404 + * response is returned. + * + * @param connection Unique connection identifier. + * @param response_uri Filename of response file, to be filled when denying the request + * @param response_uri_len Size of the 'response_uri' buffer. + */ +void httpd_post_finished(void *connection, char *response_uri, u16_t response_uri_len); + +#if LWIP_HTTPD_POST_MANUAL_WND +void httpd_post_data_recved(void *connection, u16_t recved_len); +#endif /* LWIP_HTTPD_POST_MANUAL_WND */ + +#endif /* LWIP_HTTPD_SUPPORT_POST */ + +void httpd_init(void); + +#if HTTPD_ENABLE_HTTPS +struct altcp_tls_config; +void httpd_inits(struct altcp_tls_config *conf); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_HTTPD_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h new file mode 100644 index 00000000..8723961f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h @@ -0,0 +1,396 @@ +/** + * @file + * HTTP server options list + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
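A minimal sketch of the three application-provided POST callbacks declared above (requires LWIP_HTTPD_SUPPORT_POST==1). The "/upload" URI and "/done.html" response page are hypothetical, and buffer handling is intentionally simplified.

#include <stdio.h>
#include <string.h>
#include "lwip/apps/httpd.h"

err_t httpd_post_begin(void *connection, const char *uri, const char *http_request,
                       u16_t http_request_len, int content_len, char *response_uri,
                       u16_t response_uri_len, u8_t *post_auto_wnd)
{
  LWIP_UNUSED_ARG(connection);  LWIP_UNUSED_ARG(http_request);
  LWIP_UNUSED_ARG(http_request_len);  LWIP_UNUSED_ARG(content_len);
  LWIP_UNUSED_ARG(response_uri);  LWIP_UNUSED_ARG(response_uri_len);
  *post_auto_wnd = 1;                      /* let httpd manage the TCP window */
  return (strcmp(uri, "/upload") == 0) ? ERR_OK : ERR_VAL;  /* deny other URIs */
}

err_t httpd_post_receive_data(void *connection, struct pbuf *p)
{
  LWIP_UNUSED_ARG(connection);
  /* consume p->payload here; the application must free the pbuf */
  pbuf_free(p);
  return ERR_OK;
}

void httpd_post_finished(void *connection, char *response_uri, u16_t response_uri_len)
{
  LWIP_UNUSED_ARG(connection);
  /* choose the page sent back once the whole POST body has been received */
  snprintf(response_uri, response_uri_len, "/done.html");
}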
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + * This version of the file has been modified by Texas Instruments to offer + * simple server-side-include (SSI) and Common Gateway Interface (CGI) + * capability. + */ + +#ifndef LWIP_HDR_APPS_HTTPD_OPTS_H +#define LWIP_HDR_APPS_HTTPD_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup httpd_opts Options + * @ingroup httpd + * @{ + */ + +/** Set this to 1 to support CGI (old style). + * + * This old style CGI support works by registering an array of URLs and + * associated CGI handler functions (@ref http_set_cgi_handlers). + * This list is scanned just before fs_open is called from request handling. + * The handler can return a new URL that is used internally by the httpd to + * load the returned page (passed to fs_open). + * + * Use this CGI type e.g. to execute specific actions and return a page that + * does not depend on the CGI parameters. + */ +#if !defined LWIP_HTTPD_CGI || defined __DOXYGEN__ +#define LWIP_HTTPD_CGI 0 +#endif + +/** Set this to 1 to support CGI (new style). + * + * This new style CGI support works by calling a global function + * (@ref tCGIHandler) for all URLs that are found. fs_open is called first + * and the URL can not be written by the CGI handler. Instead, this handler gets + * passed the http file state, an object where it can store information derived + * from the CGI URL or parameters. This file state is later passed to SSI, so + * the SSI code can return data depending on CGI input. + * + * Use this CGI handler if you want CGI information passed on to SSI. + */ +#if !defined LWIP_HTTPD_CGI_SSI || defined __DOXYGEN__ +#define LWIP_HTTPD_CGI_SSI 0 +#endif + +/** Set this to 1 to support SSI (Server-Side-Includes) + * + * In contrast to other http servers, this only calls a preregistered callback + * function (@see http_set_ssi_handler) for each tag (in the format of + * ) encountered in SSI-enabled pages. + * SSI-enabled pages must have one of the predefined SSI-enabled file extensions. + * All files with one of these extensions are parsed when sent. + * + * A downside of the current SSI implementation is that persistent connections + * don't work, as the file length is not known in advance (and httpd currently + * relies on the Content-Length header for persistent connections). + * + * To save memory, the maximum tag length is limited (@see LWIP_HTTPD_MAX_TAG_NAME_LEN). + * To save memory, the maximum insertion string length is limited (@see + * LWIP_HTTPD_MAX_TAG_INSERT_LEN). If this is not enought, @ref LWIP_HTTPD_SSI_MULTIPART + * can be used. 
+ */ +#if !defined LWIP_HTTPD_SSI || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI 0 +#endif + +/** Set this to 1 to implement an SSI tag handler callback that gets a const char* + * to the tag (instead of an index into a pre-registered array of known tags) + * If this is 0, the SSI handler callback function is only called pre-registered tags. + */ +#if !defined LWIP_HTTPD_SSI_RAW || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_RAW 0 +#endif + +/** Set this to 0 to prevent parsing the file extension at runtime to decide + * if a file should be scanned for SSI tags or not. + * Default is 1 (file extensions are checked using the g_pcSSIExtensions array) + * Set to 2 to override this runtime test function. + * + * This is enabled by default, but if you only use a newer version of makefsdata + * supporting the "-ssi" option, this info is already present in + */ +#if !defined LWIP_HTTPD_SSI_BY_FILE_EXTENSION || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_BY_FILE_EXTENSION 1 +#endif + +/** Set this to 1 to support HTTP POST */ +#if !defined LWIP_HTTPD_SUPPORT_POST || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_POST 0 +#endif + +/* The maximum number of parameters that the CGI handler can be sent. */ +#if !defined LWIP_HTTPD_MAX_CGI_PARAMETERS || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_CGI_PARAMETERS 16 +#endif + +/** LWIP_HTTPD_SSI_MULTIPART==1: SSI handler function is called with 2 more + * arguments indicating a counter for insert string that are too long to be + * inserted at once: the SSI handler function must then set 'next_tag_part' + * which will be passed back to it in the next call. */ +#if !defined LWIP_HTTPD_SSI_MULTIPART || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_MULTIPART 0 +#endif + +/* The maximum length of the string comprising the SSI tag name + * ATTENTION: tags longer than this are ignored, not truncated! + */ +#if !defined LWIP_HTTPD_MAX_TAG_NAME_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_TAG_NAME_LEN 8 +#endif + +/* The maximum length of string that can be returned to replace any given tag + * If this buffer is not long enough, use LWIP_HTTPD_SSI_MULTIPART. + */ +#if !defined LWIP_HTTPD_MAX_TAG_INSERT_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_TAG_INSERT_LEN 192 +#endif + +#if !defined LWIP_HTTPD_POST_MANUAL_WND || defined __DOXYGEN__ +#define LWIP_HTTPD_POST_MANUAL_WND 0 +#endif + +/** This string is passed in the HTTP header as "Server: " */ +#if !defined HTTPD_SERVER_AGENT || defined __DOXYGEN__ +#define HTTPD_SERVER_AGENT "lwIP/" LWIP_VERSION_STRING " (http://savannah.nongnu.org/projects/lwip)" +#endif + +/** Set this to 1 if you want to include code that creates HTTP headers + * at runtime. Default is off: HTTP headers are then created statically + * by the makefsdata tool. Static headers mean smaller code size, but + * the (readonly) fsdata will grow a bit as every file includes the HTTP + * header. */ +#if !defined LWIP_HTTPD_DYNAMIC_HEADERS || defined __DOXYGEN__ +#define LWIP_HTTPD_DYNAMIC_HEADERS 0 +#endif + +#if !defined HTTPD_DEBUG || defined __DOXYGEN__ +#define HTTPD_DEBUG LWIP_DBG_OFF +#endif + +/** Set this to 1 to use a memp pool for allocating + * struct http_state instead of the heap. + * If enabled, you'll need to define MEMP_NUM_PARALLEL_HTTPD_CONNS + * (and MEMP_NUM_PARALLEL_HTTPD_SSI_CONNS for SSI) to set the size of + * the pool(s). 
+ */ +#if !defined HTTPD_USE_MEM_POOL || defined __DOXYGEN__ +#define HTTPD_USE_MEM_POOL 0 +#endif + +/** The server port for HTTPD to use */ +#if !defined HTTPD_SERVER_PORT || defined __DOXYGEN__ +#define HTTPD_SERVER_PORT LWIP_IANA_PORT_HTTP +#endif + +/** The https server port for HTTPD to use */ +#if !defined HTTPD_SERVER_PORT_HTTPS || defined __DOXYGEN__ +#define HTTPD_SERVER_PORT_HTTPS LWIP_IANA_PORT_HTTPS +#endif + +/** Enable https support? */ +#if !defined HTTPD_ENABLE_HTTPS || defined __DOXYGEN__ +#define HTTPD_ENABLE_HTTPS 0 +#endif + +/** Maximum retries before the connection is aborted/closed. + * - number of times pcb->poll is called -> default is 4*500ms = 2s; + * - reset when pcb->sent is called + */ +#if !defined HTTPD_MAX_RETRIES || defined __DOXYGEN__ +#define HTTPD_MAX_RETRIES 4 +#endif + +/** The poll delay is X*500ms */ +#if !defined HTTPD_POLL_INTERVAL || defined __DOXYGEN__ +#define HTTPD_POLL_INTERVAL 4 +#endif + +/** Priority for tcp pcbs created by HTTPD (very low by default). + * Lower priorities get killed first when running out of memory. + */ +#if !defined HTTPD_TCP_PRIO || defined __DOXYGEN__ +#define HTTPD_TCP_PRIO TCP_PRIO_MIN +#endif + +/** Set this to 1 to enable timing each file sent */ +#if !defined LWIP_HTTPD_TIMING || defined __DOXYGEN__ +#define LWIP_HTTPD_TIMING 0 +#endif +/** Set this to 1 to enable timing each file sent */ +#if !defined HTTPD_DEBUG_TIMING || defined __DOXYGEN__ +#define HTTPD_DEBUG_TIMING LWIP_DBG_OFF +#endif + +/** Set this to one to show error pages when parsing a request fails instead + of simply closing the connection. */ +#if !defined LWIP_HTTPD_SUPPORT_EXTSTATUS || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_EXTSTATUS 0 +#endif + +/** Set this to 0 to drop support for HTTP/0.9 clients (to save some bytes) */ +#if !defined LWIP_HTTPD_SUPPORT_V09 || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_V09 1 +#endif + +/** Set this to 1 to enable HTTP/1.1 persistent connections. + * ATTENTION: If the generated file system includes HTTP headers, these must + * include the "Connection: keep-alive" header (pass argument "-11" to makefsdata). + */ +#if !defined LWIP_HTTPD_SUPPORT_11_KEEPALIVE || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_11_KEEPALIVE 0 +#endif + +/** Set this to 1 to support HTTP request coming in in multiple packets/pbufs */ +#if !defined LWIP_HTTPD_SUPPORT_REQUESTLIST || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_REQUESTLIST 1 +#endif + +#if LWIP_HTTPD_SUPPORT_REQUESTLIST +/** Number of rx pbufs to enqueue to parse an incoming request (up to the first + newline) */ +#if !defined LWIP_HTTPD_REQ_QUEUELEN || defined __DOXYGEN__ +#define LWIP_HTTPD_REQ_QUEUELEN 5 +#endif + +/** Number of (TCP payload-) bytes (in pbufs) to enqueue to parse and incoming + request (up to the first double-newline) */ +#if !defined LWIP_HTTPD_REQ_BUFSIZE || defined __DOXYGEN__ +#define LWIP_HTTPD_REQ_BUFSIZE LWIP_HTTPD_MAX_REQ_LENGTH +#endif + +/** Defines the maximum length of a HTTP request line (up to the first CRLF, + copied from pbuf into this a global buffer when pbuf- or packet-queues + are received - otherwise the input pbuf is used directly) */ +#if !defined LWIP_HTTPD_MAX_REQ_LENGTH || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_REQ_LENGTH LWIP_MIN(1023, (LWIP_HTTPD_REQ_QUEUELEN * PBUF_POOL_BUFSIZE)) +#endif +#endif /* LWIP_HTTPD_SUPPORT_REQUESTLIST */ + +/** This is the size of a static buffer used when URIs end with '/'. 
+ * In this buffer, the directory requested is concatenated with all the + * configured default file names. + * Set to 0 to disable checking default filenames on non-root directories. + */ +#if !defined LWIP_HTTPD_MAX_REQUEST_URI_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_REQUEST_URI_LEN 63 +#endif + +/** Maximum length of the filename to send as response to a POST request, + * filled in by the application when a POST is finished. + */ +#if !defined LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN 63 +#endif + +/** Set this to 0 to not send the SSI tag (default is on, so the tag will + * be sent in the HTML page */ +#if !defined LWIP_HTTPD_SSI_INCLUDE_TAG || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_INCLUDE_TAG 1 +#endif + +/** Set this to 1 to call tcp_abort when tcp_close fails with memory error. + * This can be used to prevent consuming all memory in situations where the + * HTTP server has low priority compared to other communication. */ +#if !defined LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR || defined __DOXYGEN__ +#define LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR 0 +#endif + +/** Set this to 1 to kill the oldest connection when running out of + * memory for 'struct http_state' or 'struct http_ssi_state'. + * ATTENTION: This puts all connections on a linked list, so may be kind of slow. + */ +#if !defined LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED || defined __DOXYGEN__ +#define LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED 0 +#endif + +/** Set this to 1 to send URIs without extension without headers + * (who uses this at all??) */ +#if !defined LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI || defined __DOXYGEN__ +#define LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI 0 +#endif + +/** Default: Tags are sent from struct http_state and are therefore volatile */ +#if !defined HTTP_IS_TAG_VOLATILE || defined __DOXYGEN__ +#define HTTP_IS_TAG_VOLATILE(ptr) TCP_WRITE_FLAG_COPY +#endif + +/* By default, the httpd is limited to send 2*pcb->mss to keep resource usage low + when http is not an important protocol in the device. */ +#if !defined HTTPD_LIMIT_SENDING_TO_2MSS || defined __DOXYGEN__ +#define HTTPD_LIMIT_SENDING_TO_2MSS 1 +#endif + +/* Define this to a function that returns the maximum amount of data to enqueue. + The function have this signature: u16_t fn(struct altcp_pcb* pcb); + The best place to define this is the hooks file (@see LWIP_HOOK_FILENAME) */ +#if !defined HTTPD_MAX_WRITE_LEN || defined __DOXYGEN__ +#if HTTPD_LIMIT_SENDING_TO_2MSS +#define HTTPD_MAX_WRITE_LEN(pcb) ((u16_t)(2 * altcp_mss(pcb))) +#endif +#endif + +/*------------------- FS OPTIONS -------------------*/ + +/** Set this to 1 and provide the functions: + * - "int fs_open_custom(struct fs_file *file, const char *name)" + * Called first for every opened file to allow opening files + * that are not included in fsdata(_custom).c + * - "void fs_close_custom(struct fs_file *file)" + * Called to free resources allocated by fs_open_custom(). + */ +#if !defined LWIP_HTTPD_CUSTOM_FILES || defined __DOXYGEN__ +#define LWIP_HTTPD_CUSTOM_FILES 0 +#endif + +/** Set this to 1 to support fs_read() to dynamically read file data. + * Without this (default=off), only one-block files are supported, + * and the contents must be ready after fs_open(). + */ +#if !defined LWIP_HTTPD_DYNAMIC_FILE_READ || defined __DOXYGEN__ +#define LWIP_HTTPD_DYNAMIC_FILE_READ 0 +#endif + +/** Set this to 1 to include an application state argument per file + * that is opened. 
This allows to keep a state per connection/file. + */ +#if !defined LWIP_HTTPD_FILE_STATE || defined __DOXYGEN__ +#define LWIP_HTTPD_FILE_STATE 0 +#endif + +/** HTTPD_PRECALCULATED_CHECKSUM==1: include precompiled checksums for + * predefined (MSS-sized) chunks of the files to prevent having to calculate + * the checksums at runtime. */ +#if !defined HTTPD_PRECALCULATED_CHECKSUM || defined __DOXYGEN__ +#define HTTPD_PRECALCULATED_CHECKSUM 0 +#endif + +/** LWIP_HTTPD_FS_ASYNC_READ==1: support asynchronous read operations + * (fs_read_async returns FS_READ_DELAYED and calls a callback when finished). + */ +#if !defined LWIP_HTTPD_FS_ASYNC_READ || defined __DOXYGEN__ +#define LWIP_HTTPD_FS_ASYNC_READ 0 +#endif + +/** Filename (including path) to use as FS data file */ +#if !defined HTTPD_FSDATA_FILE || defined __DOXYGEN__ +/* HTTPD_USE_CUSTOM_FSDATA: Compatibility with deprecated lwIP option */ +#if defined(HTTPD_USE_CUSTOM_FSDATA) && (HTTPD_USE_CUSTOM_FSDATA != 0) +#define HTTPD_FSDATA_FILE "fsdata_custom.c" +#else +#define HTTPD_FSDATA_FILE "fsdata.c" +#endif +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_HTTPD_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h new file mode 100644 index 00000000..cc86e7f8 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h @@ -0,0 +1,100 @@ +/** + * @file + * lwIP iPerf server implementation + */ + +/* + * Copyright (c) 2014 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
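Pulling the httpd_opts.h switches above together, an illustrative lwipopts.h excerpt that enables the optional CGI/SSI/POST features instead of taking the 0 defaults (the values shown are examples, not recommendations):

/* lwipopts.h excerpt (illustrative values) */
#define LWIP_HTTPD_CGI                1    /* old-style CGI handler table */
#define LWIP_HTTPD_SSI                1    /* parse tags in .shtml/.ssi/... files */
#define LWIP_HTTPD_SSI_INCLUDE_TAG    0    /* do not echo the tag itself */
#define LWIP_HTTPD_SUPPORT_POST       1    /* enable the httpd_post_* callbacks */
#define LWIP_HTTPD_MAX_TAG_INSERT_LEN 192  /* room for the longest SSI insert */
#define HTTPD_SERVER_AGENT            "my-device/1.0"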
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_LWIPERF_H +#define LWIP_HDR_APPS_LWIPERF_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define LWIPERF_TCP_PORT_DEFAULT 5001 + +/** lwIPerf test results */ +enum lwiperf_report_type +{ + /** The server side test is done */ + LWIPERF_TCP_DONE_SERVER, + /** The client side test is done */ + LWIPERF_TCP_DONE_CLIENT, + /** Local error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL, + /** Data check error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL_DATAERROR, + /** Transmit error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL_TXERROR, + /** Remote side aborted the test */ + LWIPERF_TCP_ABORTED_REMOTE +}; + +/** Control */ +enum lwiperf_client_type +{ + /** Unidirectional tx only test */ + LWIPERF_CLIENT, + /** Do a bidirectional test simultaneously */ + LWIPERF_DUAL, + /** Do a bidirectional test individually */ + LWIPERF_TRADEOFF +}; + +/** Prototype of a report function that is called when a session is finished. + This report function can show the test results. + @param report_type contains the test result */ +typedef void (*lwiperf_report_fn)(void *arg, enum lwiperf_report_type report_type, + const ip_addr_t* local_addr, u16_t local_port, const ip_addr_t* remote_addr, u16_t remote_port, + u32_t bytes_transferred, u32_t ms_duration, u32_t bandwidth_kbitpsec); + +void* lwiperf_start_tcp_server(const ip_addr_t* local_addr, u16_t local_port, + lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_server_default(lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_client(const ip_addr_t* remote_addr, u16_t remote_port, + enum lwiperf_client_type type, + lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_client_default(const ip_addr_t* remote_addr, + lwiperf_report_fn report_fn, void* report_arg); + +void lwiperf_abort(void* lwiperf_session); + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_LWIPERF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h new file mode 100644 index 00000000..20d7ee2a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h @@ -0,0 +1,105 @@ +/** + * @file + * MDNS responder + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
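A short sketch of using the lwiperf API above: start the default TCP server (port 5001) and print each session report; the callback name is invented.

#include <stdio.h>
#include "lwip/apps/lwiperf.h"

/* Report callback: called once per finished iperf session. */
static void iperf_report(void *arg, enum lwiperf_report_type report_type,
                         const ip_addr_t *local_addr, u16_t local_port,
                         const ip_addr_t *remote_addr, u16_t remote_port,
                         u32_t bytes_transferred, u32_t ms_duration,
                         u32_t bandwidth_kbitpsec)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(local_addr);  LWIP_UNUSED_ARG(local_port);
  LWIP_UNUSED_ARG(remote_addr); LWIP_UNUSED_ARG(remote_port);
  printf("iperf done: type=%d, %lu bytes in %lu ms (%lu kbit/s)\n",
         (int)report_type, (unsigned long)bytes_transferred,
         (unsigned long)ms_duration, (unsigned long)bandwidth_kbitpsec);
}

static void *iperf_session;

static void start_iperf_server(void)
{
  /* Listens on LWIPERF_TCP_PORT_DEFAULT (5001). */
  iperf_session = lwiperf_start_tcp_server_default(iperf_report, NULL);
}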
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ + +#ifndef LWIP_HDR_APPS_MDNS_H +#define LWIP_HDR_APPS_MDNS_H + +#include "lwip/apps/mdns_opts.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_MDNS_RESPONDER + +enum mdns_sd_proto { + DNSSD_PROTO_UDP = 0, + DNSSD_PROTO_TCP = 1 +}; + +#define MDNS_PROBING_CONFLICT 0 +#define MDNS_PROBING_SUCCESSFUL 1 + +#define MDNS_LABEL_MAXLEN 63 + +struct mdns_host; +struct mdns_service; + +/** Callback function to add text to a reply, called when generating the reply */ +typedef void (*service_get_txt_fn_t)(struct mdns_service *service, void *txt_userdata); + +/** Callback function to let application know the result of probing network for name + * uniqueness, called with result MDNS_PROBING_SUCCESSFUL if no other node claimed + * use for the name for the netif or a service and is safe to use, or MDNS_PROBING_CONFLICT + * if another node is already using it and mdns is disabled on this interface */ +typedef void (*mdns_name_result_cb_t)(struct netif* netif, u8_t result); + +void mdns_resp_init(void); + +void mdns_resp_register_name_result_cb(mdns_name_result_cb_t cb); + +err_t mdns_resp_add_netif(struct netif *netif, const char *hostname, u32_t dns_ttl); +err_t mdns_resp_remove_netif(struct netif *netif); +err_t mdns_resp_rename_netif(struct netif *netif, const char *hostname); + +s8_t mdns_resp_add_service(struct netif *netif, const char *name, const char *service, enum mdns_sd_proto proto, u16_t port, u32_t dns_ttl, service_get_txt_fn_t txt_fn, void *txt_userdata); +err_t mdns_resp_del_service(struct netif *netif, s8_t slot); +err_t mdns_resp_rename_service(struct netif *netif, s8_t slot, const char *name); + +err_t mdns_resp_add_service_txtitem(struct mdns_service *service, const char *txt, u8_t txt_len); + +void mdns_resp_restart(struct netif *netif); +void mdns_resp_announce(struct netif *netif); + +/** + * @ingroup mdns + * Announce IP settings have changed on netif. + * Call this in your callback registered by netif_set_status_callback(). + * No need to call this function when LWIP_NETIF_EXT_STATUS_CALLBACK==1, + * this handled automatically for you. + * @param netif The network interface where settings have changed. + */ +#define mdns_resp_netif_settings_changed(netif) mdns_resp_announce(netif) + +#endif /* LWIP_MDNS_RESPONDER */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MDNS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h new file mode 100644 index 00000000..45f2c50c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h @@ -0,0 +1,81 @@ +/** + * @file + * MDNS responder + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ + +#ifndef LWIP_HDR_APPS_MDNS_OPTS_H +#define LWIP_HDR_APPS_MDNS_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup mdns_opts Options + * @ingroup mdns + * @{ + */ + +/** + * LWIP_MDNS_RESPONDER==1: Turn on multicast DNS module. UDP must be available for MDNS + * transport. IGMP is needed for IPv4 multicast. + */ +#ifndef LWIP_MDNS_RESPONDER +#define LWIP_MDNS_RESPONDER 0 +#endif /* LWIP_MDNS_RESPONDER */ + +/** The maximum number of services per netif */ +#ifndef MDNS_MAX_SERVICES +#define MDNS_MAX_SERVICES 1 +#endif + +/** MDNS_RESP_USENETIF_EXTCALLBACK==1: register an ext_callback on the netif + * to automatically restart probing/announcing on status or address change. + */ +#ifndef MDNS_RESP_USENETIF_EXTCALLBACK +#define MDNS_RESP_USENETIF_EXTCALLBACK LWIP_NETIF_EXT_STATUS_CALLBACK +#endif + +/** + * MDNS_DEBUG: Enable debugging for multicast DNS. + */ +#ifndef MDNS_DEBUG +#define MDNS_DEBUG LWIP_DBG_OFF +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_MDNS_OPTS_H */ + diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h new file mode 100644 index 00000000..9635b5b1 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h @@ -0,0 +1,74 @@ +/** + * @file + * MDNS responder private definitions + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
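For context, a minimal sketch of bringing up the mDNS responder API from mdns.h above (requires LWIP_MDNS_RESPONDER==1 and, per the option text, IGMP for IPv4 multicast). The hostname, service name and TXT record are made up.

#include "lwip/apps/mdns.h"

/* TXT callback: called while the service answer is being generated. */
static void srv_txt(struct mdns_service *service, void *txt_userdata)
{
  LWIP_UNUSED_ARG(txt_userdata);
  mdns_resp_add_service_txtitem(service, "path=/", 6);
}

static void start_mdns(struct netif *netif)
{
  mdns_resp_init();
  mdns_resp_add_netif(netif, "mydevice", 3600);        /* advertise mydevice.local */
  mdns_resp_add_service(netif, "mydevice", "_http", DNSSD_PROTO_TCP,
                        80, 3600, srv_txt, NULL);       /* advertise an HTTP service */
}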
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ +#ifndef LWIP_HDR_MDNS_PRIV_H +#define LWIP_HDR_MDNS_PRIV_H + +#include "lwip/apps/mdns_opts.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_MDNS_RESPONDER + +/* Domain struct and methods - visible for unit tests */ + +#define MDNS_DOMAIN_MAXLEN 256 +#define MDNS_READNAME_ERROR 0xFFFF + +struct mdns_domain { + /* Encoded domain name */ + u8_t name[MDNS_DOMAIN_MAXLEN]; + /* Total length of domain name, including zero */ + u16_t length; + /* Set if compression of this domain is not allowed */ + u8_t skip_compression; +}; + +err_t mdns_domain_add_label(struct mdns_domain *domain, const char *label, u8_t len); +u16_t mdns_readname(struct pbuf *p, u16_t offset, struct mdns_domain *domain); +int mdns_domain_eq(struct mdns_domain *a, struct mdns_domain *b); +u16_t mdns_compress_domain(struct pbuf *pbuf, u16_t *offset, struct mdns_domain *domain); + +#endif /* LWIP_MDNS_RESPONDER */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MDNS_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h new file mode 100644 index 00000000..c2bb2288 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h @@ -0,0 +1,205 @@ +/** + * @file + * MQTT client + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_CLIENT_H +#define LWIP_HDR_APPS_MQTT_CLIENT_H + +#include "lwip/apps/mqtt_opts.h" +#include "lwip/err.h" +#include "lwip/ip_addr.h" +#include "lwip/prot/iana.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct mqtt_client_s mqtt_client_t; + +#if LWIP_ALTCP && LWIP_ALTCP_TLS +struct altcp_tls_config; +#endif + +/** @ingroup mqtt + * Default MQTT port (non-TLS) */ +#define MQTT_PORT LWIP_IANA_PORT_MQTT +/** @ingroup mqtt + * Default MQTT TLS port */ +#define MQTT_TLS_PORT LWIP_IANA_PORT_SECURE_MQTT + +/*---------------------------------------------------------------------------------------------- */ +/* Connection with server */ + +/** + * @ingroup mqtt + * Client information and connection parameters */ +struct mqtt_connect_client_info_t { + /** Client identifier, must be set by caller */ + const char *client_id; + /** User name, set to NULL if not used */ + const char* client_user; + /** Password, set to NULL if not used */ + const char* client_pass; + /** keep alive time in seconds, 0 to disable keep alive functionality*/ + u16_t keep_alive; + /** will topic, set to NULL if will is not to be used, + will_msg, will_qos and will retain are then ignored */ + const char* will_topic; + /** will_msg, see will_topic */ + const char* will_msg; + /** will_qos, see will_topic */ + u8_t will_qos; + /** will_retain, see will_topic */ + u8_t will_retain; +#if LWIP_ALTCP && LWIP_ALTCP_TLS + /** TLS configuration for secure connections */ + struct altcp_tls_config *tls_config; +#endif +}; + +/** + * @ingroup mqtt + * Connection status codes */ +typedef enum +{ + /** Accepted */ + MQTT_CONNECT_ACCEPTED = 0, + /** Refused protocol version */ + MQTT_CONNECT_REFUSED_PROTOCOL_VERSION = 1, + /** Refused identifier */ + MQTT_CONNECT_REFUSED_IDENTIFIER = 2, + /** Refused server */ + MQTT_CONNECT_REFUSED_SERVER = 3, + /** Refused user credentials */ + MQTT_CONNECT_REFUSED_USERNAME_PASS = 4, + /** Refused not authorized */ + MQTT_CONNECT_REFUSED_NOT_AUTHORIZED_ = 5, + /** Disconnected */ + MQTT_CONNECT_DISCONNECTED = 256, + /** Timeout */ + MQTT_CONNECT_TIMEOUT = 257 +} mqtt_connection_status_t; + +/** + * @ingroup mqtt + * Function prototype for mqtt connection status callback. 
Called when + * client has connected to the server after initiating a mqtt connection attempt by + * calling mqtt_client_connect() or when connection is closed by server or an error + * + * @param client MQTT client itself + * @param arg Additional argument to pass to the callback function + * @param status Connect result code or disconnection notification @see mqtt_connection_status_t + * + */ +typedef void (*mqtt_connection_cb_t)(mqtt_client_t *client, void *arg, mqtt_connection_status_t status); + + +/** + * @ingroup mqtt + * Data callback flags */ +enum { + /** Flag set when last fragment of data arrives in data callback */ + MQTT_DATA_FLAG_LAST = 1 +}; + +/** + * @ingroup mqtt + * Function prototype for MQTT incoming publish data callback function. Called when data + * arrives to a subscribed topic @see mqtt_subscribe + * + * @param arg Additional argument to pass to the callback function + * @param data User data, pointed object, data may not be referenced after callback return, + NULL is passed when all publish data are delivered + * @param len Length of publish data fragment + * @param flags MQTT_DATA_FLAG_LAST set when this call contains the last part of data from publish message + * + */ +typedef void (*mqtt_incoming_data_cb_t)(void *arg, const u8_t *data, u16_t len, u8_t flags); + + +/** + * @ingroup mqtt + * Function prototype for MQTT incoming publish function. Called when an incoming publish + * arrives to a subscribed topic @see mqtt_subscribe + * + * @param arg Additional argument to pass to the callback function + * @param topic Zero terminated Topic text string, topic may not be referenced after callback return + * @param tot_len Total length of publish data, if set to 0 (no publish payload) data callback will not be invoked + */ +typedef void (*mqtt_incoming_publish_cb_t)(void *arg, const char *topic, u32_t tot_len); + + +/** + * @ingroup mqtt + * Function prototype for mqtt request callback. 
Called when a subscribe, unsubscribe + * or publish request has completed + * @param arg Pointer to user data supplied when invoking request + * @param err ERR_OK on success + * ERR_TIMEOUT if no response was received within timeout, + * ERR_ABRT if (un)subscribe was denied + */ +typedef void (*mqtt_request_cb_t)(void *arg, err_t err); + + +err_t mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ipaddr, u16_t port, mqtt_connection_cb_t cb, void *arg, + const struct mqtt_connect_client_info_t *client_info); + +void mqtt_disconnect(mqtt_client_t *client); + +mqtt_client_t *mqtt_client_new(void); +void mqtt_client_free(mqtt_client_t* client); + +u8_t mqtt_client_is_connected(mqtt_client_t *client); + +void mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t, + mqtt_incoming_data_cb_t data_cb, void *arg); + +err_t mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub); + +/** @ingroup mqtt + *Subscribe to topic */ +#define mqtt_subscribe(client, topic, qos, cb, arg) mqtt_sub_unsub(client, topic, qos, cb, arg, 1) +/** @ingroup mqtt + * Unsubscribe to topic */ +#define mqtt_unsubscribe(client, topic, cb, arg) mqtt_sub_unsub(client, topic, 0, cb, arg, 0) + +err_t mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain, + mqtt_request_cb_t cb, void *arg); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_CLIENT_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h new file mode 100644 index 00000000..4226d21e --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h @@ -0,0 +1,103 @@ +/** + * @file + * MQTT client options + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
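A condensed sketch of the mqtt.h client API above: connect, then subscribe and register the incoming-publish callbacks once the broker accepts the connection. Client id, topic and callback names are invented, and the initializer assumes the TLS members are absent or defaulted (designated initializers leave them zero).

#include <stdio.h>
#include "lwip/apps/mqtt.h"

static void sub_request_cb(void *arg, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  printf("subscribe result: %d\n", (int)err);
}

static void incoming_publish_cb(void *arg, const char *topic, u32_t tot_len)
{
  LWIP_UNUSED_ARG(arg);
  printf("publish on %s, %lu bytes follow\n", topic, (unsigned long)tot_len);
}

static void incoming_data_cb(void *arg, const u8_t *data, u16_t len, u8_t flags)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(data);
  printf("payload fragment: %u bytes%s\n", (unsigned)len,
         (flags & MQTT_DATA_FLAG_LAST) ? " (last)" : "");
}

static void connection_cb(mqtt_client_t *client, void *arg, mqtt_connection_status_t status)
{
  if (status == MQTT_CONNECT_ACCEPTED) {
    mqtt_set_inpub_callback(client, incoming_publish_cb, incoming_data_cb, arg);
    mqtt_subscribe(client, "sensors/temp", 1, sub_request_cb, arg);
  }
}

static void start_mqtt(const ip_addr_t *broker_addr)
{
  static const struct mqtt_connect_client_info_t ci = {
    .client_id  = "my-client-id",
    .keep_alive = 60,            /* seconds; other members default to NULL/0 */
  };
  mqtt_client_t *client = mqtt_client_new();
  if (client != NULL) {
    mqtt_client_connect(client, broker_addr, MQTT_PORT, connection_cb, NULL, &ci);
  }
}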
+ * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_OPTS_H +#define LWIP_HDR_APPS_MQTT_OPTS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup mqtt_opts Options + * @ingroup mqtt + * @{ + */ + +/** + * Output ring-buffer size, must be able to fit largest outgoing publish message topic+payloads + */ +#ifndef MQTT_OUTPUT_RINGBUF_SIZE +#define MQTT_OUTPUT_RINGBUF_SIZE 256 +#endif + +/** + * Number of bytes in receive buffer, must be at least the size of the longest incoming topic + 8 + * If one wants to avoid fragmented incoming publish, set length to max incoming topic length + max payload length + 8 + */ +#ifndef MQTT_VAR_HEADER_BUFFER_LEN +#define MQTT_VAR_HEADER_BUFFER_LEN 128 +#endif + +/** + * Maximum number of pending subscribe, unsubscribe and publish requests to server . + */ +#ifndef MQTT_REQ_MAX_IN_FLIGHT +#define MQTT_REQ_MAX_IN_FLIGHT 4 +#endif + +/** + * Seconds between each cyclic timer call. + */ +#ifndef MQTT_CYCLIC_TIMER_INTERVAL +#define MQTT_CYCLIC_TIMER_INTERVAL 5 +#endif + +/** + * Publish, subscribe and unsubscribe request timeout in seconds. + */ +#ifndef MQTT_REQ_TIMEOUT +#define MQTT_REQ_TIMEOUT 30 +#endif + +/** + * Seconds for MQTT connect response timeout after sending connect request + */ +#ifndef MQTT_CONNECT_TIMOUT +#define MQTT_CONNECT_TIMOUT 100 +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h new file mode 100644 index 00000000..b775913c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h @@ -0,0 +1,104 @@ +/** + * @file + * MQTT client (private interface) + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_PRIV_H +#define LWIP_HDR_APPS_MQTT_PRIV_H + +#include "lwip/apps/mqtt.h" +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Pending request item, binds application callback to pending server requests */ +struct mqtt_request_t +{ + /** Next item in list, NULL means this is the last in chain, + next pointing at itself means request is unallocated */ + struct mqtt_request_t *next; + /** Callback to upper layer */ + mqtt_request_cb_t cb; + void *arg; + /** MQTT packet identifier */ + u16_t pkt_id; + /** Expire time relative to element before this */ + u16_t timeout_diff; +}; + +/** Ring buffer */ +struct mqtt_ringbuf_t { + u16_t put; + u16_t get; + u8_t buf[MQTT_OUTPUT_RINGBUF_SIZE]; +}; + +/** MQTT client */ +struct mqtt_client_s +{ + /** Timers and timeouts */ + u16_t cyclic_tick; + u16_t keep_alive; + u16_t server_watchdog; + /** Packet identifier generator*/ + u16_t pkt_id_seq; + /** Packet identifier of pending incoming publish */ + u16_t inpub_pkt_id; + /** Connection state */ + u8_t conn_state; + struct altcp_pcb *conn; + /** Connection callback */ + void *connect_arg; + mqtt_connection_cb_t connect_cb; + /** Pending requests to server */ + struct mqtt_request_t *pend_req_queue; + struct mqtt_request_t req_list[MQTT_REQ_MAX_IN_FLIGHT]; + void *inpub_arg; + /** Incoming data callback */ + mqtt_incoming_data_cb_t data_cb; + mqtt_incoming_publish_cb_t pub_cb; + /** Input */ + u32_t msg_idx; + u8_t rx_buffer[MQTT_VAR_HEADER_BUFFER_LEN]; + /** Output ring-buffer */ + struct mqtt_ringbuf_t output; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h new file mode 100644 index 00000000..a326d33b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h @@ -0,0 +1,51 @@ +/** + * @file + * NETBIOS name service responder + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + */ +#ifndef LWIP_HDR_APPS_NETBIOS_H +#define LWIP_HDR_APPS_NETBIOS_H + +#include "lwip/apps/netbiosns_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void netbiosns_init(void); +#ifndef NETBIOS_LWIP_NAME +void netbiosns_set_name(const char* hostname); +#endif +void netbiosns_stop(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_NETBIOS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h new file mode 100644 index 00000000..1f51ab02 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h @@ -0,0 +1,66 @@ +/** + * @file + * NETBIOS name service responder options + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_APPS_NETBIOS_OPTS_H +#define LWIP_HDR_APPS_NETBIOS_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup netbiosns_opts Options + * @ingroup netbiosns + * @{ + */ + +/** NetBIOS name of lwip device + * This must be uppercase until NETBIOS_STRCMP() is defined to a string + * comparision function that is case insensitive. + * If you want to use the netif's hostname, use this (with LWIP_NETIF_HOSTNAME): + * (ip_current_netif() != NULL ? ip_current_netif()->hostname != NULL ? ip_current_netif()->hostname : "" : "") + * + * If this is not defined, netbiosns_set_name() can be called at runtime to change the name. 
+ */ +#ifdef __DOXYGEN__ +#define NETBIOS_LWIP_NAME "NETBIOSLWIPDEV" +#endif + +/** Respond to NetBIOS name queries + * Default is disabled + */ +#if !defined LWIP_NETBIOS_RESPOND_NAME_QUERY || defined __DOXYGEN__ +#define LWIP_NETBIOS_RESPOND_NAME_QUERY 0 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_NETBIOS_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h new file mode 100644 index 00000000..fb4a4a7c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h @@ -0,0 +1,128 @@ +#ifndef LWIP_HDR_APPS_SMTP_H +#define LWIP_HDR_APPS_SMTP_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "lwip/apps/smtp_opts.h" +#include "lwip/err.h" +#include "lwip/prot/iana.h" + +/** The default TCP port used for SMTP */ +#define SMTP_DEFAULT_PORT LWIP_IANA_PORT_SMTP +/** The default TCP port used for SMTPS */ +#define SMTPS_DEFAULT_PORT LWIP_IANA_PORT_SMTPS + +/** Email successfully sent */ +#define SMTP_RESULT_OK 0 +/** Unknown error */ +#define SMTP_RESULT_ERR_UNKNOWN 1 +/** Connection to server failed */ +#define SMTP_RESULT_ERR_CONNECT 2 +/** Failed to resolve server hostname */ +#define SMTP_RESULT_ERR_HOSTNAME 3 +/** Connection unexpectedly closed by remote server */ +#define SMTP_RESULT_ERR_CLOSED 4 +/** Connection timed out (server didn't respond in time) */ +#define SMTP_RESULT_ERR_TIMEOUT 5 +/** Server responded with an unknown response code */ +#define SMTP_RESULT_ERR_SVR_RESP 6 +/** Out of resources locally */ +#define SMTP_RESULT_ERR_MEM 7 + +/** Prototype of an smtp callback function + * + * @param arg argument specified when initiating the email + * @param smtp_result result of the mail transfer (see defines SMTP_RESULT_*) + * @param srv_err if aborted by the server, this contains the error code received + * @param err an error returned by internal lwip functions, can help to specify + * the source of the error but must not necessarily be != ERR_OK + */ +typedef void (*smtp_result_fn)(void *arg, u8_t smtp_result, u16_t srv_err, err_t err); + +/** This structure is used as argument for smtp_send_mail_int(), + * which in turn can be used with tcpip_callback() to send mail + * from interrupt context, e.g. like this: + * struct smtp_send_request *req; (to be filled) + * tcpip_try_callback(smtp_send_mail_int, (void*)req); + * + * For member description, see parameter description of smtp_send_mail(). + * When using with tcpip_callback, this structure has to stay allocated + * (e.g. using mem_malloc/mem_free) until its 'callback_fn' is called. + */ +struct smtp_send_request { + const char *from; + const char* to; + const char* subject; + const char* body; + smtp_result_fn callback_fn; + void* callback_arg; + /** If this is != 0, data is *not* copied into an extra buffer + * but used from the pointers supplied in this struct. + * This means less memory usage, but data must stay untouched until + * the callback function is called. 
*/ + u8_t static_data; +}; + + +#if SMTP_BODYDH + +#ifndef SMTP_BODYDH_BUFFER_SIZE +#define SMTP_BODYDH_BUFFER_SIZE 256 +#endif /* SMTP_BODYDH_BUFFER_SIZE */ + +struct smtp_bodydh { + u16_t state; + u16_t length; /* Length of content in buffer */ + char buffer[SMTP_BODYDH_BUFFER_SIZE]; /* buffer for generated content */ +#ifdef SMTP_BODYDH_USER_SIZE + u8_t user[SMTP_BODYDH_USER_SIZE]; +#endif /* SMTP_BODYDH_USER_SIZE */ +}; + +enum bdh_retvals_e { + BDH_DONE = 0, + BDH_WORKING +}; + +/** Prototype of an smtp body callback function + * It receives a struct smtp_bodydh, and a buffer to write data, + * must return BDH_WORKING to be called again and BDH_DONE when + * it has finished processing. This one tries to fill one TCP buffer with + * data, your function will be repeatedly called until that happens; so if you + * know you'll be taking too long to serve your request, pause once in a while + * by writing length=0 to avoid hogging system resources + * + * @param arg argument specified when initiating the email + * @param smtp_bodydh state handling + buffer structure + */ +typedef int (*smtp_bodycback_fn)(void *arg, struct smtp_bodydh *bodydh); + +err_t smtp_send_mail_bodycback(const char *from, const char* to, const char* subject, + smtp_bodycback_fn bodycback_fn, smtp_result_fn callback_fn, void* callback_arg); + +#endif /* SMTP_BODYDH */ + + +err_t smtp_set_server_addr(const char* server); +void smtp_set_server_port(u16_t port); +#if LWIP_ALTCP && LWIP_ALTCP_TLS +struct altcp_tls_config; +void smtp_set_tls_config(struct altcp_tls_config *tls_config); +#endif +err_t smtp_set_auth(const char* username, const char* pass); +err_t smtp_send_mail(const char *from, const char* to, const char* subject, const char* body, + smtp_result_fn callback_fn, void* callback_arg); +err_t smtp_send_mail_static(const char *from, const char* to, const char* subject, const char* body, + smtp_result_fn callback_fn, void* callback_arg); +void smtp_send_mail_int(void *arg); +#ifdef LWIP_DEBUG +const char* smtp_result_str(u8_t smtp_result); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SMTP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h new file mode 100644 index 00000000..bc743f67 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h @@ -0,0 +1,81 @@ +#ifndef LWIP_HDR_APPS_SMTP_OPTS_H +#define LWIP_HDR_APPS_SMTP_OPTS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup smtp_opts Options + * @ingroup smtp + * + * @{ + */ + +/** Set this to 1 to enable data handler callback on BODY */ +#ifndef SMTP_BODYDH +#define SMTP_BODYDH 0 +#endif + +/** SMTP_DEBUG: Enable debugging for SNTP. */ +#ifndef SMTP_DEBUG +#define SMTP_DEBUG LWIP_DBG_OFF +#endif + +/** Maximum length reserved for server name including terminating 0 byte */ +#ifndef SMTP_MAX_SERVERNAME_LEN +#define SMTP_MAX_SERVERNAME_LEN 256 +#endif + +/** Maximum length reserved for username */ +#ifndef SMTP_MAX_USERNAME_LEN +#define SMTP_MAX_USERNAME_LEN 32 +#endif + +/** Maximum length reserved for password */ +#ifndef SMTP_MAX_PASS_LEN +#define SMTP_MAX_PASS_LEN 32 +#endif + +/** Set this to 0 if you know the authentication data will not change + * during the smtp session, which saves some heap space. 
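Another sketch (editorial, not part of the patch): when already running in the TCP/IP thread (or with core locking enabled), the mail can be handed over directly through the functions declared above; server name, credentials and addresses below are hypothetical.

#include "lwip/apps/smtp.h"

static void smtp_result_cb(void *arg, u8_t smtp_result, u16_t srv_err, err_t err)
{
  (void)arg; (void)srv_err; (void)err;
  if (smtp_result == SMTP_RESULT_OK) {
    /* mail accepted by the server */
  }
}

static void send_status_mail(void)
{
  smtp_set_server_addr("mail.example.com");   /* copied internally, limited by SMTP_MAX_SERVERNAME_LEN */
  smtp_set_server_port(SMTP_DEFAULT_PORT);
  smtp_set_auth("user", "secret");            /* copied when SMTP_COPY_AUTHDATA == 1 */
  smtp_send_mail("board@example.com", "admin@example.com",
                 "status", "hello from lwIP",
                 smtp_result_cb, NULL);
}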
*/ +#ifndef SMTP_COPY_AUTHDATA +#define SMTP_COPY_AUTHDATA 1 +#endif + +/** Set this to 0 to save some code space if you know for sure that all data + * passed to this module conforms to the requirements in the SMTP RFC. + * WARNING: use this with care! + */ +#ifndef SMTP_CHECK_DATA +#define SMTP_CHECK_DATA 1 +#endif + +/** Set this to 1 to enable AUTH PLAIN support */ +#ifndef SMTP_SUPPORT_AUTH_PLAIN +#define SMTP_SUPPORT_AUTH_PLAIN 1 +#endif + +/** Set this to 1 to enable AUTH LOGIN support */ +#ifndef SMTP_SUPPORT_AUTH_LOGIN +#define SMTP_SUPPORT_AUTH_LOGIN 1 +#endif + +/* Memory allocation/deallocation can be overridden... */ +#ifndef SMTP_STATE_MALLOC +#define SMTP_STATE_MALLOC(size) mem_malloc(size) +#define SMTP_STATE_FREE(ptr) mem_free(ptr) +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* SMTP_OPTS_H */ + diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h new file mode 100644 index 00000000..a3f8eb15 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h @@ -0,0 +1,135 @@ +/** + * @file + * SNMP server main API - start and basic configuration + */ + +/* + * Copyright (c) 2001, 2002 Leon Woestenberg + * Copyright (c) 2001, 2002 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Leon Woestenberg + * Martin Hentschel + * + */ +#ifndef LWIP_HDR_APPS_SNMP_H +#define LWIP_HDR_APPS_SNMP_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/apps/snmp_core.h" + +/** SNMP variable binding descriptor (publically needed for traps) */ +struct snmp_varbind +{ + /** pointer to next varbind, NULL for last in list */ + struct snmp_varbind *next; + /** pointer to previous varbind, NULL for first in list */ + struct snmp_varbind *prev; + + /** object identifier */ + struct snmp_obj_id oid; + + /** value ASN1 type */ + u8_t type; + /** object value length */ + u16_t value_len; + /** object value */ + void *value; +}; + +/** + * @ingroup snmp_core + * Agent setup, start listening to port 161. + */ +void snmp_init(void); +void snmp_set_mibs(const struct snmp_mib **mibs, u8_t num_mibs); + +void snmp_set_device_enterprise_oid(const struct snmp_obj_id* device_enterprise_oid); +const struct snmp_obj_id* snmp_get_device_enterprise_oid(void); + +void snmp_trap_dst_enable(u8_t dst_idx, u8_t enable); +void snmp_trap_dst_ip_set(u8_t dst_idx, const ip_addr_t *dst); + +/** Generic trap: cold start */ +#define SNMP_GENTRAP_COLDSTART 0 +/** Generic trap: warm start */ +#define SNMP_GENTRAP_WARMSTART 1 +/** Generic trap: link down */ +#define SNMP_GENTRAP_LINKDOWN 2 +/** Generic trap: link up */ +#define SNMP_GENTRAP_LINKUP 3 +/** Generic trap: authentication failure */ +#define SNMP_GENTRAP_AUTH_FAILURE 4 +/** Generic trap: EGP neighbor lost */ +#define SNMP_GENTRAP_EGP_NEIGHBOR_LOSS 5 +/** Generic trap: enterprise specific */ +#define SNMP_GENTRAP_ENTERPRISE_SPECIFIC 6 + +err_t snmp_send_trap_generic(s32_t generic_trap); +err_t snmp_send_trap_specific(s32_t specific_trap, struct snmp_varbind *varbinds); +err_t snmp_send_trap(const struct snmp_obj_id* oid, s32_t generic_trap, s32_t specific_trap, struct snmp_varbind *varbinds); + +#define SNMP_AUTH_TRAPS_DISABLED 0 +#define SNMP_AUTH_TRAPS_ENABLED 1 +void snmp_set_auth_traps_enabled(u8_t enable); +u8_t snmp_get_auth_traps_enabled(void); + +u8_t snmp_v1_enabled(void); +u8_t snmp_v2c_enabled(void); +u8_t snmp_v3_enabled(void); +void snmp_v1_enable(u8_t enable); +void snmp_v2c_enable(u8_t enable); +void snmp_v3_enable(u8_t enable); + +const char * snmp_get_community(void); +const char * snmp_get_community_write(void); +const char * snmp_get_community_trap(void); +void snmp_set_community(const char * const community); +void snmp_set_community_write(const char * const community); +void snmp_set_community_trap(const char * const community); + +void snmp_coldstart_trap(void); +void snmp_authfail_trap(void); + +typedef void (*snmp_write_callback_fct)(const u32_t* oid, u8_t oid_len, void* callback_arg); +void snmp_set_write_callback(snmp_write_callback_fct write_callback, void* callback_arg); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h new file mode 100644 index 00000000..6021c722 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h @@ -0,0 +1,377 @@ +/** + * @file + * SNMP core API for implementing MIBs + */ + +/* + * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Christiaan Simons + * Martin Hentschel + */ + +#ifndef LWIP_HDR_APPS_SNMP_CORE_H +#define LWIP_HDR_APPS_SNMP_CORE_H + +#include "lwip/apps/snmp_opts.h" + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* basic ASN1 defines */ +#define SNMP_ASN1_CLASS_UNIVERSAL 0x00 +#define SNMP_ASN1_CLASS_APPLICATION 0x40 +#define SNMP_ASN1_CLASS_CONTEXT 0x80 +#define SNMP_ASN1_CLASS_PRIVATE 0xC0 + +#define SNMP_ASN1_CONTENTTYPE_PRIMITIVE 0x00 +#define SNMP_ASN1_CONTENTTYPE_CONSTRUCTED 0x20 + +/* universal tags (from ASN.1 spec.) 
*/ +#define SNMP_ASN1_UNIVERSAL_END_OF_CONTENT 0 +#define SNMP_ASN1_UNIVERSAL_INTEGER 2 +#define SNMP_ASN1_UNIVERSAL_OCTET_STRING 4 +#define SNMP_ASN1_UNIVERSAL_NULL 5 +#define SNMP_ASN1_UNIVERSAL_OBJECT_ID 6 +#define SNMP_ASN1_UNIVERSAL_SEQUENCE_OF 16 + +/* application specific (SNMP) tags (from SNMPv2-SMI) */ +#define SNMP_ASN1_APPLICATION_IPADDR 0 /* [APPLICATION 0] IMPLICIT OCTET STRING (SIZE (4)) */ +#define SNMP_ASN1_APPLICATION_COUNTER 1 /* [APPLICATION 1] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_GAUGE 2 /* [APPLICATION 2] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_TIMETICKS 3 /* [APPLICATION 3] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_OPAQUE 4 /* [APPLICATION 4] IMPLICIT OCTET STRING */ +#define SNMP_ASN1_APPLICATION_COUNTER64 6 /* [APPLICATION 6] IMPLICIT INTEGER (0..18446744073709551615) */ + +/* context specific (SNMP) tags (from RFC 1905) */ +#define SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_INSTANCE 1 + +/* full ASN1 type defines */ +#define SNMP_ASN1_TYPE_END_OF_CONTENT (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_END_OF_CONTENT) +#define SNMP_ASN1_TYPE_INTEGER (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_INTEGER) +#define SNMP_ASN1_TYPE_OCTET_STRING (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_OCTET_STRING) +#define SNMP_ASN1_TYPE_NULL (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_NULL) +#define SNMP_ASN1_TYPE_OBJECT_ID (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_OBJECT_ID) +#define SNMP_ASN1_TYPE_SEQUENCE (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_CONSTRUCTED | SNMP_ASN1_UNIVERSAL_SEQUENCE_OF) +#define SNMP_ASN1_TYPE_IPADDR (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_IPADDR) +#define SNMP_ASN1_TYPE_IPADDRESS SNMP_ASN1_TYPE_IPADDR +#define SNMP_ASN1_TYPE_COUNTER (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_COUNTER) +#define SNMP_ASN1_TYPE_COUNTER32 SNMP_ASN1_TYPE_COUNTER +#define SNMP_ASN1_TYPE_GAUGE (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_GAUGE) +#define SNMP_ASN1_TYPE_GAUGE32 SNMP_ASN1_TYPE_GAUGE +#define SNMP_ASN1_TYPE_UNSIGNED32 SNMP_ASN1_TYPE_GAUGE +#define SNMP_ASN1_TYPE_TIMETICKS (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_TIMETICKS) +#define SNMP_ASN1_TYPE_OPAQUE (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_OPAQUE) +#if LWIP_HAVE_INT64 +#define SNMP_ASN1_TYPE_COUNTER64 (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_COUNTER64) +#endif + +#define SNMP_VARBIND_EXCEPTION_OFFSET 0xF0 +#define SNMP_VARBIND_EXCEPTION_MASK 0x0F + +/** error codes predefined by SNMP prot. */ +typedef enum { + SNMP_ERR_NOERROR = 0, +/* +outdated v1 error codes. do not use anmore! 
+#define SNMP_ERR_NOSUCHNAME 2 use SNMP_ERR_NOSUCHINSTANCE instead +#define SNMP_ERR_BADVALUE 3 use SNMP_ERR_WRONGTYPE,SNMP_ERR_WRONGLENGTH,SNMP_ERR_WRONGENCODING or SNMP_ERR_WRONGVALUE instead +#define SNMP_ERR_READONLY 4 use SNMP_ERR_NOTWRITABLE instead +*/ + SNMP_ERR_GENERROR = 5, + SNMP_ERR_NOACCESS = 6, + SNMP_ERR_WRONGTYPE = 7, + SNMP_ERR_WRONGLENGTH = 8, + SNMP_ERR_WRONGENCODING = 9, + SNMP_ERR_WRONGVALUE = 10, + SNMP_ERR_NOCREATION = 11, + SNMP_ERR_INCONSISTENTVALUE = 12, + SNMP_ERR_RESOURCEUNAVAILABLE = 13, + SNMP_ERR_COMMITFAILED = 14, + SNMP_ERR_UNDOFAILED = 15, + SNMP_ERR_NOTWRITABLE = 17, + SNMP_ERR_INCONSISTENTNAME = 18, + + SNMP_ERR_NOSUCHINSTANCE = SNMP_VARBIND_EXCEPTION_OFFSET + SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_INSTANCE +} snmp_err_t; + +/** internal object identifier representation */ +struct snmp_obj_id +{ + u8_t len; + u32_t id[SNMP_MAX_OBJ_ID_LEN]; +}; + +struct snmp_obj_id_const_ref +{ + u8_t len; + const u32_t* id; +}; + +extern const struct snmp_obj_id_const_ref snmp_zero_dot_zero; /* administrative identifier from SNMPv2-SMI */ + +/** SNMP variant value, used as reference in struct snmp_node_instance and table implementation */ +union snmp_variant_value +{ + void* ptr; + const void* const_ptr; + u32_t u32; + s32_t s32; +#if LWIP_HAVE_INT64 + u64_t u64; +#endif +}; + + +/** +SNMP MIB node types + tree node is the only node the stack can process in order to walk the tree, + all other nodes are assumed to be leaf nodes. + This cannot be an enum because users may want to define their own node types. +*/ +#define SNMP_NODE_TREE 0x00 +/* predefined leaf node types */ +#define SNMP_NODE_SCALAR 0x01 +#define SNMP_NODE_SCALAR_ARRAY 0x02 +#define SNMP_NODE_TABLE 0x03 +#define SNMP_NODE_THREADSYNC 0x04 + +/** node "base class" layout, the mandatory fields for a node */ +struct snmp_node +{ + /** one out of SNMP_NODE_TREE or any leaf node type (like SNMP_NODE_SCALAR) */ + u8_t node_type; + /** the number assigned to this node which used as part of the full OID */ + u32_t oid; +}; + +/** SNMP node instance access types */ +typedef enum { + SNMP_NODE_INSTANCE_ACCESS_READ = 1, + SNMP_NODE_INSTANCE_ACCESS_WRITE = 2, + SNMP_NODE_INSTANCE_READ_ONLY = SNMP_NODE_INSTANCE_ACCESS_READ, + SNMP_NODE_INSTANCE_READ_WRITE = (SNMP_NODE_INSTANCE_ACCESS_READ | SNMP_NODE_INSTANCE_ACCESS_WRITE), + SNMP_NODE_INSTANCE_WRITE_ONLY = SNMP_NODE_INSTANCE_ACCESS_WRITE, + SNMP_NODE_INSTANCE_NOT_ACCESSIBLE = 0 +} snmp_access_t; + +struct snmp_node_instance; + +typedef s16_t (*node_instance_get_value_method)(struct snmp_node_instance*, void*); +typedef snmp_err_t (*node_instance_set_test_method)(struct snmp_node_instance*, u16_t, void*); +typedef snmp_err_t (*node_instance_set_value_method)(struct snmp_node_instance*, u16_t, void*); +typedef void (*node_instance_release_method)(struct snmp_node_instance*); + +#define SNMP_GET_VALUE_RAW_DATA 0x4000 /* do not use 0x8000 because return value of node_instance_get_value_method is signed16 and 0x8000 would be the signed bit */ + +/** SNMP node instance */ +struct snmp_node_instance +{ + /** prefilled with the node, get_instance() is called on; may be changed by user to any value to pass an arbitrary node between calls to get_instance() and get_value/test_value/set_value */ + const struct snmp_node* node; + /** prefilled with the instance id requested; for get_instance() this is the exact oid requested; for get_next_instance() this is the relative starting point, stack expects relative oid of next node here */ + struct snmp_obj_id instance_oid; + + /** ASN type 
for this object (see snmp_asn1.h for definitions) */ + u8_t asn1_type; + /** one out of instance access types defined above (SNMP_NODE_INSTANCE_READ_ONLY,...) */ + snmp_access_t access; + + /** returns object value for the given object identifier. Return values <0 to indicate an error */ + node_instance_get_value_method get_value; + /** tests length and/or range BEFORE setting */ + node_instance_set_test_method set_test; + /** sets object value, only called when set_test() was successful */ + node_instance_set_value_method set_value; + /** called in any case when the instance is not required anymore by stack (useful for freeing memory allocated in get_instance/get_next_instance methods) */ + node_instance_release_method release_instance; + + /** reference to pass arbitrary value between calls to get_instance() and get_value/test_value/set_value */ + union snmp_variant_value reference; + /** see reference (if reference is a pointer, the length of underlying data may be stored here or anything else) */ + u32_t reference_len; +}; + + +/** SNMP tree node */ +struct snmp_tree_node +{ + /** inherited "base class" members */ + struct snmp_node node; + u16_t subnode_count; + const struct snmp_node* const *subnodes; +}; + +#define SNMP_CREATE_TREE_NODE(oid, subnodes) \ + {{ SNMP_NODE_TREE, (oid) }, \ + (u16_t)LWIP_ARRAYSIZE(subnodes), (subnodes) } + +#define SNMP_CREATE_EMPTY_TREE_NODE(oid) \ + {{ SNMP_NODE_TREE, (oid) }, \ + 0, NULL } + +/** SNMP leaf node */ +struct snmp_leaf_node +{ + /** inherited "base class" members */ + struct snmp_node node; + snmp_err_t (*get_instance)(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + snmp_err_t (*get_next_instance)(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +}; + +/** represents a single mib with its base oid and root node */ +struct snmp_mib +{ + const u32_t *base_oid; + u8_t base_oid_len; + const struct snmp_node *root_node; +}; + +#define SNMP_MIB_CREATE(oid_list, root_node) { (oid_list), (u8_t)LWIP_ARRAYSIZE(oid_list), root_node } + +/** OID range structure */ +struct snmp_oid_range +{ + u32_t min; + u32_t max; +}; + +/** checks if incoming OID length and values are in allowed ranges */ +u8_t snmp_oid_in_range(const u32_t *oid_in, u8_t oid_len, const struct snmp_oid_range *oid_ranges, u8_t oid_ranges_len); + +typedef enum { + SNMP_NEXT_OID_STATUS_SUCCESS, + SNMP_NEXT_OID_STATUS_NO_MATCH, + SNMP_NEXT_OID_STATUS_BUF_TO_SMALL +} snmp_next_oid_status_t; + +/** state for next_oid_init / next_oid_check functions */ +struct snmp_next_oid_state +{ + const u32_t* start_oid; + u8_t start_oid_len; + + u32_t* next_oid; + u8_t next_oid_len; + u8_t next_oid_max_len; + + snmp_next_oid_status_t status; + void* reference; +}; + +void snmp_next_oid_init(struct snmp_next_oid_state *state, + const u32_t *start_oid, u8_t start_oid_len, + u32_t *next_oid_buf, u8_t next_oid_max_len); +u8_t snmp_next_oid_precheck(struct snmp_next_oid_state *state, const u32_t *oid, u8_t oid_len); +u8_t snmp_next_oid_check(struct snmp_next_oid_state *state, const u32_t *oid, u8_t oid_len, void* reference); + +void snmp_oid_assign(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +void snmp_oid_combine(struct snmp_obj_id* target, const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); +void snmp_oid_prefix(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +void snmp_oid_append(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +u8_t snmp_oid_equal(const u32_t *oid1, u8_t oid1_len, 
const u32_t *oid2, u8_t oid2_len); +s8_t snmp_oid_compare(const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); + +#if LWIP_IPV4 +u8_t snmp_oid_to_ip4(const u32_t *oid, ip4_addr_t *ip); +void snmp_ip4_to_oid(const ip4_addr_t *ip, u32_t *oid); +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +u8_t snmp_oid_to_ip6(const u32_t *oid, ip6_addr_t *ip); +void snmp_ip6_to_oid(const ip6_addr_t *ip, u32_t *oid); +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 || LWIP_IPV6 +u8_t snmp_ip_to_oid(const ip_addr_t *ip, u32_t *oid); +u8_t snmp_ip_port_to_oid(const ip_addr_t *ip, u16_t port, u32_t *oid); + +u8_t snmp_oid_to_ip(const u32_t *oid, u8_t oid_len, ip_addr_t *ip); +u8_t snmp_oid_to_ip_port(const u32_t *oid, u8_t oid_len, ip_addr_t *ip, u16_t *port); +#endif /* LWIP_IPV4 || LWIP_IPV6 */ + +struct netif; +u8_t netif_to_num(const struct netif *netif); + +snmp_err_t snmp_set_test_ok(struct snmp_node_instance* instance, u16_t value_len, void* value); /* generic function which can be used if test is always successful */ + +err_t snmp_decode_bits(const u8_t *buf, u32_t buf_len, u32_t *bit_value); +err_t snmp_decode_truthvalue(const s32_t *asn1_value, u8_t *bool_value); +u8_t snmp_encode_bits(u8_t *buf, u32_t buf_len, u32_t bit_value, u8_t bit_count); +u8_t snmp_encode_truthvalue(s32_t *asn1_value, u32_t bool_value); + +struct snmp_statistics +{ + u32_t inpkts; + u32_t outpkts; + u32_t inbadversions; + u32_t inbadcommunitynames; + u32_t inbadcommunityuses; + u32_t inasnparseerrs; + u32_t intoobigs; + u32_t innosuchnames; + u32_t inbadvalues; + u32_t inreadonlys; + u32_t ingenerrs; + u32_t intotalreqvars; + u32_t intotalsetvars; + u32_t ingetrequests; + u32_t ingetnexts; + u32_t insetrequests; + u32_t ingetresponses; + u32_t intraps; + u32_t outtoobigs; + u32_t outnosuchnames; + u32_t outbadvalues; + u32_t outgenerrs; + u32_t outgetrequests; + u32_t outgetnexts; + u32_t outsetrequests; + u32_t outgetresponses; + u32_t outtraps; +#if LWIP_SNMP_V3 + u32_t unsupportedseclevels; + u32_t notintimewindows; + u32_t unknownusernames; + u32_t unknownengineids; + u32_t wrongdigests; + u32_t decryptionerrors; +#endif +}; + +extern struct snmp_statistics snmp_stats; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SNMP */ + +#endif /* LWIP_HDR_APPS_SNMP_CORE_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h new file mode 100644 index 00000000..2f4a6893 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h @@ -0,0 +1,78 @@ +/** + * @file + * SNMP MIB2 API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_APPS_SNMP_MIB2_H +#define LWIP_HDR_APPS_SNMP_MIB2_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ +#if SNMP_LWIP_MIB2 + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_mib mib2; + +#if SNMP_USE_NETCONN +#include "lwip/apps/snmp_threadsync.h" +void snmp_mib2_lwip_synchronizer(snmp_threadsync_called_fn fn, void* arg); +extern struct snmp_threadsync_instance snmp_mib2_lwip_locks; +#endif + +#ifndef SNMP_SYSSERVICES +#define SNMP_SYSSERVICES ((1 << 6) | (1 << 3) | ((IP_FORWARD) << 2)) +#endif + +void snmp_mib2_set_sysdescr(const u8_t* str, const u16_t* len); /* read-only be defintion */ +void snmp_mib2_set_syscontact(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_syscontact_readonly(const u8_t *ocstr, const u16_t *ocstrlen); +void snmp_mib2_set_sysname(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_sysname_readonly(const u8_t *ocstr, const u16_t *ocstrlen); +void snmp_mib2_set_syslocation(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_syslocation_readonly(const u8_t *ocstr, const u16_t *ocstrlen); + +#endif /* SNMP_LWIP_MIB2 */ +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_MIB2_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h new file mode 100644 index 00000000..c892d22a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h @@ -0,0 +1,297 @@ +/** + * @file + * SNMP server options list + */ + +/* + * Copyright (c) 2015 Dirk Ziegelmeier + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_SNMP_OPTS_H +#define LWIP_HDR_SNMP_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup snmp_opts Options + * @ingroup snmp + * @{ + */ + +/** + * LWIP_SNMP==1: This enables the lwIP SNMP agent. UDP must be available + * for SNMP transport. + * If you want to use your own SNMP agent, leave this disabled. + * To integrate MIB2 of an external agent, you need to enable + * LWIP_MIB2_CALLBACKS and MIB2_STATS. This will give you the callbacks + * and statistics counters you need to get MIB2 working. + */ +#if !defined LWIP_SNMP || defined __DOXYGEN__ +#define LWIP_SNMP 0 +#endif + +/** + * SNMP_USE_NETCONN: Use netconn API instead of raw API. + * Makes SNMP agent run in a worker thread, so blocking operations + * can be done in MIB calls. + */ +#if !defined SNMP_USE_NETCONN || defined __DOXYGEN__ +#define SNMP_USE_NETCONN 0 +#endif + +/** + * SNMP_USE_RAW: Use raw API. + * SNMP agent does not run in a worker thread, so blocking operations + * should not be done in MIB calls. + */ +#if !defined SNMP_USE_RAW || defined __DOXYGEN__ +#define SNMP_USE_RAW 1 +#endif + +#if SNMP_USE_NETCONN && SNMP_USE_RAW +#error SNMP stack can use only one of the APIs {raw, netconn} +#endif + +#if LWIP_SNMP && !SNMP_USE_NETCONN && !SNMP_USE_RAW +#error SNMP stack needs a receive API and UDP {raw, netconn} +#endif + +#if SNMP_USE_NETCONN +/** + * SNMP_STACK_SIZE: Stack size of SNMP netconn worker thread + */ +#if !defined SNMP_STACK_SIZE || defined __DOXYGEN__ +#define SNMP_STACK_SIZE DEFAULT_THREAD_STACKSIZE +#endif + +/** + * SNMP_THREAD_PRIO: SNMP netconn worker thread priority + */ +#if !defined SNMP_THREAD_PRIO || defined __DOXYGEN__ +#define SNMP_THREAD_PRIO DEFAULT_THREAD_PRIO +#endif +#endif /* SNMP_USE_NETCONN */ + +/** + * SNMP_TRAP_DESTINATIONS: Number of trap destinations. At least one trap + * destination is required + */ +#if !defined SNMP_TRAP_DESTINATIONS || defined __DOXYGEN__ +#define SNMP_TRAP_DESTINATIONS 1 +#endif + +/** + * Only allow SNMP write actions that are 'safe' (e.g. disabling netifs is not + * a safe action and disabled when SNMP_SAFE_REQUESTS = 1). + * Unsafe requests are disabled by default! + */ +#if !defined SNMP_SAFE_REQUESTS || defined __DOXYGEN__ +#define SNMP_SAFE_REQUESTS 1 +#endif + +/** + * The maximum length of strings used. + */ +#if !defined SNMP_MAX_OCTET_STRING_LEN || defined __DOXYGEN__ +#define SNMP_MAX_OCTET_STRING_LEN 127 +#endif + +/** + * The maximum number of Sub ID's inside an object identifier. + * Indirectly this also limits the maximum depth of SNMP tree. + */ +#if !defined SNMP_MAX_OBJ_ID_LEN || defined __DOXYGEN__ +#define SNMP_MAX_OBJ_ID_LEN 50 +#endif + +#if !defined SNMP_MAX_VALUE_SIZE || defined __DOXYGEN__ +/** + * The minimum size of a value. + */ +#define SNMP_MIN_VALUE_SIZE (2 * sizeof(u32_t*)) /* size required to store the basic types (8 bytes for counter64) */ +/** + * The maximum size of a value. 
+ */ +#define SNMP_MAX_VALUE_SIZE LWIP_MAX(LWIP_MAX((SNMP_MAX_OCTET_STRING_LEN), sizeof(u32_t)*(SNMP_MAX_OBJ_ID_LEN)), SNMP_MIN_VALUE_SIZE) +#endif + +/** + * The snmp read-access community. Used for write-access and traps, too + * unless SNMP_COMMUNITY_WRITE or SNMP_COMMUNITY_TRAP are enabled, respectively. + */ +#if !defined SNMP_COMMUNITY || defined __DOXYGEN__ +#define SNMP_COMMUNITY "public" +#endif + +/** + * The snmp write-access community. + * Set this community to "" in order to disallow any write access. + */ +#if !defined SNMP_COMMUNITY_WRITE || defined __DOXYGEN__ +#define SNMP_COMMUNITY_WRITE "private" +#endif + +/** + * The snmp community used for sending traps. + */ +#if !defined SNMP_COMMUNITY_TRAP || defined __DOXYGEN__ +#define SNMP_COMMUNITY_TRAP "public" +#endif + +/** + * The maximum length of community string. + * If community names shall be adjusted at runtime via snmp_set_community() calls, + * enter here the possible maximum length (+1 for terminating null character). + */ +#if !defined SNMP_MAX_COMMUNITY_STR_LEN || defined __DOXYGEN__ +#define SNMP_MAX_COMMUNITY_STR_LEN LWIP_MAX(LWIP_MAX(sizeof(SNMP_COMMUNITY), sizeof(SNMP_COMMUNITY_WRITE)), sizeof(SNMP_COMMUNITY_TRAP)) +#endif + +/** + * The OID identifiying the device. This may be the enterprise OID itself or any OID located below it in tree. + */ +#if !defined SNMP_DEVICE_ENTERPRISE_OID || defined __DOXYGEN__ +#define SNMP_LWIP_ENTERPRISE_OID 26381 +/** + * IANA assigned enterprise ID for lwIP is 26381 + * @see http://www.iana.org/assignments/enterprise-numbers + * + * @note this enterprise ID is assigned to the lwIP project, + * all object identifiers living under this ID are assigned + * by the lwIP maintainers! + * @note don't change this define, use snmp_set_device_enterprise_oid() + * + * If you need to create your own private MIB you'll need + * to apply for your own enterprise ID with IANA: + * http://www.iana.org/numbers.html + */ +#define SNMP_DEVICE_ENTERPRISE_OID {1, 3, 6, 1, 4, 1, SNMP_LWIP_ENTERPRISE_OID} +/** + * Length of SNMP_DEVICE_ENTERPRISE_OID + */ +#define SNMP_DEVICE_ENTERPRISE_OID_LEN 7 +#endif + +/** + * SNMP_DEBUG: Enable debugging for SNMP messages. + */ +#if !defined SNMP_DEBUG || defined __DOXYGEN__ +#define SNMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SNMP_MIB_DEBUG: Enable debugging for SNMP MIBs. + */ +#if !defined SNMP_MIB_DEBUG || defined __DOXYGEN__ +#define SNMP_MIB_DEBUG LWIP_DBG_OFF +#endif + +/** + * Indicates if the MIB2 implementation of LWIP SNMP stack is used. + */ +#if !defined SNMP_LWIP_MIB2 || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2 LWIP_SNMP +#endif + +/** + * Value return for sysDesc field of MIB2. + */ +#if !defined SNMP_LWIP_MIB2_SYSDESC || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSDESC "lwIP" +#endif + +/** + * Value return for sysName field of MIB2. + * To make sysName field settable, call snmp_mib2_set_sysname() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSNAME || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSNAME "FQDN-unk" +#endif + +/** + * Value return for sysContact field of MIB2. + * To make sysContact field settable, call snmp_mib2_set_syscontact() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSCONTACT || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSCONTACT "" +#endif + +/** + * Value return for sysLocation field of MIB2. + * To make sysLocation field settable, call snmp_mib2_set_syslocation() to provide the necessary buffers. 
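Startup sketch (editorial, not part of the patch): assuming LWIP_SNMP == 1 and SNMP_LWIP_MIB2 enabled, an application could provide writable buffers for the settable MIB-2 fields, point one trap destination at a manager, and start the agent roughly as follows; the buffer names, the sysName value and the manager address are hypothetical.

#include "lwip/apps/snmp.h"
#include "lwip/apps/snmp_mib2.h"

/* Writable buffer so a manager can set sysName at runtime. */
static u8_t  sysname_buf[32] = "my-board";
static u16_t sysname_len     = 8;          /* strlen("my-board") */

static const struct snmp_mib *my_mibs[] = { &mib2 };

void app_snmp_start(void)
{
  ip_addr_t trap_dst;

  snmp_mib2_set_sysname(sysname_buf, &sysname_len, sizeof(sysname_buf));
  snmp_set_mibs(my_mibs, LWIP_ARRAYSIZE(my_mibs));

  /* optional: send traps to one management station */
  IP_ADDR4(&trap_dst, 192, 168, 1, 10);
  snmp_trap_dst_ip_set(0, &trap_dst);
  snmp_trap_dst_enable(0, 1);

  snmp_init();              /* agent starts listening on UDP port 161 */
  snmp_coldstart_trap();    /* announce the (re)start */
}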
+ */ +#if !defined SNMP_LWIP_MIB2_SYSLOCATION || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSLOCATION "" +#endif + +/** + * This value is used to limit the repetitions processed in GetBulk requests (value == 0 means no limitation). + * This may be useful to limit the load for a single request. + * According to SNMP RFC 1905 it is allowed to not return all requested variables from a GetBulk request if system load would be too high. + * so the effect is that the client will do more requests to gather all data. + * For the stack this could be useful in case that SNMP processing is done in TCP/IP thread. In this situation a request with many + * repetitions could block the thread for a longer time. Setting limit here will keep the stack more responsive. + */ +#if !defined SNMP_LWIP_GETBULK_MAX_REPETITIONS || defined __DOXYGEN__ +#define SNMP_LWIP_GETBULK_MAX_REPETITIONS 0 +#endif + +/** + * @} + */ + +/* + ------------------------------------ + ---------- SNMPv3 options ---------- + ------------------------------------ +*/ + +/** + * LWIP_SNMP_V3==1: This enables EXPERIMENTAL SNMPv3 support. LWIP_SNMP must + * also be enabled. + * THIS IS UNDER DEVELOPMENT AND SHOULD NOT BE ENABLED IN PRODUCTS. + */ +#ifndef LWIP_SNMP_V3 +#define LWIP_SNMP_V3 0 +#endif + +#ifndef LWIP_SNMP_V3_MBEDTLS +#define LWIP_SNMP_V3_MBEDTLS LWIP_SNMP_V3 +#endif + +#ifndef LWIP_SNMP_V3_CRYPTO +#define LWIP_SNMP_V3_CRYPTO LWIP_SNMP_V3_MBEDTLS +#endif + +#ifndef LWIP_SNMP_CONFIGURE_VERSIONS +#define LWIP_SNMP_CONFIGURE_VERSIONS 0 +#endif + +#endif /* LWIP_HDR_SNMP_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h new file mode 100644 index 00000000..40a060c6 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h @@ -0,0 +1,113 @@ +/** + * @file + * SNMP server MIB API to implement scalar nodes + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Martin Hentschel + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_SCALAR_H +#define LWIP_HDR_APPS_SNMP_SCALAR_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/apps/snmp_core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +/** basic scalar node */ +struct snmp_scalar_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u8_t asn1_type; + snmp_access_t access; + node_instance_get_value_method get_value; + node_instance_set_test_method set_test; + node_instance_set_value_method set_value; +}; + + +snmp_err_t snmp_scalar_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_scalar_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_SCALAR_CREATE_NODE(oid, access, asn1_type, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_SCALAR, (oid) }, \ + snmp_scalar_get_instance, \ + snmp_scalar_get_next_instance }, \ + (asn1_type), (access), (get_value_method), (set_test_method), (set_value_method) } + +#define SNMP_SCALAR_CREATE_NODE_READONLY(oid, asn1_type, get_value_method) SNMP_SCALAR_CREATE_NODE(oid, SNMP_NODE_INSTANCE_READ_ONLY, asn1_type, get_value_method, NULL, NULL) + +/** scalar array node - a tree node which contains scalars only as children */ +struct snmp_scalar_array_node_def +{ + u32_t oid; + u8_t asn1_type; + snmp_access_t access; +}; + +typedef s16_t (*snmp_scalar_array_get_value_method)(const struct snmp_scalar_array_node_def*, void*); +typedef snmp_err_t (*snmp_scalar_array_set_test_method)(const struct snmp_scalar_array_node_def*, u16_t, void*); +typedef snmp_err_t (*snmp_scalar_array_set_value_method)(const struct snmp_scalar_array_node_def*, u16_t, void*); + +/** basic scalar array node */ +struct snmp_scalar_array_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u16_t array_node_count; + const struct snmp_scalar_array_node_def* array_nodes; + snmp_scalar_array_get_value_method get_value; + snmp_scalar_array_set_test_method set_test; + snmp_scalar_array_set_value_method set_value; +}; + +snmp_err_t snmp_scalar_array_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_scalar_array_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_SCALAR_CREATE_ARRAY_NODE(oid, array_nodes, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_SCALAR_ARRAY, (oid) }, \ + snmp_scalar_array_get_instance, \ + snmp_scalar_array_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(array_nodes), (array_nodes), (get_value_method), (set_test_method), (set_value_method) } + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_SCALAR_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h new file mode 100644 index 00000000..47409cc2 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h @@ -0,0 +1,32 @@ +/* +Generated by LwipMibCompiler +*/ + +#ifndef LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H +#define LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H + +#include "lwip/apps/snmp_opts.h" +#if LWIP_SNMP + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_obj_id 
usmNoAuthProtocol; +extern const struct snmp_obj_id usmHMACMD5AuthProtocol; +extern const struct snmp_obj_id usmHMACSHAAuthProtocol; + +extern const struct snmp_obj_id usmNoPrivProtocol; +extern const struct snmp_obj_id usmDESPrivProtocol; +extern const struct snmp_obj_id usmAESPrivProtocol; + +extern const struct snmp_mib snmpframeworkmib; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* LWIP_SNMP */ +#endif /* LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h new file mode 100644 index 00000000..88cfcd8e --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h @@ -0,0 +1,24 @@ +/* +Generated by LwipMibCompiler +*/ + +#ifndef LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H +#define LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H + +#include "lwip/apps/snmp_opts.h" +#if LWIP_SNMP + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_mib snmpusmmib; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* LWIP_SNMP */ +#endif /* LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h new file mode 100644 index 00000000..4988b51c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h @@ -0,0 +1,134 @@ +/** + * @file + * SNMP server MIB API to implement table nodes + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Martin Hentschel + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_TABLE_H +#define LWIP_HDR_APPS_SNMP_TABLE_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/apps/snmp_core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +/** default (customizable) read/write table */ +struct snmp_table_col_def +{ + u32_t index; + u8_t asn1_type; + snmp_access_t access; +}; + +/** table node */ +struct snmp_table_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u16_t column_count; + const struct snmp_table_col_def* columns; + snmp_err_t (*get_cell_instance)(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, struct snmp_node_instance* cell_instance); + snmp_err_t (*get_next_cell_instance)(const u32_t* column, struct snmp_obj_id* row_oid, struct snmp_node_instance* cell_instance); + /** returns object value for the given object identifier */ + node_instance_get_value_method get_value; + /** tests length and/or range BEFORE setting */ + node_instance_set_test_method set_test; + /** sets object value, only called when set_test() was successful */ + node_instance_set_value_method set_value; +}; + +snmp_err_t snmp_table_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_table_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_TABLE_CREATE(oid, columns, get_cell_instance_method, get_next_cell_instance_method, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_TABLE, (oid) }, \ + snmp_table_get_instance, \ + snmp_table_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(columns), (columns), \ + (get_cell_instance_method), (get_next_cell_instance_method), \ + (get_value_method), (set_test_method), (set_value_method)} + +#define SNMP_TABLE_GET_COLUMN_FROM_OID(oid) ((oid)[1]) /* first array value is (fixed) row entry (fixed to 1) and 2nd value is column, follow3ed by instance */ + + +/** simple read-only table */ +typedef enum { + SNMP_VARIANT_VALUE_TYPE_U32, + SNMP_VARIANT_VALUE_TYPE_S32, + SNMP_VARIANT_VALUE_TYPE_PTR, + SNMP_VARIANT_VALUE_TYPE_CONST_PTR +} snmp_table_column_data_type_t; + +struct snmp_table_simple_col_def +{ + u32_t index; + u8_t asn1_type; + snmp_table_column_data_type_t data_type; /* depending of what union member is used to store the value*/ +}; + +/** simple read-only table node */ +struct snmp_table_simple_node +{ + /* inherited "base class" members */ + struct snmp_leaf_node node; + u16_t column_count; + const struct snmp_table_simple_col_def* columns; + snmp_err_t (*get_cell_value)(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union snmp_variant_value* value, u32_t* value_len); + snmp_err_t (*get_next_cell_instance_and_value)(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len); +}; + +snmp_err_t snmp_table_simple_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_table_simple_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_TABLE_CREATE_SIMPLE(oid, columns, get_cell_value_method, get_next_cell_instance_and_value_method) \ + {{{ SNMP_NODE_TABLE, (oid) }, \ + snmp_table_simple_get_instance, \ + snmp_table_simple_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(columns), (columns), (get_cell_value_method), (get_next_cell_instance_and_value_method) } + 
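Sketch of a simple read-only table (editorial, not part of the patch): a two-row, two-column table built with SNMP_TABLE_CREATE_SIMPLE might look as follows. The node OID 1, the column layout and all values are hypothetical, the GetNext handling is only a minimal approximation, and registering the node under a tree node / MIB is omitted.

#include "lwip/apps/snmp_core.h"
#include "lwip/apps/snmp_table.h"

#define MY_TABLE_ROWS 2

static const struct snmp_table_simple_col_def my_table_columns[] = {
  { 1, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_S32 },   /* column 1: row index */
  { 2, SNMP_ASN1_TYPE_COUNTER, SNMP_VARIANT_VALUE_TYPE_U32 }    /* column 2: a counter */
};

static snmp_err_t
my_table_get_cell_value(const u32_t *column, const u32_t *row_oid, u8_t row_oid_len,
                        union snmp_variant_value *value, u32_t *value_len)
{
  u32_t row;
  (void)value_len;   /* only used for pointer/octet-string columns */
  if (row_oid_len != 1) {
    return SNMP_ERR_NOSUCHINSTANCE;
  }
  row = row_oid[0];
  if ((row < 1) || (row > MY_TABLE_ROWS)) {
    return SNMP_ERR_NOSUCHINSTANCE;
  }
  switch (*column) {
    case 1: value->s32 = (s32_t)row;   break;
    case 2: value->u32 = 1000u * row;  break;   /* hypothetical counter value */
    default: return SNMP_ERR_NOSUCHINSTANCE;
  }
  return SNMP_ERR_NOERROR;
}

static snmp_err_t
my_table_get_next_cell(const u32_t *column, struct snmp_obj_id *row_oid,
                       union snmp_variant_value *value, u32_t *value_len)
{
  /* rows are indexed 1..MY_TABLE_ROWS; advance past the given starting row oid */
  u32_t next = (row_oid->len == 0) ? 1 : (row_oid->id[0] + 1);
  if (next > MY_TABLE_ROWS) {
    return SNMP_ERR_NOSUCHINSTANCE;
  }
  row_oid->id[0] = next;
  row_oid->len   = 1;
  return my_table_get_cell_value(column, row_oid->id, row_oid->len, value, value_len);
}

static const struct snmp_table_simple_node my_table =
  SNMP_TABLE_CREATE_SIMPLE(1, my_table_columns, my_table_get_cell_value, my_table_get_next_cell);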
+s16_t snmp_table_extract_value_from_s32ref(struct snmp_node_instance* instance, void* value); +s16_t snmp_table_extract_value_from_u32ref(struct snmp_node_instance* instance, void* value); +s16_t snmp_table_extract_value_from_refconstptr(struct snmp_node_instance* instance, void* value); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_TABLE_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h new file mode 100644 index 00000000..a25dbf2d --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h @@ -0,0 +1,114 @@ +/** + * @file + * SNMP server MIB API to implement thread synchronization + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_THREADSYNC_H +#define LWIP_HDR_APPS_SNMP_THREADSYNC_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/apps/snmp_core.h" +#include "lwip/sys.h" + +typedef void (*snmp_threadsync_called_fn)(void* arg); +typedef void (*snmp_threadsync_synchronizer_fn)(snmp_threadsync_called_fn fn, void* arg); + + +/** Thread sync runtime data. For internal usage only. */ +struct threadsync_data +{ + union { + snmp_err_t err; + s16_t s16; + } retval; + union { + const u32_t *root_oid; + void *value; + } arg1; + union { + u8_t root_oid_len; + u16_t len; + } arg2; + const struct snmp_threadsync_node *threadsync_node; + struct snmp_node_instance proxy_instance; +}; + +/** Thread sync instance. Needed EXCATLY once for every thread to be synced into. 
*/ +struct snmp_threadsync_instance +{ + sys_sem_t sem; + sys_mutex_t sem_usage_mutex; + snmp_threadsync_synchronizer_fn sync_fn; + struct threadsync_data data; +}; + +/** SNMP thread sync proxy leaf node */ +struct snmp_threadsync_node +{ + /* inherited "base class" members */ + struct snmp_leaf_node node; + + const struct snmp_leaf_node *target; + struct snmp_threadsync_instance *instance; +}; + +snmp_err_t snmp_threadsync_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_threadsync_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +/** Create thread sync proxy node */ +#define SNMP_CREATE_THREAD_SYNC_NODE(oid, target_leaf_node, threadsync_instance) \ + {{{ SNMP_NODE_THREADSYNC, (oid) }, \ + snmp_threadsync_get_instance, \ + snmp_threadsync_get_next_instance }, \ + (target_leaf_node), \ + (threadsync_instance) } + +/** Create thread sync instance data */ +void snmp_threadsync_init(struct snmp_threadsync_instance *instance, snmp_threadsync_synchronizer_fn sync_fn); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_THREADSYNC_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h new file mode 100644 index 00000000..e0dda649 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h @@ -0,0 +1,114 @@ +/** + * @file + * Additional SNMPv3 functionality RFC3414 and RFC3826. + */ + +/* + * Copyright (c) 2016 Elias Oenal. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * Author: Elias Oenal + */ + +#ifndef LWIP_HDR_APPS_SNMP_V3_H +#define LWIP_HDR_APPS_SNMP_V3_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP && LWIP_SNMP_V3 + +typedef enum +{ + SNMP_V3_AUTH_ALGO_INVAL = 0, + SNMP_V3_AUTH_ALGO_MD5 = 1, + SNMP_V3_AUTH_ALGO_SHA = 2 +} snmpv3_auth_algo_t; + +typedef enum +{ + SNMP_V3_PRIV_ALGO_INVAL = 0, + SNMP_V3_PRIV_ALGO_DES = 1, + SNMP_V3_PRIV_ALGO_AES = 2 +} snmpv3_priv_algo_t; + +typedef enum +{ + SNMP_V3_USER_STORAGETYPE_OTHER = 1, + SNMP_V3_USER_STORAGETYPE_VOLATILE = 2, + SNMP_V3_USER_STORAGETYPE_NONVOLATILE = 3, + SNMP_V3_USER_STORAGETYPE_PERMANENT = 4, + SNMP_V3_USER_STORAGETYPE_READONLY = 5 +} snmpv3_user_storagetype_t; + +/* + * The following callback functions must be implemented by the application. + * There is a dummy implementation in snmpv3_dummy.c. + */ + +void snmpv3_get_engine_id(const char **id, u8_t *len); +err_t snmpv3_set_engine_id(const char* id, u8_t len); + +u32_t snmpv3_get_engine_boots(void); +void snmpv3_set_engine_boots(u32_t boots); + +u32_t snmpv3_get_engine_time(void); +void snmpv3_reset_engine_time(void); + +err_t snmpv3_get_user(const char* username, snmpv3_auth_algo_t *auth_algo, u8_t *auth_key, snmpv3_priv_algo_t *priv_algo, u8_t *priv_key); +u8_t snmpv3_get_amount_of_users(void); +err_t snmpv3_get_user_storagetype(const char *username, snmpv3_user_storagetype_t *storagetype); +err_t snmpv3_get_username(char *username, u8_t index); + +/* The following functions are provided by the SNMPv3 agent */ + +void snmpv3_engine_id_changed(void); +s32_t snmpv3_get_engine_time_internal(void); + +void snmpv3_password_to_key_md5( + const u8_t *password, /* IN */ + size_t passwordlen, /* IN */ + const u8_t *engineID, /* IN - pointer to snmpEngineID */ + u8_t engineLength, /* IN - length of snmpEngineID */ + u8_t *key); /* OUT - pointer to caller 16-octet buffer */ + +void snmpv3_password_to_key_sha( + const u8_t *password, /* IN */ + size_t passwordlen, /* IN */ + const u8_t *engineID, /* IN - pointer to snmpEngineID */ + u8_t engineLength, /* IN - length of snmpEngineID */ + u8_t *key); /* OUT - pointer to caller 20-octet buffer */ + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_V3_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h new file mode 100644 index 00000000..3c0f95f7 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h @@ -0,0 +1,80 @@ +/** + * @file + * SNTP client API + */ + +/* + * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Frédéric Bernon, Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_SNTP_H +#define LWIP_HDR_APPS_SNTP_H + +#include "lwip/apps/sntp_opts.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* SNTP operating modes: default is to poll using unicast. + The mode has to be set before calling sntp_init(). */ +#define SNTP_OPMODE_POLL 0 +#define SNTP_OPMODE_LISTENONLY 1 +void sntp_setoperatingmode(u8_t operating_mode); +u8_t sntp_getoperatingmode(void); + +void sntp_init(void); +void sntp_stop(void); +u8_t sntp_enabled(void); + +void sntp_setserver(u8_t idx, const ip_addr_t *addr); +const ip_addr_t* sntp_getserver(u8_t idx); + +#if SNTP_MONITOR_SERVER_REACHABILITY +u8_t sntp_getreachability(u8_t idx); +#endif /* SNTP_MONITOR_SERVER_REACHABILITY */ + +#if SNTP_SERVER_DNS +void sntp_setservername(u8_t idx, const char *server); +const char *sntp_getservername(u8_t idx); +#endif /* SNTP_SERVER_DNS */ + +#if SNTP_GET_SERVERS_FROM_DHCP +void sntp_servermode_dhcp(int set_servers_from_dhcp); +#else /* SNTP_GET_SERVERS_FROM_DHCP */ +#define sntp_servermode_dhcp(x) +#endif /* SNTP_GET_SERVERS_FROM_DHCP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNTP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h new file mode 100644 index 00000000..ed98040f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h @@ -0,0 +1,209 @@ +/** + * @file + * SNTP client options list + */ + +/* + * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
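A minimal bring-up sketch for the client API above; the function name and server choice are assumptions, and on OS ports it should run in the stack's thread context after lwIP initialization.

#include "lwip/apps/sntp.h"

void my_sntp_start(void)
{
  sntp_setoperatingmode(SNTP_OPMODE_POLL);  /* must be set before sntp_init() */
#if SNTP_SERVER_DNS
  sntp_setservername(0, "pool.ntp.org");    /* assumed server name, needs SNTP_SERVER_DNS==1 */
#else
  ip_addr_t ntp_server;
  ipaddr_aton("192.168.0.1", &ntp_server);  /* assumed local NTP server address */
  sntp_setserver(0, &ntp_server);
#endif
  sntp_init();                              /* starts polling, one request per SNTP_UPDATE_DELAY */
}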
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Frédéric Bernon, Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_SNTP_OPTS_H +#define LWIP_HDR_APPS_SNTP_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup sntp_opts Options + * @ingroup sntp + * @{ + */ + +/** SNTP macro to change system time in seconds + * Define SNTP_SET_SYSTEM_TIME_US(sec, us) to set the time in microseconds + * instead of this one if you need the additional precision. Alternatively, + * define SNTP_SET_SYSTEM_TIME_NTP(sec, frac) in order to work with native + * NTP timestamps instead. + */ +#if !defined SNTP_SET_SYSTEM_TIME || defined __DOXYGEN__ +#define SNTP_SET_SYSTEM_TIME(sec) LWIP_UNUSED_ARG(sec) +#endif + +/** The maximum number of SNTP servers that can be set */ +#if !defined SNTP_MAX_SERVERS || defined __DOXYGEN__ +#define SNTP_MAX_SERVERS LWIP_DHCP_MAX_NTP_SERVERS +#endif + +/** Set this to 1 to implement the callback function called by dhcp when + * NTP servers are received. */ +#if !defined SNTP_GET_SERVERS_FROM_DHCP || defined __DOXYGEN__ +#define SNTP_GET_SERVERS_FROM_DHCP LWIP_DHCP_GET_NTP_SRV +#endif + +/** Set this to 1 to support DNS names (or IP address strings) to set sntp servers + * One server address/name can be defined as default if SNTP_SERVER_DNS == 1: + * \#define SNTP_SERVER_ADDRESS "pool.ntp.org" + */ +#if !defined SNTP_SERVER_DNS || defined __DOXYGEN__ +#define SNTP_SERVER_DNS 0 +#endif + +/** + * SNTP_DEBUG: Enable debugging for SNTP. + */ +#if !defined SNTP_DEBUG || defined __DOXYGEN__ +#define SNTP_DEBUG LWIP_DBG_OFF +#endif + +/** SNTP server port */ +#if !defined SNTP_PORT || defined __DOXYGEN__ +#define SNTP_PORT LWIP_IANA_PORT_SNTP +#endif + +/** Sanity check: + * Define this to + * - 0 to turn off sanity checks (default; smaller code) + * - >= 1 to check address and port of the response packet to ensure the + * response comes from the server we sent the request to. + * - >= 2 to check returned Originate Timestamp against Transmit Timestamp + * sent to the server (to ensure response to older request). + * - >= 3 @todo: discard reply if any of the VN, Stratum, or Transmit Timestamp + * fields is 0 or the Mode field is not 4 (unicast) or 5 (broadcast). + * - >= 4 @todo: to check that the Root Delay and Root Dispersion fields are each + * greater than or equal to 0 and less than infinity, where infinity is + * currently a cozy number like one second. This check avoids using a + * server whose synchronization source has expired for a very long time. + */ +#if !defined SNTP_CHECK_RESPONSE || defined __DOXYGEN__ +#define SNTP_CHECK_RESPONSE 0 +#endif + +/** Enable round-trip delay compensation. + * Compensate for the round-trip delay by calculating the clock offset from + * the originate, receive, transmit and destination timestamps, as per RFC. + * + * The calculation requires compiler support for 64-bit integers. 
Also, either + * SNTP_SET_SYSTEM_TIME_US or SNTP_SET_SYSTEM_TIME_NTP has to be implemented + * for setting the system clock with sub-second precision. Likewise, either + * SNTP_GET_SYSTEM_TIME or SNTP_GET_SYSTEM_TIME_NTP needs to be implemented + * with sub-second precision. + * + * Although not strictly required, it makes sense to combine this option with + * SNTP_CHECK_RESPONSE >= 2 for sanity-checking of the received timestamps. + * Also, in order for the round-trip calculation to work, the difference + * between the local clock and the NTP server clock must not be larger than + * about 34 years. If that limit is exceeded, the implementation will fall back + * to setting the clock without compensation. In order to ensure that the local + * clock is always within the permitted range for compensation, even at first + * try, it may be necessary to store at least the current year in non-volatile + * memory. + */ +#if !defined SNTP_COMP_ROUNDTRIP || defined __DOXYGEN__ +#define SNTP_COMP_ROUNDTRIP 0 +#endif + +/** According to the RFC, this shall be a random delay + * between 1 and 5 minutes (in milliseconds) to prevent load peaks. + * This can be defined to a random generation function, + * which must return the delay in milliseconds as u32_t. + * Turned off by default. + */ +#if !defined SNTP_STARTUP_DELAY || defined __DOXYGEN__ +#ifdef LWIP_RAND +#define SNTP_STARTUP_DELAY 1 +#else +#define SNTP_STARTUP_DELAY 0 +#endif +#endif + +/** If you want the startup delay to be a function, define this + * to a function (including the brackets) and define SNTP_STARTUP_DELAY to 1. + */ +#if !defined SNTP_STARTUP_DELAY_FUNC || defined __DOXYGEN__ +#define SNTP_STARTUP_DELAY_FUNC (LWIP_RAND() % 5000) +#endif + +/** SNTP receive timeout - in milliseconds + * Also used as retry timeout - this shouldn't be too low. + * Default is 15 seconds. Must not be beolw 15 seconds by specification (i.e. 15000) + */ +#if !defined SNTP_RECV_TIMEOUT || defined __DOXYGEN__ +#define SNTP_RECV_TIMEOUT 15000 +#endif + +/** SNTP update delay - in milliseconds + * Default is 1 hour. Must not be beolw 60 seconds by specification (i.e. 60000) + */ +#if !defined SNTP_UPDATE_DELAY || defined __DOXYGEN__ +#define SNTP_UPDATE_DELAY 3600000 +#endif + +/** SNTP macro to get system time, used with SNTP_CHECK_RESPONSE >= 2 + * to send in request and compare in response. Also used for round-trip + * delay compensation if SNTP_COMP_ROUNDTRIP != 0. + * Alternatively, define SNTP_GET_SYSTEM_TIME_NTP(sec, frac) in order to + * work with native NTP timestamps instead. + */ +#if !defined SNTP_GET_SYSTEM_TIME || defined __DOXYGEN__ +#define SNTP_GET_SYSTEM_TIME(sec, us) do { (sec) = 0; (us) = 0; } while(0) +#endif + +/** Default retry timeout (in milliseconds) if the response + * received is invalid. + * This is doubled with each retry until SNTP_RETRY_TIMEOUT_MAX is reached. + */ +#if !defined SNTP_RETRY_TIMEOUT || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT SNTP_RECV_TIMEOUT +#endif + +/** Maximum retry timeout (in milliseconds). */ +#if !defined SNTP_RETRY_TIMEOUT_MAX || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT_MAX (SNTP_RETRY_TIMEOUT * 10) +#endif + +/** Increase retry timeout with every retry sent + * Default is on to conform to RFC. + */ +#if !defined SNTP_RETRY_TIMEOUT_EXP || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT_EXP 1 +#endif + +/** Keep a reachability shift register per server + * Default is on to conform to RFC. 
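The time hooks above are normally provided from lwipopts.h; the following sketch assumes RTC helper functions my_rtc_set_time()/my_rtc_get_time() supplied by the application.

#include "lwip/arch.h"   /* u32_t for the assumed prototypes below */

void my_rtc_set_time(u32_t sec, u32_t us);
void my_rtc_get_time(u32_t *sec, u32_t *us);

#define SNTP_SET_SYSTEM_TIME_US(sec, us)  my_rtc_set_time((sec), (us))
#define SNTP_GET_SYSTEM_TIME(sec, us)     my_rtc_get_time(&(sec), &(us))
#define SNTP_CHECK_RESPONSE               2   /* check source address and originate timestamp */
#define SNTP_COMP_ROUNDTRIP               1   /* needs 64-bit integers and the hooks above */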
+ */ +#if !defined SNTP_MONITOR_SERVER_REACHABILITY || defined __DOXYGEN__ +#define SNTP_MONITOR_SERVER_REACHABILITY 1 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_SNTP_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h new file mode 100644 index 00000000..198f632b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h @@ -0,0 +1,106 @@ +/** + * + * @file tftp_opts.h + * + * @author Logan Gunthorpe + * + * @brief Trivial File Transfer Protocol (RFC 1350) implementation options + * + * Copyright (c) Deltatee Enterprises Ltd. 2013 + * All rights reserved. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without + * modification,are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Logan Gunthorpe + * + */ + +#ifndef LWIP_HDR_APPS_TFTP_OPTS_H +#define LWIP_HDR_APPS_TFTP_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup tftp_opts Options + * @ingroup tftp + * @{ + */ + +/** + * Enable TFTP debug messages + */ +#if !defined TFTP_DEBUG || defined __DOXYGEN__ +#define TFTP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TFTP server port + */ +#if !defined TFTP_PORT || defined __DOXYGEN__ +#define TFTP_PORT LWIP_IANA_PORT_TFTP +#endif + +/** + * TFTP timeout + */ +#if !defined TFTP_TIMEOUT_MSECS || defined __DOXYGEN__ +#define TFTP_TIMEOUT_MSECS 10000 +#endif + +/** + * Max. number of retries when a file is read from server + */ +#if !defined TFTP_MAX_RETRIES || defined __DOXYGEN__ +#define TFTP_MAX_RETRIES 5 +#endif + +/** + * TFTP timer cyclic interval + */ +#if !defined TFTP_TIMER_MSECS || defined __DOXYGEN__ +#define TFTP_TIMER_MSECS (TFTP_TIMEOUT_MSECS / 10) +#endif + +/** + * Max. length of TFTP filename + */ +#if !defined TFTP_MAX_FILENAME_LEN || defined __DOXYGEN__ +#define TFTP_MAX_FILENAME_LEN 20 +#endif + +/** + * Max. 
length of TFTP mode + */ +#if !defined TFTP_MAX_MODE_LEN || defined __DOXYGEN__ +#define TFTP_MAX_MODE_LEN 7 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_TFTP_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h new file mode 100644 index 00000000..0a7fbee0 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h @@ -0,0 +1,95 @@ +/** + * + * @file tftp_server.h + * + * @author Logan Gunthorpe + * + * @brief Trivial File Transfer Protocol (RFC 1350) + * + * Copyright (c) Deltatee Enterprises Ltd. 2013 + * All rights reserved. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without + * modification,are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Logan Gunthorpe + * + */ + +#ifndef LWIP_HDR_APPS_TFTP_SERVER_H +#define LWIP_HDR_APPS_TFTP_SERVER_H + +#include "lwip/apps/tftp_opts.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup tftp + * TFTP context containing callback functions for TFTP transfers + */ +struct tftp_context { + /** + * Open file for read/write. + * @param fname Filename + * @param mode Mode string from TFTP RFC 1350 (netascii, octet, mail) + * @param write Flag indicating read (0) or write (!= 0) access + * @returns File handle supplied to other functions + */ + void* (*open)(const char* fname, const char* mode, u8_t write); + /** + * Close file handle + * @param handle File handle returned by open() + */ + void (*close)(void* handle); + /** + * Read from file + * @param handle File handle returned by open() + * @param buf Target buffer to copy read data to + * @param bytes Number of bytes to copy to buf + * @returns >= 0: Success; < 0: Error + */ + int (*read)(void* handle, void* buf, int bytes); + /** + * Write to file + * @param handle File handle returned by open() + * @param pbuf PBUF adjusted such that payload pointer points + * to the beginning of write data. In other words, + * TFTP headers are stripped off. 
+ * @returns >= 0: Success; < 0: Error + */ + int (*write)(void* handle, struct pbuf* p); +}; + +err_t tftp_init(const struct tftp_context* ctx); +void tftp_cleanup(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_TFTP_SERVER_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h b/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h new file mode 100644 index 00000000..58dae33a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h @@ -0,0 +1,393 @@ +/** + * @file + * Support for different processor and compiler architectures + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ARCH_H +#define LWIP_HDR_ARCH_H + +#ifndef LITTLE_ENDIAN +#define LITTLE_ENDIAN 1234 +#endif + +#ifndef BIG_ENDIAN +#define BIG_ENDIAN 4321 +#endif + +#include "arch/cc.h" + +/** + * @defgroup compiler_abstraction Compiler/platform abstraction + * @ingroup sys_layer + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/cc.h! + * If the compiler does not provide memset() this file must include a + * definition of it, or include a file which defines it. + * These options cannot be \#defined in lwipopts.h since they are not options + * of lwIP itself, but options of the lwIP port to your system. + * @{ + */ + +/** Define the byte order of the system. + * Needed for conversion of network data to host byte order. + * Allowed values: LITTLE_ENDIAN and BIG_ENDIAN + */ +#ifndef BYTE_ORDER +#define BYTE_ORDER LITTLE_ENDIAN +#endif + +/** Define random number generator function of your system */ +#ifdef __DOXYGEN__ +#define LWIP_RAND() ((u32_t)rand()) +#endif + +/** Platform specific diagnostic output.\n + * Note the default implementation pulls in printf, which may + * in turn pull in a lot of standard libary code. In resource-constrained + * systems, this should be defined to something less resource-consuming. 
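The tftp_context callback set becomes clearer with a complete, deliberately tiny example: the context below serves a single in-RAM buffer so the sketch stays self-contained; all my_* names and the 1 KB buffer size are assumptions, and a real port would back the callbacks with its file system.

#include <string.h>
#include "lwip/apps/tftp_server.h"

static u8_t my_tftp_file[1024];            /* assumed single in-RAM "file" */
static int  my_tftp_pos;

static void *my_open(const char *fname, const char *mode, u8_t write)
{
  (void)fname; (void)mode; (void)write;
  my_tftp_pos = 0;
  return my_tftp_file;                     /* any non-NULL handle */
}

static void my_close(void *handle)
{
  (void)handle;
}

static int my_read(void *handle, void *buf, int bytes)
{
  int left = (int)sizeof(my_tftp_file) - my_tftp_pos;
  int n = (bytes < left) ? bytes : left;
  (void)handle;
  memcpy(buf, &my_tftp_file[my_tftp_pos], (size_t)n);
  my_tftp_pos += n;
  return n;
}

static int my_write(void *handle, struct pbuf *p)
{
  (void)handle;
  if (my_tftp_pos + (int)p->tot_len > (int)sizeof(my_tftp_file)) {
    return -1;                             /* would overflow the buffer */
  }
  pbuf_copy_partial(p, &my_tftp_file[my_tftp_pos], p->tot_len, 0);
  my_tftp_pos += (int)p->tot_len;
  return (int)p->tot_len;
}

static const struct tftp_context my_tftp_ctx = {
  my_open, my_close, my_read, my_write
};

void my_tftp_start(void)
{
  if (tftp_init(&my_tftp_ctx) != ERR_OK) { /* binds UDP port TFTP_PORT (69) */
    /* no memory for the UDP PCB */
  }
}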
+ */ +#ifndef LWIP_PLATFORM_DIAG +#define LWIP_PLATFORM_DIAG(x) do {printf x;} while(0) +#include +#include +#endif + +/** Platform specific assertion handling.\n + * Note the default implementation pulls in printf, fflush and abort, which may + * in turn pull in a lot of standard libary code. In resource-constrained + * systems, this should be defined to something less resource-consuming. + */ +#ifndef LWIP_PLATFORM_ASSERT +#define LWIP_PLATFORM_ASSERT(x) do {printf("Assertion \"%s\" failed at line %d in %s\n", \ + x, __LINE__, __FILE__); fflush(NULL); abort();} while(0) +#include +#include +#endif + +/** Define this to 1 in arch/cc.h of your port if you do not want to + * include stddef.h header to get size_t. You need to typedef size_t + * by yourself in this case. + */ +#ifndef LWIP_NO_STDDEF_H +#define LWIP_NO_STDDEF_H 0 +#endif + +#if !LWIP_NO_STDDEF_H +#include /* for size_t */ +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the stdint.h header. You need to typedef the generic types listed in + * lwip/arch.h yourself in this case (u8_t, u16_t...). + */ +#ifndef LWIP_NO_STDINT_H +#define LWIP_NO_STDINT_H 0 +#endif + +/* Define generic types used in lwIP */ +#if !LWIP_NO_STDINT_H +#include +/* stdint.h is C99 which should also provide support for 64-bit integers */ +#if !defined(LWIP_HAVE_INT64) && defined(UINT64_MAX) +#define LWIP_HAVE_INT64 1 +#endif +typedef uint8_t u8_t; +typedef int8_t s8_t; +typedef uint16_t u16_t; +typedef int16_t s16_t; +typedef uint32_t u32_t; +typedef int32_t s32_t; +#if LWIP_HAVE_INT64 +typedef uint64_t u64_t; +typedef int64_t s64_t; +#endif +typedef uintptr_t mem_ptr_t; +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the inttypes.h header. You need to define the format strings listed in + * lwip/arch.h yourself in this case (X8_F, U16_F...). + */ +#ifndef LWIP_NO_INTTYPES_H +#define LWIP_NO_INTTYPES_H 0 +#endif + +/* Define (sn)printf formatters for these lwIP types */ +#if !LWIP_NO_INTTYPES_H +#include +#ifndef X8_F +#define X8_F "02" PRIx8 +#endif +#ifndef U16_F +#define U16_F PRIu16 +#endif +#ifndef S16_F +#define S16_F PRId16 +#endif +#ifndef X16_F +#define X16_F PRIx16 +#endif +#ifndef U32_F +#define U32_F PRIu32 +#endif +#ifndef S32_F +#define S32_F PRId32 +#endif +#ifndef X32_F +#define X32_F PRIx32 +#endif +#ifndef SZT_F +#define SZT_F PRIuPTR +#endif +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the limits.h header. You need to define the type limits yourself in this case + * (e.g. INT_MAX, SSIZE_MAX). + */ +#ifndef LWIP_NO_LIMITS_H +#define LWIP_NO_LIMITS_H 0 +#endif + +/* Include limits.h? */ +#if !LWIP_NO_LIMITS_H +#include +#endif + +/* Do we need to define ssize_t? This is a compatibility hack: + * Unfortunately, this type seems to be unavailable on some systems (even if + * sys/types or unistd.h are available). + * Being like that, we define it to 'int' if SSIZE_MAX is not defined. + */ +#ifdef SSIZE_MAX +/* If SSIZE_MAX is defined, unistd.h should provide the type as well */ +#ifndef LWIP_NO_UNISTD_H +#define LWIP_NO_UNISTD_H 0 +#endif +#if !LWIP_NO_UNISTD_H +#include +#endif +#else /* SSIZE_MAX */ +typedef int ssize_t; +#define SSIZE_MAX INT_MAX +#endif /* SSIZE_MAX */ + +/* some maximum values needed in lwip code */ +#define LWIP_UINT32_MAX 0xffffffff + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the ctype.h header. 
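The platform hooks above are normally overridden from the port's arch/cc.h; this sketch mirrors the printf-based defaults (constrained targets usually swap printf for a lighter UART routine), with a halt loop replacing abort() on bare metal.

/* excerpt of an assumed arch/cc.h */
#include <stdio.h>
#include <stdlib.h>

#define LWIP_RAND()              ((u32_t)rand())

#define LWIP_PLATFORM_DIAG(x)    do { printf x; } while (0)

#define LWIP_PLATFORM_ASSERT(msg) do { \
    printf("Assertion \"%s\" failed at line %d in %s\n", (msg), __LINE__, __FILE__); \
    while (1) { }  /* halt instead of abort() on bare metal */ \
  } while (0)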
If ctype.h is available, a few character functions + * are mapped to the appropriate functions (lwip_islower, lwip_isdigit...), if + * not, a private implementation is provided. + */ +#ifndef LWIP_NO_CTYPE_H +#define LWIP_NO_CTYPE_H 0 +#endif + +#if LWIP_NO_CTYPE_H +#define lwip_in_range(c, lo, up) ((u8_t)(c) >= (lo) && (u8_t)(c) <= (up)) +#define lwip_isdigit(c) lwip_in_range((c), '0', '9') +#define lwip_isxdigit(c) (lwip_isdigit(c) || lwip_in_range((c), 'a', 'f') || lwip_in_range((c), 'A', 'F')) +#define lwip_islower(c) lwip_in_range((c), 'a', 'z') +#define lwip_isspace(c) ((c) == ' ' || (c) == '\f' || (c) == '\n' || (c) == '\r' || (c) == '\t' || (c) == '\v') +#define lwip_isupper(c) lwip_in_range((c), 'A', 'Z') +#define lwip_tolower(c) (lwip_isupper(c) ? (c) - 'A' + 'a' : c) +#define lwip_toupper(c) (lwip_islower(c) ? (c) - 'a' + 'A' : c) +#else +#include +#define lwip_isdigit(c) isdigit((unsigned char)(c)) +#define lwip_isxdigit(c) isxdigit((unsigned char)(c)) +#define lwip_islower(c) islower((unsigned char)(c)) +#define lwip_isspace(c) isspace((unsigned char)(c)) +#define lwip_isupper(c) isupper((unsigned char)(c)) +#define lwip_tolower(c) tolower((unsigned char)(c)) +#define lwip_toupper(c) toupper((unsigned char)(c)) +#endif + +/** C++ const_cast(val) equivalent to remove constness from a value (GCC -Wcast-qual) */ +#ifndef LWIP_CONST_CAST +#define LWIP_CONST_CAST(target_type, val) ((target_type)((ptrdiff_t)val)) +#endif + +/** Get rid of alignment cast warnings (GCC -Wcast-align) */ +#ifndef LWIP_ALIGNMENT_CAST +#define LWIP_ALIGNMENT_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) +#endif + +/** Get rid of warnings related to pointer-to-numeric and vice-versa casts, + * e.g. "conversion from 'u8_t' to 'void *' of greater size" + */ +#ifndef LWIP_PTR_NUMERIC_CAST +#define LWIP_PTR_NUMERIC_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) +#endif + +/** Avoid warnings/errors related to implicitly casting away packed attributes by doing a explicit cast */ +#ifndef LWIP_PACKED_CAST +#define LWIP_PACKED_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) +#endif + +/** Allocates a memory buffer of specified size that is of sufficient size to align + * its start address using LWIP_MEM_ALIGN. + * You can declare your own version here e.g. to enforce alignment without adding + * trailing padding bytes (see LWIP_MEM_ALIGN_BUFFER) or your own section placement + * requirements.\n + * e.g. if you use gcc and need 32 bit alignment:\n + * \#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u8_t variable_name[size] \_\_attribute\_\_((aligned(4)))\n + * or more portable:\n + * \#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u32_t variable_name[(size + sizeof(u32_t) - 1) / sizeof(u32_t)] + */ +#ifndef LWIP_DECLARE_MEMORY_ALIGNED +#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u8_t variable_name[LWIP_MEM_ALIGN_BUFFER(size)] +#endif + +/** Calculate memory size for an aligned buffer - returns the next highest + * multiple of MEM_ALIGNMENT (e.g. LWIP_MEM_ALIGN_SIZE(3) and + * LWIP_MEM_ALIGN_SIZE(4) will both yield 4 for MEM_ALIGNMENT == 4). + */ +#ifndef LWIP_MEM_ALIGN_SIZE +#define LWIP_MEM_ALIGN_SIZE(size) (((size) + MEM_ALIGNMENT - 1U) & ~(MEM_ALIGNMENT-1U)) +#endif + +/** Calculate safe memory size for an aligned buffer when using an unaligned + * type as storage. This includes a safety-margin on (MEM_ALIGNMENT - 1) at the + * start (e.g. 
if buffer is u8_t[] and actual data will be u32_t*) + */ +#ifndef LWIP_MEM_ALIGN_BUFFER +#define LWIP_MEM_ALIGN_BUFFER(size) (((size) + MEM_ALIGNMENT - 1U)) +#endif + +/** Align a memory pointer to the alignment defined by MEM_ALIGNMENT + * so that ADDR % MEM_ALIGNMENT == 0 + */ +#ifndef LWIP_MEM_ALIGN +#define LWIP_MEM_ALIGN(addr) ((void *)(((mem_ptr_t)(addr) + MEM_ALIGNMENT - 1) & ~(mem_ptr_t)(MEM_ALIGNMENT-1))) +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** Packed structs support. + * Placed BEFORE declaration of a packed struct.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_BEGIN +#define PACK_STRUCT_BEGIN +#endif /* PACK_STRUCT_BEGIN */ + +/** Packed structs support. + * Placed AFTER declaration of a packed struct.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_END +#define PACK_STRUCT_END +#endif /* PACK_STRUCT_END */ + +/** Packed structs support. + * Placed between end of declaration of a packed struct and trailing semicolon.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_STRUCT +#if defined(__GNUC__) || defined(__clang__) +#define PACK_STRUCT_STRUCT __attribute__((packed)) +#else +#define PACK_STRUCT_STRUCT +#endif +#endif /* PACK_STRUCT_STRUCT */ + +/** Packed structs support. + * Wraps u32_t and u16_t members.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_FIELD +#define PACK_STRUCT_FIELD(x) x +#endif /* PACK_STRUCT_FIELD */ + +/** Packed structs support. + * Wraps u8_t members, where some compilers warn that packing is not necessary.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_FLD_8 +#define PACK_STRUCT_FLD_8(x) PACK_STRUCT_FIELD(x) +#endif /* PACK_STRUCT_FLD_8 */ + +/** Packed structs support. + * Wraps members that are packed structs themselves, where some compilers warn that packing is not necessary.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_FLD_S +#define PACK_STRUCT_FLD_S(x) PACK_STRUCT_FIELD(x) +#endif /* PACK_STRUCT_FLD_S */ + +/** PACK_STRUCT_USE_INCLUDES==1: Packed structs support using \#include files before and after struct to be packed.\n + * The file included BEFORE the struct is "arch/bpstruct.h".\n + * The file included AFTER the struct is "arch/epstruct.h".\n + * This can be used to implement struct packing on MS Visual C compilers, see + * the Win32 port in the lwIP contrib repository for reference. + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. 
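For reference, this is how the packing macros above are combined when declaring a wire-format structure, in the same style as the headers under include/lwip/prot/; the struct itself is an invented example, not an lwIP protocol header.

#include "lwip/arch.h"

#ifdef PACK_STRUCT_USE_INCLUDES
#  include "arch/bpstruct.h"
#endif
PACK_STRUCT_BEGIN
struct my_proto_hdr {
  PACK_STRUCT_FLD_8(u8_t  type);     /* u8_t members use FLD_8 to avoid packing warnings */
  PACK_STRUCT_FLD_8(u8_t  flags);
  PACK_STRUCT_FIELD(u16_t length);   /* multi-byte members use PACK_STRUCT_FIELD */
  PACK_STRUCT_FIELD(u32_t cookie);
} PACK_STRUCT_STRUCT;
PACK_STRUCT_END
#ifdef PACK_STRUCT_USE_INCLUDES
#  include "arch/epstruct.h"
#endif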
+ */ +#ifdef __DOXYGEN__ +#define PACK_STRUCT_USE_INCLUDES +#endif + +/** Eliminates compiler warning about unused arguments (GCC -Wextra -Wunused). */ +#ifndef LWIP_UNUSED_ARG +#define LWIP_UNUSED_ARG(x) (void)x +#endif /* LWIP_UNUSED_ARG */ + +/** LWIP_PROVIDE_ERRNO==1: Let lwIP provide ERRNO values and the 'errno' variable. + * If this is disabled, cc.h must either define 'errno', include , + * define LWIP_ERRNO_STDINCLUDE to get included or + * define LWIP_ERRNO_INCLUDE to or equivalent. + */ +#if defined __DOXYGEN__ +#define LWIP_PROVIDE_ERRNO +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ARCH_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h b/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h new file mode 100644 index 00000000..1d85bccf --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h @@ -0,0 +1,99 @@ +/** + * @file + * + * AutoIP Automatic LinkLocal IP Configuration + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + * + * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. 
+ * + */ + +#ifndef LWIP_HDR_AUTOIP_H +#define LWIP_HDR_AUTOIP_H + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" +/* #include "lwip/udp.h" */ +#include "lwip/etharp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** AutoIP Timing */ +#define AUTOIP_TMR_INTERVAL 100 +#define AUTOIP_TICKS_PER_SECOND (1000 / AUTOIP_TMR_INTERVAL) + +/** AutoIP state information per netif */ +struct autoip +{ + /** the currently selected, probed, announced or used LL IP-Address */ + ip4_addr_t llipaddr; + /** current AutoIP state machine state */ + u8_t state; + /** sent number of probes or announces, dependent on state */ + u8_t sent_num; + /** ticks to wait, tick is AUTOIP_TMR_INTERVAL long */ + u16_t ttw; + /** ticks until a conflict can be solved by defending */ + u8_t lastconflict; + /** total number of probed/used Link Local IP-Addresses */ + u8_t tried_llipaddr; +}; + + +void autoip_set_struct(struct netif *netif, struct autoip *autoip); +/** Remove a struct autoip previously set to the netif using autoip_set_struct() */ +#define autoip_remove_struct(netif) do { (netif)->autoip = NULL; } while (0) +err_t autoip_start(struct netif *netif); +err_t autoip_stop(struct netif *netif); +void autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr); +void autoip_tmr(void); +void autoip_network_changed(struct netif *netif); +u8_t autoip_supplied_address(const struct netif *netif); + +/* for lwIP internal use by ip4.c */ +u8_t autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr); + +#define netif_autoip_data(netif) ((struct autoip*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_AUTOIP */ + +#endif /* LWIP_HDR_AUTOIP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h b/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h new file mode 100644 index 00000000..baa6a409 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h @@ -0,0 +1,161 @@ +/** + * @file + * Debug messages infrastructure + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
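A short usage sketch for the AutoIP API above; my_netif is an assumed, already-added netif, and the static state struct avoids a heap allocation.

#include "lwip/autoip.h"

#if LWIP_IPV4 && LWIP_AUTOIP
extern struct netif my_netif;          /* assumed application netif */
static struct autoip my_autoip_state;

void my_autoip_start(void)
{
  autoip_set_struct(&my_netif, &my_autoip_state);
  if (autoip_start(&my_netif) != ERR_OK) {
    /* probing could not be started (e.g. netif not usable) */
  }
}
#endif /* LWIP_IPV4 && LWIP_AUTOIP */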
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_DEBUG_H +#define LWIP_HDR_DEBUG_H + +#include "lwip/arch.h" +#include "lwip/opt.h" + +/** + * @defgroup debugging_levels LWIP_DBG_MIN_LEVEL and LWIP_DBG_TYPES_ON values + * @ingroup lwip_opts_debugmsg + * @{ + */ + +/** @name Debug level (LWIP_DBG_MIN_LEVEL) + * @{ + */ +/** Debug level: ALL messages*/ +#define LWIP_DBG_LEVEL_ALL 0x00 +/** Debug level: Warnings. bad checksums, dropped packets, ... */ +#define LWIP_DBG_LEVEL_WARNING 0x01 +/** Debug level: Serious. memory allocation failures, ... */ +#define LWIP_DBG_LEVEL_SERIOUS 0x02 +/** Debug level: Severe */ +#define LWIP_DBG_LEVEL_SEVERE 0x03 +/** + * @} + */ + +#define LWIP_DBG_MASK_LEVEL 0x03 +/* compatibility define only */ +#define LWIP_DBG_LEVEL_OFF LWIP_DBG_LEVEL_ALL + +/** @name Enable/disable debug messages completely (LWIP_DBG_TYPES_ON) + * @{ + */ +/** flag for LWIP_DEBUGF to enable that debug message */ +#define LWIP_DBG_ON 0x80U +/** flag for LWIP_DEBUGF to disable that debug message */ +#define LWIP_DBG_OFF 0x00U +/** + * @} + */ + +/** @name Debug message types (LWIP_DBG_TYPES_ON) + * @{ + */ +/** flag for LWIP_DEBUGF indicating a tracing message (to follow program flow) */ +#define LWIP_DBG_TRACE 0x40U +/** flag for LWIP_DEBUGF indicating a state debug message (to follow module states) */ +#define LWIP_DBG_STATE 0x20U +/** flag for LWIP_DEBUGF indicating newly added code, not thoroughly tested yet */ +#define LWIP_DBG_FRESH 0x10U +/** flag for LWIP_DEBUGF to halt after printing this debug message */ +#define LWIP_DBG_HALT 0x08U +/** + * @} + */ + +/** + * @} + */ + +/** + * @defgroup lwip_assertions Assertion handling + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_NOASSERT: Disable LWIP_ASSERT checks: + * To disable assertions define LWIP_NOASSERT in arch/cc.h. + */ +#ifdef __DOXYGEN__ +#define LWIP_NOASSERT +#undef LWIP_NOASSERT +#endif +/** + * @} + */ + +#ifndef LWIP_NOASSERT +#define LWIP_ASSERT(message, assertion) do { if (!(assertion)) { \ + LWIP_PLATFORM_ASSERT(message); }} while(0) +#else /* LWIP_NOASSERT */ +#define LWIP_ASSERT(message, assertion) +#endif /* LWIP_NOASSERT */ + +#ifndef LWIP_ERROR +#ifndef LWIP_NOASSERT +#define LWIP_PLATFORM_ERROR(message) LWIP_PLATFORM_ASSERT(message) +#elif defined LWIP_DEBUG +#define LWIP_PLATFORM_ERROR(message) LWIP_PLATFORM_DIAG((message)) +#else +#define LWIP_PLATFORM_ERROR(message) +#endif + +/* if "expression" isn't true, then print "message" and execute "handler" expression */ +#define LWIP_ERROR(message, expression, handler) do { if (!(expression)) { \ + LWIP_PLATFORM_ERROR(message); handler;}} while(0) +#endif /* LWIP_ERROR */ + +/** Enable debug message printing, but only if debug message type is enabled + * AND is of correct type AND is at least LWIP_DBG_LEVEL. 
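Usage of the assertion and debug machinery above, sketched with an assumed application-defined debug class MY_APP_DEBUG; output only appears when LWIP_DEBUG is defined for the build.

#include "lwip/debug.h"

#define MY_APP_DEBUG (LWIP_DBG_ON | LWIP_DBG_TRACE)   /* assumed application debug class */

void my_debug_demo(int value)
{
  LWIP_ASSERT("value must be positive", value > 0);
  LWIP_DEBUGF(MY_APP_DEBUG | LWIP_DBG_LEVEL_WARNING,
              ("my_debug_demo: value=%d\n", value));  /* message must be parenthesized */
}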
+ */ +#ifdef __DOXYGEN__ +#define LWIP_DEBUG +#undef LWIP_DEBUG +#endif + +#ifdef LWIP_DEBUG +#define LWIP_DEBUGF(debug, message) do { \ + if ( \ + ((debug) & LWIP_DBG_ON) && \ + ((debug) & LWIP_DBG_TYPES_ON) && \ + ((s16_t)((debug) & LWIP_DBG_MASK_LEVEL) >= LWIP_DBG_MIN_LEVEL)) { \ + LWIP_PLATFORM_DIAG(message); \ + if ((debug) & LWIP_DBG_HALT) { \ + while(1); \ + } \ + } \ + } while(0) + +#else /* LWIP_DEBUG */ +#define LWIP_DEBUGF(debug, message) +#endif /* LWIP_DEBUG */ + +#endif /* LWIP_HDR_DEBUG_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/def.h b/Middlewares/Third_Party/LwIP/src/include/lwip/def.h new file mode 100644 index 00000000..dfb266d1 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/def.h @@ -0,0 +1,152 @@ +/** + * @file + * various utility macros + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/** + * @defgroup perf Performance measurement + * @ingroup sys_layer + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/perf.h! + * Measurement calls made throughout lwip, these can be defined to nothing. + * - PERF_START: start measuring something. + * - PERF_STOP(x): stop measuring something, and record the result. + */ + +#ifndef LWIP_HDR_DEF_H +#define LWIP_HDR_DEF_H + +/* arch.h might define NULL already */ +#include "lwip/arch.h" +#include "lwip/opt.h" +#if LWIP_PERF +#include "arch/perf.h" +#else /* LWIP_PERF */ +#define PERF_START /* null definition */ +#define PERF_STOP(x) /* null definition */ +#endif /* LWIP_PERF */ + +#ifdef __cplusplus +extern "C" { +#endif + +#define LWIP_MAX(x , y) (((x) > (y)) ? (x) : (y)) +#define LWIP_MIN(x , y) (((x) < (y)) ? (x) : (y)) + +/* Get the number of entries in an array ('x' must NOT be a pointer!) 
*/ +#define LWIP_ARRAYSIZE(x) (sizeof(x)/sizeof((x)[0])) + +/** Create u32_t value from bytes */ +#define LWIP_MAKEU32(a,b,c,d) (((u32_t)((a) & 0xff) << 24) | \ + ((u32_t)((b) & 0xff) << 16) | \ + ((u32_t)((c) & 0xff) << 8) | \ + (u32_t)((d) & 0xff)) + +#ifndef NULL +#ifdef __cplusplus +#define NULL 0 +#else +#define NULL ((void *)0) +#endif +#endif + +#if BYTE_ORDER == BIG_ENDIAN +#define lwip_htons(x) ((u16_t)(x)) +#define lwip_ntohs(x) ((u16_t)(x)) +#define lwip_htonl(x) ((u32_t)(x)) +#define lwip_ntohl(x) ((u32_t)(x)) +#define PP_HTONS(x) ((u16_t)(x)) +#define PP_NTOHS(x) ((u16_t)(x)) +#define PP_HTONL(x) ((u32_t)(x)) +#define PP_NTOHL(x) ((u32_t)(x)) +#else /* BYTE_ORDER != BIG_ENDIAN */ +#ifndef lwip_htons +u16_t lwip_htons(u16_t x); +#endif +#define lwip_ntohs(x) lwip_htons(x) + +#ifndef lwip_htonl +u32_t lwip_htonl(u32_t x); +#endif +#define lwip_ntohl(x) lwip_htonl(x) + +/* These macros should be calculated by the preprocessor and are used + with compile-time constants only (so that there is no little-endian + overhead at runtime). */ +#define PP_HTONS(x) ((u16_t)((((x) & (u16_t)0x00ffU) << 8) | (((x) & (u16_t)0xff00U) >> 8))) +#define PP_NTOHS(x) PP_HTONS(x) +#define PP_HTONL(x) ((((x) & (u32_t)0x000000ffUL) << 24) | \ + (((x) & (u32_t)0x0000ff00UL) << 8) | \ + (((x) & (u32_t)0x00ff0000UL) >> 8) | \ + (((x) & (u32_t)0xff000000UL) >> 24)) +#define PP_NTOHL(x) PP_HTONL(x) +#endif /* BYTE_ORDER == BIG_ENDIAN */ + +/* Provide usual function names as macros for users, but this can be turned off */ +#ifndef LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS +#define htons(x) lwip_htons(x) +#define ntohs(x) lwip_ntohs(x) +#define htonl(x) lwip_htonl(x) +#define ntohl(x) lwip_ntohl(x) +#endif + +/* Functions that are not available as standard implementations. + * In cc.h, you can #define these to implementations available on + * your platform to save some code bytes if you use these functions + * in your application, too. + */ + +#ifndef lwip_itoa +/* This can be #defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform */ +void lwip_itoa(char* result, size_t bufsize, int number); +#endif +#ifndef lwip_strnicmp +/* This can be #defined to strnicmp() or strncasecmp() depending on your platform */ +int lwip_strnicmp(const char* str1, const char* str2, size_t len); +#endif +#ifndef lwip_stricmp +/* This can be #defined to stricmp() or strcasecmp() depending on your platform */ +int lwip_stricmp(const char* str1, const char* str2); +#endif +#ifndef lwip_strnstr +/* This can be #defined to strnstr() depending on your platform */ +char* lwip_strnstr(const char* buffer, const char* token, size_t n); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_DEF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h new file mode 100644 index 00000000..c78aa0ba --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h @@ -0,0 +1,139 @@ +/** + * @file + * DHCP client API + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Leon Woestenberg + * + */ +#ifndef LWIP_HDR_DHCP_H +#define LWIP_HDR_DHCP_H + +#include "lwip/opt.h" + +#if LWIP_DHCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" +#include "lwip/udp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** period (in seconds) of the application calling dhcp_coarse_tmr() */ +#define DHCP_COARSE_TIMER_SECS 60 +/** period (in milliseconds) of the application calling dhcp_coarse_tmr() */ +#define DHCP_COARSE_TIMER_MSECS (DHCP_COARSE_TIMER_SECS * 1000UL) +/** period (in milliseconds) of the application calling dhcp_fine_tmr() */ +#define DHCP_FINE_TIMER_MSECS 500 + +#define DHCP_BOOT_FILE_LEN 128U + +/* AutoIP cooperation flags (struct dhcp.autoip_coop_state) */ +typedef enum { + DHCP_AUTOIP_COOP_STATE_OFF = 0, + DHCP_AUTOIP_COOP_STATE_ON = 1 +} dhcp_autoip_coop_state_enum_t; + +struct dhcp +{ + /** transaction identifier of last sent request */ + u32_t xid; + /** track PCB allocation state */ + u8_t pcb_allocated; + /** current DHCP state machine state */ + u8_t state; + /** retries of current request */ + u8_t tries; +#if LWIP_DHCP_AUTOIP_COOP + u8_t autoip_coop_state; +#endif + u8_t subnet_mask_given; + + u16_t request_timeout; /* #ticks with period DHCP_FINE_TIMER_SECS for request timeout */ + u16_t t1_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for renewal time */ + u16_t t2_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for rebind time */ + u16_t t1_renew_time; /* #ticks with period DHCP_COARSE_TIMER_SECS until next renew try */ + u16_t t2_rebind_time; /* #ticks with period DHCP_COARSE_TIMER_SECS until next rebind try */ + u16_t lease_used; /* #ticks with period DHCP_COARSE_TIMER_SECS since last received DHCP ack */ + u16_t t0_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for lease time */ + ip_addr_t server_ip_addr; /* dhcp server address that offered this lease (ip_addr_t because passed to UDP) */ + ip4_addr_t offered_ip_addr; + ip4_addr_t offered_sn_mask; + ip4_addr_t offered_gw_addr; + + u32_t offered_t0_lease; /* lease period (in seconds) */ + u32_t offered_t1_renew; /* recommended renew time (usually 50% of lease period) */ + u32_t offered_t2_rebind; /* recommended rebind time (usually 87.5 of lease period) */ +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_t offered_si_addr; + char boot_file_name[DHCP_BOOT_FILE_LEN]; +#endif /* 
LWIP_DHCP_BOOTPFILE */ +}; + + +void dhcp_set_struct(struct netif *netif, struct dhcp *dhcp); +/** Remove a struct dhcp previously set to the netif using dhcp_set_struct() */ +#define dhcp_remove_struct(netif) netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL) +void dhcp_cleanup(struct netif *netif); +err_t dhcp_start(struct netif *netif); +err_t dhcp_renew(struct netif *netif); +err_t dhcp_release(struct netif *netif); +void dhcp_stop(struct netif *netif); +void dhcp_release_and_stop(struct netif *netif); +void dhcp_inform(struct netif *netif); +void dhcp_network_changed(struct netif *netif); +#if DHCP_DOES_ARP_CHECK +void dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr); +#endif +u8_t dhcp_supplied_address(const struct netif *netif); +/* to be called every minute */ +void dhcp_coarse_tmr(void); +/* to be called every half second */ +void dhcp_fine_tmr(void); + +#if LWIP_DHCP_GET_NTP_SRV +/** This function must exist, in other to add offered NTP servers to + * the NTP (or SNTP) engine. + * See LWIP_DHCP_MAX_NTP_SERVERS */ +extern void dhcp_set_ntp_servers(u8_t num_ntp_servers, const ip4_addr_t* ntp_server_addrs); +#endif /* LWIP_DHCP_GET_NTP_SRV */ + +#define netif_dhcp_data(netif) ((struct dhcp*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DHCP */ + +#endif /*LWIP_HDR_DHCP_H*/ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h new file mode 100644 index 00000000..5cc4a015 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h @@ -0,0 +1,104 @@ +/** + * @file + * + * DHCPv6 client: IPv6 address autoconfiguration as per + * RFC 3315 (stateful DHCPv6) and + * RFC 3736 (stateless DHCPv6). + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
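DHCP client bring-up sketch for the API above; my_netif is an assumed netif that has already been added and brought up, and because address assignment is asynchronous, the supplied-address check would normally live in a netif status callback or a poll loop.

#include "lwip/dhcp.h"
#include "lwip/netif.h"

extern struct netif my_netif;          /* assumed application netif */

void my_dhcp_start(void)
{
  if (dhcp_start(&my_netif) != ERR_OK) {
    /* out of memory or netif not usable */
  }
}

void my_dhcp_poll(void)
{
  if (dhcp_supplied_address(&my_netif)) {
    /* the offered address, netmask and gateway are now set on my_netif */
  }
}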
+ * + * Author: Simon Goldschmidt + */ + +#ifndef LWIP_HDR_IP6_DHCP6_H +#define LWIP_HDR_IP6_DHCP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** period (in milliseconds) of the application calling dhcp6_tmr() */ +#define DHCP6_TIMER_MSECS 500 + +struct dhcp6 +{ + /** transaction identifier of last sent request */ + u32_t xid; + /** track PCB allocation state */ + u8_t pcb_allocated; + /** current DHCPv6 state machine state */ + u8_t state; + /** retries of current request */ + u8_t tries; + /** if request config is triggered while another action is active, this keeps track of it */ + u8_t request_config_pending; + /** #ticks with period DHCP6_TIMER_MSECS for request timeout */ + u16_t request_timeout; +#if LWIP_IPV6_DHCP6_STATEFUL + /* @todo: add more members here to keep track of stateful DHCPv6 data, like lease times */ +#endif /* LWIP_IPV6_DHCP6_STATEFUL */ +}; + +void dhcp6_set_struct(struct netif *netif, struct dhcp6 *dhcp6); +/** Remove a struct dhcp6 previously set to the netif using dhcp6_set_struct() */ +#define dhcp6_remove_struct(netif) netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL) +void dhcp6_cleanup(struct netif *netif); + +err_t dhcp6_enable_stateful(struct netif *netif); +err_t dhcp6_enable_stateless(struct netif *netif); +void dhcp6_disable(struct netif *netif); + +void dhcp6_tmr(void); + +void dhcp6_nd6_ra_trigger(struct netif *netif, u8_t managed_addr_config, u8_t other_config); + +#if LWIP_DHCP6_GET_NTP_SRV +/** This function must exist, in other to add offered NTP servers to + * the NTP (or SNTP) engine. + * See LWIP_DHCP6_MAX_NTP_SERVERS */ +extern void dhcp6_set_ntp_servers(u8_t num_ntp_servers, const ip_addr_t* ntp_server_addrs); +#endif /* LWIP_DHCP6_GET_NTP_SRV */ + +#define netif_dhcp6_data(netif) ((struct dhcp6*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6_DHCP6 */ + +#endif /* LWIP_HDR_IP6_DHCP6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h b/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h new file mode 100644 index 00000000..09134154 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h @@ -0,0 +1,131 @@ +/** + * @file + * DNS API + */ + +/** + * lwip DNS resolver header file. + + * Author: Jim Pettinato + * April 2007 + + * ported from uIP resolv.c Copyright (c) 2002-2003, Adam Dunkels. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
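Stateless DHCPv6 is the common companion to SLAAC; the sketch below enables it on an assumed netif my_netif so that DNS (and, with LWIP_DHCP6_GET_NTP_SRV, NTP) servers can be obtained even though addresses come from router advertisements.

#include "lwip/dhcp6.h"

#if LWIP_IPV6 && LWIP_IPV6_DHCP6
extern struct netif my_netif;          /* assumed netif with IPv6 enabled */

void my_dhcp6_start(void)
{
  if (dhcp6_enable_stateless(&my_netif) != ERR_OK) {
    /* could not allocate/bind the DHCPv6 client */
  }
}
#endif /* LWIP_IPV6 && LWIP_IPV6_DHCP6 */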
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef LWIP_HDR_DNS_H +#define LWIP_HDR_DNS_H + +#include "lwip/opt.h" + +#if LWIP_DNS + +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** DNS timer period */ +#define DNS_TMR_INTERVAL 1000 + +/* DNS resolve types: */ +#define LWIP_DNS_ADDRTYPE_IPV4 0 +#define LWIP_DNS_ADDRTYPE_IPV6 1 +#define LWIP_DNS_ADDRTYPE_IPV4_IPV6 2 /* try to resolve IPv4 first, try IPv6 if IPv4 fails only */ +#define LWIP_DNS_ADDRTYPE_IPV6_IPV4 3 /* try to resolve IPv6 first, try IPv4 if IPv6 fails only */ +#if LWIP_IPV4 && LWIP_IPV6 +#ifndef LWIP_DNS_ADDRTYPE_DEFAULT +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV4_IPV6 +#endif +#elif LWIP_IPV4 +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV4 +#else +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV6 +#endif + +#if DNS_LOCAL_HOSTLIST +/** struct used for local host-list */ +struct local_hostlist_entry { + /** static hostname */ + const char *name; + /** static host address in network byteorder */ + ip_addr_t addr; + struct local_hostlist_entry *next; +}; +#define DNS_LOCAL_HOSTLIST_ELEM(name, addr_init) {name, addr_init, NULL} +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +#ifndef DNS_LOCAL_HOSTLIST_MAX_NAMELEN +#define DNS_LOCAL_HOSTLIST_MAX_NAMELEN DNS_MAX_NAME_LENGTH +#endif +#define LOCALHOSTLIST_ELEM_SIZE ((sizeof(struct local_hostlist_entry) + DNS_LOCAL_HOSTLIST_MAX_NAMELEN + 1)) +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ +#endif /* DNS_LOCAL_HOSTLIST */ + +#if LWIP_IPV4 +extern const ip_addr_t dns_mquery_v4group; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +extern const ip_addr_t dns_mquery_v6group; +#endif /* LWIP_IPV6 */ + +/** Callback which is invoked when a hostname is found. + * A function of this type must be implemented by the application using the DNS resolver. + * @param name pointer to the name that was looked up. + * @param ipaddr pointer to an ip_addr_t containing the IP address of the hostname, + * or NULL if the name could not be found (or on any other error). 
+ * @param callback_arg a user-specified callback argument passed to dns_gethostbyname +*/ +typedef void (*dns_found_callback)(const char *name, const ip_addr_t *ipaddr, void *callback_arg); + +void dns_init(void); +void dns_tmr(void); +void dns_setserver(u8_t numdns, const ip_addr_t *dnsserver); +const ip_addr_t* dns_getserver(u8_t numdns); +err_t dns_gethostbyname(const char *hostname, ip_addr_t *addr, + dns_found_callback found, void *callback_arg); +err_t dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, + dns_found_callback found, void *callback_arg, + u8_t dns_addrtype); + + +#if DNS_LOCAL_HOSTLIST +size_t dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg); +err_t dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype); +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +int dns_local_removehost(const char *hostname, const ip_addr_t *addr); +err_t dns_local_addhost(const char *hostname, const ip_addr_t *addr); +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ +#endif /* DNS_LOCAL_HOSTLIST */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DNS */ + +#endif /* LWIP_HDR_DNS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/err.h b/Middlewares/Third_Party/LwIP/src/include/lwip/err.h new file mode 100644 index 00000000..887d9b3f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/err.h @@ -0,0 +1,117 @@ +/** + * @file + * lwIP Error codes + */ +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ERR_H +#define LWIP_HDR_ERR_H + +#include "lwip/opt.h" +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup infrastructure_errors Error codes + * @ingroup infrastructure + * @{ + */ + +/** Definitions for error constants. */ +typedef enum { +/** No error, everything OK. */ + ERR_OK = 0, +/** Out of memory error. */ + ERR_MEM = -1, +/** Buffer error. */ + ERR_BUF = -2, +/** Timeout. */ + ERR_TIMEOUT = -3, +/** Routing problem. 
*/ + ERR_RTE = -4, +/** Operation in progress */ + ERR_INPROGRESS = -5, +/** Illegal value. */ + ERR_VAL = -6, +/** Operation would block. */ + ERR_WOULDBLOCK = -7, +/** Address in use. */ + ERR_USE = -8, +/** Already connecting. */ + ERR_ALREADY = -9, +/** Conn already established.*/ + ERR_ISCONN = -10, +/** Not connected. */ + ERR_CONN = -11, +/** Low-level netif error */ + ERR_IF = -12, + +/** Connection aborted. */ + ERR_ABRT = -13, +/** Connection reset. */ + ERR_RST = -14, +/** Connection closed. */ + ERR_CLSD = -15, +/** Illegal argument. */ + ERR_ARG = -16 +} err_enum_t; + +/** Define LWIP_ERR_T in cc.h if you want to use + * a different type for your platform (must be signed). */ +#ifdef LWIP_ERR_T +typedef LWIP_ERR_T err_t; +#else /* LWIP_ERR_T */ +typedef s8_t err_t; +#endif /* LWIP_ERR_T*/ + +/** + * @} + */ + +#ifdef LWIP_DEBUG +extern const char *lwip_strerr(err_t err); +#else +#define lwip_strerr(x) "" +#endif /* LWIP_DEBUG */ + +#if !NO_SYS +int err_to_errno(err_t err); +#endif /* !NO_SYS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ERR_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h b/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h new file mode 100644 index 00000000..48d6b539 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h @@ -0,0 +1,198 @@ +/** + * @file + * Posix Errno defines + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
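The dns.h and err.h APIs above combine as in this sketch; "example.org" is a placeholder name, and the callback only fires when the answer was not immediately available.

#include "lwip/dns.h"
#include "lwip/err.h"
#include "lwip/ip_addr.h"

static void on_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg)
{
  (void)name; (void)arg;
  if (ipaddr != NULL) {
    /* resolved: use *ipaddr */
  } else {
    /* lookup failed or timed out */
  }
}

static void app_resolve_host(void)
{
  ip_addr_t addr;
  err_t err = dns_gethostbyname("example.org", &addr, on_dns_found, NULL);
  if (err == ERR_OK) {
    /* answer was already cached: addr is valid now, callback is not called */
  } else if (err == ERR_INPROGRESS) {
    /* query sent: on_dns_found() will be called later by the DNS code */
  } /* else: ERR_ARG, ERR_MEM, ... (see err.h above) */
}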
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ERRNO_H +#define LWIP_HDR_ERRNO_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef LWIP_PROVIDE_ERRNO + +#define EPERM 1 /* Operation not permitted */ +#define ENOENT 2 /* No such file or directory */ +#define ESRCH 3 /* No such process */ +#define EINTR 4 /* Interrupted system call */ +#define EIO 5 /* I/O error */ +#define ENXIO 6 /* No such device or address */ +#define E2BIG 7 /* Arg list too long */ +#define ENOEXEC 8 /* Exec format error */ +#define EBADF 9 /* Bad file number */ +#define ECHILD 10 /* No child processes */ +#define EAGAIN 11 /* Try again */ +#define ENOMEM 12 /* Out of memory */ +#define EACCES 13 /* Permission denied */ +#define EFAULT 14 /* Bad address */ +#define ENOTBLK 15 /* Block device required */ +#define EBUSY 16 /* Device or resource busy */ +#define EEXIST 17 /* File exists */ +#define EXDEV 18 /* Cross-device link */ +#define ENODEV 19 /* No such device */ +#define ENOTDIR 20 /* Not a directory */ +#define EISDIR 21 /* Is a directory */ +#define EINVAL 22 /* Invalid argument */ +#define ENFILE 23 /* File table overflow */ +#define EMFILE 24 /* Too many open files */ +#define ENOTTY 25 /* Not a typewriter */ +#define ETXTBSY 26 /* Text file busy */ +#define EFBIG 27 /* File too large */ +#define ENOSPC 28 /* No space left on device */ +#define ESPIPE 29 /* Illegal seek */ +#define EROFS 30 /* Read-only file system */ +#define EMLINK 31 /* Too many links */ +#define EPIPE 32 /* Broken pipe */ +#define EDOM 33 /* Math argument out of domain of func */ +#define ERANGE 34 /* Math result not representable */ +#define EDEADLK 35 /* Resource deadlock would occur */ +#define ENAMETOOLONG 36 /* File name too long */ +#define ENOLCK 37 /* No record locks available */ +#define ENOSYS 38 /* Function not implemented */ +#define ENOTEMPTY 39 /* Directory not empty */ +#define ELOOP 40 /* Too many symbolic links encountered */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define ENOMSG 42 /* No message of desired type */ +#define EIDRM 43 /* Identifier removed */ +#define ECHRNG 44 /* Channel number out of range */ +#define EL2NSYNC 45 /* Level 2 not synchronized */ +#define EL3HLT 46 /* Level 3 halted */ +#define EL3RST 47 /* Level 3 reset */ +#define ELNRNG 48 /* Link number out of range */ +#define EUNATCH 49 /* Protocol driver not attached */ +#define ENOCSI 50 /* No CSI structure available */ +#define EL2HLT 51 /* Level 2 halted */ +#define EBADE 52 /* Invalid exchange */ +#define EBADR 53 /* Invalid request descriptor */ +#define EXFULL 54 /* Exchange full */ +#define ENOANO 55 /* No anode */ +#define EBADRQC 56 /* Invalid request code */ +#define EBADSLT 57 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 59 /* Bad font file format */ +#define ENOSTR 60 /* Device not a stream */ +#define ENODATA 61 /* No data available */ +#define ETIME 62 /* Timer expired */ +#define ENOSR 63 /* Out of streams resources */ +#define ENONET 64 /* Machine is not on the network */ +#define ENOPKG 65 /* Package not installed */ +#define EREMOTE 66 /* Object is remote */ +#define ENOLINK 67 /* Link has been severed */ +#define EADV 68 /* Advertise error */ +#define ESRMNT 69 /* Srmount error */ +#define ECOMM 70 /* Communication error on send */ +#define EPROTO 71 /* Protocol error */ +#define EMULTIHOP 72 /* Multihop attempted */ +#define EDOTDOT 73 /* RFS specific error */ +#define EBADMSG 74 /* Not a data message */ +#define EOVERFLOW 75 /* Value too large for defined 
data type */ +#define ENOTUNIQ 76 /* Name not unique on network */ +#define EBADFD 77 /* File descriptor in bad state */ +#define EREMCHG 78 /* Remote address changed */ +#define ELIBACC 79 /* Can not access a needed shared library */ +#define ELIBBAD 80 /* Accessing a corrupted shared library */ +#define ELIBSCN 81 /* .lib section in a.out corrupted */ +#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 83 /* Cannot exec a shared library directly */ +#define EILSEQ 84 /* Illegal byte sequence */ +#define ERESTART 85 /* Interrupted system call should be restarted */ +#define ESTRPIPE 86 /* Streams pipe error */ +#define EUSERS 87 /* Too many users */ +#define ENOTSOCK 88 /* Socket operation on non-socket */ +#define EDESTADDRREQ 89 /* Destination address required */ +#define EMSGSIZE 90 /* Message too long */ +#define EPROTOTYPE 91 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 92 /* Protocol not available */ +#define EPROTONOSUPPORT 93 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ +#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 96 /* Protocol family not supported */ +#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ +#define EADDRINUSE 98 /* Address already in use */ +#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ +#define ENETDOWN 100 /* Network is down */ +#define ENETUNREACH 101 /* Network is unreachable */ +#define ENETRESET 102 /* Network dropped connection because of reset */ +#define ECONNABORTED 103 /* Software caused connection abort */ +#define ECONNRESET 104 /* Connection reset by peer */ +#define ENOBUFS 105 /* No buffer space available */ +#define EISCONN 106 /* Transport endpoint is already connected */ +#define ENOTCONN 107 /* Transport endpoint is not connected */ +#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 109 /* Too many references: cannot splice */ +#define ETIMEDOUT 110 /* Connection timed out */ +#define ECONNREFUSED 111 /* Connection refused */ +#define EHOSTDOWN 112 /* Host is down */ +#define EHOSTUNREACH 113 /* No route to host */ +#define EALREADY 114 /* Operation already in progress */ +#define EINPROGRESS 115 /* Operation now in progress */ +#define ESTALE 116 /* Stale NFS file handle */ +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ +#define EDQUOT 122 /* Quota exceeded */ + +#define ENOMEDIUM 123 /* No medium found */ +#define EMEDIUMTYPE 124 /* Wrong medium type */ + +#ifndef errno +extern int errno; +#endif + +#else /* LWIP_PROVIDE_ERRNO */ + +/* Define LWIP_ERRNO_STDINCLUDE if you want to include <errno.h> here */ +#ifdef LWIP_ERRNO_STDINCLUDE +#include <errno.h> +#else /* LWIP_ERRNO_STDINCLUDE */ +/* Define LWIP_ERRNO_INCLUDE to an equivalent of <errno.h> to include the error defines here */ +#ifdef LWIP_ERRNO_INCLUDE +#include LWIP_ERRNO_INCLUDE +#endif /* LWIP_ERRNO_INCLUDE */ +#endif /* LWIP_ERRNO_STDINCLUDE */ + +#endif /* LWIP_PROVIDE_ERRNO */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ERRNO_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h new file mode 100644 index 00000000..2036b246 --- /dev/null +++
b/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h @@ -0,0 +1,105 @@ +/** + * @file + * Ethernet output function - handles OUTGOING ethernet level traffic, implements + * ARP resolving. + * To be used in most low-level netif implementations + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_NETIF_ETHARP_H +#define LWIP_HDR_NETIF_ETHARP_H + +#include "lwip/opt.h" + +#if LWIP_ARP || LWIP_ETHERNET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip4_addr.h" +#include "lwip/netif.h" +#include "lwip/ip4.h" +#include "lwip/prot/ethernet.h" + +#if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/prot/etharp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** 1 seconds period */ +#define ARP_TMR_INTERVAL 1000 + +#if ARP_QUEUEING +/** struct for queueing outgoing packets for unknown address + * defined here to be accessed by memp.h + */ +struct etharp_q_entry { + struct etharp_q_entry *next; + struct pbuf *p; +}; +#endif /* ARP_QUEUEING */ + +#define etharp_init() /* Compatibility define, no init needed. */ +void etharp_tmr(void); +ssize_t etharp_find_addr(struct netif *netif, const ip4_addr_t *ipaddr, + struct eth_addr **eth_ret, const ip4_addr_t **ip_ret); +int etharp_get_entry(size_t i, ip4_addr_t **ipaddr, struct netif **netif, struct eth_addr **eth_ret); +err_t etharp_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr); +err_t etharp_query(struct netif *netif, const ip4_addr_t *ipaddr, struct pbuf *q); +err_t etharp_request(struct netif *netif, const ip4_addr_t *ipaddr); +/** For Ethernet network interfaces, we might want to send "gratuitous ARP"; + * this is an ARP packet sent by a node in order to spontaneously cause other + * nodes to update an entry in their ARP cache. 
+ * From RFC 3220 "IP Mobility Support for IPv4" section 4.6. */ +#define etharp_gratuitous(netif) etharp_request((netif), netif_ip4_addr(netif)) +void etharp_cleanup_netif(struct netif *netif); + +#if ETHARP_SUPPORT_STATIC_ENTRIES +err_t etharp_add_static_entry(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr); +err_t etharp_remove_static_entry(const ip4_addr_t *ipaddr); +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +void etharp_input(struct pbuf *p, struct netif *netif); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_ARP */ +#endif /* LWIP_ARP || LWIP_ETHERNET */ + +#endif /* LWIP_HDR_NETIF_ETHARP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h new file mode 100644 index 00000000..5e88dffd --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h @@ -0,0 +1,68 @@ +/** + * @file + * + * Ethernet output for IPv6. Uses ND tables for link-layer addressing. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ETHIP6_H +#define LWIP_HDR_ETHIP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_ETHERNET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + + +#ifdef __cplusplus +extern "C" { +#endif + + +err_t ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 && LWIP_ETHERNET */ + +#endif /* LWIP_HDR_ETHIP6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h new file mode 100644 index 00000000..f5a31fd4 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h @@ -0,0 +1,110 @@ +/** + * @file + * ICMP API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
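A sketch of the application-visible parts of etharp.h above: sending a gratuitous ARP after an address change and probing the ARP cache. my_netif and the peer address 192.168.1.1 are purely hypothetical.

#include "lwip/etharp.h"
#include "lwip/ip4_addr.h"

static void app_announce_and_lookup(struct netif *my_netif)
{
  ip4_addr_t peer;
  struct eth_addr *ethaddr;
  const ip4_addr_t *ipaddr;

  /* gratuitous ARP: neighbours refresh their cache entry for our own address */
  (void)etharp_gratuitous(my_netif);

  /* query the ARP cache for a neighbour */
  IP4_ADDR(&peer, 192, 168, 1, 1);
  if (etharp_find_addr(my_netif, &peer, &ethaddr, &ipaddr) >= 0) {
    /* found: ethaddr now points at the cached MAC address */
  }
}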
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ICMP_H +#define LWIP_HDR_ICMP_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/prot/icmp.h" + +#if LWIP_IPV6 && LWIP_ICMP6 +#include "lwip/icmp6.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** ICMP destination unreachable codes */ +enum icmp_dur_type { + /** net unreachable */ + ICMP_DUR_NET = 0, + /** host unreachable */ + ICMP_DUR_HOST = 1, + /** protocol unreachable */ + ICMP_DUR_PROTO = 2, + /** port unreachable */ + ICMP_DUR_PORT = 3, + /** fragmentation needed and DF set */ + ICMP_DUR_FRAG = 4, + /** source route failed */ + ICMP_DUR_SR = 5 +}; + +/** ICMP time exceeded codes */ +enum icmp_te_type { + /** time to live exceeded in transit */ + ICMP_TE_TTL = 0, + /** fragment reassembly time exceeded */ + ICMP_TE_FRAG = 1 +}; + +#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */ + +void icmp_input(struct pbuf *p, struct netif *inp); +void icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t); +void icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t); + +#endif /* LWIP_IPV4 && LWIP_ICMP */ + +#if LWIP_IPV4 && LWIP_IPV6 +#if LWIP_ICMP && LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) ((isipv6) ? 
\ + icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT) : \ + icmp_dest_unreach(pbuf, ICMP_DUR_PORT)) +#elif LWIP_ICMP +#define icmp_port_unreach(isipv6, pbuf) do{ if(!(isipv6)) { icmp_dest_unreach(pbuf, ICMP_DUR_PORT);}}while(0) +#elif LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) do{ if(isipv6) { icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT);}}while(0) +#else +#define icmp_port_unreach(isipv6, pbuf) +#endif +#elif LWIP_IPV6 && LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT) +#elif LWIP_IPV4 && LWIP_ICMP +#define icmp_port_unreach(isipv6, pbuf) icmp_dest_unreach(pbuf, ICMP_DUR_PORT) +#else /* (LWIP_IPV6 && LWIP_ICMP6) || (LWIP_IPV4 && LWIP_ICMP) */ +#define icmp_port_unreach(isipv6, pbuf) +#endif /* (LWIP_IPV6 && LWIP_ICMP6) || (LWIP_IPV4 && LWIP_ICMP) LWIP_IPV4*/ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ICMP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h new file mode 100644 index 00000000..0ccb7899 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h @@ -0,0 +1,72 @@ +/** + * @file + * + * IPv6 version of ICMP, as per RFC 4443. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
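The icmp_port_unreach() macro above is what a transport module calls when a datagram matches no local port; the sketch below mirrors the pattern used in lwIP's own UDP input path and is only valid from an input callback context, where the ip_current_*() accessors are populated.

#include "lwip/icmp.h"
#include "lwip/ip.h"
#include "lwip/pbuf.h"

/* Reject a datagram for which no matching PCB was found; p is assumed to
 * point back at the IP header of the offending packet. */
static void app_reject_unbound_port(struct pbuf *p)
{
  icmp_port_unreach(ip_current_is_v6(), p);
}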
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_ICMP6_H +#define LWIP_HDR_ICMP6_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/prot/icmp6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +void icmp6_input(struct pbuf *p, struct netif *inp); +void icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c); +void icmp6_packet_too_big(struct pbuf *p, u32_t mtu); +void icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c); +void icmp6_time_exceeded_with_addrs(struct pbuf *p, enum icmp6_te_code c, + const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr); +void icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, const void *pointer); + +#endif /* LWIP_ICMP6 && LWIP_IPV6 */ + + +#ifdef __cplusplus +} +#endif + + +#endif /* LWIP_HDR_ICMP6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h b/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h new file mode 100644 index 00000000..39017abd --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h @@ -0,0 +1,68 @@ +/** + * @file + * Interface Identification APIs from: + * RFC 3493: Basic Socket Interface Extensions for IPv6 + * Section 4: Interface Identification + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Joel Cunningham + * + */ +#ifndef LWIP_HDR_IF_H +#define LWIP_HDR_IF_H + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define IF_NAMESIZE NETIF_NAMESIZE + +char * lwip_if_indextoname(unsigned int ifindex, char *ifname); +unsigned int lwip_if_nametoindex(const char *ifname); + +#if LWIP_COMPAT_SOCKETS +#define if_indextoname(ifindex, ifname) lwip_if_indextoname(ifindex,ifname) +#define if_nametoindex(ifname) lwip_if_nametoindex(ifname) +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SOCKET */ + +#endif /* LWIP_HDR_IF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h new file mode 100644 index 00000000..ffd80e68 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h @@ -0,0 +1,115 @@ +/** + * @file + * IGMP API + */ + +/* + * Copyright (c) 2002 CITEL Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is a contribution to the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. 
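A short sketch for the RFC 3493-style interface-name helpers declared above; the index value is whatever the stack assigned when the netif was added, nothing here is specific to this patch.

#include "lwip/if_api.h"

static void app_show_ifname(unsigned int ifindex)
{
  char name[IF_NAMESIZE];

  if (lwip_if_indextoname(ifindex, name) != NULL) {
    /* round-trip: resolves back to ifindex when the interface exists */
    unsigned int idx = lwip_if_nametoindex(name);
    (void)idx;
  }
}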
+*/ + +#ifndef LWIP_HDR_IGMP_H +#define LWIP_HDR_IGMP_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/pbuf.h" + +#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* IGMP timer */ +#define IGMP_TMR_INTERVAL 100 /* Milliseconds */ +#define IGMP_V1_DELAYING_MEMBER_TMR (1000/IGMP_TMR_INTERVAL) +#define IGMP_JOIN_DELAYING_MEMBER_TMR (500 /IGMP_TMR_INTERVAL) + +/* Compatibility defines (don't use for new code) */ +#define IGMP_DEL_MAC_FILTER NETIF_DEL_MAC_FILTER +#define IGMP_ADD_MAC_FILTER NETIF_ADD_MAC_FILTER + +/** + * igmp group structure - there is + * a list of groups for each interface + * these should really be linked from the interface, but + * if we keep them separate we will not affect the lwip original code + * too much + * + * There will be a group for the all systems group address but this + * will not run the state machine as it is used to kick off reports + * from all the other groups + */ +struct igmp_group { + /** next link */ + struct igmp_group *next; + /** multicast address */ + ip4_addr_t group_address; + /** signifies we were the last person to report */ + u8_t last_reporter_flag; + /** current state of the group */ + u8_t group_state; + /** timer for reporting, negative is OFF */ + u16_t timer; + /** counter of simultaneous uses */ + u8_t use; +}; + +/* Prototypes */ +void igmp_init(void); +err_t igmp_start(struct netif *netif); +err_t igmp_stop(struct netif *netif); +void igmp_report_groups(struct netif *netif); +struct igmp_group *igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr); +void igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest); +err_t igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr); +err_t igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr); +err_t igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr); +err_t igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr); +void igmp_tmr(void); + +/** @ingroup igmp + * Get list head of IGMP groups for netif. + * Note: The allsystems group IP is contained in the list as first entry. + * @see @ref netif_set_igmp_mac_filter() + */ +#define netif_igmp_data(netif) ((struct igmp_group *)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_IGMP */ + +#endif /* LWIP_HDR_IGMP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h b/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h new file mode 100644 index 00000000..2982a0f4 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h @@ -0,0 +1,169 @@ +/** + * @file + * This file (together with sockets.h) aims to provide structs and functions from + * - arpa/inet.h + * - netinet/in.h + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INET_H +#define LWIP_HDR_INET_H + +#include "lwip/opt.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* If your port already typedef's in_addr_t, define IN_ADDR_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(in_addr_t) && !defined(IN_ADDR_T_DEFINED) +typedef u32_t in_addr_t; +#endif + +struct in_addr { + in_addr_t s_addr; +}; + +struct in6_addr { + union { + u32_t u32_addr[4]; + u8_t u8_addr[16]; + } un; +#define s6_addr un.u8_addr +}; + +/** 255.255.255.255 */ +#define INADDR_NONE IPADDR_NONE +/** 127.0.0.1 */ +#define INADDR_LOOPBACK IPADDR_LOOPBACK +/** 0.0.0.0 */ +#define INADDR_ANY IPADDR_ANY +/** 255.255.255.255 */ +#define INADDR_BROADCAST IPADDR_BROADCAST + +/** This macro can be used to initialize a variable of type struct in6_addr + to the IPv6 wildcard address. */ +#define IN6ADDR_ANY_INIT {{{0,0,0,0}}} +/** This macro can be used to initialize a variable of type struct in6_addr + to the IPv6 loopback address. */ +#define IN6ADDR_LOOPBACK_INIT {{{0,0,0,PP_HTONL(1)}}} +/** This variable is initialized by the system to contain the wildcard IPv6 address. */ +extern const struct in6_addr in6addr_any; + +/* Definitions of the bits in an (IPv4) Internet address integer. + + On subnets, host and network parts are found according to + the subnet mask, not these masks. */ +#define IN_CLASSA(a) IP_CLASSA(a) +#define IN_CLASSA_NET IP_CLASSA_NET +#define IN_CLASSA_NSHIFT IP_CLASSA_NSHIFT +#define IN_CLASSA_HOST IP_CLASSA_HOST +#define IN_CLASSA_MAX IP_CLASSA_MAX + +#define IN_CLASSB(b) IP_CLASSB(b) +#define IN_CLASSB_NET IP_CLASSB_NET +#define IN_CLASSB_NSHIFT IP_CLASSB_NSHIFT +#define IN_CLASSB_HOST IP_CLASSB_HOST +#define IN_CLASSB_MAX IP_CLASSB_MAX + +#define IN_CLASSC(c) IP_CLASSC(c) +#define IN_CLASSC_NET IP_CLASSC_NET +#define IN_CLASSC_NSHIFT IP_CLASSC_NSHIFT +#define IN_CLASSC_HOST IP_CLASSC_HOST +#define IN_CLASSC_MAX IP_CLASSC_MAX + +#define IN_CLASSD(d) IP_CLASSD(d) +#define IN_CLASSD_NET IP_CLASSD_NET /* These ones aren't really */ +#define IN_CLASSD_NSHIFT IP_CLASSD_NSHIFT /* net and host fields, but */ +#define IN_CLASSD_HOST IP_CLASSD_HOST /* routing needn't know. 
*/ +#define IN_CLASSD_MAX IP_CLASSD_MAX + +#define IN_MULTICAST(a) IP_MULTICAST(a) + +#define IN_EXPERIMENTAL(a) IP_EXPERIMENTAL(a) +#define IN_BADCLASS(a) IP_BADCLASS(a) + +#define IN_LOOPBACKNET IP_LOOPBACKNET + + +#ifndef INET_ADDRSTRLEN +#define INET_ADDRSTRLEN IP4ADDR_STRLEN_MAX +#endif +#if LWIP_IPV6 +#ifndef INET6_ADDRSTRLEN +#define INET6_ADDRSTRLEN IP6ADDR_STRLEN_MAX +#endif +#endif + +#if LWIP_IPV4 + +#define inet_addr_from_ip4addr(target_inaddr, source_ipaddr) ((target_inaddr)->s_addr = ip4_addr_get_u32(source_ipaddr)) +#define inet_addr_to_ip4addr(target_ipaddr, source_inaddr) (ip4_addr_set_u32(target_ipaddr, (source_inaddr)->s_addr)) + +/* directly map this to the lwip internal functions */ +#define inet_addr(cp) ipaddr_addr(cp) +#define inet_aton(cp, addr) ip4addr_aton(cp, (ip4_addr_t*)addr) +#define inet_ntoa(addr) ip4addr_ntoa((const ip4_addr_t*)&(addr)) +#define inet_ntoa_r(addr, buf, buflen) ip4addr_ntoa_r((const ip4_addr_t*)&(addr), buf, buflen) + +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +#define inet6_addr_from_ip6addr(target_in6addr, source_ip6addr) {(target_in6addr)->un.u32_addr[0] = (source_ip6addr)->addr[0]; \ + (target_in6addr)->un.u32_addr[1] = (source_ip6addr)->addr[1]; \ + (target_in6addr)->un.u32_addr[2] = (source_ip6addr)->addr[2]; \ + (target_in6addr)->un.u32_addr[3] = (source_ip6addr)->addr[3];} +#define inet6_addr_to_ip6addr(target_ip6addr, source_in6addr) {(target_ip6addr)->addr[0] = (source_in6addr)->un.u32_addr[0]; \ + (target_ip6addr)->addr[1] = (source_in6addr)->un.u32_addr[1]; \ + (target_ip6addr)->addr[2] = (source_in6addr)->un.u32_addr[2]; \ + (target_ip6addr)->addr[3] = (source_in6addr)->un.u32_addr[3]; \ + ip6_addr_clear_zone(target_ip6addr);} + +/* directly map this to the lwip internal functions */ +#define inet6_aton(cp, addr) ip6addr_aton(cp, (ip6_addr_t*)addr) +#define inet6_ntoa(addr) ip6addr_ntoa((const ip6_addr_t*)&(addr)) +#define inet6_ntoa_r(addr, buf, buflen) ip6addr_ntoa_r((const ip6_addr_t*)&(addr), buf, buflen) + +#endif /* LWIP_IPV6 */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INET_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h b/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h new file mode 100644 index 00000000..76893ef5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h @@ -0,0 +1,105 @@ +/** + * @file + * IP checksum calculation functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
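A sketch of the BSD-style IPv4 conversion macros from inet.h above; 192.168.1.10 is only an example address, and inet_ntoa() returns a pointer to a static buffer, so it is not reentrant.

#include "lwip/inet.h"

static void app_convert_addresses(void)
{
  struct in_addr in;
  const char *s;
  in_addr_t a;

  a = inet_addr("192.168.1.10");   /* dotted-decimal -> u32 in network byte order */
  if (a != INADDR_NONE) {
    in.s_addr = a;
    s = inet_ntoa(in);             /* u32 -> dotted-decimal string (static buffer) */
    (void)s;
  }
}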
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INET_CHKSUM_H +#define LWIP_HDR_INET_CHKSUM_H + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" + +/** Swap the bytes in an u16_t: much like lwip_htons() for little-endian */ +#ifndef SWAP_BYTES_IN_WORD +#define SWAP_BYTES_IN_WORD(w) (((w) & 0xff) << 8) | (((w) & 0xff00) >> 8) +#endif /* SWAP_BYTES_IN_WORD */ + +/** Split an u32_t in two u16_ts and add them up */ +#ifndef FOLD_U32T +#define FOLD_U32T(u) ((u32_t)(((u) >> 16) + ((u) & 0x0000ffffUL))) +#endif + +#if LWIP_CHECKSUM_ON_COPY +/** Function-like macro: same as MEMCPY but returns the checksum of copied data + as u16_t */ +# ifndef LWIP_CHKSUM_COPY +# define LWIP_CHKSUM_COPY(dst, src, len) lwip_chksum_copy(dst, src, len) +# ifndef LWIP_CHKSUM_COPY_ALGORITHM +# define LWIP_CHKSUM_COPY_ALGORITHM 1 +# endif /* LWIP_CHKSUM_COPY_ALGORITHM */ +# else /* LWIP_CHKSUM_COPY */ +# define LWIP_CHKSUM_COPY_ALGORITHM 0 +# endif /* LWIP_CHKSUM_COPY */ +#else /* LWIP_CHECKSUM_ON_COPY */ +# define LWIP_CHKSUM_COPY_ALGORITHM 0 +#endif /* LWIP_CHECKSUM_ON_COPY */ + +#ifdef __cplusplus +extern "C" { +#endif + +u16_t inet_chksum(const void *dataptr, u16_t len); +u16_t inet_chksum_pbuf(struct pbuf *p); +#if LWIP_CHKSUM_COPY_ALGORITHM +u16_t lwip_chksum_copy(void *dst, const void *src, u16_t len); +#endif /* LWIP_CHKSUM_COPY_ALGORITHM */ + +#if LWIP_IPV4 +u16_t inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip4_addr_t *src, const ip4_addr_t *dest); +u16_t inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, + u16_t proto_len, u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest); +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +u16_t ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip6_addr_t *src, const ip6_addr_t *dest); +u16_t ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest); +#endif /* LWIP_IPV6 */ + + +u16_t ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip_addr_t *src, const ip_addr_t *dest); +u16_t ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INET_H */ + diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/init.h b/Middlewares/Third_Party/LwIP/src/include/lwip/init.h new file mode 100644 index 00000000..a149be18 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/init.h @@ -0,0 +1,100 @@ +/** + * @file + * lwIP initialization API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INIT_H +#define LWIP_HDR_INIT_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup lwip_version Version + * @ingroup lwip + * @{ + */ + +/** X.x.x: Major version of the stack */ +#define LWIP_VERSION_MAJOR 2 +/** x.X.x: Minor version of the stack */ +#define LWIP_VERSION_MINOR 1 +/** x.x.X: Revision of the stack */ +#define LWIP_VERSION_REVISION 2 +/** For release candidates, this is set to 1..254 + * For official releases, this is set to 255 (LWIP_RC_RELEASE) + * For development versions (Git), this is set to 0 (LWIP_RC_DEVELOPMENT) */ +#define LWIP_VERSION_RC LWIP_RC_RELEASE + +/** LWIP_VERSION_RC is set to LWIP_RC_RELEASE for official releases */ +#define LWIP_RC_RELEASE 255 +/** LWIP_VERSION_RC is set to LWIP_RC_DEVELOPMENT for Git versions */ +#define LWIP_RC_DEVELOPMENT 0 + +#define LWIP_VERSION_IS_RELEASE (LWIP_VERSION_RC == LWIP_RC_RELEASE) +#define LWIP_VERSION_IS_DEVELOPMENT (LWIP_VERSION_RC == LWIP_RC_DEVELOPMENT) +#define LWIP_VERSION_IS_RC ((LWIP_VERSION_RC != LWIP_RC_RELEASE) && (LWIP_VERSION_RC != LWIP_RC_DEVELOPMENT)) + +/* Some helper defines to get a version string */ +#define LWIP_VERSTR2(x) #x +#define LWIP_VERSTR(x) LWIP_VERSTR2(x) +#if LWIP_VERSION_IS_RELEASE +#define LWIP_VERSION_STRING_SUFFIX "" +#elif LWIP_VERSION_IS_DEVELOPMENT +#define LWIP_VERSION_STRING_SUFFIX "d" +#else +#define LWIP_VERSION_STRING_SUFFIX "rc" LWIP_VERSTR(LWIP_VERSION_RC) +#endif + +/** Provides the version of the stack */ +#define LWIP_VERSION ((LWIP_VERSION_MAJOR) << 24 | (LWIP_VERSION_MINOR) << 16 | \ + (LWIP_VERSION_REVISION) << 8 | (LWIP_VERSION_RC)) +/** Provides the version of the stack as string */ +#define LWIP_VERSION_STRING LWIP_VERSTR(LWIP_VERSION_MAJOR) "." LWIP_VERSTR(LWIP_VERSION_MINOR) "." 
LWIP_VERSTR(LWIP_VERSION_REVISION) LWIP_VERSION_STRING_SUFFIX + +/** + * @} + */ + +/* Modules initialization */ +void lwip_init(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INIT_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h new file mode 100644 index 00000000..653c3b2f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h @@ -0,0 +1,330 @@ +/** + * @file + * IP API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP_H +#define LWIP_HDR_IP_H + +#include "lwip/opt.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/netif.h" +#include "lwip/ip4.h" +#include "lwip/ip6.h" +#include "lwip/prot/ip.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is passed as the destination address to ip_output_if (not + to ip_output), meaning that an IP header already is constructed + in the pbuf. This is used when TCP retransmits. */ +#define LWIP_IP_HDRINCL NULL + +/** pbufs passed to IP must have a ref-count of 1 as their payload pointer + gets altered as the packet is passed down the stack */ +#ifndef LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX +#define LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p) LWIP_ASSERT("p->ref == 1", (p)->ref == 1) +#endif + +#if LWIP_NETIF_USE_HINTS +#define IP_PCB_NETIFHINT ;struct netif_hint netif_hints +#else /* LWIP_NETIF_USE_HINTS */ +#define IP_PCB_NETIFHINT +#endif /* LWIP_NETIF_USE_HINTS */ + +/** This is the common part of all PCB types. It needs to be at the + beginning of a PCB type definition. It is located here so that + changes to this common part are made in one location instead of + having to change all PCB structs. 
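A sketch of the bring-up implied by init.h above for a NO_SYS (raw API) build; with an operating system the same initialization normally happens inside tcpip_init() instead.

#include "lwip/init.h"

static void app_stack_init(void)
{
  /* must run exactly once before any other lwIP call in NO_SYS builds */
  lwip_init();

#if LWIP_VERSION_IS_RELEASE
  /* official release build: LWIP_VERSION_STRING expands to "2.1.2" here */
#endif
}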
*/ +#define IP_PCB \ + /* ip addresses in network byte order */ \ + ip_addr_t local_ip; \ + ip_addr_t remote_ip; \ + /* Bound netif index */ \ + u8_t netif_idx; \ + /* Socket options */ \ + u8_t so_options; \ + /* Type Of Service */ \ + u8_t tos; \ + /* Time To Live */ \ + u8_t ttl \ + /* link layer address resolution hint */ \ + IP_PCB_NETIFHINT + +struct ip_pcb { + /* Common members of all PCB types */ + IP_PCB; +}; + +/* + * Option flags per-socket. These are the same like SO_XXX in sockets.h + */ +#define SOF_REUSEADDR 0x04U /* allow local address reuse */ +#define SOF_KEEPALIVE 0x08U /* keep connections alive */ +#define SOF_BROADCAST 0x20U /* permit to send and to receive broadcast messages (see IP_SOF_BROADCAST option) */ + +/* These flags are inherited (e.g. from a listen-pcb to a connection-pcb): */ +#define SOF_INHERITED (SOF_REUSEADDR|SOF_KEEPALIVE) + +/** Global variables of this module, kept in a struct for efficient access using base+index. */ +struct ip_globals +{ + /** The interface that accepted the packet for the current callback invocation. */ + struct netif *current_netif; + /** The interface that received the packet for the current callback invocation. */ + struct netif *current_input_netif; +#if LWIP_IPV4 + /** Header of the input packet currently being processed. */ + const struct ip_hdr *current_ip4_header; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + /** Header of the input IPv6 packet currently being processed. */ + struct ip6_hdr *current_ip6_header; +#endif /* LWIP_IPV6 */ + /** Total header length of current_ip4/6_header (i.e. after this, the UDP/TCP header starts) */ + u16_t current_ip_header_tot_len; + /** Source IP address of current_header */ + ip_addr_t current_iphdr_src; + /** Destination IP address of current_header */ + ip_addr_t current_iphdr_dest; +}; +extern struct ip_globals ip_data; + + +/** Get the interface that accepted the current packet. + * This may or may not be the receiving netif, depending on your netif/network setup. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip_current_netif() (ip_data.current_netif) +/** Get the interface that received the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip_current_input_netif() (ip_data.current_input_netif) +/** Total header length of ip(6)_current_header() (i.e. after this, the UDP/TCP header starts) */ +#define ip_current_header_tot_len() (ip_data.current_ip_header_tot_len) +/** Source IP address of current_header */ +#define ip_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP address of current_header */ +#define ip_current_dest_addr() (&ip_data.current_iphdr_dest) + +#if LWIP_IPV4 && LWIP_IPV6 +/** Get the IPv4 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip4_current_header() ip_data.current_ip4_header +/** Get the IPv6 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. 
*/ +#define ip6_current_header() ((const struct ip6_hdr*)(ip_data.current_ip6_header)) +/** Returns TRUE if the current IP input packet is IPv6, FALSE if it is IPv4 */ +#define ip_current_is_v6() (ip6_current_header() != NULL) +/** Source IPv6 address of current_header */ +#define ip6_current_src_addr() (ip_2_ip6(&ip_data.current_iphdr_src)) +/** Destination IPv6 address of current_header */ +#define ip6_current_dest_addr() (ip_2_ip6(&ip_data.current_iphdr_dest)) +/** Get the transport layer protocol */ +#define ip_current_header_proto() (ip_current_is_v6() ? \ + IP6H_NEXTH(ip6_current_header()) :\ + IPH_PROTO(ip4_current_header())) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)((ip_current_is_v6() ? \ + (const u8_t*)ip6_current_header() : (const u8_t*)ip4_current_header()) + ip_current_header_tot_len())) + +/** Source IP4 address of current_header */ +#define ip4_current_src_addr() (ip_2_ip4(&ip_data.current_iphdr_src)) +/** Destination IP4 address of current_header */ +#define ip4_current_dest_addr() (ip_2_ip4(&ip_data.current_iphdr_dest)) + +#elif LWIP_IPV4 /* LWIP_IPV4 && LWIP_IPV6 */ + +/** Get the IPv4 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip4_current_header() ip_data.current_ip4_header +/** Always returns FALSE when only supporting IPv4 only */ +#define ip_current_is_v6() 0 +/** Get the transport layer protocol */ +#define ip_current_header_proto() IPH_PROTO(ip4_current_header()) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)((const u8_t*)ip4_current_header() + ip_current_header_tot_len())) +/** Source IP4 address of current_header */ +#define ip4_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP4 address of current_header */ +#define ip4_current_dest_addr() (&ip_data.current_iphdr_dest) + +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ + +/** Get the IPv6 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. 
*/ +#define ip6_current_header() ((const struct ip6_hdr*)(ip_data.current_ip6_header)) +/** Always returns TRUE when only supporting IPv6 only */ +#define ip_current_is_v6() 1 +/** Get the transport layer protocol */ +#define ip_current_header_proto() IP6H_NEXTH(ip6_current_header()) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)(((const u8_t*)ip6_current_header()) + ip_current_header_tot_len())) +/** Source IP6 address of current_header */ +#define ip6_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP6 address of current_header */ +#define ip6_current_dest_addr() (&ip_data.current_iphdr_dest) + +#endif /* LWIP_IPV6 */ + +/** Union source address of current_header */ +#define ip_current_src_addr() (&ip_data.current_iphdr_src) +/** Union destination address of current_header */ +#define ip_current_dest_addr() (&ip_data.current_iphdr_dest) + +/** Gets an IP pcb option (SOF_* flags) */ +#define ip_get_option(pcb, opt) ((pcb)->so_options & (opt)) +/** Sets an IP pcb option (SOF_* flags) */ +#define ip_set_option(pcb, opt) ((pcb)->so_options = (u8_t)((pcb)->so_options | (opt))) +/** Resets an IP pcb option (SOF_* flags) */ +#define ip_reset_option(pcb, opt) ((pcb)->so_options = (u8_t)((pcb)->so_options & ~(opt))) + +#if LWIP_IPV4 && LWIP_IPV6 +/** + * @ingroup ip + * Output IP packet, netif is selected by source address + */ +#define ip_output(p, src, dest, ttl, tos, proto) \ + (IP_IS_V6(dest) ? \ + ip6_output(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto) : \ + ip4_output(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto)) +/** + * @ingroup ip + * Output IP packet to specified interface + */ +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif) : \ + ip4_output_if(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif)) +/** + * @ingroup ip + * Output IP packet to interface specifying source address + */ +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if_src(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif) : \ + ip4_output_if_src(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif)) +/** Output IP packet that already includes an IP header. */ +#define ip_output_if_hdrincl(p, src, dest, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if(p, ip_2_ip6(src), LWIP_IP_HDRINCL, 0, 0, 0, netif) : \ + ip4_output_if(p, ip_2_ip4(src), LWIP_IP_HDRINCL, 0, 0, 0, netif)) +/** Output IP packet with netif_hint */ +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + (IP_IS_V6(dest) ? \ + ip6_output_hinted(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif_hint) : \ + ip4_output_hinted(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif_hint)) +/** + * @ingroup ip + * Get netif for address combination. See \ref ip6_route and \ref ip4_route + */ +#define ip_route(src, dest) \ + (IP_IS_V6(dest) ? \ + ip6_route(ip_2_ip6(src), ip_2_ip6(dest)) : \ + ip4_route_src(ip_2_ip4(src), ip_2_ip4(dest))) +/** + * @ingroup ip + * Get netif for IP. + */ +#define ip_netif_get_local_ip(netif, dest) (IP_IS_V6(dest) ? \ + ip6_netif_get_local_ip(netif, ip_2_ip6(dest)) : \ + ip4_netif_get_local_ip(netif)) +#define ip_debug_print(is_ipv6, p) ((is_ipv6) ? 
ip6_debug_print(p) : ip4_debug_print(p)) + +err_t ip_input(struct pbuf *p, struct netif *inp); + +#elif LWIP_IPV4 /* LWIP_IPV4 && LWIP_IPV6 */ + +#define ip_output(p, src, dest, ttl, tos, proto) \ + ip4_output(p, src, dest, ttl, tos, proto) +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + ip4_output_if(p, src, dest, ttl, tos, proto, netif) +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + ip4_output_if_src(p, src, dest, ttl, tos, proto, netif) +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + ip4_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) +#define ip_output_if_hdrincl(p, src, dest, netif) \ + ip4_output_if(p, src, LWIP_IP_HDRINCL, 0, 0, 0, netif) +#define ip_route(src, dest) \ + ip4_route_src(src, dest) +#define ip_netif_get_local_ip(netif, dest) \ + ip4_netif_get_local_ip(netif) +#define ip_debug_print(is_ipv6, p) ip4_debug_print(p) + +#define ip_input ip4_input + +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ + +#define ip_output(p, src, dest, ttl, tos, proto) \ + ip6_output(p, src, dest, ttl, tos, proto) +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + ip6_output_if(p, src, dest, ttl, tos, proto, netif) +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + ip6_output_if_src(p, src, dest, ttl, tos, proto, netif) +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + ip6_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) +#define ip_output_if_hdrincl(p, src, dest, netif) \ + ip6_output_if(p, src, LWIP_IP_HDRINCL, 0, 0, 0, netif) +#define ip_route(src, dest) \ + ip6_route(src, dest) +#define ip_netif_get_local_ip(netif, dest) \ + ip6_netif_get_local_ip(netif, dest) +#define ip_debug_print(is_ipv6, p) ip6_debug_print(p) + +#define ip_input ip6_input + +#endif /* LWIP_IPV6 */ + +#define ip_route_get_local_ip(src, dest, netif, ipaddr) do { \ + (netif) = ip_route(src, dest); \ + (ipaddr) = ip_netif_get_local_ip(netif, dest); \ +}while(0) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP_H */ + + diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h new file mode 100644 index 00000000..fd35a336 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h @@ -0,0 +1,111 @@ +/** + * @file + * IPv4 API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP4_H +#define LWIP_HDR_IP4_H + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/ip4_addr.h" +#include "lwip/err.h" +#include "lwip/netif.h" +#include "lwip/prot/ip4.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC +#define LWIP_IPV4_SRC_ROUTING 1 +#else +#define LWIP_IPV4_SRC_ROUTING 0 +#endif + +/** Currently, the function ip_output_if_opt() is only used with IGMP */ +#define IP_OPTIONS_SEND (LWIP_IPV4 && LWIP_IGMP) + +#define ip_init() /* Compatibility define, no init needed. */ +struct netif *ip4_route(const ip4_addr_t *dest); +#if LWIP_IPV4_SRC_ROUTING +struct netif *ip4_route_src(const ip4_addr_t *src, const ip4_addr_t *dest); +#else /* LWIP_IPV4_SRC_ROUTING */ +#define ip4_route_src(src, dest) ip4_route(dest) +#endif /* LWIP_IPV4_SRC_ROUTING */ +err_t ip4_input(struct pbuf *p, struct netif *inp); +err_t ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto); +err_t ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif); +err_t ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif); +#if LWIP_NETIF_USE_HINTS +err_t ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif_hint *netif_hint); +#endif /* LWIP_NETIF_USE_HINTS */ +#if IP_OPTIONS_SEND +err_t ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen); +err_t ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen); +#endif /* IP_OPTIONS_SEND */ + +#if LWIP_MULTICAST_TX_OPTIONS +void ip4_set_default_multicast_netif(struct netif* default_multicast_netif); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#define ip4_netif_get_local_ip(netif) (((netif) != NULL) ? netif_ip_addr4(netif) : NULL) + +#if IP_DEBUG +void ip4_debug_print(struct pbuf *p); +#else +#define ip4_debug_print(p) +#endif /* IP_DEBUG */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP_H */ + + diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h new file mode 100644 index 00000000..f244c4f5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h @@ -0,0 +1,216 @@ +/** + * @file + * IPv4 address API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP4_ADDR_H +#define LWIP_HDR_IP4_ADDR_H + +#include "lwip/opt.h" +#include "lwip/def.h" + +#if LWIP_IPV4 + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the aligned version of ip4_addr_t, + used as local variable, on the stack, etc. */ +struct ip4_addr { + u32_t addr; +}; + +/** ip4_addr_t uses a struct for convenience only, so that the same defines can + * operate both on ip4_addr_t as well as on ip4_addr_p_t. */ +typedef struct ip4_addr ip4_addr_t; + +/* Forward declaration to not include netif.h */ +struct netif; + +/** 255.255.255.255 */ +#define IPADDR_NONE ((u32_t)0xffffffffUL) +/** 127.0.0.1 */ +#define IPADDR_LOOPBACK ((u32_t)0x7f000001UL) +/** 0.0.0.0 */ +#define IPADDR_ANY ((u32_t)0x00000000UL) +/** 255.255.255.255 */ +#define IPADDR_BROADCAST ((u32_t)0xffffffffUL) + +/* Definitions of the bits in an Internet address integer. + + On subnets, host and network parts are found according to + the subnet mask, not these masks. */ +#define IP_CLASSA(a) ((((u32_t)(a)) & 0x80000000UL) == 0) +#define IP_CLASSA_NET 0xff000000 +#define IP_CLASSA_NSHIFT 24 +#define IP_CLASSA_HOST (0xffffffff & ~IP_CLASSA_NET) +#define IP_CLASSA_MAX 128 + +#define IP_CLASSB(a) ((((u32_t)(a)) & 0xc0000000UL) == 0x80000000UL) +#define IP_CLASSB_NET 0xffff0000 +#define IP_CLASSB_NSHIFT 16 +#define IP_CLASSB_HOST (0xffffffff & ~IP_CLASSB_NET) +#define IP_CLASSB_MAX 65536 + +#define IP_CLASSC(a) ((((u32_t)(a)) & 0xe0000000UL) == 0xc0000000UL) +#define IP_CLASSC_NET 0xffffff00 +#define IP_CLASSC_NSHIFT 8 +#define IP_CLASSC_HOST (0xffffffff & ~IP_CLASSC_NET) + +#define IP_CLASSD(a) (((u32_t)(a) & 0xf0000000UL) == 0xe0000000UL) +#define IP_CLASSD_NET 0xf0000000 /* These ones aren't really */ +#define IP_CLASSD_NSHIFT 28 /* net and host fields, but */ +#define IP_CLASSD_HOST 0x0fffffff /* routing needn't know. 
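Illustration only, not part of this patch: the classful-address macros above take the 32-bit address in host byte order, so a literal built with LWIP_MAKEU32 from lwip/def.h can be tested directly. For 224.0.0.1:

u32_t a = LWIP_MAKEU32(224, 0, 0, 1);  /* 0xE0000001 in host byte order */
int is_class_d = IP_CLASSD(a);         /* nonzero: 224.0.0.1 is class D (multicast) */
int is_class_a = IP_CLASSA(a);         /* 0 */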
*/ +#define IP_MULTICAST(a) IP_CLASSD(a) + +#define IP_EXPERIMENTAL(a) (((u32_t)(a) & 0xf0000000UL) == 0xf0000000UL) +#define IP_BADCLASS(a) (((u32_t)(a) & 0xf0000000UL) == 0xf0000000UL) + +#define IP_LOOPBACKNET 127 /* official! */ + +/** Set an IP address given by the four byte-parts */ +#define IP4_ADDR(ipaddr, a,b,c,d) (ipaddr)->addr = PP_HTONL(LWIP_MAKEU32(a,b,c,d)) + +/** Copy IP address - faster than ip4_addr_set: no NULL check */ +#define ip4_addr_copy(dest, src) ((dest).addr = (src).addr) +/** Safely copy one IP address to another (src may be NULL) */ +#define ip4_addr_set(dest, src) ((dest)->addr = \ + ((src) == NULL ? 0 : \ + (src)->addr)) +/** Set complete address to zero */ +#define ip4_addr_set_zero(ipaddr) ((ipaddr)->addr = 0) +/** Set address to IPADDR_ANY (no need for lwip_htonl()) */ +#define ip4_addr_set_any(ipaddr) ((ipaddr)->addr = IPADDR_ANY) +/** Set address to loopback address */ +#define ip4_addr_set_loopback(ipaddr) ((ipaddr)->addr = PP_HTONL(IPADDR_LOOPBACK)) +/** Check if an address is in the loopback region */ +#define ip4_addr_isloopback(ipaddr) (((ipaddr)->addr & PP_HTONL(IP_CLASSA_NET)) == PP_HTONL(((u32_t)IP_LOOPBACKNET) << 24)) +/** Safely copy one IP address to another and change byte order + * from host- to network-order. */ +#define ip4_addr_set_hton(dest, src) ((dest)->addr = \ + ((src) == NULL ? 0:\ + lwip_htonl((src)->addr))) +/** IPv4 only: set the IP address given as an u32_t */ +#define ip4_addr_set_u32(dest_ipaddr, src_u32) ((dest_ipaddr)->addr = (src_u32)) +/** IPv4 only: get the IP address as an u32_t */ +#define ip4_addr_get_u32(src_ipaddr) ((src_ipaddr)->addr) + +/** Get the network address by combining host address with netmask */ +#define ip4_addr_get_network(target, host, netmask) do { ((target)->addr = ((host)->addr) & ((netmask)->addr)); } while(0) + +/** + * Determine if two address are on the same network. + * + * @arg addr1 IP address 1 + * @arg addr2 IP address 2 + * @arg mask network identifier mask + * @return !0 if the network identifiers of both address match + */ +#define ip4_addr_netcmp(addr1, addr2, mask) (((addr1)->addr & \ + (mask)->addr) == \ + ((addr2)->addr & \ + (mask)->addr)) +#define ip4_addr_cmp(addr1, addr2) ((addr1)->addr == (addr2)->addr) + +#define ip4_addr_isany_val(addr1) ((addr1).addr == IPADDR_ANY) +#define ip4_addr_isany(addr1) ((addr1) == NULL || ip4_addr_isany_val(*(addr1))) + +#define ip4_addr_isbroadcast(addr1, netif) ip4_addr_isbroadcast_u32((addr1)->addr, netif) +u8_t ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif); + +#define ip_addr_netmask_valid(netmask) ip4_addr_netmask_valid((netmask)->addr) +u8_t ip4_addr_netmask_valid(u32_t netmask); + +#define ip4_addr_ismulticast(addr1) (((addr1)->addr & PP_HTONL(0xf0000000UL)) == PP_HTONL(0xe0000000UL)) + +#define ip4_addr_islinklocal(addr1) (((addr1)->addr & PP_HTONL(0xffff0000UL)) == PP_HTONL(0xa9fe0000UL)) + +#define ip4_addr_debug_print_parts(debug, a, b, c, d) \ + LWIP_DEBUGF(debug, ("%" U16_F ".%" U16_F ".%" U16_F ".%" U16_F, a, b, c, d)) +#define ip4_addr_debug_print(debug, ipaddr) \ + ip4_addr_debug_print_parts(debug, \ + (u16_t)((ipaddr) != NULL ? ip4_addr1_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? ip4_addr2_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? ip4_addr3_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? 
ip4_addr4_16(ipaddr) : 0)) +#define ip4_addr_debug_print_val(debug, ipaddr) \ + ip4_addr_debug_print_parts(debug, \ + ip4_addr1_16_val(ipaddr), \ + ip4_addr2_16_val(ipaddr), \ + ip4_addr3_16_val(ipaddr), \ + ip4_addr4_16_val(ipaddr)) + +/* Get one byte from the 4-byte address */ +#define ip4_addr_get_byte(ipaddr, idx) (((const u8_t*)(&(ipaddr)->addr))[idx]) +#define ip4_addr1(ipaddr) ip4_addr_get_byte(ipaddr, 0) +#define ip4_addr2(ipaddr) ip4_addr_get_byte(ipaddr, 1) +#define ip4_addr3(ipaddr) ip4_addr_get_byte(ipaddr, 2) +#define ip4_addr4(ipaddr) ip4_addr_get_byte(ipaddr, 3) +/* Get one byte from the 4-byte address, but argument is 'ip4_addr_t', + * not a pointer */ +#define ip4_addr_get_byte_val(ipaddr, idx) ((u8_t)(((ipaddr).addr >> (idx * 8)) & 0xff)) +#define ip4_addr1_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 0) +#define ip4_addr2_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 1) +#define ip4_addr3_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 2) +#define ip4_addr4_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 3) +/* These are cast to u16_t, with the intent that they are often arguments + * to printf using the U16_F format from cc.h. */ +#define ip4_addr1_16(ipaddr) ((u16_t)ip4_addr1(ipaddr)) +#define ip4_addr2_16(ipaddr) ((u16_t)ip4_addr2(ipaddr)) +#define ip4_addr3_16(ipaddr) ((u16_t)ip4_addr3(ipaddr)) +#define ip4_addr4_16(ipaddr) ((u16_t)ip4_addr4(ipaddr)) +#define ip4_addr1_16_val(ipaddr) ((u16_t)ip4_addr1_val(ipaddr)) +#define ip4_addr2_16_val(ipaddr) ((u16_t)ip4_addr2_val(ipaddr)) +#define ip4_addr3_16_val(ipaddr) ((u16_t)ip4_addr3_val(ipaddr)) +#define ip4_addr4_16_val(ipaddr) ((u16_t)ip4_addr4_val(ipaddr)) + +#define IP4ADDR_STRLEN_MAX 16 + +/** For backwards compatibility */ +#define ip_ntoa(ipaddr) ipaddr_ntoa(ipaddr) + +u32_t ipaddr_addr(const char *cp); +int ip4addr_aton(const char *cp, ip4_addr_t *addr); +/** returns ptr to static buffer; not reentrant! */ +char *ip4addr_ntoa(const ip4_addr_t *addr); +char *ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP_ADDR_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h new file mode 100644 index 00000000..ed5bf14a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h @@ -0,0 +1,100 @@ +/** + * @file + * IP fragmentation/reassembly + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Jani Monoses + * + */ + +#ifndef LWIP_HDR_IP4_FRAG_H +#define LWIP_HDR_IP4_FRAG_H + +#include "lwip/opt.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/ip_addr.h" +#include "lwip/ip.h" + +#if LWIP_IPV4 + +#ifdef __cplusplus +extern "C" { +#endif + +#if IP_REASSEMBLY +/* The IP reassembly timer interval in milliseconds. */ +#define IP_TMR_INTERVAL 1000 + +/** IP reassembly helper struct. + * This is exported because memp needs to know the size. + */ +struct ip_reassdata { + struct ip_reassdata *next; + struct pbuf *p; + struct ip_hdr iphdr; + u16_t datagram_len; + u8_t flags; + u8_t timer; +}; + +void ip_reass_init(void); +void ip_reass_tmr(void); +struct pbuf * ip4_reass(struct pbuf *p); +#endif /* IP_REASSEMBLY */ + +#if IP_FRAG +#if !LWIP_NETIF_TX_SINGLE_PBUF +#ifndef LWIP_PBUF_CUSTOM_REF_DEFINED +#define LWIP_PBUF_CUSTOM_REF_DEFINED +/** A custom pbuf that holds a reference to another pbuf, which is freed + * when this custom pbuf is freed. This is used to create a custom PBUF_REF + * that points into the original pbuf. */ +struct pbuf_custom_ref { + /** 'base class' */ + struct pbuf_custom pc; + /** pointer to the original pbuf that is referenced */ + struct pbuf *original; +}; +#endif /* LWIP_PBUF_CUSTOM_REF_DEFINED */ +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +err_t ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest); +#endif /* IP_FRAG */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP4_FRAG_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h new file mode 100644 index 00000000..f894e063 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h @@ -0,0 +1,93 @@ +/** + * @file + * + * IPv6 layer. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_H +#define LWIP_HDR_IP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_addr.h" +#include "lwip/prot/ip6.h" +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" + +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct netif *ip6_route(const ip6_addr_t *src, const ip6_addr_t *dest); +const ip_addr_t *ip6_select_source_address(struct netif *netif, const ip6_addr_t * dest); +err_t ip6_input(struct pbuf *p, struct netif *inp); +err_t ip6_output(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth); +err_t ip6_output_if(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif *netif); +err_t ip6_output_if_src(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif *netif); +#if LWIP_NETIF_USE_HINTS +err_t ip6_output_hinted(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif_hint *netif_hint); +#endif /* LWIP_NETIF_USE_HINTS */ +#if LWIP_IPV6_MLD +err_t ip6_options_add_hbh_ra(struct pbuf * p, u8_t nexth, u8_t value); +#endif /* LWIP_IPV6_MLD */ + +#define ip6_netif_get_local_ip(netif, dest) (((netif) != NULL) ? \ + ip6_select_source_address(netif, dest) : NULL) + +#if IP6_DEBUG +void ip6_debug_print(struct pbuf *p); +#else +#define ip6_debug_print(p) +#endif /* IP6_DEBUG */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_IP6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h new file mode 100644 index 00000000..29c2a34d --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h @@ -0,0 +1,352 @@ +/** + * @file + * + * IPv6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * Structs and macros for handling IPv6 addresses. + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_ADDR_H +#define LWIP_HDR_IP6_ADDR_H + +#include "lwip/opt.h" +#include "def.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_zone.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +/** This is the aligned version of ip6_addr_t, + used as local variable, on the stack, etc. */ +struct ip6_addr { + u32_t addr[4]; +#if LWIP_IPV6_SCOPES + u8_t zone; +#endif /* LWIP_IPV6_SCOPES */ +}; + +/** IPv6 address */ +typedef struct ip6_addr ip6_addr_t; + +/** Set an IPv6 partial address given by byte-parts */ +#define IP6_ADDR_PART(ip6addr, index, a,b,c,d) \ + (ip6addr)->addr[index] = PP_HTONL(LWIP_MAKEU32(a,b,c,d)) + +/** Set a full IPv6 address by passing the 4 u32_t indices in network byte order + (use PP_HTONL() for constants) */ +#define IP6_ADDR(ip6addr, idx0, idx1, idx2, idx3) do { \ + (ip6addr)->addr[0] = idx0; \ + (ip6addr)->addr[1] = idx1; \ + (ip6addr)->addr[2] = idx2; \ + (ip6addr)->addr[3] = idx3; \ + ip6_addr_clear_zone(ip6addr); } while(0) + +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK1(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[0]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK2(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[0])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK3(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[1]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK4(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[1])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK5(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[2]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK6(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[2])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK7(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[3]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK8(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[3])) & 0xffff)) + +/** Copy IPv6 address - faster than ip6_addr_set: no NULL check */ +#define ip6_addr_copy(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = (src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; \ + ip6_addr_copy_zone((dest), (src)); }while(0) +/** Safely copy one IPv6 address to another (src may be NULL) */ +#define ip6_addr_set(dest, src) do{(dest)->addr[0] = (src) == NULL ? 0 : (src)->addr[0]; \ + (dest)->addr[1] = (src) == NULL ? 
0 : (src)->addr[1]; \ + (dest)->addr[2] = (src) == NULL ? 0 : (src)->addr[2]; \ + (dest)->addr[3] = (src) == NULL ? 0 : (src)->addr[3]; \ + ip6_addr_set_zone((dest), (src) == NULL ? IP6_NO_ZONE : ip6_addr_zone(src)); }while(0) + +/** Copy packed IPv6 address to unpacked IPv6 address; zone is not set */ +#define ip6_addr_copy_from_packed(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = (src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; \ + ip6_addr_clear_zone(&dest); }while(0) + +/** Copy unpacked IPv6 address to packed IPv6 address; zone is lost */ +#define ip6_addr_copy_to_packed(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = (src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; }while(0) + +/** Set complete address to zero */ +#define ip6_addr_set_zero(ip6addr) do{(ip6addr)->addr[0] = 0; \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = 0; \ + ip6_addr_clear_zone(ip6addr);}while(0) + +/** Set address to ipv6 'any' (no need for lwip_htonl()) */ +#define ip6_addr_set_any(ip6addr) ip6_addr_set_zero(ip6addr) +/** Set address to ipv6 loopback address */ +#define ip6_addr_set_loopback(ip6addr) do{(ip6addr)->addr[0] = 0; \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000001UL); \ + ip6_addr_clear_zone(ip6addr);}while(0) +/** Safely copy one IPv6 address to another and change byte order + * from host- to network-order. */ +#define ip6_addr_set_hton(dest, src) do{(dest)->addr[0] = (src) == NULL ? 0 : lwip_htonl((src)->addr[0]); \ + (dest)->addr[1] = (src) == NULL ? 0 : lwip_htonl((src)->addr[1]); \ + (dest)->addr[2] = (src) == NULL ? 0 : lwip_htonl((src)->addr[2]); \ + (dest)->addr[3] = (src) == NULL ? 0 : lwip_htonl((src)->addr[3]); \ + ip6_addr_set_zone((dest), (src) == NULL ? IP6_NO_ZONE : ip6_addr_zone(src));}while(0) + + +/** Compare IPv6 networks, ignoring zone information. To be used sparingly! */ +#define ip6_addr_netcmp_zoneless(addr1, addr2) (((addr1)->addr[0] == (addr2)->addr[0]) && \ + ((addr1)->addr[1] == (addr2)->addr[1])) + +/** + * Determine if two IPv6 address are on the same network. + * + * @param addr1 IPv6 address 1 + * @param addr2 IPv6 address 2 + * @return 1 if the network identifiers of both address match, 0 if not + */ +#define ip6_addr_netcmp(addr1, addr2) (ip6_addr_netcmp_zoneless((addr1), (addr2)) && \ + ip6_addr_cmp_zone((addr1), (addr2))) + +/* Exact-host comparison *after* ip6_addr_netcmp() succeeded, for efficiency. */ +#define ip6_addr_nethostcmp(addr1, addr2) (((addr1)->addr[2] == (addr2)->addr[2]) && \ + ((addr1)->addr[3] == (addr2)->addr[3])) + +/** Compare IPv6 addresses, ignoring zone information. To be used sparingly! */ +#define ip6_addr_cmp_zoneless(addr1, addr2) (((addr1)->addr[0] == (addr2)->addr[0]) && \ + ((addr1)->addr[1] == (addr2)->addr[1]) && \ + ((addr1)->addr[2] == (addr2)->addr[2]) && \ + ((addr1)->addr[3] == (addr2)->addr[3])) +/** + * Determine if two IPv6 addresses are the same. In particular, the address + * part of both must be the same, and the zone must be compatible. 
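Illustration only, not part of this patch: the setter and comparison macros above in use, with two addresses from the 2001:db8::/32 documentation prefix. Per the IP6_ADDR comment, the word constants are wrapped in PP_HTONL; the variable names are arbitrary.

ip6_addr_t a, b;
IP6_ADDR(&a, PP_HTONL(0x20010db8UL), PP_HTONL(0x00000000UL),
             PP_HTONL(0x00000000UL), PP_HTONL(0x00000001UL));   /* 2001:db8::1 */
IP6_ADDR(&b, PP_HTONL(0x20010db8UL), PP_HTONL(0x00000000UL),
             PP_HTONL(0x00000000UL), PP_HTONL(0x00000002UL));   /* 2001:db8::2 */

int same_net  = ip6_addr_netcmp_zoneless(&a, &b);  /* nonzero: same first 64 bits */
int same_addr = ip6_addr_cmp_zoneless(&a, &b);     /* 0: host parts differ */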
+ * + * @param addr1 IPv6 address 1 + * @param addr2 IPv6 address 2 + * @return 1 if the addresses are considered equal, 0 if not + */ +#define ip6_addr_cmp(addr1, addr2) (ip6_addr_cmp_zoneless((addr1), (addr2)) && \ + ip6_addr_cmp_zone((addr1), (addr2))) + +/** Compare IPv6 address to packed address and zone */ +#define ip6_addr_cmp_packed(ip6addr, paddr, zone_idx) (((ip6addr)->addr[0] == (paddr)->addr[0]) && \ + ((ip6addr)->addr[1] == (paddr)->addr[1]) && \ + ((ip6addr)->addr[2] == (paddr)->addr[2]) && \ + ((ip6addr)->addr[3] == (paddr)->addr[3]) && \ + ip6_addr_equals_zone((ip6addr), (zone_idx))) + +#define ip6_get_subnet_id(ip6addr) (lwip_htonl((ip6addr)->addr[2]) & 0x0000ffffUL) + +#define ip6_addr_isany_val(ip6addr) (((ip6addr).addr[0] == 0) && \ + ((ip6addr).addr[1] == 0) && \ + ((ip6addr).addr[2] == 0) && \ + ((ip6addr).addr[3] == 0)) +#define ip6_addr_isany(ip6addr) (((ip6addr) == NULL) || ip6_addr_isany_val(*(ip6addr))) + +#define ip6_addr_isloopback(ip6addr) (((ip6addr)->addr[0] == 0UL) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) + +#define ip6_addr_isglobal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xe0000000UL)) == PP_HTONL(0x20000000UL)) + +#define ip6_addr_islinklocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xffc00000UL)) == PP_HTONL(0xfe800000UL)) + +#define ip6_addr_issitelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xffc00000UL)) == PP_HTONL(0xfec00000UL)) + +#define ip6_addr_isuniquelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xfe000000UL)) == PP_HTONL(0xfc000000UL)) + +#define ip6_addr_isipv4mappedipv6(ip6addr) (((ip6addr)->addr[0] == 0) && ((ip6addr)->addr[1] == 0) && (((ip6addr)->addr[2]) == PP_HTONL(0x0000FFFFUL))) + +#define ip6_addr_ismulticast(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff000000UL)) == PP_HTONL(0xff000000UL)) +#define ip6_addr_multicast_transient_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00100000UL)) +#define ip6_addr_multicast_prefix_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00200000UL)) +#define ip6_addr_multicast_rendezvous_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00400000UL)) +#define ip6_addr_multicast_scope(ip6addr) ((lwip_htonl((ip6addr)->addr[0]) >> 16) & 0xf) +#define IP6_MULTICAST_SCOPE_RESERVED 0x0 +#define IP6_MULTICAST_SCOPE_RESERVED0 0x0 +#define IP6_MULTICAST_SCOPE_INTERFACE_LOCAL 0x1 +#define IP6_MULTICAST_SCOPE_LINK_LOCAL 0x2 +#define IP6_MULTICAST_SCOPE_RESERVED3 0x3 +#define IP6_MULTICAST_SCOPE_ADMIN_LOCAL 0x4 +#define IP6_MULTICAST_SCOPE_SITE_LOCAL 0x5 +#define IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL 0x8 +#define IP6_MULTICAST_SCOPE_GLOBAL 0xe +#define IP6_MULTICAST_SCOPE_RESERVEDF 0xf +#define ip6_addr_ismulticast_iflocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff010000UL)) +#define ip6_addr_ismulticast_linklocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff020000UL)) +#define ip6_addr_ismulticast_adminlocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff040000UL)) +#define ip6_addr_ismulticast_sitelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff050000UL)) +#define ip6_addr_ismulticast_orglocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff080000UL)) +#define ip6_addr_ismulticast_global(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff0e0000UL)) + +/* Scoping note: while interface-local and link-local multicast addresses do + * have a scope (i.e., they are meaningful 
only in the context of a particular + * interface), the following functions are not assigning or comparing zone + * indices. The reason for this is backward compatibility. Any call site that + * produces a non-global multicast address must assign a multicast address as + * appropriate itself. */ + +#define ip6_addr_isallnodes_iflocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff010000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) + +#define ip6_addr_isallnodes_linklocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) +#define ip6_addr_set_allnodes_linklocal(ip6addr) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000001UL); \ + ip6_addr_clear_zone(ip6addr); }while(0) + +#define ip6_addr_isallrouters_linklocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000002UL))) +#define ip6_addr_set_allrouters_linklocal(ip6addr) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000002UL); \ + ip6_addr_clear_zone(ip6addr); }while(0) + +#define ip6_addr_issolicitednode(ip6addr) ( ((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[2] == PP_HTONL(0x00000001UL)) && \ + (((ip6addr)->addr[3] & PP_HTONL(0xff000000UL)) == PP_HTONL(0xff000000UL)) ) + +#define ip6_addr_set_solicitednode(ip6addr, if_id) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = PP_HTONL(0x00000001UL); \ + (ip6addr)->addr[3] = (PP_HTONL(0xff000000UL) | (if_id)); \ + ip6_addr_clear_zone(ip6addr); }while(0) + +#define ip6_addr_cmp_solicitednode(ip6addr, sn_addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0) && \ + ((ip6addr)->addr[2] == PP_HTONL(0x00000001UL)) && \ + ((ip6addr)->addr[3] == (PP_HTONL(0xff000000UL) | (sn_addr)->addr[3]))) + +/* IPv6 address states. */ +#define IP6_ADDR_INVALID 0x00 +#define IP6_ADDR_TENTATIVE 0x08 +#define IP6_ADDR_TENTATIVE_1 0x09 /* 1 probe sent */ +#define IP6_ADDR_TENTATIVE_2 0x0a /* 2 probes sent */ +#define IP6_ADDR_TENTATIVE_3 0x0b /* 3 probes sent */ +#define IP6_ADDR_TENTATIVE_4 0x0c /* 4 probes sent */ +#define IP6_ADDR_TENTATIVE_5 0x0d /* 5 probes sent */ +#define IP6_ADDR_TENTATIVE_6 0x0e /* 6 probes sent */ +#define IP6_ADDR_TENTATIVE_7 0x0f /* 7 probes sent */ +#define IP6_ADDR_VALID 0x10 /* This bit marks an address as valid (preferred or deprecated) */ +#define IP6_ADDR_PREFERRED 0x30 +#define IP6_ADDR_DEPRECATED 0x10 /* Same as VALID (valid but not preferred) */ +#define IP6_ADDR_DUPLICATED 0x40 /* Failed DAD test, not valid */ + +#define IP6_ADDR_TENTATIVE_COUNT_MASK 0x07 /* 1-7 probes sent */ + +#define ip6_addr_isinvalid(addr_state) (addr_state == IP6_ADDR_INVALID) +#define ip6_addr_istentative(addr_state) (addr_state & IP6_ADDR_TENTATIVE) +#define ip6_addr_isvalid(addr_state) (addr_state & IP6_ADDR_VALID) /* Include valid, preferred, and deprecated. 
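Illustration only, not part of this patch: the solicited-node macros above are what Neighbor Discovery uses to derive the ff02::1:ffXX:XXXX group for a unicast address. The if_id argument is the last 32-bit word of the unicast address in network byte order; the variable names here are arbitrary.

ip6_addr_t unicast, snma;
IP6_ADDR(&unicast, PP_HTONL(0xfe800000UL), PP_HTONL(0x00000000UL),
                   PP_HTONL(0x020000ffUL), PP_HTONL(0xfe001234UL));  /* fe80::200:ff:fe00:1234 */
ip6_addr_set_solicitednode(&snma, unicast.addr[3]);
/* snma is now ff02::1:ff00:1234 and satisfies ip6_addr_issolicitednode(&snma). */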
*/ +#define ip6_addr_ispreferred(addr_state) (addr_state == IP6_ADDR_PREFERRED) +#define ip6_addr_isdeprecated(addr_state) (addr_state == IP6_ADDR_DEPRECATED) +#define ip6_addr_isduplicated(addr_state) (addr_state == IP6_ADDR_DUPLICATED) + +#if LWIP_IPV6_ADDRESS_LIFETIMES +#define IP6_ADDR_LIFE_STATIC (0) +#define IP6_ADDR_LIFE_INFINITE (0xffffffffUL) +#define ip6_addr_life_isstatic(addr_life) ((addr_life) == IP6_ADDR_LIFE_STATIC) +#define ip6_addr_life_isinfinite(addr_life) ((addr_life) == IP6_ADDR_LIFE_INFINITE) +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + +#define ip6_addr_debug_print_parts(debug, a, b, c, d, e, f, g, h) \ + LWIP_DEBUGF(debug, ("%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F, \ + a, b, c, d, e, f, g, h)) +#define ip6_addr_debug_print(debug, ipaddr) \ + ip6_addr_debug_print_parts(debug, \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK1(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK2(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK3(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK4(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK5(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK6(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK7(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK8(ipaddr) : 0)) +#define ip6_addr_debug_print_val(debug, ipaddr) \ + ip6_addr_debug_print_parts(debug, \ + IP6_ADDR_BLOCK1(&(ipaddr)), \ + IP6_ADDR_BLOCK2(&(ipaddr)), \ + IP6_ADDR_BLOCK3(&(ipaddr)), \ + IP6_ADDR_BLOCK4(&(ipaddr)), \ + IP6_ADDR_BLOCK5(&(ipaddr)), \ + IP6_ADDR_BLOCK6(&(ipaddr)), \ + IP6_ADDR_BLOCK7(&(ipaddr)), \ + IP6_ADDR_BLOCK8(&(ipaddr))) + +#define IP6ADDR_STRLEN_MAX 46 + +int ip6addr_aton(const char *cp, ip6_addr_t *addr); +/** returns ptr to static buffer; not reentrant! */ +char *ip6addr_ntoa(const ip6_addr_t *addr); +char *ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen); + + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_IP6_ADDR_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h new file mode 100644 index 00000000..87e0e86a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h @@ -0,0 +1,144 @@ +/** + * @file + * + * IPv6 fragmentation and reassembly. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_FRAG_H +#define LWIP_HDR_IP6_FRAG_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip6.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */ + +/** The IPv6 reassembly timer interval in milliseconds. */ +#define IP6_REASS_TMR_INTERVAL 1000 + +/** IP6_FRAG_COPYHEADER==1: for platforms where sizeof(void*) > 4, "struct + * ip6_reass_helper" is too large to be stored in the IPv6 fragment header, and + * will bleed into the header before it, which may be the IPv6 header or an + * extension header. This means that for each first fragment packet, we need to + * 1) make a copy of some IPv6 header fields (src+dest) that we need later on, + * just in case we do overwrite part of the IPv6 header, and 2) make a copy of + * the header data that we overwrote, so that we can restore it before either + * completing reassembly or sending an ICMPv6 reply. The last part is true even + * if this setting is disabled, but if it is enabled, we need to save a bit + * more data (up to the size of a pointer) because we overwrite more. */ +#ifndef IPV6_FRAG_COPYHEADER +#define IPV6_FRAG_COPYHEADER 0 +#endif + +/* With IPV6_FRAG_COPYHEADER==1, a helper structure may (or, depending on the + * presence of extensions, may not) overwrite part of the IP header. Therefore, + * we copy the fields that we need from the IP header for as long as the helper + * structure may still be in place. This is easier than temporarily restoring + * those fields in the IP header each time we need to perform checks on them. */ +#if IPV6_FRAG_COPYHEADER +#define IPV6_FRAG_SRC(ipr) ((ipr)->src) +#define IPV6_FRAG_DEST(ipr) ((ipr)->dest) +#else /* IPV6_FRAG_COPYHEADER */ +#define IPV6_FRAG_SRC(ipr) ((ipr)->iphdr->src) +#define IPV6_FRAG_DEST(ipr) ((ipr)->iphdr->dest) +#endif /* IPV6_FRAG_COPYHEADER */ + +/** IPv6 reassembly helper struct. + * This is exported because memp needs to know the size. + */ +struct ip6_reassdata { + struct ip6_reassdata *next; + struct pbuf *p; + struct ip6_hdr *iphdr; /* pointer to the first (original) IPv6 header */ +#if IPV6_FRAG_COPYHEADER + ip6_addr_p_t src; /* copy of the source address in the IP header */ + ip6_addr_p_t dest; /* copy of the destination address in the IP header */ + /* This buffer (for the part of the original header that we overwrite) will + * be slightly oversized, but we cannot compute the exact size from here. */ + u8_t orig_hdr[sizeof(struct ip6_frag_hdr) + sizeof(void*)]; +#else /* IPV6_FRAG_COPYHEADER */ + /* In this case we still need the buffer, for sending ICMPv6 replies. 
*/ + u8_t orig_hdr[sizeof(struct ip6_frag_hdr)]; +#endif /* IPV6_FRAG_COPYHEADER */ + u32_t identification; + u16_t datagram_len; + u8_t nexth; + u8_t timer; +#if LWIP_IPV6_SCOPES + u8_t src_zone; /* zone of original packet's source address */ + u8_t dest_zone; /* zone of original packet's destination address */ +#endif /* LWIP_IPV6_SCOPES */ +}; + +#define ip6_reass_init() /* Compatibility define */ +void ip6_reass_tmr(void); +struct pbuf *ip6_reass(struct pbuf *p); + +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_FRAG /* don't build if not configured for use in lwipopts.h */ + +#ifndef LWIP_PBUF_CUSTOM_REF_DEFINED +#define LWIP_PBUF_CUSTOM_REF_DEFINED +/** A custom pbuf that holds a reference to another pbuf, which is freed + * when this custom pbuf is freed. This is used to create a custom PBUF_REF + * that points into the original pbuf. */ +struct pbuf_custom_ref { + /** 'base class' */ + struct pbuf_custom pc; + /** pointer to the original pbuf that is referenced */ + struct pbuf *original; +}; +#endif /* LWIP_PBUF_CUSTOM_REF_DEFINED */ + +err_t ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest); + +#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP6_FRAG_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h new file mode 100644 index 00000000..525790e6 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h @@ -0,0 +1,304 @@ +/** + * @file + * + * IPv6 address scopes, zones, and scoping policy. + * + * This header provides the means to implement support for IPv6 address scopes, + * as per RFC 4007. An address scope can be either global or more constrained. + * In lwIP, we say that an address "has a scope" or "is scoped" when its scope + * is constrained, in which case the address is meaningful only in a specific + * "zone." For unicast addresses, only link-local addresses have a scope; in + * that case, the scope is the link. For multicast addresses, there are various + * scopes defined by RFC 4007 and others. For any constrained scope, a system + * must establish a (potentially one-to-many) mapping between zones and local + * interfaces. For example, a link-local address is valid on only one link (its + * zone). That link may be attached to one or more local interfaces. The + * decisions on which scopes are constrained and the mapping between zones and + * interfaces is together what we refer to as the "scoping policy" - more on + * this in a bit. + * + * In lwIP, each IPv6 address has an associated zone index. This zone index may + * be set to "no zone" (IP6_NO_ZONE, 0) or an actual zone. We say that an + * address "has a zone" or "is zoned" when its zone index is *not* set to "no + * zone." In lwIP, in principle, each address should be "properly zoned," which + * means that if the address has a zone if and only if has a scope. As such, it + * is a rule that an unscoped (e.g., global) address must never have a zone. + * Even though one could argue that there is always one zone even for global + * scopes, this rule exists for implementation simplicity. Violation of the + * rule will trigger assertions or otherwise result in undesired behavior. + * + * Backward compatibility prevents us from requiring that applications always + * provide properly zoned addresses. 
We do enforce the rule that the in the + * lwIP link layer (everything below netif->output_ip6() and in particular ND6) + * *all* addresses are properly zoned. Thus, on the output paths down the + * stack, various places deal with the case of addresses that lack a zone. + * Some of them are best-effort for efficiency (e.g. the PCB bind and connect + * API calls' attempts to add missing zones); ultimately the IPv6 output + * handler (@ref ip6_output_if_src) will set a zone if necessary. + * + * Aside from dealing with scoped addresses lacking a zone, a proper IPv6 + * implementation must also ensure that a packet with a scoped source and/or + * destination address does not leave its zone. This is currently implemented + * in the input and forward functions. However, for output, these checks are + * deliberately omitted in order to keep the implementation lightweight. The + * routing algorithm in @ref ip6_route will take decisions such that it will + * not cause zone violations unless the application sets bad addresses, though. + * + * In terms of scoping policy, lwIP implements the default policy from RFC 4007 + * using macros in this file. This policy considers link-local unicast + * addresses and (only) interface-local and link-local multicast addresses as + * having a scope. For all these addresses, the zone is equal to the interface. + * As shown below in this file, it is possible to implement a custom policy. + */ + +/* + * Copyright (c) 2017 The MINIX 3 Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: David van Moolenbroek + * + */ +#ifndef LWIP_HDR_IP6_ZONE_H +#define LWIP_HDR_IP6_ZONE_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ip6_zones IPv6 Zones + * @ingroup ip6 + * @{ + */ + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +/** Identifier for "no zone". */ +#define IP6_NO_ZONE 0 + +#if LWIP_IPV6_SCOPES + +/** Zone initializer for static IPv6 address initialization, including comma. 
*/ +#define IPADDR6_ZONE_INIT , IP6_NO_ZONE + +/** Return the zone index of the given IPv6 address; possibly "no zone". */ +#define ip6_addr_zone(ip6addr) ((ip6addr)->zone) + +/** Does the given IPv6 address have a zone set? (0/1) */ +#define ip6_addr_has_zone(ip6addr) (ip6_addr_zone(ip6addr) != IP6_NO_ZONE) + +/** Set the zone field of an IPv6 address to a particular value. */ +#define ip6_addr_set_zone(ip6addr, zone_idx) ((ip6addr)->zone = (zone_idx)) + +/** Clear the zone field of an IPv6 address, setting it to "no zone". */ +#define ip6_addr_clear_zone(ip6addr) ((ip6addr)->zone = IP6_NO_ZONE) + +/** Copy the zone field from the second IPv6 address to the first one. */ +#define ip6_addr_copy_zone(ip6addr1, ip6addr2) ((ip6addr1).zone = (ip6addr2).zone) + +/** Is the zone field of the given IPv6 address equal to the given zone index? (0/1) */ +#define ip6_addr_equals_zone(ip6addr, zone_idx) ((ip6addr)->zone == (zone_idx)) + +/** Are the zone fields of the given IPv6 addresses equal? (0/1) + * This macro must only be used on IPv6 addresses of the same scope. */ +#define ip6_addr_cmp_zone(ip6addr1, ip6addr2) ((ip6addr1)->zone == (ip6addr2)->zone) + +/** Symbolic constants for the 'type' parameters in some of the macros. + * These exist for efficiency only, allowing the macros to avoid certain tests + * when the address is known not to be of a certain type. Dead code elimination + * will do the rest. IP6_MULTICAST is supported but currently not optimized. + * @see ip6_addr_has_scope, ip6_addr_assign_zone, ip6_addr_lacks_zone. + */ +enum lwip_ipv6_scope_type +{ + /** Unknown */ + IP6_UNKNOWN = 0, + /** Unicast */ + IP6_UNICAST = 1, + /** Multicast */ + IP6_MULTICAST = 2 +}; + +/** IPV6_CUSTOM_SCOPES: together, the following three macro definitions, + * @ref ip6_addr_has_scope, @ref ip6_addr_assign_zone, and + * @ref ip6_addr_test_zone, completely define the lwIP scoping policy. + * The definitions below implement the default policy from RFC 4007 Sec. 6. + * Should an implementation desire to implement a different policy, it can + * define IPV6_CUSTOM_SCOPES to 1 and supply its own definitions for the three + * macros instead. + */ +#ifndef IPV6_CUSTOM_SCOPES +#define IPV6_CUSTOM_SCOPES 0 +#endif /* !IPV6_CUSTOM_SCOPES */ + +#if !IPV6_CUSTOM_SCOPES + +/** + * Determine whether an IPv6 address has a constrained scope, and as such is + * meaningful only if accompanied by a zone index to identify the scope's zone. + * The given address type may be used to eliminate at compile time certain + * checks that will evaluate to false at run time anyway. + * + * This default implementation follows the default model of RFC 4007, where + * only interface-local and link-local scopes are defined. + * + * Even though the unicast loopback address does have an implied link-local + * scope, in this implementation it does not have an explicitly assigned zone + * index. As such it should not be tested for in this macro. + * + * @param ip6addr the IPv6 address (const); only its address part is examined. + * @param type address type; see @ref lwip_ipv6_scope_type. + * @return 1 if the address has a constrained scope, 0 if it does not. + */ +#define ip6_addr_has_scope(ip6addr, type) \ + (ip6_addr_islinklocal(ip6addr) || (((type) != IP6_UNICAST) && \ + (ip6_addr_ismulticast_iflocal(ip6addr) || \ + ip6_addr_ismulticast_linklocal(ip6addr)))) + +/** + * Assign a zone index to an IPv6 address, based on a network interface. 
If the + * given address has a scope, the assigned zone index is that scope's zone of + * the given netif; otherwise, the assigned zone index is "no zone". + * + * This default implementation follows the default model of RFC 4007, where + * only interface-local and link-local scopes are defined, and the zone index + * of both of those scopes always equals the index of the network interface. + * As such, this default implementation need not distinguish between different + * constrained scopes when assigning the zone. + * + * @param ip6addr the IPv6 address; its address part is examined, and its zone + * index is assigned. + * @param type address type; see @ref lwip_ipv6_scope_type. + * @param netif the network interface (const). + */ +#define ip6_addr_assign_zone(ip6addr, type, netif) \ + (ip6_addr_set_zone((ip6addr), \ + ip6_addr_has_scope((ip6addr), (type)) ? netif_get_index(netif) : 0)) + +/** + * Test whether an IPv6 address is "zone-compatible" with a network interface. + * That is, test whether the network interface is part of the zone associated + * with the address. For efficiency, this macro is only ever called if the + * given address is either scoped or zoned, and thus, it need not test this. + * If an address is scoped but not zoned, or zoned and not scoped, it is + * considered not zone-compatible with any netif. + * + * This default implementation follows the default model of RFC 4007, where + * only interface-local and link-local scopes are defined, and the zone index + * of both of those scopes always equals the index of the network interface. + * As such, there is always only one matching netif for a specific zone index, + * but all call sites of this macro currently support multiple matching netifs + * as well (at no additional expense in the common case). + * + * @param ip6addr the IPv6 address (const). + * @param netif the network interface (const). + * @return 1 if the address is scope-compatible with the netif, 0 if not. + */ +#define ip6_addr_test_zone(ip6addr, netif) \ + (ip6_addr_equals_zone((ip6addr), netif_get_index(netif))) + +#endif /* !IPV6_CUSTOM_SCOPES */ + +/** Does the given IPv6 address have a scope, and as such should also have a + * zone to be meaningful, but does not actually have a zone? (0/1) */ +#define ip6_addr_lacks_zone(ip6addr, type) \ + (!ip6_addr_has_zone(ip6addr) && ip6_addr_has_scope((ip6addr), (type))) + +/** + * Try to select a zone for a scoped address that does not yet have a zone. + * Called from PCB bind and connect routines, for two reasons: 1) to save on + * this (relatively expensive) selection for every individual packet route + * operation and 2) to allow the application to obtain the selected zone from + * the PCB as is customary for e.g. getsockname/getpeername BSD socket calls. + * + * Ideally, callers would always supply a properly zoned address, in which case + * this function would not be needed. It exists both for compatibility with the + * BSD socket API (which accepts zoneless destination addresses) and for + * backward compatibility with pre-scoping lwIP code. + * + * It may be impossible to select a zone, e.g. if there are no netifs. In that + * case, the address's zone field will be left as is. + * + * @param dest the IPv6 address for which to select and set a zone. + * @param src source IPv6 address (const); may be equal to dest. 
+ */ +#define ip6_addr_select_zone(dest, src) do { struct netif *selected_netif; \ + selected_netif = ip6_route((src), (dest)); \ + if (selected_netif != NULL) { \ + ip6_addr_assign_zone((dest), IP6_UNKNOWN, selected_netif); \ + } } while (0) + +/** + * @} + */ + +#else /* LWIP_IPV6_SCOPES */ + +#define IPADDR6_ZONE_INIT +#define ip6_addr_zone(ip6addr) (IP6_NO_ZONE) +#define ip6_addr_has_zone(ip6addr) (0) +#define ip6_addr_set_zone(ip6addr, zone_idx) +#define ip6_addr_clear_zone(ip6addr) +#define ip6_addr_copy_zone(ip6addr1, ip6addr2) +#define ip6_addr_equals_zone(ip6addr, zone_idx) (1) +#define ip6_addr_cmp_zone(ip6addr1, ip6addr2) (1) +#define IPV6_CUSTOM_SCOPES 0 +#define ip6_addr_has_scope(ip6addr, type) (0) +#define ip6_addr_assign_zone(ip6addr, type, netif) +#define ip6_addr_test_zone(ip6addr, netif) (1) +#define ip6_addr_lacks_zone(ip6addr, type) (0) +#define ip6_addr_select_zone(ip6addr, src) + +#endif /* LWIP_IPV6_SCOPES */ + +#if LWIP_IPV6_SCOPES && LWIP_IPV6_SCOPES_DEBUG + +/** Verify that the given IPv6 address is properly zoned. */ +#define IP6_ADDR_ZONECHECK(ip6addr) LWIP_ASSERT("IPv6 zone check failed", \ + ip6_addr_has_scope(ip6addr, IP6_UNKNOWN) == ip6_addr_has_zone(ip6addr)) + +/** Verify that the given IPv6 address is properly zoned for the given netif. */ +#define IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif) LWIP_ASSERT("IPv6 netif zone check failed", \ + ip6_addr_has_scope(ip6addr, IP6_UNKNOWN) ? \ + (ip6_addr_has_zone(ip6addr) && \ + (((netif) == NULL) || ip6_addr_test_zone((ip6addr), (netif)))) : \ + !ip6_addr_has_zone(ip6addr)) + +#else /* LWIP_IPV6_SCOPES && LWIP_IPV6_SCOPES_DEBUG */ + +#define IP6_ADDR_ZONECHECK(ip6addr) +#define IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif) + +#endif /* LWIP_IPV6_SCOPES && LWIP_IPV6_SCOPES_DEBUG */ + +#endif /* LWIP_IPV6 */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP6_ZONE_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h b/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h new file mode 100644 index 00000000..2f977709 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h @@ -0,0 +1,438 @@ +/** + * @file + * IP address API (common IPv4 and IPv6) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
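/* Example (illustrative sketch, not part of the patch): how the zone macros
 * above are typically used when LWIP_IPV6_SCOPES is enabled. The link-local
 * address fe80::1 and the netif argument are placeholders.
 */
#include "lwip/ip6_addr.h"
#include "lwip/netif.h"
#include "lwip/def.h"

static void example_zone_linklocal_dest(struct netif *netif)
{
  ip6_addr_t dest;

  /* fe80::1 is link-local, i.e. it has a constrained scope. */
  IP6_ADDR(&dest, PP_HTONL(0xfe800000UL), 0, 0, PP_HTONL(0x00000001UL));

  /* A scoped address without a zone is ambiguous; bind it to this netif's
   * zone (default policy: zone index == netif index). */
  if (ip6_addr_lacks_zone(&dest, IP6_UNICAST)) {
    ip6_addr_assign_zone(&dest, IP6_UNICAST, netif);
  }

  /* From here on, ip6_addr_test_zone(&dest, netif) evaluates to 1. */
}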
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP_ADDR_H +#define LWIP_HDR_IP_ADDR_H + +#include "lwip/opt.h" +#include "lwip/def.h" + +#include "lwip/ip4_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup ipaddr + * IP address types for use in ip_addr_t.type member. + * @see tcp_new_ip_type(), udp_new_ip_type(), raw_new_ip_type(). + */ +enum lwip_ip_addr_type { + /** IPv4 */ + IPADDR_TYPE_V4 = 0U, + /** IPv6 */ + IPADDR_TYPE_V6 = 6U, + /** IPv4+IPv6 ("dual-stack") */ + IPADDR_TYPE_ANY = 46U +}; + +#if LWIP_IPV4 && LWIP_IPV6 +/** + * @ingroup ipaddr + * A union struct for both IP version's addresses. + * ATTENTION: watch out for its size when adding IPv6 address scope! + */ +typedef struct ip_addr { + union { + ip6_addr_t ip6; + ip4_addr_t ip4; + } u_addr; + /** @ref lwip_ip_addr_type */ + u8_t type; +} ip_addr_t; + +extern const ip_addr_t ip_addr_any_type; + +/** @ingroup ip4addr */ +#define IPADDR4_INIT(u32val) { { { { u32val, 0ul, 0ul, 0ul } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_V4 } +/** @ingroup ip4addr */ +#define IPADDR4_INIT_BYTES(a,b,c,d) IPADDR4_INIT(PP_HTONL(LWIP_MAKEU32(a,b,c,d))) + +/** @ingroup ip6addr */ +#define IPADDR6_INIT(a, b, c, d) { { { { a, b, c, d } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_V6 } +/** @ingroup ip6addr */ +#define IPADDR6_INIT_HOST(a, b, c, d) { { { { PP_HTONL(a), PP_HTONL(b), PP_HTONL(c), PP_HTONL(d) } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_V6 } + +/** @ingroup ipaddr */ +#define IP_IS_ANY_TYPE_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_ANY) +/** @ingroup ipaddr */ +#define IPADDR_ANY_TYPE_INIT { { { { 0ul, 0ul, 0ul, 0ul } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_ANY } + +/** @ingroup ip4addr */ +#define IP_IS_V4_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_V4) +/** @ingroup ip6addr */ +#define IP_IS_V6_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_V6) +/** @ingroup ip4addr */ +#define IP_IS_V4(ipaddr) (((ipaddr) == NULL) || IP_IS_V4_VAL(*(ipaddr))) +/** @ingroup ip6addr */ +#define IP_IS_V6(ipaddr) (((ipaddr) != NULL) && IP_IS_V6_VAL(*(ipaddr))) + +#define IP_SET_TYPE_VAL(ipaddr, iptype) do { (ipaddr).type = (iptype); }while(0) +#define IP_SET_TYPE(ipaddr, iptype) do { if((ipaddr) != NULL) { IP_SET_TYPE_VAL(*(ipaddr), iptype); }}while(0) +#define IP_GET_TYPE(ipaddr) ((ipaddr)->type) + +#define IP_ADDR_RAW_SIZE(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_V4 ? 
sizeof(ip4_addr_t) : sizeof(ip6_addr_t)) + +#define IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr) (IP_GET_TYPE(&pcb->local_ip) == IP_GET_TYPE(ipaddr)) +#define IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr) (IP_IS_ANY_TYPE_VAL(pcb->local_ip) || IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr)) + +/** @ingroup ip6addr + * Convert generic ip address to specific protocol version + */ +#define ip_2_ip6(ipaddr) (&((ipaddr)->u_addr.ip6)) +/** @ingroup ip4addr + * Convert generic ip address to specific protocol version + */ +#define ip_2_ip4(ipaddr) (&((ipaddr)->u_addr.ip4)) + +/** @ingroup ip4addr */ +#define IP_ADDR4(ipaddr,a,b,c,d) do { IP4_ADDR(ip_2_ip4(ipaddr),a,b,c,d); \ + IP_SET_TYPE_VAL(*(ipaddr), IPADDR_TYPE_V4); } while(0) +/** @ingroup ip6addr */ +#define IP_ADDR6(ipaddr,i0,i1,i2,i3) do { IP6_ADDR(ip_2_ip6(ipaddr),i0,i1,i2,i3); \ + IP_SET_TYPE_VAL(*(ipaddr), IPADDR_TYPE_V6); } while(0) +/** @ingroup ip6addr */ +#define IP_ADDR6_HOST(ipaddr,i0,i1,i2,i3) IP_ADDR6(ipaddr,PP_HTONL(i0),PP_HTONL(i1),PP_HTONL(i2),PP_HTONL(i3)) + +#define ip_clear_no4(ipaddr) do { ip_2_ip6(ipaddr)->addr[1] = \ + ip_2_ip6(ipaddr)->addr[2] = \ + ip_2_ip6(ipaddr)->addr[3] = 0; \ + ip6_addr_clear_zone(ip_2_ip6(ipaddr)); }while(0) + +/** @ingroup ipaddr */ +#define ip_addr_copy(dest, src) do{ IP_SET_TYPE_VAL(dest, IP_GET_TYPE(&src)); if(IP_IS_V6_VAL(src)){ \ + ip6_addr_copy(*ip_2_ip6(&(dest)), *ip_2_ip6(&(src))); }else{ \ + ip4_addr_copy(*ip_2_ip4(&(dest)), *ip_2_ip4(&(src))); ip_clear_no4(&dest); }}while(0) +/** @ingroup ip6addr */ +#define ip_addr_copy_from_ip6(dest, src) do{ \ + ip6_addr_copy(*ip_2_ip6(&(dest)), src); IP_SET_TYPE_VAL(dest, IPADDR_TYPE_V6); }while(0) +/** @ingroup ip6addr */ +#define ip_addr_copy_from_ip6_packed(dest, src) do{ \ + ip6_addr_copy_from_packed(*ip_2_ip6(&(dest)), src); IP_SET_TYPE_VAL(dest, IPADDR_TYPE_V6); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_copy_from_ip4(dest, src) do{ \ + ip4_addr_copy(*ip_2_ip4(&(dest)), src); IP_SET_TYPE_VAL(dest, IPADDR_TYPE_V4); ip_clear_no4(&dest); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_set_ip4_u32(ipaddr, val) do{if(ipaddr){ip4_addr_set_u32(ip_2_ip4(ipaddr), val); \ + IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(ipaddr); }}while(0) +/** @ingroup ip4addr */ +#define ip_addr_set_ip4_u32_val(ipaddr, val) do{ ip4_addr_set_u32(ip_2_ip4(&(ipaddr)), val); \ + IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(&ipaddr); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_get_ip4_u32(ipaddr) (((ipaddr) && IP_IS_V4(ipaddr)) ? 
\ + ip4_addr_get_u32(ip_2_ip4(ipaddr)) : 0) +/** @ingroup ipaddr */ +#define ip_addr_set(dest, src) do{ IP_SET_TYPE(dest, IP_GET_TYPE(src)); if(IP_IS_V6(src)){ \ + ip6_addr_set(ip_2_ip6(dest), ip_2_ip6(src)); }else{ \ + ip4_addr_set(ip_2_ip4(dest), ip_2_ip4(src)); ip_clear_no4(dest); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_ipaddr(dest, src) ip_addr_set(dest, src) +/** @ingroup ipaddr */ +#define ip_addr_set_zero(ipaddr) do{ \ + ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, 0); }while(0) +/** @ingroup ip5addr */ +#define ip_addr_set_zero_ip4(ipaddr) do{ \ + ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); }while(0) +/** @ingroup ip6addr */ +#define ip_addr_set_zero_ip6(ipaddr) do{ \ + ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_any(is_ipv6, ipaddr) do{if(is_ipv6){ \ + ip6_addr_set_any(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_any(ip_2_ip4(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(ipaddr); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_any_val(is_ipv6, ipaddr) do{if(is_ipv6){ \ + ip6_addr_set_any(ip_2_ip6(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_any(ip_2_ip4(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(&ipaddr); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_loopback(is_ipv6, ipaddr) do{if(is_ipv6){ \ + ip6_addr_set_loopback(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_loopback(ip_2_ip4(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(ipaddr); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_loopback_val(is_ipv6, ipaddr) do{if(is_ipv6){ \ + ip6_addr_set_loopback(ip_2_ip6(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_loopback(ip_2_ip4(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(&ipaddr); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_hton(dest, src) do{if(IP_IS_V6(src)){ \ + ip6_addr_set_hton(ip_2_ip6(dest), ip_2_ip6(src)); IP_SET_TYPE(dest, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_hton(ip_2_ip4(dest), ip_2_ip4(src)); IP_SET_TYPE(dest, IPADDR_TYPE_V4); ip_clear_no4(ipaddr); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_get_network(target, host, netmask) do{if(IP_IS_V6(host)){ \ + ip4_addr_set_zero(ip_2_ip4(target)); IP_SET_TYPE(target, IPADDR_TYPE_V6); } else { \ + ip4_addr_get_network(ip_2_ip4(target), ip_2_ip4(host), ip_2_ip4(netmask)); IP_SET_TYPE(target, IPADDR_TYPE_V4); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_netcmp(addr1, addr2, mask) ((IP_IS_V6(addr1) && IP_IS_V6(addr2)) ? \ + 0 : \ + ip4_addr_netcmp(ip_2_ip4(addr1), ip_2_ip4(addr2), mask)) +/** @ingroup ipaddr */ +#define ip_addr_cmp(addr1, addr2) ((IP_GET_TYPE(addr1) != IP_GET_TYPE(addr2)) ? 0 : (IP_IS_V6_VAL(*(addr1)) ? \ + ip6_addr_cmp(ip_2_ip6(addr1), ip_2_ip6(addr2)) : \ + ip4_addr_cmp(ip_2_ip4(addr1), ip_2_ip4(addr2)))) +/** @ingroup ipaddr */ +#define ip_addr_cmp_zoneless(addr1, addr2) ((IP_GET_TYPE(addr1) != IP_GET_TYPE(addr2)) ? 0 : (IP_IS_V6_VAL(*(addr1)) ? \ + ip6_addr_cmp_zoneless(ip_2_ip6(addr1), ip_2_ip6(addr2)) : \ + ip4_addr_cmp(ip_2_ip4(addr1), ip_2_ip4(addr2)))) +/** @ingroup ipaddr */ +#define ip_addr_isany(ipaddr) (((ipaddr) == NULL) ? 1 : ((IP_IS_V6(ipaddr)) ? 
\ + ip6_addr_isany(ip_2_ip6(ipaddr)) : \ + ip4_addr_isany(ip_2_ip4(ipaddr)))) +/** @ingroup ipaddr */ +#define ip_addr_isany_val(ipaddr) ((IP_IS_V6_VAL(ipaddr)) ? \ + ip6_addr_isany_val(*ip_2_ip6(&(ipaddr))) : \ + ip4_addr_isany_val(*ip_2_ip4(&(ipaddr)))) +/** @ingroup ipaddr */ +#define ip_addr_isbroadcast(ipaddr, netif) ((IP_IS_V6(ipaddr)) ? \ + 0 : \ + ip4_addr_isbroadcast(ip_2_ip4(ipaddr), netif)) +/** @ingroup ipaddr */ +#define ip_addr_ismulticast(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_ismulticast(ip_2_ip6(ipaddr)) : \ + ip4_addr_ismulticast(ip_2_ip4(ipaddr))) +/** @ingroup ipaddr */ +#define ip_addr_isloopback(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_isloopback(ip_2_ip6(ipaddr)) : \ + ip4_addr_isloopback(ip_2_ip4(ipaddr))) +/** @ingroup ipaddr */ +#define ip_addr_islinklocal(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_islinklocal(ip_2_ip6(ipaddr)) : \ + ip4_addr_islinklocal(ip_2_ip4(ipaddr))) +#define ip_addr_debug_print(debug, ipaddr) do { if(IP_IS_V6(ipaddr)) { \ + ip6_addr_debug_print(debug, ip_2_ip6(ipaddr)); } else { \ + ip4_addr_debug_print(debug, ip_2_ip4(ipaddr)); }}while(0) +#define ip_addr_debug_print_val(debug, ipaddr) do { if(IP_IS_V6_VAL(ipaddr)) { \ + ip6_addr_debug_print_val(debug, *ip_2_ip6(&(ipaddr))); } else { \ + ip4_addr_debug_print_val(debug, *ip_2_ip4(&(ipaddr))); }}while(0) +char *ipaddr_ntoa(const ip_addr_t *addr); +char *ipaddr_ntoa_r(const ip_addr_t *addr, char *buf, int buflen); +int ipaddr_aton(const char *cp, ip_addr_t *addr); + +/** @ingroup ipaddr */ +#define IPADDR_STRLEN_MAX IP6ADDR_STRLEN_MAX + +/** @ingroup ipaddr */ +#define ip4_2_ipv4_mapped_ipv6(ip6addr, ip4addr) do { \ + (ip6addr)->addr[3] = (ip4addr)->addr; \ + (ip6addr)->addr[2] = PP_HTONL(0x0000FFFFUL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[0] = 0; \ + ip6_addr_clear_zone(ip6addr); } while(0); + +/** @ingroup ipaddr */ +#define unmap_ipv4_mapped_ipv6(ip4addr, ip6addr) \ + (ip4addr)->addr = (ip6addr)->addr[3]; + +#define IP46_ADDR_ANY(type) (((type) == IPADDR_TYPE_V6)? 
IP6_ADDR_ANY : IP4_ADDR_ANY) + +#else /* LWIP_IPV4 && LWIP_IPV6 */ + +#define IP_ADDR_PCB_VERSION_MATCH(addr, pcb) 1 +#define IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr) 1 + +#define ip_addr_set_any_val(is_ipv6, ipaddr) ip_addr_set_any(is_ipv6, &(ipaddr)) +#define ip_addr_set_loopback_val(is_ipv6, ipaddr) ip_addr_set_loopback(is_ipv6, &(ipaddr)) + +#if LWIP_IPV4 + +typedef ip4_addr_t ip_addr_t; +#define IPADDR4_INIT(u32val) { u32val } +#define IPADDR4_INIT_BYTES(a,b,c,d) IPADDR4_INIT(PP_HTONL(LWIP_MAKEU32(a,b,c,d))) +#define IP_IS_V4_VAL(ipaddr) 1 +#define IP_IS_V6_VAL(ipaddr) 0 +#define IP_IS_V4(ipaddr) 1 +#define IP_IS_V6(ipaddr) 0 +#define IP_IS_ANY_TYPE_VAL(ipaddr) 0 +#define IP_SET_TYPE_VAL(ipaddr, iptype) +#define IP_SET_TYPE(ipaddr, iptype) +#define IP_GET_TYPE(ipaddr) IPADDR_TYPE_V4 +#define IP_ADDR_RAW_SIZE(ipaddr) sizeof(ip4_addr_t) +#define ip_2_ip4(ipaddr) (ipaddr) +#define IP_ADDR4(ipaddr,a,b,c,d) IP4_ADDR(ipaddr,a,b,c,d) + +#define ip_addr_copy(dest, src) ip4_addr_copy(dest, src) +#define ip_addr_copy_from_ip4(dest, src) ip4_addr_copy(dest, src) +#define ip_addr_set_ip4_u32(ipaddr, val) ip4_addr_set_u32(ip_2_ip4(ipaddr), val) +#define ip_addr_set_ip4_u32_val(ipaddr, val) ip_addr_set_ip4_u32(&(ipaddr), val) +#define ip_addr_get_ip4_u32(ipaddr) ip4_addr_get_u32(ip_2_ip4(ipaddr)) +#define ip_addr_set(dest, src) ip4_addr_set(dest, src) +#define ip_addr_set_ipaddr(dest, src) ip4_addr_set(dest, src) +#define ip_addr_set_zero(ipaddr) ip4_addr_set_zero(ipaddr) +#define ip_addr_set_zero_ip4(ipaddr) ip4_addr_set_zero(ipaddr) +#define ip_addr_set_any(is_ipv6, ipaddr) ip4_addr_set_any(ipaddr) +#define ip_addr_set_loopback(is_ipv6, ipaddr) ip4_addr_set_loopback(ipaddr) +#define ip_addr_set_hton(dest, src) ip4_addr_set_hton(dest, src) +#define ip_addr_get_network(target, host, mask) ip4_addr_get_network(target, host, mask) +#define ip_addr_netcmp(addr1, addr2, mask) ip4_addr_netcmp(addr1, addr2, mask) +#define ip_addr_cmp(addr1, addr2) ip4_addr_cmp(addr1, addr2) +#define ip_addr_isany(ipaddr) ip4_addr_isany(ipaddr) +#define ip_addr_isany_val(ipaddr) ip4_addr_isany_val(ipaddr) +#define ip_addr_isloopback(ipaddr) ip4_addr_isloopback(ipaddr) +#define ip_addr_islinklocal(ipaddr) ip4_addr_islinklocal(ipaddr) +#define ip_addr_isbroadcast(addr, netif) ip4_addr_isbroadcast(addr, netif) +#define ip_addr_ismulticast(ipaddr) ip4_addr_ismulticast(ipaddr) +#define ip_addr_debug_print(debug, ipaddr) ip4_addr_debug_print(debug, ipaddr) +#define ip_addr_debug_print_val(debug, ipaddr) ip4_addr_debug_print_val(debug, ipaddr) +#define ipaddr_ntoa(ipaddr) ip4addr_ntoa(ipaddr) +#define ipaddr_ntoa_r(ipaddr, buf, buflen) ip4addr_ntoa_r(ipaddr, buf, buflen) +#define ipaddr_aton(cp, addr) ip4addr_aton(cp, addr) + +#define IPADDR_STRLEN_MAX IP4ADDR_STRLEN_MAX + +#define IP46_ADDR_ANY(type) (IP4_ADDR_ANY) + +#else /* LWIP_IPV4 */ + +typedef ip6_addr_t ip_addr_t; +#define IPADDR6_INIT(a, b, c, d) { { a, b, c, d } IPADDR6_ZONE_INIT } +#define IPADDR6_INIT_HOST(a, b, c, d) { { PP_HTONL(a), PP_HTONL(b), PP_HTONL(c), PP_HTONL(d) } IPADDR6_ZONE_INIT } +#define IP_IS_V4_VAL(ipaddr) 0 +#define IP_IS_V6_VAL(ipaddr) 1 +#define IP_IS_V4(ipaddr) 0 +#define IP_IS_V6(ipaddr) 1 +#define IP_IS_ANY_TYPE_VAL(ipaddr) 0 +#define IP_SET_TYPE_VAL(ipaddr, iptype) +#define IP_SET_TYPE(ipaddr, iptype) +#define IP_GET_TYPE(ipaddr) IPADDR_TYPE_V6 +#define IP_ADDR_RAW_SIZE(ipaddr) sizeof(ip6_addr_t) +#define ip_2_ip6(ipaddr) (ipaddr) +#define IP_ADDR6(ipaddr,i0,i1,i2,i3) IP6_ADDR(ipaddr,i0,i1,i2,i3) +#define IP_ADDR6_HOST(ipaddr,i0,i1,i2,i3) 
IP_ADDR6(ipaddr,PP_HTONL(i0),PP_HTONL(i1),PP_HTONL(i2),PP_HTONL(i3)) + +#define ip_addr_copy(dest, src) ip6_addr_copy(dest, src) +#define ip_addr_copy_from_ip6(dest, src) ip6_addr_copy(dest, src) +#define ip_addr_copy_from_ip6_packed(dest, src) ip6_addr_copy_from_packed(dest, src) +#define ip_addr_set(dest, src) ip6_addr_set(dest, src) +#define ip_addr_set_ipaddr(dest, src) ip6_addr_set(dest, src) +#define ip_addr_set_zero(ipaddr) ip6_addr_set_zero(ipaddr) +#define ip_addr_set_zero_ip6(ipaddr) ip6_addr_set_zero(ipaddr) +#define ip_addr_set_any(is_ipv6, ipaddr) ip6_addr_set_any(ipaddr) +#define ip_addr_set_loopback(is_ipv6, ipaddr) ip6_addr_set_loopback(ipaddr) +#define ip_addr_set_hton(dest, src) ip6_addr_set_hton(dest, src) +#define ip_addr_get_network(target, host, mask) ip6_addr_set_zero(target) +#define ip_addr_netcmp(addr1, addr2, mask) 0 +#define ip_addr_cmp(addr1, addr2) ip6_addr_cmp(addr1, addr2) +#define ip_addr_cmp_zoneless(addr1, addr2) ip6_addr_cmp_zoneless(addr1, addr2) +#define ip_addr_isany(ipaddr) ip6_addr_isany(ipaddr) +#define ip_addr_isany_val(ipaddr) ip6_addr_isany_val(ipaddr) +#define ip_addr_isloopback(ipaddr) ip6_addr_isloopback(ipaddr) +#define ip_addr_islinklocal(ipaddr) ip6_addr_islinklocal(ipaddr) +#define ip_addr_isbroadcast(addr, netif) 0 +#define ip_addr_ismulticast(ipaddr) ip6_addr_ismulticast(ipaddr) +#define ip_addr_debug_print(debug, ipaddr) ip6_addr_debug_print(debug, ipaddr) +#define ip_addr_debug_print_val(debug, ipaddr) ip6_addr_debug_print_val(debug, ipaddr) +#define ipaddr_ntoa(ipaddr) ip6addr_ntoa(ipaddr) +#define ipaddr_ntoa_r(ipaddr, buf, buflen) ip6addr_ntoa_r(ipaddr, buf, buflen) +#define ipaddr_aton(cp, addr) ip6addr_aton(cp, addr) + +#define IPADDR_STRLEN_MAX IP6ADDR_STRLEN_MAX + +#define IP46_ADDR_ANY(type) (IP6_ADDR_ANY) + +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if LWIP_IPV4 + +extern const ip_addr_t ip_addr_any; +extern const ip_addr_t ip_addr_broadcast; + +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip_addr_t + * for the IP wildcard. + * Defined to @ref IP4_ADDR_ANY when IPv4 is enabled. + * Defined to @ref IP6_ADDR_ANY in IPv6 only systems. + * Use this if you can handle IPv4 _AND_ IPv6 addresses. + * Use @ref IP4_ADDR_ANY or @ref IP6_ADDR_ANY when the IP + * type matters. 
+ */ +#define IP_ADDR_ANY IP4_ADDR_ANY +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip_addr_t + * for the IPv4 wildcard and the broadcast address + */ +#define IP4_ADDR_ANY (&ip_addr_any) +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip4_addr_t + * for the wildcard and the broadcast address + */ +#define IP4_ADDR_ANY4 (ip_2_ip4(&ip_addr_any)) + +/** @ingroup ip4addr */ +#define IP_ADDR_BROADCAST (&ip_addr_broadcast) +/** @ingroup ip4addr */ +#define IP4_ADDR_BROADCAST (ip_2_ip4(&ip_addr_broadcast)) + +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 + +extern const ip_addr_t ip6_addr_any; + +/** + * @ingroup ip6addr + * IP6_ADDR_ANY can be used as a fixed ip_addr_t + * for the IPv6 wildcard address + */ +#define IP6_ADDR_ANY (&ip6_addr_any) +/** + * @ingroup ip6addr + * IP6_ADDR_ANY6 can be used as a fixed ip6_addr_t + * for the IPv6 wildcard address + */ +#define IP6_ADDR_ANY6 (ip_2_ip6(&ip6_addr_any)) + +#if !LWIP_IPV4 +/** IPv6-only configurations */ +#define IP_ADDR_ANY IP6_ADDR_ANY +#endif /* !LWIP_IPV4 */ + +#endif + +#if LWIP_IPV4 && LWIP_IPV6 +/** @ingroup ipaddr */ +#define IP_ANY_TYPE (&ip_addr_any_type) +#else +#define IP_ANY_TYPE IP_ADDR_ANY +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP_ADDR_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h b/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h new file mode 100644 index 00000000..ff208d25 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h @@ -0,0 +1,82 @@ +/** + * @file + * Heap API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
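/* Example (illustrative sketch, not part of the patch): basic use of the
 * dual-stack ip_addr_t API declared above, assuming LWIP_IPV4 && LWIP_IPV6.
 * The literal addresses are placeholders.
 */
#include "lwip/ip_addr.h"

static void example_ip_addr_usage(void)
{
  ip_addr_t v4, v6;
  char buf[IPADDR_STRLEN_MAX];

  IP_ADDR4(&v4, 192, 168, 0, 10);           /* tags the address as IPADDR_TYPE_V4 */
  IP_ADDR6_HOST(&v6, 0x20010db8, 0, 0, 1);  /* 2001:db8::1, tagged IPADDR_TYPE_V6 */

  /* Different types compare unequal before the address bits are even examined. */
  if (ip_addr_cmp(&v4, &v6) == 0) {
    /* expected: PCB/API code uses this to reject mismatched address families */
  }

  if (ipaddr_aton("2001:db8::2", &v6)) {    /* the parser accepts either family */
    ipaddr_ntoa_r(&v6, buf, sizeof(buf));   /* and converts back to text */
  }
}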
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_MEM_H +#define LWIP_HDR_MEM_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if MEM_LIBC_MALLOC + +#include "lwip/arch.h" + +typedef size_t mem_size_t; +#define MEM_SIZE_F SZT_F + +#elif MEM_USE_POOLS + +typedef u16_t mem_size_t; +#define MEM_SIZE_F U16_F + +#else + +/* MEM_SIZE would have to be aligned, but using 64000 here instead of + * 65535 leaves some room for alignment... + */ +#if MEM_SIZE > 64000L +typedef u32_t mem_size_t; +#define MEM_SIZE_F U32_F +#else +typedef u16_t mem_size_t; +#define MEM_SIZE_F U16_F +#endif /* MEM_SIZE > 64000 */ +#endif + +void mem_init(void); +void *mem_trim(void *mem, mem_size_t size); +void *mem_malloc(mem_size_t size); +void *mem_calloc(mem_size_t count, mem_size_t size); +void mem_free(void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEM_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h new file mode 100644 index 00000000..1630b26c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h @@ -0,0 +1,155 @@ +/** + * @file + * Memory pool API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_MEMP_H +#define LWIP_HDR_MEMP_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* run once with empty definition to handle all custom includes in lwippools.h */ +#define LWIP_MEMPOOL(name,num,size,desc) +#include "lwip/priv/memp_std.h" + +/** Create the list of all memory pools managed by memp. 
MEMP_MAX represents a NULL pool at the end */ +typedef enum { +#define LWIP_MEMPOOL(name,num,size,desc) MEMP_##name, +#include "lwip/priv/memp_std.h" + MEMP_MAX +} memp_t; + +#include "lwip/priv/memp_priv.h" +#include "lwip/stats.h" + +extern const struct memp_desc* const memp_pools[MEMP_MAX]; + +/** + * @ingroup mempool + * Declare prototype for private memory pool if it is used in multiple files + */ +#define LWIP_MEMPOOL_PROTOTYPE(name) extern const struct memp_desc memp_ ## name + +#if MEMP_MEM_MALLOC + +#define LWIP_MEMPOOL_DECLARE(name,num,size,desc) \ + LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(memp_stats_ ## name) \ + const struct memp_desc memp_ ## name = { \ + DECLARE_LWIP_MEMPOOL_DESC(desc) \ + LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(memp_stats_ ## name) \ + LWIP_MEM_ALIGN_SIZE(size) \ + }; + +#else /* MEMP_MEM_MALLOC */ + +/** + * @ingroup mempool + * Declare a private memory pool + * Private mempools example: + * .h: only when pool is used in multiple .c files: LWIP_MEMPOOL_PROTOTYPE(my_private_pool); + * .c: + * - in global variables section: LWIP_MEMPOOL_DECLARE(my_private_pool, 10, sizeof(foo), "Some description") + * - call ONCE before using pool (e.g. in some init() function): LWIP_MEMPOOL_INIT(my_private_pool); + * - allocate: void* my_new_mem = LWIP_MEMPOOL_ALLOC(my_private_pool); + * - free: LWIP_MEMPOOL_FREE(my_private_pool, my_new_mem); + * + * To relocate a pool, declare it as extern in cc.h. Example for GCC: + * extern u8_t \_\_attribute\_\_((section(".onchip_mem"))) memp_memory_my_private_pool_base[]; + */ +#define LWIP_MEMPOOL_DECLARE(name,num,size,desc) \ + LWIP_DECLARE_MEMORY_ALIGNED(memp_memory_ ## name ## _base, ((num) * (MEMP_SIZE + MEMP_ALIGN_SIZE(size)))); \ + \ + LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(memp_stats_ ## name) \ + \ + static struct memp *memp_tab_ ## name; \ + \ + const struct memp_desc memp_ ## name = { \ + DECLARE_LWIP_MEMPOOL_DESC(desc) \ + LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(memp_stats_ ## name) \ + LWIP_MEM_ALIGN_SIZE(size), \ + (num), \ + memp_memory_ ## name ## _base, \ + &memp_tab_ ## name \ + }; + +#endif /* MEMP_MEM_MALLOC */ + +/** + * @ingroup mempool + * Initialize a private memory pool + */ +#define LWIP_MEMPOOL_INIT(name) memp_init_pool(&memp_ ## name) +/** + * @ingroup mempool + * Allocate from a private memory pool + */ +#define LWIP_MEMPOOL_ALLOC(name) memp_malloc_pool(&memp_ ## name) +/** + * @ingroup mempool + * Free element from a private memory pool + */ +#define LWIP_MEMPOOL_FREE(name, x) memp_free_pool(&memp_ ## name, (x)) + +#if MEM_USE_POOLS +/** This structure is used to save the pool one element came from. + * This has to be defined here as it is required for pool size calculation. 
*/ +struct memp_malloc_helper +{ + memp_t poolnr; +#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) + u16_t size; +#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */ +}; +#endif /* MEM_USE_POOLS */ + +void memp_init(void); + +#if MEMP_OVERFLOW_CHECK +void *memp_malloc_fn(memp_t type, const char* file, const int line); +#define memp_malloc(t) memp_malloc_fn((t), __FILE__, __LINE__) +#else +void *memp_malloc(memp_t type); +#endif +void memp_free(memp_t type, void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEMP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h new file mode 100644 index 00000000..7fa0797f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h @@ -0,0 +1,99 @@ +/** + * @file + * + * Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710. + * No support for MLDv2. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
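/* Example (illustrative sketch, not part of the patch): the heap API from
 * mem.h together with a private pool declared as described in the
 * LWIP_MEMPOOL_DECLARE comment above. Pool name, element type and all sizes
 * are placeholders.
 */
#include "lwip/mem.h"
#include "lwip/memp.h"

struct my_elem {
  u32_t seq;
  u8_t payload[32];
};

/* Statically reserves room for 8 elements (unless MEMP_MEM_MALLOC is set). */
LWIP_MEMPOOL_DECLARE(my_pool, 8, sizeof(struct my_elem), "example pool")

static void example_memory_usage(void)
{
  struct my_elem *e;
  void *scratch;

  LWIP_MEMPOOL_INIT(my_pool);            /* call once, e.g. from an init hook */

  e = (struct my_elem *)LWIP_MEMPOOL_ALLOC(my_pool);
  if (e != NULL) {
    e->seq = 1;
    LWIP_MEMPOOL_FREE(my_pool, e);
  }

  scratch = mem_malloc(128);             /* heap allocation (mem.h) */
  if (scratch != NULL) {
    mem_free(scratch);
  }
}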
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_MLD6_H +#define LWIP_HDR_MLD6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6_MLD && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** MLD group */ +struct mld_group { + /** next link */ + struct mld_group *next; + /** multicast address */ + ip6_addr_t group_address; + /** signifies we were the last person to report */ + u8_t last_reporter_flag; + /** current state of the group */ + u8_t group_state; + /** timer for reporting */ + u16_t timer; + /** counter of simultaneous uses */ + u8_t use; +}; + +#define MLD6_TMR_INTERVAL 100 /* Milliseconds */ + +err_t mld6_stop(struct netif *netif); +void mld6_report_groups(struct netif *netif); +void mld6_tmr(void); +struct mld_group *mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr); +void mld6_input(struct pbuf *p, struct netif *inp); +err_t mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr); +err_t mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr); +err_t mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr); +err_t mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr); + +/** @ingroup mld6 + * Get list head of MLD6 groups for netif. + * Note: The allnodes group IP is NOT in the list, since it must always + * be received for correct IPv6 operation. + * @see @ref netif_set_mld_mac_filter() + */ +#define netif_mld6_data(netif) ((struct mld_group *)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6_MLD && LWIP_IPV6 */ + +#endif /* LWIP_HDR_MLD6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h new file mode 100644 index 00000000..c30e624f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h @@ -0,0 +1,90 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
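/* Example (illustrative sketch, not part of the patch): joining and leaving a
 * link-local multicast group with the MLD6 API above, assuming LWIP_IPV6_MLD
 * is enabled. The group ff02::1:3 is used purely as a placeholder.
 */
#include "lwip/mld6.h"
#include "lwip/ip6_addr.h"
#include "lwip/def.h"

static void example_mld6_membership(struct netif *netif)
{
  ip6_addr_t group;

  IP6_ADDR(&group, PP_HTONL(0xff020000UL), 0, 0, PP_HTONL(0x00010003UL));
  ip6_addr_assign_zone(&group, IP6_MULTICAST, netif);  /* no-op without scopes */

  if (mld6_joingroup_netif(netif, &group) == ERR_OK) {
    /* ... receive traffic addressed to the group ... */
    mld6_leavegroup_netif(netif, &group);
  }
}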
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ND6_H +#define LWIP_HDR_ND6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** 1 second period */ +#define ND6_TMR_INTERVAL 1000 + +/** Router solicitations are sent in 4 second intervals (see RFC 4861, ch. 6.3.7) */ +#ifndef ND6_RTR_SOLICITATION_INTERVAL +#define ND6_RTR_SOLICITATION_INTERVAL 4000 +#endif + +struct pbuf; +struct netif; + +void nd6_tmr(void); +void nd6_input(struct pbuf *p, struct netif *inp); +void nd6_clear_destination_cache(void); +struct netif *nd6_find_route(const ip6_addr_t *ip6addr); +err_t nd6_get_next_hop_addr_or_queue(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr, const u8_t **hwaddrp); +u16_t nd6_get_destination_mtu(const ip6_addr_t *ip6addr, struct netif *netif); +#if LWIP_ND6_TCP_REACHABILITY_HINTS +void nd6_reachability_hint(const ip6_addr_t *ip6addr); +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ +void nd6_cleanup_netif(struct netif *netif); +#if LWIP_IPV6_MLD +void nd6_adjust_mld_membership(struct netif *netif, s8_t addr_idx, u8_t new_state); +#endif /* LWIP_IPV6_MLD */ +void nd6_restart_netif(struct netif *netif); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_ND6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h b/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h new file mode 100644 index 00000000..42a911bc --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h @@ -0,0 +1,116 @@ +/** + * @file + * netbuf API (for netconn API) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_NETBUF_H +#define LWIP_HDR_NETBUF_H + +#include "lwip/opt.h" + +#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ +/* Note: Netconn API is always available when sockets are enabled - + * sockets are implemented on top of them */ + +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This netbuf has dest-addr/port set */ +#define NETBUF_FLAG_DESTADDR 0x01 +/** This netbuf includes a checksum */ +#define NETBUF_FLAG_CHKSUM 0x02 + +/** "Network buffer" - contains data and addressing info */ +struct netbuf { + struct pbuf *p, *ptr; + ip_addr_t addr; + u16_t port; +#if LWIP_NETBUF_RECVINFO || LWIP_CHECKSUM_ON_COPY + u8_t flags; + u16_t toport_chksum; +#if LWIP_NETBUF_RECVINFO + ip_addr_t toaddr; +#endif /* LWIP_NETBUF_RECVINFO */ +#endif /* LWIP_NETBUF_RECVINFO || LWIP_CHECKSUM_ON_COPY */ +}; + +/* Network buffer functions: */ +struct netbuf * netbuf_new (void); +void netbuf_delete (struct netbuf *buf); +void * netbuf_alloc (struct netbuf *buf, u16_t size); +void netbuf_free (struct netbuf *buf); +err_t netbuf_ref (struct netbuf *buf, + const void *dataptr, u16_t size); +void netbuf_chain (struct netbuf *head, struct netbuf *tail); + +err_t netbuf_data (struct netbuf *buf, + void **dataptr, u16_t *len); +s8_t netbuf_next (struct netbuf *buf); +void netbuf_first (struct netbuf *buf); + + +#define netbuf_copy_partial(buf, dataptr, len, offset) \ + pbuf_copy_partial((buf)->p, (dataptr), (len), (offset)) +#define netbuf_copy(buf,dataptr,len) netbuf_copy_partial(buf, dataptr, len, 0) +#define netbuf_take(buf, dataptr, len) pbuf_take((buf)->p, dataptr, len) +#define netbuf_len(buf) ((buf)->p->tot_len) +#define netbuf_fromaddr(buf) (&((buf)->addr)) +#define netbuf_set_fromaddr(buf, fromaddr) ip_addr_set(&((buf)->addr), fromaddr) +#define netbuf_fromport(buf) ((buf)->port) +#if LWIP_NETBUF_RECVINFO +#define netbuf_destaddr(buf) (&((buf)->toaddr)) +#define netbuf_set_destaddr(buf, destaddr) ip_addr_set(&((buf)->toaddr), destaddr) +#if LWIP_CHECKSUM_ON_COPY +#define netbuf_destport(buf) (((buf)->flags & NETBUF_FLAG_DESTADDR) ? 
(buf)->toport_chksum : 0) +#else /* LWIP_CHECKSUM_ON_COPY */ +#define netbuf_destport(buf) ((buf)->toport_chksum) +#endif /* LWIP_CHECKSUM_ON_COPY */ +#endif /* LWIP_NETBUF_RECVINFO */ +#if LWIP_CHECKSUM_ON_COPY +#define netbuf_set_chksum(buf, chksum) do { (buf)->flags = NETBUF_FLAG_CHKSUM; \ + (buf)->toport_chksum = chksum; } while(0) +#endif /* LWIP_CHECKSUM_ON_COPY */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#endif /* LWIP_HDR_NETBUF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h b/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h new file mode 100644 index 00000000..d3d15dfa --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h @@ -0,0 +1,150 @@ +/** + * @file + * NETDB API (sockets) + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
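/* Example (illustrative sketch, not part of the patch): building a netbuf with
 * the API above, as typically done before handing it to the netconn API
 * (e.g. netconn_send()). The payload contents are placeholders.
 */
#include <string.h>
#include "lwip/netbuf.h"

static err_t example_netbuf_build(struct netbuf **out)
{
  struct netbuf *buf;
  void *data;

  buf = netbuf_new();                       /* allocate the netbuf container */
  if (buf == NULL) {
    return ERR_MEM;
  }

  data = netbuf_alloc(buf, 12);             /* allocate 12 bytes of pbuf payload */
  if (data == NULL) {
    netbuf_delete(buf);
    return ERR_MEM;
  }
  memcpy(data, "hello, lwIP", 12);

  /* The caller would pass 'buf' to e.g. netconn_send() and finally
   * netbuf_delete() it. */
  *out = buf;
  return ERR_OK;
}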
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_NETDB_H +#define LWIP_HDR_NETDB_H + +#include "lwip/opt.h" + +#if LWIP_DNS && LWIP_SOCKET + +#include "lwip/arch.h" +#include "lwip/inet.h" +#include "lwip/sockets.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* some rarely used options */ +#ifndef LWIP_DNS_API_DECLARE_H_ERRNO +#define LWIP_DNS_API_DECLARE_H_ERRNO 1 +#endif + +#ifndef LWIP_DNS_API_DEFINE_ERRORS +#define LWIP_DNS_API_DEFINE_ERRORS 1 +#endif + +#ifndef LWIP_DNS_API_DEFINE_FLAGS +#define LWIP_DNS_API_DEFINE_FLAGS 1 +#endif + +#ifndef LWIP_DNS_API_DECLARE_STRUCTS +#define LWIP_DNS_API_DECLARE_STRUCTS 1 +#endif + +#if LWIP_DNS_API_DEFINE_ERRORS +/** Errors used by the DNS API functions, h_errno can be one of them */ +#define EAI_NONAME 200 +#define EAI_SERVICE 201 +#define EAI_FAIL 202 +#define EAI_MEMORY 203 +#define EAI_FAMILY 204 + +#define HOST_NOT_FOUND 210 +#define NO_DATA 211 +#define NO_RECOVERY 212 +#define TRY_AGAIN 213 +#endif /* LWIP_DNS_API_DEFINE_ERRORS */ + +#if LWIP_DNS_API_DEFINE_FLAGS +/* input flags for struct addrinfo */ +#define AI_PASSIVE 0x01 +#define AI_CANONNAME 0x02 +#define AI_NUMERICHOST 0x04 +#define AI_NUMERICSERV 0x08 +#define AI_V4MAPPED 0x10 +#define AI_ALL 0x20 +#define AI_ADDRCONFIG 0x40 +#endif /* LWIP_DNS_API_DEFINE_FLAGS */ + +#if LWIP_DNS_API_DECLARE_STRUCTS +struct hostent { + char *h_name; /* Official name of the host. */ + char **h_aliases; /* A pointer to an array of pointers to alternative host names, + terminated by a null pointer. */ + int h_addrtype; /* Address type. */ + int h_length; /* The length, in bytes, of the address. */ + char **h_addr_list; /* A pointer to an array of pointers to network addresses (in + network byte order) for the host, terminated by a null pointer. */ +#define h_addr h_addr_list[0] /* for backward compatibility */ +}; + +struct addrinfo { + int ai_flags; /* Input flags. */ + int ai_family; /* Address family of socket. */ + int ai_socktype; /* Socket type. */ + int ai_protocol; /* Protocol of socket. */ + socklen_t ai_addrlen; /* Length of socket address. */ + struct sockaddr *ai_addr; /* Socket address of socket. */ + char *ai_canonname; /* Canonical name of service location. */ + struct addrinfo *ai_next; /* Pointer to next in list. 
*/ +}; +#endif /* LWIP_DNS_API_DECLARE_STRUCTS */ + +#define NETDB_ELEM_SIZE (sizeof(struct addrinfo) + sizeof(struct sockaddr_storage) + DNS_MAX_NAME_LENGTH + 1) + +#if LWIP_DNS_API_DECLARE_H_ERRNO +/* application accessible error code set by the DNS API functions */ +extern int h_errno; +#endif /* LWIP_DNS_API_DECLARE_H_ERRNO*/ + +struct hostent *lwip_gethostbyname(const char *name); +int lwip_gethostbyname_r(const char *name, struct hostent *ret, char *buf, + size_t buflen, struct hostent **result, int *h_errnop); +void lwip_freeaddrinfo(struct addrinfo *ai); +int lwip_getaddrinfo(const char *nodename, + const char *servname, + const struct addrinfo *hints, + struct addrinfo **res); + +#if LWIP_COMPAT_SOCKETS +/** @ingroup netdbapi */ +#define gethostbyname(name) lwip_gethostbyname(name) +/** @ingroup netdbapi */ +#define gethostbyname_r(name, ret, buf, buflen, result, h_errnop) \ + lwip_gethostbyname_r(name, ret, buf, buflen, result, h_errnop) +/** @ingroup netdbapi */ +#define freeaddrinfo(addrinfo) lwip_freeaddrinfo(addrinfo) +/** @ingroup netdbapi */ +#define getaddrinfo(nodname, servname, hints, res) \ + lwip_getaddrinfo(nodname, servname, hints, res) +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DNS && LWIP_SOCKET */ + +#endif /* LWIP_HDR_NETDB_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h b/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h new file mode 100644 index 00000000..911196ab --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h @@ -0,0 +1,669 @@ +/** + * @file + * netif API (to be used from TCPIP thread) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
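/* Example (illustrative sketch, not part of the patch): name resolution with
 * the netdb API above (requires LWIP_DNS && LWIP_SOCKET). The host name and
 * port are placeholders.
 */
#include <string.h>
#include "lwip/netdb.h"
#include "lwip/sockets.h"

static int example_resolve_and_connect(void)
{
  struct addrinfo hints;
  struct addrinfo *res = NULL;
  int sock = -1;

  memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_UNSPEC;              /* IPv4 or IPv6 */
  hints.ai_socktype = SOCK_STREAM;

  if (lwip_getaddrinfo("example.com", "80", &hints, &res) != 0 || res == NULL) {
    return -1;                              /* returns an EAI_* code on failure */
  }

  sock = lwip_socket(res->ai_family, res->ai_socktype, res->ai_protocol);
  if (sock >= 0) {
    if (lwip_connect(sock, res->ai_addr, res->ai_addrlen) != 0) {
      lwip_close(sock);
      sock = -1;
    }
  }

  lwip_freeaddrinfo(res);
  return sock;
}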
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_NETIF_H +#define LWIP_HDR_NETIF_H + +#include "lwip/opt.h" + +#define ENABLE_LOOPBACK (LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF) + +#include "lwip/err.h" + +#include "lwip/ip_addr.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/stats.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Throughout this file, IP addresses are expected to be in + * the same byte order as in IP_PCB. */ + +/** Must be the maximum of all used hardware address lengths + across all types of interfaces in use. + This does not have to be changed, normally. */ +#ifndef NETIF_MAX_HWADDR_LEN +#define NETIF_MAX_HWADDR_LEN 6U +#endif + +/** The size of a fully constructed netif name which the + * netif can be identified by in APIs. Composed of + * 2 chars, 3 (max) digits, and 1 \0 + */ +#define NETIF_NAMESIZE 6 + +/** + * @defgroup netif_flags Flags + * @ingroup netif + * @{ + */ + +/** Whether the network interface is 'up'. This is + * a software flag used to control whether this network + * interface is enabled and processes traffic. + * It must be set by the startup code before this netif can be used + * (also for dhcp/autoip). + */ +#define NETIF_FLAG_UP 0x01U +/** If set, the netif has broadcast capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_BROADCAST 0x02U +/** If set, the interface has an active link + * (set by the network interface driver). + * Either set by the netif driver in its init function (if the link + * is up at that time) or at a later point once the link comes up + * (if link detection is supported by the hardware). */ +#define NETIF_FLAG_LINK_UP 0x04U +/** If set, the netif is an ethernet device using ARP. + * Set by the netif driver in its init function. + * Used to check input packet types and use of DHCP. */ +#define NETIF_FLAG_ETHARP 0x08U +/** If set, the netif is an ethernet device. It might not use + * ARP or TCP/IP if it is used for PPPoE only. + */ +#define NETIF_FLAG_ETHERNET 0x10U +/** If set, the netif has IGMP capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_IGMP 0x20U +/** If set, the netif has MLD6 capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_MLD6 0x40U + +/** + * @} + */ + +enum lwip_internal_netif_client_data_index +{ +#if LWIP_IPV4 +#if LWIP_DHCP + LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, +#endif +#if LWIP_AUTOIP + LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, +#endif +#if LWIP_IGMP + LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, +#endif +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +#if LWIP_IPV6_DHCP6 + LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, +#endif +#if LWIP_IPV6_MLD + LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, +#endif +#endif /* LWIP_IPV6 */ + LWIP_NETIF_CLIENT_DATA_INDEX_MAX +}; + +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define NETIF_CHECKSUM_GEN_IP 0x0001 +#define NETIF_CHECKSUM_GEN_UDP 0x0002 +#define NETIF_CHECKSUM_GEN_TCP 0x0004 +#define NETIF_CHECKSUM_GEN_ICMP 0x0008 +#define NETIF_CHECKSUM_GEN_ICMP6 0x0010 +#define NETIF_CHECKSUM_CHECK_IP 0x0100 +#define NETIF_CHECKSUM_CHECK_UDP 0x0200 +#define NETIF_CHECKSUM_CHECK_TCP 0x0400 +#define NETIF_CHECKSUM_CHECK_ICMP 0x0800 +#define NETIF_CHECKSUM_CHECK_ICMP6 0x1000 +#define NETIF_CHECKSUM_ENABLE_ALL 0xFFFF +#define NETIF_CHECKSUM_DISABLE_ALL 0x0000 +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ + +struct netif; + +/** MAC Filter Actions, these are passed to a netif's igmp_mac_filter or + * mld_mac_filter callback function. 
*/ +enum netif_mac_filter_action { + /** Delete a filter entry */ + NETIF_DEL_MAC_FILTER = 0, + /** Add a filter entry */ + NETIF_ADD_MAC_FILTER = 1 +}; + +/** Function prototype for netif init functions. Set up flags and output/linkoutput + * callback functions in this function. + * + * @param netif The netif to initialize + */ +typedef err_t (*netif_init_fn)(struct netif *netif); +/** Function prototype for netif->input functions. This function is saved as 'input' + * callback function in the netif struct. Call it when a packet has been received. + * + * @param p The received packet, copied into a pbuf + * @param inp The netif which received the packet + * @return ERR_OK if the packet was handled + * != ERR_OK is the packet was NOT handled, in this case, the caller has + * to free the pbuf + */ +typedef err_t (*netif_input_fn)(struct pbuf *p, struct netif *inp); + +#if LWIP_IPV4 +/** Function prototype for netif->output functions. Called by lwIP when a packet + * shall be sent. For ethernet netif, set this to 'etharp_output' and set + * 'linkoutput'. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (p->payload points to IP header) + * @param ipaddr The IP address to which the packet shall be sent + */ +typedef err_t (*netif_output_fn)(struct netif *netif, struct pbuf *p, + const ip4_addr_t *ipaddr); +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 +/** Function prototype for netif->output_ip6 functions. Called by lwIP when a packet + * shall be sent. For ethernet netif, set this to 'ethip6_output' and set + * 'linkoutput'. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (p->payload points to IP header) + * @param ipaddr The IPv6 address to which the packet shall be sent + */ +typedef err_t (*netif_output_ip6_fn)(struct netif *netif, struct pbuf *p, + const ip6_addr_t *ipaddr); +#endif /* LWIP_IPV6 */ + +/** Function prototype for netif->linkoutput functions. Only used for ethernet + * netifs. This function is called by ARP when a packet shall be sent. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (raw ethernet packet) + */ +typedef err_t (*netif_linkoutput_fn)(struct netif *netif, struct pbuf *p); +/** Function prototype for netif status- or link-callback functions. */ +typedef void (*netif_status_callback_fn)(struct netif *netif); +#if LWIP_IPV4 && LWIP_IGMP +/** Function prototype for netif igmp_mac_filter functions */ +typedef err_t (*netif_igmp_mac_filter_fn)(struct netif *netif, + const ip4_addr_t *group, enum netif_mac_filter_action action); +#endif /* LWIP_IPV4 && LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD +/** Function prototype for netif mld_mac_filter functions */ +typedef err_t (*netif_mld_mac_filter_fn)(struct netif *netif, + const ip6_addr_t *group, enum netif_mac_filter_action action); +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + +#if LWIP_DHCP || LWIP_AUTOIP || LWIP_IGMP || LWIP_IPV6_MLD || LWIP_IPV6_DHCP6 || (LWIP_NUM_NETIF_CLIENT_DATA > 0) +#if LWIP_NUM_NETIF_CLIENT_DATA > 0 +u8_t netif_alloc_client_data_id(void); +#endif +/** @ingroup netif_cd + * Set client data. Obtain ID from netif_alloc_client_data_id(). + */ +#define netif_set_client_data(netif, id, data) netif_get_client_data(netif, id) = (data) +/** @ingroup netif_cd + * Get client data. Obtain ID from netif_alloc_client_data_id(). 
+ */ +#define netif_get_client_data(netif, id) (netif)->client_data[(id)] +#endif + +#if (LWIP_IPV4 && LWIP_ARP && (ARP_TABLE_SIZE > 0x7f)) || (LWIP_IPV6 && (LWIP_ND6_NUM_DESTINATIONS > 0x7f)) +typedef u16_t netif_addr_idx_t; +#define NETIF_ADDR_IDX_MAX 0x7FFF +#else +typedef u8_t netif_addr_idx_t; +#define NETIF_ADDR_IDX_MAX 0x7F +#endif + +#if LWIP_NETIF_HWADDRHINT +#define LWIP_NETIF_USE_HINTS 1 +struct netif_hint { + netif_addr_idx_t addr_hint; +}; +#else /* LWIP_NETIF_HWADDRHINT */ +#define LWIP_NETIF_USE_HINTS 0 +#endif /* LWIP_NETIF_HWADDRHINT */ + +/** Generic data structure used for all lwIP network interfaces. + * The following fields should be filled in by the initialization + * function for the device driver: hwaddr_len, hwaddr[], mtu, flags */ +struct netif { +#if !LWIP_SINGLE_NETIF + /** pointer to next in linked list */ + struct netif *next; +#endif + +#if LWIP_IPV4 + /** IP address configuration in network byte order */ + ip_addr_t ip_addr; + ip_addr_t netmask; + ip_addr_t gw; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + /** Array of IPv6 addresses for this netif. */ + ip_addr_t ip6_addr[LWIP_IPV6_NUM_ADDRESSES]; + /** The state of each IPv6 address (Tentative, Preferred, etc). + * @see ip6_addr.h */ + u8_t ip6_addr_state[LWIP_IPV6_NUM_ADDRESSES]; +#if LWIP_IPV6_ADDRESS_LIFETIMES + /** Remaining valid and preferred lifetime of each IPv6 address, in seconds. + * For valid lifetimes, the special value of IP6_ADDR_LIFE_STATIC (0) + * indicates the address is static and has no lifetimes. */ + u32_t ip6_addr_valid_life[LWIP_IPV6_NUM_ADDRESSES]; + u32_t ip6_addr_pref_life[LWIP_IPV6_NUM_ADDRESSES]; +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ +#endif /* LWIP_IPV6 */ + /** This function is called by the network device driver + * to pass a packet up the TCP/IP stack. */ + netif_input_fn input; +#if LWIP_IPV4 + /** This function is called by the IP module when it wants + * to send a packet on the interface. This function typically + * first resolves the hardware address, then sends the packet. + * For ethernet physical layer, this is usually etharp_output() */ + netif_output_fn output; +#endif /* LWIP_IPV4 */ + /** This function is called by ethernet_output() when it wants + * to send a packet on the interface. This function outputs + * the pbuf as-is on the link medium. */ + netif_linkoutput_fn linkoutput; +#if LWIP_IPV6 + /** This function is called by the IPv6 module when it wants + * to send a packet on the interface. This function typically + * first resolves the hardware address, then sends the packet. + * For ethernet physical layer, this is usually ethip6_output() */ + netif_output_ip6_fn output_ip6; +#endif /* LWIP_IPV6 */ +#if LWIP_NETIF_STATUS_CALLBACK + /** This function is called when the netif state is set to up or down + */ + netif_status_callback_fn status_callback; +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_LINK_CALLBACK + /** This function is called when the netif link is set to up or down + */ + netif_status_callback_fn link_callback; +#endif /* LWIP_NETIF_LINK_CALLBACK */ +#if LWIP_NETIF_REMOVE_CALLBACK + /** This function is called when the netif has been removed */ + netif_status_callback_fn remove_callback; +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + /** This field can be set by the device driver and could point + * to state information for the device. 
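
When LWIP_NETIF_STATUS_CALLBACK is enabled, the status_callback field above is typically used to learn when the interface (and, with DHCP, its address) becomes usable. A minimal sketch (my_status_cb is a placeholder; it is registered with netif_set_status_callback(), declared further below):

static void my_status_cb(struct netif *netif)
{
  if (netif_is_up(netif)) {
    /* interface is administratively up; with DHCP, netif->ip_addr may just
       have been assigned */
  }
}
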
*/ + void *state; +#ifdef netif_get_client_data + void* client_data[LWIP_NETIF_CLIENT_DATA_INDEX_MAX + LWIP_NUM_NETIF_CLIENT_DATA]; +#endif +#if LWIP_NETIF_HOSTNAME + /* the hostname for this netif, NULL is a valid value */ + const char* hostname; +#endif /* LWIP_NETIF_HOSTNAME */ +#if LWIP_CHECKSUM_CTRL_PER_NETIF + u16_t chksum_flags; +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF*/ + /** maximum transfer unit (in bytes) */ + u16_t mtu; +#if LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES + /** maximum transfer unit (in bytes), updated by RA */ + u16_t mtu6; +#endif /* LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES */ + /** link level hardware address of this interface */ + u8_t hwaddr[NETIF_MAX_HWADDR_LEN]; + /** number of bytes used in hwaddr */ + u8_t hwaddr_len; + /** flags (@see @ref netif_flags) */ + u8_t flags; + /** descriptive abbreviation */ + char name[2]; + /** number of this interface. Used for @ref if_api and @ref netifapi_netif, + * as well as for IPv6 zones */ + u8_t num; +#if LWIP_IPV6_AUTOCONFIG + /** is this netif enabled for IPv6 autoconfiguration */ + u8_t ip6_autoconfig_enabled; +#endif /* LWIP_IPV6_AUTOCONFIG */ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /** Number of Router Solicitation messages that remain to be sent. */ + u8_t rs_count; +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ +#if MIB2_STATS + /** link type (from "snmp_ifType" enum from snmp_mib2.h) */ + u8_t link_type; + /** (estimate) link speed */ + u32_t link_speed; + /** timestamp at last change made (up/down) */ + u32_t ts; + /** counters */ + struct stats_mib2_netif_ctrs mib2_counters; +#endif /* MIB2_STATS */ +#if LWIP_IPV4 && LWIP_IGMP + /** This function could be called to add or delete an entry in the multicast + filter table of the ethernet MAC.*/ + netif_igmp_mac_filter_fn igmp_mac_filter; +#endif /* LWIP_IPV4 && LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD + /** This function could be called to add or delete an entry in the IPv6 multicast + filter table of the ethernet MAC. */ + netif_mld_mac_filter_fn mld_mac_filter; +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ +#if LWIP_NETIF_USE_HINTS + struct netif_hint *hints; +#endif /* LWIP_NETIF_USE_HINTS */ +#if ENABLE_LOOPBACK + /* List of packets to be queued for ourselves. */ + struct pbuf *loop_first; + struct pbuf *loop_last; +#if LWIP_LOOPBACK_MAX_PBUFS + u16_t loop_cnt_current; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ +#endif /* ENABLE_LOOPBACK */ +}; + +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define NETIF_SET_CHECKSUM_CTRL(netif, chksumflags) do { \ + (netif)->chksum_flags = chksumflags; } while(0) +#define IF__NETIF_CHECKSUM_ENABLED(netif, chksumflag) if (((netif) == NULL) || (((netif)->chksum_flags & (chksumflag)) != 0)) +#else /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#define NETIF_SET_CHECKSUM_CTRL(netif, chksumflags) +#define IF__NETIF_CHECKSUM_ENABLED(netif, chksumflag) +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ + +#if LWIP_SINGLE_NETIF +#define NETIF_FOREACH(netif) if (((netif) = netif_default) != NULL) +#else /* LWIP_SINGLE_NETIF */ +/** The list of network interfaces. */ +extern struct netif *netif_list; +#define NETIF_FOREACH(netif) for ((netif) = netif_list; (netif) != NULL; (netif) = (netif)->next) +#endif /* LWIP_SINGLE_NETIF */ +/** The default network interface. 
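
A typical IPv4 bring-up sequence using the functions declared just below; my_netif, my_ethernetif_init and the addresses are placeholders, and tcpip_input (from lwip/tcpip.h) assumes the OS-mode stack, with netif_input being the usual choice for NO_SYS builds:

static struct netif my_netif;

static void my_network_init(void)
{
  ip4_addr_t ip, mask, gw;
  IP4_ADDR(&ip,   192, 168,   1, 10);
  IP4_ADDR(&mask, 255, 255, 255,  0);
  IP4_ADDR(&gw,   192, 168,   1,  1);
  netif_add(&my_netif, &ip, &mask, &gw, NULL, my_ethernetif_init, tcpip_input);
  netif_set_default(&my_netif);
  netif_set_up(&my_netif);
}
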
*/ +extern struct netif *netif_default; + +void netif_init(void); + +struct netif *netif_add_noaddr(struct netif *netif, void *state, netif_init_fn init, netif_input_fn input); + +#if LWIP_IPV4 +struct netif *netif_add(struct netif *netif, + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, + void *state, netif_init_fn init, netif_input_fn input); +void netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, + const ip4_addr_t *gw); +#else /* LWIP_IPV4 */ +struct netif *netif_add(struct netif *netif, void *state, netif_init_fn init, netif_input_fn input); +#endif /* LWIP_IPV4 */ +void netif_remove(struct netif * netif); + +/* Returns a network interface given its name. The name is of the form + "et0", where the first two letters are the "name" field in the + netif structure, and the digit is in the num field in the same + structure. */ +struct netif *netif_find(const char *name); + +void netif_set_default(struct netif *netif); + +#if LWIP_IPV4 +void netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr); +void netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask); +void netif_set_gw(struct netif *netif, const ip4_addr_t *gw); +/** @ingroup netif_ip4 */ +#define netif_ip4_addr(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->ip_addr))) +/** @ingroup netif_ip4 */ +#define netif_ip4_netmask(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->netmask))) +/** @ingroup netif_ip4 */ +#define netif_ip4_gw(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->gw))) +/** @ingroup netif_ip4 */ +#define netif_ip_addr4(netif) ((const ip_addr_t*)&((netif)->ip_addr)) +/** @ingroup netif_ip4 */ +#define netif_ip_netmask4(netif) ((const ip_addr_t*)&((netif)->netmask)) +/** @ingroup netif_ip4 */ +#define netif_ip_gw4(netif) ((const ip_addr_t*)&((netif)->gw)) +#endif /* LWIP_IPV4 */ + +#define netif_set_flags(netif, set_flags) do { (netif)->flags = (u8_t)((netif)->flags | (set_flags)); } while(0) +#define netif_clear_flags(netif, clr_flags) do { (netif)->flags = (u8_t)((netif)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define netif_is_flag_set(nefif, flag) (((netif)->flags & (flag)) != 0) + +void netif_set_up(struct netif *netif); +void netif_set_down(struct netif *netif); +/** @ingroup netif + * Ask if an interface is up + */ +#define netif_is_up(netif) (((netif)->flags & NETIF_FLAG_UP) ? (u8_t)1 : (u8_t)0) + +#if LWIP_NETIF_STATUS_CALLBACK +void netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback); +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_REMOVE_CALLBACK +void netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback); +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + +void netif_set_link_up(struct netif *netif); +void netif_set_link_down(struct netif *netif); +/** Ask if a link is up */ +#define netif_is_link_up(netif) (((netif)->flags & NETIF_FLAG_LINK_UP) ? (u8_t)1 : (u8_t)0) + +#if LWIP_NETIF_LINK_CALLBACK +void netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback); +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if LWIP_NETIF_HOSTNAME +/** @ingroup netif */ +#define netif_set_hostname(netif, name) do { if((netif) != NULL) { (netif)->hostname = name; }}while(0) +/** @ingroup netif */ +#define netif_get_hostname(netif) (((netif) != NULL) ? 
((netif)->hostname) : NULL) +#endif /* LWIP_NETIF_HOSTNAME */ + +#if LWIP_IGMP +/** @ingroup netif */ +#define netif_set_igmp_mac_filter(netif, function) do { if((netif) != NULL) { (netif)->igmp_mac_filter = function; }}while(0) +#define netif_get_igmp_mac_filter(netif) (((netif) != NULL) ? ((netif)->igmp_mac_filter) : NULL) +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6 && LWIP_IPV6_MLD +/** @ingroup netif */ +#define netif_set_mld_mac_filter(netif, function) do { if((netif) != NULL) { (netif)->mld_mac_filter = function; }}while(0) +#define netif_get_mld_mac_filter(netif) (((netif) != NULL) ? ((netif)->mld_mac_filter) : NULL) +#define netif_mld_mac_filter(netif, addr, action) do { if((netif) && (netif)->mld_mac_filter) { (netif)->mld_mac_filter((netif), (addr), (action)); }}while(0) +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + +#if ENABLE_LOOPBACK +err_t netif_loop_output(struct netif *netif, struct pbuf *p); +void netif_poll(struct netif *netif); +#if !LWIP_NETIF_LOOPBACK_MULTITHREADING +void netif_poll_all(void); +#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +err_t netif_input(struct pbuf *p, struct netif *inp); + +#if LWIP_IPV6 +/** @ingroup netif_ip6 */ +#define netif_ip_addr6(netif, i) ((const ip_addr_t*)(&((netif)->ip6_addr[i]))) +/** @ingroup netif_ip6 */ +#define netif_ip6_addr(netif, i) ((const ip6_addr_t*)ip_2_ip6(&((netif)->ip6_addr[i]))) +void netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6); +void netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3); +#define netif_ip6_addr_state(netif, i) ((netif)->ip6_addr_state[i]) +void netif_ip6_addr_set_state(struct netif* netif, s8_t addr_idx, u8_t state); +s8_t netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr); +void netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit); +err_t netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx); +#define netif_set_ip6_autoconfig_enabled(netif, action) do { if(netif) { (netif)->ip6_autoconfig_enabled = (action); }}while(0) +#if LWIP_IPV6_ADDRESS_LIFETIMES +#define netif_ip6_addr_valid_life(netif, i) \ + (((netif) != NULL) ? ((netif)->ip6_addr_valid_life[i]) : IP6_ADDR_LIFE_STATIC) +#define netif_ip6_addr_set_valid_life(netif, i, secs) \ + do { if (netif != NULL) { (netif)->ip6_addr_valid_life[i] = (secs); }} while (0) +#define netif_ip6_addr_pref_life(netif, i) \ + (((netif) != NULL) ? 
((netif)->ip6_addr_pref_life[i]) : IP6_ADDR_LIFE_STATIC) +#define netif_ip6_addr_set_pref_life(netif, i, secs) \ + do { if (netif != NULL) { (netif)->ip6_addr_pref_life[i] = (secs); }} while (0) +#define netif_ip6_addr_isstatic(netif, i) \ + (netif_ip6_addr_valid_life((netif), (i)) == IP6_ADDR_LIFE_STATIC) +#else /* !LWIP_IPV6_ADDRESS_LIFETIMES */ +#define netif_ip6_addr_isstatic(netif, i) (1) /* all addresses are static */ +#endif /* !LWIP_IPV6_ADDRESS_LIFETIMES */ +#if LWIP_ND6_ALLOW_RA_UPDATES +#define netif_mtu6(netif) ((netif)->mtu6) +#else /* LWIP_ND6_ALLOW_RA_UPDATES */ +#define netif_mtu6(netif) ((netif)->mtu) +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ +#endif /* LWIP_IPV6 */ + +#if LWIP_NETIF_USE_HINTS +#define NETIF_SET_HINTS(netif, netifhint) (netif)->hints = (netifhint) +#define NETIF_RESET_HINTS(netif) (netif)->hints = NULL +#else /* LWIP_NETIF_USE_HINTS */ +#define NETIF_SET_HINTS(netif, netifhint) +#define NETIF_RESET_HINTS(netif) +#endif /* LWIP_NETIF_USE_HINTS */ + +u8_t netif_name_to_index(const char *name); +char * netif_index_to_name(u8_t idx, char *name); +struct netif* netif_get_by_index(u8_t idx); + +/* Interface indexes always start at 1 per RFC 3493, section 4, num starts at 0 (internal index is 0..254)*/ +#define netif_get_index(netif) ((u8_t)((netif)->num + 1)) +#define NETIF_NO_INDEX (0) + +/** + * @ingroup netif + * Extended netif status callback (NSC) reasons flags. + * May be extended in the future! + */ +typedef u16_t netif_nsc_reason_t; + +/* used for initialization only */ +#define LWIP_NSC_NONE 0x0000 +/** netif was added. arg: NULL. Called AFTER netif was added. */ +#define LWIP_NSC_NETIF_ADDED 0x0001 +/** netif was removed. arg: NULL. Called BEFORE netif is removed. */ +#define LWIP_NSC_NETIF_REMOVED 0x0002 +/** link changed */ +#define LWIP_NSC_LINK_CHANGED 0x0004 +/** netif administrative status changed.\n + * up is called AFTER netif is set up.\n + * down is called BEFORE the netif is actually set down. */ +#define LWIP_NSC_STATUS_CHANGED 0x0008 +/** IPv4 address has changed */ +#define LWIP_NSC_IPV4_ADDRESS_CHANGED 0x0010 +/** IPv4 gateway has changed */ +#define LWIP_NSC_IPV4_GATEWAY_CHANGED 0x0020 +/** IPv4 netmask has changed */ +#define LWIP_NSC_IPV4_NETMASK_CHANGED 0x0040 +/** called AFTER IPv4 address/gateway/netmask changes have been applied */ +#define LWIP_NSC_IPV4_SETTINGS_CHANGED 0x0080 +/** IPv6 address was added */ +#define LWIP_NSC_IPV6_SET 0x0100 +/** IPv6 address state has changed */ +#define LWIP_NSC_IPV6_ADDR_STATE_CHANGED 0x0200 + +/** @ingroup netif + * Argument supplied to netif_ext_callback_fn. 
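
With LWIP_NETIF_EXT_STATUS_CALLBACK enabled, one callback can observe several of the reason flags above via the declaration/registration macros that follow. A sketch (my_ext_cb and my_ext_cb_handle are placeholders):

NETIF_DECLARE_EXT_CALLBACK(my_ext_cb_handle)

static void my_ext_cb(struct netif *netif, netif_nsc_reason_t reason,
                      const netif_ext_callback_args_t *args)
{
  if (reason & LWIP_NSC_LINK_CHANGED) {
    /* args->link_changed.state: 1 = link up, 0 = link down */
  }
  if (reason & LWIP_NSC_IPV4_SETTINGS_CHANGED) {
    /* an address/netmask/gateway update has just been applied */
  }
}

/* at startup: netif_add_ext_callback(&my_ext_cb_handle, my_ext_cb); */
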
+ */ +typedef union +{ + /** Args to LWIP_NSC_LINK_CHANGED callback */ + struct link_changed_s + { + /** 1: up; 0: down */ + u8_t state; + } link_changed; + /** Args to LWIP_NSC_STATUS_CHANGED callback */ + struct status_changed_s + { + /** 1: up; 0: down */ + u8_t state; + } status_changed; + /** Args to LWIP_NSC_IPV4_ADDRESS_CHANGED|LWIP_NSC_IPV4_GATEWAY_CHANGED|LWIP_NSC_IPV4_NETMASK_CHANGED|LWIP_NSC_IPV4_SETTINGS_CHANGED callback */ + struct ipv4_changed_s + { + /** Old IPv4 address */ + const ip_addr_t* old_address; + const ip_addr_t* old_netmask; + const ip_addr_t* old_gw; + } ipv4_changed; + /** Args to LWIP_NSC_IPV6_SET callback */ + struct ipv6_set_s + { + /** Index of changed IPv6 address */ + s8_t addr_index; + /** Old IPv6 address */ + const ip_addr_t* old_address; + } ipv6_set; + /** Args to LWIP_NSC_IPV6_ADDR_STATE_CHANGED callback */ + struct ipv6_addr_state_changed_s + { + /** Index of affected IPv6 address */ + s8_t addr_index; + /** Old IPv6 address state */ + u8_t old_state; + /** Affected IPv6 address */ + const ip_addr_t* address; + } ipv6_addr_state_changed; +} netif_ext_callback_args_t; + +/** + * @ingroup netif + * Function used for extended netif status callbacks + * Note: When parsing reason argument, keep in mind that more reasons may be added in the future! + * @param netif netif that is affected by change + * @param reason change reason + * @param args depends on reason, see reason description + */ +typedef void (*netif_ext_callback_fn)(struct netif* netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t* args); + +#if LWIP_NETIF_EXT_STATUS_CALLBACK +struct netif_ext_callback; +typedef struct netif_ext_callback +{ + netif_ext_callback_fn callback_fn; + struct netif_ext_callback* next; +} netif_ext_callback_t; + +#define NETIF_DECLARE_EXT_CALLBACK(name) static netif_ext_callback_t name; +void netif_add_ext_callback(netif_ext_callback_t* callback, netif_ext_callback_fn fn); +void netif_remove_ext_callback(netif_ext_callback_t* callback); +void netif_invoke_ext_callback(struct netif* netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t* args); +#else +#define NETIF_DECLARE_EXT_CALLBACK(name) +#define netif_add_ext_callback(callback, fn) +#define netif_remove_ext_callback(callback) +#define netif_invoke_ext_callback(netif, reason, args) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h b/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h new file mode 100644 index 00000000..e0631791 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h @@ -0,0 +1,161 @@ +/** + * @file + * netif API (to be used from non-TCPIP threads) + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_NETIFAPI_H +#define LWIP_HDR_NETIFAPI_H + +#include "lwip/opt.h" + +#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/sys.h" +#include "lwip/netif.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/priv/api_msg.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* API for application */ +#if LWIP_ARP && LWIP_IPV4 +/* Used for netfiapi_arp_* APIs */ +enum netifapi_arp_entry { + NETIFAPI_ARP_PERM /* Permanent entry */ + /* Other entry types can be added here */ +}; + +/** @ingroup netifapi_arp */ +err_t netifapi_arp_add(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, enum netifapi_arp_entry type); +/** @ingroup netifapi_arp */ +err_t netifapi_arp_remove(const ip4_addr_t *ipaddr, enum netifapi_arp_entry type); +#endif /* LWIP_ARP && LWIP_IPV4 */ + +err_t netifapi_netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input); + +#if LWIP_IPV4 +err_t netifapi_netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, + const ip4_addr_t *netmask, const ip4_addr_t *gw); +#endif /* LWIP_IPV4*/ + +err_t netifapi_netif_common(struct netif *netif, netifapi_void_fn voidfunc, + netifapi_errt_fn errtfunc); + +/** @ingroup netifapi_netif */ +err_t netifapi_netif_name_to_index(const char *name, u8_t *index); +/** @ingroup netifapi_netif */ +err_t netifapi_netif_index_to_name(u8_t index, char *name); + +/** @ingroup netifapi_netif + * @see netif_remove() + */ +#define netifapi_netif_remove(n) netifapi_netif_common(n, netif_remove, NULL) +/** @ingroup netifapi_netif + * @see netif_set_up() + */ +#define netifapi_netif_set_up(n) netifapi_netif_common(n, netif_set_up, NULL) +/** @ingroup netifapi_netif + * @see netif_set_down() + */ +#define netifapi_netif_set_down(n) netifapi_netif_common(n, netif_set_down, NULL) +/** @ingroup netifapi_netif + * @see netif_set_default() + */ +#define netifapi_netif_set_default(n) netifapi_netif_common(n, netif_set_default, NULL) +/** @ingroup netifapi_netif + * @see netif_set_link_up() + */ +#define netifapi_netif_set_link_up(n) netifapi_netif_common(n, netif_set_link_up, NULL) +/** @ingroup netifapi_netif + * @see netif_set_link_down() + */ +#define netifapi_netif_set_link_down(n) netifapi_netif_common(n, netif_set_link_down, NULL) + +/** + * @defgroup netifapi_dhcp4 DHCPv4 + * @ingroup netifapi + * To be called from non-TCPIP threads + */ +/** @ingroup netifapi_dhcp4 + * @see dhcp_start() + */ +#define netifapi_dhcp_start(n) netifapi_netif_common(n, NULL, dhcp_start) +/** + * @ingroup 
netifapi_dhcp4 + * @deprecated Use netifapi_dhcp_release_and_stop() instead. + */ +#define netifapi_dhcp_stop(n) netifapi_netif_common(n, dhcp_stop, NULL) +/** @ingroup netifapi_dhcp4 + * @see dhcp_inform() + */ +#define netifapi_dhcp_inform(n) netifapi_netif_common(n, dhcp_inform, NULL) +/** @ingroup netifapi_dhcp4 + * @see dhcp_renew() + */ +#define netifapi_dhcp_renew(n) netifapi_netif_common(n, NULL, dhcp_renew) +/** + * @ingroup netifapi_dhcp4 + * @deprecated Use netifapi_dhcp_release_and_stop() instead. + */ +#define netifapi_dhcp_release(n) netifapi_netif_common(n, NULL, dhcp_release) +/** @ingroup netifapi_dhcp4 + * @see dhcp_release_and_stop() + */ +#define netifapi_dhcp_release_and_stop(n) netifapi_netif_common(n, dhcp_release_and_stop, NULL) + +/** + * @defgroup netifapi_autoip AUTOIP + * @ingroup netifapi + * To be called from non-TCPIP threads + */ +/** @ingroup netifapi_autoip + * @see autoip_start() + */ +#define netifapi_autoip_start(n) netifapi_netif_common(n, NULL, autoip_start) +/** @ingroup netifapi_autoip + * @see autoip_stop() + */ +#define netifapi_autoip_stop(n) netifapi_netif_common(n, NULL, autoip_stop) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETIF_API */ + +#endif /* LWIP_HDR_NETIFAPI_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h b/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h new file mode 100644 index 00000000..82c420c1 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h @@ -0,0 +1,3519 @@ +/** + * @file + * + * lwIP Options Configuration + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* + * NOTE: || defined __DOXYGEN__ is a workaround for doxygen bug - + * without this, doxygen does not see the actual #define + */ + +#if !defined LWIP_HDR_OPT_H +#define LWIP_HDR_OPT_H + +/* + * Include user defined options first. Anything not defined in these files + * will be set to standard values. Override anything you don't like! 
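
As the note above says, project-specific settings belong in lwipopts.h and anything left undefined falls back to the defaults in this file. A small illustrative fragment (the values are examples only, not recommendations):

/* lwipopts.h (fragment) */
#define NO_SYS           0            /* run on top of an RTOS */
#define MEM_ALIGNMENT    4            /* 32-bit CPU */
#define MEM_SIZE         (10 * 1024)
#define LWIP_DHCP        1
#define TCP_MSS          1460
#define TCP_WND          (4 * TCP_MSS)
#define PBUF_POOL_SIZE   16
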
+ */ +#include "lwipopts.h" +#include "lwip/debug.h" + +/** + * @defgroup lwip_opts Options (lwipopts.h) + * @ingroup lwip + * + * @defgroup lwip_opts_debug Debugging + * @ingroup lwip_opts + * + * @defgroup lwip_opts_infrastructure Infrastructure + * @ingroup lwip_opts + * + * @defgroup lwip_opts_callback Callback-style APIs + * @ingroup lwip_opts + * + * @defgroup lwip_opts_threadsafe_apis Thread-safe APIs + * @ingroup lwip_opts + */ + + /* + ------------------------------------ + -------------- NO SYS -------------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_nosys NO_SYS + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * NO_SYS==1: Use lwIP without OS-awareness (no thread, semaphores, mutexes or + * mboxes). This means threaded APIs cannot be used (socket, netconn, + * i.e. everything in the 'api' folder), only the callback-style raw API is + * available (and you have to watch out for yourself that you don't access + * lwIP functions/structures from more than one context at a time!) + */ +#if !defined NO_SYS || defined __DOXYGEN__ +#define NO_SYS 0 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_timers Timers + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_TIMERS==0: Drop support for sys_timeout and lwip-internal cyclic timers. + * (the array of lwip-internal cyclic timers is still provided) + * (check NO_SYS_NO_TIMERS for compatibility to old versions) + */ +#if !defined LWIP_TIMERS || defined __DOXYGEN__ +#ifdef NO_SYS_NO_TIMERS +#define LWIP_TIMERS (!NO_SYS || (NO_SYS && !NO_SYS_NO_TIMERS)) +#else +#define LWIP_TIMERS 1 +#endif +#endif + +/** + * LWIP_TIMERS_CUSTOM==1: Provide your own timer implementation. + * Function prototypes in timeouts.h and the array of lwip-internal cyclic timers + * are still included, but the implementation is not. The following functions + * will be required: sys_timeouts_init(), sys_timeout(), sys_untimeout(), + * sys_timeouts_mbox_fetch() + */ +#if !defined LWIP_TIMERS_CUSTOM || defined __DOXYGEN__ +#define LWIP_TIMERS_CUSTOM 0 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_memcpy memcpy + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEMCPY: override this if you have a faster implementation at hand than the + * one included in your C library + */ +#if !defined MEMCPY || defined __DOXYGEN__ +#define MEMCPY(dst,src,len) memcpy(dst,src,len) +#endif + +/** + * SMEMCPY: override this with care! Some compilers (e.g. gcc) can inline a + * call to memcpy() if the length is known at compile time and is small. + */ +#if !defined SMEMCPY || defined __DOXYGEN__ +#define SMEMCPY(dst,src,len) memcpy(dst,src,len) +#endif + +/** + * MEMMOVE: override this if you have a faster implementation at hand than the + * one included in your C library. lwIP currently uses MEMMOVE only when IPv6 + * fragmentation support is enabled. 
+ */ +#if !defined MEMMOVE || defined __DOXYGEN__ +#define MEMMOVE(dst,src,len) memmove(dst,src,len) +#endif +/** + * @} + */ + +/* + ------------------------------------ + ----------- Core locking ----------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_lock Core locking and MPU + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_MPU_COMPATIBLE: enables special memory management mechanism + * which makes lwip able to work on MPU (Memory Protection Unit) system + * by not passing stack-pointers to other threads + * (this decreases performance as memory is allocated from pools instead + * of keeping it on the stack) + */ +#if !defined LWIP_MPU_COMPATIBLE || defined __DOXYGEN__ +#define LWIP_MPU_COMPATIBLE 0 +#endif + +/** + * LWIP_TCPIP_CORE_LOCKING + * Creates a global mutex that is held during TCPIP thread operations. + * Can be locked by client code to perform lwIP operations without changing + * into TCPIP thread using callbacks. See LOCK_TCPIP_CORE() and + * UNLOCK_TCPIP_CORE(). + * Your system should provide mutexes supporting priority inversion to use this. + */ +#if !defined LWIP_TCPIP_CORE_LOCKING || defined __DOXYGEN__ +#define LWIP_TCPIP_CORE_LOCKING 1 +#endif + +/** + * LWIP_TCPIP_CORE_LOCKING_INPUT: when LWIP_TCPIP_CORE_LOCKING is enabled, + * this lets tcpip_input() grab the mutex for input packets as well, + * instead of allocating a message and passing it to tcpip_thread. + * + * ATTENTION: this does not work when tcpip_input() is called from + * interrupt context! + */ +#if !defined LWIP_TCPIP_CORE_LOCKING_INPUT || defined __DOXYGEN__ +#define LWIP_TCPIP_CORE_LOCKING_INPUT 0 +#endif + +/** + * SYS_LIGHTWEIGHT_PROT==1: enable inter-task protection (and task-vs-interrupt + * protection) for certain critical regions during buffer allocation, deallocation + * and memory allocation and deallocation. + * ATTENTION: This is required when using lwIP from more than one context! If + * you disable this, you must be sure what you are doing! + */ +#if !defined SYS_LIGHTWEIGHT_PROT || defined __DOXYGEN__ +#define SYS_LIGHTWEIGHT_PROT 1 +#endif + +/** + * Macro/function to check whether lwIP's threading/locking + * requirements are satisfied during current function call. + * This macro usually calls a function that is implemented in the OS-dependent + * sys layer and performs the following checks: + * - Not in ISR (this should be checked for NO_SYS==1, too!) + * - If @ref LWIP_TCPIP_CORE_LOCKING = 1: TCPIP core lock is held + * - If @ref LWIP_TCPIP_CORE_LOCKING = 0: function is called from TCPIP thread + * @see @ref multithreading + */ +#if !defined LWIP_ASSERT_CORE_LOCKED || defined __DOXYGEN__ +#define LWIP_ASSERT_CORE_LOCKED() +#endif + +/** + * Called as first thing in the lwIP TCPIP thread. Can be used in conjunction + * with @ref LWIP_ASSERT_CORE_LOCKED to check core locking. + * @see @ref multithreading + */ +#if !defined LWIP_MARK_TCPIP_THREAD || defined __DOXYGEN__ +#define LWIP_MARK_TCPIP_THREAD() +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Memory options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_mem Heap and memory pools + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEM_LIBC_MALLOC==1: Use malloc/free/realloc provided by your C-library + * instead of the lwip internal allocator. Can save code size if you + * already use it. 
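
To make the LWIP_TCPIP_CORE_LOCKING option above concrete: an application thread may bracket raw-API calls with the core lock instead of posting a callback to the tcpip thread. A minimal sketch with error handling omitted (pcb, data and len are placeholders; LOCK_TCPIP_CORE()/UNLOCK_TCPIP_CORE() come from lwip/tcpip.h):

static void my_send(struct tcp_pcb *pcb, const void *data, u16_t len)
{
  LOCK_TCPIP_CORE();                   /* serialize with the tcpip thread */
  tcp_write(pcb, data, len, TCP_WRITE_FLAG_COPY);
  tcp_output(pcb);
  UNLOCK_TCPIP_CORE();
}
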
+ */ +#if !defined MEM_LIBC_MALLOC || defined __DOXYGEN__ +#define MEM_LIBC_MALLOC 0 +#endif + +/** + * MEMP_MEM_MALLOC==1: Use mem_malloc/mem_free instead of the lwip pool allocator. + * Especially useful with MEM_LIBC_MALLOC but handle with care regarding execution + * speed (heap alloc can be much slower than pool alloc) and usage from interrupts + * (especially if your netif driver allocates PBUF_POOL pbufs for received frames + * from interrupt)! + * ATTENTION: Currently, this uses the heap for ALL pools (also for private pools, + * not only for internal pools defined in memp_std.h)! + */ +#if !defined MEMP_MEM_MALLOC || defined __DOXYGEN__ +#define MEMP_MEM_MALLOC 0 +#endif + +/** + * MEMP_MEM_INIT==1: Force use of memset to initialize pool memory. + * Useful if pool are moved in uninitialized section of memory. This will ensure + * default values in pcbs struct are well initialized in all conditions. + */ +#if !defined MEMP_MEM_INIT || defined __DOXYGEN__ +#define MEMP_MEM_INIT 0 +#endif + +/** + * MEM_ALIGNMENT: should be set to the alignment of the CPU + * 4 byte alignment -> \#define MEM_ALIGNMENT 4 + * 2 byte alignment -> \#define MEM_ALIGNMENT 2 + */ +#if !defined MEM_ALIGNMENT || defined __DOXYGEN__ +#define MEM_ALIGNMENT 1 +#endif + +/** + * MEM_SIZE: the size of the heap memory. If the application will send + * a lot of data that needs to be copied, this should be set high. + */ +#if !defined MEM_SIZE || defined __DOXYGEN__ +#define MEM_SIZE 1600 +#endif + +/** + * MEMP_OVERFLOW_CHECK: memp overflow protection reserves a configurable + * amount of bytes before and after each memp element in every pool and fills + * it with a prominent default value. + * MEMP_OVERFLOW_CHECK == 0 no checking + * MEMP_OVERFLOW_CHECK == 1 checks each element when it is freed + * MEMP_OVERFLOW_CHECK >= 2 checks each element in every pool every time + * memp_malloc() or memp_free() is called (useful but slow!) + */ +#if !defined MEMP_OVERFLOW_CHECK || defined __DOXYGEN__ +#define MEMP_OVERFLOW_CHECK 0 +#endif + +/** + * MEMP_SANITY_CHECK==1: run a sanity check after each memp_free() to make + * sure that there are no cycles in the linked lists. + */ +#if !defined MEMP_SANITY_CHECK || defined __DOXYGEN__ +#define MEMP_SANITY_CHECK 0 +#endif + +/** + * MEM_OVERFLOW_CHECK: mem overflow protection reserves a configurable + * amount of bytes before and after each heap allocation chunk and fills + * it with a prominent default value. + * MEM_OVERFLOW_CHECK == 0 no checking + * MEM_OVERFLOW_CHECK == 1 checks each element when it is freed + * MEM_OVERFLOW_CHECK >= 2 checks all heap elements every time + * mem_malloc() or mem_free() is called (useful but slow!) + */ +#if !defined MEM_OVERFLOW_CHECK || defined __DOXYGEN__ +#define MEM_OVERFLOW_CHECK 0 +#endif + +/** + * MEM_SANITY_CHECK==1: run a sanity check after each mem_free() to make + * sure that the linked list of heap elements is not corrupted. + */ +#if !defined MEM_SANITY_CHECK || defined __DOXYGEN__ +#define MEM_SANITY_CHECK 0 +#endif + +/** + * MEM_USE_POOLS==1: Use an alternative to malloc() by allocating from a set + * of memory pools of various sizes. When mem_malloc is called, an element of + * the smallest pool that can provide the length needed is returned. + * To use this, MEMP_USE_CUSTOM_POOLS also has to be enabled. 
+ */ +#if !defined MEM_USE_POOLS || defined __DOXYGEN__ +#define MEM_USE_POOLS 0 +#endif + +/** + * MEM_USE_POOLS_TRY_BIGGER_POOL==1: if one malloc-pool is empty, try the next + * bigger pool - WARNING: THIS MIGHT WASTE MEMORY but it can make a system more + * reliable. */ +#if !defined MEM_USE_POOLS_TRY_BIGGER_POOL || defined __DOXYGEN__ +#define MEM_USE_POOLS_TRY_BIGGER_POOL 0 +#endif + +/** + * MEMP_USE_CUSTOM_POOLS==1: whether to include a user file lwippools.h + * that defines additional pools beyond the "standard" ones required + * by lwIP. If you set this to 1, you must have lwippools.h in your + * include path somewhere. + */ +#if !defined MEMP_USE_CUSTOM_POOLS || defined __DOXYGEN__ +#define MEMP_USE_CUSTOM_POOLS 0 +#endif + +/** + * Set this to 1 if you want to free PBUF_RAM pbufs (or call mem_free()) from + * interrupt context (or another context that doesn't allow waiting for a + * semaphore). + * If set to 1, mem_malloc will be protected by a semaphore and SYS_ARCH_PROTECT, + * while mem_free will only use SYS_ARCH_PROTECT. mem_malloc SYS_ARCH_UNPROTECTs + * with each loop so that mem_free can run. + * + * ATTENTION: As you can see from the above description, this leads to dis-/ + * enabling interrupts often, which can be slow! Also, on low memory, mem_malloc + * can need longer. + * + * If you don't want that, at least for NO_SYS=0, you can still use the following + * functions to enqueue a deallocation call which then runs in the tcpip_thread + * context: + * - pbuf_free_callback(p); + * - mem_free_callback(m); + */ +#if !defined LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT || defined __DOXYGEN__ +#define LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT 0 +#endif +/** + * @} + */ + +/* + ------------------------------------------------ + ---------- Internal Memory Pool Sizes ---------- + ------------------------------------------------ +*/ +/** + * @defgroup lwip_opts_memp Internal memory pools + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEMP_NUM_PBUF: the number of memp struct pbufs (used for PBUF_ROM and PBUF_REF). + * If the application sends a lot of data out of ROM (or other static memory), + * this should be set high. + */ +#if !defined MEMP_NUM_PBUF || defined __DOXYGEN__ +#define MEMP_NUM_PBUF 16 +#endif + +/** + * MEMP_NUM_RAW_PCB: Number of raw connection PCBs + * (requires the LWIP_RAW option) + */ +#if !defined MEMP_NUM_RAW_PCB || defined __DOXYGEN__ +#define MEMP_NUM_RAW_PCB 4 +#endif + +/** + * MEMP_NUM_UDP_PCB: the number of UDP protocol control blocks. One + * per active UDP "connection". + * (requires the LWIP_UDP option) + */ +#if !defined MEMP_NUM_UDP_PCB || defined __DOXYGEN__ +#define MEMP_NUM_UDP_PCB 4 +#endif + +/** + * MEMP_NUM_TCP_PCB: the number of simultaneously active TCP connections. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_PCB || defined __DOXYGEN__ +#define MEMP_NUM_TCP_PCB 5 +#endif + +/** + * MEMP_NUM_TCP_PCB_LISTEN: the number of listening TCP connections. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_PCB_LISTEN || defined __DOXYGEN__ +#define MEMP_NUM_TCP_PCB_LISTEN 8 +#endif + +/** + * MEMP_NUM_TCP_SEG: the number of simultaneously queued TCP segments. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_SEG || defined __DOXYGEN__ +#define MEMP_NUM_TCP_SEG 16 +#endif + +/** + * MEMP_NUM_ALTCP_PCB: the number of simultaneously active altcp layer pcbs. + * (requires the LWIP_ALTCP option) + * Connections with multiple layers require more than one altcp_pcb (e.g. 
TLS + * over TCP requires 2 altcp_pcbs, one for TLS and one for TCP). + */ +#if !defined MEMP_NUM_ALTCP_PCB || defined __DOXYGEN__ +#define MEMP_NUM_ALTCP_PCB MEMP_NUM_TCP_PCB +#endif + +/** + * MEMP_NUM_REASSDATA: the number of IP packets simultaneously queued for + * reassembly (whole packets, not fragments!) + */ +#if !defined MEMP_NUM_REASSDATA || defined __DOXYGEN__ +#define MEMP_NUM_REASSDATA 5 +#endif + +/** + * MEMP_NUM_FRAG_PBUF: the number of IP fragments simultaneously sent + * (fragments, not whole packets!). + * This is only used with LWIP_NETIF_TX_SINGLE_PBUF==0 and only has to be > 1 + * with DMA-enabled MACs where the packet is not yet sent when netif->output + * returns. + */ +#if !defined MEMP_NUM_FRAG_PBUF || defined __DOXYGEN__ +#define MEMP_NUM_FRAG_PBUF 15 +#endif + +/** + * MEMP_NUM_ARP_QUEUE: the number of simultaneously queued outgoing + * packets (pbufs) that are waiting for an ARP request (to resolve + * their destination address) to finish. + * (requires the ARP_QUEUEING option) + */ +#if !defined MEMP_NUM_ARP_QUEUE || defined __DOXYGEN__ +#define MEMP_NUM_ARP_QUEUE 30 +#endif + +/** + * MEMP_NUM_IGMP_GROUP: The number of multicast groups whose network interfaces + * can be members at the same time (one per netif - allsystems group -, plus one + * per netif membership). + * (requires the LWIP_IGMP option) + */ +#if !defined MEMP_NUM_IGMP_GROUP || defined __DOXYGEN__ +#define MEMP_NUM_IGMP_GROUP 8 +#endif + +/** + * The number of sys timeouts used by the core stack (not apps) + * The default number of timeouts is calculated here for all enabled modules. + */ +#define LWIP_NUM_SYS_TIMEOUT_INTERNAL (LWIP_TCP + IP_REASSEMBLY + LWIP_ARP + (2*LWIP_DHCP) + LWIP_AUTOIP + LWIP_IGMP + LWIP_DNS + PPP_NUM_TIMEOUTS + (LWIP_IPV6 * (1 + LWIP_IPV6_REASS + LWIP_IPV6_MLD))) + +/** + * MEMP_NUM_SYS_TIMEOUT: the number of simultaneously active timeouts. + * The default number of timeouts is calculated here for all enabled modules. + * The formula expects settings to be either '0' or '1'. + */ +#if !defined MEMP_NUM_SYS_TIMEOUT || defined __DOXYGEN__ +#define MEMP_NUM_SYS_TIMEOUT LWIP_NUM_SYS_TIMEOUT_INTERNAL +#endif + +/** + * MEMP_NUM_NETBUF: the number of struct netbufs. + * (only needed if you use the sequential API, like api_lib.c) + */ +#if !defined MEMP_NUM_NETBUF || defined __DOXYGEN__ +#define MEMP_NUM_NETBUF 2 +#endif + +/** + * MEMP_NUM_NETCONN: the number of struct netconns. + * (only needed if you use the sequential API, like api_lib.c) + */ +#if !defined MEMP_NUM_NETCONN || defined __DOXYGEN__ +#define MEMP_NUM_NETCONN 4 +#endif + +/** + * MEMP_NUM_SELECT_CB: the number of struct lwip_select_cb. + * (Only needed if you have LWIP_MPU_COMPATIBLE==1 and use the socket API. + * In that case, you need one per thread calling lwip_select.) + */ +#if !defined MEMP_NUM_SELECT_CB || defined __DOXYGEN__ +#define MEMP_NUM_SELECT_CB 4 +#endif + +/** + * MEMP_NUM_TCPIP_MSG_API: the number of struct tcpip_msg, which are used + * for callback/timeout API communication. + * (only needed if you use tcpip.c) + */ +#if !defined MEMP_NUM_TCPIP_MSG_API || defined __DOXYGEN__ +#define MEMP_NUM_TCPIP_MSG_API 8 +#endif + +/** + * MEMP_NUM_TCPIP_MSG_INPKT: the number of struct tcpip_msg, which are used + * for incoming packets. 
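
To make the LWIP_NUM_SYS_TIMEOUT_INTERNAL formula above concrete: for an IPv4-only build with TCP, IP reassembly, ARP and DHCP enabled and AUTOIP, IGMP, DNS, PPP and IPv6 disabled, it evaluates to 1 + 1 + 1 + (2*1) + 0 + 0 + 0 + 0 + 0 = 5, so MEMP_NUM_SYS_TIMEOUT defaults to 5 core timeouts in that configuration.
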
+ * (only needed if you use tcpip.c) + */ +#if !defined MEMP_NUM_TCPIP_MSG_INPKT || defined __DOXYGEN__ +#define MEMP_NUM_TCPIP_MSG_INPKT 8 +#endif + +/** + * MEMP_NUM_NETDB: the number of concurrently running lwip_addrinfo() calls + * (before freeing the corresponding memory using lwip_freeaddrinfo()). + */ +#if !defined MEMP_NUM_NETDB || defined __DOXYGEN__ +#define MEMP_NUM_NETDB 1 +#endif + +/** + * MEMP_NUM_LOCALHOSTLIST: the number of host entries in the local host list + * if DNS_LOCAL_HOSTLIST_IS_DYNAMIC==1. + */ +#if !defined MEMP_NUM_LOCALHOSTLIST || defined __DOXYGEN__ +#define MEMP_NUM_LOCALHOSTLIST 1 +#endif + +/** + * PBUF_POOL_SIZE: the number of buffers in the pbuf pool. + */ +#if !defined PBUF_POOL_SIZE || defined __DOXYGEN__ +#define PBUF_POOL_SIZE 16 +#endif + +/** MEMP_NUM_API_MSG: the number of concurrently active calls to various + * socket, netconn, and tcpip functions + */ +#if !defined MEMP_NUM_API_MSG || defined __DOXYGEN__ +#define MEMP_NUM_API_MSG MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_DNS_API_MSG: the number of concurrently active calls to netconn_gethostbyname + */ +#if !defined MEMP_NUM_DNS_API_MSG || defined __DOXYGEN__ +#define MEMP_NUM_DNS_API_MSG MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA: the number of concurrently active calls + * to getsockopt/setsockopt + */ +#if !defined MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA || defined __DOXYGEN__ +#define MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_NETIFAPI_MSG: the number of concurrently active calls to the + * netifapi functions + */ +#if !defined MEMP_NUM_NETIFAPI_MSG || defined __DOXYGEN__ +#define MEMP_NUM_NETIFAPI_MSG MEMP_NUM_TCPIP_MSG_API +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- ARP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_arp ARP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_ARP==1: Enable ARP functionality. + */ +#if !defined LWIP_ARP || defined __DOXYGEN__ +#define LWIP_ARP 1 +#endif + +/** + * ARP_TABLE_SIZE: Number of active MAC-IP address pairs cached. + */ +#if !defined ARP_TABLE_SIZE || defined __DOXYGEN__ +#define ARP_TABLE_SIZE 10 +#endif + +/** the time an ARP entry stays valid after its last update, + * for ARP_TMR_INTERVAL = 1000, this is + * (60 * 5) seconds = 5 minutes. + */ +#if !defined ARP_MAXAGE || defined __DOXYGEN__ +#define ARP_MAXAGE 300 +#endif + +/** + * ARP_QUEUEING==1: Multiple outgoing packets are queued during hardware address + * resolution. By default, only the most recent packet is queued per IP address. + * This is sufficient for most protocols and mainly reduces TCP connection + * startup time. Set this to 1 if you know your application sends more than one + * packet in a row to an IP address that is not in the ARP cache. + */ +#if !defined ARP_QUEUEING || defined __DOXYGEN__ +#define ARP_QUEUEING 0 +#endif + +/** The maximum number of packets which may be queued for each + * unresolved address by other network layers. Defaults to 3, 0 means disabled. + * Old packets are dropped, new packets are queued. + */ +#if !defined ARP_QUEUE_LEN || defined __DOXYGEN__ +#define ARP_QUEUE_LEN 3 +#endif + +/** + * ETHARP_SUPPORT_VLAN==1: support receiving and sending ethernet packets with + * VLAN header. See the description of LWIP_HOOK_VLAN_CHECK and + * LWIP_HOOK_VLAN_SET hooks to check/set VLAN headers. + * Additionally, you can define ETHARP_VLAN_CHECK to an u16_t VLAN ID to check. 
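
For example, a port that should only accept frames tagged for a single VLAN could add something like the following to lwipopts.h (VLAN ID 100 is arbitrary):

#define ETHARP_SUPPORT_VLAN  1
#define ETHARP_VLAN_CHECK    100   /* accept only traffic tagged with VLAN ID 100 */
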
+ * If ETHARP_VLAN_CHECK is defined, only VLAN-traffic for this VLAN is accepted. + * If ETHARP_VLAN_CHECK is not defined, all traffic is accepted. + * Alternatively, define a function/define ETHARP_VLAN_CHECK_FN(eth_hdr, vlan) + * that returns 1 to accept a packet or 0 to drop a packet. + */ +#if !defined ETHARP_SUPPORT_VLAN || defined __DOXYGEN__ +#define ETHARP_SUPPORT_VLAN 0 +#endif + +/** LWIP_ETHERNET==1: enable ethernet support even though ARP might be disabled + */ +#if !defined LWIP_ETHERNET || defined __DOXYGEN__ +#define LWIP_ETHERNET LWIP_ARP +#endif + +/** ETH_PAD_SIZE: number of bytes added before the ethernet header to ensure + * alignment of payload after that header. Since the header is 14 bytes long, + * without this padding e.g. addresses in the IP header will not be aligned + * on a 32-bit boundary, so setting this to 2 can speed up 32-bit-platforms. + */ +#if !defined ETH_PAD_SIZE || defined __DOXYGEN__ +#define ETH_PAD_SIZE 0 +#endif + +/** ETHARP_SUPPORT_STATIC_ENTRIES==1: enable code to support static ARP table + * entries (using etharp_add_static_entry/etharp_remove_static_entry). + */ +#if !defined ETHARP_SUPPORT_STATIC_ENTRIES || defined __DOXYGEN__ +#define ETHARP_SUPPORT_STATIC_ENTRIES 0 +#endif + +/** ETHARP_TABLE_MATCH_NETIF==1: Match netif for ARP table entries. + * If disabled, duplicate IP address on multiple netifs are not supported + * (but this should only occur for AutoIP). + */ +#if !defined ETHARP_TABLE_MATCH_NETIF || defined __DOXYGEN__ +#define ETHARP_TABLE_MATCH_NETIF !LWIP_SINGLE_NETIF +#endif +/** + * @} + */ + +/* + -------------------------------- + ---------- IP options ---------- + -------------------------------- +*/ +/** + * @defgroup lwip_opts_ipv4 IPv4 + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_IPV4==1: Enable IPv4 + */ +#if !defined LWIP_IPV4 || defined __DOXYGEN__ +#define LWIP_IPV4 1 +#endif + +/** + * IP_FORWARD==1: Enables the ability to forward IP packets across network + * interfaces. If you are going to run lwIP on a device with only one network + * interface, define this to 0. + */ +#if !defined IP_FORWARD || defined __DOXYGEN__ +#define IP_FORWARD 0 +#endif + +/** + * IP_REASSEMBLY==1: Reassemble incoming fragmented IP packets. Note that + * this option does not affect outgoing packet sizes, which can be controlled + * via IP_FRAG. + */ +#if !defined IP_REASSEMBLY || defined __DOXYGEN__ +#define IP_REASSEMBLY 1 +#endif + +/** + * IP_FRAG==1: Fragment outgoing IP packets if their size exceeds MTU. Note + * that this option does not affect incoming packet sizes, which can be + * controlled via IP_REASSEMBLY. + */ +#if !defined IP_FRAG || defined __DOXYGEN__ +#define IP_FRAG 1 +#endif + +#if !LWIP_IPV4 +/* disable IPv4 extensions when IPv4 is disabled */ +#undef IP_FORWARD +#define IP_FORWARD 0 +#undef IP_REASSEMBLY +#define IP_REASSEMBLY 0 +#undef IP_FRAG +#define IP_FRAG 0 +#endif /* !LWIP_IPV4 */ + +/** + * IP_OPTIONS_ALLOWED: Defines the behavior for IP options. + * IP_OPTIONS_ALLOWED==0: All packets with IP options are dropped. + * IP_OPTIONS_ALLOWED==1: IP options are allowed (but not parsed). + */ +#if !defined IP_OPTIONS_ALLOWED || defined __DOXYGEN__ +#define IP_OPTIONS_ALLOWED 1 +#endif + +/** + * IP_REASS_MAXAGE: Maximum time (in multiples of IP_TMR_INTERVAL - so seconds, normally) + * a fragmented IP packet waits for all fragments to arrive. If not all fragments arrived + * in this time, the whole packet is discarded. 
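
The ETHARP_SUPPORT_STATIC_ENTRIES option above enables etharp_add_static_entry()/etharp_remove_static_entry() from lwip/etharp.h. A hedged usage sketch (the addresses and function name are illustrative, and the return value check is omitted):

static void my_add_gateway_arp_entry(void)
{
  ip4_addr_t gw_ip;
  struct eth_addr gw_mac = {{0x00, 0x11, 0x22, 0x33, 0x44, 0x55}};
  IP4_ADDR(&gw_ip, 192, 168, 1, 1);
  etharp_add_static_entry(&gw_ip, &gw_mac);
}
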
+ */ +#if !defined IP_REASS_MAXAGE || defined __DOXYGEN__ +#define IP_REASS_MAXAGE 15 +#endif + +/** + * IP_REASS_MAX_PBUFS: Total maximum amount of pbufs waiting to be reassembled. + * Since the received pbufs are enqueued, be sure to configure + * PBUF_POOL_SIZE > IP_REASS_MAX_PBUFS so that the stack is still able to receive + * packets even if the maximum amount of fragments is enqueued for reassembly! + * When IPv4 *and* IPv6 are enabled, this even changes to + * (PBUF_POOL_SIZE > 2 * IP_REASS_MAX_PBUFS)! + */ +#if !defined IP_REASS_MAX_PBUFS || defined __DOXYGEN__ +#define IP_REASS_MAX_PBUFS 10 +#endif + +/** + * IP_DEFAULT_TTL: Default value for Time-To-Live used by transport layers. + */ +#if !defined IP_DEFAULT_TTL || defined __DOXYGEN__ +#define IP_DEFAULT_TTL 255 +#endif + +/** + * IP_SOF_BROADCAST=1: Use the SOF_BROADCAST field to enable broadcast + * filter per pcb on udp and raw send operations. To enable broadcast filter + * on recv operations, you also have to set IP_SOF_BROADCAST_RECV=1. + */ +#if !defined IP_SOF_BROADCAST || defined __DOXYGEN__ +#define IP_SOF_BROADCAST 0 +#endif + +/** + * IP_SOF_BROADCAST_RECV (requires IP_SOF_BROADCAST=1) enable the broadcast + * filter on recv operations. + */ +#if !defined IP_SOF_BROADCAST_RECV || defined __DOXYGEN__ +#define IP_SOF_BROADCAST_RECV 0 +#endif + +/** + * IP_FORWARD_ALLOW_TX_ON_RX_NETIF==1: allow ip_forward() to send packets back + * out on the netif where it was received. This should only be used for + * wireless networks. + * ATTENTION: When this is 1, make sure your netif driver correctly marks incoming + * link-layer-broadcast/multicast packets as such using the corresponding pbuf flags! + */ +#if !defined IP_FORWARD_ALLOW_TX_ON_RX_NETIF || defined __DOXYGEN__ +#define IP_FORWARD_ALLOW_TX_ON_RX_NETIF 0 +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- ICMP options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_icmp ICMP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_ICMP==1: Enable ICMP module inside the IP stack. + * Be careful, disable that make your product non-compliant to RFC1122 + */ +#if !defined LWIP_ICMP || defined __DOXYGEN__ +#define LWIP_ICMP 1 +#endif + +/** + * ICMP_TTL: Default value for Time-To-Live used by ICMP packets. + */ +#if !defined ICMP_TTL || defined __DOXYGEN__ +#define ICMP_TTL IP_DEFAULT_TTL +#endif + +/** + * LWIP_BROADCAST_PING==1: respond to broadcast pings (default is unicast only) + */ +#if !defined LWIP_BROADCAST_PING || defined __DOXYGEN__ +#define LWIP_BROADCAST_PING 0 +#endif + +/** + * LWIP_MULTICAST_PING==1: respond to multicast pings (default is unicast only) + */ +#if !defined LWIP_MULTICAST_PING || defined __DOXYGEN__ +#define LWIP_MULTICAST_PING 0 +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- RAW options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_raw RAW + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_RAW==1: Enable application layer to hook into the IP layer itself. + */ +#if !defined LWIP_RAW || defined __DOXYGEN__ +#define LWIP_RAW 0 +#endif + +/** + * LWIP_RAW==1: Enable application layer to hook into the IP layer itself. 
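
When LWIP_RAW is enabled as above, an application can register a protocol hook directly on the IP layer. A minimal ICMP sketch (my_ping_recv and my_raw_init are placeholders; the NULL check on the new pcb is omitted):

static u8_t my_ping_recv(void *arg, struct raw_pcb *pcb, struct pbuf *p,
                         const ip_addr_t *addr)
{
  /* return 1 if the packet was consumed (and freed here), 0 to pass it on */
  return 0;
}

static void my_raw_init(void)
{
  struct raw_pcb *pcb = raw_new(IP_PROTO_ICMP);   /* lwip/raw.h */
  raw_recv(pcb, my_ping_recv, NULL);
  raw_bind(pcb, IP_ADDR_ANY);
}
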
+ */ +#if !defined RAW_TTL || defined __DOXYGEN__ +#define RAW_TTL IP_DEFAULT_TTL +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- DHCP options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_dhcp DHCP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_DHCP==1: Enable DHCP module. + */ +#if !defined LWIP_DHCP || defined __DOXYGEN__ +#define LWIP_DHCP 0 +#endif +#if !LWIP_IPV4 +/* disable DHCP when IPv4 is disabled */ +#undef LWIP_DHCP +#define LWIP_DHCP 0 +#endif /* !LWIP_IPV4 */ + +/** + * DHCP_DOES_ARP_CHECK==1: Do an ARP check on the offered address. + */ +#if !defined DHCP_DOES_ARP_CHECK || defined __DOXYGEN__ +#define DHCP_DOES_ARP_CHECK (LWIP_DHCP && LWIP_ARP) +#endif + +/** + * LWIP_DHCP_BOOTP_FILE==1: Store offered_si_addr and boot_file_name. + */ +#if !defined LWIP_DHCP_BOOTP_FILE || defined __DOXYGEN__ +#define LWIP_DHCP_BOOTP_FILE 0 +#endif + +/** + * LWIP_DHCP_GETS_NTP==1: Request NTP servers with discover/select. For each + * response packet, an callback is called, which has to be provided by the port: + * void dhcp_set_ntp_servers(u8_t num_ntp_servers, ip_addr_t* ntp_server_addrs); +*/ +#if !defined LWIP_DHCP_GET_NTP_SRV || defined __DOXYGEN__ +#define LWIP_DHCP_GET_NTP_SRV 0 +#endif + +/** + * The maximum of NTP servers requested + */ +#if !defined LWIP_DHCP_MAX_NTP_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP_MAX_NTP_SERVERS 1 +#endif + +/** + * LWIP_DHCP_MAX_DNS_SERVERS > 0: Request DNS servers with discover/select. + * DNS servers received in the response are passed to DNS via @ref dns_setserver() + * (up to the maximum limit defined here). + */ +#if !defined LWIP_DHCP_MAX_DNS_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP_MAX_DNS_SERVERS DNS_MAX_SERVERS +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- AUTOIP options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_autoip AUTOIP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_AUTOIP==1: Enable AUTOIP module. + */ +#if !defined LWIP_AUTOIP || defined __DOXYGEN__ +#define LWIP_AUTOIP 0 +#endif +#if !LWIP_IPV4 +/* disable AUTOIP when IPv4 is disabled */ +#undef LWIP_AUTOIP +#define LWIP_AUTOIP 0 +#endif /* !LWIP_IPV4 */ + +/** + * LWIP_DHCP_AUTOIP_COOP==1: Allow DHCP and AUTOIP to be both enabled on + * the same interface at the same time. + */ +#if !defined LWIP_DHCP_AUTOIP_COOP || defined __DOXYGEN__ +#define LWIP_DHCP_AUTOIP_COOP 0 +#endif + +/** + * LWIP_DHCP_AUTOIP_COOP_TRIES: Set to the number of DHCP DISCOVER probes + * that should be sent before falling back on AUTOIP (the DHCP client keeps + * running in this case). This can be set as low as 1 to get an AutoIP address + * very quickly, but you should be prepared to handle a changing IP address + * when DHCP overrides AutoIP. + */ +#if !defined LWIP_DHCP_AUTOIP_COOP_TRIES || defined __DOXYGEN__ +#define LWIP_DHCP_AUTOIP_COOP_TRIES 9 +#endif +/** + * @} + */ + +/* + ---------------------------------- + ----- SNMP MIB2 support ----- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_mib2 SNMP MIB2 callbacks + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_MIB2_CALLBACKS==1: Turn on SNMP MIB2 callbacks. + * Turn this on to get callbacks needed to implement MIB2. + * Usually MIB2_STATS should be enabled, too. 
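
With LWIP_DHCP enabled above, address acquisition usually amounts to starting the client once the interface has been added and set up; lease progress can then be observed through the netif status callback. A two-line sketch (my_netif is a placeholder; dhcp_start() is declared in lwip/dhcp.h):

netif_set_up(&my_netif);
dhcp_start(&my_netif);
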
+ */ +#if !defined LWIP_MIB2_CALLBACKS || defined __DOXYGEN__ +#define LWIP_MIB2_CALLBACKS 0 +#endif +/** + * @} + */ + +/* + ---------------------------------- + -------- Multicast options ------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_multicast Multicast + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_MULTICAST_TX_OPTIONS==1: Enable multicast TX support like the socket options + * IP_MULTICAST_TTL/IP_MULTICAST_IF/IP_MULTICAST_LOOP, as well as (currently only) + * core support for the corresponding IPv6 options. + */ +#if !defined LWIP_MULTICAST_TX_OPTIONS || defined __DOXYGEN__ +#define LWIP_MULTICAST_TX_OPTIONS ((LWIP_IGMP || LWIP_IPV6_MLD) && (LWIP_UDP || LWIP_RAW)) +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- IGMP options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_igmp IGMP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_IGMP==1: Turn on IGMP module. + */ +#if !defined LWIP_IGMP || defined __DOXYGEN__ +#define LWIP_IGMP 0 +#endif +#if !LWIP_IPV4 +#undef LWIP_IGMP +#define LWIP_IGMP 0 +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- DNS options ----------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_dns DNS + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_DNS==1: Turn on DNS module. UDP must be available for DNS + * transport. + */ +#if !defined LWIP_DNS || defined __DOXYGEN__ +#define LWIP_DNS 0 +#endif + +/** DNS maximum number of entries to maintain locally. */ +#if !defined DNS_TABLE_SIZE || defined __DOXYGEN__ +#define DNS_TABLE_SIZE 4 +#endif + +/** DNS maximum host name length supported in the name table. */ +#if !defined DNS_MAX_NAME_LENGTH || defined __DOXYGEN__ +#define DNS_MAX_NAME_LENGTH 256 +#endif + +/** The maximum of DNS servers + * The first server can be initialized automatically by defining + * DNS_SERVER_ADDRESS(ipaddr), where 'ipaddr' is an 'ip_addr_t*' + */ +#if !defined DNS_MAX_SERVERS || defined __DOXYGEN__ +#define DNS_MAX_SERVERS 2 +#endif + +/** DNS maximum number of retries when asking for a name, before "timeout". */ +#if !defined DNS_MAX_RETRIES || defined __DOXYGEN__ +#define DNS_MAX_RETRIES 4 +#endif + +/** DNS do a name checking between the query and the response. */ +#if !defined DNS_DOES_NAME_CHECK || defined __DOXYGEN__ +#define DNS_DOES_NAME_CHECK 1 +#endif + +/** LWIP_DNS_SECURE: controls the security level of the DNS implementation + * Use all DNS security features by default. + * This is overridable but should only be needed by very small targets + * or when using against non standard DNS servers. */ +#if !defined LWIP_DNS_SECURE || defined __DOXYGEN__ +#define LWIP_DNS_SECURE (LWIP_DNS_SECURE_RAND_XID | LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT) +#endif + +/* A list of DNS security features follows */ +#define LWIP_DNS_SECURE_RAND_XID 1 +#define LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING 2 +#define LWIP_DNS_SECURE_RAND_SRC_PORT 4 + +/** DNS_LOCAL_HOSTLIST: Implements a local host-to-address list. 
If enabled, you have to define an initializer: + * \#define DNS_LOCAL_HOSTLIST_INIT {DNS_LOCAL_HOSTLIST_ELEM("host_ip4", IPADDR4_INIT_BYTES(1,2,3,4)), \ + * DNS_LOCAL_HOSTLIST_ELEM("host_ip6", IPADDR6_INIT_HOST(123, 234, 345, 456)} + * + * Instead, you can also use an external function: + * \#define DNS_LOOKUP_LOCAL_EXTERN(x) extern err_t my_lookup_function(const char *name, ip_addr_t *addr, u8_t dns_addrtype) + * that looks up the IP address and returns ERR_OK if found (LWIP_DNS_ADDRTYPE_xxx is passed in dns_addrtype). + */ +#if !defined DNS_LOCAL_HOSTLIST || defined __DOXYGEN__ +#define DNS_LOCAL_HOSTLIST 0 +#endif /* DNS_LOCAL_HOSTLIST */ + +/** If this is turned on, the local host-list can be dynamically changed + * at runtime. */ +#if !defined DNS_LOCAL_HOSTLIST_IS_DYNAMIC || defined __DOXYGEN__ +#define DNS_LOCAL_HOSTLIST_IS_DYNAMIC 0 +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +/** Set this to 1 to enable querying ".local" names via mDNS + * using a One-Shot Multicast DNS Query */ +#if !defined LWIP_DNS_SUPPORT_MDNS_QUERIES || defined __DOXYGEN__ +#define LWIP_DNS_SUPPORT_MDNS_QUERIES 0 +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- UDP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_udp UDP + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_UDP==1: Turn on UDP. + */ +#if !defined LWIP_UDP || defined __DOXYGEN__ +#define LWIP_UDP 1 +#endif + +/** + * LWIP_UDPLITE==1: Turn on UDP-Lite. (Requires LWIP_UDP) + */ +#if !defined LWIP_UDPLITE || defined __DOXYGEN__ +#define LWIP_UDPLITE 0 +#endif + +/** + * UDP_TTL: Default Time-To-Live value. + */ +#if !defined UDP_TTL || defined __DOXYGEN__ +#define UDP_TTL IP_DEFAULT_TTL +#endif + +/** + * LWIP_NETBUF_RECVINFO==1: append destination addr and port to every netbuf. + */ +#if !defined LWIP_NETBUF_RECVINFO || defined __DOXYGEN__ +#define LWIP_NETBUF_RECVINFO 0 +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- TCP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_tcp TCP + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_TCP==1: Turn on TCP. + */ +#if !defined LWIP_TCP || defined __DOXYGEN__ +#define LWIP_TCP 1 +#endif + +/** + * TCP_TTL: Default Time-To-Live value. + */ +#if !defined TCP_TTL || defined __DOXYGEN__ +#define TCP_TTL IP_DEFAULT_TTL +#endif + +/** + * TCP_WND: The size of a TCP window. This must be at least + * (2 * TCP_MSS) for things to work well. + * ATTENTION: when using TCP_RCV_SCALE, TCP_WND is the total size + * with scaling applied. Maximum window value in the TCP header + * will be TCP_WND >> TCP_RCV_SCALE + */ +#if !defined TCP_WND || defined __DOXYGEN__ +#define TCP_WND (4 * TCP_MSS) +#endif + +/** + * TCP_MAXRTX: Maximum number of retransmissions of data segments. + */ +#if !defined TCP_MAXRTX || defined __DOXYGEN__ +#define TCP_MAXRTX 12 +#endif + +/** + * TCP_SYNMAXRTX: Maximum number of retransmissions of SYN segments. + */ +#if !defined TCP_SYNMAXRTX || defined __DOXYGEN__ +#define TCP_SYNMAXRTX 6 +#endif + +/** + * TCP_QUEUE_OOSEQ==1: TCP will queue segments that arrive out of order. + * Define to 0 if your device is low on memory. + */ +#if !defined TCP_QUEUE_OOSEQ || defined __DOXYGEN__ +#define TCP_QUEUE_OOSEQ LWIP_TCP +#endif + +/** + * LWIP_TCP_SACK_OUT==1: TCP will support sending selective acknowledgements (SACKs). 
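+ *
+ * Illustrative sizing sketch (example values only; the 2*TCP_MSS minimum comes
+ * from the TCP_WND documentation above):
+ *   #define TCP_MSS           1460            // Ethernet-sized segments
+ *   #define TCP_WND           (4 * TCP_MSS)   // 5840 bytes, >= 2 * 1460 = 2920
+ *   #define LWIP_TCP_SACK_OUT 1               // also send selective ACKs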
+ */ +#if !defined LWIP_TCP_SACK_OUT || defined __DOXYGEN__ +#define LWIP_TCP_SACK_OUT 0 +#endif + +/** + * LWIP_TCP_MAX_SACK_NUM: The maximum number of SACK values to include in TCP segments. + * Must be at least 1, but is only used if LWIP_TCP_SACK_OUT is enabled. + * NOTE: Even though we never send more than 3 or 4 SACK ranges in a single segment + * (depending on other options), setting this option to values greater than 4 is not pointless. + * This is basically the max number of SACK ranges we want to keep track of. + * As new data is delivered, some of the SACK ranges may be removed or merged. + * In that case some of those older SACK ranges may be used again. + * The amount of memory used to store SACK ranges is LWIP_TCP_MAX_SACK_NUM * 8 bytes for each TCP PCB. + */ +#if !defined LWIP_TCP_MAX_SACK_NUM || defined __DOXYGEN__ +#define LWIP_TCP_MAX_SACK_NUM 4 +#endif + +/** + * TCP_MSS: TCP Maximum segment size. (default is 536, a conservative default, + * you might want to increase this.) + * For the receive side, this MSS is advertised to the remote side + * when opening a connection. For the transmit size, this MSS sets + * an upper limit on the MSS advertised by the remote host. + */ +#if !defined TCP_MSS || defined __DOXYGEN__ +#define TCP_MSS 536 +#endif + +/** + * TCP_CALCULATE_EFF_SEND_MSS: "The maximum size of a segment that TCP really + * sends, the 'effective send MSS,' MUST be the smaller of the send MSS (which + * reflects the available reassembly buffer size at the remote host) and the + * largest size permitted by the IP layer" (RFC 1122) + * Setting this to 1 enables code that checks TCP_MSS against the MTU of the + * netif used for a connection and limits the MSS if it would be too big otherwise. + */ +#if !defined TCP_CALCULATE_EFF_SEND_MSS || defined __DOXYGEN__ +#define TCP_CALCULATE_EFF_SEND_MSS 1 +#endif + + +/** + * TCP_SND_BUF: TCP sender buffer space (bytes). + * To achieve good performance, this should be at least 2 * TCP_MSS. + */ +#if !defined TCP_SND_BUF || defined __DOXYGEN__ +#define TCP_SND_BUF (2 * TCP_MSS) +#endif + +/** + * TCP_SND_QUEUELEN: TCP sender buffer space (pbufs). This must be at least + * as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. + */ +#if !defined TCP_SND_QUEUELEN || defined __DOXYGEN__ +#define TCP_SND_QUEUELEN ((4 * (TCP_SND_BUF) + (TCP_MSS - 1))/(TCP_MSS)) +#endif + +/** + * TCP_SNDLOWAT: TCP writable space (bytes). This must be less than + * TCP_SND_BUF. It is the amount of space which must be available in the + * TCP snd_buf for select to return writable (combined with TCP_SNDQUEUELOWAT). + */ +#if !defined TCP_SNDLOWAT || defined __DOXYGEN__ +#define TCP_SNDLOWAT LWIP_MIN(LWIP_MAX(((TCP_SND_BUF)/2), (2 * TCP_MSS) + 1), (TCP_SND_BUF) - 1) +#endif + +/** + * TCP_SNDQUEUELOWAT: TCP writable bufs (pbuf count). This must be less + * than TCP_SND_QUEUELEN. If the number of pbufs queued on a pcb drops below + * this number, select returns writable (combined with TCP_SNDLOWAT). + */ +#if !defined TCP_SNDQUEUELOWAT || defined __DOXYGEN__ +#define TCP_SNDQUEUELOWAT LWIP_MAX(((TCP_SND_QUEUELEN)/2), 5) +#endif + +/** + * TCP_OOSEQ_MAX_BYTES: The default maximum number of bytes queued on ooseq per + * pcb if TCP_OOSEQ_BYTES_LIMIT is not defined. Default is 0 (no limit). + * Only valid for TCP_QUEUE_OOSEQ==1. + */ +#if !defined TCP_OOSEQ_MAX_BYTES || defined __DOXYGEN__ +#define TCP_OOSEQ_MAX_BYTES 0 +#endif + +/** + * TCP_OOSEQ_BYTES_LIMIT(pcb): Return the maximum number of bytes to be queued + * on ooseq per pcb, given the pcb. 
Only valid for TCP_QUEUE_OOSEQ==1 && + * TCP_OOSEQ_MAX_BYTES==1. + * Use this to override TCP_OOSEQ_MAX_BYTES to a dynamic value per pcb. + */ +#if !defined TCP_OOSEQ_BYTES_LIMIT +#if TCP_OOSEQ_MAX_BYTES +#define TCP_OOSEQ_BYTES_LIMIT(pcb) TCP_OOSEQ_MAX_BYTES +#elif defined __DOXYGEN__ +#define TCP_OOSEQ_BYTES_LIMIT(pcb) +#endif +#endif + +/** + * TCP_OOSEQ_MAX_PBUFS: The default maximum number of pbufs queued on ooseq per + * pcb if TCP_OOSEQ_BYTES_LIMIT is not defined. Default is 0 (no limit). + * Only valid for TCP_QUEUE_OOSEQ==1. + */ +#if !defined TCP_OOSEQ_MAX_PBUFS || defined __DOXYGEN__ +#define TCP_OOSEQ_MAX_PBUFS 0 +#endif + +/** + * TCP_OOSEQ_PBUFS_LIMIT(pcb): Return the maximum number of pbufs to be queued + * on ooseq per pcb, given the pcb. Only valid for TCP_QUEUE_OOSEQ==1 && + * TCP_OOSEQ_MAX_PBUFS==1. + * Use this to override TCP_OOSEQ_MAX_PBUFS to a dynamic value per pcb. + */ +#if !defined TCP_OOSEQ_PBUFS_LIMIT +#if TCP_OOSEQ_MAX_PBUFS +#define TCP_OOSEQ_PBUFS_LIMIT(pcb) TCP_OOSEQ_MAX_PBUFS +#elif defined __DOXYGEN__ +#define TCP_OOSEQ_PBUFS_LIMIT(pcb) +#endif +#endif + +/** + * TCP_LISTEN_BACKLOG: Enable the backlog option for tcp listen pcb. + */ +#if !defined TCP_LISTEN_BACKLOG || defined __DOXYGEN__ +#define TCP_LISTEN_BACKLOG 0 +#endif + +/** + * The maximum allowed backlog for TCP listen netconns. + * This backlog is used unless another is explicitly specified. + * 0xff is the maximum (u8_t). + */ +#if !defined TCP_DEFAULT_LISTEN_BACKLOG || defined __DOXYGEN__ +#define TCP_DEFAULT_LISTEN_BACKLOG 0xff +#endif + +/** + * TCP_OVERSIZE: The maximum number of bytes that tcp_write may + * allocate ahead of time in an attempt to create shorter pbuf chains + * for transmission. The meaningful range is 0 to TCP_MSS. Some + * suggested values are: + * + * 0: Disable oversized allocation. Each tcp_write() allocates a new + pbuf (old behaviour). + * 1: Allocate size-aligned pbufs with minimal excess. Use this if your + * scatter-gather DMA requires aligned fragments. + * 128: Limit the pbuf/memory overhead to 20%. + * TCP_MSS: Try to create unfragmented TCP packets. + * TCP_MSS/4: Try to create 4 fragments or less per TCP packet. + */ +#if !defined TCP_OVERSIZE || defined __DOXYGEN__ +#define TCP_OVERSIZE TCP_MSS +#endif + +/** + * LWIP_TCP_TIMESTAMPS==1: support the TCP timestamp option. + * The timestamp option is currently only used to help remote hosts, it is not + * really used locally. Therefore, it is only enabled when a TS option is + * received in the initial SYN packet from a remote host. + */ +#if !defined LWIP_TCP_TIMESTAMPS || defined __DOXYGEN__ +#define LWIP_TCP_TIMESTAMPS 0 +#endif + +/** + * TCP_WND_UPDATE_THRESHOLD: difference in window to trigger an + * explicit window update + */ +#if !defined TCP_WND_UPDATE_THRESHOLD || defined __DOXYGEN__ +#define TCP_WND_UPDATE_THRESHOLD LWIP_MIN((TCP_WND / 4), (TCP_MSS * 4)) +#endif + +/** + * LWIP_EVENT_API and LWIP_CALLBACK_API: Only one of these should be set to 1. + * LWIP_EVENT_API==1: The user defines lwip_tcp_event() to receive all + * events (accept, sent, etc) that happen in the system. + * LWIP_CALLBACK_API==1: The PCB callback function is called directly + * for the event. This is the default. 
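+ *
+ * Illustrative sketch of the default callback style (example code; my_recv and
+ * pcb are assumed names, the tcp_recv()/tcp_recved() calls are lwIP's raw API):
+ *   static err_t my_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
+ *   {
+ *     if (p == NULL) {            // remote side closed the connection
+ *       return tcp_close(tpcb);
+ *     }
+ *     tcp_recved(tpcb, p->tot_len);  // re-open the receive window
+ *     pbuf_free(p);
+ *     return ERR_OK;
+ *   }
+ *   ...
+ *   tcp_recv(pcb, my_recv);       // register the callback (LWIP_CALLBACK_API==1)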
+ */ +#if !defined(LWIP_EVENT_API) && !defined(LWIP_CALLBACK_API) || defined __DOXYGEN__ +#define LWIP_EVENT_API 0 +#define LWIP_CALLBACK_API 1 +#else +#ifndef LWIP_EVENT_API +#define LWIP_EVENT_API 0 +#endif +#ifndef LWIP_CALLBACK_API +#define LWIP_CALLBACK_API 0 +#endif +#endif + +/** + * LWIP_WND_SCALE and TCP_RCV_SCALE: + * Set LWIP_WND_SCALE to 1 to enable window scaling. + * Set TCP_RCV_SCALE to the desired scaling factor (shift count in the + * range of [0..14]). + * When LWIP_WND_SCALE is enabled but TCP_RCV_SCALE is 0, we can use a large + * send window while having a small receive window only. + */ +#if !defined LWIP_WND_SCALE || defined __DOXYGEN__ +#define LWIP_WND_SCALE 0 +#define TCP_RCV_SCALE 0 +#endif + +/** + * LWIP_TCP_PCB_NUM_EXT_ARGS: + * When this is > 0, every tcp pcb (including listen pcb) includes a number of + * additional argument entries in an array (see tcp_ext_arg_alloc_id) + */ +#if !defined LWIP_TCP_PCB_NUM_EXT_ARGS || defined __DOXYGEN__ +#define LWIP_TCP_PCB_NUM_EXT_ARGS 0 +#endif + +/** LWIP_ALTCP==1: enable the altcp API. + * altcp is an abstraction layer that prevents applications linking against the + * tcp.h functions but provides the same functionality. It is used to e.g. add + * SSL/TLS or proxy-connect support to an application written for the tcp callback + * API without that application knowing the protocol details. + * + * With LWIP_ALTCP==0, applications written against the altcp API can still be + * compiled but are directly linked against the tcp.h callback API and then + * cannot use layered protocols. + * + * See @ref altcp_api + */ +#if !defined LWIP_ALTCP || defined __DOXYGEN__ +#define LWIP_ALTCP 0 +#endif + +/** LWIP_ALTCP_TLS==1: enable TLS support for altcp API. + * This needs a port of the functions in altcp_tls.h to a TLS library. + * A port to ARM mbedtls is provided with lwIP, see apps/altcp_tls/ directory + * and LWIP_ALTCP_TLS_MBEDTLS option. + */ +#if !defined LWIP_ALTCP_TLS || defined __DOXYGEN__ +#define LWIP_ALTCP_TLS 0 +#endif + +/** + * @} + */ + +/* + ---------------------------------- + ---------- Pbuf options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_pbuf PBUF + * @ingroup lwip_opts + * @{ + */ +/** + * PBUF_LINK_HLEN: the number of bytes that should be allocated for a + * link level header. The default is 14, the standard value for + * Ethernet. + */ +#if !defined PBUF_LINK_HLEN || defined __DOXYGEN__ +#if defined LWIP_HOOK_VLAN_SET && !defined __DOXYGEN__ +#define PBUF_LINK_HLEN (18 + ETH_PAD_SIZE) +#else /* LWIP_HOOK_VLAN_SET */ +#define PBUF_LINK_HLEN (14 + ETH_PAD_SIZE) +#endif /* LWIP_HOOK_VLAN_SET */ +#endif + +/** + * PBUF_LINK_ENCAPSULATION_HLEN: the number of bytes that should be allocated + * for an additional encapsulation header before ethernet headers (e.g. 802.11) + */ +#if !defined PBUF_LINK_ENCAPSULATION_HLEN || defined __DOXYGEN__ +#define PBUF_LINK_ENCAPSULATION_HLEN 0 +#endif + +/** + * PBUF_POOL_BUFSIZE: the size of each pbuf in the pbuf pool. The default is + * designed to accommodate single full size TCP frame in one pbuf, including + * TCP_MSS, IP header, and link header. + */ +#if !defined PBUF_POOL_BUFSIZE || defined __DOXYGEN__ +#define PBUF_POOL_BUFSIZE LWIP_MEM_ALIGN_SIZE(TCP_MSS+40+PBUF_LINK_ENCAPSULATION_HLEN+PBUF_LINK_HLEN) +#endif + +/** + * LWIP_PBUF_REF_T: Refcount type in pbuf. + * Default width of u8_t can be increased if 255 refs are not enough for you. 
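+ *
+ * Illustrative arithmetic for the PBUF_POOL_BUFSIZE default above (example
+ * numbers, assuming TCP_MSS==1460, ETH_PAD_SIZE==0 and no extra encapsulation):
+ *   1460 (TCP_MSS) + 40 (IP+TCP headers) + 0 + 14 (Ethernet header) = 1514 bytes,
+ *   which LWIP_MEM_ALIGN_SIZE() then rounds up to the platform alignment.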
+ */ +#if !defined LWIP_PBUF_REF_T || defined __DOXYGEN__ +#define LWIP_PBUF_REF_T u8_t +#endif +/** + * @} + */ + +/* + ------------------------------------------------ + ---------- Network Interfaces options ---------- + ------------------------------------------------ +*/ +/** + * @defgroup lwip_opts_netif NETIF + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_SINGLE_NETIF==1: use a single netif only. This is the common case for + * small real-life targets. Some code like routing etc. can be left out. + */ +#if !defined LWIP_SINGLE_NETIF || defined __DOXYGEN__ +#define LWIP_SINGLE_NETIF 0 +#endif + +/** + * LWIP_NETIF_HOSTNAME==1: use DHCP_OPTION_HOSTNAME with netif's hostname + * field. + */ +#if !defined LWIP_NETIF_HOSTNAME || defined __DOXYGEN__ +#define LWIP_NETIF_HOSTNAME 0 +#endif + +/** + * LWIP_NETIF_API==1: Support netif api (in netifapi.c) + */ +#if !defined LWIP_NETIF_API || defined __DOXYGEN__ +#define LWIP_NETIF_API 0 +#endif + +/** + * LWIP_NETIF_STATUS_CALLBACK==1: Support a callback function whenever an interface + * changes its up/down status (i.e., due to DHCP IP acquisition) + */ +#if !defined LWIP_NETIF_STATUS_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_STATUS_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_EXT_STATUS_CALLBACK==1: Support an extended callback function + * for several netif related event that supports multiple subscribers. + * @see netif_ext_status_callback + */ +#if !defined LWIP_NETIF_EXT_STATUS_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_EXT_STATUS_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_LINK_CALLBACK==1: Support a callback function from an interface + * whenever the link changes (i.e., link down) + */ +#if !defined LWIP_NETIF_LINK_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_LINK_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_REMOVE_CALLBACK==1: Support a callback function that is called + * when a netif has been removed + */ +#if !defined LWIP_NETIF_REMOVE_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_REMOVE_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_HWADDRHINT==1: Cache link-layer-address hints (e.g. table + * indices) in struct netif. TCP and UDP can make use of this to prevent + * scanning the ARP table for every sent packet. While this is faster for big + * ARP tables or many concurrent connections, it might be counterproductive + * if you have a tiny ARP table or if there never are concurrent connections. + */ +#if !defined LWIP_NETIF_HWADDRHINT || defined __DOXYGEN__ +#define LWIP_NETIF_HWADDRHINT 0 +#endif + +/** + * LWIP_NETIF_TX_SINGLE_PBUF: if this is set to 1, lwIP *tries* to put all data + * to be sent into one single pbuf. This is for compatibility with DMA-enabled + * MACs that do not support scatter-gather. + * Beware that this might involve CPU-memcpy before transmitting that would not + * be needed without this flag! Use this only if you need to! + * + * ATTENTION: a driver should *NOT* rely on getting single pbufs but check TX + * pbufs for being in one piece. If not, @ref pbuf_clone can be used to get + * a single pbuf: + * if (p->next != NULL) { + * struct pbuf *q = pbuf_clone(PBUF_RAW, PBUF_RAM, p); + * if (q == NULL) { + * return ERR_MEM; + * } + * p = q; ATTENTION: do NOT free the old 'p' as the ref belongs to the caller! + * } + */ +#if !defined LWIP_NETIF_TX_SINGLE_PBUF || defined __DOXYGEN__ +#define LWIP_NETIF_TX_SINGLE_PBUF 0 +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * LWIP_NUM_NETIF_CLIENT_DATA: Number of clients that may store + * data in client_data member array of struct netif (max. 256). 
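+ *
+ * Illustrative sketch for the netif callback options above (example code;
+ * my_status_cb and my_netif are assumed names, the API calls are lwIP's and
+ * require LWIP_NETIF_STATUS_CALLBACK==1):
+ *   static void my_status_cb(struct netif *netif)
+ *   {
+ *     if (netif_is_up(netif)) {
+ *       // e.g. the DHCP-assigned address is now valid in netif->ip_addr
+ *     }
+ *   }
+ *   netif_set_status_callback(&my_netif, my_status_cb);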
+ */ +#if !defined LWIP_NUM_NETIF_CLIENT_DATA || defined __DOXYGEN__ +#define LWIP_NUM_NETIF_CLIENT_DATA 0 +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- LOOPIF options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_loop Loopback interface + * @ingroup lwip_opts_netif + * @{ + */ +/** + * LWIP_HAVE_LOOPIF==1: Support loop interface (127.0.0.1). + * This is only needed when no real netifs are available. If at least one other + * netif is available, loopback traffic uses this netif. + */ +#if !defined LWIP_HAVE_LOOPIF || defined __DOXYGEN__ +#define LWIP_HAVE_LOOPIF (LWIP_NETIF_LOOPBACK && !LWIP_SINGLE_NETIF) +#endif + +/** + * LWIP_LOOPIF_MULTICAST==1: Support multicast/IGMP on loop interface (127.0.0.1). + */ +#if !defined LWIP_LOOPIF_MULTICAST || defined __DOXYGEN__ +#define LWIP_LOOPIF_MULTICAST 0 +#endif + +/** + * LWIP_NETIF_LOOPBACK==1: Support sending packets with a destination IP + * address equal to the netif IP address, looping them back up the stack. + */ +#if !defined LWIP_NETIF_LOOPBACK || defined __DOXYGEN__ +#define LWIP_NETIF_LOOPBACK 0 +#endif + +/** + * LWIP_LOOPBACK_MAX_PBUFS: Maximum number of pbufs on queue for loopback + * sending for each netif (0 = disabled) + */ +#if !defined LWIP_LOOPBACK_MAX_PBUFS || defined __DOXYGEN__ +#define LWIP_LOOPBACK_MAX_PBUFS 0 +#endif + +/** + * LWIP_NETIF_LOOPBACK_MULTITHREADING: Indicates whether threading is enabled in + * the system, as netifs must change how they behave depending on this setting + * for the LWIP_NETIF_LOOPBACK option to work. + * Setting this is needed to avoid reentering non-reentrant functions like + * tcp_input(). + * LWIP_NETIF_LOOPBACK_MULTITHREADING==1: Indicates that the user is using a + * multithreaded environment like tcpip.c. In this case, netif->input() + * is called directly. + * LWIP_NETIF_LOOPBACK_MULTITHREADING==0: Indicates a polling (or NO_SYS) setup. + * The packets are put on a list and netif_poll() must be called in + * the main application loop. + */ +#if !defined LWIP_NETIF_LOOPBACK_MULTITHREADING || defined __DOXYGEN__ +#define LWIP_NETIF_LOOPBACK_MULTITHREADING (!NO_SYS) +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Thread options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_thread Threading + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * TCPIP_THREAD_NAME: The name assigned to the main tcpip thread. + */ +#if !defined TCPIP_THREAD_NAME || defined __DOXYGEN__ +#define TCPIP_THREAD_NAME "tcpip_thread" +#endif + +/** + * TCPIP_THREAD_STACKSIZE: The stack size used by the main tcpip thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined TCPIP_THREAD_STACKSIZE || defined __DOXYGEN__ +#define TCPIP_THREAD_STACKSIZE 0 +#endif + +/** + * TCPIP_THREAD_PRIO: The priority assigned to the main tcpip thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined TCPIP_THREAD_PRIO || defined __DOXYGEN__ +#define TCPIP_THREAD_PRIO 1 +#endif + +/** + * TCPIP_MBOX_SIZE: The mailbox size for the tcpip thread messages + * The queue size value itself is platform-dependent, but is passed to + * sys_mbox_new() when tcpip_init is called. 
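+ *
+ * Illustrative lwipopts.h sketch for an RTOS port (example values only; stack
+ * size, priority and mailbox size depend entirely on the underlying sys_arch):
+ *   #define TCPIP_THREAD_NAME       "tcpip_thread"
+ *   #define TCPIP_THREAD_STACKSIZE  1024
+ *   #define TCPIP_THREAD_PRIO       3
+ *   #define TCPIP_MBOX_SIZE         8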
+ */ +#if !defined TCPIP_MBOX_SIZE || defined __DOXYGEN__ +#define TCPIP_MBOX_SIZE 0 +#endif + +/** + * Define this to something that triggers a watchdog. This is called from + * tcpip_thread after processing a message. + */ +#if !defined LWIP_TCPIP_THREAD_ALIVE || defined __DOXYGEN__ +#define LWIP_TCPIP_THREAD_ALIVE() +#endif + +/** + * SLIPIF_THREAD_NAME: The name assigned to the slipif_loop thread. + */ +#if !defined SLIPIF_THREAD_NAME || defined __DOXYGEN__ +#define SLIPIF_THREAD_NAME "slipif_loop" +#endif + +/** + * SLIP_THREAD_STACKSIZE: The stack size used by the slipif_loop thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined SLIPIF_THREAD_STACKSIZE || defined __DOXYGEN__ +#define SLIPIF_THREAD_STACKSIZE 0 +#endif + +/** + * SLIPIF_THREAD_PRIO: The priority assigned to the slipif_loop thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined SLIPIF_THREAD_PRIO || defined __DOXYGEN__ +#define SLIPIF_THREAD_PRIO 1 +#endif + +/** + * DEFAULT_THREAD_NAME: The name assigned to any other lwIP thread. + */ +#if !defined DEFAULT_THREAD_NAME || defined __DOXYGEN__ +#define DEFAULT_THREAD_NAME "lwIP" +#endif + +/** + * DEFAULT_THREAD_STACKSIZE: The stack size used by any other lwIP thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined DEFAULT_THREAD_STACKSIZE || defined __DOXYGEN__ +#define DEFAULT_THREAD_STACKSIZE 0 +#endif + +/** + * DEFAULT_THREAD_PRIO: The priority assigned to any other lwIP thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined DEFAULT_THREAD_PRIO || defined __DOXYGEN__ +#define DEFAULT_THREAD_PRIO 1 +#endif + +/** + * DEFAULT_RAW_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_RAW. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_RAW_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_RAW_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_UDP_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_UDP. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_UDP_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_UDP_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_TCP_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_TCP. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_TCP_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_TCP_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_ACCEPTMBOX_SIZE: The mailbox size for the incoming connections. + * The queue size value itself is platform-dependent, but is passed to + * sys_mbox_new() when the acceptmbox is created. 
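+ *
+ * Illustrative mailbox sizing sketch (example values; each value is handed
+ * unchanged to sys_mbox_new(), so its exact meaning is port-defined):
+ *   #define DEFAULT_UDP_RECVMBOX_SIZE  8
+ *   #define DEFAULT_TCP_RECVMBOX_SIZE  8
+ *   #define DEFAULT_ACCEPTMBOX_SIZE    4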
+ */ +#if !defined DEFAULT_ACCEPTMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_ACCEPTMBOX_SIZE 0 +#endif +/** + * @} + */ + +/* + ---------------------------------------------- + ---------- Sequential layer options ---------- + ---------------------------------------------- +*/ +/** + * @defgroup lwip_opts_netconn Netconn + * @ingroup lwip_opts_threadsafe_apis + * @{ + */ +/** + * LWIP_NETCONN==1: Enable Netconn API (require to use api_lib.c) + */ +#if !defined LWIP_NETCONN || defined __DOXYGEN__ +#define LWIP_NETCONN 1 +#endif + +/** LWIP_TCPIP_TIMEOUT==1: Enable tcpip_timeout/tcpip_untimeout to create + * timers running in tcpip_thread from another thread. + */ +#if !defined LWIP_TCPIP_TIMEOUT || defined __DOXYGEN__ +#define LWIP_TCPIP_TIMEOUT 0 +#endif + +/** LWIP_NETCONN_SEM_PER_THREAD==1: Use one (thread-local) semaphore per + * thread calling socket/netconn functions instead of allocating one + * semaphore per netconn (and per select etc.) + * ATTENTION: a thread-local semaphore for API calls is needed: + * - LWIP_NETCONN_THREAD_SEM_GET() returning a sys_sem_t* + * - LWIP_NETCONN_THREAD_SEM_ALLOC() creating the semaphore + * - LWIP_NETCONN_THREAD_SEM_FREE() freeing the semaphore + * The latter 2 can be invoked up by calling netconn_thread_init()/netconn_thread_cleanup(). + * Ports may call these for threads created with sys_thread_new(). + */ +#if !defined LWIP_NETCONN_SEM_PER_THREAD || defined __DOXYGEN__ +#define LWIP_NETCONN_SEM_PER_THREAD 0 +#endif + +/** LWIP_NETCONN_FULLDUPLEX==1: Enable code that allows reading from one thread, + * writing from a 2nd thread and closing from a 3rd thread at the same time. + * ATTENTION: This is currently really alpha! Some requirements: + * - LWIP_NETCONN_SEM_PER_THREAD==1 is required to use one socket/netconn from + * multiple threads at once + * - sys_mbox_free() has to unblock receive tasks waiting on recvmbox/acceptmbox + * and prevent a task pending on this during/after deletion + */ +#if !defined LWIP_NETCONN_FULLDUPLEX || defined __DOXYGEN__ +#define LWIP_NETCONN_FULLDUPLEX 0 +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Socket options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_socket Sockets + * @ingroup lwip_opts_threadsafe_apis + * @{ + */ +/** + * LWIP_SOCKET==1: Enable Socket API (require to use sockets.c) + */ +#if !defined LWIP_SOCKET || defined __DOXYGEN__ +#define LWIP_SOCKET 1 +#endif + +/** + * LWIP_COMPAT_SOCKETS==1: Enable BSD-style sockets functions names through defines. + * LWIP_COMPAT_SOCKETS==2: Same as ==1 but correctly named functions are created. + * While this helps code completion, it might conflict with existing libraries. + * (only used if you use sockets.c) + */ +#if !defined LWIP_COMPAT_SOCKETS || defined __DOXYGEN__ +#define LWIP_COMPAT_SOCKETS 1 +#endif + +/** + * LWIP_POSIX_SOCKETS_IO_NAMES==1: Enable POSIX-style sockets functions names. + * Disable this option if you use a POSIX operating system that uses the same + * names (read, write & close). (only used if you use sockets.c) + */ +#if !defined LWIP_POSIX_SOCKETS_IO_NAMES || defined __DOXYGEN__ +#define LWIP_POSIX_SOCKETS_IO_NAMES 1 +#endif + +/** + * LWIP_SOCKET_OFFSET==n: Increases the file descriptor number created by LwIP with n. + * This can be useful when there are multiple APIs which create file descriptors. 
+ * When they all start with a different offset and you won't make them overlap you can + * re implement read/write/close/ioctl/fnctl to send the requested action to the right + * library (sharing select will need more work though). + */ +#if !defined LWIP_SOCKET_OFFSET || defined __DOXYGEN__ +#define LWIP_SOCKET_OFFSET 0 +#endif + +/** + * LWIP_TCP_KEEPALIVE==1: Enable TCP_KEEPIDLE, TCP_KEEPINTVL and TCP_KEEPCNT + * options processing. Note that TCP_KEEPIDLE and TCP_KEEPINTVL have to be set + * in seconds. (does not require sockets.c, and will affect tcp.c) + */ +#if !defined LWIP_TCP_KEEPALIVE || defined __DOXYGEN__ +#define LWIP_TCP_KEEPALIVE 0 +#endif + +/** + * LWIP_SO_SNDTIMEO==1: Enable send timeout for sockets/netconns and + * SO_SNDTIMEO processing. + */ +#if !defined LWIP_SO_SNDTIMEO || defined __DOXYGEN__ +#define LWIP_SO_SNDTIMEO 0 +#endif + +/** + * LWIP_SO_RCVTIMEO==1: Enable receive timeout for sockets/netconns and + * SO_RCVTIMEO processing. + */ +#if !defined LWIP_SO_RCVTIMEO || defined __DOXYGEN__ +#define LWIP_SO_RCVTIMEO 0 +#endif + +/** + * LWIP_SO_SNDRCVTIMEO_NONSTANDARD==1: SO_RCVTIMEO/SO_SNDTIMEO take an int + * (milliseconds, much like winsock does) instead of a struct timeval (default). + */ +#if !defined LWIP_SO_SNDRCVTIMEO_NONSTANDARD || defined __DOXYGEN__ +#define LWIP_SO_SNDRCVTIMEO_NONSTANDARD 0 +#endif + +/** + * LWIP_SO_RCVBUF==1: Enable SO_RCVBUF processing. + */ +#if !defined LWIP_SO_RCVBUF || defined __DOXYGEN__ +#define LWIP_SO_RCVBUF 0 +#endif + +/** + * LWIP_SO_LINGER==1: Enable SO_LINGER processing. + */ +#if !defined LWIP_SO_LINGER || defined __DOXYGEN__ +#define LWIP_SO_LINGER 0 +#endif + +/** + * If LWIP_SO_RCVBUF is used, this is the default value for recv_bufsize. + */ +#if !defined RECV_BUFSIZE_DEFAULT || defined __DOXYGEN__ +#define RECV_BUFSIZE_DEFAULT INT_MAX +#endif + +/** + * By default, TCP socket/netconn close waits 20 seconds max to send the FIN + */ +#if !defined LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT || defined __DOXYGEN__ +#define LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT 20000 +#endif + +/** + * SO_REUSE==1: Enable SO_REUSEADDR option. + */ +#if !defined SO_REUSE || defined __DOXYGEN__ +#define SO_REUSE 0 +#endif + +/** + * SO_REUSE_RXTOALL==1: Pass a copy of incoming broadcast/multicast packets + * to all local matches if SO_REUSEADDR is turned on. + * WARNING: Adds a memcpy for every packet if passing to more than one pcb! + */ +#if !defined SO_REUSE_RXTOALL || defined __DOXYGEN__ +#define SO_REUSE_RXTOALL 0 +#endif + +/** + * LWIP_FIONREAD_LINUXMODE==0 (default): ioctl/FIONREAD returns the amount of + * pending data in the network buffer. This is the way windows does it. It's + * the default for lwIP since it is smaller. + * LWIP_FIONREAD_LINUXMODE==1: ioctl/FIONREAD returns the size of the next + * pending datagram in bytes. This is the way linux does it. This code is only + * here for compatibility. + */ +#if !defined LWIP_FIONREAD_LINUXMODE || defined __DOXYGEN__ +#define LWIP_FIONREAD_LINUXMODE 0 +#endif + +/** + * LWIP_SOCKET_SELECT==1 (default): enable select() for sockets (uses a netconn + * callback to keep track of events). + * This saves RAM (counters per socket) and code (netconn event callback), which + * should improve performance a bit). 
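+ *
+ * Illustrative sketch for the socket options above (example code; the socket s
+ * is assumed to exist, and LWIP_TCP_KEEPALIVE plus LWIP_SO_RCVTIMEO must be
+ * enabled in lwipopts.h for these options to be processed):
+ *   int idle = 30;                               // seconds, per TCP_KEEPIDLE above
+ *   setsockopt(s, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
+ *   struct timeval tv = { 5, 0 };                // 5 s receive timeout (timeval form)
+ *   setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));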
+ */ +#if !defined LWIP_SOCKET_SELECT || defined __DOXYGEN__ +#define LWIP_SOCKET_SELECT 1 +#endif + +/** + * LWIP_SOCKET_POLL==1 (default): enable poll() for sockets (including + * struct pollfd, nfds_t, and constants) + */ +#if !defined LWIP_SOCKET_POLL || defined __DOXYGEN__ +#define LWIP_SOCKET_POLL 1 +#endif +/** + * @} + */ + +/* + ---------------------------------------- + ---------- Statistics options ---------- + ---------------------------------------- +*/ +/** + * @defgroup lwip_opts_stats Statistics + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_STATS==1: Enable statistics collection in lwip_stats. + */ +#if !defined LWIP_STATS || defined __DOXYGEN__ +#define LWIP_STATS 1 +#endif + +#if LWIP_STATS + +/** + * LWIP_STATS_DISPLAY==1: Compile in the statistics output functions. + */ +#if !defined LWIP_STATS_DISPLAY || defined __DOXYGEN__ +#define LWIP_STATS_DISPLAY 0 +#endif + +/** + * LINK_STATS==1: Enable link stats. + */ +#if !defined LINK_STATS || defined __DOXYGEN__ +#define LINK_STATS 1 +#endif + +/** + * ETHARP_STATS==1: Enable etharp stats. + */ +#if !defined ETHARP_STATS || defined __DOXYGEN__ +#define ETHARP_STATS (LWIP_ARP) +#endif + +/** + * IP_STATS==1: Enable IP stats. + */ +#if !defined IP_STATS || defined __DOXYGEN__ +#define IP_STATS 1 +#endif + +/** + * IPFRAG_STATS==1: Enable IP fragmentation stats. Default is + * on if using either frag or reass. + */ +#if !defined IPFRAG_STATS || defined __DOXYGEN__ +#define IPFRAG_STATS (IP_REASSEMBLY || IP_FRAG) +#endif + +/** + * ICMP_STATS==1: Enable ICMP stats. + */ +#if !defined ICMP_STATS || defined __DOXYGEN__ +#define ICMP_STATS 1 +#endif + +/** + * IGMP_STATS==1: Enable IGMP stats. + */ +#if !defined IGMP_STATS || defined __DOXYGEN__ +#define IGMP_STATS (LWIP_IGMP) +#endif + +/** + * UDP_STATS==1: Enable UDP stats. Default is on if + * UDP enabled, otherwise off. + */ +#if !defined UDP_STATS || defined __DOXYGEN__ +#define UDP_STATS (LWIP_UDP) +#endif + +/** + * TCP_STATS==1: Enable TCP stats. Default is on if TCP + * enabled, otherwise off. + */ +#if !defined TCP_STATS || defined __DOXYGEN__ +#define TCP_STATS (LWIP_TCP) +#endif + +/** + * MEM_STATS==1: Enable mem.c stats. + */ +#if !defined MEM_STATS || defined __DOXYGEN__ +#define MEM_STATS ((MEM_LIBC_MALLOC == 0) && (MEM_USE_POOLS == 0)) +#endif + +/** + * MEMP_STATS==1: Enable memp.c pool stats. + */ +#if !defined MEMP_STATS || defined __DOXYGEN__ +#define MEMP_STATS (MEMP_MEM_MALLOC == 0) +#endif + +/** + * SYS_STATS==1: Enable system stats (sem and mbox counts, etc). + */ +#if !defined SYS_STATS || defined __DOXYGEN__ +#define SYS_STATS (NO_SYS == 0) +#endif + +/** + * IP6_STATS==1: Enable IPv6 stats. + */ +#if !defined IP6_STATS || defined __DOXYGEN__ +#define IP6_STATS (LWIP_IPV6) +#endif + +/** + * ICMP6_STATS==1: Enable ICMP for IPv6 stats. + */ +#if !defined ICMP6_STATS || defined __DOXYGEN__ +#define ICMP6_STATS (LWIP_IPV6 && LWIP_ICMP6) +#endif + +/** + * IP6_FRAG_STATS==1: Enable IPv6 fragmentation stats. + */ +#if !defined IP6_FRAG_STATS || defined __DOXYGEN__ +#define IP6_FRAG_STATS (LWIP_IPV6 && (LWIP_IPV6_FRAG || LWIP_IPV6_REASS)) +#endif + +/** + * MLD6_STATS==1: Enable MLD for IPv6 stats. + */ +#if !defined MLD6_STATS || defined __DOXYGEN__ +#define MLD6_STATS (LWIP_IPV6 && LWIP_IPV6_MLD) +#endif + +/** + * ND6_STATS==1: Enable Neighbor discovery for IPv6 stats. + */ +#if !defined ND6_STATS || defined __DOXYGEN__ +#define ND6_STATS (LWIP_IPV6) +#endif + +/** + * MIB2_STATS==1: Stats for SNMP MIB2. 
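+ *
+ * Illustrative use of the statistics options above (example only): with
+ * LWIP_STATS==1 and LWIP_STATS_DISPLAY==1 the collected counters can be dumped
+ * with
+ *   stats_display();          // declared in lwip/stats.h
+ * or inspected per protocol through the lwip_stats structure.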
+ */ +#if !defined MIB2_STATS || defined __DOXYGEN__ +#define MIB2_STATS 0 +#endif + +#else + +#define LINK_STATS 0 +#define ETHARP_STATS 0 +#define IP_STATS 0 +#define IPFRAG_STATS 0 +#define ICMP_STATS 0 +#define IGMP_STATS 0 +#define UDP_STATS 0 +#define TCP_STATS 0 +#define MEM_STATS 0 +#define MEMP_STATS 0 +#define SYS_STATS 0 +#define LWIP_STATS_DISPLAY 0 +#define IP6_STATS 0 +#define ICMP6_STATS 0 +#define IP6_FRAG_STATS 0 +#define MLD6_STATS 0 +#define ND6_STATS 0 +#define MIB2_STATS 0 + +#endif /* LWIP_STATS */ +/** + * @} + */ + +/* + -------------------------------------- + ---------- Checksum options ---------- + -------------------------------------- +*/ +/** + * @defgroup lwip_opts_checksum Checksum + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_CHECKSUM_CTRL_PER_NETIF==1: Checksum generation/check can be enabled/disabled + * per netif. + * ATTENTION: if enabled, the CHECKSUM_GEN_* and CHECKSUM_CHECK_* defines must be enabled! + */ +#if !defined LWIP_CHECKSUM_CTRL_PER_NETIF || defined __DOXYGEN__ +#define LWIP_CHECKSUM_CTRL_PER_NETIF 0 +#endif + +/** + * CHECKSUM_GEN_IP==1: Generate checksums in software for outgoing IP packets. + */ +#if !defined CHECKSUM_GEN_IP || defined __DOXYGEN__ +#define CHECKSUM_GEN_IP 1 +#endif + +/** + * CHECKSUM_GEN_UDP==1: Generate checksums in software for outgoing UDP packets. + */ +#if !defined CHECKSUM_GEN_UDP || defined __DOXYGEN__ +#define CHECKSUM_GEN_UDP 1 +#endif + +/** + * CHECKSUM_GEN_TCP==1: Generate checksums in software for outgoing TCP packets. + */ +#if !defined CHECKSUM_GEN_TCP || defined __DOXYGEN__ +#define CHECKSUM_GEN_TCP 1 +#endif + +/** + * CHECKSUM_GEN_ICMP==1: Generate checksums in software for outgoing ICMP packets. + */ +#if !defined CHECKSUM_GEN_ICMP || defined __DOXYGEN__ +#define CHECKSUM_GEN_ICMP 1 +#endif + +/** + * CHECKSUM_GEN_ICMP6==1: Generate checksums in software for outgoing ICMP6 packets. + */ +#if !defined CHECKSUM_GEN_ICMP6 || defined __DOXYGEN__ +#define CHECKSUM_GEN_ICMP6 1 +#endif + +/** + * CHECKSUM_CHECK_IP==1: Check checksums in software for incoming IP packets. + */ +#if !defined CHECKSUM_CHECK_IP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_IP 1 +#endif + +/** + * CHECKSUM_CHECK_UDP==1: Check checksums in software for incoming UDP packets. + */ +#if !defined CHECKSUM_CHECK_UDP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_UDP 1 +#endif + +/** + * CHECKSUM_CHECK_TCP==1: Check checksums in software for incoming TCP packets. + */ +#if !defined CHECKSUM_CHECK_TCP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_TCP 1 +#endif + +/** + * CHECKSUM_CHECK_ICMP==1: Check checksums in software for incoming ICMP packets. + */ +#if !defined CHECKSUM_CHECK_ICMP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_ICMP 1 +#endif + +/** + * CHECKSUM_CHECK_ICMP6==1: Check checksums in software for incoming ICMPv6 packets + */ +#if !defined CHECKSUM_CHECK_ICMP6 || defined __DOXYGEN__ +#define CHECKSUM_CHECK_ICMP6 1 +#endif + +/** + * LWIP_CHECKSUM_ON_COPY==1: Calculate checksum when copying data from + * application buffers to pbufs. 
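+ *
+ * Illustrative sketch for a MAC with hardware checksum offload (an assumption
+ * about the target hardware, not a recommendation): software generation and
+ * checking can then be turned off, e.g.
+ *   #define CHECKSUM_GEN_IP     0
+ *   #define CHECKSUM_GEN_UDP    0
+ *   #define CHECKSUM_GEN_TCP    0
+ *   #define CHECKSUM_CHECK_IP   0
+ *   #define CHECKSUM_CHECK_UDP  0
+ *   #define CHECKSUM_CHECK_TCP  0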
+ */ +#if !defined LWIP_CHECKSUM_ON_COPY || defined __DOXYGEN__ +#define LWIP_CHECKSUM_ON_COPY 0 +#endif +/** + * @} + */ + +/* + --------------------------------------- + ---------- IPv6 options --------------- + --------------------------------------- +*/ +/** + * @defgroup lwip_opts_ipv6 IPv6 + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_IPV6==1: Enable IPv6 + */ +#if !defined LWIP_IPV6 || defined __DOXYGEN__ +#define LWIP_IPV6 0 +#endif + +/** + * IPV6_REASS_MAXAGE: Maximum time (in multiples of IP6_REASS_TMR_INTERVAL - so seconds, normally) + * a fragmented IP packet waits for all fragments to arrive. If not all fragments arrived + * in this time, the whole packet is discarded. + */ +#if !defined IPV6_REASS_MAXAGE || defined __DOXYGEN__ +#define IPV6_REASS_MAXAGE 60 +#endif + +/** + * LWIP_IPV6_SCOPES==1: Enable support for IPv6 address scopes, ensuring that + * e.g. link-local addresses are really treated as link-local. Disable this + * setting only for single-interface configurations. + * All addresses that have a scope according to the default policy (link-local + * unicast addresses, interface-local and link-local multicast addresses) should + * now have a zone set on them before being passed to the core API, although + * lwIP will currently attempt to select a zone on the caller's behalf when + * necessary. Applications that directly assign IPv6 addresses to interfaces + * (which is NOT recommended) must now ensure that link-local addresses carry + * the netif's zone. See the new ip6_zone.h header file for more information and + * relevant macros. For now it is still possible to turn off scopes support + * through the new LWIP_IPV6_SCOPES option. When upgrading an implementation that + * uses the core API directly, it is highly recommended to enable + * LWIP_IPV6_SCOPES_DEBUG at least for a while, to ensure e.g. proper address + * initialization. + */ +#if !defined LWIP_IPV6_SCOPES || defined __DOXYGEN__ +#define LWIP_IPV6_SCOPES (LWIP_IPV6 && !LWIP_SINGLE_NETIF) +#endif + +/** + * LWIP_IPV6_SCOPES_DEBUG==1: Perform run-time checks to verify that addresses + * are properly zoned (see ip6_zone.h on what that means) where it matters. + * Enabling this setting is highly recommended when upgrading from an existing + * installation that is not yet scope-aware; otherwise it may be too expensive. + */ +#if !defined LWIP_IPV6_SCOPES_DEBUG || defined __DOXYGEN__ +#define LWIP_IPV6_SCOPES_DEBUG 0 +#endif + +/** + * LWIP_IPV6_NUM_ADDRESSES: Number of IPv6 addresses per netif. + */ +#if !defined LWIP_IPV6_NUM_ADDRESSES || defined __DOXYGEN__ +#define LWIP_IPV6_NUM_ADDRESSES 3 +#endif + +/** + * LWIP_IPV6_FORWARD==1: Forward IPv6 packets across netifs + */ +#if !defined LWIP_IPV6_FORWARD || defined __DOXYGEN__ +#define LWIP_IPV6_FORWARD 0 +#endif + +/** + * LWIP_IPV6_FRAG==1: Fragment outgoing IPv6 packets that are too big. + */ +#if !defined LWIP_IPV6_FRAG || defined __DOXYGEN__ +#define LWIP_IPV6_FRAG 1 +#endif + +/** + * LWIP_IPV6_REASS==1: reassemble incoming IPv6 packets that fragmented + */ +#if !defined LWIP_IPV6_REASS || defined __DOXYGEN__ +#define LWIP_IPV6_REASS LWIP_IPV6 +#endif + +/** + * LWIP_IPV6_SEND_ROUTER_SOLICIT==1: Send router solicitation messages during + * network startup. + */ +#if !defined LWIP_IPV6_SEND_ROUTER_SOLICIT || defined __DOXYGEN__ +#define LWIP_IPV6_SEND_ROUTER_SOLICIT 1 +#endif + +/** + * LWIP_IPV6_AUTOCONFIG==1: Enable stateless address autoconfiguration as per RFC 4862. 
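+ *
+ * Illustrative dual-stack sketch (example values only): enabling IPv6 alongside
+ * IPv4 with stateless autoconfiguration as documented above:
+ *   #define LWIP_IPV6               1
+ *   #define LWIP_IPV6_AUTOCONFIG    1
+ *   #define LWIP_IPV6_NUM_ADDRESSES 3   // e.g. link-local plus autoconfigured ones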
+ */ +#if !defined LWIP_IPV6_AUTOCONFIG || defined __DOXYGEN__ +#define LWIP_IPV6_AUTOCONFIG LWIP_IPV6 +#endif + +/** + * LWIP_IPV6_ADDRESS_LIFETIMES==1: Keep valid and preferred lifetimes for each + * IPv6 address. Required for LWIP_IPV6_AUTOCONFIG. May still be enabled + * otherwise, in which case the application may assign address lifetimes with + * the appropriate macros. Addresses with no lifetime are assumed to be static. + * If this option is disabled, all addresses are assumed to be static. + */ +#if !defined LWIP_IPV6_ADDRESS_LIFETIMES || defined __DOXYGEN__ +#define LWIP_IPV6_ADDRESS_LIFETIMES LWIP_IPV6_AUTOCONFIG +#endif + +/** + * LWIP_IPV6_DUP_DETECT_ATTEMPTS=[0..7]: Number of duplicate address detection attempts. + */ +#if !defined LWIP_IPV6_DUP_DETECT_ATTEMPTS || defined __DOXYGEN__ +#define LWIP_IPV6_DUP_DETECT_ATTEMPTS 1 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_icmp6 ICMP6 + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_ICMP6==1: Enable ICMPv6 (mandatory per RFC) + */ +#if !defined LWIP_ICMP6 || defined __DOXYGEN__ +#define LWIP_ICMP6 LWIP_IPV6 +#endif + +/** + * LWIP_ICMP6_DATASIZE: bytes from original packet to send back in + * ICMPv6 error messages. + */ +#if !defined LWIP_ICMP6_DATASIZE || defined __DOXYGEN__ +#define LWIP_ICMP6_DATASIZE 8 +#endif + +/** + * LWIP_ICMP6_HL: default hop limit for ICMPv6 messages + */ +#if !defined LWIP_ICMP6_HL || defined __DOXYGEN__ +#define LWIP_ICMP6_HL 255 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_mld6 Multicast listener discovery + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_IPV6_MLD==1: Enable multicast listener discovery protocol. + * If LWIP_IPV6 is enabled but this setting is disabled, the MAC layer must + * indiscriminately pass all inbound IPv6 multicast traffic to lwIP. + */ +#if !defined LWIP_IPV6_MLD || defined __DOXYGEN__ +#define LWIP_IPV6_MLD LWIP_IPV6 +#endif + +/** + * MEMP_NUM_MLD6_GROUP: Max number of IPv6 multicast groups that can be joined. + * There must be enough groups so that each netif can join the solicited-node + * multicast group for each of its local addresses, plus one for MDNS if + * applicable, plus any number of groups to be joined on UDP sockets. + */ +#if !defined MEMP_NUM_MLD6_GROUP || defined __DOXYGEN__ +#define MEMP_NUM_MLD6_GROUP 4 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_nd6 Neighbor discovery + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_ND6_QUEUEING==1: queue outgoing IPv6 packets while MAC address + * is being resolved. + */ +#if !defined LWIP_ND6_QUEUEING || defined __DOXYGEN__ +#define LWIP_ND6_QUEUEING LWIP_IPV6 +#endif + +/** + * MEMP_NUM_ND6_QUEUE: Max number of IPv6 packets to queue during MAC resolution. 
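+ *
+ * Illustrative count for MEMP_NUM_MLD6_GROUP above (an example following the
+ * rule stated there): one netif with LWIP_IPV6_NUM_ADDRESSES==3 needs up to 3
+ * solicited-node groups, plus 1 for MDNS if applicable, i.e. the default of 4.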
+ */ +#if !defined MEMP_NUM_ND6_QUEUE || defined __DOXYGEN__ +#define MEMP_NUM_ND6_QUEUE 20 +#endif + +/** + * LWIP_ND6_NUM_NEIGHBORS: Number of entries in IPv6 neighbor cache + */ +#if !defined LWIP_ND6_NUM_NEIGHBORS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_NEIGHBORS 10 +#endif + +/** + * LWIP_ND6_NUM_DESTINATIONS: number of entries in IPv6 destination cache + */ +#if !defined LWIP_ND6_NUM_DESTINATIONS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_DESTINATIONS 10 +#endif + +/** + * LWIP_ND6_NUM_PREFIXES: number of entries in IPv6 on-link prefixes cache + */ +#if !defined LWIP_ND6_NUM_PREFIXES || defined __DOXYGEN__ +#define LWIP_ND6_NUM_PREFIXES 5 +#endif + +/** + * LWIP_ND6_NUM_ROUTERS: number of entries in IPv6 default router cache + */ +#if !defined LWIP_ND6_NUM_ROUTERS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_ROUTERS 3 +#endif + +/** + * LWIP_ND6_MAX_MULTICAST_SOLICIT: max number of multicast solicit messages to send + * (neighbor solicit and router solicit) + */ +#if !defined LWIP_ND6_MAX_MULTICAST_SOLICIT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_MULTICAST_SOLICIT 3 +#endif + +/** + * LWIP_ND6_MAX_UNICAST_SOLICIT: max number of unicast neighbor solicitation messages + * to send during neighbor reachability detection. + */ +#if !defined LWIP_ND6_MAX_UNICAST_SOLICIT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_UNICAST_SOLICIT 3 +#endif + +/** + * Unused: See ND RFC (time in milliseconds). + */ +#if !defined LWIP_ND6_MAX_ANYCAST_DELAY_TIME || defined __DOXYGEN__ +#define LWIP_ND6_MAX_ANYCAST_DELAY_TIME 1000 +#endif + +/** + * Unused: See ND RFC + */ +#if !defined LWIP_ND6_MAX_NEIGHBOR_ADVERTISEMENT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_NEIGHBOR_ADVERTISEMENT 3 +#endif + +/** + * LWIP_ND6_REACHABLE_TIME: default neighbor reachable time (in milliseconds). + * May be updated by router advertisement messages. + */ +#if !defined LWIP_ND6_REACHABLE_TIME || defined __DOXYGEN__ +#define LWIP_ND6_REACHABLE_TIME 30000 +#endif + +/** + * LWIP_ND6_RETRANS_TIMER: default retransmission timer for solicitation messages + */ +#if !defined LWIP_ND6_RETRANS_TIMER || defined __DOXYGEN__ +#define LWIP_ND6_RETRANS_TIMER 1000 +#endif + +/** + * LWIP_ND6_DELAY_FIRST_PROBE_TIME: Delay before first unicast neighbor solicitation + * message is sent, during neighbor reachability detection. + */ +#if !defined LWIP_ND6_DELAY_FIRST_PROBE_TIME || defined __DOXYGEN__ +#define LWIP_ND6_DELAY_FIRST_PROBE_TIME 5000 +#endif + +/** + * LWIP_ND6_ALLOW_RA_UPDATES==1: Allow Router Advertisement messages to update + * Reachable time and retransmission timers, and netif MTU. + */ +#if !defined LWIP_ND6_ALLOW_RA_UPDATES || defined __DOXYGEN__ +#define LWIP_ND6_ALLOW_RA_UPDATES 1 +#endif + +/** + * LWIP_ND6_TCP_REACHABILITY_HINTS==1: Allow TCP to provide Neighbor Discovery + * with reachability hints for connected destinations. This helps avoid sending + * unicast neighbor solicitation messages. + */ +#if !defined LWIP_ND6_TCP_REACHABILITY_HINTS || defined __DOXYGEN__ +#define LWIP_ND6_TCP_REACHABILITY_HINTS 1 +#endif + +/** + * LWIP_ND6_RDNSS_MAX_DNS_SERVERS > 0: Use IPv6 Router Advertisement Recursive + * DNS Server Option (as per RFC 6106) to copy a defined maximum number of DNS + * servers to the DNS module. 
+ */ +#if !defined LWIP_ND6_RDNSS_MAX_DNS_SERVERS || defined __DOXYGEN__ +#define LWIP_ND6_RDNSS_MAX_DNS_SERVERS 0 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_dhcpv6 DHCPv6 + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_IPV6_DHCP6==1: enable DHCPv6 stateful/stateless address autoconfiguration. + */ +#if !defined LWIP_IPV6_DHCP6 || defined __DOXYGEN__ +#define LWIP_IPV6_DHCP6 0 +#endif + +/** + * LWIP_IPV6_DHCP6_STATEFUL==1: enable DHCPv6 stateful address autoconfiguration. + * (not supported, yet!) + */ +#if !defined LWIP_IPV6_DHCP6_STATEFUL || defined __DOXYGEN__ +#define LWIP_IPV6_DHCP6_STATEFUL 0 +#endif + +/** + * LWIP_IPV6_DHCP6_STATELESS==1: enable DHCPv6 stateless address autoconfiguration. + */ +#if !defined LWIP_IPV6_DHCP6_STATELESS || defined __DOXYGEN__ +#define LWIP_IPV6_DHCP6_STATELESS LWIP_IPV6_DHCP6 +#endif + +/** + * LWIP_DHCP6_GETS_NTP==1: Request NTP servers via DHCPv6. For each + * response packet, a callback is called, which has to be provided by the port: + * void dhcp6_set_ntp_servers(u8_t num_ntp_servers, ip_addr_t* ntp_server_addrs); +*/ +#if !defined LWIP_DHCP6_GET_NTP_SRV || defined __DOXYGEN__ +#define LWIP_DHCP6_GET_NTP_SRV 0 +#endif + +/** + * The maximum of NTP servers requested + */ +#if !defined LWIP_DHCP6_MAX_NTP_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP6_MAX_NTP_SERVERS 1 +#endif + +/** + * LWIP_DHCP6_MAX_DNS_SERVERS > 0: Request DNS servers via DHCPv6. + * DNS servers received in the response are passed to DNS via @ref dns_setserver() + * (up to the maximum limit defined here). + */ +#if !defined LWIP_DHCP6_MAX_DNS_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP6_MAX_DNS_SERVERS DNS_MAX_SERVERS +#endif +/** + * @} + */ + +/* + --------------------------------------- + ---------- Hook options --------------- + --------------------------------------- +*/ + +/** + * @defgroup lwip_opts_hooks Hooks + * @ingroup lwip_opts_infrastructure + * Hooks are undefined by default, define them to a function if you need them. + * @{ + */ + +/** + * LWIP_HOOK_FILENAME: Custom filename to \#include in files that provide hooks. + * Declare your hook function prototypes in there, you may also \#include all headers + * providing data types that are need in this file. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_FILENAME "path/to/my/lwip_hooks.h" +#endif + +/** + * LWIP_HOOK_TCP_ISN: + * Hook for generation of the Initial Sequence Number (ISN) for a new TCP + * connection. The default lwIP ISN generation algorithm is very basic and may + * allow for TCP spoofing attacks. This hook provides the means to implement + * the standardized ISN generation algorithm from RFC 6528 (see contrib/adons/tcp_isn), + * or any other desired algorithm as a replacement. 
+ * Called from tcp_connect() and tcp_listen_input() when an ISN is needed for + * a new TCP connection, if TCP support (@ref LWIP_TCP) is enabled.\n + * Signature:\code{.c} + * u32_t my_hook_tcp_isn(const ip_addr_t* local_ip, u16_t local_port, const ip_addr_t* remote_ip, u16_t remote_port); + * \endcode + * - it may be necessary to use "struct ip_addr" (ip4_addr, ip6_addr) instead of "ip_addr_t" in function declarations\n + * Arguments: + * - local_ip: pointer to the local IP address of the connection + * - local_port: local port number of the connection (host-byte order) + * - remote_ip: pointer to the remote IP address of the connection + * - remote_port: remote port number of the connection (host-byte order)\n + * Return value: + * - the 32-bit Initial Sequence Number to use for the new TCP connection. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_ISN(local_ip, local_port, remote_ip, remote_port) +#endif + +/** + * LWIP_HOOK_TCP_INPACKET_PCB: + * Hook for intercepting incoming packets before they are passed to a pcb. This + * allows updating some state or even dropping a packet. + * Signature:\code{.c} + * err_t my_hook_tcp_inpkt(struct tcp_pcb *pcb, struct tcp_hdr *hdr, u16_t optlen, u16_t opt1len, u8_t *opt2, struct pbuf *p); + * \endcode + * Arguments: + * - pcb: tcp_pcb selected for input of this packet (ATTENTION: this may be + * struct tcp_pcb_listen if pcb->state == LISTEN) + * - hdr: pointer to tcp header (ATTENTION: tcp options may not be in one piece!) + * - optlen: tcp option length + * - opt1len: tcp option length 1st part + * - opt2: if this is != NULL, tcp options are split among 2 pbufs. In that case, + * options start at right after the tcp header ('(u8_t*)(hdr + 1)') for + * the first 'opt1len' bytes and the rest starts at 'opt2'. opt2len can + * be simply calculated: 'opt2len = optlen - opt1len;' + * - p: input packet, p->payload points to application data (that's why tcp hdr + * and options are passed in seperately) + * Return value: + * - ERR_OK: continue input of this packet as normal + * - != ERR_OK: drop this packet for input (don't continue input processing) + * + * ATTENTION: don't call any tcp api functions that might change tcp state (pcb + * state or any pcb lists) from this callback! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_INPACKET_PCB(pcb, hdr, optlen, opt1len, opt2, p) +#endif + +/** + * LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH: + * Hook for increasing the size of the options allocated with a tcp header. + * Together with LWIP_HOOK_TCP_OUT_ADD_TCPOPTS, this can be used to add custom + * options to outgoing tcp segments. + * Signature:\code{.c} + * u8_t my_hook_tcp_out_tcpopt_length(const struct tcp_pcb *pcb, u8_t internal_option_length); + * \endcode + * Arguments: + * - pcb: tcp_pcb that transmits (ATTENTION: this may be NULL or + * struct tcp_pcb_listen if pcb->state == LISTEN) + * - internal_option_length: tcp option length used by the stack internally + * Return value: + * - a number of bytes to allocate for tcp options (internal_option_length <= ret <= 40) + * + * ATTENTION: don't call any tcp api functions that might change tcp state (pcb + * state or any pcb lists) from this callback! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH(pcb, internal_len) +#endif + +/** + * LWIP_HOOK_TCP_OUT_ADD_TCPOPTS: + * Hook for adding custom options to outgoing tcp segments. + * Space for these custom options has to be reserved via LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH. 
+ * Signature:\code{.c} + * u32_t *my_hook_tcp_out_add_tcpopts(struct pbuf *p, struct tcp_hdr *hdr, const struct tcp_pcb *pcb, u32_t *opts); + * \endcode + * Arguments: + * - p: output packet, p->payload pointing to tcp header, data follows + * - hdr: tcp header + * - pcb: tcp_pcb that transmits (ATTENTION: this may be NULL or + * struct tcp_pcb_listen if pcb->state == LISTEN) + * - opts: pointer where to add the custom options (there may already be options + * between the header and these) + * Return value: + * - pointer pointing directly after the inserted options + * + * ATTENTION: don't call any tcp api functions that might change tcp state (pcb + * state or any pcb lists) from this callback! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(p, hdr, pcb, opts) +#endif + +/** + * LWIP_HOOK_IP4_INPUT(pbuf, input_netif): + * Called from ip_input() (IPv4) + * Signature:\code{.c} + * int my_hook(struct pbuf *pbuf, struct netif *input_netif); + * \endcode + * Arguments: + * - pbuf: received struct pbuf passed to ip_input() + * - input_netif: struct netif on which the packet has been received + * Return values: + * - 0: Hook has not consumed the packet, packet is processed as normal + * - != 0: Hook has consumed the packet. + * If the hook consumed the packet, 'pbuf' is in the responsibility of the hook + * (i.e. free it when done). + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_INPUT(pbuf, input_netif) +#endif + +/** + * LWIP_HOOK_IP4_ROUTE(dest): + * Called from ip_route() (IPv4) + * Signature:\code{.c} + * struct netif *my_hook(const ip4_addr_t *dest); + * \endcode + * Arguments: + * - dest: destination IPv4 address + * Returns values: + * - the destination netif + * - NULL if no destination netif is found. In that case, ip_route() continues as normal. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_ROUTE() +#endif + +/** + * LWIP_HOOK_IP4_ROUTE_SRC(src, dest): + * Source-based routing for IPv4 - called from ip_route() (IPv4) + * Signature:\code{.c} + * struct netif *my_hook(const ip4_addr_t *src, const ip4_addr_t *dest); + * \endcode + * Arguments: + * - src: local/source IPv4 address + * - dest: destination IPv4 address + * Returns values: + * - the destination netif + * - NULL if no destination netif is found. In that case, ip_route() continues as normal. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_ROUTE_SRC(src, dest) +#endif + +/** + * LWIP_HOOK_IP4_CANFORWARD(src, dest): + * Check if an IPv4 can be forwarded - called from: + * ip4_input() -> ip4_forward() -> ip4_canforward() (IPv4) + * - source address is available via ip4_current_src_addr() + * - calling an output function in this context (e.g. multicast router) is allowed + * Signature:\code{.c} + * int my_hook(struct pbuf *p, u32_t dest_addr_hostorder); + * \endcode + * Arguments: + * - p: packet to forward + * - dest: destination IPv4 address + * Returns values: + * - 1: forward + * - 0: don't forward + * - -1: no decision. In that case, ip4_canforward() continues as normal. 
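+ *
+ * Illustrative sketch of how these hooks are typically wired (example names;
+ * my_hooks.h and my_ip4_route() are assumptions, only the LWIP_HOOK_* macros
+ * and the documented signature come from lwIP):
+ *   // lwipopts.h
+ *   #define LWIP_HOOK_FILENAME        "my_hooks.h"
+ *   #define LWIP_HOOK_IP4_ROUTE(dest) my_ip4_route(dest)
+ *   // my_hooks.h
+ *   struct netif *my_ip4_route(const ip4_addr_t *dest);  // return a netif, or NULL
+ *                                                        // to let ip_route() continue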
+ */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_CANFORWARD(src, dest) +#endif + +/** + * LWIP_HOOK_ETHARP_GET_GW(netif, dest): + * Called from etharp_output() (IPv4) + * Signature:\code{.c} + * const ip4_addr_t *my_hook(struct netif *netif, const ip4_addr_t *dest); + * \endcode + * Arguments: + * - netif: the netif used for sending + * - dest: the destination IPv4 address + * Return values: + * - the IPv4 address of the gateway to handle the specified destination IPv4 address + * - NULL, in which case the netif's default gateway is used + * + * The returned address MUST be directly reachable on the specified netif! + * This function is meant to implement advanced IPv4 routing together with + * LWIP_HOOK_IP4_ROUTE(). The actual routing/gateway table implementation is + * not part of lwIP but can e.g. be hidden in the netif's state argument. +*/ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_ETHARP_GET_GW(netif, dest) +#endif + +/** + * LWIP_HOOK_IP6_INPUT(pbuf, input_netif): + * Called from ip6_input() (IPv6) + * Signature:\code{.c} + * int my_hook(struct pbuf *pbuf, struct netif *input_netif); + * \endcode + * Arguments: + * - pbuf: received struct pbuf passed to ip6_input() + * - input_netif: struct netif on which the packet has been received + * Return values: + * - 0: Hook has not consumed the packet, packet is processed as normal + * - != 0: Hook has consumed the packet. + * If the hook consumed the packet, 'pbuf' is in the responsibility of the hook + * (i.e. free it when done). + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP6_INPUT(pbuf, input_netif) +#endif + +/** + * LWIP_HOOK_IP6_ROUTE(src, dest): + * Called from ip_route() (IPv6) + * Signature:\code{.c} + * struct netif *my_hook(const ip6_addr_t *dest, const ip6_addr_t *src); + * \endcode + * Arguments: + * - src: source IPv6 address + * - dest: destination IPv6 address + * Return values: + * - the destination netif + * - NULL if no destination netif is found. In that case, ip6_route() continues as normal. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP6_ROUTE(src, dest) +#endif + +/** + * LWIP_HOOK_ND6_GET_GW(netif, dest): + * Called from nd6_get_next_hop_entry() (IPv6) + * Signature:\code{.c} + * const ip6_addr_t *my_hook(struct netif *netif, const ip6_addr_t *dest); + * \endcode + * Arguments: + * - netif: the netif used for sending + * - dest: the destination IPv6 address + * Return values: + * - the IPv6 address of the next hop to handle the specified destination IPv6 address + * - NULL, in which case a NDP-discovered router is used instead + * + * The returned address MUST be directly reachable on the specified netif! + * This function is meant to implement advanced IPv6 routing together with + * LWIP_HOOK_IP6_ROUTE(). The actual routing/gateway table implementation is + * not part of lwIP but can e.g. be hidden in the netif's state argument. +*/ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_ND6_GET_GW(netif, dest) +#endif + +/** + * LWIP_HOOK_VLAN_CHECK(netif, eth_hdr, vlan_hdr): + * Called from ethernet_input() if VLAN support is enabled + * Signature:\code{.c} + * int my_hook(struct netif *netif, struct eth_hdr *eth_hdr, struct eth_vlan_hdr *vlan_hdr); + * \endcode + * Arguments: + * - netif: struct netif on which the packet has been received + * - eth_hdr: struct eth_hdr of the packet + * - vlan_hdr: struct eth_vlan_hdr of the packet + * Return values: + * - 0: Packet must be dropped. + * - != 0: Packet must be accepted. 
+ */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_VLAN_CHECK(netif, eth_hdr, vlan_hdr) +#endif + +/** + * LWIP_HOOK_VLAN_SET: + * Hook can be used to set prio_vid field of vlan_hdr. If you need to store data + * on per-netif basis to implement this callback, see @ref netif_cd. + * Called from ethernet_output() if VLAN support (@ref ETHARP_SUPPORT_VLAN) is enabled.\n + * Signature:\code{.c} + * s32_t my_hook_vlan_set(struct netif* netif, struct pbuf* pbuf, const struct eth_addr* src, const struct eth_addr* dst, u16_t eth_type);\n + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - p: struct pbuf packet to be sent + * - src: source eth address + * - dst: destination eth address + * - eth_type: ethernet type to packet to be sent\n + * + * + * Return values: + * - <0: Packet shall not contain VLAN header. + * - 0 <= return value <= 0xFFFF: Packet shall contain VLAN header. Return value is prio_vid in host byte order. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_VLAN_SET(netif, p, src, dst, eth_type) +#endif + +/** + * LWIP_HOOK_MEMP_AVAILABLE(memp_t_type): + * Called from memp_free() when a memp pool was empty and an item is now available + * Signature:\code{.c} + * void my_hook(memp_t type); + * \endcode + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_MEMP_AVAILABLE(memp_t_type) +#endif + +/** + * LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(pbuf, netif): + * Called from ethernet_input() when an unknown eth type is encountered. + * Signature:\code{.c} + * err_t my_hook(struct pbuf* pbuf, struct netif* netif); + * \endcode + * Arguments: + * - p: rx packet with unknown eth type + * - netif: netif on which the packet has been received + * Return values: + * - ERR_OK if packet is accepted (hook function now owns the pbuf) + * - any error code otherwise (pbuf is freed) + * + * Payload points to ethernet header! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(pbuf, netif) +#endif + +/** + * LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, state, msg, msg_type, options_len_ptr): + * Called from various dhcp functions when sending a DHCP message. + * This hook is called just before the DHCP message trailer is added, so the + * options are at the end of a DHCP message. + * Signature:\code{.c} + * void my_hook(struct netif *netif, struct dhcp *dhcp, u8_t state, struct dhcp_msg *msg, + * u8_t msg_type, u16_t *options_len_ptr); + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - dhcp: struct dhcp on that netif + * - state: current dhcp state (dhcp_state_enum_t as an u8_t) + * - msg: struct dhcp_msg that will be sent + * - msg_type: dhcp message type to be sent (u8_t) + * - options_len_ptr: pointer to the current length of options in the dhcp_msg "msg" + * (must be increased when options are added!) + * + * Options need to appended like this: + * LWIP_ASSERT("dhcp option overflow", *options_len_ptr + option_len + 2 <= DHCP_OPTIONS_LEN); + * msg->options[(*options_len_ptr)++] = <option_number>; + * msg->options[(*options_len_ptr)++] = <option_len>; + * msg->options[(*options_len_ptr)++] = <option_bytes>; + * [...] + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, state, msg, msg_type, options_len_ptr) +#endif + +/** + * LWIP_HOOK_DHCP_PARSE_OPTION(netif, dhcp, state, msg, msg_type, option, len, pbuf, option_value_offset): + * Called from dhcp_parse_reply when receiving a DHCP message. + * This hook is called for every option in the received message that is not handled internally. 
+ * Signature:\code{.c} + * void my_hook(struct netif *netif, struct dhcp *dhcp, u8_t state, struct dhcp_msg *msg, + * u8_t msg_type, u8_t option, u8_t option_len, struct pbuf *pbuf, u16_t option_value_offset); + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - dhcp: struct dhcp on that netif + * - state: current dhcp state (dhcp_state_enum_t as an u8_t) + * - msg: struct dhcp_msg that was received + * - msg_type: dhcp message type received (u8_t, ATTENTION: only valid after + * the message type option has been parsed!) + * - option: option value (u8_t) + * - len: option data length (u8_t) + * - pbuf: pbuf where option data is contained + * - option_value_offset: offset in pbuf where option data begins + * + * A nice way to get the option contents is pbuf_get_contiguous(): + * u8_t buf[32]; + * u8_t *ptr = (u8_t*)pbuf_get_contiguous(p, buf, sizeof(buf), LWIP_MIN(option_len, sizeof(buf)), offset); + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_DHCP_PARSE_OPTION(netif, dhcp, state, msg, msg_type, option, len, pbuf, offset) +#endif + +/** + * LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len): + * Called from various dhcp6 functions when sending a DHCP6 message. + * This hook is called just before the DHCP6 message is sent, so the + * options are at the end of a DHCP6 message. + * Signature:\code{.c} + * void my_hook(struct netif *netif, struct dhcp6 *dhcp, u8_t state, struct dhcp6_msg *msg, + * u8_t msg_type, u16_t *options_len_ptr); + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - dhcp6: struct dhcp6 on that netif + * - state: current dhcp6 state (dhcp6_state_enum_t as an u8_t) + * - msg: struct dhcp6_msg that will be sent + * - msg_type: dhcp6 message type to be sent (u8_t) + * - options_len_ptr: pointer to the current length of options in the dhcp6_msg "msg" + * (must be increased when options are added!) + * + * Options need to appended like this: + * u8_t *options = (u8_t *)(msg + 1); + * LWIP_ASSERT("dhcp option overflow", sizeof(struct dhcp6_msg) + *options_len_ptr + newoptlen <= max_len); + * options[(*options_len_ptr)++] = <option_data>; + * [...] + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len) +#endif + +/** + * LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, err) + * Called from socket API to implement setsockopt() for options not provided by lwIP. + * Core lock is held when this hook is called. + * Signature:\code{.c} + * int my_hook(int s, struct lwip_sock *sock, int level, int optname, const void *optval, socklen_t optlen, int *err) + * \endcode + * Arguments: + * - s: socket file descriptor + * - sock: internal socket descriptor (see lwip/priv/sockets_priv.h) + * - level: protocol level at which the option resides + * - optname: option to set + * - optval: value to set + * - optlen: size of optval + * - err: output error + * Return values: + * - 0: Hook has not consumed the option, code continues as normal (to internal options) + * - != 0: Hook has consumed the option, 'err' is returned + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, err) +#endif + +/** + * LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, err) + * Called from socket API to implement getsockopt() for options not provided by lwIP. + * Core lock is held when this hook is called. 
+ * Signature:\code{.c} + * int my_hook(int s, struct lwip_sock *sock, int level, int optname, void *optval, socklen_t *optlen, int *err) + * \endcode + * Arguments: + * - s: socket file descriptor + * - sock: internal socket descriptor (see lwip/priv/sockets_priv.h) + * - level: protocol level at which the option resides + * - optname: option to get + * - optval: value to get + * - optlen: size of optval + * - err: output error + * Return values: + * - 0: Hook has not consumed the option, code continues as normal (to internal options) + * - != 0: Hook has consumed the option, 'err' is returned + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, err) +#endif + +/** + * LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, addrtype, err) + * Called from netconn APIs (not usable with callback apps) allowing an + * external DNS resolver (which uses sequential API) to handle the query. + * Signature:\code{.c} + * int my_hook(const char *name, ip_addr_t *addr, u8_t addrtype, err_t *err) + * \endcode + * Arguments: + * - name: hostname to resolve + * - addr: output host address + * - addrtype: type of address to query + * - err: output error + * Return values: + * - 0: Hook has not consumed hostname query, query continues into DNS module + * - != 0: Hook has consumed the query + * + * err must also be checked to determine if the hook consumed the query, but + * the query failed + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, addrtype, err) +#endif +/** + * @} + */ + +/* + --------------------------------------- + ---------- Debugging options ---------- + --------------------------------------- +*/ +/** + * @defgroup lwip_opts_debugmsg Debug messages + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_DBG_MIN_LEVEL: After masking, the value of the debug is + * compared against this value. If it is smaller, then debugging + * messages are written. + * @see debugging_levels + */ +#if !defined LWIP_DBG_MIN_LEVEL || defined __DOXYGEN__ +#define LWIP_DBG_MIN_LEVEL LWIP_DBG_LEVEL_ALL +#endif + +/** + * LWIP_DBG_TYPES_ON: A mask that can be used to globally enable/disable + * debug messages of certain types. + * @see debugging_levels + */ +#if !defined LWIP_DBG_TYPES_ON || defined __DOXYGEN__ +#define LWIP_DBG_TYPES_ON LWIP_DBG_ON +#endif + +/** + * ETHARP_DEBUG: Enable debugging in etharp.c. + */ +#if !defined ETHARP_DEBUG || defined __DOXYGEN__ +#define ETHARP_DEBUG LWIP_DBG_OFF +#endif + +/** + * NETIF_DEBUG: Enable debugging in netif.c. + */ +#if !defined NETIF_DEBUG || defined __DOXYGEN__ +#define NETIF_DEBUG LWIP_DBG_OFF +#endif + +/** + * PBUF_DEBUG: Enable debugging in pbuf.c. + */ +#if !defined PBUF_DEBUG || defined __DOXYGEN__ +#define PBUF_DEBUG LWIP_DBG_OFF +#endif + +/** + * API_LIB_DEBUG: Enable debugging in api_lib.c. + */ +#if !defined API_LIB_DEBUG || defined __DOXYGEN__ +#define API_LIB_DEBUG LWIP_DBG_OFF +#endif + +/** + * API_MSG_DEBUG: Enable debugging in api_msg.c. + */ +#if !defined API_MSG_DEBUG || defined __DOXYGEN__ +#define API_MSG_DEBUG LWIP_DBG_OFF +#endif + +/** + * SOCKETS_DEBUG: Enable debugging in sockets.c. + */ +#if !defined SOCKETS_DEBUG || defined __DOXYGEN__ +#define SOCKETS_DEBUG LWIP_DBG_OFF +#endif + +/** + * ICMP_DEBUG: Enable debugging in icmp.c. + */ +#if !defined ICMP_DEBUG || defined __DOXYGEN__ +#define ICMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * IGMP_DEBUG: Enable debugging in igmp.c. 
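+ *
+ * Like the other *_DEBUG switches in this section, it is meant to be overridden
+ * from lwipopts.h; a hypothetical override could look like:
+ * \code{.c}
+ * #define IGMP_DEBUG  LWIP_DBG_ON
+ * \endcode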
+ */ +#if !defined IGMP_DEBUG || defined __DOXYGEN__ +#define IGMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * INET_DEBUG: Enable debugging in inet.c. + */ +#if !defined INET_DEBUG || defined __DOXYGEN__ +#define INET_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP_DEBUG: Enable debugging for IP. + */ +#if !defined IP_DEBUG || defined __DOXYGEN__ +#define IP_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP_REASS_DEBUG: Enable debugging in ip_frag.c for both frag & reass. + */ +#if !defined IP_REASS_DEBUG || defined __DOXYGEN__ +#define IP_REASS_DEBUG LWIP_DBG_OFF +#endif + +/** + * RAW_DEBUG: Enable debugging in raw.c. + */ +#if !defined RAW_DEBUG || defined __DOXYGEN__ +#define RAW_DEBUG LWIP_DBG_OFF +#endif + +/** + * MEM_DEBUG: Enable debugging in mem.c. + */ +#if !defined MEM_DEBUG || defined __DOXYGEN__ +#define MEM_DEBUG LWIP_DBG_OFF +#endif + +/** + * MEMP_DEBUG: Enable debugging in memp.c. + */ +#if !defined MEMP_DEBUG || defined __DOXYGEN__ +#define MEMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SYS_DEBUG: Enable debugging in sys.c. + */ +#if !defined SYS_DEBUG || defined __DOXYGEN__ +#define SYS_DEBUG LWIP_DBG_OFF +#endif + +/** + * TIMERS_DEBUG: Enable debugging in timers.c. + */ +#if !defined TIMERS_DEBUG || defined __DOXYGEN__ +#define TIMERS_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_DEBUG: Enable debugging for TCP. + */ +#if !defined TCP_DEBUG || defined __DOXYGEN__ +#define TCP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_INPUT_DEBUG: Enable debugging in tcp_in.c for incoming debug. + */ +#if !defined TCP_INPUT_DEBUG || defined __DOXYGEN__ +#define TCP_INPUT_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_FR_DEBUG: Enable debugging in tcp_in.c for fast retransmit. + */ +#if !defined TCP_FR_DEBUG || defined __DOXYGEN__ +#define TCP_FR_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_RTO_DEBUG: Enable debugging in TCP for retransmit + * timeout. + */ +#if !defined TCP_RTO_DEBUG || defined __DOXYGEN__ +#define TCP_RTO_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_CWND_DEBUG: Enable debugging for TCP congestion window. + */ +#if !defined TCP_CWND_DEBUG || defined __DOXYGEN__ +#define TCP_CWND_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_WND_DEBUG: Enable debugging in tcp_in.c for window updating. + */ +#if !defined TCP_WND_DEBUG || defined __DOXYGEN__ +#define TCP_WND_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_OUTPUT_DEBUG: Enable debugging in tcp_out.c output functions. + */ +#if !defined TCP_OUTPUT_DEBUG || defined __DOXYGEN__ +#define TCP_OUTPUT_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_RST_DEBUG: Enable debugging for TCP with the RST message. + */ +#if !defined TCP_RST_DEBUG || defined __DOXYGEN__ +#define TCP_RST_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_QLEN_DEBUG: Enable debugging for TCP queue lengths. + */ +#if !defined TCP_QLEN_DEBUG || defined __DOXYGEN__ +#define TCP_QLEN_DEBUG LWIP_DBG_OFF +#endif + +/** + * UDP_DEBUG: Enable debugging in UDP. + */ +#if !defined UDP_DEBUG || defined __DOXYGEN__ +#define UDP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCPIP_DEBUG: Enable debugging in tcpip.c. + */ +#if !defined TCPIP_DEBUG || defined __DOXYGEN__ +#define TCPIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SLIP_DEBUG: Enable debugging in slipif.c. + */ +#if !defined SLIP_DEBUG || defined __DOXYGEN__ +#define SLIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * DHCP_DEBUG: Enable debugging in dhcp.c. + */ +#if !defined DHCP_DEBUG || defined __DOXYGEN__ +#define DHCP_DEBUG LWIP_DBG_OFF +#endif + +/** + * AUTOIP_DEBUG: Enable debugging in autoip.c. 
+ */ +#if !defined AUTOIP_DEBUG || defined __DOXYGEN__ +#define AUTOIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * DNS_DEBUG: Enable debugging for DNS. + */ +#if !defined DNS_DEBUG || defined __DOXYGEN__ +#define DNS_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP6_DEBUG: Enable debugging for IPv6. + */ +#if !defined IP6_DEBUG || defined __DOXYGEN__ +#define IP6_DEBUG LWIP_DBG_OFF +#endif + +/** + * DHCP6_DEBUG: Enable debugging in dhcp6.c. + */ +#if !defined DHCP6_DEBUG || defined __DOXYGEN__ +#define DHCP6_DEBUG LWIP_DBG_OFF +#endif +/** + * @} + */ + +/** + * LWIP_TESTMODE: Changes to make unit test possible + */ +#if !defined LWIP_TESTMODE +#define LWIP_TESTMODE 0 +#endif + +/* + -------------------------------------------------- + ---------- Performance tracking options ---------- + -------------------------------------------------- +*/ +/** + * @defgroup lwip_opts_perf Performance + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_PERF: Enable performance testing for lwIP + * (if enabled, arch/perf.h is included) + */ +#if !defined LWIP_PERF || defined __DOXYGEN__ +#define LWIP_PERF 0 +#endif +/** + * @} + */ + +#endif /* LWIP_HDR_OPT_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h b/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h new file mode 100644 index 00000000..82902a4e --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h @@ -0,0 +1,322 @@ +/** + * @file + * pbuf API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_PBUF_H +#define LWIP_HDR_PBUF_H + +#include "lwip/opt.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** LWIP_SUPPORT_CUSTOM_PBUF==1: Custom pbufs behave much like their pbuf type + * but they are allocated by external code (initialised by calling + * pbuf_alloced_custom()) and when pbuf_free gives up their last reference, they + * are freed by calling pbuf_custom->custom_free_function(). 
+ * Currently, the pbuf_custom code is only needed for one specific configuration + * of IP_FRAG, unless required by external driver/application code. */ +#ifndef LWIP_SUPPORT_CUSTOM_PBUF +#define LWIP_SUPPORT_CUSTOM_PBUF ((IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF) || (LWIP_IPV6 && LWIP_IPV6_FRAG)) +#endif + +/** @ingroup pbuf + * PBUF_NEEDS_COPY(p): return a boolean value indicating whether the given + * pbuf needs to be copied in order to be kept around beyond the current call + * stack without risking being corrupted. The default setting provides safety: + * it will make a copy iof any pbuf chain that does not consist entirely of + * PBUF_ROM type pbufs. For setups with zero-copy support, it may be redefined + * to evaluate to true in all cases, for example. However, doing so also has an + * effect on the application side: any buffers that are *not* copied must also + * *not* be reused by the application after passing them to lwIP. For example, + * when setting PBUF_NEEDS_COPY to (0), after using udp_send() with a PBUF_RAM + * pbuf, the application must free the pbuf immediately, rather than reusing it + * for other purposes. For more background information on this, see tasks #6735 + * and #7896, and bugs #11400 and #49914. */ +#ifndef PBUF_NEEDS_COPY +#define PBUF_NEEDS_COPY(p) ((p)->type_internal & PBUF_TYPE_FLAG_DATA_VOLATILE) +#endif /* PBUF_NEEDS_COPY */ + +/* @todo: We need a mechanism to prevent wasting memory in every pbuf + (TCP vs. UDP, IPv4 vs. IPv6: UDP/IPv4 packets may waste up to 28 bytes) */ + +#define PBUF_TRANSPORT_HLEN 20 +#if LWIP_IPV6 +#define PBUF_IP_HLEN 40 +#else +#define PBUF_IP_HLEN 20 +#endif + +/** + * @ingroup pbuf + * Enumeration of pbuf layers + */ +typedef enum { + /** Includes spare room for transport layer header, e.g. UDP header. + * Use this if you intend to pass the pbuf to functions like udp_send(). + */ + PBUF_TRANSPORT = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN, + /** Includes spare room for IP header. + * Use this if you intend to pass the pbuf to functions like raw_send(). + */ + PBUF_IP = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN, + /** Includes spare room for link layer header (ethernet header). + * Use this if you intend to pass the pbuf to functions like ethernet_output(). + * @see PBUF_LINK_HLEN + */ + PBUF_LINK = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN, + /** Includes spare room for additional encapsulation header before ethernet + * headers (e.g. 802.11). + * Use this if you intend to pass the pbuf to functions like netif->linkoutput(). + * @see PBUF_LINK_ENCAPSULATION_HLEN + */ + PBUF_RAW_TX = PBUF_LINK_ENCAPSULATION_HLEN, + /** Use this for input packets in a netif driver when calling netif->input() + * in the most common case - ethernet-layer netif driver. */ + PBUF_RAW = 0 +} pbuf_layer; + + +/* Base flags for pbuf_type definitions: */ + +/** Indicates that the payload directly follows the struct pbuf. + * This makes @ref pbuf_header work in both directions. */ +#define PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS 0x80 +/** Indicates the data stored in this pbuf can change. If this pbuf needs + * to be queued, it must be copied/duplicated. */ +#define PBUF_TYPE_FLAG_DATA_VOLATILE 0x40 +/** 4 bits are reserved for 16 allocation sources (e.g. heap, pool1, pool2, etc) + * Internally, we use: 0=heap, 1=MEMP_PBUF, 2=MEMP_PBUF_POOL -> 13 types free*/ +#define PBUF_TYPE_ALLOC_SRC_MASK 0x0F +/** Indicates this pbuf is used for RX (if not set, indicates use for TX). 
+ * This information can be used to keep some spare RX buffers e.g. for + * receiving TCP ACKs to unblock a connection) */ +#define PBUF_ALLOC_FLAG_RX 0x0100 +/** Indicates the application needs the pbuf payload to be in one piece */ +#define PBUF_ALLOC_FLAG_DATA_CONTIGUOUS 0x0200 + +#define PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP 0x00 +#define PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF 0x01 +#define PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL 0x02 +/** First pbuf allocation type for applications */ +#define PBUF_TYPE_ALLOC_SRC_MASK_APP_MIN 0x03 +/** Last pbuf allocation type for applications */ +#define PBUF_TYPE_ALLOC_SRC_MASK_APP_MAX PBUF_TYPE_ALLOC_SRC_MASK + +/** + * @ingroup pbuf + * Enumeration of pbuf types + */ +typedef enum { + /** pbuf data is stored in RAM, used for TX mostly, struct pbuf and its payload + are allocated in one piece of contiguous memory (so the first payload byte + can be calculated from struct pbuf). + pbuf_alloc() allocates PBUF_RAM pbufs as unchained pbufs (although that might + change in future versions). + This should be used for all OUTGOING packets (TX).*/ + PBUF_RAM = (PBUF_ALLOC_FLAG_DATA_CONTIGUOUS | PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP), + /** pbuf data is stored in ROM, i.e. struct pbuf and its payload are located in + totally different memory areas. Since it points to ROM, payload does not + have to be copied when queued for transmission. */ + PBUF_ROM = PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF, + /** pbuf comes from the pbuf pool. Much like PBUF_ROM but payload might change + so it has to be duplicated when queued before transmitting, depending on + who has a 'ref' to it. */ + PBUF_REF = (PBUF_TYPE_FLAG_DATA_VOLATILE | PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF), + /** pbuf payload refers to RAM. This one comes from a pool and should be used + for RX. Payload can be chained (scatter-gather RX) but like PBUF_RAM, struct + pbuf and its payload are allocated in one piece of contiguous memory (so + the first payload byte can be calculated from struct pbuf). + Don't use this for TX, if the pool becomes empty e.g. because of TCP queuing, + you are unable to receive TCP acks! */ + PBUF_POOL = (PBUF_ALLOC_FLAG_RX | PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL) +} pbuf_type; + + +/** indicates this packet's data should be immediately passed to the application */ +#define PBUF_FLAG_PUSH 0x01U +/** indicates this is a custom pbuf: pbuf_free calls pbuf_custom->custom_free_function() + when the last reference is released (plus custom PBUF_RAM cannot be trimmed) */ +#define PBUF_FLAG_IS_CUSTOM 0x02U +/** indicates this pbuf is UDP multicast to be looped back */ +#define PBUF_FLAG_MCASTLOOP 0x04U +/** indicates this pbuf was received as link-level broadcast */ +#define PBUF_FLAG_LLBCAST 0x08U +/** indicates this pbuf was received as link-level multicast */ +#define PBUF_FLAG_LLMCAST 0x10U +/** indicates this pbuf includes a TCP FIN flag */ +#define PBUF_FLAG_TCP_FIN 0x20U + +/** Main packet buffer struct */ +struct pbuf { + /** next pbuf in singly linked pbuf chain */ + struct pbuf *next; + + /** pointer to the actual data in the buffer */ + void *payload; + + /** + * total length of this buffer and all next buffers in chain + * belonging to the same packet. + * + * For non-queue packet chains this is the invariant: + * p->tot_len == p->len + (p->next? 
p->next->tot_len: 0) + */ + u16_t tot_len; + + /** length of this buffer */ + u16_t len; + + /** a bit field indicating pbuf type and allocation sources + (see PBUF_TYPE_FLAG_*, PBUF_ALLOC_FLAG_* and PBUF_TYPE_ALLOC_SRC_MASK) + */ + u8_t type_internal; + + /** misc flags */ + u8_t flags; + + /** + * the reference count always equals the number of pointers + * that refer to this pbuf. This can be pointers from an application, + * the stack itself, or pbuf->next pointers from a chain. + */ + LWIP_PBUF_REF_T ref; + + /** For incoming packets, this contains the input netif's index */ + u8_t if_idx; +}; + + +/** Helper struct for const-correctness only. + * The only meaning of this one is to provide a const payload pointer + * for PBUF_ROM type. + */ +struct pbuf_rom { + /** next pbuf in singly linked pbuf chain */ + struct pbuf *next; + + /** pointer to the actual data in the buffer */ + const void *payload; +}; + +#if LWIP_SUPPORT_CUSTOM_PBUF +/** Prototype for a function to free a custom pbuf */ +typedef void (*pbuf_free_custom_fn)(struct pbuf *p); + +/** A custom pbuf: like a pbuf, but following a function pointer to free it. */ +struct pbuf_custom { + /** The actual pbuf */ + struct pbuf pbuf; + /** This function is called when pbuf_free deallocates this pbuf(_custom) */ + pbuf_free_custom_fn custom_free_function; +}; +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + +/** Define this to 0 to prevent freeing ooseq pbufs when the PBUF_POOL is empty */ +#ifndef PBUF_POOL_FREE_OOSEQ +#define PBUF_POOL_FREE_OOSEQ 1 +#endif /* PBUF_POOL_FREE_OOSEQ */ +#if LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ +extern volatile u8_t pbuf_free_ooseq_pending; +void pbuf_free_ooseq(void); +/** When not using sys_check_timeouts(), call PBUF_CHECK_FREE_OOSEQ() + at regular intervals from main level to check if ooseq pbufs need to be + freed! */ +#define PBUF_CHECK_FREE_OOSEQ() do { if(pbuf_free_ooseq_pending) { \ + /* pbuf_alloc() reported PBUF_POOL to be empty -> try to free some \ + ooseq queued pbufs now */ \ + pbuf_free_ooseq(); }}while(0) +#else /* LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ */ + /* Otherwise declare an empty PBUF_CHECK_FREE_OOSEQ */ + #define PBUF_CHECK_FREE_OOSEQ() +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ*/ + +/* Initializes the pbuf module. This call is empty for now, but may not be in future. 
*/ +#define pbuf_init() + +struct pbuf *pbuf_alloc(pbuf_layer l, u16_t length, pbuf_type type); +struct pbuf *pbuf_alloc_reference(void *payload, u16_t length, pbuf_type type); +#if LWIP_SUPPORT_CUSTOM_PBUF +struct pbuf *pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, + struct pbuf_custom *p, void *payload_mem, + u16_t payload_mem_len); +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ +void pbuf_realloc(struct pbuf *p, u16_t size); +#define pbuf_get_allocsrc(p) ((p)->type_internal & PBUF_TYPE_ALLOC_SRC_MASK) +#define pbuf_match_allocsrc(p, type) (pbuf_get_allocsrc(p) == ((type) & PBUF_TYPE_ALLOC_SRC_MASK)) +#define pbuf_match_type(p, type) pbuf_match_allocsrc(p, type) +u8_t pbuf_header(struct pbuf *p, s16_t header_size); +u8_t pbuf_header_force(struct pbuf *p, s16_t header_size); +u8_t pbuf_add_header(struct pbuf *p, size_t header_size_increment); +u8_t pbuf_add_header_force(struct pbuf *p, size_t header_size_increment); +u8_t pbuf_remove_header(struct pbuf *p, size_t header_size); +struct pbuf *pbuf_free_header(struct pbuf *q, u16_t size); +void pbuf_ref(struct pbuf *p); +u8_t pbuf_free(struct pbuf *p); +u16_t pbuf_clen(const struct pbuf *p); +void pbuf_cat(struct pbuf *head, struct pbuf *tail); +void pbuf_chain(struct pbuf *head, struct pbuf *tail); +struct pbuf *pbuf_dechain(struct pbuf *p); +err_t pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from); +u16_t pbuf_copy_partial(const struct pbuf *p, void *dataptr, u16_t len, u16_t offset); +void *pbuf_get_contiguous(const struct pbuf *p, void *buffer, size_t bufsize, u16_t len, u16_t offset); +err_t pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len); +err_t pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset); +struct pbuf *pbuf_skip(struct pbuf* in, u16_t in_offset, u16_t* out_offset); +struct pbuf *pbuf_coalesce(struct pbuf *p, pbuf_layer layer); +struct pbuf *pbuf_clone(pbuf_layer l, pbuf_type type, struct pbuf *p); +#if LWIP_CHECKSUM_ON_COPY +err_t pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr, + u16_t len, u16_t *chksum); +#endif /* LWIP_CHECKSUM_ON_COPY */ +#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE +void pbuf_split_64k(struct pbuf *p, struct pbuf **rest); +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + +u8_t pbuf_get_at(const struct pbuf* p, u16_t offset); +int pbuf_try_get_at(const struct pbuf* p, u16_t offset); +void pbuf_put_at(struct pbuf* p, u16_t offset, u8_t data); +u16_t pbuf_memcmp(const struct pbuf* p, u16_t offset, const void* s2, u16_t n); +u16_t pbuf_memfind(const struct pbuf* p, const void* mem, u16_t mem_len, u16_t start_offset); +u16_t pbuf_strstr(const struct pbuf* p, const char* substr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PBUF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h new file mode 100644 index 00000000..2d3b2fdb --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h @@ -0,0 +1,146 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_PRIV_H +#define LWIP_HDR_ALTCP_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_pcb *altcp_alloc(void); +void altcp_free(struct altcp_pcb *conn); + +/* Function prototypes for application layers */ +typedef void (*altcp_set_poll_fn)(struct altcp_pcb *conn, u8_t interval); +typedef void (*altcp_recved_fn)(struct altcp_pcb *conn, u16_t len); +typedef err_t (*altcp_bind_fn)(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port); +typedef err_t (*altcp_connect_fn)(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected); + +typedef struct altcp_pcb *(*altcp_listen_fn)(struct altcp_pcb *conn, u8_t backlog, err_t *err); + +typedef void (*altcp_abort_fn)(struct altcp_pcb *conn); +typedef err_t (*altcp_close_fn)(struct altcp_pcb *conn); +typedef err_t (*altcp_shutdown_fn)(struct altcp_pcb *conn, int shut_rx, int shut_tx); + +typedef err_t (*altcp_write_fn)(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags); +typedef err_t (*altcp_output_fn)(struct altcp_pcb *conn); + +typedef u16_t (*altcp_mss_fn)(struct altcp_pcb *conn); +typedef u16_t (*altcp_sndbuf_fn)(struct altcp_pcb *conn); +typedef u16_t (*altcp_sndqueuelen_fn)(struct altcp_pcb *conn); +typedef void (*altcp_nagle_disable_fn)(struct altcp_pcb *conn); +typedef void (*altcp_nagle_enable_fn)(struct altcp_pcb *conn); +typedef int (*altcp_nagle_disabled_fn)(struct altcp_pcb *conn); + +typedef void (*altcp_setprio_fn)(struct altcp_pcb *conn, u8_t prio); + +typedef void (*altcp_dealloc_fn)(struct altcp_pcb *conn); + +typedef err_t (*altcp_get_tcp_addrinfo_fn)(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port); +typedef ip_addr_t *(*altcp_get_ip_fn)(struct altcp_pcb *conn, int local); +typedef u16_t (*altcp_get_port_fn)(struct altcp_pcb *conn, int local); + +#ifdef LWIP_DEBUG +typedef enum tcp_state 
(*altcp_dbg_get_tcp_state_fn)(struct altcp_pcb *conn); +#endif + +struct altcp_functions { + altcp_set_poll_fn set_poll; + altcp_recved_fn recved; + altcp_bind_fn bind; + altcp_connect_fn connect; + altcp_listen_fn listen; + altcp_abort_fn abort; + altcp_close_fn close; + altcp_shutdown_fn shutdown; + altcp_write_fn write; + altcp_output_fn output; + altcp_mss_fn mss; + altcp_sndbuf_fn sndbuf; + altcp_sndqueuelen_fn sndqueuelen; + altcp_nagle_disable_fn nagle_disable; + altcp_nagle_enable_fn nagle_enable; + altcp_nagle_disabled_fn nagle_disabled; + altcp_setprio_fn setprio; + altcp_dealloc_fn dealloc; + altcp_get_tcp_addrinfo_fn addrinfo; + altcp_get_ip_fn getip; + altcp_get_port_fn getport; +#ifdef LWIP_DEBUG + altcp_dbg_get_tcp_state_fn dbg_get_tcp_state; +#endif +}; + +void altcp_default_set_poll(struct altcp_pcb *conn, u8_t interval); +void altcp_default_recved(struct altcp_pcb *conn, u16_t len); +err_t altcp_default_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port); +err_t altcp_default_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx); +err_t altcp_default_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags); +err_t altcp_default_output(struct altcp_pcb *conn); +u16_t altcp_default_mss(struct altcp_pcb *conn); +u16_t altcp_default_sndbuf(struct altcp_pcb *conn); +u16_t altcp_default_sndqueuelen(struct altcp_pcb *conn); +void altcp_default_nagle_disable(struct altcp_pcb *conn); +void altcp_default_nagle_enable(struct altcp_pcb *conn); +int altcp_default_nagle_disabled(struct altcp_pcb *conn); +void altcp_default_setprio(struct altcp_pcb *conn, u8_t prio); +void altcp_default_dealloc(struct altcp_pcb *conn); +err_t altcp_default_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port); +ip_addr_t *altcp_default_get_ip(struct altcp_pcb *conn, int local); +u16_t altcp_default_get_port(struct altcp_pcb *conn, int local); +#ifdef LWIP_DEBUG +enum tcp_state altcp_default_dbg_get_tcp_state(struct altcp_pcb *conn); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h new file mode 100644 index 00000000..9e8ffc9e --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h @@ -0,0 +1,272 @@ +/** + * @file + * netconn API lwIP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_API_MSG_H +#define LWIP_HDR_API_MSG_H + +#include "lwip/opt.h" + +#include "lwip/arch.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/sys.h" +#include "lwip/igmp.h" +#include "lwip/api.h" +#include "lwip/priv/tcpip_priv.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ +/* Note: Netconn API is always available when sockets are enabled - + * sockets are implemented on top of them */ + +#if LWIP_MPU_COMPATIBLE +#if LWIP_NETCONN_SEM_PER_THREAD +#define API_MSG_M_DEF_SEM(m) *m +#else +#define API_MSG_M_DEF_SEM(m) API_MSG_M_DEF(m) +#endif +#else /* LWIP_MPU_COMPATIBLE */ +#define API_MSG_M_DEF_SEM(m) API_MSG_M_DEF(m) +#endif /* LWIP_MPU_COMPATIBLE */ + +/* For the netconn API, these values are use as a bitmask! */ +#define NETCONN_SHUT_RD 1 +#define NETCONN_SHUT_WR 2 +#define NETCONN_SHUT_RDWR (NETCONN_SHUT_RD | NETCONN_SHUT_WR) + +/* IP addresses and port numbers are expected to be in + * the same byte order as in the corresponding pcb. + */ +/** This struct includes everything that is necessary to execute a function + for a netconn in another thread context (mainly used to process netconns + in the tcpip_thread context to be thread safe). */ +struct api_msg { + /** The netconn which to process - always needed: it includes the semaphore + which is used to block the application thread until the function finished. */ + struct netconn *conn; + /** The return value of the function executed in tcpip_thread. 
*/ + err_t err; + /** Depending on the executed function, one of these union members is used */ + union { + /** used for lwip_netconn_do_send */ + struct netbuf *b; + /** used for lwip_netconn_do_newconn */ + struct { + u8_t proto; + } n; + /** used for lwip_netconn_do_bind and lwip_netconn_do_connect */ + struct { + API_MSG_M_DEF_C(ip_addr_t, ipaddr); + u16_t port; + u8_t if_idx; + } bc; + /** used for lwip_netconn_do_getaddr */ + struct { + ip_addr_t API_MSG_M_DEF(ipaddr); + u16_t API_MSG_M_DEF(port); + u8_t local; + } ad; + /** used for lwip_netconn_do_write */ + struct { + /** current vector to write */ + const struct netvector *vector; + /** number of unwritten vectors */ + u16_t vector_cnt; + /** offset into current vector */ + size_t vector_off; + /** total length across vectors */ + size_t len; + /** offset into total length/output of bytes written when err == ERR_OK */ + size_t offset; + u8_t apiflags; +#if LWIP_SO_SNDTIMEO + u32_t time_started; +#endif /* LWIP_SO_SNDTIMEO */ + } w; + /** used for lwip_netconn_do_recv */ + struct { + size_t len; + } r; +#if LWIP_TCP + /** used for lwip_netconn_do_close (/shutdown) */ + struct { + u8_t shut; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + u32_t time_started; +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + u8_t polls_left; +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + } sd; +#endif /* LWIP_TCP */ +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) + /** used for lwip_netconn_do_join_leave_group */ + struct { + API_MSG_M_DEF_C(ip_addr_t, multiaddr); + API_MSG_M_DEF_C(ip_addr_t, netif_addr); + u8_t if_idx; + enum netconn_igmp join_or_leave; + } jl; +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ +#if TCP_LISTEN_BACKLOG + struct { + u8_t backlog; + } lb; +#endif /* TCP_LISTEN_BACKLOG */ + } msg; +#if LWIP_NETCONN_SEM_PER_THREAD + sys_sem_t* op_completed_sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ +}; + +#if LWIP_NETCONN_SEM_PER_THREAD +#define LWIP_API_MSG_SEM(msg) ((msg)->op_completed_sem) +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define LWIP_API_MSG_SEM(msg) (&(msg)->conn->op_completed) +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + +#if LWIP_DNS +/** As lwip_netconn_do_gethostbyname requires more arguments but doesn't require a netconn, + it has its own struct (to avoid struct api_msg getting bigger than necessary). + lwip_netconn_do_gethostbyname must be called using tcpip_callback instead of tcpip_apimsg + (see netconn_gethostbyname). */ +struct dns_api_msg { + /** Hostname to query or dotted IP address string */ +#if LWIP_MPU_COMPATIBLE + char name[DNS_MAX_NAME_LENGTH]; +#else /* LWIP_MPU_COMPATIBLE */ + const char *name; +#endif /* LWIP_MPU_COMPATIBLE */ + /** The resolved address is stored here */ + ip_addr_t API_MSG_M_DEF(addr); +#if LWIP_IPV4 && LWIP_IPV6 + /** Type of resolve call */ + u8_t dns_addrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + /** This semaphore is posted when the name is resolved, the application thread + should wait on it. 
*/ + sys_sem_t API_MSG_M_DEF_SEM(sem); + /** Errors are given back here */ + err_t API_MSG_M_DEF(err); +}; +#endif /* LWIP_DNS */ + +#if LWIP_NETCONN_FULLDUPLEX +int lwip_netconn_is_deallocated_msg(void *msg); +#endif +int lwip_netconn_is_err_msg(void *msg, err_t *err); +void lwip_netconn_do_newconn (void *m); +void lwip_netconn_do_delconn (void *m); +void lwip_netconn_do_bind (void *m); +void lwip_netconn_do_bind_if (void *m); +void lwip_netconn_do_connect (void *m); +void lwip_netconn_do_disconnect (void *m); +void lwip_netconn_do_listen (void *m); +void lwip_netconn_do_send (void *m); +void lwip_netconn_do_recv (void *m); +#if TCP_LISTEN_BACKLOG +void lwip_netconn_do_accepted (void *m); +#endif /* TCP_LISTEN_BACKLOG */ +void lwip_netconn_do_write (void *m); +void lwip_netconn_do_getaddr (void *m); +void lwip_netconn_do_close (void *m); +void lwip_netconn_do_shutdown (void *m); +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +void lwip_netconn_do_join_leave_group(void *m); +void lwip_netconn_do_join_leave_group_netif(void *m); +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +void lwip_netconn_do_gethostbyname(void *arg); +#endif /* LWIP_DNS */ + +struct netconn* netconn_alloc(enum netconn_type t, netconn_callback callback); +void netconn_free(struct netconn *conn); + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */ + +/* netifapi related lwIP internal definitions */ + +#if LWIP_MPU_COMPATIBLE +#define NETIFAPI_IPADDR_DEF(type, m) type m +#else /* LWIP_MPU_COMPATIBLE */ +#define NETIFAPI_IPADDR_DEF(type, m) const type * m +#endif /* LWIP_MPU_COMPATIBLE */ + +typedef void (*netifapi_void_fn)(struct netif *netif); +typedef err_t (*netifapi_errt_fn)(struct netif *netif); + +struct netifapi_msg { + struct tcpip_api_call_data call; + struct netif *netif; + union { + struct { +#if LWIP_IPV4 + NETIFAPI_IPADDR_DEF(ip4_addr_t, ipaddr); + NETIFAPI_IPADDR_DEF(ip4_addr_t, netmask); + NETIFAPI_IPADDR_DEF(ip4_addr_t, gw); +#endif /* LWIP_IPV4 */ + void *state; + netif_init_fn init; + netif_input_fn input; + } add; + struct { + netifapi_void_fn voidfunc; + netifapi_errt_fn errtfunc; + } common; + struct { +#if LWIP_MPU_COMPATIBLE + char name[NETIF_NAMESIZE]; +#else /* LWIP_MPU_COMPATIBLE */ + char *name; +#endif /* LWIP_MPU_COMPATIBLE */ + u8_t index; + } ifs; + } msg; +}; + +#endif /* LWIP_NETIF_API */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_API_MSG_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h new file mode 100644 index 00000000..8630d754 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h @@ -0,0 +1,84 @@ +/** + * @file + * lwIP internal memory implementations (do not use in application code) + */ + +/* + * Copyright (c) 2018 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_MEM_PRIV_H +#define LWIP_HDR_MEM_PRIV_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "lwip/mem.h" + +#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK +/* if MEM_OVERFLOW_CHECK or MEMP_OVERFLOW_CHECK is turned on, we reserve some + * bytes at the beginning and at the end of each element, initialize them as + * 0xcd and check them later. + * If MEM(P)_OVERFLOW_CHECK is >= 2, on every call to mem(p)_malloc or mem(p)_free, + * every single element in each pool/heap is checked! + * This is VERY SLOW but also very helpful. + * MEM_SANITY_REGION_BEFORE and MEM_SANITY_REGION_AFTER can be overridden in + * lwipopts.h to change the amount reserved for checking. */ +#ifndef MEM_SANITY_REGION_BEFORE +#define MEM_SANITY_REGION_BEFORE 16 +#endif /* MEM_SANITY_REGION_BEFORE*/ +#if MEM_SANITY_REGION_BEFORE > 0 +#define MEM_SANITY_REGION_BEFORE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SANITY_REGION_BEFORE) +#else +#define MEM_SANITY_REGION_BEFORE_ALIGNED 0 +#endif /* MEM_SANITY_REGION_BEFORE*/ +#ifndef MEM_SANITY_REGION_AFTER +#define MEM_SANITY_REGION_AFTER 16 +#endif /* MEM_SANITY_REGION_AFTER*/ +#if MEM_SANITY_REGION_AFTER > 0 +#define MEM_SANITY_REGION_AFTER_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SANITY_REGION_AFTER) +#else +#define MEM_SANITY_REGION_AFTER_ALIGNED 0 +#endif /* MEM_SANITY_REGION_AFTER*/ + +void mem_overflow_init_raw(void *p, size_t size); +void mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2); + +#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEMP_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h new file mode 100644 index 00000000..1f14cb16 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h @@ -0,0 +1,161 @@ +/** + * @file + * memory pools lwIP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_MEMP_PRIV_H +#define LWIP_HDR_MEMP_PRIV_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "lwip/mem.h" +#include "lwip/priv/mem_priv.h" + +#if MEMP_OVERFLOW_CHECK + + +/* MEMP_SIZE: save space for struct memp and for sanity check */ +#define MEMP_SIZE (LWIP_MEM_ALIGN_SIZE(sizeof(struct memp)) + MEM_SANITY_REGION_BEFORE_ALIGNED) +#define MEMP_ALIGN_SIZE(x) (LWIP_MEM_ALIGN_SIZE(x) + MEM_SANITY_REGION_AFTER_ALIGNED) + +#else /* MEMP_OVERFLOW_CHECK */ + +/* No sanity checks + * We don't need to preserve the struct memp while not allocated, so we + * can save a little space and set MEMP_SIZE to 0. 
+ */ +#define MEMP_SIZE 0 +#define MEMP_ALIGN_SIZE(x) (LWIP_MEM_ALIGN_SIZE(x)) + +#endif /* MEMP_OVERFLOW_CHECK */ + +#if !MEMP_MEM_MALLOC || MEMP_OVERFLOW_CHECK +struct memp { + struct memp *next; +#if MEMP_OVERFLOW_CHECK + const char *file; + int line; +#endif /* MEMP_OVERFLOW_CHECK */ +}; +#endif /* !MEMP_MEM_MALLOC || MEMP_OVERFLOW_CHECK */ + +#if MEM_USE_POOLS && MEMP_USE_CUSTOM_POOLS +/* Use a helper type to get the start and end of the user "memory pools" for mem_malloc */ +typedef enum { + /* Get the first (via: + MEMP_POOL_HELPER_START = ((u8_t) 1*MEMP_POOL_A + 0*MEMP_POOL_B + 0*MEMP_POOL_C + 0)*/ + MEMP_POOL_HELPER_FIRST = ((u8_t) +#define LWIP_MEMPOOL(name,num,size,desc) +#define LWIP_MALLOC_MEMPOOL_START 1 +#define LWIP_MALLOC_MEMPOOL(num, size) * MEMP_POOL_##size + 0 +#define LWIP_MALLOC_MEMPOOL_END +#include "lwip/priv/memp_std.h" + ) , + /* Get the last (via: + MEMP_POOL_HELPER_END = ((u8_t) 0 + MEMP_POOL_A*0 + MEMP_POOL_B*0 + MEMP_POOL_C*1) */ + MEMP_POOL_HELPER_LAST = ((u8_t) +#define LWIP_MEMPOOL(name,num,size,desc) +#define LWIP_MALLOC_MEMPOOL_START +#define LWIP_MALLOC_MEMPOOL(num, size) 0 + MEMP_POOL_##size * +#define LWIP_MALLOC_MEMPOOL_END 1 +#include "lwip/priv/memp_std.h" + ) +} memp_pool_helper_t; + +/* The actual start and stop values are here (cast them over) + We use this helper type and these defines so we can avoid using const memp_t values */ +#define MEMP_POOL_FIRST ((memp_t) MEMP_POOL_HELPER_FIRST) +#define MEMP_POOL_LAST ((memp_t) MEMP_POOL_HELPER_LAST) +#endif /* MEM_USE_POOLS && MEMP_USE_CUSTOM_POOLS */ + +/** Memory pool descriptor */ +struct memp_desc { +#if defined(LWIP_DEBUG) || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY + /** Textual description */ + const char *desc; +#endif /* LWIP_DEBUG || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY */ +#if MEMP_STATS + /** Statistics */ + struct stats_mem *stats; +#endif + + /** Element size */ + u16_t size; + +#if !MEMP_MEM_MALLOC + /** Number of elements */ + u16_t num; + + /** Base address */ + u8_t *base; + + /** First free element of each pool. Elements form a linked list. 
*/ + struct memp **tab; +#endif /* MEMP_MEM_MALLOC */ +}; + +#if defined(LWIP_DEBUG) || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY +#define DECLARE_LWIP_MEMPOOL_DESC(desc) (desc), +#else +#define DECLARE_LWIP_MEMPOOL_DESC(desc) +#endif + +#if MEMP_STATS +#define LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(name) static struct stats_mem name; +#define LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(name) &name, +#else +#define LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(name) +#define LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(name) +#endif + +void memp_init_pool(const struct memp_desc *desc); + +#if MEMP_OVERFLOW_CHECK +void *memp_malloc_pool_fn(const struct memp_desc* desc, const char* file, const int line); +#define memp_malloc_pool(d) memp_malloc_pool_fn((d), __FILE__, __LINE__) +#else +void *memp_malloc_pool(const struct memp_desc *desc); +#endif +void memp_free_pool(const struct memp_desc* desc, void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEMP_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h new file mode 100644 index 00000000..669ad4d7 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h @@ -0,0 +1,153 @@ +/** + * @file + * lwIP internal memory pools (do not use in application code) + * This file is deliberately included multiple times: once with empty + * definition of LWIP_MEMPOOL() to handle all includes and multiple times + * to build up various lists of mem pools. + */ + +/* + * SETUP: Make sure we define everything we will need. + * + * We have create three types of pools: + * 1) MEMPOOL - standard pools + * 2) MALLOC_MEMPOOL - to be used by mem_malloc in mem.c + * 3) PBUF_MEMPOOL - a mempool of pbuf's, so include space for the pbuf struct + * + * If the include'r doesn't require any special treatment of each of the types + * above, then will declare #2 & #3 to be just standard mempools. + */ +#ifndef LWIP_MALLOC_MEMPOOL +/* This treats "malloc pools" just like any other pool. + The pools are a little bigger to provide 'size' as the amount of user data. */ +#define LWIP_MALLOC_MEMPOOL(num, size) LWIP_MEMPOOL(POOL_##size, num, (size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper))), "MALLOC_"#size) +#define LWIP_MALLOC_MEMPOOL_START +#define LWIP_MALLOC_MEMPOOL_END +#endif /* LWIP_MALLOC_MEMPOOL */ + +#ifndef LWIP_PBUF_MEMPOOL +/* This treats "pbuf pools" just like any other pool. + * Allocates buffers for a pbuf struct AND a payload size */ +#define LWIP_PBUF_MEMPOOL(name, num, payload, desc) LWIP_MEMPOOL(name, num, (LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf)) + LWIP_MEM_ALIGN_SIZE(payload)), desc) +#endif /* LWIP_PBUF_MEMPOOL */ + + +/* + * A list of internal pools used by LWIP. + * + * LWIP_MEMPOOL(pool_name, number_elements, element_size, pool_description) + * creates a pool name MEMP_pool_name. 
description is used in stats.c + */ +#if LWIP_RAW +LWIP_MEMPOOL(RAW_PCB, MEMP_NUM_RAW_PCB, sizeof(struct raw_pcb), "RAW_PCB") +#endif /* LWIP_RAW */ + +#if LWIP_UDP +LWIP_MEMPOOL(UDP_PCB, MEMP_NUM_UDP_PCB, sizeof(struct udp_pcb), "UDP_PCB") +#endif /* LWIP_UDP */ + +#if LWIP_TCP +LWIP_MEMPOOL(TCP_PCB, MEMP_NUM_TCP_PCB, sizeof(struct tcp_pcb), "TCP_PCB") +LWIP_MEMPOOL(TCP_PCB_LISTEN, MEMP_NUM_TCP_PCB_LISTEN, sizeof(struct tcp_pcb_listen), "TCP_PCB_LISTEN") +LWIP_MEMPOOL(TCP_SEG, MEMP_NUM_TCP_SEG, sizeof(struct tcp_seg), "TCP_SEG") +#endif /* LWIP_TCP */ + +#if LWIP_ALTCP && LWIP_TCP +LWIP_MEMPOOL(ALTCP_PCB, MEMP_NUM_ALTCP_PCB, sizeof(struct altcp_pcb), "ALTCP_PCB") +#endif /* LWIP_ALTCP && LWIP_TCP */ + +#if LWIP_IPV4 && IP_REASSEMBLY +LWIP_MEMPOOL(REASSDATA, MEMP_NUM_REASSDATA, sizeof(struct ip_reassdata), "REASSDATA") +#endif /* LWIP_IPV4 && IP_REASSEMBLY */ +#if (IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF) || (LWIP_IPV6 && LWIP_IPV6_FRAG) +LWIP_MEMPOOL(FRAG_PBUF, MEMP_NUM_FRAG_PBUF, sizeof(struct pbuf_custom_ref),"FRAG_PBUF") +#endif /* IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF || (LWIP_IPV6 && LWIP_IPV6_FRAG) */ + +#if LWIP_NETCONN || LWIP_SOCKET +LWIP_MEMPOOL(NETBUF, MEMP_NUM_NETBUF, sizeof(struct netbuf), "NETBUF") +LWIP_MEMPOOL(NETCONN, MEMP_NUM_NETCONN, sizeof(struct netconn), "NETCONN") +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#if NO_SYS==0 +LWIP_MEMPOOL(TCPIP_MSG_API, MEMP_NUM_TCPIP_MSG_API, sizeof(struct tcpip_msg), "TCPIP_MSG_API") +#if LWIP_MPU_COMPATIBLE +LWIP_MEMPOOL(API_MSG, MEMP_NUM_API_MSG, sizeof(struct api_msg), "API_MSG") +#if LWIP_DNS +LWIP_MEMPOOL(DNS_API_MSG, MEMP_NUM_DNS_API_MSG, sizeof(struct dns_api_msg), "DNS_API_MSG") +#endif +#if LWIP_SOCKET && !LWIP_TCPIP_CORE_LOCKING +LWIP_MEMPOOL(SOCKET_SETGETSOCKOPT_DATA, MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA, sizeof(struct lwip_setgetsockopt_data), "SOCKET_SETGETSOCKOPT_DATA") +#endif +#if LWIP_SOCKET && (LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL) +LWIP_MEMPOOL(SELECT_CB, MEMP_NUM_SELECT_CB, sizeof(struct lwip_select_cb), "SELECT_CB") +#endif /* LWIP_SOCKET && (LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL) */ +#if LWIP_NETIF_API +LWIP_MEMPOOL(NETIFAPI_MSG, MEMP_NUM_NETIFAPI_MSG, sizeof(struct netifapi_msg), "NETIFAPI_MSG") +#endif +#endif /* LWIP_MPU_COMPATIBLE */ +#if !LWIP_TCPIP_CORE_LOCKING_INPUT +LWIP_MEMPOOL(TCPIP_MSG_INPKT,MEMP_NUM_TCPIP_MSG_INPKT, sizeof(struct tcpip_msg), "TCPIP_MSG_INPKT") +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ +#endif /* NO_SYS==0 */ + +#if LWIP_IPV4 && LWIP_ARP && ARP_QUEUEING +LWIP_MEMPOOL(ARP_QUEUE, MEMP_NUM_ARP_QUEUE, sizeof(struct etharp_q_entry), "ARP_QUEUE") +#endif /* LWIP_IPV4 && LWIP_ARP && ARP_QUEUEING */ + +#if LWIP_IGMP +LWIP_MEMPOOL(IGMP_GROUP, MEMP_NUM_IGMP_GROUP, sizeof(struct igmp_group), "IGMP_GROUP") +#endif /* LWIP_IGMP */ + +#if LWIP_TIMERS && !LWIP_TIMERS_CUSTOM +LWIP_MEMPOOL(SYS_TIMEOUT, MEMP_NUM_SYS_TIMEOUT, sizeof(struct sys_timeo), "SYS_TIMEOUT") +#endif /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ + +#if LWIP_DNS && LWIP_SOCKET +LWIP_MEMPOOL(NETDB, MEMP_NUM_NETDB, NETDB_ELEM_SIZE, "NETDB") +#endif /* LWIP_DNS && LWIP_SOCKET */ +#if LWIP_DNS && DNS_LOCAL_HOSTLIST && DNS_LOCAL_HOSTLIST_IS_DYNAMIC +LWIP_MEMPOOL(LOCALHOSTLIST, MEMP_NUM_LOCALHOSTLIST, LOCALHOSTLIST_ELEM_SIZE, "LOCALHOSTLIST") +#endif /* LWIP_DNS && DNS_LOCAL_HOSTLIST && DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +#if LWIP_IPV6 && LWIP_ND6_QUEUEING +LWIP_MEMPOOL(ND6_QUEUE, MEMP_NUM_ND6_QUEUE, sizeof(struct nd6_q_entry), "ND6_QUEUE") +#endif /* LWIP_IPV6 && LWIP_ND6_QUEUEING */ + +#if LWIP_IPV6 && LWIP_IPV6_REASS 
+LWIP_MEMPOOL(IP6_REASSDATA, MEMP_NUM_REASSDATA, sizeof(struct ip6_reassdata), "IP6_REASSDATA") +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_MLD +LWIP_MEMPOOL(MLD6_GROUP, MEMP_NUM_MLD6_GROUP, sizeof(struct mld_group), "MLD6_GROUP") +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + + +/* + * A list of pools of pbuf's used by LWIP. + * + * LWIP_PBUF_MEMPOOL(pool_name, number_elements, pbuf_payload_size, pool_description) + * creates a pool name MEMP_pool_name. description is used in stats.c + * This allocates enough space for the pbuf struct and a payload. + * (Example: pbuf_payload_size=0 allocates only size for the struct) + */ +LWIP_MEMPOOL(PBUF, MEMP_NUM_PBUF, sizeof(struct pbuf), "PBUF_REF/ROM") +LWIP_PBUF_MEMPOOL(PBUF_POOL, PBUF_POOL_SIZE, PBUF_POOL_BUFSIZE, "PBUF_POOL") + + +/* + * Allow for user-defined pools; this must be explicitly set in lwipopts.h + * since the default is to NOT look for lwippools.h + */ +#if MEMP_USE_CUSTOM_POOLS +#include "lwippools.h" +#endif /* MEMP_USE_CUSTOM_POOLS */ + +/* + * REQUIRED CLEANUP: Clear up so we don't get "multiply defined" error later + * (#undef is ignored for something that is not defined) + */ +#undef LWIP_MEMPOOL +#undef LWIP_MALLOC_MEMPOOL +#undef LWIP_MALLOC_MEMPOOL_START +#undef LWIP_MALLOC_MEMPOOL_END +#undef LWIP_PBUF_MEMPOOL diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h new file mode 100644 index 00000000..cc3007d9 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h @@ -0,0 +1,142 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ND6_PRIV_H +#define LWIP_HDR_ND6_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ND6_QUEUEING +/** struct for queueing outgoing packets for unknown address + * defined here to be accessed by memp.h + */ +struct nd6_q_entry { + struct nd6_q_entry *next; + struct pbuf *p; +}; +#endif /* LWIP_ND6_QUEUEING */ + +/** Struct for tables. */ +struct nd6_neighbor_cache_entry { + ip6_addr_t next_hop_address; + struct netif *netif; + u8_t lladdr[NETIF_MAX_HWADDR_LEN]; + /*u32_t pmtu;*/ +#if LWIP_ND6_QUEUEING + /** Pointer to queue of pending outgoing packets on this entry. */ + struct nd6_q_entry *q; +#else /* LWIP_ND6_QUEUEING */ + /** Pointer to a single pending outgoing packet on this entry. */ + struct pbuf *q; +#endif /* LWIP_ND6_QUEUEING */ + u8_t state; + u8_t isrouter; + union { + u32_t reachable_time; /* in seconds */ + u32_t delay_time; /* ticks (ND6_TMR_INTERVAL) */ + u32_t probes_sent; + u32_t stale_time; /* ticks (ND6_TMR_INTERVAL) */ + } counter; +}; + +struct nd6_destination_cache_entry { + ip6_addr_t destination_addr; + ip6_addr_t next_hop_addr; + u16_t pmtu; + u32_t age; +}; + +struct nd6_prefix_list_entry { + ip6_addr_t prefix; + struct netif *netif; + u32_t invalidation_timer; /* in seconds */ +}; + +struct nd6_router_list_entry { + struct nd6_neighbor_cache_entry *neighbor_entry; + u32_t invalidation_timer; /* in seconds */ + u8_t flags; +}; + +enum nd6_neighbor_cache_entry_state { + ND6_NO_ENTRY = 0, + ND6_INCOMPLETE, + ND6_REACHABLE, + ND6_STALE, + ND6_DELAY, + ND6_PROBE +}; + +#define ND6_HOPLIM 255 /* maximum hop limit, required in all ND packets */ + +#define ND6_2HRS 7200 /* two hours, expressed in number of seconds */ + +/* Router tables. */ +/* @todo make these static? and entries accessible through API? */ +extern struct nd6_neighbor_cache_entry neighbor_cache[]; +extern struct nd6_destination_cache_entry destination_cache[]; +extern struct nd6_prefix_list_entry prefix_list[]; +extern struct nd6_router_list_entry default_router_list[]; + +/* Default values, can be updated by a RA message. */ +extern u32_t reachable_time; +extern u32_t retrans_timer; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_ND6_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h new file mode 100644 index 00000000..d4561d4f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h @@ -0,0 +1,69 @@ +/** + * @file + * raw API internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_RAW_PRIV_H +#define LWIP_HDR_RAW_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/raw.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** return codes for raw_input */ +typedef enum raw_input_state +{ + RAW_INPUT_NONE = 0, /* pbuf did not match any pcbs */ + RAW_INPUT_EATEN, /* pbuf handed off and delivered to pcb */ + RAW_INPUT_DELIVERED /* pbuf only delivered to pcb (pbuf can still be referenced) */ +} raw_input_state_t; + +/* The following functions are the lower layer interface to RAW. */ +raw_input_state_t raw_input(struct pbuf *p, struct netif *inp); + +void raw_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_RAW */ + +#endif /* LWIP_HDR_RAW_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h new file mode 100644 index 00000000..d8f9904d --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h @@ -0,0 +1,175 @@ +/** + * @file + * Sockets API internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Joel Cunningham + * + */ +#ifndef LWIP_HDR_SOCKETS_PRIV_H +#define LWIP_HDR_SOCKETS_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/sockets.h" +#include "lwip/sys.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NUM_SOCKETS MEMP_NUM_NETCONN + +/** This is overridable for the rare case where more than 255 threads + * select on the same socket... + */ +#ifndef SELWAIT_T +#define SELWAIT_T u8_t +#endif + +union lwip_sock_lastdata { + struct netbuf *netbuf; + struct pbuf *pbuf; +}; + +/** Contains all internal pointers and states used for a socket */ +struct lwip_sock { + /** sockets currently are built on netconns, each socket has one netconn */ + struct netconn *conn; + /** data that was left from the previous read */ + union lwip_sock_lastdata lastdata; +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL + /** number of times data was received, set by event_callback(), + tested by the receive and select functions */ + s16_t rcvevent; + /** number of times data was ACKed (free send buffer), set by event_callback(), + tested by select */ + u16_t sendevent; + /** error happened for this socket, set by event_callback(), tested by select */ + u16_t errevent; + /** counter of how many threads are waiting for this socket using select */ + SELWAIT_T select_waiting; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ +#if LWIP_NETCONN_FULLDUPLEX + /* counter of how many threads are using a struct lwip_sock (not the 'int') */ + u8_t fd_used; + /* status of pending close/delete actions */ + u8_t fd_free_pending; +#define LWIP_SOCK_FD_FREE_TCP 1 +#define LWIP_SOCK_FD_FREE_FREE 2 +#endif +}; + +#ifndef set_errno +#define set_errno(err) do { if (err) { errno = (err); } } while(0) +#endif + +#if !LWIP_TCPIP_CORE_LOCKING +/** Maximum optlen used by setsockopt/getsockopt */ +#define LWIP_SETGETSOCKOPT_MAXOPTLEN LWIP_MAX(16, sizeof(struct ifreq)) + +/** This struct is used to pass data to the set/getsockopt_internal + * functions running in tcpip_thread context (only a void* is allowed) */ +struct lwip_setgetsockopt_data { + /** socket index for which to change options */ + int s; + /** level of the option to process */ + int level; + /** name of the option to process */ + int optname; + /** set: value to set the option to + * get: value of the option is stored here */ +#if LWIP_MPU_COMPATIBLE + u8_t optval[LWIP_SETGETSOCKOPT_MAXOPTLEN]; +#else + union { + void *p; + const void *pc; + } optval; +#endif + /** size of *optval */ + socklen_t optlen; + /** if an error occurs, it is temporarily stored here */ + int err; + /** semaphore to wake up the calling task */ + void* completed_sem; +}; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + +#ifdef __cplusplus +} +#endif + +struct lwip_sock* lwip_socket_dbg_get_socket(int fd); + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL + +#if LWIP_NETCONN_SEM_PER_THREAD +#define SELECT_SEM_T sys_sem_t* 
+#define SELECT_SEM_PTR(sem) (sem) +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define SELECT_SEM_T sys_sem_t +#define SELECT_SEM_PTR(sem) (&(sem)) +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +/** Description for a task waiting in select */ +struct lwip_select_cb { + /** Pointer to the next waiting task */ + struct lwip_select_cb *next; + /** Pointer to the previous waiting task */ + struct lwip_select_cb *prev; +#if LWIP_SOCKET_SELECT + /** readset passed to select */ + fd_set *readset; + /** writeset passed to select */ + fd_set *writeset; + /** unimplemented: exceptset passed to select */ + fd_set *exceptset; +#endif /* LWIP_SOCKET_SELECT */ +#if LWIP_SOCKET_POLL + /** fds passed to poll; NULL if select */ + struct pollfd *poll_fds; + /** nfds passed to poll; 0 if select */ + nfds_t poll_nfds; +#endif /* LWIP_SOCKET_POLL */ + /** don't signal the same semaphore twice: set to 1 when signalled */ + int sem_signalled; + /** semaphore to wake up a task waiting for select */ + SELECT_SEM_T sem; +}; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +#endif /* LWIP_SOCKET */ + +#endif /* LWIP_HDR_SOCKETS_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h new file mode 100644 index 00000000..72f9126d --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h @@ -0,0 +1,523 @@ +/** + * @file + * TCP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCP_PRIV_H +#define LWIP_HDR_TCP_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcp.h" +#include "lwip/mem.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/icmp.h" +#include "lwip/err.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/tcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Functions for interfacing with TCP: */ + +/* Lower layer interface to TCP: */ +void tcp_init (void); /* Initialize this module. */ +void tcp_tmr (void); /* Must be called every + TCP_TMR_INTERVAL + ms. (Typically 250 ms). */ +/* It is also possible to call these two functions at the right + intervals (instead of calling tcp_tmr()). */ +void tcp_slowtmr (void); +void tcp_fasttmr (void); + +/* Call this from a netif driver (watch out for threading issues!) that has + returned a memory error on transmit and now has free buffers to send more. + This iterates all active pcbs that had an error and tries to call + tcp_output, so use this with care as it might slow down the system. */ +void tcp_txnow (void); + +/* Only used by IP to pass a TCP segment to TCP: */ +void tcp_input (struct pbuf *p, struct netif *inp); +/* Used within the TCP code only: */ +struct tcp_pcb * tcp_alloc (u8_t prio); +void tcp_free (struct tcp_pcb *pcb); +void tcp_abandon (struct tcp_pcb *pcb, int reset); +err_t tcp_send_empty_ack(struct tcp_pcb *pcb); +err_t tcp_rexmit (struct tcp_pcb *pcb); +err_t tcp_rexmit_rto_prepare(struct tcp_pcb *pcb); +void tcp_rexmit_rto_commit(struct tcp_pcb *pcb); +void tcp_rexmit_rto (struct tcp_pcb *pcb); +void tcp_rexmit_fast (struct tcp_pcb *pcb); +u32_t tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb); +err_t tcp_process_refused_data(struct tcp_pcb *pcb); + +/** + * This is the Nagle algorithm: try to combine user data to send as few TCP + * segments as possible. Only send if + * - no previously transmitted data on the connection remains unacknowledged or + * - the TF_NODELAY flag is set (nagle algorithm turned off for this pcb) or + * - the only unsent segment is at least pcb->mss bytes long (or there is more + * than one unsent segment - with lwIP, this can happen although unsent->len < mss) + * - or if we are in fast-retransmit (TF_INFR) + */ +#define tcp_do_output_nagle(tpcb) ((((tpcb)->unacked == NULL) || \ + ((tpcb)->flags & (TF_NODELAY | TF_INFR)) || \ + (((tpcb)->unsent != NULL) && (((tpcb)->unsent->next != NULL) || \ + ((tpcb)->unsent->len >= (tpcb)->mss))) || \ + ((tcp_sndbuf(tpcb) == 0) || (tcp_sndqueuelen(tpcb) >= TCP_SND_QUEUELEN)) \ + ) ? 1 : 0) +#define tcp_output_nagle(tpcb) (tcp_do_output_nagle(tpcb) ? tcp_output(tpcb) : ERR_OK) + + +#define TCP_SEQ_LT(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) < 0) +#define TCP_SEQ_LEQ(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) <= 0) +#define TCP_SEQ_GT(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) > 0) +#define TCP_SEQ_GEQ(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) >= 0) +/* is b<=a<=c? */ +#if 0 /* see bug #10548 */ +#define TCP_SEQ_BETWEEN(a,b,c) ((c)-(b) >= (a)-(b)) +#endif +#define TCP_SEQ_BETWEEN(a,b,c) (TCP_SEQ_GEQ(a,b) && TCP_SEQ_LEQ(a,c)) + +#ifndef TCP_TMR_INTERVAL +#define TCP_TMR_INTERVAL 250 /* The TCP timer interval in milliseconds. 
*/ +#endif /* TCP_TMR_INTERVAL */ + +#ifndef TCP_FAST_INTERVAL +#define TCP_FAST_INTERVAL TCP_TMR_INTERVAL /* the fine grained timeout in milliseconds */ +#endif /* TCP_FAST_INTERVAL */ + +#ifndef TCP_SLOW_INTERVAL +#define TCP_SLOW_INTERVAL (2*TCP_TMR_INTERVAL) /* the coarse grained timeout in milliseconds */ +#endif /* TCP_SLOW_INTERVAL */ + +#define TCP_FIN_WAIT_TIMEOUT 20000 /* milliseconds */ +#define TCP_SYN_RCVD_TIMEOUT 20000 /* milliseconds */ + +#define TCP_OOSEQ_TIMEOUT 6U /* x RTO */ + +#ifndef TCP_MSL +#define TCP_MSL 60000UL /* The maximum segment lifetime in milliseconds */ +#endif + +/* Keepalive values, compliant with RFC 1122. Don't change this unless you know what you're doing */ +#ifndef TCP_KEEPIDLE_DEFAULT +#define TCP_KEEPIDLE_DEFAULT 7200000UL /* Default KEEPALIVE timer in milliseconds */ +#endif + +#ifndef TCP_KEEPINTVL_DEFAULT +#define TCP_KEEPINTVL_DEFAULT 75000UL /* Default Time between KEEPALIVE probes in milliseconds */ +#endif + +#ifndef TCP_KEEPCNT_DEFAULT +#define TCP_KEEPCNT_DEFAULT 9U /* Default Counter for KEEPALIVE probes */ +#endif + +#define TCP_MAXIDLE TCP_KEEPCNT_DEFAULT * TCP_KEEPINTVL_DEFAULT /* Maximum KEEPALIVE probe time */ + +#define TCP_TCPLEN(seg) ((seg)->len + (((TCPH_FLAGS((seg)->tcphdr) & (TCP_FIN | TCP_SYN)) != 0) ? 1U : 0U)) + +/** Flags used on input processing, not on pcb->flags +*/ +#define TF_RESET (u8_t)0x08U /* Connection was reset. */ +#define TF_CLOSED (u8_t)0x10U /* Connection was successfully closed. */ +#define TF_GOT_FIN (u8_t)0x20U /* Connection was closed by the remote end. */ + + +#if LWIP_EVENT_API + +#define TCP_EVENT_ACCEPT(lpcb,pcb,arg,err,ret) ret = lwip_tcp_event(arg, (pcb),\ + LWIP_EVENT_ACCEPT, NULL, 0, err) +#define TCP_EVENT_SENT(pcb,space,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_SENT, NULL, space, ERR_OK) +#define TCP_EVENT_RECV(pcb,p,err,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_RECV, (p), 0, (err)) +#define TCP_EVENT_CLOSED(pcb,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_RECV, NULL, 0, ERR_OK) +#define TCP_EVENT_CONNECTED(pcb,err,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_CONNECTED, NULL, 0, (err)) +#define TCP_EVENT_POLL(pcb,ret) do { if ((pcb)->state != SYN_RCVD) { \ + ret = lwip_tcp_event((pcb)->callback_arg, (pcb), LWIP_EVENT_POLL, NULL, 0, ERR_OK); \ + } else { \ + ret = ERR_ARG; } } while(0) +/* For event API, last state SYN_RCVD must be excluded here: the application + has not seen this pcb, yet! 
*/ +#define TCP_EVENT_ERR(last_state,errf,arg,err) do { if (last_state != SYN_RCVD) { \ + lwip_tcp_event((arg), NULL, LWIP_EVENT_ERR, NULL, 0, (err)); } } while(0) + +#else /* LWIP_EVENT_API */ + +#define TCP_EVENT_ACCEPT(lpcb,pcb,arg,err,ret) \ + do { \ + if((lpcb)->accept != NULL) \ + (ret) = (lpcb)->accept((arg),(pcb),(err)); \ + else (ret) = ERR_ARG; \ + } while (0) + +#define TCP_EVENT_SENT(pcb,space,ret) \ + do { \ + if((pcb)->sent != NULL) \ + (ret) = (pcb)->sent((pcb)->callback_arg,(pcb),(space)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_RECV(pcb,p,err,ret) \ + do { \ + if((pcb)->recv != NULL) { \ + (ret) = (pcb)->recv((pcb)->callback_arg,(pcb),(p),(err));\ + } else { \ + (ret) = tcp_recv_null(NULL, (pcb), (p), (err)); \ + } \ + } while (0) + +#define TCP_EVENT_CLOSED(pcb,ret) \ + do { \ + if(((pcb)->recv != NULL)) { \ + (ret) = (pcb)->recv((pcb)->callback_arg,(pcb),NULL,ERR_OK);\ + } else { \ + (ret) = ERR_OK; \ + } \ + } while (0) + +#define TCP_EVENT_CONNECTED(pcb,err,ret) \ + do { \ + if((pcb)->connected != NULL) \ + (ret) = (pcb)->connected((pcb)->callback_arg,(pcb),(err)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_POLL(pcb,ret) \ + do { \ + if((pcb)->poll != NULL) \ + (ret) = (pcb)->poll((pcb)->callback_arg,(pcb)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_ERR(last_state,errf,arg,err) \ + do { \ + LWIP_UNUSED_ARG(last_state); \ + if((errf) != NULL) \ + (errf)((arg),(err)); \ + } while (0) + +#endif /* LWIP_EVENT_API */ + +/** Enabled extra-check for TCP_OVERSIZE if LWIP_DEBUG is enabled */ +#if TCP_OVERSIZE && defined(LWIP_DEBUG) +#define TCP_OVERSIZE_DBGCHECK 1 +#else +#define TCP_OVERSIZE_DBGCHECK 0 +#endif + +/** Don't generate checksum on copy if CHECKSUM_GEN_TCP is disabled */ +#define TCP_CHECKSUM_ON_COPY (LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_TCP) + +/* This structure represents a TCP segment on the unsent, unacked and ooseq queues */ +struct tcp_seg { + struct tcp_seg *next; /* used when putting segments on a queue */ + struct pbuf *p; /* buffer containing data + TCP header */ + u16_t len; /* the TCP length of this segment */ +#if TCP_OVERSIZE_DBGCHECK + u16_t oversize_left; /* Extra bytes available at the end of the last + pbuf in unsent (used for asserting vs. + tcp_pcb.unsent_oversize only) */ +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + u16_t chksum; + u8_t chksum_swapped; +#endif /* TCP_CHECKSUM_ON_COPY */ + u8_t flags; +#define TF_SEG_OPTS_MSS (u8_t)0x01U /* Include MSS option (only used in SYN segments) */ +#define TF_SEG_OPTS_TS (u8_t)0x02U /* Include timestamp option. 
*/ +#define TF_SEG_DATA_CHECKSUMMED (u8_t)0x04U /* ALL data (not the header) is + checksummed into 'chksum' */ +#define TF_SEG_OPTS_WND_SCALE (u8_t)0x08U /* Include WND SCALE option (only used in SYN segments) */ +#define TF_SEG_OPTS_SACK_PERM (u8_t)0x10U /* Include SACK Permitted option (only used in SYN segments) */ + struct tcp_hdr *tcphdr; /* the TCP header */ +}; + +#define LWIP_TCP_OPT_EOL 0 +#define LWIP_TCP_OPT_NOP 1 +#define LWIP_TCP_OPT_MSS 2 +#define LWIP_TCP_OPT_WS 3 +#define LWIP_TCP_OPT_SACK_PERM 4 +#define LWIP_TCP_OPT_TS 8 + +#define LWIP_TCP_OPT_LEN_MSS 4 +#if LWIP_TCP_TIMESTAMPS +#define LWIP_TCP_OPT_LEN_TS 10 +#define LWIP_TCP_OPT_LEN_TS_OUT 12 /* aligned for output (includes NOP padding) */ +#else +#define LWIP_TCP_OPT_LEN_TS_OUT 0 +#endif +#if LWIP_WND_SCALE +#define LWIP_TCP_OPT_LEN_WS 3 +#define LWIP_TCP_OPT_LEN_WS_OUT 4 /* aligned for output (includes NOP padding) */ +#else +#define LWIP_TCP_OPT_LEN_WS_OUT 0 +#endif + +#if LWIP_TCP_SACK_OUT +#define LWIP_TCP_OPT_LEN_SACK_PERM 2 +#define LWIP_TCP_OPT_LEN_SACK_PERM_OUT 4 /* aligned for output (includes NOP padding) */ +#else +#define LWIP_TCP_OPT_LEN_SACK_PERM_OUT 0 +#endif + +#define LWIP_TCP_OPT_LENGTH(flags) \ + ((flags) & TF_SEG_OPTS_MSS ? LWIP_TCP_OPT_LEN_MSS : 0) + \ + ((flags) & TF_SEG_OPTS_TS ? LWIP_TCP_OPT_LEN_TS_OUT : 0) + \ + ((flags) & TF_SEG_OPTS_WND_SCALE ? LWIP_TCP_OPT_LEN_WS_OUT : 0) + \ + ((flags) & TF_SEG_OPTS_SACK_PERM ? LWIP_TCP_OPT_LEN_SACK_PERM_OUT : 0) + +/** This returns a TCP header option for MSS in an u32_t */ +#define TCP_BUILD_MSS_OPTION(mss) lwip_htonl(0x02040000 | ((mss) & 0xFFFF)) + +#if LWIP_WND_SCALE +#define TCPWNDSIZE_F U32_F +#define TCPWND_MAX 0xFFFFFFFFU +#define TCPWND_CHECK16(x) LWIP_ASSERT("window size > 0xFFFF", (x) <= 0xFFFF) +#define TCPWND_MIN16(x) ((u16_t)LWIP_MIN((x), 0xFFFF)) +#else /* LWIP_WND_SCALE */ +#define TCPWNDSIZE_F U16_F +#define TCPWND_MAX 0xFFFFU +#define TCPWND_CHECK16(x) +#define TCPWND_MIN16(x) x +#endif /* LWIP_WND_SCALE */ + +/* Global variables: */ +extern struct tcp_pcb *tcp_input_pcb; +extern u32_t tcp_ticks; +extern u8_t tcp_active_pcbs_changed; + +/* The TCP PCB lists. */ +union tcp_listen_pcbs_t { /* List of all TCP PCBs in LISTEN state. */ + struct tcp_pcb_listen *listen_pcbs; + struct tcp_pcb *pcbs; +}; +extern struct tcp_pcb *tcp_bound_pcbs; +extern union tcp_listen_pcbs_t tcp_listen_pcbs; +extern struct tcp_pcb *tcp_active_pcbs; /* List of all TCP PCBs that are in a + state in which they accept or send + data. */ +extern struct tcp_pcb *tcp_tw_pcbs; /* List of all TCP PCBs in TIME-WAIT. */ + +#define NUM_TCP_PCB_LISTS_NO_TIME_WAIT 3 +#define NUM_TCP_PCB_LISTS 4 +extern struct tcp_pcb ** const tcp_pcb_lists[NUM_TCP_PCB_LISTS]; + +/* Axioms about the above lists: + 1) Every TCP PCB that is not CLOSED is in one of the lists. + 2) A PCB is only in one of the lists. + 3) All PCBs in the tcp_listen_pcbs list is in LISTEN state. + 4) All PCBs in the tcp_tw_pcbs list is in TIME-WAIT state. +*/ +/* Define two macros, TCP_REG and TCP_RMV that registers a TCP PCB + with a PCB list or removes a PCB from a list, respectively. 
*/ +#ifndef TCP_DEBUG_PCB_LISTS +#define TCP_DEBUG_PCB_LISTS 0 +#endif +#if TCP_DEBUG_PCB_LISTS +#define TCP_REG(pcbs, npcb) do {\ + struct tcp_pcb *tcp_tmp_pcb; \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_REG %p local port %"U16_F"\n", (void *)(npcb), (npcb)->local_port)); \ + for (tcp_tmp_pcb = *(pcbs); \ + tcp_tmp_pcb != NULL; \ + tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + LWIP_ASSERT("TCP_REG: already registered\n", tcp_tmp_pcb != (npcb)); \ + } \ + LWIP_ASSERT("TCP_REG: pcb->state != CLOSED", ((pcbs) == &tcp_bound_pcbs) || ((npcb)->state != CLOSED)); \ + (npcb)->next = *(pcbs); \ + LWIP_ASSERT("TCP_REG: npcb->next != npcb", (npcb)->next != (npcb)); \ + *(pcbs) = (npcb); \ + LWIP_ASSERT("TCP_REG: tcp_pcbs sane", tcp_pcbs_sane()); \ + tcp_timer_needed(); \ + } while(0) +#define TCP_RMV(pcbs, npcb) do { \ + struct tcp_pcb *tcp_tmp_pcb; \ + LWIP_ASSERT("TCP_RMV: pcbs != NULL", *(pcbs) != NULL); \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_RMV: removing %p from %p\n", (void *)(npcb), (void *)(*(pcbs)))); \ + if(*(pcbs) == (npcb)) { \ + *(pcbs) = (*pcbs)->next; \ + } else for (tcp_tmp_pcb = *(pcbs); tcp_tmp_pcb != NULL; tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + if(tcp_tmp_pcb->next == (npcb)) { \ + tcp_tmp_pcb->next = (npcb)->next; \ + break; \ + } \ + } \ + (npcb)->next = NULL; \ + LWIP_ASSERT("TCP_RMV: tcp_pcbs sane", tcp_pcbs_sane()); \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_RMV: removed %p from %p\n", (void *)(npcb), (void *)(*(pcbs)))); \ + } while(0) + +#else /* LWIP_DEBUG */ + +#define TCP_REG(pcbs, npcb) \ + do { \ + (npcb)->next = *pcbs; \ + *(pcbs) = (npcb); \ + tcp_timer_needed(); \ + } while (0) + +#define TCP_RMV(pcbs, npcb) \ + do { \ + if(*(pcbs) == (npcb)) { \ + (*(pcbs)) = (*pcbs)->next; \ + } \ + else { \ + struct tcp_pcb *tcp_tmp_pcb; \ + for (tcp_tmp_pcb = *pcbs; \ + tcp_tmp_pcb != NULL; \ + tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + if(tcp_tmp_pcb->next == (npcb)) { \ + tcp_tmp_pcb->next = (npcb)->next; \ + break; \ + } \ + } \ + } \ + (npcb)->next = NULL; \ + } while(0) + +#endif /* LWIP_DEBUG */ + +#define TCP_REG_ACTIVE(npcb) \ + do { \ + TCP_REG(&tcp_active_pcbs, npcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + +#define TCP_RMV_ACTIVE(npcb) \ + do { \ + TCP_RMV(&tcp_active_pcbs, npcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + +#define TCP_PCB_REMOVE_ACTIVE(pcb) \ + do { \ + tcp_pcb_remove(&tcp_active_pcbs, pcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + + +/* Internal functions: */ +struct tcp_pcb *tcp_pcb_copy(struct tcp_pcb *pcb); +void tcp_pcb_purge(struct tcp_pcb *pcb); +void tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb); + +void tcp_segs_free(struct tcp_seg *seg); +void tcp_seg_free(struct tcp_seg *seg); +struct tcp_seg *tcp_seg_copy(struct tcp_seg *seg); + +#define tcp_ack(pcb) \ + do { \ + if((pcb)->flags & TF_ACK_DELAY) { \ + tcp_clear_flags(pcb, TF_ACK_DELAY); \ + tcp_ack_now(pcb); \ + } \ + else { \ + tcp_set_flags(pcb, TF_ACK_DELAY); \ + } \ + } while (0) + +#define tcp_ack_now(pcb) \ + tcp_set_flags(pcb, TF_ACK_NOW) + +err_t tcp_send_fin(struct tcp_pcb *pcb); +err_t tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags); + +void tcp_rexmit_seg(struct tcp_pcb *pcb, struct tcp_seg *seg); + +void tcp_rst(const struct tcp_pcb* pcb, u32_t seqno, u32_t ackno, + const ip_addr_t *local_ip, const ip_addr_t *remote_ip, + u16_t local_port, u16_t remote_port); + +u32_t tcp_next_iss(struct tcp_pcb *pcb); + +err_t tcp_keepalive(struct tcp_pcb *pcb); +err_t tcp_split_unsent_seg(struct tcp_pcb *pcb, u16_t split); +err_t tcp_zero_window_probe(struct tcp_pcb *pcb); +void 
tcp_trigger_input_pcb_close(void); + +#if TCP_CALCULATE_EFF_SEND_MSS +u16_t tcp_eff_send_mss_netif(u16_t sendmss, struct netif *outif, + const ip_addr_t *dest); +#define tcp_eff_send_mss(sendmss, src, dest) \ + tcp_eff_send_mss_netif(sendmss, ip_route(src, dest), dest) +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + +#if LWIP_CALLBACK_API +err_t tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err); +#endif /* LWIP_CALLBACK_API */ + +#if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG +void tcp_debug_print(struct tcp_hdr *tcphdr); +void tcp_debug_print_flags(u8_t flags); +void tcp_debug_print_state(enum tcp_state s); +void tcp_debug_print_pcbs(void); +s16_t tcp_pcbs_sane(void); +#else +# define tcp_debug_print(tcphdr) +# define tcp_debug_print_flags(flags) +# define tcp_debug_print_state(s) +# define tcp_debug_print_pcbs() +# define tcp_pcbs_sane() 1 +#endif /* TCP_DEBUG */ + +/** External function (implemented in timers.c), called when TCP detects + * that a timer is needed (i.e. active- or time-wait-pcb found). */ +void tcp_timer_needed(void); + +void tcp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +#if TCP_QUEUE_OOSEQ +void tcp_free_ooseq(struct tcp_pcb *pcb); +#endif + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +err_t tcp_ext_arg_invoke_callbacks_passive_open(struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP */ + +#endif /* LWIP_HDR_TCP_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h new file mode 100644 index 00000000..74be634b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h @@ -0,0 +1,170 @@ +/** + * @file + * TCPIP API internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCPIP_PRIV_H +#define LWIP_HDR_TCPIP_PRIV_H + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcpip.h" +#include "lwip/sys.h" +#include "lwip/timeouts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct pbuf; +struct netif; + +#if LWIP_MPU_COMPATIBLE +#define API_VAR_REF(name) (*(name)) +#define API_VAR_DECLARE(type, name) type * name +#define API_VAR_ALLOC_EXT(type, pool, name, errorblock) do { \ + name = (type *)memp_malloc(pool); \ + if (name == NULL) { \ + errorblock; \ + } \ + } while(0) +#define API_VAR_ALLOC(type, pool, name, errorval) API_VAR_ALLOC_EXT(type, pool, name, return errorval) +#define API_VAR_ALLOC_POOL(type, pool, name, errorval) do { \ + name = (type *)LWIP_MEMPOOL_ALLOC(pool); \ + if (name == NULL) { \ + return errorval; \ + } \ + } while(0) +#define API_VAR_FREE(pool, name) memp_free(pool, name) +#define API_VAR_FREE_POOL(pool, name) LWIP_MEMPOOL_FREE(pool, name) +#define API_EXPR_REF(expr) (&(expr)) +#if LWIP_NETCONN_SEM_PER_THREAD +#define API_EXPR_REF_SEM(expr) (expr) +#else +#define API_EXPR_REF_SEM(expr) API_EXPR_REF(expr) +#endif +#define API_EXPR_DEREF(expr) expr +#define API_MSG_M_DEF(m) m +#define API_MSG_M_DEF_C(t, m) t m +#else /* LWIP_MPU_COMPATIBLE */ +#define API_VAR_REF(name) name +#define API_VAR_DECLARE(type, name) type name +#define API_VAR_ALLOC_EXT(type, pool, name, errorblock) +#define API_VAR_ALLOC(type, pool, name, errorval) +#define API_VAR_ALLOC_POOL(type, pool, name, errorval) +#define API_VAR_FREE(pool, name) +#define API_VAR_FREE_POOL(pool, name) +#define API_EXPR_REF(expr) expr +#define API_EXPR_REF_SEM(expr) API_EXPR_REF(expr) +#define API_EXPR_DEREF(expr) (*(expr)) +#define API_MSG_M_DEF(m) *m +#define API_MSG_M_DEF_C(t, m) const t * m +#endif /* LWIP_MPU_COMPATIBLE */ + +err_t tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t* sem); + +struct tcpip_api_call_data +{ +#if !LWIP_TCPIP_CORE_LOCKING + err_t err; +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_t sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ +#else /* !LWIP_TCPIP_CORE_LOCKING */ + u8_t dummy; /* avoid empty struct :-( */ +#endif /* !LWIP_TCPIP_CORE_LOCKING */ +}; +typedef err_t (*tcpip_api_call_fn)(struct tcpip_api_call_data* call); +err_t tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call); + +enum tcpip_msg_type { +#if !LWIP_TCPIP_CORE_LOCKING + TCPIP_MSG_API, + TCPIP_MSG_API_CALL, +#endif /* !LWIP_TCPIP_CORE_LOCKING */ +#if !LWIP_TCPIP_CORE_LOCKING_INPUT + TCPIP_MSG_INPKT, +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + TCPIP_MSG_TIMEOUT, + TCPIP_MSG_UNTIMEOUT, +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + TCPIP_MSG_CALLBACK, + TCPIP_MSG_CALLBACK_STATIC +}; + +struct tcpip_msg { + enum tcpip_msg_type type; + union { +#if !LWIP_TCPIP_CORE_LOCKING + struct { + tcpip_callback_fn function; + void* msg; + } api_msg; + struct { + tcpip_api_call_fn function; + struct tcpip_api_call_data *arg; + sys_sem_t *sem; + } api_call; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +#if !LWIP_TCPIP_CORE_LOCKING_INPUT + struct { + struct pbuf *p; + struct netif *netif; + netif_input_fn input_fn; + } inp; +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ + struct { + tcpip_callback_fn function; + void *ctx; + } cb; +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + struct { + u32_t msecs; + sys_timeout_handler h; + void *arg; + } tmo; +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + } msg; +}; + +#ifdef __cplusplus 
+} +#endif + +#endif /* !NO_SYS */ + +#endif /* LWIP_HDR_TCPIP_PRIV_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h new file mode 100644 index 00000000..fd3af8a9 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h @@ -0,0 +1,78 @@ +/** + * @file + * AutoIP protocol definitions + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + * + * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. + * + */ + +#ifndef LWIP_HDR_PROT_AUTOIP_H +#define LWIP_HDR_PROT_AUTOIP_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* 169.254.0.0 */ +#define AUTOIP_NET 0xA9FE0000 +/* 169.254.1.0 */ +#define AUTOIP_RANGE_START (AUTOIP_NET | 0x0100) +/* 169.254.254.255 */ +#define AUTOIP_RANGE_END (AUTOIP_NET | 0xFEFF) + +/* RFC 3927 Constants */ +#define PROBE_WAIT 1 /* second (initial random delay) */ +#define PROBE_MIN 1 /* second (minimum delay till repeated probe) */ +#define PROBE_MAX 2 /* seconds (maximum delay till repeated probe) */ +#define PROBE_NUM 3 /* (number of probe packets) */ +#define ANNOUNCE_NUM 2 /* (number of announcement packets) */ +#define ANNOUNCE_INTERVAL 2 /* seconds (time between announcement packets) */ +#define ANNOUNCE_WAIT 2 /* seconds (delay before announcing) */ +#define MAX_CONFLICTS 10 /* (max conflicts before rate limiting) */ +#define RATE_LIMIT_INTERVAL 60 /* seconds (delay between successive attempts) */ +#define DEFEND_INTERVAL 10 /* seconds (min. 
wait between defensive ARPs) */ + +/* AutoIP client states */ +typedef enum { + AUTOIP_STATE_OFF = 0, + AUTOIP_STATE_PROBING = 1, + AUTOIP_STATE_ANNOUNCING = 2, + AUTOIP_STATE_BOUND = 3 +} autoip_state_enum_t; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_AUTOIP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h new file mode 100644 index 00000000..ab18dca3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h @@ -0,0 +1,178 @@ +/** + * @file + * DHCP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Leon Woestenberg + * + */ +#ifndef LWIP_HDR_PROT_DHCP_H +#define LWIP_HDR_PROT_DHCP_H + +#include "lwip/opt.h" +#include "lwip/arch.h" +#include "lwip/prot/ip4.h" + +#ifdef __cplusplus +extern "C" { +#endif + + /* DHCP message item offsets and length */ +#define DHCP_CHADDR_LEN 16U +#define DHCP_SNAME_OFS 44U +#define DHCP_SNAME_LEN 64U +#define DHCP_FILE_OFS 108U +#define DHCP_FILE_LEN 128U +#define DHCP_MSG_LEN 236U +#define DHCP_OPTIONS_OFS (DHCP_MSG_LEN + 4U) /* 4 byte: cookie */ + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** minimum set of fields of any DHCP message */ +struct dhcp_msg +{ + PACK_STRUCT_FLD_8(u8_t op); + PACK_STRUCT_FLD_8(u8_t htype); + PACK_STRUCT_FLD_8(u8_t hlen); + PACK_STRUCT_FLD_8(u8_t hops); + PACK_STRUCT_FIELD(u32_t xid); + PACK_STRUCT_FIELD(u16_t secs); + PACK_STRUCT_FIELD(u16_t flags); + PACK_STRUCT_FLD_S(ip4_addr_p_t ciaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t yiaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t siaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t giaddr); + PACK_STRUCT_FLD_8(u8_t chaddr[DHCP_CHADDR_LEN]); + PACK_STRUCT_FLD_8(u8_t sname[DHCP_SNAME_LEN]); + PACK_STRUCT_FLD_8(u8_t file[DHCP_FILE_LEN]); + PACK_STRUCT_FIELD(u32_t cookie); +#define DHCP_MIN_OPTIONS_LEN 68U +/** make sure user does not configure this too small */ +#if ((defined(DHCP_OPTIONS_LEN)) && (DHCP_OPTIONS_LEN < DHCP_MIN_OPTIONS_LEN)) +# undef DHCP_OPTIONS_LEN +#endif +/** allow this to be configured in lwipopts.h, but not too small */ +#if (!defined(DHCP_OPTIONS_LEN)) +/** set this to be sufficient for your options in outgoing DHCP msgs */ +# define DHCP_OPTIONS_LEN DHCP_MIN_OPTIONS_LEN +#endif + PACK_STRUCT_FLD_8(u8_t options[DHCP_OPTIONS_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + + +/* DHCP client states */ +typedef enum { + DHCP_STATE_OFF = 0, + DHCP_STATE_REQUESTING = 1, + DHCP_STATE_INIT = 2, + DHCP_STATE_REBOOTING = 3, + DHCP_STATE_REBINDING = 4, + DHCP_STATE_RENEWING = 5, + DHCP_STATE_SELECTING = 6, + DHCP_STATE_INFORMING = 7, + DHCP_STATE_CHECKING = 8, + DHCP_STATE_PERMANENT = 9, /* not yet implemented */ + DHCP_STATE_BOUND = 10, + DHCP_STATE_RELEASING = 11, /* not yet implemented */ + DHCP_STATE_BACKING_OFF = 12 +} dhcp_state_enum_t; + +/* DHCP op codes */ +#define DHCP_BOOTREQUEST 1 +#define DHCP_BOOTREPLY 2 + +/* DHCP message types */ +#define DHCP_DISCOVER 1 +#define DHCP_OFFER 2 +#define DHCP_REQUEST 3 +#define DHCP_DECLINE 4 +#define DHCP_ACK 5 +#define DHCP_NAK 6 +#define DHCP_RELEASE 7 +#define DHCP_INFORM 8 + +#define DHCP_MAGIC_COOKIE 0x63825363UL + +/* This is a list of options for BOOTP and DHCP, see RFC 2132 for descriptions */ + +/* BootP options */ +#define DHCP_OPTION_PAD 0 +#define DHCP_OPTION_SUBNET_MASK 1 /* RFC 2132 3.3 */ +#define DHCP_OPTION_ROUTER 3 +#define DHCP_OPTION_DNS_SERVER 6 +#define DHCP_OPTION_HOSTNAME 12 +#define DHCP_OPTION_IP_TTL 23 +#define DHCP_OPTION_MTU 26 +#define DHCP_OPTION_BROADCAST 28 +#define DHCP_OPTION_TCP_TTL 37 +#define DHCP_OPTION_NTP 42 +#define DHCP_OPTION_END 255 + +/* DHCP options */ +#define DHCP_OPTION_REQUESTED_IP 50 /* RFC 2132 9.1, requested IP address */ +#define DHCP_OPTION_LEASE_TIME 51 /* RFC 2132 9.2, time in seconds, in 4 bytes */ +#define DHCP_OPTION_OVERLOAD 52 /* RFC2132 9.3, use file and/or sname field for options */ + +#define DHCP_OPTION_MESSAGE_TYPE 53 /* RFC 2132 9.6, important for DHCP */ +#define DHCP_OPTION_MESSAGE_TYPE_LEN 1 + +#define DHCP_OPTION_SERVER_ID 54 /* RFC 2132 
9.7, server IP address */ +#define DHCP_OPTION_PARAMETER_REQUEST_LIST 55 /* RFC 2132 9.8, requested option types */ + +#define DHCP_OPTION_MAX_MSG_SIZE 57 /* RFC 2132 9.10, message size accepted >= 576 */ +#define DHCP_OPTION_MAX_MSG_SIZE_LEN 2 + +#define DHCP_OPTION_T1 58 /* T1 renewal time */ +#define DHCP_OPTION_T2 59 /* T2 rebinding time */ +#define DHCP_OPTION_US 60 +#define DHCP_OPTION_CLIENT_ID 61 +#define DHCP_OPTION_TFTP_SERVERNAME 66 +#define DHCP_OPTION_BOOTFILE 67 + +/* possible combinations of overloading the file and sname fields with options */ +#define DHCP_OVERLOAD_NONE 0 +#define DHCP_OVERLOAD_FILE 1 +#define DHCP_OVERLOAD_SNAME 2 +#define DHCP_OVERLOAD_SNAME_FILE 3 + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_DHCP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h new file mode 100644 index 00000000..0754c91b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h @@ -0,0 +1,138 @@ +/** + * @file + * DHCPv6 protocol definitions + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_PROT_DHCP6_H +#define LWIP_HDR_PROT_DHCP6_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define DHCP6_CLIENT_PORT 546 +#define DHCP6_SERVER_PORT 547 + + + /* DHCPv6 message item offsets and length */ +#define DHCP6_TRANSACTION_ID_LEN 3 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** minimum set of fields of any DHCPv6 message */ +struct dhcp6_msg +{ + PACK_STRUCT_FLD_8(u8_t msgtype); + PACK_STRUCT_FLD_8(u8_t transaction_id[DHCP6_TRANSACTION_ID_LEN]); + /* options follow */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + + +/* DHCP6 client states */ +typedef enum { + DHCP6_STATE_OFF = 0, + DHCP6_STATE_STATELESS_IDLE = 1, + DHCP6_STATE_REQUESTING_CONFIG = 2 +} dhcp6_state_enum_t; + +/* DHCPv6 message types */ +#define DHCP6_SOLICIT 1 +#define DHCP6_ADVERTISE 2 +#define DHCP6_REQUEST 3 +#define DHCP6_CONFIRM 4 +#define DHCP6_RENEW 5 +#define DHCP6_REBIND 6 +#define DHCP6_REPLY 7 +#define DHCP6_RELEASE 8 +#define DHCP6_DECLINE 9 +#define DHCP6_RECONFIGURE 10 +#define DHCP6_INFOREQUEST 11 +#define DHCP6_RELAYFORW 12 +#define DHCP6_RELAYREPL 13 +/* More message types see https://www.iana.org/assignments/dhcpv6-parameters/dhcpv6-parameters.xhtml */ + +/** DHCPv6 status codes */ +#define DHCP6_STATUS_SUCCESS 0 /* Success. */ +#define DHCP6_STATUS_UNSPECFAIL 1 /* Failure, reason unspecified; this status code is sent by either a client or a server to indicate a failure not explicitly specified in this document. */ +#define DHCP6_STATUS_NOADDRSAVAIL 2 /* Server has no addresses available to assign to the IA(s). */ +#define DHCP6_STATUS_NOBINDING 3 /* Client record (binding) unavailable. */ +#define DHCP6_STATUS_NOTONLINK 4 /* The prefix for the address is not appropriate for the link to which the client is attached. */ +#define DHCP6_STATUS_USEMULTICAST 5 /* Sent by a server to a client to force the client to send messages to the server using the All_DHCP_Relay_Agents_and_Servers address. 
*/ +/* More status codes see https://www.iana.org/assignments/dhcpv6-parameters/dhcpv6-parameters.xhtml */ + +/** DHCPv6 DUID types */ +#define DHCP6_DUID_LLT 1 /* LLT: Link-layer Address Plus Time */ +#define DHCP6_DUID_EN 2 /* EN: Enterprise number */ +#define DHCP6_DUID_LL 3 /* LL: Link-layer Address */ +#define DHCP6_DUID_UUID 4 /* UUID (RFC 6355) */ + +/* DHCPv6 options */ +#define DHCP6_OPTION_CLIENTID 1 +#define DHCP6_OPTION_SERVERID 2 +#define DHCP6_OPTION_IA_NA 3 +#define DHCP6_OPTION_IA_TA 4 +#define DHCP6_OPTION_IAADDR 5 +#define DHCP6_OPTION_ORO 6 +#define DHCP6_OPTION_PREFERENCE 7 +#define DHCP6_OPTION_ELAPSED_TIME 8 +#define DHCP6_OPTION_RELAY_MSG 9 +#define DHCP6_OPTION_AUTH 11 +#define DHCP6_OPTION_UNICAST 12 +#define DHCP6_OPTION_STATUS_CODE 13 +#define DHCP6_OPTION_RAPID_COMMIT 14 +#define DHCP6_OPTION_USER_CLASS 15 +#define DHCP6_OPTION_VENDOR_CLASS 16 +#define DHCP6_OPTION_VENDOR_OPTS 17 +#define DHCP6_OPTION_INTERFACE_ID 18 +#define DHCP6_OPTION_RECONF_MSG 19 +#define DHCP6_OPTION_RECONF_ACCEPT 20 +/* More options see https://www.iana.org/assignments/dhcpv6-parameters/dhcpv6-parameters.xhtml */ +#define DHCP6_OPTION_DNS_SERVERS 23 /* RFC 3646 */ +#define DHCP6_OPTION_DOMAIN_LIST 24 /* RFC 3646 */ +#define DHCP6_OPTION_SNTP_SERVERS 31 /* RFC 4075 */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_DHCP6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h new file mode 100644 index 00000000..94782d6e --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h @@ -0,0 +1,140 @@ +/** + * @file + * DNS - host name to IP address resolver. + */ + +/* + * Port to lwIP from uIP + * by Jim Pettinato April 2007 + * + * security fixes and more by Simon Goldschmidt + * + * uIP version Copyright (c) 2002-2003, Adam Dunkels. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef LWIP_HDR_PROT_DNS_H +#define LWIP_HDR_PROT_DNS_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** DNS server port address */ +#ifndef DNS_SERVER_PORT +#define DNS_SERVER_PORT 53 +#endif + +/* DNS field TYPE used for "Resource Records" */ +#define DNS_RRTYPE_A 1 /* a host address */ +#define DNS_RRTYPE_NS 2 /* an authoritative name server */ +#define DNS_RRTYPE_MD 3 /* a mail destination (Obsolete - use MX) */ +#define DNS_RRTYPE_MF 4 /* a mail forwarder (Obsolete - use MX) */ +#define DNS_RRTYPE_CNAME 5 /* the canonical name for an alias */ +#define DNS_RRTYPE_SOA 6 /* marks the start of a zone of authority */ +#define DNS_RRTYPE_MB 7 /* a mailbox domain name (EXPERIMENTAL) */ +#define DNS_RRTYPE_MG 8 /* a mail group member (EXPERIMENTAL) */ +#define DNS_RRTYPE_MR 9 /* a mail rename domain name (EXPERIMENTAL) */ +#define DNS_RRTYPE_NULL 10 /* a null RR (EXPERIMENTAL) */ +#define DNS_RRTYPE_WKS 11 /* a well known service description */ +#define DNS_RRTYPE_PTR 12 /* a domain name pointer */ +#define DNS_RRTYPE_HINFO 13 /* host information */ +#define DNS_RRTYPE_MINFO 14 /* mailbox or mail list information */ +#define DNS_RRTYPE_MX 15 /* mail exchange */ +#define DNS_RRTYPE_TXT 16 /* text strings */ +#define DNS_RRTYPE_AAAA 28 /* IPv6 address */ +#define DNS_RRTYPE_SRV 33 /* service location */ +#define DNS_RRTYPE_ANY 255 /* any type */ + +/* DNS field CLASS used for "Resource Records" */ +#define DNS_RRCLASS_IN 1 /* the Internet */ +#define DNS_RRCLASS_CS 2 /* the CSNET class (Obsolete - used only for examples in some obsolete RFCs) */ +#define DNS_RRCLASS_CH 3 /* the CHAOS class */ +#define DNS_RRCLASS_HS 4 /* Hesiod [Dyer 87] */ +#define DNS_RRCLASS_ANY 255 /* any class */ +#define DNS_RRCLASS_FLUSH 0x800 /* Flush bit */ + +/* DNS protocol flags */ +#define DNS_FLAG1_RESPONSE 0x80 +#define DNS_FLAG1_OPCODE_STATUS 0x10 +#define DNS_FLAG1_OPCODE_INVERSE 0x08 +#define DNS_FLAG1_OPCODE_STANDARD 0x00 +#define DNS_FLAG1_AUTHORATIVE 0x04 +#define DNS_FLAG1_TRUNC 0x02 +#define DNS_FLAG1_RD 0x01 +#define DNS_FLAG2_RA 0x80 +#define DNS_FLAG2_ERR_MASK 0x0f +#define DNS_FLAG2_ERR_NONE 0x00 +#define DNS_FLAG2_ERR_NAME 0x03 + +#define DNS_HDR_GET_OPCODE(hdr) ((((hdr)->flags1) >> 3) & 0xF) + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** DNS message header */ +struct dns_hdr { + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FLD_8(u8_t flags1); + PACK_STRUCT_FLD_8(u8_t flags2); + PACK_STRUCT_FIELD(u16_t numquestions); + PACK_STRUCT_FIELD(u16_t numanswers); + PACK_STRUCT_FIELD(u16_t numauthrr); + PACK_STRUCT_FIELD(u16_t numextrarr); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define SIZEOF_DNS_HDR 12 + + +/* Multicast DNS definitions */ + +/** UDP port for multicast DNS queries */ +#ifndef DNS_MQUERY_PORT +#define DNS_MQUERY_PORT 5353 +#endif + +/* IPv4 group for multicast DNS queries: 224.0.0.251 */ +#ifndef DNS_MQUERY_IPV4_GROUP_INIT +#define DNS_MQUERY_IPV4_GROUP_INIT IPADDR4_INIT_BYTES(224,0,0,251) +#endif + +/* IPv6 group for multicast DNS queries: FF02::FB */ +#ifndef DNS_MQUERY_IPV6_GROUP_INIT +#define DNS_MQUERY_IPV6_GROUP_INIT IPADDR6_INIT_HOST(0xFF020000,0,0,0xFB) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_DNS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h new file mode 100644 index 00000000..811c2284 --- /dev/null +++ 
b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h @@ -0,0 +1,114 @@ +/** + * @file + * ARP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ETHARP_H +#define LWIP_HDR_PROT_ETHARP_H + +#include "lwip/arch.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef ETHARP_HWADDR_LEN +#define ETHARP_HWADDR_LEN ETH_HWADDR_LEN +#endif + +/** + * struct ip4_addr_wordaligned is used in the definition of the ARP packet format in + * order to support compilers that don't have structure packing. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip4_addr_wordaligned { + PACK_STRUCT_FIELD(u16_t addrw[2]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** MEMCPY-like copying of IP addresses where addresses are known to be + * 16-bit-aligned if the port is correctly configured (so a port could define + * this to copying 2 u16_t's) - no NULL-pointer-checking needed. */ +#ifndef IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T +#define IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(dest, src) SMEMCPY(dest, src, sizeof(ip4_addr_t)) +#endif + + /** MEMCPY-like copying of IP addresses where addresses are known to be + * 16-bit-aligned if the port is correctly configured (so a port could define + * this to copying 2 u16_t's) - no NULL-pointer-checking needed. 
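+ * Editorial note (not upstream lwIP text): in struct etharp_hdr below, the
+ * IPv4 addresses fall at offsets that are only 16-bit aligned, which is why
+ * the ip4_addr_wordaligned type and these copy macros exist. The default
+ * uses SMEMCPY; a port may redefine the macro to copy the two u16_t halves
+ * directly if that is cheaper on its architecture.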
*/ +#ifndef IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T +#define IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(dest, src) SMEMCPY(dest, src, sizeof(ip4_addr_t)) +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** the ARP message, see RFC 826 ("Packet format") */ +struct etharp_hdr { + PACK_STRUCT_FIELD(u16_t hwtype); + PACK_STRUCT_FIELD(u16_t proto); + PACK_STRUCT_FLD_8(u8_t hwlen); + PACK_STRUCT_FLD_8(u8_t protolen); + PACK_STRUCT_FIELD(u16_t opcode); + PACK_STRUCT_FLD_S(struct eth_addr shwaddr); + PACK_STRUCT_FLD_S(struct ip4_addr_wordaligned sipaddr); + PACK_STRUCT_FLD_S(struct eth_addr dhwaddr); + PACK_STRUCT_FLD_S(struct ip4_addr_wordaligned dipaddr); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_ETHARP_HDR 28 + +/* ARP message types (opcodes) */ +enum etharp_opcode { + ARP_REQUEST = 1, + ARP_REPLY = 2 +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ETHARP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h new file mode 100644 index 00000000..309e5746 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h @@ -0,0 +1,125 @@ +/** + * @file + * Ethernet protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ETHERNET_H +#define LWIP_HDR_PROT_ETHERNET_H + +#include "lwip/arch.h" +#include "lwip/prot/ieee.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef ETH_HWADDR_LEN +#ifdef ETHARP_HWADDR_LEN +#define ETH_HWADDR_LEN ETHARP_HWADDR_LEN /* compatibility mode */ +#else +#define ETH_HWADDR_LEN 6 +#endif +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** An Ethernet MAC address */ +struct eth_addr { + PACK_STRUCT_FLD_8(u8_t addr[ETH_HWADDR_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Initialize a struct eth_addr with its 6 bytes (takes care of correct braces) */ +#define ETH_ADDR(b0, b1, b2, b3, b4, b5) {{b0, b1, b2, b3, b4, b5}} + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** Ethernet header */ +struct eth_hdr { +#if ETH_PAD_SIZE + PACK_STRUCT_FLD_8(u8_t padding[ETH_PAD_SIZE]); +#endif + PACK_STRUCT_FLD_S(struct eth_addr dest); + PACK_STRUCT_FLD_S(struct eth_addr src); + PACK_STRUCT_FIELD(u16_t type); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_ETH_HDR (14 + ETH_PAD_SIZE) + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** VLAN header inserted between ethernet header and payload + * if 'type' in ethernet header is ETHTYPE_VLAN. + * See IEEE802.Q */ +struct eth_vlan_hdr { + PACK_STRUCT_FIELD(u16_t prio_vid); + PACK_STRUCT_FIELD(u16_t tpid); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_VLAN_HDR 4 +#define VLAN_ID(vlan_hdr) (lwip_htons((vlan_hdr)->prio_vid) & 0xFFF) + +/** The 24-bit IANA IPv4-multicast OUI is 01-00-5e: */ +#define LL_IP4_MULTICAST_ADDR_0 0x01 +#define LL_IP4_MULTICAST_ADDR_1 0x00 +#define LL_IP4_MULTICAST_ADDR_2 0x5e + +/** IPv6 multicast uses this prefix */ +#define LL_IP6_MULTICAST_ADDR_0 0x33 +#define LL_IP6_MULTICAST_ADDR_1 0x33 + +#define eth_addr_cmp(addr1, addr2) (memcmp((addr1)->addr, (addr2)->addr, ETH_HWADDR_LEN) == 0) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ETHERNET_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h new file mode 100644 index 00000000..32890ccc --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h @@ -0,0 +1,97 @@ +/** + * @file + * IANA assigned numbers (RFC 1700 and successors) + * + * @defgroup iana IANA assigned numbers + * @ingroup infrastructure + */ + +/* + * Copyright (c) 2017 Dirk Ziegelmeier. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ + +#ifndef LWIP_HDR_PROT_IANA_H +#define LWIP_HDR_PROT_IANA_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @ingroup iana + * Hardware types + */ +enum lwip_iana_hwtype { + /** Ethernet */ + LWIP_IANA_HWTYPE_ETHERNET = 1 +}; + +/** + * @ingroup iana + * Port numbers + * https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt + */ +enum lwip_iana_port_number { + /** SMTP */ + LWIP_IANA_PORT_SMTP = 25, + /** DHCP server */ + LWIP_IANA_PORT_DHCP_SERVER = 67, + /** DHCP client */ + LWIP_IANA_PORT_DHCP_CLIENT = 68, + /** TFTP */ + LWIP_IANA_PORT_TFTP = 69, + /** HTTP */ + LWIP_IANA_PORT_HTTP = 80, + /** SNTP */ + LWIP_IANA_PORT_SNTP = 123, + /** NETBIOS */ + LWIP_IANA_PORT_NETBIOS = 137, + /** SNMP */ + LWIP_IANA_PORT_SNMP = 161, + /** SNMP traps */ + LWIP_IANA_PORT_SNMP_TRAP = 162, + /** HTTPS */ + LWIP_IANA_PORT_HTTPS = 443, + /** SMTPS */ + LWIP_IANA_PORT_SMTPS = 465, + /** MQTT */ + LWIP_IANA_PORT_MQTT = 1883, + /** MDNS */ + LWIP_IANA_PORT_MDNS = 5353, + /** Secure MQTT */ + LWIP_IANA_PORT_SECURE_MQTT = 8883 +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IANA_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h new file mode 100644 index 00000000..7d19385c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h @@ -0,0 +1,91 @@ +/** + * @file + * ICMP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ICMP_H +#define LWIP_HDR_PROT_ICMP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ICMP_ER 0 /* echo reply */ +#define ICMP_DUR 3 /* destination unreachable */ +#define ICMP_SQ 4 /* source quench */ +#define ICMP_RD 5 /* redirect */ +#define ICMP_ECHO 8 /* echo */ +#define ICMP_TE 11 /* time exceeded */ +#define ICMP_PP 12 /* parameter problem */ +#define ICMP_TS 13 /* timestamp */ +#define ICMP_TSR 14 /* timestamp reply */ +#define ICMP_IRQ 15 /* information request */ +#define ICMP_IR 16 /* information reply */ +#define ICMP_AM 17 /* address mask request */ +#define ICMP_AMR 18 /* address mask reply */ + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +/** This is the standard ICMP header only that the u32_t data + * is split to two u16_t like ICMP echo needs it. + * This header is also used for other ICMP types that do not + * use the data part. + */ +PACK_STRUCT_BEGIN +struct icmp_echo_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FIELD(u16_t seqno); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Compatibility defines, old versions used to combine type and code to an u16_t */ +#define ICMPH_TYPE(hdr) ((hdr)->type) +#define ICMPH_CODE(hdr) ((hdr)->code) +#define ICMPH_TYPE_SET(hdr, t) ((hdr)->type = (t)) +#define ICMPH_CODE_SET(hdr, c) ((hdr)->code = (c)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ICMP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h new file mode 100644 index 00000000..34611204 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h @@ -0,0 +1,170 @@ +/** + * @file + * ICMP6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ICMP6_H +#define LWIP_HDR_PROT_ICMP6_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** ICMP type */ +enum icmp6_type { + /** Destination unreachable */ + ICMP6_TYPE_DUR = 1, + /** Packet too big */ + ICMP6_TYPE_PTB = 2, + /** Time exceeded */ + ICMP6_TYPE_TE = 3, + /** Parameter problem */ + ICMP6_TYPE_PP = 4, + /** Private experimentation */ + ICMP6_TYPE_PE1 = 100, + /** Private experimentation */ + ICMP6_TYPE_PE2 = 101, + /** Reserved for expansion of error messages */ + ICMP6_TYPE_RSV_ERR = 127, + + /** Echo request */ + ICMP6_TYPE_EREQ = 128, + /** Echo reply */ + ICMP6_TYPE_EREP = 129, + /** Multicast listener query */ + ICMP6_TYPE_MLQ = 130, + /** Multicast listener report */ + ICMP6_TYPE_MLR = 131, + /** Multicast listener done */ + ICMP6_TYPE_MLD = 132, + /** Router solicitation */ + ICMP6_TYPE_RS = 133, + /** Router advertisement */ + ICMP6_TYPE_RA = 134, + /** Neighbor solicitation */ + ICMP6_TYPE_NS = 135, + /** Neighbor advertisement */ + ICMP6_TYPE_NA = 136, + /** Redirect */ + ICMP6_TYPE_RD = 137, + /** Multicast router advertisement */ + ICMP6_TYPE_MRA = 151, + /** Multicast router solicitation */ + ICMP6_TYPE_MRS = 152, + /** Multicast router termination */ + ICMP6_TYPE_MRT = 153, + /** Private experimentation */ + ICMP6_TYPE_PE3 = 200, + /** Private experimentation */ + ICMP6_TYPE_PE4 = 201, + /** Reserved for expansion of informational messages */ + ICMP6_TYPE_RSV_INF = 255 +}; + +/** ICMP destination unreachable codes */ +enum icmp6_dur_code { + /** No route to destination */ + ICMP6_DUR_NO_ROUTE = 0, + /** Communication with destination administratively prohibited */ + ICMP6_DUR_PROHIBITED = 1, + /** Beyond scope of source address */ + ICMP6_DUR_SCOPE = 2, + /** Address unreachable */ + ICMP6_DUR_ADDRESS = 3, + /** Port unreachable */ + ICMP6_DUR_PORT = 4, + /** Source address failed ingress/egress policy */ + ICMP6_DUR_POLICY = 5, + /** Reject route to destination */ + ICMP6_DUR_REJECT_ROUTE = 6 +}; + +/** ICMP time exceeded codes */ +enum icmp6_te_code { + /** Hop limit exceeded in transit */ + ICMP6_TE_HL = 0, + /** Fragment reassembly time exceeded */ + ICMP6_TE_FRAG = 1 +}; + +/** ICMP parameter code */ +enum icmp6_pp_code { + /** Erroneous header field encountered */ + ICMP6_PP_FIELD = 0, + /** Unrecognized next header type encountered */ + ICMP6_PP_HEADER = 1, + /** Unrecognized IPv6 option encountered */ + ICMP6_PP_OPTION = 2 +}; + +/** This is the standard ICMP6 header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct icmp6_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t data); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** This is the ICMP6 header adapted for echo req/resp. 
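+ * Editorial usage sketch (not part of upstream lwIP): an echo request is
+ * formed by setting type = ICMP6_TYPE_EREQ, code = 0, choosing id/seqno and
+ * filling chksum over the IPv6 pseudo-header (lwIP's ip6_chksum_pseudo()
+ * can be used when checksum insertion is not offloaded to hardware); the
+ * reply carries the same id/seqno back with type = ICMP6_TYPE_EREP.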
*/ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct icmp6_echo_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FIELD(u16_t seqno); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ICMP6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h new file mode 100644 index 00000000..abbb9e31 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h @@ -0,0 +1,91 @@ +/** + * @file + * IEEE assigned numbers + * + * @defgroup ieee IEEE assigned numbers + * @ingroup infrastructure + */ + +/* + * Copyright (c) 2017 Dirk Ziegelmeier. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ + +#ifndef LWIP_HDR_PROT_IEEE_H +#define LWIP_HDR_PROT_IEEE_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @ingroup ieee + * A list of often ethtypes (although lwIP does not use all of them). 
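+ * Editorial note (not upstream lwIP text): these constants are host-order
+ * numbers, while the 'type' field of struct eth_hdr arrives big-endian, so
+ * a dispatch sketch compares against the converted constant, e.g.
+ *   if (ethhdr->type == PP_HTONS(ETHTYPE_IP))  handle IPv4;
+ *   else if (ethhdr->type == PP_HTONS(ETHTYPE_ARP))  handle ARP;
+ * where 'ethhdr' is assumed to point at a received struct eth_hdr.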
+ */ +enum lwip_ieee_eth_type { + /** Internet protocol v4 */ + ETHTYPE_IP = 0x0800U, + /** Address resolution protocol */ + ETHTYPE_ARP = 0x0806U, + /** Wake on lan */ + ETHTYPE_WOL = 0x0842U, + /** RARP */ + ETHTYPE_RARP = 0x8035U, + /** Virtual local area network */ + ETHTYPE_VLAN = 0x8100U, + /** Internet protocol v6 */ + ETHTYPE_IPV6 = 0x86DDU, + /** PPP Over Ethernet Discovery Stage */ + ETHTYPE_PPPOEDISC = 0x8863U, + /** PPP Over Ethernet Session Stage */ + ETHTYPE_PPPOE = 0x8864U, + /** Jumbo Frames */ + ETHTYPE_JUMBO = 0x8870U, + /** Process field network */ + ETHTYPE_PROFINET = 0x8892U, + /** Ethernet for control automation technology */ + ETHTYPE_ETHERCAT = 0x88A4U, + /** Link layer discovery protocol */ + ETHTYPE_LLDP = 0x88CCU, + /** Serial real-time communication system */ + ETHTYPE_SERCOS = 0x88CDU, + /** Media redundancy protocol */ + ETHTYPE_MRP = 0x88E3U, + /** Precision time protocol */ + ETHTYPE_PTP = 0x88F7U, + /** Q-in-Q, 802.1ad */ + ETHTYPE_QINQ = 0x9100U +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IEEE_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h new file mode 100644 index 00000000..46b81a9d --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h @@ -0,0 +1,90 @@ +/** + * @file + * IGMP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IGMP_H +#define LWIP_HDR_PROT_IGMP_H + +#include "lwip/arch.h" +#include "lwip/prot/ip4.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * IGMP constants + */ +#define IGMP_TTL 1 +#define IGMP_MINLEN 8 +#define ROUTER_ALERT 0x9404U +#define ROUTER_ALERTLEN 4 + +/* + * IGMP message types, including version number. + */ +#define IGMP_MEMB_QUERY 0x11 /* Membership query */ +#define IGMP_V1_MEMB_REPORT 0x12 /* Ver. 1 membership report */ +#define IGMP_V2_MEMB_REPORT 0x16 /* Ver. 
2 membership report */ +#define IGMP_LEAVE_GROUP 0x17 /* Leave-group message */ + +/* Group membership states */ +#define IGMP_GROUP_NON_MEMBER 0 +#define IGMP_GROUP_DELAYING_MEMBER 1 +#define IGMP_GROUP_IDLE_MEMBER 2 + +/** + * IGMP packet format. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct igmp_msg { + PACK_STRUCT_FLD_8(u8_t igmp_msgtype); + PACK_STRUCT_FLD_8(u8_t igmp_maxresp); + PACK_STRUCT_FIELD(u16_t igmp_checksum); + PACK_STRUCT_FLD_S(ip4_addr_p_t igmp_group_address); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IGMP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h new file mode 100644 index 00000000..223158f5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h @@ -0,0 +1,59 @@ +/** + * @file + * IP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP_H +#define LWIP_HDR_PROT_IP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define IP_PROTO_ICMP 1 +#define IP_PROTO_IGMP 2 +#define IP_PROTO_UDP 17 +#define IP_PROTO_UDPLITE 136 +#define IP_PROTO_TCP 6 + +/** This operates on a void* by loading the first byte */ +#define IP_HDR_GET_VERSION(ptr) ((*(u8_t*)(ptr)) >> 4) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h new file mode 100644 index 00000000..93474611 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h @@ -0,0 +1,131 @@ +/** + * @file + * IPv4 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP4_H +#define LWIP_HDR_PROT_IP4_H + +#include "lwip/arch.h" +#include "lwip/ip4_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the packed version of ip4_addr_t, + used in network headers that are itself packed */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip4_addr_packed { + PACK_STRUCT_FIELD(u32_t addr); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +typedef struct ip4_addr_packed ip4_addr_p_t; + +/* Size of the IPv4 header. Same as 'sizeof(struct ip_hdr)'. */ +#define IP_HLEN 20 +/* Maximum size of the IPv4 header with options. 
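+   (Editorial addition, not upstream lwIP text: the 4-bit IHL field counts
+   32-bit words, so a header can span at most 15 * 4 = 60 bytes; the
+   IPH_HL_BYTES() macro below applies the same IHL-to-bytes conversion.)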
*/ +#define IP_HLEN_MAX 60 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/* The IPv4 header */ +struct ip_hdr { + /* version / header length */ + PACK_STRUCT_FLD_8(u8_t _v_hl); + /* type of service */ + PACK_STRUCT_FLD_8(u8_t _tos); + /* total length */ + PACK_STRUCT_FIELD(u16_t _len); + /* identification */ + PACK_STRUCT_FIELD(u16_t _id); + /* fragment offset field */ + PACK_STRUCT_FIELD(u16_t _offset); +#define IP_RF 0x8000U /* reserved fragment flag */ +#define IP_DF 0x4000U /* don't fragment flag */ +#define IP_MF 0x2000U /* more fragments flag */ +#define IP_OFFMASK 0x1fffU /* mask for fragmenting bits */ + /* time to live */ + PACK_STRUCT_FLD_8(u8_t _ttl); + /* protocol*/ + PACK_STRUCT_FLD_8(u8_t _proto); + /* checksum */ + PACK_STRUCT_FIELD(u16_t _chksum); + /* source and destination IP addresses */ + PACK_STRUCT_FLD_S(ip4_addr_p_t src); + PACK_STRUCT_FLD_S(ip4_addr_p_t dest); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Macros to get struct ip_hdr fields: */ +#define IPH_V(hdr) ((hdr)->_v_hl >> 4) +#define IPH_HL(hdr) ((hdr)->_v_hl & 0x0f) +#define IPH_HL_BYTES(hdr) ((u8_t)(IPH_HL(hdr) * 4)) +#define IPH_TOS(hdr) ((hdr)->_tos) +#define IPH_LEN(hdr) ((hdr)->_len) +#define IPH_ID(hdr) ((hdr)->_id) +#define IPH_OFFSET(hdr) ((hdr)->_offset) +#define IPH_OFFSET_BYTES(hdr) ((u16_t)((lwip_ntohs(IPH_OFFSET(hdr)) & IP_OFFMASK) * 8U)) +#define IPH_TTL(hdr) ((hdr)->_ttl) +#define IPH_PROTO(hdr) ((hdr)->_proto) +#define IPH_CHKSUM(hdr) ((hdr)->_chksum) + +/* Macros to set struct ip_hdr fields: */ +#define IPH_VHL_SET(hdr, v, hl) (hdr)->_v_hl = (u8_t)((((v) << 4) | (hl))) +#define IPH_TOS_SET(hdr, tos) (hdr)->_tos = (tos) +#define IPH_LEN_SET(hdr, len) (hdr)->_len = (len) +#define IPH_ID_SET(hdr, id) (hdr)->_id = (id) +#define IPH_OFFSET_SET(hdr, off) (hdr)->_offset = (off) +#define IPH_TTL_SET(hdr, ttl) (hdr)->_ttl = (u8_t)(ttl) +#define IPH_PROTO_SET(hdr, proto) (hdr)->_proto = (u8_t)(proto) +#define IPH_CHKSUM_SET(hdr, chksum) (hdr)->_chksum = (chksum) + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IP4_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h new file mode 100644 index 00000000..0f6de455 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h @@ -0,0 +1,233 @@ +/** + * @file + * IPv6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP6_H +#define LWIP_HDR_PROT_IP6_H + +#include "lwip/arch.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the packed version of ip6_addr_t, + used in network headers that are itself packed */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_addr_packed { + PACK_STRUCT_FIELD(u32_t addr[4]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +typedef struct ip6_addr_packed ip6_addr_p_t; + +#define IP6_HLEN 40 + +#define IP6_NEXTH_HOPBYHOP 0 +#define IP6_NEXTH_TCP 6 +#define IP6_NEXTH_UDP 17 +#define IP6_NEXTH_ENCAPS 41 +#define IP6_NEXTH_ROUTING 43 +#define IP6_NEXTH_FRAGMENT 44 +#define IP6_NEXTH_ICMP6 58 +#define IP6_NEXTH_NONE 59 +#define IP6_NEXTH_DESTOPTS 60 +#define IP6_NEXTH_UDPLITE 136 + +/** The IPv6 header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_hdr { + /** version / traffic class / flow label */ + PACK_STRUCT_FIELD(u32_t _v_tc_fl); + /** payload length */ + PACK_STRUCT_FIELD(u16_t _plen); + /** next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /** hop limit */ + PACK_STRUCT_FLD_8(u8_t _hoplim); + /** source and destination IP addresses */ + PACK_STRUCT_FLD_S(ip6_addr_p_t src); + PACK_STRUCT_FLD_S(ip6_addr_p_t dest); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6H_V(hdr) ((lwip_ntohl((hdr)->_v_tc_fl) >> 28) & 0x0f) +#define IP6H_TC(hdr) ((lwip_ntohl((hdr)->_v_tc_fl) >> 20) & 0xff) +#define IP6H_FL(hdr) (lwip_ntohl((hdr)->_v_tc_fl) & 0x000fffff) +#define IP6H_PLEN(hdr) (lwip_ntohs((hdr)->_plen)) +#define IP6H_NEXTH(hdr) ((hdr)->_nexth) +#define IP6H_NEXTH_P(hdr) ((u8_t *)(hdr) + 6) +#define IP6H_HOPLIM(hdr) ((hdr)->_hoplim) +#define IP6H_VTCFL_SET(hdr, v, tc, fl) (hdr)->_v_tc_fl = (lwip_htonl((((u32_t)(v)) << 28) | (((u32_t)(tc)) << 20) | (fl))) +#define IP6H_PLEN_SET(hdr, plen) (hdr)->_plen = lwip_htons(plen) +#define IP6H_NEXTH_SET(hdr, nexth) (hdr)->_nexth = (nexth) +#define IP6H_HOPLIM_SET(hdr, hl) (hdr)->_hoplim = (u8_t)(hl) + +/* ipv6 extended options header */ +#define IP6_PAD1_OPTION 0 +#define IP6_PADN_OPTION 1 +#define IP6_ROUTER_ALERT_OPTION 5 +#define IP6_JUMBO_OPTION 194 +#define IP6_HOME_ADDRESS_OPTION 201 +#define IP6_ROUTER_ALERT_DLEN 2 +#define IP6_ROUTER_ALERT_VALUE_MLD 0 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_opt_hdr { + /* router alert option type */ + PACK_STRUCT_FLD_8(u8_t _opt_type); + /* router alert option data len */ + PACK_STRUCT_FLD_8(u8_t _opt_dlen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_OPT_HLEN 2 +#define IP6_OPT_TYPE_ACTION(hdr) ((((hdr)->_opt_type) >> 6) & 0x3) +#define IP6_OPT_TYPE_CHANGE(hdr) 
((((hdr)->_opt_type) >> 5) & 0x1) +#define IP6_OPT_TYPE(hdr) ((hdr)->_opt_type) +#define IP6_OPT_DLEN(hdr) ((hdr)->_opt_dlen) + +/* Hop-by-Hop header. */ +#define IP6_HBH_HLEN 2 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_hbh_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* header length in 8-octet units */ + PACK_STRUCT_FLD_8(u8_t _hlen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_HBH_NEXTH(hdr) ((hdr)->_nexth) + +/* Destination header. */ +#define IP6_DEST_HLEN 2 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_dest_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* header length in 8-octet units */ + PACK_STRUCT_FLD_8(u8_t _hlen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_DEST_NEXTH(hdr) ((hdr)->_nexth) + +/* Routing header */ +#define IP6_ROUT_TYPE2 2 +#define IP6_ROUT_RPL 3 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_rout_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* reserved */ + PACK_STRUCT_FLD_8(u8_t _hlen); + /* fragment offset */ + PACK_STRUCT_FIELD(u8_t _routing_type); + /* fragmented packet identification */ + PACK_STRUCT_FIELD(u8_t _segments_left); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_ROUT_NEXTH(hdr) ((hdr)->_nexth) +#define IP6_ROUT_TYPE(hdr) ((hdr)->_routing_type) +#define IP6_ROUT_SEG_LEFT(hdr) ((hdr)->_segments_left) + +/* Fragment header. */ +#define IP6_FRAG_HLEN 8 +#define IP6_FRAG_OFFSET_MASK 0xfff8 +#define IP6_FRAG_MORE_FLAG 0x0001 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_frag_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* reserved */ + PACK_STRUCT_FLD_8(u8_t reserved); + /* fragment offset */ + PACK_STRUCT_FIELD(u16_t _fragment_offset); + /* fragmented packet identification */ + PACK_STRUCT_FIELD(u32_t _identification); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_FRAG_NEXTH(hdr) ((hdr)->_nexth) +#define IP6_FRAG_MBIT(hdr) (lwip_ntohs((hdr)->_fragment_offset) & 0x1) +#define IP6_FRAG_ID(hdr) (lwip_ntohl((hdr)->_identification)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IP6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h new file mode 100644 index 00000000..71f1dcbd --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h @@ -0,0 +1,71 @@ +/** + * @file + * MLD6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_MLD6_H +#define LWIP_HDR_PROT_MLD6_H + +#include "lwip/arch.h" +#include "lwip/prot/ip6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MLD6_HBH_HLEN 8 +/** Multicast listener report/query/done message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct mld_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t max_resp_delay); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t multicast_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_MLD6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h new file mode 100644 index 00000000..c270d07c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h @@ -0,0 +1,274 @@ +/** + * @file + * ND6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ND6_H +#define LWIP_HDR_PROT_ND6_H + +#include "lwip/arch.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/ip6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Neighbor solicitation message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ns_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Neighbor advertisement message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct na_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FLD_8(u8_t reserved[3]); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define ND6_FLAG_ROUTER (0x80) +#define ND6_FLAG_SOLICITED (0x40) +#define ND6_FLAG_OVERRIDE (0x20) + +/** Router solicitation message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct rs_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Router advertisement message header. */ +#define ND6_RA_FLAG_MANAGED_ADDR_CONFIG (0x80) +#define ND6_RA_FLAG_OTHER_CONFIG (0x40) +#define ND6_RA_FLAG_HOME_AGENT (0x20) +#define ND6_RA_PREFERENCE_MASK (0x18) +#define ND6_RA_PREFERENCE_HIGH (0x08) +#define ND6_RA_PREFERENCE_MEDIUM (0x00) +#define ND6_RA_PREFERENCE_LOW (0x18) +#define ND6_RA_PREFERENCE_DISABLED (0x10) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ra_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FLD_8(u8_t current_hop_limit); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FIELD(u16_t router_lifetime); + PACK_STRUCT_FIELD(u32_t reachable_time); + PACK_STRUCT_FIELD(u32_t retrans_timer); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Redirect message header. 
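+ * Editorial note (not upstream lwIP text): sent by routers as type
+ * ICMP6_TYPE_RD (137); per RFC 4861, target_address names the better first
+ * hop and destination_address names the destination being redirected, with
+ * a target link-layer address option usually appended.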
*/ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct redirect_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + PACK_STRUCT_FLD_S(ip6_addr_p_t destination_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Link-layer address option. */ +#define ND6_OPTION_TYPE_SOURCE_LLADDR (0x01) +#define ND6_OPTION_TYPE_TARGET_LLADDR (0x02) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct lladdr_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t addr[NETIF_MAX_HWADDR_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Prefix information option. */ +#define ND6_OPTION_TYPE_PREFIX_INFO (0x03) +#define ND6_PREFIX_FLAG_ON_LINK (0x80) +#define ND6_PREFIX_FLAG_AUTONOMOUS (0x40) +#define ND6_PREFIX_FLAG_ROUTER_ADDRESS (0x20) +#define ND6_PREFIX_FLAG_SITE_PREFIX (0x10) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct prefix_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t prefix_length); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FIELD(u32_t valid_lifetime); + PACK_STRUCT_FIELD(u32_t preferred_lifetime); + PACK_STRUCT_FLD_8(u8_t reserved2[3]); + PACK_STRUCT_FLD_8(u8_t site_prefix_length); + PACK_STRUCT_FLD_S(ip6_addr_p_t prefix); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Redirected header option. */ +#define ND6_OPTION_TYPE_REDIR_HDR (0x04) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct redirected_header_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t reserved[6]); + /* Portion of redirected packet follows. */ + /* PACK_STRUCT_FLD_8(u8_t redirected[8]); */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** MTU option. */ +#define ND6_OPTION_TYPE_MTU (0x05) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct mtu_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FIELD(u32_t mtu); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Route information option. */ +#define ND6_OPTION_TYPE_ROUTE_INFO (24) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct route_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t prefix_length); + PACK_STRUCT_FLD_8(u8_t preference); + PACK_STRUCT_FIELD(u32_t route_lifetime); + PACK_STRUCT_FLD_S(ip6_addr_p_t prefix); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Recursive DNS Server Option. 
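+ * Editorial note (not upstream lwIP text): the RDNSS option is defined in
+ * RFC 6106 / RFC 8106; 'length' is in units of 8 octets, so it carries
+ * (length - 1) / 2 addresses, and 'lifetime' is the validity in seconds.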
*/ +#define ND6_OPTION_TYPE_RDNSS (25) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct rdnss_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FIELD(u32_t lifetime); + PACK_STRUCT_FLD_S(ip6_addr_p_t rdnss_address[1]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_RDNSS_OPTION_BASE 8 /* size without addresses */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ND6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h new file mode 100644 index 00000000..c1d7de1f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h @@ -0,0 +1,100 @@ +/** + * @file + * TCP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_TCP_H +#define LWIP_HDR_PROT_TCP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Length of the TCP header, excluding options. */ +#define TCP_HLEN 20 + +/* Fields are (of course) in network byte order. + * Some fields are converted to host byte order in tcp_input(). 
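+ * Editorial usage sketch (not part of upstream lwIP): the accessor macros
+ * below hide the byte swapping when a received header is inspected, e.g.
+ *   u8_t  flags  = TCPH_FLAGS(tcphdr);          test with flags & TCP_SYN
+ *   u16_t hdrlen = TCPH_HDRLEN_BYTES(tcphdr);   data offset in bytes
+ * where 'tcphdr' is assumed to point at a struct tcp_hdr in the segment.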
+ */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct tcp_hdr { + PACK_STRUCT_FIELD(u16_t src); + PACK_STRUCT_FIELD(u16_t dest); + PACK_STRUCT_FIELD(u32_t seqno); + PACK_STRUCT_FIELD(u32_t ackno); + PACK_STRUCT_FIELD(u16_t _hdrlen_rsvd_flags); + PACK_STRUCT_FIELD(u16_t wnd); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t urgp); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* TCP header flags bits */ +#define TCP_FIN 0x01U +#define TCP_SYN 0x02U +#define TCP_RST 0x04U +#define TCP_PSH 0x08U +#define TCP_ACK 0x10U +#define TCP_URG 0x20U +#define TCP_ECE 0x40U +#define TCP_CWR 0x80U +/* Valid TCP header flags */ +#define TCP_FLAGS 0x3fU + +#define TCP_MAX_OPTION_BYTES 40 + +#define TCPH_HDRLEN(phdr) ((u16_t)(lwip_ntohs((phdr)->_hdrlen_rsvd_flags) >> 12)) +#define TCPH_HDRLEN_BYTES(phdr) ((u8_t)(TCPH_HDRLEN(phdr) << 2)) +#define TCPH_FLAGS(phdr) ((u8_t)((lwip_ntohs((phdr)->_hdrlen_rsvd_flags) & TCP_FLAGS))) + +#define TCPH_HDRLEN_SET(phdr, len) (phdr)->_hdrlen_rsvd_flags = lwip_htons(((len) << 12) | TCPH_FLAGS(phdr)) +#define TCPH_FLAGS_SET(phdr, flags) (phdr)->_hdrlen_rsvd_flags = (((phdr)->_hdrlen_rsvd_flags & PP_HTONS(~TCP_FLAGS)) | lwip_htons(flags)) +#define TCPH_HDRLEN_FLAGS_SET(phdr, len, flags) (phdr)->_hdrlen_rsvd_flags = (u16_t)(lwip_htons((u16_t)((len) << 12) | (flags))) + +#define TCPH_SET_FLAG(phdr, flags ) (phdr)->_hdrlen_rsvd_flags = ((phdr)->_hdrlen_rsvd_flags | lwip_htons(flags)) +#define TCPH_UNSET_FLAG(phdr, flags) (phdr)->_hdrlen_rsvd_flags = ((phdr)->_hdrlen_rsvd_flags & ~lwip_htons(flags)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_TCP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h new file mode 100644 index 00000000..664f19a3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h @@ -0,0 +1,68 @@ +/** + * @file + * UDP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_UDP_H +#define LWIP_HDR_PROT_UDP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define UDP_HLEN 8 + +/* Fields are (of course) in network byte order. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct udp_hdr { + PACK_STRUCT_FIELD(u16_t src); + PACK_STRUCT_FIELD(u16_t dest); /* src/dest UDP ports */ + PACK_STRUCT_FIELD(u16_t len); + PACK_STRUCT_FIELD(u16_t chksum); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_UDP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h b/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h new file mode 100644 index 00000000..b129bd3b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h @@ -0,0 +1,143 @@ +/** + * @file + * raw API (to be used from TCPIP thread)\n + * See also @ref raw_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
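
/*
 * Usage sketch for struct udp_hdr from lwip/prot/udp.h above (illustrative
 * only, not part of the lwIP sources). lwIP fills this header itself in
 * udp_sendto(); the sketch just shows the byte-order handling.
 * 'payload_len' is a hypothetical payload size in bytes.
 */
#include "lwip/prot/udp.h"
#include "lwip/def.h"

static void example_fill_udp_header(struct udp_hdr *uh, u16_t src_port,
                                    u16_t dst_port, u16_t payload_len)
{
  uh->src    = lwip_htons(src_port);
  uh->dest   = lwip_htons(dst_port);
  uh->len    = lwip_htons((u16_t)(UDP_HLEN + payload_len)); /* length covers header + data */
  uh->chksum = 0;  /* 0 means "no checksum computed" for IPv4 UDP */
}
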
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_RAW_H +#define LWIP_HDR_RAW_H + +#include "lwip/opt.h" + +#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/def.h" +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define RAW_FLAGS_CONNECTED 0x01U +#define RAW_FLAGS_HDRINCL 0x02U +#define RAW_FLAGS_MULTICAST_LOOP 0x04U + +struct raw_pcb; + +/** Function prototype for raw pcb receive callback functions. + * @param arg user supplied argument (raw_pcb.recv_arg) + * @param pcb the raw_pcb which received data + * @param p the packet buffer that was received + * @param addr the remote IP address from which the packet was received + * @return 1 if the packet was 'eaten' (aka. deleted), + * 0 if the packet lives on + * If returning 1, the callback is responsible for freeing the pbuf + * if it's not used any more. + */ +typedef u8_t (*raw_recv_fn)(void *arg, struct raw_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr); + +/** the RAW protocol control block */ +struct raw_pcb { + /* Common members of all PCB types */ + IP_PCB; + + struct raw_pcb *next; + + u8_t protocol; + u8_t flags; + +#if LWIP_MULTICAST_TX_OPTIONS + /** outgoing network interface for multicast packets, by interface index (if nonzero) */ + u8_t mcast_ifindex; + /** TTL for outgoing multicast packets */ + u8_t mcast_ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + /** receive callback function */ + raw_recv_fn recv; + /* user-supplied argument for the recv callback */ + void *recv_arg; +#if LWIP_IPV6 + /* fields for handling checksum computations as per RFC3542. */ + u16_t chksum_offset; + u8_t chksum_reqd; +#endif +}; + +/* The following functions is the application layer interface to the + RAW code. */ +struct raw_pcb * raw_new (u8_t proto); +struct raw_pcb * raw_new_ip_type(u8_t type, u8_t proto); +void raw_remove (struct raw_pcb *pcb); +err_t raw_bind (struct raw_pcb *pcb, const ip_addr_t *ipaddr); +void raw_bind_netif (struct raw_pcb *pcb, const struct netif *netif); +err_t raw_connect (struct raw_pcb *pcb, const ip_addr_t *ipaddr); +void raw_disconnect (struct raw_pcb *pcb); + +err_t raw_sendto (struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr); +err_t raw_sendto_if_src(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, struct netif *netif, const ip_addr_t *src_ip); +err_t raw_send (struct raw_pcb *pcb, struct pbuf *p); + +void raw_recv (struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg); + +#define raw_flags(pcb) ((pcb)->flags) +#define raw_setflags(pcb,f) ((pcb)->flags = (f)) + +#define raw_set_flags(pcb, set_flags) do { (pcb)->flags = (u8_t)((pcb)->flags | (set_flags)); } while(0) +#define raw_clear_flags(pcb, clr_flags) do { (pcb)->flags = (u8_t)((pcb)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define raw_is_flag_set(pcb, flag) (((pcb)->flags & (flag)) != 0) + +#define raw_init() /* Compatibility define, no init needed. 
*/ + +/* for compatibility with older implementation */ +#define raw_new_ip6(proto) raw_new_ip_type(IPADDR_TYPE_V6, proto) + +#if LWIP_MULTICAST_TX_OPTIONS +#define raw_set_multicast_netif_index(pcb, idx) ((pcb)->mcast_ifindex = (idx)) +#define raw_get_multicast_netif_index(pcb) ((pcb)->mcast_ifindex) +#define raw_set_multicast_ttl(pcb, value) ((pcb)->mcast_ttl = (value)) +#define raw_get_multicast_ttl(pcb) ((pcb)->mcast_ttl) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_RAW */ + +#endif /* LWIP_HDR_RAW_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h b/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h new file mode 100644 index 00000000..7643e195 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + */ + +/* + * This is the interface to the platform specific serial IO module + * It needs to be implemented by those platforms which need SLIP or PPP + */ + +#ifndef SIO_H +#define SIO_H + +#include "lwip/arch.h" +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* If you want to define sio_fd_t elsewhere or differently, + define this in your cc.h file. */ +#ifndef __sio_fd_t_defined +typedef void * sio_fd_t; +#endif + +/* The following functions can be defined to something else in your cc.h file + or be implemented in your custom sio.c file. */ + +#ifndef sio_open +/** + * Opens a serial device for communication. + * + * @param devnum device number + * @return handle to serial device if successful, NULL otherwise + */ +sio_fd_t sio_open(u8_t devnum); +#endif + +#ifndef sio_send +/** + * Sends a single character to the serial device. + * + * @param c character to send + * @param fd serial device handle + * + * @note This function will block until the character can be sent. + */ +void sio_send(u8_t c, sio_fd_t fd); +#endif + +#ifndef sio_recv +/** + * Receives a single character from the serial device. 
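
/*
 * Usage sketch for the raw API declared in lwip/raw.h above (illustrative
 * only, not part of the lwIP sources). Assumes IP_PROTO_ICMP from
 * lwip/prot/ip.h and that this runs in the tcpip thread (or the main loop
 * when NO_SYS==1).
 */
#include "lwip/raw.h"
#include "lwip/prot/ip.h"

static u8_t example_icmp_recv(void *arg, struct raw_pcb *pcb, struct pbuf *p,
                              const ip_addr_t *addr)
{
  (void)arg;
  (void)pcb;
  (void)addr;
  pbuf_free(p);   /* packet consumed here ... */
  return 1;       /* ... so report it as 'eaten' to the stack */
}

static void example_raw_setup(void)
{
  struct raw_pcb *pcb = raw_new(IP_PROTO_ICMP);
  if (pcb != NULL) {
    raw_recv(pcb, example_icmp_recv, NULL);
    raw_bind(pcb, IP_ADDR_ANY);   /* listen on any local address */
  }
}
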
+ * + * @param fd serial device handle + * + * @note This function will block until a character is received. + */ +u8_t sio_recv(sio_fd_t fd); +#endif + +#ifndef sio_read +/** + * Reads from the serial device. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received - may be 0 if aborted by sio_read_abort + * + * @note This function will block until data can be received. The blocking + * can be cancelled by calling sio_read_abort(). + */ +u32_t sio_read(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_tryread +/** + * Tries to read from the serial device. Same as sio_read but returns + * immediately if no data is available and never blocks. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received + */ +u32_t sio_tryread(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_write +/** + * Writes to the serial device. + * + * @param fd serial device handle + * @param data pointer to data to send + * @param len length (in bytes) of data to send + * @return number of bytes actually sent + * + * @note This function will block until all data can be sent. + */ +u32_t sio_write(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_read_abort +/** + * Aborts a blocking sio_read() call. + * + * @param fd serial device handle + */ +void sio_read_abort(sio_fd_t fd); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* SIO_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h new file mode 100644 index 00000000..8704d0b4 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h @@ -0,0 +1,213 @@ +/** + * @file + * SNMP support API for implementing netifs and statitics for MIB2 + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
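
/*
 * Port-side sketch of the sio interface above (illustrative only, not part
 * of the lwIP sources). Only needed when SLIP or PPP is enabled. The
 * board_uart_* helpers are hypothetical; a real port would map them to its
 * UART driver.
 */
#include "lwip/sio.h"

extern void board_uart_putc(u8_t c);   /* hypothetical blocking UART write */
extern u8_t board_uart_getc(void);     /* hypothetical blocking UART read */

sio_fd_t sio_open(u8_t devnum)
{
  (void)devnum;          /* single UART assumed; devnum ignored */
  return (sio_fd_t)1;    /* any non-NULL value works as a handle */
}

void sio_send(u8_t c, sio_fd_t fd)
{
  (void)fd;
  board_uart_putc(c);
}

u8_t sio_recv(sio_fd_t fd)
{
  (void)fd;
  return board_uart_getc();
}
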
+ * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_SNMP_H +#define LWIP_HDR_SNMP_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct udp_pcb; +struct netif; + +/** + * @defgroup netif_mib2 MIB2 statistics + * @ingroup netif + */ + +/* MIB2 statistics functions */ +#if MIB2_STATS /* don't build if not configured for use in lwipopts.h */ +/** + * @ingroup netif_mib2 + * @see RFC1213, "MIB-II, 6. Definitions" + */ +enum snmp_ifType { + snmp_ifType_other=1, /* none of the following */ + snmp_ifType_regular1822, + snmp_ifType_hdh1822, + snmp_ifType_ddn_x25, + snmp_ifType_rfc877_x25, + snmp_ifType_ethernet_csmacd, + snmp_ifType_iso88023_csmacd, + snmp_ifType_iso88024_tokenBus, + snmp_ifType_iso88025_tokenRing, + snmp_ifType_iso88026_man, + snmp_ifType_starLan, + snmp_ifType_proteon_10Mbit, + snmp_ifType_proteon_80Mbit, + snmp_ifType_hyperchannel, + snmp_ifType_fddi, + snmp_ifType_lapb, + snmp_ifType_sdlc, + snmp_ifType_ds1, /* T-1 */ + snmp_ifType_e1, /* european equiv. of T-1 */ + snmp_ifType_basicISDN, + snmp_ifType_primaryISDN, /* proprietary serial */ + snmp_ifType_propPointToPointSerial, + snmp_ifType_ppp, + snmp_ifType_softwareLoopback, + snmp_ifType_eon, /* CLNP over IP [11] */ + snmp_ifType_ethernet_3Mbit, + snmp_ifType_nsip, /* XNS over IP */ + snmp_ifType_slip, /* generic SLIP */ + snmp_ifType_ultra, /* ULTRA technologies */ + snmp_ifType_ds3, /* T-3 */ + snmp_ifType_sip, /* SMDS */ + snmp_ifType_frame_relay +}; + +/** This macro has a precision of ~49 days because sys_now returns u32_t. \#define your own if you want ~490 days. */ +#ifndef MIB2_COPY_SYSUPTIME_TO +#define MIB2_COPY_SYSUPTIME_TO(ptrToVal) (*(ptrToVal) = (sys_now() / 10)) +#endif + +/** + * @ingroup netif_mib2 + * Increment stats member for SNMP MIB2 stats (struct stats_mib2_netif_ctrs) + */ +#define MIB2_STATS_NETIF_INC(n, x) do { ++(n)->mib2_counters.x; } while(0) +/** + * @ingroup netif_mib2 + * Add value to stats member for SNMP MIB2 stats (struct stats_mib2_netif_ctrs) + */ +#define MIB2_STATS_NETIF_ADD(n, x, val) do { (n)->mib2_counters.x += (val); } while(0) + +/** + * @ingroup netif_mib2 + * Init MIB2 statistic counters in netif + * @param netif Netif to init + * @param type one of enum @ref snmp_ifType + * @param speed your link speed here (units: bits per second) + */ +#define MIB2_INIT_NETIF(netif, type, speed) do { \ + (netif)->link_type = (type); \ + (netif)->link_speed = (speed);\ + (netif)->ts = 0; \ + (netif)->mib2_counters.ifinoctets = 0; \ + (netif)->mib2_counters.ifinucastpkts = 0; \ + (netif)->mib2_counters.ifinnucastpkts = 0; \ + (netif)->mib2_counters.ifindiscards = 0; \ + (netif)->mib2_counters.ifinerrors = 0; \ + (netif)->mib2_counters.ifinunknownprotos = 0; \ + (netif)->mib2_counters.ifoutoctets = 0; \ + (netif)->mib2_counters.ifoutucastpkts = 0; \ + (netif)->mib2_counters.ifoutnucastpkts = 0; \ + (netif)->mib2_counters.ifoutdiscards = 0; \ + (netif)->mib2_counters.ifouterrors = 0; } while(0) +#else /* MIB2_STATS */ +#ifndef MIB2_COPY_SYSUPTIME_TO +#define MIB2_COPY_SYSUPTIME_TO(ptrToVal) +#endif +#define MIB2_INIT_NETIF(netif, type, speed) +#define MIB2_STATS_NETIF_INC(n, x) +#define MIB2_STATS_NETIF_ADD(n, x, val) +#endif /* MIB2_STATS */ + +/* LWIP MIB2 callbacks */ +#if LWIP_MIB2_CALLBACKS /* don't build if not configured for use in lwipopts.h */ +/* network interface */ +void mib2_netif_added(struct netif *ni); +void mib2_netif_removed(struct netif *ni); + +#if LWIP_IPV4 && LWIP_ARP +/* ARP (for atTable and ipNetToMediaTable) */ 
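
/*
 * Usage sketch for the MIB2 netif macros above (illustrative only, not part
 * of the lwIP sources). Drivers typically call MIB2_INIT_NETIF() from their
 * init function and bump the counters per frame; with MIB2_STATS disabled
 * these macros compile away.
 */
#include "lwip/snmp.h"
#include "lwip/netif.h"
#include "lwip/pbuf.h"

static void example_ethernetif_init_mib2(struct netif *netif)
{
  /* 100 Mbit/s Ethernet interface; speed is given in bits per second */
  MIB2_INIT_NETIF(netif, snmp_ifType_ethernet_csmacd, 100000000);
}

static void example_count_tx_frame(struct netif *netif, const struct pbuf *p)
{
  MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p->tot_len);
  MIB2_STATS_NETIF_INC(netif, ifoutucastpkts);
}
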
+void mib2_add_arp_entry(struct netif *ni, ip4_addr_t *ip); +void mib2_remove_arp_entry(struct netif *ni, ip4_addr_t *ip); +#else /* LWIP_IPV4 && LWIP_ARP */ +#define mib2_add_arp_entry(ni,ip) +#define mib2_remove_arp_entry(ni,ip) +#endif /* LWIP_IPV4 && LWIP_ARP */ + +/* IP */ +#if LWIP_IPV4 +void mib2_add_ip4(struct netif *ni); +void mib2_remove_ip4(struct netif *ni); +void mib2_add_route_ip4(u8_t dflt, struct netif *ni); +void mib2_remove_route_ip4(u8_t dflt, struct netif *ni); +#endif /* LWIP_IPV4 */ + +/* UDP */ +#if LWIP_UDP +void mib2_udp_bind(struct udp_pcb *pcb); +void mib2_udp_unbind(struct udp_pcb *pcb); +#endif /* LWIP_UDP */ + +#else /* LWIP_MIB2_CALLBACKS */ +/* LWIP_MIB2_CALLBACKS support not available */ +/* define everything to be empty */ + +/* network interface */ +#define mib2_netif_added(ni) +#define mib2_netif_removed(ni) + +/* ARP */ +#define mib2_add_arp_entry(ni,ip) +#define mib2_remove_arp_entry(ni,ip) + +/* IP */ +#define mib2_add_ip4(ni) +#define mib2_remove_ip4(ni) +#define mib2_add_route_ip4(dflt, ni) +#define mib2_remove_route_ip4(dflt, ni) + +/* UDP */ +#define mib2_udp_bind(pcb) +#define mib2_udp_unbind(pcb) +#endif /* LWIP_MIB2_CALLBACKS */ + +/* for source-code compatibility reasons only, can be removed (not used internally) */ +#define NETIF_INIT_SNMP MIB2_INIT_NETIF +#define snmp_add_ifinoctets(ni,value) MIB2_STATS_NETIF_ADD(ni, ifinoctets, value) +#define snmp_inc_ifinucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifinucastpkts) +#define snmp_inc_ifinnucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifinnucastpkts) +#define snmp_inc_ifindiscards(ni) MIB2_STATS_NETIF_INC(ni, ifindiscards) +#define snmp_inc_ifinerrors(ni) MIB2_STATS_NETIF_INC(ni, ifinerrors) +#define snmp_inc_ifinunknownprotos(ni) MIB2_STATS_NETIF_INC(ni, ifinunknownprotos) +#define snmp_add_ifoutoctets(ni,value) MIB2_STATS_NETIF_ADD(ni, ifoutoctets, value) +#define snmp_inc_ifoutucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifoutucastpkts) +#define snmp_inc_ifoutnucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifoutnucastpkts) +#define snmp_inc_ifoutdiscards(ni) MIB2_STATS_NETIF_INC(ni, ifoutdiscards) +#define snmp_inc_ifouterrors(ni) MIB2_STATS_NETIF_INC(ni, ifouterrors) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_SNMP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h b/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h new file mode 100644 index 00000000..d70d36c4 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h @@ -0,0 +1,688 @@ +/** + * @file + * Socket API (to be used from non-TCPIP threads) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + + +#ifndef LWIP_HDR_SOCKETS_H +#define LWIP_HDR_SOCKETS_H + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/err.h" +#include "lwip/inet.h" +#include "lwip/errno.h" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* If your port already typedef's sa_family_t, define SA_FAMILY_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(sa_family_t) && !defined(SA_FAMILY_T_DEFINED) +typedef u8_t sa_family_t; +#endif +/* If your port already typedef's in_port_t, define IN_PORT_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(in_port_t) && !defined(IN_PORT_T_DEFINED) +typedef u16_t in_port_t; +#endif + +#if LWIP_IPV4 +/* members are in network byte order */ +struct sockaddr_in { + u8_t sin_len; + sa_family_t sin_family; + in_port_t sin_port; + struct in_addr sin_addr; +#define SIN_ZERO_LEN 8 + char sin_zero[SIN_ZERO_LEN]; +}; +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +struct sockaddr_in6 { + u8_t sin6_len; /* length of this structure */ + sa_family_t sin6_family; /* AF_INET6 */ + in_port_t sin6_port; /* Transport layer port # */ + u32_t sin6_flowinfo; /* IPv6 flow information */ + struct in6_addr sin6_addr; /* IPv6 address */ + u32_t sin6_scope_id; /* Set of interfaces for scope */ +}; +#endif /* LWIP_IPV6 */ + +struct sockaddr { + u8_t sa_len; + sa_family_t sa_family; + char sa_data[14]; +}; + +struct sockaddr_storage { + u8_t s2_len; + sa_family_t ss_family; + char s2_data1[2]; + u32_t s2_data2[3]; +#if LWIP_IPV6 + u32_t s2_data3[3]; +#endif /* LWIP_IPV6 */ +}; + +/* If your port already typedef's socklen_t, define SOCKLEN_T_DEFINED + to prevent this code from redefining it. 
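
/*
 * Usage sketch for struct sockaddr_in above (illustrative only, not part of
 * the lwIP sources). Assumes LWIP_IPV4 and LWIP_SOCKET are enabled; 7777 is
 * an arbitrary example port.
 */
#include <string.h>
#include "lwip/sockets.h"

static int example_bind_udp_socket(void)
{
  struct sockaddr_in addr;
  int s = lwip_socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
  if (s < 0) {
    return -1;
  }
  memset(&addr, 0, sizeof(addr));
  addr.sin_len         = (u8_t)sizeof(addr);
  addr.sin_family      = AF_INET;
  addr.sin_port        = lwip_htons(7777);        /* port in network byte order */
  addr.sin_addr.s_addr = lwip_htonl(INADDR_ANY);  /* any local address */
  if (lwip_bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
    lwip_close(s);
    return -1;
  }
  return s;
}
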
*/ +#if !defined(socklen_t) && !defined(SOCKLEN_T_DEFINED) +typedef u32_t socklen_t; +#endif + +#if !defined IOV_MAX +#define IOV_MAX 0xFFFF +#elif IOV_MAX > 0xFFFF +#error "IOV_MAX larger than supported by LwIP" +#endif /* IOV_MAX */ + +#if !defined(iovec) +struct iovec { + void *iov_base; + size_t iov_len; +}; +#endif + +struct msghdr { + void *msg_name; + socklen_t msg_namelen; + struct iovec *msg_iov; + int msg_iovlen; + void *msg_control; + socklen_t msg_controllen; + int msg_flags; +}; + +/* struct msghdr->msg_flags bit field values */ +#define MSG_TRUNC 0x04 +#define MSG_CTRUNC 0x08 + +/* RFC 3542, Section 20: Ancillary Data */ +struct cmsghdr { + socklen_t cmsg_len; /* number of bytes, including header */ + int cmsg_level; /* originating protocol */ + int cmsg_type; /* protocol-specific type */ +}; +/* Data section follows header and possible padding, typically referred to as + unsigned char cmsg_data[]; */ + +/* cmsg header/data alignment. NOTE: we align to native word size (double word +size on 16-bit arch) so structures are not placed at an unaligned address. +16-bit arch needs double word to ensure 32-bit alignment because socklen_t +could be 32 bits. If we ever have cmsg data with a 64-bit variable, alignment +will need to increase long long */ +#define ALIGN_H(size) (((size) + sizeof(long) - 1U) & ~(sizeof(long)-1U)) +#define ALIGN_D(size) ALIGN_H(size) + +#define CMSG_FIRSTHDR(mhdr) \ + ((mhdr)->msg_controllen >= sizeof(struct cmsghdr) ? \ + (struct cmsghdr *)(mhdr)->msg_control : \ + (struct cmsghdr *)NULL) + +#define CMSG_NXTHDR(mhdr, cmsg) \ + (((cmsg) == NULL) ? CMSG_FIRSTHDR(mhdr) : \ + (((u8_t *)(cmsg) + ALIGN_H((cmsg)->cmsg_len) \ + + ALIGN_D(sizeof(struct cmsghdr)) > \ + (u8_t *)((mhdr)->msg_control) + (mhdr)->msg_controllen) ? \ + (struct cmsghdr *)NULL : \ + (struct cmsghdr *)((void*)((u8_t *)(cmsg) + \ + ALIGN_H((cmsg)->cmsg_len))))) + +#define CMSG_DATA(cmsg) ((void*)((u8_t *)(cmsg) + \ + ALIGN_D(sizeof(struct cmsghdr)))) + +#define CMSG_SPACE(length) (ALIGN_D(sizeof(struct cmsghdr)) + \ + ALIGN_H(length)) + +#define CMSG_LEN(length) (ALIGN_D(sizeof(struct cmsghdr)) + \ + length) + +/* Set socket options argument */ +#define IFNAMSIZ NETIF_NAMESIZE +struct ifreq { + char ifr_name[IFNAMSIZ]; /* Interface name */ +}; + +/* Socket protocol types (TCP/UDP/RAW) */ +#define SOCK_STREAM 1 +#define SOCK_DGRAM 2 +#define SOCK_RAW 3 + +/* + * Option flags per-socket. These must match the SOF_ flags in ip.h (checked in init.c) + */ +#define SO_REUSEADDR 0x0004 /* Allow local address reuse */ +#define SO_KEEPALIVE 0x0008 /* keep connections alive */ +#define SO_BROADCAST 0x0020 /* permit to send and to receive broadcast messages (see IP_SOF_BROADCAST option) */ + + +/* + * Additional options, not kept in so_options. 
+ */ +#define SO_DEBUG 0x0001 /* Unimplemented: turn on debugging info recording */ +#define SO_ACCEPTCONN 0x0002 /* socket has had listen() */ +#define SO_DONTROUTE 0x0010 /* Unimplemented: just use interface addresses */ +#define SO_USELOOPBACK 0x0040 /* Unimplemented: bypass hardware when possible */ +#define SO_LINGER 0x0080 /* linger on close if data present */ +#define SO_DONTLINGER ((int)(~SO_LINGER)) +#define SO_OOBINLINE 0x0100 /* Unimplemented: leave received OOB data in line */ +#define SO_REUSEPORT 0x0200 /* Unimplemented: allow local address & port reuse */ +#define SO_SNDBUF 0x1001 /* Unimplemented: send buffer size */ +#define SO_RCVBUF 0x1002 /* receive buffer size */ +#define SO_SNDLOWAT 0x1003 /* Unimplemented: send low-water mark */ +#define SO_RCVLOWAT 0x1004 /* Unimplemented: receive low-water mark */ +#define SO_SNDTIMEO 0x1005 /* send timeout */ +#define SO_RCVTIMEO 0x1006 /* receive timeout */ +#define SO_ERROR 0x1007 /* get error status and clear */ +#define SO_TYPE 0x1008 /* get socket type */ +#define SO_CONTIMEO 0x1009 /* Unimplemented: connect timeout */ +#define SO_NO_CHECK 0x100a /* don't create UDP checksum */ +#define SO_BINDTODEVICE 0x100b /* bind to device */ + +/* + * Structure used for manipulating linger option. + */ +struct linger { + int l_onoff; /* option on/off */ + int l_linger; /* linger time in seconds */ +}; + +/* + * Level number for (get/set)sockopt() to apply to socket itself. + */ +#define SOL_SOCKET 0xfff /* options for socket level */ + + +#define AF_UNSPEC 0 +#define AF_INET 2 +#if LWIP_IPV6 +#define AF_INET6 10 +#else /* LWIP_IPV6 */ +#define AF_INET6 AF_UNSPEC +#endif /* LWIP_IPV6 */ +#define PF_INET AF_INET +#define PF_INET6 AF_INET6 +#define PF_UNSPEC AF_UNSPEC + +#define IPPROTO_IP 0 +#define IPPROTO_ICMP 1 +#define IPPROTO_TCP 6 +#define IPPROTO_UDP 17 +#if LWIP_IPV6 +#define IPPROTO_IPV6 41 +#define IPPROTO_ICMPV6 58 +#endif /* LWIP_IPV6 */ +#define IPPROTO_UDPLITE 136 +#define IPPROTO_RAW 255 + +/* Flags we can use with send and recv. */ +#define MSG_PEEK 0x01 /* Peeks at an incoming message */ +#define MSG_WAITALL 0x02 /* Unimplemented: Requests that the function block until the full amount of data requested can be returned */ +#define MSG_OOB 0x04 /* Unimplemented: Requests out-of-band data. The significance and semantics of out-of-band data are protocol-specific */ +#define MSG_DONTWAIT 0x08 /* Nonblocking i/o for this operation only */ +#define MSG_MORE 0x10 /* Sender will send more */ +#define MSG_NOSIGNAL 0x20 /* Uninmplemented: Requests not to send the SIGPIPE signal if an attempt to send is made on a stream-oriented socket that is no longer connected. */ + + +/* + * Options for level IPPROTO_IP + */ +#define IP_TOS 1 +#define IP_TTL 2 +#define IP_PKTINFO 8 + +#if LWIP_TCP +/* + * Options for level IPPROTO_TCP + */ +#define TCP_NODELAY 0x01 /* don't delay send to coalesce packets */ +#define TCP_KEEPALIVE 0x02 /* send KEEPALIVE probes when idle for pcb->keep_idle milliseconds */ +#define TCP_KEEPIDLE 0x03 /* set pcb->keep_idle - Same as TCP_KEEPALIVE, but use seconds for get/setsockopt */ +#define TCP_KEEPINTVL 0x04 /* set pcb->keep_intvl - Use seconds for get/setsockopt */ +#define TCP_KEEPCNT 0x05 /* set pcb->keep_cnt - Use number of probes sent for get/setsockopt */ +#endif /* LWIP_TCP */ + +#if LWIP_IPV6 +/* + * Options for level IPPROTO_IPV6 + */ +#define IPV6_CHECKSUM 7 /* RFC3542: calculate and insert the ICMPv6 checksum for raw sockets. 
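
/*
 * Usage sketch for the socket options above (illustrative only, not part of
 * the lwIP sources). Assumes LWIP_SO_RCVTIMEO and LWIP_TCP are enabled in
 * lwipopts.h and that SO_RCVTIMEO takes a struct timeval (the lwIP 2.x
 * default); 's' is an already-created TCP socket.
 */
#include "lwip/sockets.h"

static void example_set_socket_options(int s)
{
  struct timeval tv;
  int nodelay = 1;

  tv.tv_sec  = 2;      /* give up on recv() after 2 seconds */
  tv.tv_usec = 0;
  lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));

  /* disable Nagle's algorithm for low-latency small writes */
  lwip_setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &nodelay, sizeof(nodelay));
}
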
*/ +#define IPV6_V6ONLY 27 /* RFC3493: boolean control to restrict AF_INET6 sockets to IPv6 communications only. */ +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE +/* + * Options for level IPPROTO_UDPLITE + */ +#define UDPLITE_SEND_CSCOV 0x01 /* sender checksum coverage */ +#define UDPLITE_RECV_CSCOV 0x02 /* minimal receiver checksum coverage */ +#endif /* LWIP_UDP && LWIP_UDPLITE*/ + + +#if LWIP_MULTICAST_TX_OPTIONS +/* + * Options and types for UDP multicast traffic handling + */ +#define IP_MULTICAST_TTL 5 +#define IP_MULTICAST_IF 6 +#define IP_MULTICAST_LOOP 7 +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_IGMP +/* + * Options and types related to multicast membership + */ +#define IP_ADD_MEMBERSHIP 3 +#define IP_DROP_MEMBERSHIP 4 + +typedef struct ip_mreq { + struct in_addr imr_multiaddr; /* IP multicast address of group */ + struct in_addr imr_interface; /* local IP address of interface */ +} ip_mreq; +#endif /* LWIP_IGMP */ + +#if LWIP_IPV4 +struct in_pktinfo { + unsigned int ipi_ifindex; /* Interface index */ + struct in_addr ipi_addr; /* Destination (from header) address */ +}; +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6_MLD +/* + * Options and types related to IPv6 multicast membership + */ +#define IPV6_JOIN_GROUP 12 +#define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP +#define IPV6_LEAVE_GROUP 13 +#define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP + +typedef struct ipv6_mreq { + struct in6_addr ipv6mr_multiaddr; /* IPv6 multicast addr */ + unsigned int ipv6mr_interface; /* interface index, or 0 */ +} ipv6_mreq; +#endif /* LWIP_IPV6_MLD */ + +/* + * The Type of Service provides an indication of the abstract + * parameters of the quality of service desired. These parameters are + * to be used to guide the selection of the actual service parameters + * when transmitting a datagram through a particular network. Several + * networks offer service precedence, which somehow treats high + * precedence traffic as more important than other traffic (generally + * by accepting only traffic above a certain precedence at time of high + * load). The major choice is a three way tradeoff between low-delay, + * high-reliability, and high-throughput. + * The use of the Delay, Throughput, and Reliability indications may + * increase the cost (in some sense) of the service. In many networks + * better performance for one of these parameters is coupled with worse + * performance on another. Except for very unusual cases at most two + * of these three indications should be set. + */ +#define IPTOS_TOS_MASK 0x1E +#define IPTOS_TOS(tos) ((tos) & IPTOS_TOS_MASK) +#define IPTOS_LOWDELAY 0x10 +#define IPTOS_THROUGHPUT 0x08 +#define IPTOS_RELIABILITY 0x04 +#define IPTOS_LOWCOST 0x02 +#define IPTOS_MINCOST IPTOS_LOWCOST + +/* + * The Network Control precedence designation is intended to be used + * within a network only. The actual use and control of that + * designation is up to each network. The Internetwork Control + * designation is intended for use by gateway control originators only. + * If the actual use of these precedence designations is of concern to + * a particular network, it is the responsibility of that network to + * control the access to, and use of, those precedence designations. 
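
/*
 * Usage sketch for the IGMP membership options above (illustrative only, not
 * part of the lwIP sources). Assumes LWIP_IGMP is enabled; 239.0.0.1 is an
 * arbitrary example group address.
 */
#include "lwip/sockets.h"
#include "lwip/inet.h"

static int example_join_multicast_group(int s)
{
  struct ip_mreq mreq;

  mreq.imr_multiaddr.s_addr = inet_addr("239.0.0.1");   /* group to join */
  mreq.imr_interface.s_addr = lwip_htonl(INADDR_ANY);   /* default interface */
  return lwip_setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                         &mreq, sizeof(mreq));
}
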
+ */ +#define IPTOS_PREC_MASK 0xe0 +#define IPTOS_PREC(tos) ((tos) & IPTOS_PREC_MASK) +#define IPTOS_PREC_NETCONTROL 0xe0 +#define IPTOS_PREC_INTERNETCONTROL 0xc0 +#define IPTOS_PREC_CRITIC_ECP 0xa0 +#define IPTOS_PREC_FLASHOVERRIDE 0x80 +#define IPTOS_PREC_FLASH 0x60 +#define IPTOS_PREC_IMMEDIATE 0x40 +#define IPTOS_PREC_PRIORITY 0x20 +#define IPTOS_PREC_ROUTINE 0x00 + + +/* + * Commands for ioctlsocket(), taken from the BSD file fcntl.h. + * lwip_ioctl only supports FIONREAD and FIONBIO, for now + * + * Ioctl's have the command encoded in the lower word, + * and the size of any in or out parameters in the upper + * word. The high 2 bits of the upper word are used + * to encode the in/out status of the parameter; for now + * we restrict parameters to at most 128 bytes. + */ +#if !defined(FIONREAD) || !defined(FIONBIO) +#define IOCPARM_MASK 0x7fU /* parameters must be < 128 bytes */ +#define IOC_VOID 0x20000000UL /* no parameters */ +#define IOC_OUT 0x40000000UL /* copy out parameters */ +#define IOC_IN 0x80000000UL /* copy in parameters */ +#define IOC_INOUT (IOC_IN|IOC_OUT) + /* 0x20000000 distinguishes new & + old ioctl's */ +#define _IO(x,y) ((long)(IOC_VOID|((x)<<8)|(y))) + +#define _IOR(x,y,t) ((long)(IOC_OUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|(y))) + +#define _IOW(x,y,t) ((long)(IOC_IN|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|(y))) +#endif /* !defined(FIONREAD) || !defined(FIONBIO) */ + +#ifndef FIONREAD +#define FIONREAD _IOR('f', 127, unsigned long) /* get # bytes to read */ +#endif +#ifndef FIONBIO +#define FIONBIO _IOW('f', 126, unsigned long) /* set/clear non-blocking i/o */ +#endif + +/* Socket I/O Controls: unimplemented */ +#ifndef SIOCSHIWAT +#define SIOCSHIWAT _IOW('s', 0, unsigned long) /* set high watermark */ +#define SIOCGHIWAT _IOR('s', 1, unsigned long) /* get high watermark */ +#define SIOCSLOWAT _IOW('s', 2, unsigned long) /* set low watermark */ +#define SIOCGLOWAT _IOR('s', 3, unsigned long) /* get low watermark */ +#define SIOCATMARK _IOR('s', 7, unsigned long) /* at oob mark? */ +#endif + +/* commands for fnctl */ +#ifndef F_GETFL +#define F_GETFL 3 +#endif +#ifndef F_SETFL +#define F_SETFL 4 +#endif + +/* File status flags and file access modes for fnctl, + these are bits in an int. 
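
/*
 * Usage sketch for the FIONBIO ioctl above (illustrative only, not part of
 * the lwIP sources): switching a socket into non-blocking mode.
 */
#include "lwip/sockets.h"

static int example_set_nonblocking(int s)
{
  unsigned long nonblocking = 1;   /* 0 = blocking, non-zero = non-blocking */
  return lwip_ioctl(s, FIONBIO, &nonblocking);
}
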
*/ +#ifndef O_NONBLOCK +#define O_NONBLOCK 1 /* nonblocking I/O */ +#endif +#ifndef O_NDELAY +#define O_NDELAY O_NONBLOCK /* same as O_NONBLOCK, for compatibility */ +#endif +#ifndef O_RDONLY +#define O_RDONLY 2 +#endif +#ifndef O_WRONLY +#define O_WRONLY 4 +#endif +#ifndef O_RDWR +#define O_RDWR (O_RDONLY|O_WRONLY) +#endif + +#ifndef SHUT_RD + #define SHUT_RD 0 + #define SHUT_WR 1 + #define SHUT_RDWR 2 +#endif + +/* FD_SET used for lwip_select */ +#ifndef FD_SET +#undef FD_SETSIZE +/* Make FD_SETSIZE match NUM_SOCKETS in socket.c */ +#define FD_SETSIZE MEMP_NUM_NETCONN +#define LWIP_SELECT_MAXNFDS (FD_SETSIZE + LWIP_SOCKET_OFFSET) +#define FDSETSAFESET(n, code) do { \ + if (((n) - LWIP_SOCKET_OFFSET < MEMP_NUM_NETCONN) && (((int)(n) - LWIP_SOCKET_OFFSET) >= 0)) { \ + code; }} while(0) +#define FDSETSAFEGET(n, code) (((n) - LWIP_SOCKET_OFFSET < MEMP_NUM_NETCONN) && (((int)(n) - LWIP_SOCKET_OFFSET) >= 0) ?\ + (code) : 0) +#define FD_SET(n, p) FDSETSAFESET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] = (u8_t)((p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] | (1 << (((n)-LWIP_SOCKET_OFFSET) & 7)))) +#define FD_CLR(n, p) FDSETSAFESET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] = (u8_t)((p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] & ~(1 << (((n)-LWIP_SOCKET_OFFSET) & 7)))) +#define FD_ISSET(n,p) FDSETSAFEGET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] & (1 << (((n)-LWIP_SOCKET_OFFSET) & 7))) +#define FD_ZERO(p) memset((void*)(p), 0, sizeof(*(p))) + +typedef struct fd_set +{ + unsigned char fd_bits [(FD_SETSIZE+7)/8]; +} fd_set; + +#elif FD_SETSIZE < (LWIP_SOCKET_OFFSET + MEMP_NUM_NETCONN) +#error "external FD_SETSIZE too small for number of sockets" +#else +#define LWIP_SELECT_MAXNFDS FD_SETSIZE +#endif /* FD_SET */ + +/* poll-related defines and types */ +/* @todo: find a better way to guard the definition of these defines and types if already defined */ +#if !defined(POLLIN) && !defined(POLLOUT) +#define POLLIN 0x1 +#define POLLOUT 0x2 +#define POLLERR 0x4 +#define POLLNVAL 0x8 +/* Below values are unimplemented */ +#define POLLRDNORM 0x10 +#define POLLRDBAND 0x20 +#define POLLPRI 0x40 +#define POLLWRNORM 0x80 +#define POLLWRBAND 0x100 +#define POLLHUP 0x200 +typedef unsigned int nfds_t; +struct pollfd +{ + int fd; + short events; + short revents; +}; +#endif + +/** LWIP_TIMEVAL_PRIVATE: if you want to use the struct timeval provided + * by your system, set this to 0 and include in cc.h */ +#ifndef LWIP_TIMEVAL_PRIVATE +#define LWIP_TIMEVAL_PRIVATE 1 +#endif + +#if LWIP_TIMEVAL_PRIVATE +struct timeval { + long tv_sec; /* seconds */ + long tv_usec; /* and microseconds */ +}; +#endif /* LWIP_TIMEVAL_PRIVATE */ + +#define lwip_socket_init() /* Compatibility define, no init needed. 
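
/*
 * Usage sketch for the fd_set macros above (illustrative only, not part of
 * the lwIP sources): waiting up to one second for a socket to become
 * readable. Assumes LWIP_SOCKET_SELECT is enabled.
 */
#include "lwip/sockets.h"

static int example_wait_readable(int s)
{
  fd_set readset;
  struct timeval tv;
  int ret;

  FD_ZERO(&readset);
  FD_SET(s, &readset);
  tv.tv_sec  = 1;
  tv.tv_usec = 0;

  /* returns the number of ready descriptors, 0 on timeout, <0 on error */
  ret = lwip_select(s + 1, &readset, NULL, NULL, &tv);
  return (ret > 0) && FD_ISSET(s, &readset);
}
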
*/ +void lwip_socket_thread_init(void); /* LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */ +void lwip_socket_thread_cleanup(void); /* LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */ + +#if LWIP_COMPAT_SOCKETS == 2 +/* This helps code parsers/code completion by not having the COMPAT functions as defines */ +#define lwip_accept accept +#define lwip_bind bind +#define lwip_shutdown shutdown +#define lwip_getpeername getpeername +#define lwip_getsockname getsockname +#define lwip_setsockopt setsockopt +#define lwip_getsockopt getsockopt +#define lwip_close closesocket +#define lwip_connect connect +#define lwip_listen listen +#define lwip_recv recv +#define lwip_recvmsg recvmsg +#define lwip_recvfrom recvfrom +#define lwip_send send +#define lwip_sendmsg sendmsg +#define lwip_sendto sendto +#define lwip_socket socket +#if LWIP_SOCKET_SELECT +#define lwip_select select +#endif +#if LWIP_SOCKET_POLL +#define lwip_poll poll +#endif +#define lwip_ioctl ioctlsocket +#define lwip_inet_ntop inet_ntop +#define lwip_inet_pton inet_pton + +#if LWIP_POSIX_SOCKETS_IO_NAMES +#define lwip_read read +#define lwip_readv readv +#define lwip_write write +#define lwip_writev writev +#undef lwip_close +#define lwip_close close +#define closesocket(s) close(s) +int fcntl(int s, int cmd, ...); +#undef lwip_ioctl +#define lwip_ioctl ioctl +#define ioctlsocket ioctl +#endif /* LWIP_POSIX_SOCKETS_IO_NAMES */ +#endif /* LWIP_COMPAT_SOCKETS == 2 */ + +int lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen); +int lwip_bind(int s, const struct sockaddr *name, socklen_t namelen); +int lwip_shutdown(int s, int how); +int lwip_getpeername (int s, struct sockaddr *name, socklen_t *namelen); +int lwip_getsockname (int s, struct sockaddr *name, socklen_t *namelen); +int lwip_getsockopt (int s, int level, int optname, void *optval, socklen_t *optlen); +int lwip_setsockopt (int s, int level, int optname, const void *optval, socklen_t optlen); + int lwip_close(int s); +int lwip_connect(int s, const struct sockaddr *name, socklen_t namelen); +int lwip_listen(int s, int backlog); +ssize_t lwip_recv(int s, void *mem, size_t len, int flags); +ssize_t lwip_read(int s, void *mem, size_t len); +ssize_t lwip_readv(int s, const struct iovec *iov, int iovcnt); +ssize_t lwip_recvfrom(int s, void *mem, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen); +ssize_t lwip_recvmsg(int s, struct msghdr *message, int flags); +ssize_t lwip_send(int s, const void *dataptr, size_t size, int flags); +ssize_t lwip_sendmsg(int s, const struct msghdr *message, int flags); +ssize_t lwip_sendto(int s, const void *dataptr, size_t size, int flags, + const struct sockaddr *to, socklen_t tolen); +int lwip_socket(int domain, int type, int protocol); +ssize_t lwip_write(int s, const void *dataptr, size_t size); +ssize_t lwip_writev(int s, const struct iovec *iov, int iovcnt); +#if LWIP_SOCKET_SELECT +int lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset, + struct timeval *timeout); +#endif +#if LWIP_SOCKET_POLL +int lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout); +#endif +int lwip_ioctl(int s, long cmd, void *argp); +int lwip_fcntl(int s, int cmd, int val); +const char *lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size); +int lwip_inet_pton(int af, const char *src, void *dst); + +#if LWIP_COMPAT_SOCKETS +#if LWIP_COMPAT_SOCKETS != 2 +/** @ingroup socket */ +#define accept(s,addr,addrlen) lwip_accept(s,addr,addrlen) +/** @ingroup socket */ +#define 
bind(s,name,namelen) lwip_bind(s,name,namelen) +/** @ingroup socket */ +#define shutdown(s,how) lwip_shutdown(s,how) +/** @ingroup socket */ +#define getpeername(s,name,namelen) lwip_getpeername(s,name,namelen) +/** @ingroup socket */ +#define getsockname(s,name,namelen) lwip_getsockname(s,name,namelen) +/** @ingroup socket */ +#define setsockopt(s,level,optname,opval,optlen) lwip_setsockopt(s,level,optname,opval,optlen) +/** @ingroup socket */ +#define getsockopt(s,level,optname,opval,optlen) lwip_getsockopt(s,level,optname,opval,optlen) +/** @ingroup socket */ +#define closesocket(s) lwip_close(s) +/** @ingroup socket */ +#define connect(s,name,namelen) lwip_connect(s,name,namelen) +/** @ingroup socket */ +#define listen(s,backlog) lwip_listen(s,backlog) +/** @ingroup socket */ +#define recv(s,mem,len,flags) lwip_recv(s,mem,len,flags) +/** @ingroup socket */ +#define recvmsg(s,message,flags) lwip_recvmsg(s,message,flags) +/** @ingroup socket */ +#define recvfrom(s,mem,len,flags,from,fromlen) lwip_recvfrom(s,mem,len,flags,from,fromlen) +/** @ingroup socket */ +#define send(s,dataptr,size,flags) lwip_send(s,dataptr,size,flags) +/** @ingroup socket */ +#define sendmsg(s,message,flags) lwip_sendmsg(s,message,flags) +/** @ingroup socket */ +#define sendto(s,dataptr,size,flags,to,tolen) lwip_sendto(s,dataptr,size,flags,to,tolen) +/** @ingroup socket */ +#define socket(domain,type,protocol) lwip_socket(domain,type,protocol) +#if LWIP_SOCKET_SELECT +/** @ingroup socket */ +#define select(maxfdp1,readset,writeset,exceptset,timeout) lwip_select(maxfdp1,readset,writeset,exceptset,timeout) +#endif +#if LWIP_SOCKET_POLL +/** @ingroup socket */ +#define poll(fds,nfds,timeout) lwip_poll(fds,nfds,timeout) +#endif +/** @ingroup socket */ +#define ioctlsocket(s,cmd,argp) lwip_ioctl(s,cmd,argp) +/** @ingroup socket */ +#define inet_ntop(af,src,dst,size) lwip_inet_ntop(af,src,dst,size) +/** @ingroup socket */ +#define inet_pton(af,src,dst) lwip_inet_pton(af,src,dst) + +#if LWIP_POSIX_SOCKETS_IO_NAMES +/** @ingroup socket */ +#define read(s,mem,len) lwip_read(s,mem,len) +/** @ingroup socket */ +#define readv(s,iov,iovcnt) lwip_readv(s,iov,iovcnt) +/** @ingroup socket */ +#define write(s,dataptr,len) lwip_write(s,dataptr,len) +/** @ingroup socket */ +#define writev(s,iov,iovcnt) lwip_writev(s,iov,iovcnt) +/** @ingroup socket */ +#define close(s) lwip_close(s) +/** @ingroup socket */ +#define fcntl(s,cmd,val) lwip_fcntl(s,cmd,val) +/** @ingroup socket */ +#define ioctl(s,cmd,argp) lwip_ioctl(s,cmd,argp) +#endif /* LWIP_POSIX_SOCKETS_IO_NAMES */ +#endif /* LWIP_COMPAT_SOCKETS != 2 */ + +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SOCKET */ + +#endif /* LWIP_HDR_SOCKETS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h b/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h new file mode 100644 index 00000000..b570dbac --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h @@ -0,0 +1,491 @@ +/** + * @file + * Statistics API (to be used from TCPIP thread) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_STATS_H +#define LWIP_HDR_STATS_H + +#include "lwip/opt.h" + +#include "lwip/mem.h" +#include "lwip/memp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_STATS + +#ifndef LWIP_STATS_LARGE +#define LWIP_STATS_LARGE 0 +#endif + +#if LWIP_STATS_LARGE +#define STAT_COUNTER u32_t +#define STAT_COUNTER_F U32_F +#else +#define STAT_COUNTER u16_t +#define STAT_COUNTER_F U16_F +#endif + +/** Protocol related stats */ +struct stats_proto { + STAT_COUNTER xmit; /* Transmitted packets. */ + STAT_COUNTER recv; /* Received packets. */ + STAT_COUNTER fw; /* Forwarded packets. */ + STAT_COUNTER drop; /* Dropped packets. */ + STAT_COUNTER chkerr; /* Checksum error. */ + STAT_COUNTER lenerr; /* Invalid length error. */ + STAT_COUNTER memerr; /* Out of memory error. */ + STAT_COUNTER rterr; /* Routing error. */ + STAT_COUNTER proterr; /* Protocol error. */ + STAT_COUNTER opterr; /* Error in options. */ + STAT_COUNTER err; /* Misc error. */ + STAT_COUNTER cachehit; +}; + +/** IGMP stats */ +struct stats_igmp { + STAT_COUNTER xmit; /* Transmitted packets. */ + STAT_COUNTER recv; /* Received packets. */ + STAT_COUNTER drop; /* Dropped packets. */ + STAT_COUNTER chkerr; /* Checksum error. */ + STAT_COUNTER lenerr; /* Invalid length error. */ + STAT_COUNTER memerr; /* Out of memory error. */ + STAT_COUNTER proterr; /* Protocol error. */ + STAT_COUNTER rx_v1; /* Received v1 frames. */ + STAT_COUNTER rx_group; /* Received group-specific queries. */ + STAT_COUNTER rx_general; /* Received general queries. */ + STAT_COUNTER rx_report; /* Received reports. */ + STAT_COUNTER tx_join; /* Sent joins. */ + STAT_COUNTER tx_leave; /* Sent leaves. */ + STAT_COUNTER tx_report; /* Sent reports. 
*/ +}; + +/** Memory stats */ +struct stats_mem { +#if defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY + const char *name; +#endif /* defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY */ + STAT_COUNTER err; + mem_size_t avail; + mem_size_t used; + mem_size_t max; + STAT_COUNTER illegal; +}; + +/** System element stats */ +struct stats_syselem { + STAT_COUNTER used; + STAT_COUNTER max; + STAT_COUNTER err; +}; + +/** System stats */ +struct stats_sys { + struct stats_syselem sem; + struct stats_syselem mutex; + struct stats_syselem mbox; +}; + +/** SNMP MIB2 stats */ +struct stats_mib2 { + /* IP */ + u32_t ipinhdrerrors; + u32_t ipinaddrerrors; + u32_t ipinunknownprotos; + u32_t ipindiscards; + u32_t ipindelivers; + u32_t ipoutrequests; + u32_t ipoutdiscards; + u32_t ipoutnoroutes; + u32_t ipreasmoks; + u32_t ipreasmfails; + u32_t ipfragoks; + u32_t ipfragfails; + u32_t ipfragcreates; + u32_t ipreasmreqds; + u32_t ipforwdatagrams; + u32_t ipinreceives; + + /* TCP */ + u32_t tcpactiveopens; + u32_t tcppassiveopens; + u32_t tcpattemptfails; + u32_t tcpestabresets; + u32_t tcpoutsegs; + u32_t tcpretranssegs; + u32_t tcpinsegs; + u32_t tcpinerrs; + u32_t tcpoutrsts; + + /* UDP */ + u32_t udpindatagrams; + u32_t udpnoports; + u32_t udpinerrors; + u32_t udpoutdatagrams; + + /* ICMP */ + u32_t icmpinmsgs; + u32_t icmpinerrors; + u32_t icmpindestunreachs; + u32_t icmpintimeexcds; + u32_t icmpinparmprobs; + u32_t icmpinsrcquenchs; + u32_t icmpinredirects; + u32_t icmpinechos; + u32_t icmpinechoreps; + u32_t icmpintimestamps; + u32_t icmpintimestampreps; + u32_t icmpinaddrmasks; + u32_t icmpinaddrmaskreps; + u32_t icmpoutmsgs; + u32_t icmpouterrors; + u32_t icmpoutdestunreachs; + u32_t icmpouttimeexcds; + u32_t icmpoutechos; /* can be incremented by user application ('ping') */ + u32_t icmpoutechoreps; +}; + +/** + * @ingroup netif_mib2 + * SNMP MIB2 interface stats + */ +struct stats_mib2_netif_ctrs { + /** The total number of octets received on the interface, including framing characters */ + u32_t ifinoctets; + /** The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were + * not addressed to a multicast or broadcast address at this sub-layer */ + u32_t ifinucastpkts; + /** The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were + * addressed to a multicast or broadcast address at this sub-layer */ + u32_t ifinnucastpkts; + /** The number of inbound packets which were chosen to be discarded even though no errors had + * been detected to prevent their being deliverable to a higher-layer protocol. One possible + * reason for discarding such a packet could be to free up buffer space */ + u32_t ifindiscards; + /** For packet-oriented interfaces, the number of inbound packets that contained errors + * preventing them from being deliverable to a higher-layer protocol. For character- + * oriented or fixed-length interfaces, the number of inbound transmission units that + * contained errors preventing them from being deliverable to a higher-layer protocol. */ + u32_t ifinerrors; + /** For packet-oriented interfaces, the number of packets received via the interface which + * were discarded because of an unknown or unsupported protocol. For character-oriented + * or fixed-length interfaces that support protocol multiplexing the number of transmission + * units received via the interface which were discarded because of an unknown or unsupported + * protocol. 
For any interface that does not support protocol multiplexing, this counter will + * always be 0 */ + u32_t ifinunknownprotos; + /** The total number of octets transmitted out of the interface, including framing characters. */ + u32_t ifoutoctets; + /** The total number of packets that higher-level protocols requested be transmitted, and + * which were not addressed to a multicast or broadcast address at this sub-layer, including + * those that were discarded or not sent. */ + u32_t ifoutucastpkts; + /** The total number of packets that higher-level protocols requested be transmitted, and which + * were addressed to a multicast or broadcast address at this sub-layer, including + * those that were discarded or not sent. */ + u32_t ifoutnucastpkts; + /** The number of outbound packets which were chosen to be discarded even though no errors had + * been detected to prevent their being transmitted. One possible reason for discarding + * such a packet could be to free up buffer space. */ + u32_t ifoutdiscards; + /** For packet-oriented interfaces, the number of outbound packets that could not be transmitted + * because of errors. For character-oriented or fixed-length interfaces, the number of outbound + * transmission units that could not be transmitted because of errors. */ + u32_t ifouterrors; +}; + +/** lwIP stats container */ +struct stats_ { +#if LINK_STATS + /** Link level */ + struct stats_proto link; +#endif +#if ETHARP_STATS + /** ARP */ + struct stats_proto etharp; +#endif +#if IPFRAG_STATS + /** Fragmentation */ + struct stats_proto ip_frag; +#endif +#if IP_STATS + /** IP */ + struct stats_proto ip; +#endif +#if ICMP_STATS + /** ICMP */ + struct stats_proto icmp; +#endif +#if IGMP_STATS + /** IGMP */ + struct stats_igmp igmp; +#endif +#if UDP_STATS + /** UDP */ + struct stats_proto udp; +#endif +#if TCP_STATS + /** TCP */ + struct stats_proto tcp; +#endif +#if MEM_STATS + /** Heap */ + struct stats_mem mem; +#endif +#if MEMP_STATS + /** Internal memory pools */ + struct stats_mem *memp[MEMP_MAX]; +#endif +#if SYS_STATS + /** System */ + struct stats_sys sys; +#endif +#if IP6_STATS + /** IPv6 */ + struct stats_proto ip6; +#endif +#if ICMP6_STATS + /** ICMP6 */ + struct stats_proto icmp6; +#endif +#if IP6_FRAG_STATS + /** IPv6 fragmentation */ + struct stats_proto ip6_frag; +#endif +#if MLD6_STATS + /** Multicast listener discovery */ + struct stats_igmp mld6; +#endif +#if ND6_STATS + /** Neighbor discovery */ + struct stats_proto nd6; +#endif +#if MIB2_STATS + /** SNMP MIB2 */ + struct stats_mib2 mib2; +#endif +}; + +/** Global variable containing lwIP internal statistics. Add this to your debugger's watchlist. 
*/ +extern struct stats_ lwip_stats; + +/** Init statistics */ +void stats_init(void); + +#define STATS_INC(x) ++lwip_stats.x +#define STATS_DEC(x) --lwip_stats.x +#define STATS_INC_USED(x, y, type) do { lwip_stats.x.used = (type)(lwip_stats.x.used + y); \ + if (lwip_stats.x.max < lwip_stats.x.used) { \ + lwip_stats.x.max = lwip_stats.x.used; \ + } \ + } while(0) +#define STATS_GET(x) lwip_stats.x +#else /* LWIP_STATS */ +#define stats_init() +#define STATS_INC(x) +#define STATS_DEC(x) +#define STATS_INC_USED(x, y, type) +#endif /* LWIP_STATS */ + +#if TCP_STATS +#define TCP_STATS_INC(x) STATS_INC(x) +#define TCP_STATS_DISPLAY() stats_display_proto(&lwip_stats.tcp, "TCP") +#else +#define TCP_STATS_INC(x) +#define TCP_STATS_DISPLAY() +#endif + +#if UDP_STATS +#define UDP_STATS_INC(x) STATS_INC(x) +#define UDP_STATS_DISPLAY() stats_display_proto(&lwip_stats.udp, "UDP") +#else +#define UDP_STATS_INC(x) +#define UDP_STATS_DISPLAY() +#endif + +#if ICMP_STATS +#define ICMP_STATS_INC(x) STATS_INC(x) +#define ICMP_STATS_DISPLAY() stats_display_proto(&lwip_stats.icmp, "ICMP") +#else +#define ICMP_STATS_INC(x) +#define ICMP_STATS_DISPLAY() +#endif + +#if IGMP_STATS +#define IGMP_STATS_INC(x) STATS_INC(x) +#define IGMP_STATS_DISPLAY() stats_display_igmp(&lwip_stats.igmp, "IGMP") +#else +#define IGMP_STATS_INC(x) +#define IGMP_STATS_DISPLAY() +#endif + +#if IP_STATS +#define IP_STATS_INC(x) STATS_INC(x) +#define IP_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip, "IP") +#else +#define IP_STATS_INC(x) +#define IP_STATS_DISPLAY() +#endif + +#if IPFRAG_STATS +#define IPFRAG_STATS_INC(x) STATS_INC(x) +#define IPFRAG_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip_frag, "IP_FRAG") +#else +#define IPFRAG_STATS_INC(x) +#define IPFRAG_STATS_DISPLAY() +#endif + +#if ETHARP_STATS +#define ETHARP_STATS_INC(x) STATS_INC(x) +#define ETHARP_STATS_DISPLAY() stats_display_proto(&lwip_stats.etharp, "ETHARP") +#else +#define ETHARP_STATS_INC(x) +#define ETHARP_STATS_DISPLAY() +#endif + +#if LINK_STATS +#define LINK_STATS_INC(x) STATS_INC(x) +#define LINK_STATS_DISPLAY() stats_display_proto(&lwip_stats.link, "LINK") +#else +#define LINK_STATS_INC(x) +#define LINK_STATS_DISPLAY() +#endif + +#if MEM_STATS +#define MEM_STATS_AVAIL(x, y) lwip_stats.mem.x = y +#define MEM_STATS_INC(x) STATS_INC(mem.x) +#define MEM_STATS_INC_USED(x, y) STATS_INC_USED(mem, y, mem_size_t) +#define MEM_STATS_DEC_USED(x, y) lwip_stats.mem.x = (mem_size_t)((lwip_stats.mem.x) - (y)) +#define MEM_STATS_DISPLAY() stats_display_mem(&lwip_stats.mem, "HEAP") +#else +#define MEM_STATS_AVAIL(x, y) +#define MEM_STATS_INC(x) +#define MEM_STATS_INC_USED(x, y) +#define MEM_STATS_DEC_USED(x, y) +#define MEM_STATS_DISPLAY() +#endif + + #if MEMP_STATS +#define MEMP_STATS_DEC(x, i) STATS_DEC(memp[i]->x) +#define MEMP_STATS_DISPLAY(i) stats_display_memp(lwip_stats.memp[i], i) +#define MEMP_STATS_GET(x, i) STATS_GET(memp[i]->x) + #else +#define MEMP_STATS_DEC(x, i) +#define MEMP_STATS_DISPLAY(i) +#define MEMP_STATS_GET(x, i) 0 +#endif + +#if SYS_STATS +#define SYS_STATS_INC(x) STATS_INC(sys.x) +#define SYS_STATS_DEC(x) STATS_DEC(sys.x) +#define SYS_STATS_INC_USED(x) STATS_INC_USED(sys.x, 1, STAT_COUNTER) +#define SYS_STATS_DISPLAY() stats_display_sys(&lwip_stats.sys) +#else +#define SYS_STATS_INC(x) +#define SYS_STATS_DEC(x) +#define SYS_STATS_INC_USED(x) +#define SYS_STATS_DISPLAY() +#endif + +#if IP6_STATS +#define IP6_STATS_INC(x) STATS_INC(x) +#define IP6_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip6, "IPv6") +#else +#define IP6_STATS_INC(x) 
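
/*
 * Usage sketch for the statistics API above (illustrative only, not part of
 * the lwIP sources): dumping a few TCP counters from lwip_stats. Requires
 * LWIP_STATS and TCP_STATS to be enabled in lwipopts.h; printf is only used
 * here for illustration.
 */
#include <stdio.h>
#include "lwip/stats.h"

static void example_dump_tcp_stats(void)
{
#if LWIP_STATS && TCP_STATS
  printf("TCP xmit=%"STAT_COUNTER_F" recv=%"STAT_COUNTER_F" drop=%"STAT_COUNTER_F"\n",
         lwip_stats.tcp.xmit, lwip_stats.tcp.recv, lwip_stats.tcp.drop);
#endif
}
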
+#define IP6_STATS_DISPLAY() +#endif + +#if ICMP6_STATS +#define ICMP6_STATS_INC(x) STATS_INC(x) +#define ICMP6_STATS_DISPLAY() stats_display_proto(&lwip_stats.icmp6, "ICMPv6") +#else +#define ICMP6_STATS_INC(x) +#define ICMP6_STATS_DISPLAY() +#endif + +#if IP6_FRAG_STATS +#define IP6_FRAG_STATS_INC(x) STATS_INC(x) +#define IP6_FRAG_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip6_frag, "IPv6 FRAG") +#else +#define IP6_FRAG_STATS_INC(x) +#define IP6_FRAG_STATS_DISPLAY() +#endif + +#if MLD6_STATS +#define MLD6_STATS_INC(x) STATS_INC(x) +#define MLD6_STATS_DISPLAY() stats_display_igmp(&lwip_stats.mld6, "MLDv1") +#else +#define MLD6_STATS_INC(x) +#define MLD6_STATS_DISPLAY() +#endif + +#if ND6_STATS +#define ND6_STATS_INC(x) STATS_INC(x) +#define ND6_STATS_DISPLAY() stats_display_proto(&lwip_stats.nd6, "ND") +#else +#define ND6_STATS_INC(x) +#define ND6_STATS_DISPLAY() +#endif + +#if MIB2_STATS +#define MIB2_STATS_INC(x) STATS_INC(x) +#else +#define MIB2_STATS_INC(x) +#endif + +/* Display of statistics */ +#if LWIP_STATS_DISPLAY +void stats_display(void); +void stats_display_proto(struct stats_proto *proto, const char *name); +void stats_display_igmp(struct stats_igmp *igmp, const char *name); +void stats_display_mem(struct stats_mem *mem, const char *name); +void stats_display_memp(struct stats_mem *mem, int index); +void stats_display_sys(struct stats_sys *sys); +#else /* LWIP_STATS_DISPLAY */ +#define stats_display() +#define stats_display_proto(proto, name) +#define stats_display_igmp(igmp, name) +#define stats_display_mem(mem, name) +#define stats_display_memp(mem, index) +#define stats_display_sys(sys) +#endif /* LWIP_STATS_DISPLAY */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_STATS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h b/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h new file mode 100644 index 00000000..168e465b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h @@ -0,0 +1,560 @@ +/** + * @file + * OS abstraction layer + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +#ifndef LWIP_HDR_SYS_H +#define LWIP_HDR_SYS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if NO_SYS + +/* For a totally minimal and standalone system, we provide null + definitions of the sys_ functions. */ +typedef u8_t sys_sem_t; +typedef u8_t sys_mutex_t; +typedef u8_t sys_mbox_t; + +#define sys_sem_new(s, c) ERR_OK +#define sys_sem_signal(s) +#define sys_sem_wait(s) +#define sys_arch_sem_wait(s,t) +#define sys_sem_free(s) +#define sys_sem_valid(s) 0 +#define sys_sem_valid_val(s) 0 +#define sys_sem_set_invalid(s) +#define sys_sem_set_invalid_val(s) +#define sys_mutex_new(mu) ERR_OK +#define sys_mutex_lock(mu) +#define sys_mutex_unlock(mu) +#define sys_mutex_free(mu) +#define sys_mutex_valid(mu) 0 +#define sys_mutex_set_invalid(mu) +#define sys_mbox_new(m, s) ERR_OK +#define sys_mbox_fetch(m,d) +#define sys_mbox_tryfetch(m,d) +#define sys_mbox_post(m,d) +#define sys_mbox_trypost(m,d) +#define sys_mbox_free(m) +#define sys_mbox_valid(m) +#define sys_mbox_valid_val(m) +#define sys_mbox_set_invalid(m) +#define sys_mbox_set_invalid_val(m) + +#define sys_thread_new(n,t,a,s,p) + +#define sys_msleep(t) + +#else /* NO_SYS */ + +/** Return code for timeouts from sys_arch_mbox_fetch and sys_arch_sem_wait */ +#define SYS_ARCH_TIMEOUT 0xffffffffUL + +/** sys_mbox_tryfetch() returns SYS_MBOX_EMPTY if appropriate. + * For now we use the same magic value, but we allow this to change in future. + */ +#define SYS_MBOX_EMPTY SYS_ARCH_TIMEOUT + +#include "lwip/err.h" +#include "arch/sys_arch.h" + +/** Function prototype for thread functions */ +typedef void (*lwip_thread_fn)(void *arg); + +/* Function prototypes for functions to be implemented by platform ports + (in sys_arch.c) */ + +/* Mutex functions: */ + +/** Define LWIP_COMPAT_MUTEX if the port has no mutexes and binary semaphores + should be used instead */ +#ifndef LWIP_COMPAT_MUTEX +#define LWIP_COMPAT_MUTEX 0 +#endif + +#if LWIP_COMPAT_MUTEX +/* for old ports that don't have mutexes: define them to binary semaphores */ +#define sys_mutex_t sys_sem_t +#define sys_mutex_new(mutex) sys_sem_new(mutex, 1) +#define sys_mutex_lock(mutex) sys_sem_wait(mutex) +#define sys_mutex_unlock(mutex) sys_sem_signal(mutex) +#define sys_mutex_free(mutex) sys_sem_free(mutex) +#define sys_mutex_valid(mutex) sys_sem_valid(mutex) +#define sys_mutex_set_invalid(mutex) sys_sem_set_invalid(mutex) + +#else /* LWIP_COMPAT_MUTEX */ + +/** + * @ingroup sys_mutex + * Create a new mutex. + * Note that mutexes are expected to not be taken recursively by the lwIP code, + * so both implementation types (recursive or non-recursive) should work. + * The mutex is allocated to the memory that 'mutex' + * points to (which can be both a pointer or the actual OS structure). + * If the mutex has been created, ERR_OK should be returned. Returning any + * other error will provide a hint what went wrong, but except for assertions, + * no real error handling is implemented. + * + * @param mutex pointer to the mutex to create + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_mutex_new(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Blocks the thread until the mutex can be grabbed. + * @param mutex the mutex to lock + */ +void sys_mutex_lock(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Releases the mutex previously locked through 'sys_mutex_lock()'. 
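For orientation, this is a minimal sketch of the mutex hooks a port's sys_arch.c could provide, assuming a FreeRTOS-based port in which arch/sys_arch.h typedefs sys_mutex_t as SemaphoreHandle_t; both the RTOS choice and that typedef are assumptions of the sketch, not part of this patch.

#include "FreeRTOS.h"
#include "semphr.h"
#include "lwip/sys.h"

/* Assumes: typedef SemaphoreHandle_t sys_mutex_t; in arch/sys_arch.h */
err_t sys_mutex_new(sys_mutex_t *mutex)
{
  *mutex = xSemaphoreCreateMutex();              /* allocate the RTOS mutex */
  return (*mutex != NULL) ? ERR_OK : ERR_MEM;
}

void sys_mutex_lock(sys_mutex_t *mutex)
{
  (void)xSemaphoreTake(*mutex, portMAX_DELAY);   /* block until grabbed */
}

void sys_mutex_unlock(sys_mutex_t *mutex)
{
  (void)xSemaphoreGive(*mutex);
}

void sys_mutex_free(sys_mutex_t *mutex)
{
  vSemaphoreDelete(*mutex);
}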
+ * @param mutex the mutex to unlock + */ +void sys_mutex_unlock(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Deallocates a mutex. + * @param mutex the mutex to delete + */ +void sys_mutex_free(sys_mutex_t *mutex); +#ifndef sys_mutex_valid +/** + * @ingroup sys_mutex + * Returns 1 if the mutes is valid, 0 if it is not valid. + * When using pointers, a simple way is to check the pointer for != NULL. + * When directly using OS structures, implementing this may be more complex. + * This may also be a define, in which case the function is not prototyped. + */ +int sys_mutex_valid(sys_mutex_t *mutex); +#endif +#ifndef sys_mutex_set_invalid +/** + * @ingroup sys_mutex + * Invalidate a mutex so that sys_mutex_valid() returns 0. + * ATTENTION: This does NOT mean that the mutex shall be deallocated: + * sys_mutex_free() is always called before calling this function! + * This may also be a define, in which case the function is not prototyped. + */ +void sys_mutex_set_invalid(sys_mutex_t *mutex); +#endif +#endif /* LWIP_COMPAT_MUTEX */ + +/* Semaphore functions: */ + +/** + * @ingroup sys_sem + * Create a new semaphore + * Creates a new semaphore. The semaphore is allocated to the memory that 'sem' + * points to (which can be both a pointer or the actual OS structure). + * The "count" argument specifies the initial state of the semaphore (which is + * either 0 or 1). + * If the semaphore has been created, ERR_OK should be returned. Returning any + * other error will provide a hint what went wrong, but except for assertions, + * no real error handling is implemented. + * + * @param sem pointer to the semaphore to create + * @param count initial count of the semaphore + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_sem_new(sys_sem_t *sem, u8_t count); +/** + * @ingroup sys_sem + * Signals a semaphore + * @param sem the semaphore to signal + */ +void sys_sem_signal(sys_sem_t *sem); +/** + * @ingroup sys_sem + * Blocks the thread while waiting for the semaphore to be signaled. If the + * "timeout" argument is non-zero, the thread should only be blocked for the + * specified time (measured in milliseconds). If the "timeout" argument is zero, + * the thread should be blocked until the semaphore is signalled. + * + * The return value is SYS_ARCH_TIMEOUT if the semaphore wasn't signaled within + * the specified time or any other value if it was signaled (with or without + * waiting). + * Notice that lwIP implements a function with a similar name, + * sys_sem_wait(), that uses the sys_arch_sem_wait() function. + * + * @param sem the semaphore to wait for + * @param timeout timeout in milliseconds to wait (0 = wait forever) + * @return SYS_ARCH_TIMEOUT on timeout, any other value on success + */ +u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout); +/** + * @ingroup sys_sem + * Deallocates a semaphore. + * @param sem semaphore to delete + */ +void sys_sem_free(sys_sem_t *sem); +/** Wait for a semaphore - forever/no timeout */ +#define sys_sem_wait(sem) sys_arch_sem_wait(sem, 0) +#ifndef sys_sem_valid +/** + * @ingroup sys_sem + * Returns 1 if the semaphore is valid, 0 if it is not valid. + * When using pointers, a simple way is to check the pointer for != NULL. + * When directly using OS structures, implementing this may be more complex. + * This may also be a define, in which case the function is not prototyped. 
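A possible semaphore implementation for the same hypothetical FreeRTOS port, covering sys_sem_new(), sys_sem_signal() and the timed sys_arch_sem_wait() described above; the sys_sem_t typedef and the use of sys_now() for the elapsed-time return value are assumptions of the sketch.

#include "FreeRTOS.h"
#include "semphr.h"
#include "lwip/sys.h"

/* Assumes: typedef SemaphoreHandle_t sys_sem_t; in arch/sys_arch.h */
err_t sys_sem_new(sys_sem_t *sem, u8_t count)
{
  *sem = xSemaphoreCreateBinary();
  if (*sem == NULL) {
    return ERR_MEM;
  }
  if (count != 0) {
    (void)xSemaphoreGive(*sem);        /* start signalled when count == 1 */
  }
  return ERR_OK;
}

void sys_sem_signal(sys_sem_t *sem)
{
  (void)xSemaphoreGive(*sem);
}

u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout)
{
  TickType_t wait = (timeout == 0) ? portMAX_DELAY : pdMS_TO_TICKS(timeout);
  u32_t start = sys_now();
  if (xSemaphoreTake(*sem, wait) != pdTRUE) {
    return SYS_ARCH_TIMEOUT;           /* not signalled within 'timeout' ms */
  }
  return sys_now() - start;            /* elapsed ms, never SYS_ARCH_TIMEOUT */
}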
+ */ +int sys_sem_valid(sys_sem_t *sem); +#endif +#ifndef sys_sem_set_invalid +/** + * @ingroup sys_sem + * Invalidate a semaphore so that sys_sem_valid() returns 0. + * ATTENTION: This does NOT mean that the semaphore shall be deallocated: + * sys_sem_free() is always called before calling this function! + * This may also be a define, in which case the function is not prototyped. + */ +void sys_sem_set_invalid(sys_sem_t *sem); +#endif +#ifndef sys_sem_valid_val +/** + * Same as sys_sem_valid() but taking a value, not a pointer + */ +#define sys_sem_valid_val(sem) sys_sem_valid(&(sem)) +#endif +#ifndef sys_sem_set_invalid_val +/** + * Same as sys_sem_set_invalid() but taking a value, not a pointer + */ +#define sys_sem_set_invalid_val(sem) sys_sem_set_invalid(&(sem)) +#endif + +#ifndef sys_msleep +/** + * @ingroup sys_misc + * Sleep for specified number of ms + */ +void sys_msleep(u32_t ms); /* only has a (close to) 1 ms resolution. */ +#endif + +/* Mailbox functions. */ + +/** + * @ingroup sys_mbox + * Creates an empty mailbox for maximum "size" elements. Elements stored + * in mailboxes are pointers. You have to define macros "_MBOX_SIZE" + * in your lwipopts.h, or ignore this parameter in your implementation + * and use a default size. + * If the mailbox has been created, ERR_OK should be returned. Returning any + * other error will provide a hint what went wrong, but except for assertions, + * no real error handling is implemented. + * + * @param mbox pointer to the mbox to create + * @param size (minimum) number of messages in this mbox + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_mbox_new(sys_mbox_t *mbox, int size); +/** + * @ingroup sys_mbox + * Post a message to an mbox - may not fail + * -> blocks if full, only to be used from tasks NOT from ISR! + * + * @param mbox mbox to posts the message + * @param msg message to post (ATTENTION: can be NULL) + */ +void sys_mbox_post(sys_mbox_t *mbox, void *msg); +/** + * @ingroup sys_mbox + * Try to post a message to an mbox - may fail if full. + * Can be used from ISR (if the sys arch layer allows this). + * Returns ERR_MEM if it is full, else, ERR_OK if the "msg" is posted. + * + * @param mbox mbox to posts the message + * @param msg message to post (ATTENTION: can be NULL) + */ +err_t sys_mbox_trypost(sys_mbox_t *mbox, void *msg); +/** + * @ingroup sys_mbox + * Try to post a message to an mbox - may fail if full. + * To be be used from ISR. + * Returns ERR_MEM if it is full, else, ERR_OK if the "msg" is posted. + * + * @param mbox mbox to posts the message + * @param msg message to post (ATTENTION: can be NULL) + */ +err_t sys_mbox_trypost_fromisr(sys_mbox_t *mbox, void *msg); +/** + * @ingroup sys_mbox + * Blocks the thread until a message arrives in the mailbox, but does + * not block the thread longer than "timeout" milliseconds (similar to + * the sys_arch_sem_wait() function). If "timeout" is 0, the thread should + * be blocked until a message arrives. The "msg" argument is a result + * parameter that is set by the function (i.e., by doing "*msg = + * ptr"). The "msg" parameter maybe NULL to indicate that the message + * should be dropped. + * The return values are the same as for the sys_arch_sem_wait() function: + * SYS_ARCH_TIMEOUT if there was a timeout, any other value if a messages + * is received. + * + * Note that a function with a similar name, sys_mbox_fetch(), is + * implemented by lwIP. 
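The mailbox functions map naturally onto an RTOS message queue of void pointers. A sketch for the same assumed FreeRTOS port is shown below; the sys_mbox_t typedef is again an assumption, and only the core operations are shown.

#include "FreeRTOS.h"
#include "queue.h"
#include "lwip/sys.h"

/* Assumes: typedef QueueHandle_t sys_mbox_t; in arch/sys_arch.h */
err_t sys_mbox_new(sys_mbox_t *mbox, int size)
{
  *mbox = xQueueCreate((UBaseType_t)size, sizeof(void *));  /* mailbox of pointers */
  return (*mbox != NULL) ? ERR_OK : ERR_MEM;
}

void sys_mbox_post(sys_mbox_t *mbox, void *msg)
{
  (void)xQueueSend(*mbox, &msg, portMAX_DELAY);  /* must not fail: block if full */
}

err_t sys_mbox_trypost(sys_mbox_t *mbox, void *msg)
{
  return (xQueueSend(*mbox, &msg, 0) == pdTRUE) ? ERR_OK : ERR_MEM;
}

u32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout)
{
  void *dummy;
  void **slot = (msg != NULL) ? msg : &dummy;    /* msg may be NULL: drop the message */
  TickType_t wait = (timeout == 0) ? portMAX_DELAY : pdMS_TO_TICKS(timeout);
  u32_t start = sys_now();
  if (xQueueReceive(*mbox, slot, wait) != pdTRUE) {
    return SYS_ARCH_TIMEOUT;
  }
  return sys_now() - start;
}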
+ * + * @param mbox mbox to get a message from + * @param msg pointer where the message is stored + * @param timeout maximum time (in milliseconds) to wait for a message (0 = wait forever) + * @return SYS_ARCH_TIMEOUT on timeout, any other value if a message has been received + */ +u32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout); +/* Allow port to override with a macro, e.g. special timeout for sys_arch_mbox_fetch() */ +#ifndef sys_arch_mbox_tryfetch +/** + * @ingroup sys_mbox + * This is similar to sys_arch_mbox_fetch, however if a message is not + * present in the mailbox, it immediately returns with the code + * SYS_MBOX_EMPTY. On success 0 is returned. + * To allow for efficient implementations, this can be defined as a + * function-like macro in sys_arch.h instead of a normal function. For + * example, a naive implementation could be: + * \#define sys_arch_mbox_tryfetch(mbox,msg) sys_arch_mbox_fetch(mbox,msg,1) + * although this would introduce unnecessary delays. + * + * @param mbox mbox to get a message from + * @param msg pointer where the message is stored + * @return 0 (milliseconds) if a message has been received + * or SYS_MBOX_EMPTY if the mailbox is empty + */ +u32_t sys_arch_mbox_tryfetch(sys_mbox_t *mbox, void **msg); +#endif +/** + * For now, we map straight to sys_arch implementation. + */ +#define sys_mbox_tryfetch(mbox, msg) sys_arch_mbox_tryfetch(mbox, msg) +/** + * @ingroup sys_mbox + * Deallocates a mailbox. If there are messages still present in the + * mailbox when the mailbox is deallocated, it is an indication of a + * programming error in lwIP and the developer should be notified. + * + * @param mbox mbox to delete + */ +void sys_mbox_free(sys_mbox_t *mbox); +#define sys_mbox_fetch(mbox, msg) sys_arch_mbox_fetch(mbox, msg, 0) +#ifndef sys_mbox_valid +/** + * @ingroup sys_mbox + * Returns 1 if the mailbox is valid, 0 if it is not valid. + * When using pointers, a simple way is to check the pointer for != NULL. + * When directly using OS structures, implementing this may be more complex. + * This may also be a define, in which case the function is not prototyped. + */ +int sys_mbox_valid(sys_mbox_t *mbox); +#endif +#ifndef sys_mbox_set_invalid +/** + * @ingroup sys_mbox + * Invalidate a mailbox so that sys_mbox_valid() returns 0. + * ATTENTION: This does NOT mean that the mailbox shall be deallocated: + * sys_mbox_free() is always called before calling this function! + * This may also be a define, in which case the function is not prototyped. + */ +void sys_mbox_set_invalid(sys_mbox_t *mbox); +#endif +#ifndef sys_mbox_valid_val +/** + * Same as sys_mbox_valid() but taking a value, not a pointer + */ +#define sys_mbox_valid_val(mbox) sys_mbox_valid(&(mbox)) +#endif +#ifndef sys_mbox_set_invalid_val +/** + * Same as sys_mbox_set_invalid() but taking a value, not a pointer + */ +#define sys_mbox_set_invalid_val(mbox) sys_mbox_set_invalid(&(mbox)) +#endif + + +/** + * @ingroup sys_misc + * The only thread function: + * Starts a new thread named "name" with priority "prio" that will begin its + * execution in the function "thread()". The "arg" argument will be passed as an + * argument to the thread() function. The stack size to used for this thread is + * the "stacksize" parameter. The id of the new thread is returned. Both the id + * and the priority are system dependent. + * ATTENTION: although this function returns a value, it MUST NOT FAIL (ports have to assert this!) 
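A thread-creation hook for the assumed FreeRTOS port could look like the sketch below. The sys_thread_t typedef and the stack-depth unit (lwIP passes a size, FreeRTOS expects a depth; the conversion is port-defined) are assumptions of the sketch.

#include "FreeRTOS.h"
#include "task.h"
#include "lwip/sys.h"

/* Assumes: typedef TaskHandle_t sys_thread_t; in arch/sys_arch.h */
sys_thread_t sys_thread_new(const char *name, lwip_thread_fn thread,
                            void *arg, int stacksize, int prio)
{
  TaskHandle_t handle = NULL;
  /* Stack depth units (bytes vs. words) differ between ports; adjust as needed. */
  BaseType_t ok = xTaskCreate(thread, name, (uint16_t)stacksize, arg,
                              (UBaseType_t)prio, &handle);
  LWIP_ASSERT("sys_thread_new: xTaskCreate failed", ok == pdPASS); /* must not fail */
  return handle;
}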
+ * + * @param name human-readable name for the thread (used for debugging purposes) + * @param thread thread-function + * @param arg parameter passed to 'thread' + * @param stacksize stack size in bytes for the new thread (may be ignored by ports) + * @param prio priority of the new thread (may be ignored by ports) */ +sys_thread_t sys_thread_new(const char *name, lwip_thread_fn thread, void *arg, int stacksize, int prio); + +#endif /* NO_SYS */ + +/** + * @ingroup sys_misc + * sys_init() must be called before anything else. + * Initialize the sys_arch layer. + */ +void sys_init(void); + +#ifndef sys_jiffies +/** + * Ticks/jiffies since power up. + */ +u32_t sys_jiffies(void); +#endif + +/** + * @ingroup sys_time + * Returns the current time in milliseconds, + * may be the same as sys_jiffies or at least based on it. + * Don't care for wraparound, this is only used for time diffs. + * Not implementing this function means you cannot use some modules (e.g. TCP + * timestamps, internal timeouts for NO_SYS==1). + */ +u32_t sys_now(void); + +/* Critical Region Protection */ +/* These functions must be implemented in the sys_arch.c file. + In some implementations they can provide a more light-weight protection + mechanism than using semaphores. Otherwise semaphores can be used for + implementation */ +#ifndef SYS_ARCH_PROTECT +/** SYS_LIGHTWEIGHT_PROT + * define SYS_LIGHTWEIGHT_PROT in lwipopts.h if you want inter-task protection + * for certain critical regions during buffer allocation, deallocation and memory + * allocation and deallocation. + */ +#if SYS_LIGHTWEIGHT_PROT + +/** + * @ingroup sys_prot + * SYS_ARCH_DECL_PROTECT + * declare a protection variable. This macro will default to defining a variable of + * type sys_prot_t. If a particular port needs a different implementation, then + * this macro may be defined in sys_arch.h. + */ +#define SYS_ARCH_DECL_PROTECT(lev) sys_prot_t lev +/** + * @ingroup sys_prot + * SYS_ARCH_PROTECT + * Perform a "fast" protect. This could be implemented by + * disabling interrupts for an embedded system or by using a semaphore or + * mutex. The implementation should allow calling SYS_ARCH_PROTECT when + * already protected. The old protection level is returned in the variable + * "lev". This macro will default to calling the sys_arch_protect() function + * which should be implemented in sys_arch.c. If a particular port needs a + * different implementation, then this macro may be defined in sys_arch.h + */ +#define SYS_ARCH_PROTECT(lev) lev = sys_arch_protect() +/** + * @ingroup sys_prot + * SYS_ARCH_UNPROTECT + * Perform a "fast" set of the protection level to "lev". This could be + * implemented by setting the interrupt level to "lev" within the MACRO or by + * using a semaphore or mutex. This macro will default to calling the + * sys_arch_unprotect() function which should be implemented in + * sys_arch.c. If a particular port needs a different implementation, then + * this macro may be defined in sys_arch.h + */ +#define SYS_ARCH_UNPROTECT(lev) sys_arch_unprotect(lev) +sys_prot_t sys_arch_protect(void); +void sys_arch_unprotect(sys_prot_t pval); + +#else + +#define SYS_ARCH_DECL_PROTECT(lev) +#define SYS_ARCH_PROTECT(lev) +#define SYS_ARCH_UNPROTECT(lev) + +#endif /* SYS_LIGHTWEIGHT_PROT */ + +#endif /* SYS_ARCH_PROTECT */ + +/* + * Macros to set/get and increase/decrease variables in a thread-safe way. + * Use these for accessing variable that are used from more than one thread. 
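Two small remaining port hooks are the millisecond time base and the lightweight protection pair used by SYS_ARCH_PROTECT/UNPROTECT. The sketch below assumes a Cortex-M target with CMSIS intrinsics available, an STM32 HAL tick source (HAL_GetTick() and the HAL header name are placeholders) and sys_prot_t typedef'd to a 32-bit integer in arch/cc.h.

#include "lwip/sys.h"
#include "stm32f7xx_hal.h"   /* placeholder; the actual HAL header depends on the device family */

/* Millisecond time base for lwIP. */
u32_t sys_now(void)
{
  return HAL_GetTick();
}

/* "Fast" critical section: save the interrupt mask, disable IRQs, and
   restore the previous level on unprotect (safe when nested). */
sys_prot_t sys_arch_protect(void)
{
  sys_prot_t old = __get_PRIMASK();
  __disable_irq();
  return old;
}

void sys_arch_unprotect(sys_prot_t pval)
{
  __set_PRIMASK(pval);
}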
+ */ + +#ifndef SYS_ARCH_INC +#define SYS_ARCH_INC(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var += val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_INC */ + +#ifndef SYS_ARCH_DEC +#define SYS_ARCH_DEC(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var -= val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_DEC */ + +#ifndef SYS_ARCH_GET +#define SYS_ARCH_GET(var, ret) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + ret = var; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_GET */ + +#ifndef SYS_ARCH_SET +#define SYS_ARCH_SET(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var = val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_SET */ + +#ifndef SYS_ARCH_LOCKED +#define SYS_ARCH_LOCKED(code) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + code; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_LOCKED */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_SYS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h new file mode 100644 index 00000000..daf75994 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h @@ -0,0 +1,500 @@ +/** + * @file + * TCP API (to be used from TCPIP thread)\n + * See also @ref tcp_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCP_H +#define LWIP_HDR_TCP_H + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcpbase.h" +#include "lwip/mem.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/icmp.h" +#include "lwip/err.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct tcp_pcb; +struct tcp_pcb_listen; + +/** Function prototype for tcp accept callback functions. Called when a new + * connection can be accepted on a listening pcb. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param newpcb The new connection pcb + * @param err An error code if there has been an error accepting. + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_accept_fn)(void *arg, struct tcp_pcb *newpcb, err_t err); + +/** Function prototype for tcp receive callback functions. Called when data has + * been received. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb which received data + * @param p The received data (or NULL when the connection has been closed!) + * @param err An error code if there has been an error receiving + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_recv_fn)(void *arg, struct tcp_pcb *tpcb, + struct pbuf *p, err_t err); + +/** Function prototype for tcp sent callback functions. Called when sent data has + * been acknowledged by the remote side. Use it to free corresponding resources. + * This also means that the pcb has now space available to send new data. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb for which data has been acknowledged + * @param len The amount of bytes acknowledged + * @return ERR_OK: try to send some data by calling tcp_output + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_sent_fn)(void *arg, struct tcp_pcb *tpcb, + u16_t len); + +/** Function prototype for tcp poll callback functions. Called periodically as + * specified by @see tcp_poll. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb tcp pcb + * @return ERR_OK: try to send some data by calling tcp_output + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_poll_fn)(void *arg, struct tcp_pcb *tpcb); + +/** Function prototype for tcp error callback functions. Called when the pcb + * receives a RST or is unexpectedly closed for any other reason. + * + * @note The corresponding pcb is already freed when this callback is called! + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param err Error code to indicate why the pcb has been closed + * ERR_ABRT: aborted through tcp_abort or by a TCP timer + * ERR_RST: the connection was reset by the remote host + */ +typedef void (*tcp_err_fn)(void *arg, err_t err); + +/** Function prototype for tcp connected callback functions. Called when a pcb + * is connected to the remote side after initiating a connection attempt by + * calling tcp_connect(). 
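To show how these callback types are typically wired up with the raw-API functions declared further down in this header, here is a minimal, hypothetical echo-style listener; error handling is trimmed, only the first pbuf of a chain is echoed, and port 7 is just an example.

#include "lwip/tcp.h"

static err_t echo_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if (p == NULL) {                    /* remote side closed the connection */
    return tcp_close(tpcb);
  }
  if (err == ERR_OK) {
    tcp_recved(tpcb, p->tot_len);     /* re-open the receive window */
    (void)tcp_write(tpcb, p->payload, p->len, TCP_WRITE_FLAG_COPY);
  }
  pbuf_free(p);
  return ERR_OK;
}

static err_t echo_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);
  tcp_arg(newpcb, NULL);
  tcp_recv(newpcb, echo_recv);        /* register the receive callback */
  return ERR_OK;
}

void echo_init(void)
{
  struct tcp_pcb *pcb = tcp_new();
  if ((pcb != NULL) && (tcp_bind(pcb, IP_ADDR_ANY, 7) == ERR_OK)) {
    pcb = tcp_listen(pcb);
    tcp_accept(pcb, echo_accept);
  }
}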
+ * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb which is connected + * @param err An unused error code, always ERR_OK currently ;-) @todo! + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + * + * @note When a connection attempt fails, the error callback is currently called! + */ +typedef err_t (*tcp_connected_fn)(void *arg, struct tcp_pcb *tpcb, err_t err); + +#if LWIP_WND_SCALE +#define RCV_WND_SCALE(pcb, wnd) (((wnd) >> (pcb)->rcv_scale)) +#define SND_WND_SCALE(pcb, wnd) (((wnd) << (pcb)->snd_scale)) +#define TCPWND16(x) ((u16_t)LWIP_MIN((x), 0xFFFF)) +#define TCP_WND_MAX(pcb) ((tcpwnd_size_t)(((pcb)->flags & TF_WND_SCALE) ? TCP_WND : TCPWND16(TCP_WND))) +#else +#define RCV_WND_SCALE(pcb, wnd) (wnd) +#define SND_WND_SCALE(pcb, wnd) (wnd) +#define TCPWND16(x) (x) +#define TCP_WND_MAX(pcb) TCP_WND +#endif +/* Increments a tcpwnd_size_t and holds at max value rather than rollover */ +#define TCP_WND_INC(wnd, inc) do { \ + if ((tcpwnd_size_t)(wnd + inc) >= wnd) { \ + wnd = (tcpwnd_size_t)(wnd + inc); \ + } else { \ + wnd = (tcpwnd_size_t)-1; \ + } \ + } while(0) + +#if LWIP_TCP_SACK_OUT +/** SACK ranges to include in ACK packets. + * SACK entry is invalid if left==right. */ +struct tcp_sack_range { + /** Left edge of the SACK: the first acknowledged sequence number. */ + u32_t left; + /** Right edge of the SACK: the last acknowledged sequence number +1 (so first NOT acknowledged). */ + u32_t right; +}; +#endif /* LWIP_TCP_SACK_OUT */ + +/** Function prototype for deallocation of arguments. Called *just before* the + * pcb is freed, so don't expect to be able to do anything with this pcb! + * + * @param id ext arg id (allocated via @ref tcp_ext_arg_alloc_id) + * @param data pointer to the data (set via @ref tcp_ext_arg_set before) + */ +typedef void (*tcp_extarg_callback_pcb_destroyed_fn)(u8_t id, void *data); + +/** Function prototype to transition arguments from a listening pcb to an accepted pcb + * + * @param id ext arg id (allocated via @ref tcp_ext_arg_alloc_id) + * @param lpcb the listening pcb accepting a connection + * @param cpcb the newly allocated connection pcb + * @return ERR_OK if OK, any error if connection should be dropped + */ +typedef err_t (*tcp_extarg_callback_passive_open_fn)(u8_t id, struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb); + +/** A table of callback functions that is invoked for ext arguments */ +struct tcp_ext_arg_callbacks { + /** @ref tcp_extarg_callback_pcb_destroyed_fn */ + tcp_extarg_callback_pcb_destroyed_fn destroy; + /** @ref tcp_extarg_callback_passive_open_fn */ + tcp_extarg_callback_passive_open_fn passive_open; +}; + +#define LWIP_TCP_PCB_NUM_EXT_ARG_ID_INVALID 0xFF + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +/* This is the structure for ext args in tcp pcbs (used as array) */ +struct tcp_pcb_ext_args { + const struct tcp_ext_arg_callbacks *callbacks; + void *data; +}; +/* This is a helper define to prevent zero size arrays if disabled */ +#define TCP_PCB_EXTARGS struct tcp_pcb_ext_args ext_args[LWIP_TCP_PCB_NUM_EXT_ARGS]; +#else +#define TCP_PCB_EXTARGS +#endif + +typedef u16_t tcpflags_t; +#define TCP_ALLFLAGS 0xffffU + +/** + * members common to struct tcp_pcb and struct tcp_listen_pcb + */ +#define TCP_PCB_COMMON(type) \ + type *next; /* for the linked list */ \ + void *callback_arg; \ + TCP_PCB_EXTARGS \ + enum tcp_state state; /* TCP state */ \ + u8_t prio; \ + /* ports are in host byte order */ \ + u16_t local_port + + +/** the 
TCP protocol control block for listening pcbs */ +struct tcp_pcb_listen { +/** Common members of all PCB types */ + IP_PCB; +/** Protocol specific PCB members */ + TCP_PCB_COMMON(struct tcp_pcb_listen); + +#if LWIP_CALLBACK_API + /* Function to call when a listener has been connected. */ + tcp_accept_fn accept; +#endif /* LWIP_CALLBACK_API */ + +#if TCP_LISTEN_BACKLOG + u8_t backlog; + u8_t accepts_pending; +#endif /* TCP_LISTEN_BACKLOG */ +}; + + +/** the TCP protocol control block */ +struct tcp_pcb { +/** common PCB members */ + IP_PCB; +/** protocol specific PCB members */ + TCP_PCB_COMMON(struct tcp_pcb); + + /* ports are in host byte order */ + u16_t remote_port; + + tcpflags_t flags; +#define TF_ACK_DELAY 0x01U /* Delayed ACK. */ +#define TF_ACK_NOW 0x02U /* Immediate ACK. */ +#define TF_INFR 0x04U /* In fast recovery. */ +#define TF_CLOSEPEND 0x08U /* If this is set, tcp_close failed to enqueue the FIN (retried in tcp_tmr) */ +#define TF_RXCLOSED 0x10U /* rx closed by tcp_shutdown */ +#define TF_FIN 0x20U /* Connection was closed locally (FIN segment enqueued). */ +#define TF_NODELAY 0x40U /* Disable Nagle algorithm */ +#define TF_NAGLEMEMERR 0x80U /* nagle enabled, memerr, try to output to prevent delayed ACK to happen */ +#if LWIP_WND_SCALE +#define TF_WND_SCALE 0x0100U /* Window Scale option enabled */ +#endif +#if TCP_LISTEN_BACKLOG +#define TF_BACKLOGPEND 0x0200U /* If this is set, a connection pcb has increased the backlog on its listener */ +#endif +#if LWIP_TCP_TIMESTAMPS +#define TF_TIMESTAMP 0x0400U /* Timestamp option enabled */ +#endif +#define TF_RTO 0x0800U /* RTO timer has fired, in-flight data moved to unsent and being retransmitted */ +#if LWIP_TCP_SACK_OUT +#define TF_SACK 0x1000U /* Selective ACKs enabled */ +#endif + + /* the rest of the fields are in host byte order + as we have to do some math with them */ + + /* Timers */ + u8_t polltmr, pollinterval; + u8_t last_timer; + u32_t tmr; + + /* receiver variables */ + u32_t rcv_nxt; /* next seqno expected */ + tcpwnd_size_t rcv_wnd; /* receiver window available */ + tcpwnd_size_t rcv_ann_wnd; /* receiver window to announce */ + u32_t rcv_ann_right_edge; /* announced right edge of window */ + +#if LWIP_TCP_SACK_OUT + /* SACK ranges to include in ACK packets (entry is invalid if left==right) */ + struct tcp_sack_range rcv_sacks[LWIP_TCP_MAX_SACK_NUM]; +#define LWIP_TCP_SACK_VALID(pcb, idx) ((pcb)->rcv_sacks[idx].left != (pcb)->rcv_sacks[idx].right) +#endif /* LWIP_TCP_SACK_OUT */ + + /* Retransmission timer. */ + s16_t rtime; + + u16_t mss; /* maximum segment size */ + + /* RTT (round trip time) estimation variables */ + u32_t rttest; /* RTT estimate in 500ms ticks */ + u32_t rtseq; /* sequence number being timed */ + s16_t sa, sv; /* @see "Congestion Avoidance and Control" by Van Jacobson and Karels */ + + s16_t rto; /* retransmission time-out (in ticks of TCP_SLOW_INTERVAL) */ + u8_t nrtx; /* number of retransmissions */ + + /* fast retransmit/recovery */ + u8_t dupacks; + u32_t lastack; /* Highest acknowledged seqno. */ + + /* congestion avoidance/control variables */ + tcpwnd_size_t cwnd; + tcpwnd_size_t ssthresh; + + /* first byte following last rto byte */ + u32_t rto_end; + + /* sender variables */ + u32_t snd_nxt; /* next new seqno to be sent */ + u32_t snd_wl1, snd_wl2; /* Sequence and acknowledgement numbers of last + window update. */ + u32_t snd_lbb; /* Sequence number of next byte to be buffered. 
*/ + tcpwnd_size_t snd_wnd; /* sender window */ + tcpwnd_size_t snd_wnd_max; /* the maximum sender window announced by the remote host */ + + tcpwnd_size_t snd_buf; /* Available buffer space for sending (in bytes). */ +#define TCP_SNDQUEUELEN_OVERFLOW (0xffffU-3) + u16_t snd_queuelen; /* Number of pbufs currently in the send buffer. */ + +#if TCP_OVERSIZE + /* Extra bytes available at the end of the last pbuf in unsent. */ + u16_t unsent_oversize; +#endif /* TCP_OVERSIZE */ + + tcpwnd_size_t bytes_acked; + + /* These are ordered by sequence number: */ + struct tcp_seg *unsent; /* Unsent (queued) segments. */ + struct tcp_seg *unacked; /* Sent but unacknowledged segments. */ +#if TCP_QUEUE_OOSEQ + struct tcp_seg *ooseq; /* Received out of sequence segments. */ +#endif /* TCP_QUEUE_OOSEQ */ + + struct pbuf *refused_data; /* Data previously received but not yet taken by upper layer */ + +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + struct tcp_pcb_listen* listener; +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + +#if LWIP_CALLBACK_API + /* Function to be called when more send buffer space is available. */ + tcp_sent_fn sent; + /* Function to be called when (in-sequence) data has arrived. */ + tcp_recv_fn recv; + /* Function to be called when a connection has been set up. */ + tcp_connected_fn connected; + /* Function which is called periodically. */ + tcp_poll_fn poll; + /* Function to be called whenever a fatal error occurs. */ + tcp_err_fn errf; +#endif /* LWIP_CALLBACK_API */ + +#if LWIP_TCP_TIMESTAMPS + u32_t ts_lastacksent; + u32_t ts_recent; +#endif /* LWIP_TCP_TIMESTAMPS */ + + /* idle time before KEEPALIVE is sent */ + u32_t keep_idle; +#if LWIP_TCP_KEEPALIVE + u32_t keep_intvl; + u32_t keep_cnt; +#endif /* LWIP_TCP_KEEPALIVE */ + + /* Persist timer counter */ + u8_t persist_cnt; + /* Persist timer back-off */ + u8_t persist_backoff; + /* Number of persist probes */ + u8_t persist_probe; + + /* KEEPALIVE counter */ + u8_t keep_cnt_sent; + +#if LWIP_WND_SCALE + u8_t snd_scale; + u8_t rcv_scale; +#endif +}; + +#if LWIP_EVENT_API + +enum lwip_event { + LWIP_EVENT_ACCEPT, + LWIP_EVENT_SENT, + LWIP_EVENT_RECV, + LWIP_EVENT_CONNECTED, + LWIP_EVENT_POLL, + LWIP_EVENT_ERR +}; + +err_t lwip_tcp_event(void *arg, struct tcp_pcb *pcb, + enum lwip_event, + struct pbuf *p, + u16_t size, + err_t err); + +#endif /* LWIP_EVENT_API */ + +/* Application program's interface: */ +struct tcp_pcb * tcp_new (void); +struct tcp_pcb * tcp_new_ip_type (u8_t type); + +void tcp_arg (struct tcp_pcb *pcb, void *arg); +#if LWIP_CALLBACK_API +void tcp_recv (struct tcp_pcb *pcb, tcp_recv_fn recv); +void tcp_sent (struct tcp_pcb *pcb, tcp_sent_fn sent); +void tcp_err (struct tcp_pcb *pcb, tcp_err_fn err); +void tcp_accept (struct tcp_pcb *pcb, tcp_accept_fn accept); +#endif /* LWIP_CALLBACK_API */ +void tcp_poll (struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval); + +#define tcp_set_flags(pcb, set_flags) do { (pcb)->flags = (tcpflags_t)((pcb)->flags | (set_flags)); } while(0) +#define tcp_clear_flags(pcb, clr_flags) do { (pcb)->flags = (tcpflags_t)((pcb)->flags & (tcpflags_t)(~(clr_flags) & TCP_ALLFLAGS)); } while(0) +#define tcp_is_flag_set(pcb, flag) (((pcb)->flags & (flag)) != 0) + +#if LWIP_TCP_TIMESTAMPS +#define tcp_mss(pcb) (((pcb)->flags & TF_TIMESTAMP) ? 
((pcb)->mss - 12) : (pcb)->mss) +#else /* LWIP_TCP_TIMESTAMPS */ +/** @ingroup tcp_raw */ +#define tcp_mss(pcb) ((pcb)->mss) +#endif /* LWIP_TCP_TIMESTAMPS */ +/** @ingroup tcp_raw */ +#define tcp_sndbuf(pcb) (TCPWND16((pcb)->snd_buf)) +/** @ingroup tcp_raw */ +#define tcp_sndqueuelen(pcb) ((pcb)->snd_queuelen) +/** @ingroup tcp_raw */ +#define tcp_nagle_disable(pcb) tcp_set_flags(pcb, TF_NODELAY) +/** @ingroup tcp_raw */ +#define tcp_nagle_enable(pcb) tcp_clear_flags(pcb, TF_NODELAY) +/** @ingroup tcp_raw */ +#define tcp_nagle_disabled(pcb) tcp_is_flag_set(pcb, TF_NODELAY) + +#if TCP_LISTEN_BACKLOG +#define tcp_backlog_set(pcb, new_backlog) do { \ + LWIP_ASSERT("pcb->state == LISTEN (called for wrong pcb?)", (pcb)->state == LISTEN); \ + ((struct tcp_pcb_listen *)(pcb))->backlog = ((new_backlog) ? (new_backlog) : 1); } while(0) +void tcp_backlog_delayed(struct tcp_pcb* pcb); +void tcp_backlog_accepted(struct tcp_pcb* pcb); +#else /* TCP_LISTEN_BACKLOG */ +#define tcp_backlog_set(pcb, new_backlog) +#define tcp_backlog_delayed(pcb) +#define tcp_backlog_accepted(pcb) +#endif /* TCP_LISTEN_BACKLOG */ +#define tcp_accepted(pcb) do { LWIP_UNUSED_ARG(pcb); } while(0) /* compatibility define, not needed any more */ + +void tcp_recved (struct tcp_pcb *pcb, u16_t len); +err_t tcp_bind (struct tcp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +void tcp_bind_netif(struct tcp_pcb *pcb, const struct netif *netif); +err_t tcp_connect (struct tcp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port, tcp_connected_fn connected); + +struct tcp_pcb * tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err); +struct tcp_pcb * tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog); +/** @ingroup tcp_raw */ +#define tcp_listen(pcb) tcp_listen_with_backlog(pcb, TCP_DEFAULT_LISTEN_BACKLOG) + +void tcp_abort (struct tcp_pcb *pcb); +err_t tcp_close (struct tcp_pcb *pcb); +err_t tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx); + +err_t tcp_write (struct tcp_pcb *pcb, const void *dataptr, u16_t len, + u8_t apiflags); + +void tcp_setprio (struct tcp_pcb *pcb, u8_t prio); + +err_t tcp_output (struct tcp_pcb *pcb); + +err_t tcp_tcp_get_tcp_addrinfo(struct tcp_pcb *pcb, int local, ip_addr_t *addr, u16_t *port); + +#define tcp_dbg_get_tcp_state(pcb) ((pcb)->state) + +/* for compatibility with older implementation */ +#define tcp_new_ip6() tcp_new_ip_type(IPADDR_TYPE_V6) + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +u8_t tcp_ext_arg_alloc_id(void); +void tcp_ext_arg_set_callbacks(struct tcp_pcb *pcb, uint8_t id, const struct tcp_ext_arg_callbacks * const callbacks); +void tcp_ext_arg_set(struct tcp_pcb *pcb, uint8_t id, void *arg); +void *tcp_ext_arg_get(const struct tcp_pcb *pcb, uint8_t id); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP */ + +#endif /* LWIP_HDR_TCP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h b/Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h new file mode 100644 index 00000000..00230746 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h @@ -0,0 +1,88 @@ +/** + * @file + * Base TCP API definitions shared by TCP and ALTCP\n + * See also @ref tcp_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCPBASE_H +#define LWIP_HDR_TCPBASE_H + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#ifdef __cplusplus +extern "C" { +#endif + + +#if LWIP_WND_SCALE +typedef u32_t tcpwnd_size_t; +#else +typedef u16_t tcpwnd_size_t; +#endif + +enum tcp_state { + CLOSED = 0, + LISTEN = 1, + SYN_SENT = 2, + SYN_RCVD = 3, + ESTABLISHED = 4, + FIN_WAIT_1 = 5, + FIN_WAIT_2 = 6, + CLOSE_WAIT = 7, + CLOSING = 8, + LAST_ACK = 9, + TIME_WAIT = 10 +}; +/* ATTENTION: this depends on state number ordering! */ +#define TCP_STATE_IS_CLOSING(state) ((state) >= FIN_WAIT_1) + +/* Flags for "apiflags" parameter in tcp_write */ +#define TCP_WRITE_FLAG_COPY 0x01 +#define TCP_WRITE_FLAG_MORE 0x02 + +#define TCP_PRIO_MIN 1 +#define TCP_PRIO_NORMAL 64 +#define TCP_PRIO_MAX 127 + +const char* tcp_debug_state_str(enum tcp_state s); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP */ + +#endif /* LWIP_HDR_TCPBASE_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h b/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h new file mode 100644 index 00000000..0b8880a9 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h @@ -0,0 +1,113 @@ +/** + * @file + * Functions to sync with TCPIP thread + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCPIP_H +#define LWIP_HDR_TCPIP_H + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/timeouts.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_TCPIP_CORE_LOCKING +/** The global semaphore to lock the stack. */ +extern sys_mutex_t lock_tcpip_core; +#if !defined LOCK_TCPIP_CORE || defined __DOXYGEN__ +/** Lock lwIP core mutex (needs @ref LWIP_TCPIP_CORE_LOCKING 1) */ +#define LOCK_TCPIP_CORE() sys_mutex_lock(&lock_tcpip_core) +/** Unlock lwIP core mutex (needs @ref LWIP_TCPIP_CORE_LOCKING 1) */ +#define UNLOCK_TCPIP_CORE() sys_mutex_unlock(&lock_tcpip_core) +#endif /* LOCK_TCPIP_CORE */ +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define LOCK_TCPIP_CORE() +#define UNLOCK_TCPIP_CORE() +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +struct pbuf; +struct netif; + +/** Function prototype for the init_done function passed to tcpip_init */ +typedef void (*tcpip_init_done_fn)(void *arg); +/** Function prototype for functions passed to tcpip_callback() */ +typedef void (*tcpip_callback_fn)(void *ctx); + +/* Forward declarations */ +struct tcpip_callback_msg; + +void tcpip_init(tcpip_init_done_fn tcpip_init_done, void *arg); + +err_t tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn); +err_t tcpip_input(struct pbuf *p, struct netif *inp); + +err_t tcpip_try_callback(tcpip_callback_fn function, void *ctx); +err_t tcpip_callback(tcpip_callback_fn function, void *ctx); +/** @ingroup lwip_os + * @deprecated use tcpip_try_callback() or tcpip_callback() instead + */ +#define tcpip_callback_with_block(function, ctx, block) ((block != 0)? 
tcpip_callback(function, ctx) : tcpip_try_callback(function, ctx)) + +struct tcpip_callback_msg* tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx); +void tcpip_callbackmsg_delete(struct tcpip_callback_msg* msg); +err_t tcpip_callbackmsg_trycallback(struct tcpip_callback_msg* msg); +err_t tcpip_callbackmsg_trycallback_fromisr(struct tcpip_callback_msg* msg); + +/* free pbufs or heap memory from another context without blocking */ +err_t pbuf_free_callback(struct pbuf *p); +err_t mem_free_callback(void *m); + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS +err_t tcpip_timeout(u32_t msecs, sys_timeout_handler h, void *arg); +err_t tcpip_untimeout(sys_timeout_handler h, void *arg); +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + +#ifdef TCPIP_THREAD_TEST +int tcpip_thread_poll_one(void); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* !NO_SYS */ + +#endif /* LWIP_HDR_TCPIP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h b/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h new file mode 100644 index 00000000..b601f9eb --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h @@ -0,0 +1,128 @@ +/** + * @file + * Timer implementations + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_TIMEOUTS_H +#define LWIP_HDR_TIMEOUTS_H + +#include "lwip/opt.h" +#include "lwip/err.h" +#if !NO_SYS +#include "lwip/sys.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef LWIP_DEBUG_TIMERNAMES +#ifdef LWIP_DEBUG +#define LWIP_DEBUG_TIMERNAMES SYS_DEBUG +#else /* LWIP_DEBUG */ +#define LWIP_DEBUG_TIMERNAMES 0 +#endif /* LWIP_DEBUG*/ +#endif + +/** Returned by sys_timeouts_sleeptime() to indicate there is no timer, so we + * can sleep forever. 
+ */ +#define SYS_TIMEOUTS_SLEEPTIME_INFINITE 0xFFFFFFFF + +/** Function prototype for a stack-internal timer function that has to be + * called at a defined interval */ +typedef void (* lwip_cyclic_timer_handler)(void); + +/** This struct contains information about a stack-internal timer function + that has to be called at a defined interval */ +struct lwip_cyclic_timer { + u32_t interval_ms; + lwip_cyclic_timer_handler handler; +#if LWIP_DEBUG_TIMERNAMES + const char* handler_name; +#endif /* LWIP_DEBUG_TIMERNAMES */ +}; + +/** This array contains all stack-internal cyclic timers. To get the number of + * timers, use lwip_num_cyclic_timers */ +extern const struct lwip_cyclic_timer lwip_cyclic_timers[]; +/** Array size of lwip_cyclic_timers[] */ +extern const int lwip_num_cyclic_timers; + +#if LWIP_TIMERS + +/** Function prototype for a timeout callback function. Register such a function + * using sys_timeout(). + * + * @param arg Additional argument to pass to the function - set up by sys_timeout() + */ +typedef void (* sys_timeout_handler)(void *arg); + +struct sys_timeo { + struct sys_timeo *next; + u32_t time; + sys_timeout_handler h; + void *arg; +#if LWIP_DEBUG_TIMERNAMES + const char* handler_name; +#endif /* LWIP_DEBUG_TIMERNAMES */ +}; + +void sys_timeouts_init(void); + +#if LWIP_DEBUG_TIMERNAMES +void sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char* handler_name); +#define sys_timeout(msecs, handler, arg) sys_timeout_debug(msecs, handler, arg, #handler) +#else /* LWIP_DEBUG_TIMERNAMES */ +void sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg); +#endif /* LWIP_DEBUG_TIMERNAMES */ + +void sys_untimeout(sys_timeout_handler handler, void *arg); +void sys_restart_timeouts(void); +void sys_check_timeouts(void); +u32_t sys_timeouts_sleeptime(void); + +#if LWIP_TESTMODE +struct sys_timeo** sys_timeouts_get_next_timeout(void); +void lwip_cyclic_timer(void *arg); +#endif + +#endif /* LWIP_TIMERS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_TIMEOUTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h b/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h new file mode 100644 index 00000000..b1c78e58 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h @@ -0,0 +1,195 @@ +/** + * @file + * UDP API (to be used from TCPIP thread)\n + * See also @ref udp_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
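As a usage note for the timeout API declared above, a hypothetical application timer and the polling call used in a NO_SYS==1 main loop might look like this (the 1-second interval and function names are examples only):

#include "lwip/timeouts.h"

/* Hypothetical application timer: sys_timeout() is one-shot, so re-arm it. */
static void app_tick(void *arg)
{
  LWIP_UNUSED_ARG(arg);
  /* ... periodic work ... */
  sys_timeout(1000, app_tick, NULL);
}

void app_timers_start(void)
{
  sys_timeout(1000, app_tick, NULL);
}

/* In a NO_SYS==1 main loop, the stack's timers are driven by polling: */
void app_poll(void)
{
  sys_check_timeouts();               /* fires any expired sys_timeout() */
}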
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_UDP_H +#define LWIP_HDR_UDP_H + +#include "lwip/opt.h" + +#if LWIP_UDP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/ip_addr.h" +#include "lwip/ip.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/udp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define UDP_FLAGS_NOCHKSUM 0x01U +#define UDP_FLAGS_UDPLITE 0x02U +#define UDP_FLAGS_CONNECTED 0x04U +#define UDP_FLAGS_MULTICAST_LOOP 0x08U + +struct udp_pcb; + +/** Function prototype for udp pcb receive callback functions + * addr and port are in same byte order as in the pcb + * The callback is responsible for freeing the pbuf + * if it's not used any more. + * + * ATTENTION: Be aware that 'addr' might point into the pbuf 'p' so freeing this pbuf + * can make 'addr' invalid, too. + * + * @param arg user supplied argument (udp_pcb.recv_arg) + * @param pcb the udp_pcb which received data + * @param p the packet buffer that was received + * @param addr the remote IP address from which the packet was received + * @param port the remote port from which the packet was received + */ +typedef void (*udp_recv_fn)(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port); + +/** the UDP protocol control block */ +struct udp_pcb { +/** Common members of all PCB types */ + IP_PCB; + +/* Protocol specific PCB members */ + + struct udp_pcb *next; + + u8_t flags; + /** ports are in host byte order */ + u16_t local_port, remote_port; + +#if LWIP_MULTICAST_TX_OPTIONS +#if LWIP_IPV4 + /** outgoing network interface for multicast packets, by IPv4 address (if not 'any') */ + ip4_addr_t mcast_ip4; +#endif /* LWIP_IPV4 */ + /** outgoing network interface for multicast packets, by interface index (if nonzero) */ + u8_t mcast_ifindex; + /** TTL for outgoing multicast packets */ + u8_t mcast_ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_UDPLITE + /** used for UDP_LITE only */ + u16_t chksum_len_rx, chksum_len_tx; +#endif /* LWIP_UDPLITE */ + + /** receive callback function */ + udp_recv_fn recv; + /** user-supplied argument for the recv callback */ + void *recv_arg; +}; +/* udp_pcbs export for external reference (e.g. SNMP agent) */ +extern struct udp_pcb *udp_pcbs; + +/* The following functions is the application layer interface to the + UDP code. 
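A short, hypothetical use of the prototypes that follow: a UDP echo pcb that binds to port 7, registers a receive callback and sends each datagram straight back to its sender (the callback owns and frees the pbuf, as required above).

#include "lwip/udp.h"

static void udp_echo_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p,
                          const ip_addr_t *addr, u16_t port)
{
  LWIP_UNUSED_ARG(arg);
  if (p != NULL) {
    (void)udp_sendto(pcb, p, addr, port);  /* echo the same pbuf back */
    pbuf_free(p);                          /* callback is responsible for freeing it */
  }
}

void udp_echo_init(void)
{
  struct udp_pcb *pcb = udp_new();
  if ((pcb != NULL) && (udp_bind(pcb, IP_ADDR_ANY, 7) == ERR_OK)) {
    udp_recv(pcb, udp_echo_recv, NULL);
  }
}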
*/ +struct udp_pcb * udp_new (void); +struct udp_pcb * udp_new_ip_type(u8_t type); +void udp_remove (struct udp_pcb *pcb); +err_t udp_bind (struct udp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +void udp_bind_netif (struct udp_pcb *pcb, const struct netif* netif); +err_t udp_connect (struct udp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +void udp_disconnect (struct udp_pcb *pcb); +void udp_recv (struct udp_pcb *pcb, udp_recv_fn recv, + void *recv_arg); +err_t udp_sendto_if (struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif); +err_t udp_sendto_if_src(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif, const ip_addr_t *src_ip); +err_t udp_sendto (struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port); +err_t udp_send (struct udp_pcb *pcb, struct pbuf *p); + +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP +err_t udp_sendto_if_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif, u8_t have_chksum, + u16_t chksum); +err_t udp_sendto_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + u8_t have_chksum, u16_t chksum); +err_t udp_send_chksum(struct udp_pcb *pcb, struct pbuf *p, + u8_t have_chksum, u16_t chksum); +err_t udp_sendto_if_src_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, + u8_t have_chksum, u16_t chksum, const ip_addr_t *src_ip); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + +#define udp_flags(pcb) ((pcb)->flags) +#define udp_setflags(pcb, f) ((pcb)->flags = (f)) + +#define udp_set_flags(pcb, set_flags) do { (pcb)->flags = (u8_t)((pcb)->flags | (set_flags)); } while(0) +#define udp_clear_flags(pcb, clr_flags) do { (pcb)->flags = (u8_t)((pcb)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define udp_is_flag_set(pcb, flag) (((pcb)->flags & (flag)) != 0) + +/* The following functions are the lower layer interface to UDP. */ +void udp_input (struct pbuf *p, struct netif *inp); + +void udp_init (void); + +/* for compatibility with older implementation */ +#define udp_new_ip6() udp_new_ip_type(IPADDR_TYPE_V6) + +#if LWIP_MULTICAST_TX_OPTIONS +#if LWIP_IPV4 +#define udp_set_multicast_netif_addr(pcb, ip4addr) ip4_addr_copy((pcb)->mcast_ip4, *(ip4addr)) +#define udp_get_multicast_netif_addr(pcb) (&(pcb)->mcast_ip4) +#endif /* LWIP_IPV4 */ +#define udp_set_multicast_netif_index(pcb, idx) ((pcb)->mcast_ifindex = (idx)) +#define udp_get_multicast_netif_index(pcb) ((pcb)->mcast_ifindex) +#define udp_set_multicast_ttl(pcb, value) ((pcb)->mcast_ttl = (value)) +#define udp_get_multicast_ttl(pcb) ((pcb)->mcast_ttl) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if UDP_DEBUG +void udp_debug_print(struct udp_hdr *udphdr); +#else +#define udp_debug_print(udphdr) +#endif + +void udp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_UDP */ + +#endif /* LWIP_HDR_UDP_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h b/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h new file mode 100644 index 00000000..f4f8cf14 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h @@ -0,0 +1,127 @@ +/** + * @file + * lwIP netif implementing an IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_NETIF_BRIDGEIF_H +#define LWIP_HDR_NETIF_BRIDGEIF_H + +#include "netif/bridgeif_opts.h" + +#include "lwip/err.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct netif; + +#if (BRIDGEIF_MAX_PORTS < 0) || (BRIDGEIF_MAX_PORTS >= 64) +#error BRIDGEIF_MAX_PORTS must be [1..63] +#elif BRIDGEIF_MAX_PORTS < 8 +typedef u8_t bridgeif_portmask_t; +#elif BRIDGEIF_MAX_PORTS < 16 +typedef u16_t bridgeif_portmask_t; +#elif BRIDGEIF_MAX_PORTS < 32 +typedef u32_t bridgeif_portmask_t; +#elif BRIDGEIF_MAX_PORTS < 64 +typedef u64_t bridgeif_portmask_t; +#endif + +#define BR_FLOOD ((bridgeif_portmask_t)-1) + +/** @ingroup bridgeif + * Initialisation data for @ref bridgeif_init. + * An instance of this type must be passed as parameter 'state' to @ref netif_add + * when the bridge is added. + */ +typedef struct bridgeif_initdata_s { + /** MAC address of the bridge (cannot use the netif's addresses) */ + struct eth_addr ethaddr; + /** Maximum number of ports in the bridge (ports are stored in an array, this + influences memory allocated for netif->state of the bridge netif). */ + u8_t max_ports; + /** Maximum number of dynamic/learning entries in the bridge's forwarding database. + In the default implementation, this controls memory consumption only. */ + u16_t max_fdb_dynamic_entries; + /** Maximum number of static forwarding entries. Influences memory consumption! 
*/ + u16_t max_fdb_static_entries; +} bridgeif_initdata_t; + +/** @ingroup bridgeif + * Use this for constant initialization of a bridgeif_initdat_t + * (ethaddr must be passed as ETH_ADDR()) + */ +#define BRIDGEIF_INITDATA1(max_ports, max_fdb_dynamic_entries, max_fdb_static_entries, ethaddr) {ethaddr, max_ports, max_fdb_dynamic_entries, max_fdb_static_entries} +/** @ingroup bridgeif + * Use this for constant initialization of a bridgeif_initdat_t + * (each byte of ethaddr must be passed) + */ +#define BRIDGEIF_INITDATA2(max_ports, max_fdb_dynamic_entries, max_fdb_static_entries, e0, e1, e2, e3, e4, e5) {{e0, e1, e2, e3, e4, e5}, max_ports, max_fdb_dynamic_entries, max_fdb_static_entries} + +err_t bridgeif_init(struct netif *netif); +err_t bridgeif_add_port(struct netif *bridgeif, struct netif *portif); +err_t bridgeif_fdb_add(struct netif *bridgeif, const struct eth_addr *addr, bridgeif_portmask_t ports); +err_t bridgeif_fdb_remove(struct netif *bridgeif, const struct eth_addr *addr); + +/* FDB interface, can be replaced by own implementation */ +void bridgeif_fdb_update_src(void *fdb_ptr, struct eth_addr *src_addr, u8_t port_idx); +bridgeif_portmask_t bridgeif_fdb_get_dst_ports(void *fdb_ptr, struct eth_addr *dst_addr); +void* bridgeif_fdb_init(u16_t max_fdb_entries); + +#if BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT +#ifndef BRIDGEIF_DECL_PROTECT +/* define bridgeif protection to sys_arch_protect... */ +#include "lwip/sys.h" +#define BRIDGEIF_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev) +#define BRIDGEIF_READ_PROTECT(lev) SYS_ARCH_PROTECT(lev) +#define BRIDGEIF_READ_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev) +#define BRIDGEIF_WRITE_PROTECT(lev) +#define BRIDGEIF_WRITE_UNPROTECT(lev) +#endif +#else /* BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT */ +#include "lwip/tcpip.h" +#define BRIDGEIF_DECL_PROTECT(lev) +#define BRIDGEIF_READ_PROTECT(lev) +#define BRIDGEIF_READ_UNPROTECT(lev) +#define BRIDGEIF_WRITE_PROTECT(lev) +#define BRIDGEIF_WRITE_UNPROTECT(lev) +#endif /* BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_BRIDGEIF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h b/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h new file mode 100644 index 00000000..b85c3017 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h @@ -0,0 +1,90 @@ +/** + * @file + * lwIP netif implementing an IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
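To make the bridgeif API above concrete, here is a hedged sketch of creating a two-port bridge. The function name, MAC address, FDB sizes and port netifs are placeholders, and the port netifs must already have been added as regular Ethernet netifs:

#include "netif/bridgeif.h"
#include "lwip/tcpip.h"

static struct netif bridge_netif;
/* 2 ports, 1024 dynamic FDB entries, 16 static entries, bridge MAC 00:01:02:03:04:05 */
static bridgeif_initdata_t bridge_data =
  BRIDGEIF_INITDATA2(2, 1024, 16, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05);

static void bridge_setup(struct netif *port1, struct netif *port2,
                         const ip4_addr_t *ip, const ip4_addr_t *mask, const ip4_addr_t *gw)
{
  netif_add(&bridge_netif, ip, mask, gw, &bridge_data, bridgeif_init, tcpip_input);
  bridgeif_add_port(&bridge_netif, port1);
  bridgeif_add_port(&bridge_netif, port2);
  netif_set_up(&bridge_netif);
}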
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_NETIF_BRIDGEIF_OPTS_H +#define LWIP_HDR_NETIF_BRIDGEIF_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup bridgeif_opts Options + * @ingroup bridgeif + * @{ + */ + +/** BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT==1: set port netif's 'input' function + * to call directly into bridgeif code and on top of that, directly call into + * the selected forwarding port's 'linkoutput' function. + * This means that the bridgeif input/output path is protected from concurrent access + * but as well, *all* bridge port netif's drivers must correctly handle concurrent access! + * == 0: get into tcpip_thread for every input packet (no multithreading) + * ATTENTION: as ==0 relies on tcpip.h, the default depends on NO_SYS setting + */ +#ifndef BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT +#define BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT NO_SYS +#endif + +/** BRIDGEIF_MAX_PORTS: this is used to create a typedef used for forwarding + * bit-fields: the number of bits required is this + 1 (for the internal/cpu port) + * (63 is the maximum, resulting in an u64_t for the bit mask) + * ATTENTION: this controls the maximum number of the implementation only! + * The max. number of ports per bridge must still be passed via netif_add parameter! + */ +#ifndef BRIDGEIF_MAX_PORTS +#define BRIDGEIF_MAX_PORTS 7 +#endif + +/** BRIDGEIF_DEBUG: Enable generic debugging in bridgeif.c. */ +#ifndef BRIDGEIF_DEBUG +#define BRIDGEIF_DEBUG LWIP_DBG_OFF +#endif + +/** BRIDGEIF_DEBUG: Enable FDB debugging in bridgeif.c. */ +#ifndef BRIDGEIF_FDB_DEBUG +#define BRIDGEIF_FDB_DEBUG LWIP_DBG_OFF +#endif + +/** BRIDGEIF_DEBUG: Enable forwarding debugging in bridgeif.c. */ +#ifndef BRIDGEIF_FW_DEBUG +#define BRIDGEIF_FW_DEBUG LWIP_DBG_OFF +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_NETIF_BRIDGEIF_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h b/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h new file mode 100644 index 00000000..b536fd28 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h @@ -0,0 +1,3 @@ +/* ARP has been moved to core/ipv4, provide this #include for compatibility only */ +#include "lwip/etharp.h" +#include "netif/ethernet.h" diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h b/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h new file mode 100644 index 00000000..49649cbf --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h @@ -0,0 +1,77 @@ +/** + * @file + * Ethernet input function - handles INCOMING ethernet level traffic + * To be used in most low-level netif implementations + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. 
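The bridge options above are intended to be overridden from lwipopts.h; a hedged example with illustrative values:

/* lwipopts.h */
#define BRIDGEIF_MAX_PORTS                 2   /* fewer than 8 ports keeps bridgeif_portmask_t at u8_t */
#define BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT 0   /* forward through tcpip_thread */
#define BRIDGEIF_DEBUG                     LWIP_DBG_ON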
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_NETIF_ETHERNET_H +#define LWIP_HDR_NETIF_ETHERNET_H + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ARP || LWIP_ETHERNET + +/** Define this to 1 and define LWIP_ARP_FILTER_NETIF_FN(pbuf, netif, type) + * to a filter function that returns the correct netif when using multiple + * netifs on one hardware interface where the netif's low-level receive + * routine cannot decide for the correct netif (e.g. when mapping multiple + * IP addresses to one hardware interface). + */ +#ifndef LWIP_ARP_FILTER_NETIF +#define LWIP_ARP_FILTER_NETIF 0 +#endif + +err_t ethernet_input(struct pbuf *p, struct netif *netif); +err_t ethernet_output(struct netif* netif, struct pbuf* p, const struct eth_addr* src, const struct eth_addr* dst, u16_t eth_type); + +extern const struct eth_addr ethbroadcast, ethzero; + +#endif /* LWIP_ARP || LWIP_ETHERNET */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_ETHERNET_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h b/Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h new file mode 100644 index 00000000..54e019fd --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h @@ -0,0 +1,112 @@ +/** + * @file + * Definitions for IEEE 802.15.4 MAC frames + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_NETIF_IEEE802154_H +#define LWIP_HDR_NETIF_IEEE802154_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** General MAC frame format + * This shows the full featured header, mainly for documentation. + * Some fields are omitted or shortened to achieve frame compression. + */ +struct ieee_802154_hdr { + /** See IEEE_802154_FC_* defines */ + PACK_STRUCT_FIELD(u16_t frame_control); + /** Sequence number is omitted if IEEE_802154_FC_SEQNO_SUPPR is set in frame_control */ + PACK_STRUCT_FLD_8(u8_t sequence_number); + /** Destination PAN ID is omitted if Destination Addressing Mode is 0 */ + PACK_STRUCT_FIELD(u16_t destination_pan_id); + /** Destination Address is omitted if Destination Addressing Mode is 0 */ + PACK_STRUCT_FLD_8(u8_t destination_address[8]); + /** Source PAN ID is omitted if Source Addressing Mode is 0 + or if IEEE_802154_FC_PANID_COMPR is set in frame control*/ + PACK_STRUCT_FIELD(u16_t source_pan_id); + /** Source Address is omitted if Source Addressing Mode is 0 */ + PACK_STRUCT_FLD_8(u8_t source_address[8]); + /* The rest is variable */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Addressing modes (2 bits) */ +#define IEEE_802154_ADDR_MODE_NO_ADDR 0x00 /* PAN ID and address fields are not present */ +#define IEEE_802154_ADDR_MODE_RESERVED 0x01 /* Reserved */ +#define IEEE_802154_ADDR_MODE_SHORT 0x02 /* Address field contains a short address (16 bit) */ +#define IEEE_802154_ADDR_MODE_EXT 0x03 /* Address field contains an extended address (64 bit) */ + +/* IEEE 802.15.4 Frame Control definitions (2 bytes; see IEEE 802.15.4-2015 ch. 
7.2.1) */ +#define IEEE_802154_FC_FT_MASK 0x0007 /* bits 0..2: Frame Type */ +#define IEEE_802154_FC_FT_BEACON 0x00 +#define IEEE_802154_FC_FT_DATA 0x01 +#define IEEE_802154_FC_FT_ACK 0x02 +#define IEEE_802154_FC_FT_MAC_CMD 0x03 +#define IEEE_802154_FC_FT_RESERVED 0x04 +#define IEEE_802154_FC_FT_MULTIPURPOSE 0x05 +#define IEEE_802154_FC_FT_FRAG 0x06 +#define IEEE_802154_FC_FT_EXT 0x07 +#define IEEE_802154_FC_SEC_EN 0x0008 /* bit 3: Security Enabled */ +#define IEEE_802154_FC_FRAME_PEND 0x0010 /* bit 4: Frame Pending */ +#define IEEE_802154_FC_ACK_REQ 0x0020 /* bit 5: AR (ACK required) */ +#define IEEE_802154_FC_PANID_COMPR 0x0040 /* bit 6: PAN ID Compression (src and dst are equal, src PAN ID omitted) */ +#define IEEE_802154_FC_RESERVED 0x0080 +#define IEEE_802154_FC_SEQNO_SUPPR 0x0100 /* bit 8: Sequence Number Suppression */ +#define IEEE_802154_FC_IE_PRESENT 0x0200 /* bit 9: IE Present */ +#define IEEE_802154_FC_DST_ADDR_MODE_MASK 0x0c00 /* bits 10..11: Destination Addressing Mode */ +#define IEEE_802154_FC_DST_ADDR_MODE_NO_ADDR (IEEE_802154_ADDR_MODE_NO_ADDR << 10) +#define IEEE_802154_FC_DST_ADDR_MODE_SHORT (IEEE_802154_ADDR_MODE_SHORT << 10) +#define IEEE_802154_FC_DST_ADDR_MODE_EXT (IEEE_802154_ADDR_MODE_EXT << 10) +#define IEEE_802154_FC_FRAME_VERSION_MASK 0x3000 /* bits 12..13: Frame Version */ +#define IEEE_802154_FC_FRAME_VERSION_GET(x) (((x) & IEEE_802154_FC_FRAME_VERSION_MASK) >> 12) +#define IEEE_802154_FC_SRC_ADDR_MODE_MASK 0xc000 /* bits 14..15: Source Addressing Mode */ +#define IEEE_802154_FC_SRC_ADDR_MODE_SHORT (IEEE_802154_ADDR_MODE_SHORT << 14) +#define IEEE_802154_FC_SRC_ADDR_MODE_EXT (IEEE_802154_ADDR_MODE_EXT << 14) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_IEEE802154_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h new file mode 100644 index 00000000..ecff24ba --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h @@ -0,0 +1,89 @@ +/** + * @file + * + * 6LowPAN output for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
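As an illustration of the frame-control masks above (not from the patch), a data frame using 16-bit addresses, PAN ID compression and an ACK request would be encoded as:

u16_t fc = IEEE_802154_FC_FT_DATA               /* 0x0001 */
         | IEEE_802154_FC_ACK_REQ               /* 0x0020 */
         | IEEE_802154_FC_PANID_COMPR           /* 0x0040 */
         | IEEE_802154_FC_DST_ADDR_MODE_SHORT   /* 0x0800 */
         | IEEE_802154_FC_SRC_ADDR_MODE_SHORT;  /* 0x8000, giving fc == 0x8861 */

/* The same masks decode a received frame, e.g.: */
int is_data = ((fc & IEEE_802154_FC_FT_MASK) == IEEE_802154_FC_FT_DATA);  /* 1 */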
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_LOWPAN6_H +#define LWIP_HDR_LOWPAN6_H + +#include "netif/lowpan6_opts.h" + +#if LWIP_IPV6 + +#include "netif/lowpan6_common.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** 1 second period for reassembly */ +#define LOWPAN6_TMR_INTERVAL 1000 + +void lowpan6_tmr(void); + +err_t lowpan6_set_context(u8_t idx, const ip6_addr_t * context); +err_t lowpan6_set_short_addr(u8_t addr_high, u8_t addr_low); + +#if LWIP_IPV4 +err_t lowpan4_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr); +#endif /* LWIP_IPV4 */ +err_t lowpan6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); +err_t lowpan6_input(struct pbuf * p, struct netif *netif); +err_t lowpan6_if_init(struct netif *netif); + +/* pan_id in network byte order. */ +err_t lowpan6_set_pan_id(u16_t pan_id); + +u16_t lowpan6_calc_crc(const void *buf, u16_t len); + +#if !NO_SYS +err_t tcpip_6lowpan_input(struct pbuf *p, struct netif *inp); +#endif /* !NO_SYS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_LOWPAN6_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h new file mode 100644 index 00000000..01896a7f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h @@ -0,0 +1,78 @@ +/** + * @file + * 6LowPAN over BLE for IPv6 (RFC7668). + */ + +/* + * Copyright (c) 2017 Benjamin Aigner + * Copyright (c) 2015 Inico Technologies Ltd. , Author: Ivan Delamer + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
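A hedged sketch of how the calls above fit together when adding a 6LoWPAN netif on top of an IEEE 802.15.4 radio. The PAN ID and short address are illustrative, and on lwIP builds that provide it, netif_add_noaddr can replace the netif_add call with NULL IPv4 arguments:

#include "netif/lowpan6.h"
#include "lwip/tcpip.h"

static struct netif lowpan_netif;

static void lowpan_setup(void)
{
  netif_add(&lowpan_netif, NULL, NULL, NULL, NULL, lowpan6_if_init, tcpip_6lowpan_input);
  lowpan6_set_pan_id(PP_HTONS(0xABCD));  /* pan_id is expected in network byte order */
  lowpan6_set_short_addr(0x12, 0x34);    /* illustrative 16-bit short address */
  netif_set_up(&lowpan_netif);
}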
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Benjamin Aigner + * + * Based on the original 6lowpan implementation of lwIP ( @see 6lowpan.c) + */ + +#ifndef LWIP_HDR_LOWPAN6_BLE_H +#define LWIP_HDR_LOWPAN6_BLE_H + +#include "netif/lowpan6_opts.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "netif/lowpan6_common.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +err_t rfc7668_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); +err_t rfc7668_input(struct pbuf * p, struct netif *netif); +err_t rfc7668_set_local_addr_eui64(struct netif *netif, const u8_t *local_addr, size_t local_addr_len); +err_t rfc7668_set_local_addr_mac48(struct netif *netif, const u8_t *local_addr, size_t local_addr_len, int is_public_addr); +err_t rfc7668_set_peer_addr_eui64(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len); +err_t rfc7668_set_peer_addr_mac48(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len, int is_public_addr); +err_t rfc7668_set_context(u8_t index, const ip6_addr_t * context); +err_t rfc7668_if_init(struct netif *netif); + +#if !NO_SYS +err_t tcpip_rfc7668_input(struct pbuf *p, struct netif *inp); +#endif + +void ble_addr_to_eui64(uint8_t *dst, const uint8_t *src, int public_addr); +void eui64_to_ble_addr(uint8_t *dst, const uint8_t *src); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_LOWPAN6_BLE_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h new file mode 100644 index 00000000..0dc13ab5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h @@ -0,0 +1,82 @@ +/** + * @file + * + * Common 6LowPAN routines for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
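Similarly, a hedged sketch of wiring the RFC 7668 netif above to a BLE connection. The 6-byte device addresses would come from the BLE stack, and the last argument states whether the address is public:

#include "netif/lowpan6_ble.h"
#include "lwip/tcpip.h"

static struct netif ble_netif;

static void rfc7668_setup(const u8_t *local_addr, const u8_t *peer_addr)
{
  netif_add(&ble_netif, NULL, NULL, NULL, NULL, rfc7668_if_init, tcpip_rfc7668_input);
  rfc7668_set_local_addr_mac48(&ble_netif, local_addr, 6, 1 /* public address */);
  rfc7668_set_peer_addr_mac48(&ble_netif, peer_addr, 6, 1);
  netif_set_up(&ble_netif);
}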
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_LOWPAN6_COMMON_H +#define LWIP_HDR_LOWPAN6_COMMON_H + +#include "netif/lowpan6_opts.h" + +#if LWIP_IPV6 /* don't build if IPv6 is disabled in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Helper define for a link layer address, which can be encoded as 0, 2 or 8 bytes */ +struct lowpan6_link_addr { + /* encoded length of the address */ + u8_t addr_len; + /* address bytes */ + u8_t addr[8]; +}; + +s8_t lowpan6_get_address_mode(const ip6_addr_t *ip6addr, const struct lowpan6_link_addr *mac_addr); + +#if LWIP_6LOWPAN_IPHC +err_t lowpan6_compress_headers(struct netif *netif, u8_t *inbuf, size_t inbuf_size, u8_t *outbuf, size_t outbuf_size, + u8_t *lowpan6_header_len_out, u8_t *hidden_header_len_out, ip6_addr_t *lowpan6_contexts, + const struct lowpan6_link_addr *src, const struct lowpan6_link_addr *dst); +struct pbuf *lowpan6_decompress(struct pbuf *p, u16_t datagram_size, ip6_addr_t *lowpan6_contexts, + struct lowpan6_link_addr *src, struct lowpan6_link_addr *dest); +#endif /* LWIP_6LOWPAN_IPHC */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_LOWPAN6_COMMON_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h new file mode 100644 index 00000000..17d46cdc --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h @@ -0,0 +1,122 @@ +/** + * @file + * 6LowPAN options list + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
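For reference, struct lowpan6_link_addr declared above encodes its own length; a 16-bit short address, for example, is stored as:

struct lowpan6_link_addr short_addr;
short_addr.addr_len = 2;      /* 0, 2 or 8 bytes are the valid encodings */
short_addr.addr[0]  = 0x12;   /* illustrative short address 0x1234 */
short_addr.addr[1]  = 0x34;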
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_LOWPAN6_OPTS_H +#define LWIP_HDR_LOWPAN6_OPTS_H + +#include "lwip/opt.h" + +/** LWIP_6LOWPAN_NUM_CONTEXTS: define the number of compression + * contexts per netif type + */ +#ifndef LWIP_6LOWPAN_NUM_CONTEXTS +#define LWIP_6LOWPAN_NUM_CONTEXTS 10 +#endif + +/** LWIP_6LOWPAN_INFER_SHORT_ADDRESS: set this to 0 to disable creating + * short addresses for matching addresses (debug only) + */ +#ifndef LWIP_6LOWPAN_INFER_SHORT_ADDRESS +#define LWIP_6LOWPAN_INFER_SHORT_ADDRESS 1 +#endif + +/** LWIP_6LOWPAN_IPHC: set this to 0 to disable IP header compression as per + * RFC 6282 (which is mandatory for BLE) + */ +#ifndef LWIP_6LOWPAN_IPHC +#define LWIP_6LOWPAN_IPHC 1 +#endif + +/** Set this to 1 if your IEEE 802.15.4 interface can calculate and check the + * CRC in hardware. This means TX packets get 2 zero bytes added on transmission + * which are to be filled with the CRC. + */ +#ifndef LWIP_6LOWPAN_802154_HW_CRC +#define LWIP_6LOWPAN_802154_HW_CRC 0 +#endif + +/** If LWIP_6LOWPAN_802154_HW_CRC==0, this can override the default slow + * implementation of the CRC used for 6LoWPAN over IEEE 802.15.4 (which uses + * a shift register). + */ +#ifndef LWIP_6LOWPAN_CALC_CRC +#define LWIP_6LOWPAN_CALC_CRC(buf, len) lowpan6_calc_crc(buf, len) +#endif + +/** Debug level for 6LoWPAN in general */ +#ifndef LWIP_LOWPAN6_DEBUG +#define LWIP_LOWPAN6_DEBUG LWIP_DBG_OFF +#endif + +/** Debug level for 6LoWPAN over IEEE 802.15.4 */ +#ifndef LWIP_LOWPAN6_802154_DEBUG +#define LWIP_LOWPAN6_802154_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_LOWPAN6_IP_COMPRESSED_DEBUG: enable compressed IP frame + * output debugging + */ +#ifndef LWIP_LOWPAN6_IP_COMPRESSED_DEBUG +#define LWIP_LOWPAN6_IP_COMPRESSED_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_LOWPAN6_DECOMPRESSION_DEBUG: enable decompression debug output + */ +#ifndef LWIP_LOWPAN6_DECOMPRESSION_DEBUG +#define LWIP_LOWPAN6_DECOMPRESSION_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG: enable decompressed IP frame + * output debugging */ +#ifndef LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG +#define LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS: + * Currently, the linux kernel driver for 6lowpan sets/clears a bit in + * the address, depending on the BD address (either public or not). 
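Like the other option headers, these defaults are overridden from lwipopts.h; an illustrative example:

/* lwipopts.h */
#define LWIP_6LOWPAN_802154_HW_CRC   1             /* the radio computes and checks the FCS */
#define LWIP_6LOWPAN_NUM_CONTEXTS    4             /* fewer compression contexts than the default 10 */
#define LWIP_LOWPAN6_DEBUG           LWIP_DBG_ON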
+ * Might not be RFC7668 conform, so you may select to do that (=1) or + * not (=0) */ +#ifndef LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS +#define LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS 1 +#endif + + +#endif /* LWIP_HDR_LOWPAN6_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h new file mode 100644 index 00000000..b2285228 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h @@ -0,0 +1,164 @@ +/* + * ccp.h - Definitions for PPP Compression Control Protocol. + * + * Copyright (c) 1994-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ccp.h,v 1.12 2004/11/04 10:02:26 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CCP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef CCP_H +#define CCP_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * CCP codes. + */ + +#define CCP_CONFREQ 1 +#define CCP_CONFACK 2 +#define CCP_TERMREQ 5 +#define CCP_TERMACK 6 +#define CCP_RESETREQ 14 +#define CCP_RESETACK 15 + +/* + * Max # bytes for a CCP option + */ + +#define CCP_MAX_OPTION_LENGTH 32 + +/* + * Parts of a CCP packet. + */ + +#define CCP_CODE(dp) ((dp)[0]) +#define CCP_ID(dp) ((dp)[1]) +#define CCP_LENGTH(dp) (((dp)[2] << 8) + (dp)[3]) +#define CCP_HDRLEN 4 + +#define CCP_OPT_CODE(dp) ((dp)[0]) +#define CCP_OPT_LENGTH(dp) ((dp)[1]) +#define CCP_OPT_MINLEN 2 + +#if BSDCOMPRESS_SUPPORT +/* + * Definitions for BSD-Compress. + */ + +#define CI_BSD_COMPRESS 21 /* config. option for BSD-Compress */ +#define CILEN_BSD_COMPRESS 3 /* length of config. option */ + +/* Macros for handling the 3rd byte of the BSD-Compress config option. */ +#define BSD_NBITS(x) ((x) & 0x1F) /* number of bits requested */ +#define BSD_VERSION(x) ((x) >> 5) /* version of option format */ +#define BSD_CURRENT_VERSION 1 /* current version number */ +#define BSD_MAKE_OPT(v, n) (((v) << 5) | (n)) + +#define BSD_MIN_BITS 9 /* smallest code size supported */ +#define BSD_MAX_BITS 15 /* largest code size supported */ +#endif /* BSDCOMPRESS_SUPPORT */ + +#if DEFLATE_SUPPORT +/* + * Definitions for Deflate. 
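The CCP accessor macros above read straight out of the packet buffer; an illustrative use (dp points at the CCP code byte, len is the number of bytes available):

static void ccp_peek(const u8_t *dp, int len)
{
  if ((len >= CCP_HDRLEN) && (CCP_LENGTH(dp) <= len)) {
    u8_t  code = CCP_CODE(dp);           /* e.g. CCP_CONFREQ == 1 */
    u8_t  id   = CCP_ID(dp);             /* identifier to echo in the reply */
    u16_t plen = (u16_t)CCP_LENGTH(dp);  /* big-endian length, header included */
    /* options start at dp + CCP_HDRLEN and end at dp + plen */
    (void)code; (void)id; (void)plen;
  }
}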
+ */ + +#define CI_DEFLATE 26 /* config option for Deflate */ +#define CI_DEFLATE_DRAFT 24 /* value used in original draft RFC */ +#define CILEN_DEFLATE 4 /* length of its config option */ + +#define DEFLATE_MIN_SIZE 9 +#define DEFLATE_MAX_SIZE 15 +#define DEFLATE_METHOD_VAL 8 +#define DEFLATE_SIZE(x) (((x) >> 4) + 8) +#define DEFLATE_METHOD(x) ((x) & 0x0F) +#define DEFLATE_MAKE_OPT(w) ((((w) - 8) << 4) + DEFLATE_METHOD_VAL) +#define DEFLATE_CHK_SEQUENCE 0 +#endif /* DEFLATE_SUPPORT */ + +#if MPPE_SUPPORT +/* + * Definitions for MPPE. + */ + +#define CI_MPPE 18 /* config option for MPPE */ +#define CILEN_MPPE 6 /* length of config option */ +#endif /* MPPE_SUPPORT */ + +#if PREDICTOR_SUPPORT +/* + * Definitions for other, as yet unsupported, compression methods. + */ + +#define CI_PREDICTOR_1 1 /* config option for Predictor-1 */ +#define CILEN_PREDICTOR_1 2 /* length of its config option */ +#define CI_PREDICTOR_2 2 /* config option for Predictor-2 */ +#define CILEN_PREDICTOR_2 2 /* length of its config option */ +#endif /* PREDICTOR_SUPPORT */ + +typedef struct ccp_options { +#if DEFLATE_SUPPORT + unsigned int deflate :1; /* do Deflate? */ + unsigned int deflate_correct :1; /* use correct code for deflate? */ + unsigned int deflate_draft :1; /* use draft RFC code for deflate? */ +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + unsigned int bsd_compress :1; /* do BSD Compress? */ +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + unsigned int predictor_1 :1; /* do Predictor-1? */ + unsigned int predictor_2 :1; /* do Predictor-2? */ +#endif /* PREDICTOR_SUPPORT */ + +#if MPPE_SUPPORT + u8_t mppe; /* MPPE bitfield */ +#endif /* MPPE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + u_short bsd_bits; /* # bits/code for BSD Compress */ +#endif /* BSDCOMPRESS_SUPPORT */ +#if DEFLATE_SUPPORT + u_short deflate_size; /* lg(window size) for Deflate */ +#endif /* DEFLATE_SUPPORT */ + u8_t method; /* code for chosen compression method */ +} ccp_options; + +extern const struct protent ccp_protent; + +void ccp_resetrequest(ppp_pcb *pcb); /* Issue a reset-request. */ + +#ifdef __cplusplus +} +#endif + +#endif /* CCP_H */ +#endif /* PPP_SUPPORT && CCP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h new file mode 100644 index 00000000..eb0269fe --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h @@ -0,0 +1,36 @@ +/* + * chap-md5.h - New CHAP/MD5 implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". 
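Continuing the Deflate definitions above: the configuration option byte packs lg(window size) minus 8 into the high nibble and the method (8) into the low nibble. A worked example:

u8_t ci     = DEFLATE_MAKE_OPT(15);  /* ((15 - 8) << 4) + DEFLATE_METHOD_VAL == 0x78 */
int  wsize  = DEFLATE_SIZE(ci);      /* (0x78 >> 4) + 8 == 15 */
int  method = DEFLATE_METHOD(ci);    /* 0x78 & 0x0F == 8 == DEFLATE_METHOD_VAL */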
+ * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +extern const struct chap_digest_type md5_digest; + +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h new file mode 100644 index 00000000..2d8cd9ca --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h @@ -0,0 +1,200 @@ +/* + * chap-new.c - New CHAP implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef CHAP_H +#define CHAP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * CHAP packets begin with a standard header with code, id, len (2 bytes). + */ +#define CHAP_HDRLEN 4 + +/* + * Values for the code field. + */ +#define CHAP_CHALLENGE 1 +#define CHAP_RESPONSE 2 +#define CHAP_SUCCESS 3 +#define CHAP_FAILURE 4 + +/* + * CHAP digest codes. + */ +#define CHAP_MD5 5 +#if MSCHAP_SUPPORT +#define CHAP_MICROSOFT 0x80 +#define CHAP_MICROSOFT_V2 0x81 +#endif /* MSCHAP_SUPPORT */ + +/* + * Semi-arbitrary limits on challenge and response fields. + */ +#define MAX_CHALLENGE_LEN 64 +#define MAX_RESPONSE_LEN 64 + +/* + * These limits apply to challenge and response packets we send. + * The +4 is the +1 that we actually need rounded up. 
+ */ +#define CHAL_MAX_PKTLEN (PPP_HDRLEN + CHAP_HDRLEN + 4 + MAX_CHALLENGE_LEN + MAXNAMELEN) +#define RESP_MAX_PKTLEN (PPP_HDRLEN + CHAP_HDRLEN + 4 + MAX_RESPONSE_LEN + MAXNAMELEN) + +/* bitmask of supported algorithms */ +#if MSCHAP_SUPPORT +#define MDTYPE_MICROSOFT_V2 0x1 +#define MDTYPE_MICROSOFT 0x2 +#endif /* MSCHAP_SUPPORT */ +#define MDTYPE_MD5 0x4 +#define MDTYPE_NONE 0 + +#if MSCHAP_SUPPORT +/* Return the digest alg. ID for the most preferred digest type. */ +#define CHAP_DIGEST(mdtype) \ + ((mdtype) & MDTYPE_MD5)? CHAP_MD5: \ + ((mdtype) & MDTYPE_MICROSOFT_V2)? CHAP_MICROSOFT_V2: \ + ((mdtype) & MDTYPE_MICROSOFT)? CHAP_MICROSOFT: \ + 0 +#else /* !MSCHAP_SUPPORT */ +#define CHAP_DIGEST(mdtype) \ + ((mdtype) & MDTYPE_MD5)? CHAP_MD5: \ + 0 +#endif /* MSCHAP_SUPPORT */ + +/* Return the bit flag (lsb set) for our most preferred digest type. */ +#define CHAP_MDTYPE(mdtype) ((mdtype) ^ ((mdtype) - 1)) & (mdtype) + +/* Return the bit flag for a given digest algorithm ID. */ +#if MSCHAP_SUPPORT +#define CHAP_MDTYPE_D(digest) \ + ((digest) == CHAP_MICROSOFT_V2)? MDTYPE_MICROSOFT_V2: \ + ((digest) == CHAP_MICROSOFT)? MDTYPE_MICROSOFT: \ + ((digest) == CHAP_MD5)? MDTYPE_MD5: \ + 0 +#else /* !MSCHAP_SUPPORT */ +#define CHAP_MDTYPE_D(digest) \ + ((digest) == CHAP_MD5)? MDTYPE_MD5: \ + 0 +#endif /* MSCHAP_SUPPORT */ + +/* Can we do the requested digest? */ +#if MSCHAP_SUPPORT +#define CHAP_CANDIGEST(mdtype, digest) \ + ((digest) == CHAP_MICROSOFT_V2)? (mdtype) & MDTYPE_MICROSOFT_V2: \ + ((digest) == CHAP_MICROSOFT)? (mdtype) & MDTYPE_MICROSOFT: \ + ((digest) == CHAP_MD5)? (mdtype) & MDTYPE_MD5: \ + 0 +#else /* !MSCHAP_SUPPORT */ +#define CHAP_CANDIGEST(mdtype, digest) \ + ((digest) == CHAP_MD5)? (mdtype) & MDTYPE_MD5: \ + 0 +#endif /* MSCHAP_SUPPORT */ + +/* + * The code for each digest type has to supply one of these. + */ +struct chap_digest_type { + int code; + +#if PPP_SERVER + /* + * Note: challenge and response arguments below are formatted as + * a length byte followed by the actual challenge/response data. + */ + void (*generate_challenge)(ppp_pcb *pcb, unsigned char *challenge); + int (*verify_response)(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space); +#endif /* PPP_SERVER */ + void (*make_response)(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *priv); + int (*check_success)(ppp_pcb *pcb, unsigned char *pkt, int len, unsigned char *priv); + void (*handle_failure)(ppp_pcb *pcb, unsigned char *pkt, int len); +}; + +/* + * Each interface is described by chap structure. 
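A worked example of the bitmask macros above (assuming MSCHAP_SUPPORT is enabled): CHAP_MDTYPE() isolates the least significant set bit of the mdtype mask, and CHAP_MDTYPE_D() maps a digest algorithm ID back to its flag.

int mdtype = MDTYPE_MICROSOFT | MDTYPE_MD5;  /* 0x2 | 0x4 == 0x6 */
int lsb    = CHAP_MDTYPE(mdtype);            /* (0x6 ^ 0x5) & 0x6 == 0x2 == MDTYPE_MICROSOFT */
int digest = CHAP_DIGEST(mdtype);            /* MD5 bit set, so this evaluates to CHAP_MD5 (5) */
int flag   = CHAP_MDTYPE_D(CHAP_MICROSOFT);  /* digest ID 0x80 maps back to MDTYPE_MICROSOFT */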
+ */ +#if CHAP_SUPPORT +typedef struct chap_client_state { + u8_t flags; + const char *name; + const struct chap_digest_type *digest; + unsigned char priv[64]; /* private area for digest's use */ +} chap_client_state; + +#if PPP_SERVER +typedef struct chap_server_state { + u8_t flags; + u8_t id; + const char *name; + const struct chap_digest_type *digest; + int challenge_xmits; + int challenge_pktlen; + unsigned char challenge[CHAL_MAX_PKTLEN]; +} chap_server_state; +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPORT */ + +#if 0 /* UNUSED */ +/* Hook for a plugin to validate CHAP challenge */ +extern int (*chap_verify_hook)(char *name, char *ourname, int id, + const struct chap_digest_type *digest, + unsigned char *challenge, unsigned char *response, + char *message, int message_space); +#endif /* UNUSED */ + +#if PPP_SERVER +/* Called by authentication code to start authenticating the peer. */ +extern void chap_auth_peer(ppp_pcb *pcb, const char *our_name, int digest_code); +#endif /* PPP_SERVER */ + +/* Called by auth. code to start authenticating us to the peer. */ +extern void chap_auth_with_peer(ppp_pcb *pcb, const char *our_name, int digest_code); + +/* Represents the CHAP protocol to the main pppd code */ +extern const struct protent chap_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* CHAP_H */ +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h new file mode 100644 index 00000000..07952911 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h @@ -0,0 +1,44 @@ +/* + * chap_ms.h - Challenge Handshake Authentication Protocol definitions. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + * $Id: chap_ms.h,v 1.13 2004/11/15 22:13:26 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MSCHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef CHAPMS_INCLUDE +#define CHAPMS_INCLUDE + +extern const struct chap_digest_type chapms_digest; +extern const struct chap_digest_type chapms2_digest; + +#endif /* CHAPMS_INCLUDE */ + +#endif /* PPP_SUPPORT && MSCHAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h new file mode 100644 index 00000000..3ee9aaf8 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h @@ -0,0 +1,169 @@ +/* + * eap.h - Extensible Authentication Protocol for PPP (RFC 2284) + * + * Copyright (c) 2001 by Sun Microsystems, Inc. + * All rights reserved. + * + * Non-exclusive rights to redistribute, modify, translate, and use + * this software in source and binary forms, in whole or in part, is + * hereby granted, provided that the above copyright notice is + * duplicated in any source form, and that neither the name of the + * copyright holder nor the author is used to endorse or promote + * products derived from this software. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Original version by James Carlson + * + * $Id: eap.h,v 1.2 2003/06/11 23:56:26 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && EAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPP_EAP_H +#define PPP_EAP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Packet header = Code, id, length. + */ +#define EAP_HEADERLEN 4 + + +/* EAP message codes. */ +#define EAP_REQUEST 1 +#define EAP_RESPONSE 2 +#define EAP_SUCCESS 3 +#define EAP_FAILURE 4 + +/* EAP types */ +#define EAPT_IDENTITY 1 +#define EAPT_NOTIFICATION 2 +#define EAPT_NAK 3 /* (response only) */ +#define EAPT_MD5CHAP 4 +#define EAPT_OTP 5 /* One-Time Password; RFC 1938 */ +#define EAPT_TOKEN 6 /* Generic Token Card */ +/* 7 and 8 are unassigned. 
*/ +#define EAPT_RSA 9 /* RSA Public Key Authentication */ +#define EAPT_DSS 10 /* DSS Unilateral */ +#define EAPT_KEA 11 /* KEA */ +#define EAPT_KEA_VALIDATE 12 /* KEA-VALIDATE */ +#define EAPT_TLS 13 /* EAP-TLS */ +#define EAPT_DEFENDER 14 /* Defender Token (AXENT) */ +#define EAPT_W2K 15 /* Windows 2000 EAP */ +#define EAPT_ARCOT 16 /* Arcot Systems */ +#define EAPT_CISCOWIRELESS 17 /* Cisco Wireless */ +#define EAPT_NOKIACARD 18 /* Nokia IP smart card */ +#define EAPT_SRP 19 /* Secure Remote Password */ +/* 20 is deprecated */ + +/* EAP SRP-SHA1 Subtypes */ +#define EAPSRP_CHALLENGE 1 /* Request 1 - Challenge */ +#define EAPSRP_CKEY 1 /* Response 1 - Client Key */ +#define EAPSRP_SKEY 2 /* Request 2 - Server Key */ +#define EAPSRP_CVALIDATOR 2 /* Response 2 - Client Validator */ +#define EAPSRP_SVALIDATOR 3 /* Request 3 - Server Validator */ +#define EAPSRP_ACK 3 /* Response 3 - final ack */ +#define EAPSRP_LWRECHALLENGE 4 /* Req/resp 4 - Lightweight rechal */ + +#define SRPVAL_EBIT 0x00000001 /* Use shared key for ECP */ + +#define SRP_PSEUDO_ID "pseudo_" +#define SRP_PSEUDO_LEN 7 + +#define MD5_SIGNATURE_SIZE 16 +#define EAP_MIN_CHALLENGE_LENGTH 17 +#define EAP_MAX_CHALLENGE_LENGTH 24 +#define EAP_MIN_MAX_POWER_OF_TWO_CHALLENGE_LENGTH 3 /* 2^3-1 = 7, 17+7 = 24 */ + +#define EAP_STATES \ + "Initial", "Pending", "Closed", "Listen", "Identify", \ + "SRP1", "SRP2", "SRP3", "MD5Chall", "Open", "SRP4", "BadAuth" + +#define eap_client_active(pcb) ((pcb)->eap.es_client.ea_state == eapListen) +#if PPP_SERVER +#define eap_server_active(pcb) \ + ((pcb)->eap.es_server.ea_state >= eapIdentify && \ + (pcb)->eap.es_server.ea_state <= eapMD5Chall) +#endif /* PPP_SERVER */ + +/* + * Complete EAP state for one PPP session. + */ +enum eap_state_code { + eapInitial = 0, /* No EAP authentication yet requested */ + eapPending, /* Waiting for LCP (no timer) */ + eapClosed, /* Authentication not in use */ + eapListen, /* Client ready (and timer running) */ + eapIdentify, /* EAP Identify sent */ + eapSRP1, /* Sent EAP SRP-SHA1 Subtype 1 */ + eapSRP2, /* Sent EAP SRP-SHA1 Subtype 2 */ + eapSRP3, /* Sent EAP SRP-SHA1 Subtype 3 */ + eapMD5Chall, /* Sent MD5-Challenge */ + eapOpen, /* Completed authentication */ + eapSRP4, /* Sent EAP SRP-SHA1 Subtype 4 */ + eapBadAuth /* Failed authentication */ +}; + +struct eap_auth { + const char *ea_name; /* Our name */ + char ea_peer[MAXNAMELEN +1]; /* Peer's name */ + void *ea_session; /* Authentication library linkage */ + u_char *ea_skey; /* Shared encryption key */ + u_short ea_namelen; /* Length of our name */ + u_short ea_peerlen; /* Length of peer's name */ + enum eap_state_code ea_state; + u_char ea_id; /* Current id */ + u_char ea_requests; /* Number of Requests sent/received */ + u_char ea_responses; /* Number of Responses */ + u_char ea_type; /* One of EAPT_* */ + u32_t ea_keyflags; /* SRP shared key usage flags */ +}; + +#ifndef EAP_MAX_CHALLENGE_LENGTH +#define EAP_MAX_CHALLENGE_LENGTH 24 +#endif +typedef struct eap_state { + struct eap_auth es_client; /* Client (authenticatee) data */ +#if PPP_SERVER + struct eap_auth es_server; /* Server (authenticator) data */ +#endif /* PPP_SERVER */ + int es_savedtime; /* Saved timeout */ + int es_rechallenge; /* EAP rechallenge interval */ + int es_lwrechallenge; /* SRP lightweight rechallenge inter */ + u8_t es_usepseudo; /* Use SRP Pseudonym if offered one */ + int es_usedpseudo; /* Set if we already sent PN */ + int es_challen; /* Length of challenge string */ + u_char es_challenge[EAP_MAX_CHALLENGE_LENGTH]; +} 
eap_state; + +/* + * Timeouts. + */ +#if 0 /* moved to ppp_opts.h */ +#define EAP_DEFTIMEOUT 3 /* Timeout (seconds) for rexmit */ +#define EAP_DEFTRANSMITS 10 /* max # times to transmit */ +#define EAP_DEFREQTIME 20 /* Time to wait for peer request */ +#define EAP_DEFALLOWREQ 20 /* max # times to accept requests */ +#endif /* moved to ppp_opts.h */ + +void eap_authwithpeer(ppp_pcb *pcb, const char *localname); +void eap_authpeer(ppp_pcb *pcb, const char *localname); + +extern const struct protent eap_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_EAP_H */ + +#endif /* PPP_SUPPORT && EAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h new file mode 100644 index 00000000..d8808c3a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h @@ -0,0 +1,62 @@ +/* + * ecp.h - Definitions for PPP Encryption Control Protocol. + * + * Copyright (c) 2002 Google, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ecp.h,v 1.2 2003/01/10 07:12:36 fcusack Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && ECP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef ECP_H +#define ECP_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct ecp_options { + bool required; /* Is ECP required? */ + unsigned enctype; /* Encryption type */ +} ecp_options; + +extern fsm ecp_fsm[]; +extern ecp_options ecp_wantoptions[]; +extern ecp_options ecp_gotoptions[]; +extern ecp_options ecp_allowoptions[]; +extern ecp_options ecp_hisoptions[]; + +extern const struct protent ecp_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* ECP_H */ +#endif /* PPP_SUPPORT && ECP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h new file mode 100644 index 00000000..5adeb482 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h @@ -0,0 +1,102 @@ +/* + * eui64.h - EUI64 routines for IPv6CP. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: eui64.h,v 1.6 2002/12/04 23:03:32 paulus Exp $ +*/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef EUI64_H +#define EUI64_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * @todo: + * + * Maybe this should be done by processing struct in6_addr directly... + */ +typedef union +{ + u8_t e8[8]; + u16_t e16[4]; + u32_t e32[2]; +} eui64_t; + +#define eui64_iszero(e) (((e).e32[0] | (e).e32[1]) == 0) +#define eui64_equals(e, o) (((e).e32[0] == (o).e32[0]) && \ + ((e).e32[1] == (o).e32[1])) +#define eui64_zero(e) (e).e32[0] = (e).e32[1] = 0; + +#define eui64_copy(s, d) memcpy(&(d), &(s), sizeof(eui64_t)) + +#define eui64_magic(e) do { \ + (e).e32[0] = magic(); \ + (e).e32[1] = magic(); \ + (e).e8[0] &= ~2; \ + } while (0) +#define eui64_magic_nz(x) do { \ + eui64_magic(x); \ + } while (eui64_iszero(x)) +#define eui64_magic_ne(x, y) do { \ + eui64_magic(x); \ + } while (eui64_equals(x, y)) + +#define eui64_get(ll, cp) do { \ + eui64_copy((*cp), (ll)); \ + (cp) += sizeof(eui64_t); \ + } while (0) + +#define eui64_put(ll, cp) do { \ + eui64_copy((ll), (*cp)); \ + (cp) += sizeof(eui64_t); \ + } while (0) + +#define eui64_set32(e, l) do { \ + (e).e32[0] = 0; \ + (e).e32[1] = lwip_htonl(l); \ + } while (0) +#define eui64_setlo32(e, l) eui64_set32(e, l) + +char *eui64_ntoa(eui64_t); /* Returns ascii representation of id */ + +#ifdef __cplusplus +} +#endif + +#endif /* EUI64_H */ +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h new file mode 100644 index 00000000..8dec700e --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h @@ -0,0 +1,182 @@ +/* + * fsm.h - {Link, IP} Control Protocol Finite State Machine definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: fsm.h,v 1.10 2004/11/13 02:28:15 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef FSM_H +#define FSM_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Packet header = Code, id, length. + */ +#define HEADERLEN 4 + + +/* + * CP (LCP, IPCP, etc.) codes. + */ +#define CONFREQ 1 /* Configuration Request */ +#define CONFACK 2 /* Configuration Ack */ +#define CONFNAK 3 /* Configuration Nak */ +#define CONFREJ 4 /* Configuration Reject */ +#define TERMREQ 5 /* Termination Request */ +#define TERMACK 6 /* Termination Ack */ +#define CODEREJ 7 /* Code Reject */ + + +/* + * Each FSM is described by an fsm structure and fsm callbacks. 
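+ *
+ * Illustrative sketch only (not part of the upstream pppd/lwIP sources):
+ * each control protocol (LCP, IPCP, ...) owns one fsm instance of the
+ * structure below, points it at a static fsm_callbacks table and then
+ * drives it with the fsm_* functions declared at the end of this header.
+ * The xcp_* names are hypothetical placeholders; pcb is an assumed
+ * ppp_pcb pointer and PPP_LCP comes from ppp_impl.h:
+ *
+ *   static const fsm_callbacks xcp_callbacks = {
+ *     xcp_resetci, xcp_cilen, xcp_addci, xcp_ackci, xcp_nakci, xcp_rejci,
+ *     xcp_reqci, xcp_up, xcp_down,
+ *     NULL, NULL, NULL, NULL, NULL,   // starting/finished/protreject/retransmit/extcode unused
+ *     "XCP"
+ *   };
+ *
+ *   fsm *f = &pcb->lcp_fsm;           // or ipcp_fsm, ccp_fsm, ipv6cp_fsm
+ *   f->pcb = pcb;
+ *   f->protocol = PPP_LCP;            // Data Link Layer Protocol field value
+ *   f->callbacks = &xcp_callbacks;
+ *   fsm_init(f);                      // state is PPP_FSM_INITIAL
+ *   fsm_lowerup(f);                   // lower layer came up
+ *   fsm_open(f);                      // start sending Configure-Requests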
+ */ +typedef struct fsm { + ppp_pcb *pcb; /* PPP Interface */ + const struct fsm_callbacks *callbacks; /* Callback routines */ + const char *term_reason; /* Reason for closing protocol */ + u8_t seen_ack; /* Have received valid Ack/Nak/Rej to Req */ + /* -- This is our only flag, we might use u_int :1 if we have more flags */ + u16_t protocol; /* Data Link Layer Protocol field value */ + u8_t state; /* State */ + u8_t flags; /* Contains option bits */ + u8_t id; /* Current id */ + u8_t reqid; /* Current request id */ + u8_t retransmits; /* Number of retransmissions left */ + u8_t nakloops; /* Number of nak loops since last ack */ + u8_t rnakloops; /* Number of naks received */ + u8_t maxnakloops; /* Maximum number of nak loops tolerated + (necessary because IPCP require a custom large max nak loops value) */ + u8_t term_reason_len; /* Length of term_reason */ +} fsm; + + +typedef struct fsm_callbacks { + void (*resetci) /* Reset our Configuration Information */ + (fsm *); + int (*cilen) /* Length of our Configuration Information */ + (fsm *); + void (*addci) /* Add our Configuration Information */ + (fsm *, u_char *, int *); + int (*ackci) /* ACK our Configuration Information */ + (fsm *, u_char *, int); + int (*nakci) /* NAK our Configuration Information */ + (fsm *, u_char *, int, int); + int (*rejci) /* Reject our Configuration Information */ + (fsm *, u_char *, int); + int (*reqci) /* Request peer's Configuration Information */ + (fsm *, u_char *, int *, int); + void (*up) /* Called when fsm reaches PPP_FSM_OPENED state */ + (fsm *); + void (*down) /* Called when fsm leaves PPP_FSM_OPENED state */ + (fsm *); + void (*starting) /* Called when we want the lower layer */ + (fsm *); + void (*finished) /* Called when we don't want the lower layer */ + (fsm *); + void (*protreject) /* Called when Protocol-Reject received */ + (int); + void (*retransmit) /* Retransmission is necessary */ + (fsm *); + int (*extcode) /* Called when unknown code received */ + (fsm *, int, int, u_char *, int); + const char *proto_name; /* String name for protocol (for messages) */ +} fsm_callbacks; + + +/* + * Link states. + */ +#define PPP_FSM_INITIAL 0 /* Down, hasn't been opened */ +#define PPP_FSM_STARTING 1 /* Down, been opened */ +#define PPP_FSM_CLOSED 2 /* Up, hasn't been opened */ +#define PPP_FSM_STOPPED 3 /* Open, waiting for down event */ +#define PPP_FSM_CLOSING 4 /* Terminating the connection, not open */ +#define PPP_FSM_STOPPING 5 /* Terminating, but open */ +#define PPP_FSM_REQSENT 6 /* We've sent a Config Request */ +#define PPP_FSM_ACKRCVD 7 /* We've received a Config Ack */ +#define PPP_FSM_ACKSENT 8 /* We've sent a Config Ack */ +#define PPP_FSM_OPENED 9 /* Connection available */ + + +/* + * Flags - indicate options controlling FSM operation + */ +#define OPT_PASSIVE 1 /* Don't die if we don't get a response */ +#define OPT_RESTART 2 /* Treat 2nd OPEN as DOWN, UP */ +#define OPT_SILENT 4 /* Wait for peer to speak first */ + + +/* + * Timeouts. 
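+ *
+ * The compile-time defaults below were moved to ppp_opts.h; the values the
+ * state machine actually uses are carried per interface in ppp_settings
+ * (fsm_timeout_time, fsm_max_conf_req_transmits, fsm_max_term_transmits,
+ * fsm_max_nak_loops, see ppp.h). A minimal sketch, assuming it is
+ * acceptable to write those settings directly while the PPP is still in
+ * the dead phase:
+ *
+ *   pcb->settings.fsm_timeout_time = 3;             // seconds per retransmit
+ *   pcb->settings.fsm_max_conf_req_transmits = 10;  // Configure-Request limit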
+ */ +#if 0 /* moved to ppp_opts.h */ +#define DEFTIMEOUT 3 /* Timeout time in seconds */ +#define DEFMAXTERMREQS 2 /* Maximum Terminate-Request transmissions */ +#define DEFMAXCONFREQS 10 /* Maximum Configure-Request transmissions */ +#define DEFMAXNAKLOOPS 5 /* Maximum number of nak loops */ +#endif /* moved to ppp_opts.h */ + + +/* + * Prototypes + */ +void fsm_init(fsm *f); +void fsm_lowerup(fsm *f); +void fsm_lowerdown(fsm *f); +void fsm_open(fsm *f); +void fsm_close(fsm *f, const char *reason); +void fsm_input(fsm *f, u_char *inpacket, int l); +void fsm_protreject(fsm *f); +void fsm_sdata(fsm *f, u_char code, u_char id, const u_char *data, int datalen); + +#ifdef __cplusplus +} +#endif + +#endif /* FSM_H */ +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h new file mode 100644 index 00000000..32fdd1c6 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h @@ -0,0 +1,134 @@ +/* + * ipcp.h - IP Control Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ipcp.h,v 1.14 2002/12/04 23:03:32 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV4_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef IPCP_H +#define IPCP_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Options. 
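+ *
+ * These are the IPCP configuration option type codes put on the wire; the
+ * values they carry live in the ipcp_options structure below, which an
+ * application normally fills through the ppp_set_ipcp_* helpers in ppp.h.
+ * A minimal sketch (addresses are placeholders, LWIP_DNS assumed enabled,
+ * pcb is an already created ppp_pcb):
+ *
+ *   ip4_addr_t our, his;
+ *   IP4_ADDR(&our, 192, 168, 7, 1);
+ *   IP4_ADDR(&his, 192, 168, 7, 2);
+ *   ppp_set_ipcp_ouraddr(pcb, &our);   // also sets ask_for_local
+ *   ppp_set_ipcp_hisaddr(pcb, &his);
+ *   ppp_set_usepeerdns(pcb, 1);        // ask peer for CI_MS_DNS1/CI_MS_DNS2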
+ */ +#define CI_ADDRS 1 /* IP Addresses */ +#if VJ_SUPPORT +#define CI_COMPRESSTYPE 2 /* Compression Type */ +#endif /* VJ_SUPPORT */ +#define CI_ADDR 3 + +#if LWIP_DNS +#define CI_MS_DNS1 129 /* Primary DNS value */ +#define CI_MS_DNS2 131 /* Secondary DNS value */ +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ +#define CI_MS_WINS1 130 /* Primary WINS value */ +#define CI_MS_WINS2 132 /* Secondary WINS value */ +#endif /* UNUSED - WINS */ + +#if VJ_SUPPORT +#define MAX_STATES 16 /* from slcompress.h */ + +#define IPCP_VJMODE_OLD 1 /* "old" mode (option # = 0x0037) */ +#define IPCP_VJMODE_RFC1172 2 /* "old-rfc"mode (option # = 0x002d) */ +#define IPCP_VJMODE_RFC1332 3 /* "new-rfc"mode (option # = 0x002d, */ + /* maxslot and slot number compression) */ + +#define IPCP_VJ_COMP 0x002d /* current value for VJ compression option*/ +#define IPCP_VJ_COMP_OLD 0x0037 /* "old" (i.e, broken) value for VJ */ + /* compression option*/ +#endif /* VJ_SUPPORT */ + +typedef struct ipcp_options { + unsigned int neg_addr :1; /* Negotiate IP Address? */ + unsigned int old_addrs :1; /* Use old (IP-Addresses) option? */ + unsigned int req_addr :1; /* Ask peer to send IP address? */ +#if 0 /* UNUSED */ + unsigned int default_route :1; /* Assign default route through interface? */ + unsigned int replace_default_route :1; /* Replace default route through interface? */ +#endif /* UNUSED */ +#if 0 /* UNUSED - PROXY ARP */ + unsigned int proxy_arp :1; /* Make proxy ARP entry for peer? */ +#endif /* UNUSED - PROXY ARP */ +#if VJ_SUPPORT + unsigned int neg_vj :1; /* Van Jacobson Compression? */ + unsigned int old_vj :1; /* use old (short) form of VJ option? */ + unsigned int cflag :1; +#endif /* VJ_SUPPORT */ + unsigned int accept_local :1; /* accept peer's value for ouraddr */ + unsigned int accept_remote :1; /* accept peer's value for hisaddr */ +#if LWIP_DNS + unsigned int req_dns1 :1; /* Ask peer to send primary DNS address? */ + unsigned int req_dns2 :1; /* Ask peer to send secondary DNS address? */ +#endif /* LWIP_DNS */ + + u32_t ouraddr, hisaddr; /* Addresses in NETWORK BYTE ORDER */ +#if LWIP_DNS + u32_t dnsaddr[2]; /* Primary and secondary MS DNS entries */ +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + u32_t winsaddr[2]; /* Primary and secondary MS WINS entries */ +#endif /* UNUSED - WINS */ + +#if VJ_SUPPORT + u16_t vj_protocol; /* protocol value to use in VJ option */ + u8_t maxslotindex; /* values for RFC1332 VJ compression neg. */ +#endif /* VJ_SUPPORT */ +} ipcp_options; + +#if 0 /* UNUSED, already defined by lwIP */ +char *ip_ntoa (u32_t); +#endif /* UNUSED, already defined by lwIP */ + +extern const struct protent ipcp_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* IPCP_H */ +#endif /* PPP_SUPPORT && PPP_IPV4_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h new file mode 100644 index 00000000..90999738 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h @@ -0,0 +1,191 @@ +/* + * ipv6cp.h - PPP IPV6 Control Protocol. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* Original version, based on RFC2023 : + + Copyright (c) 1995, 1996, 1997 Francis.Dupont@inria.fr, INRIA Rocquencourt, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Copyright (c) 1998, 1999 Francis.Dupont@inria.fr, GIE DYADE, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Ce travail a été fait au sein du GIE DYADE (Groupement d'Intérêt + Économique ayant pour membres BULL S.A. et l'INRIA). + + Ce logiciel informatique est disponible aux conditions + usuelles dans la recherche, c'est-à-dire qu'il peut + être utilisé, copié, modifié, distribué à l'unique + condition que ce texte soit conservé afin que + l'origine de ce logiciel soit reconnue. + + Le nom de l'Institut National de Recherche en Informatique + et en Automatique (INRIA), de l'IMAG, ou d'une personne morale + ou physique ayant participé à l'élaboration de ce logiciel ne peut + être utilisé sans son accord préalable explicite. + + Ce logiciel est fourni tel quel sans aucune garantie, + support ou responsabilité d'aucune sorte. + Ce logiciel est dérivé de sources d'origine + "University of California at Berkeley" et + "Digital Equipment Corporation" couvertes par des copyrights. + + L'Institut d'Informatique et de Mathématiques Appliquées de Grenoble (IMAG) + est une fédération d'unités mixtes de recherche du CNRS, de l'Institut National + Polytechnique de Grenoble et de l'Université Joseph Fourier regroupant + sept laboratoires dont le laboratoire Logiciels, Systèmes, Réseaux (LSR). + + This work has been done in the context of GIE DYADE (joint R & D venture + between BULL S.A. and INRIA). + + This software is available with usual "research" terms + with the aim of retain credits of the software. + Permission to use, copy, modify and distribute this software for any + purpose and without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies, + and the name of INRIA, IMAG, or any contributor not be used in advertising + or publicity pertaining to this material without the prior explicit + permission. The software is provided "as is" without any + warranties, support or liabilities of any kind. + This software is derived from source code from + "University of California at Berkeley" and + "Digital Equipment Corporation" protected by copyrights. 
+ + Grenoble's Institute of Computer Science and Applied Mathematics (IMAG) + is a federation of seven research units funded by the CNRS, National + Polytechnic Institute of Grenoble and University Joseph Fourier. + The research unit in Software, Systems, Networks (LSR) is member of IMAG. +*/ + +/* + * Derived from : + * + * + * ipcp.h - IP Control Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ipv6cp.h,v 1.7 2002/12/04 23:03:32 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef IPV6CP_H +#define IPV6CP_H + +#include "eui64.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Options. + */ +#define CI_IFACEID 1 /* Interface Identifier */ +#ifdef IPV6CP_COMP +#define CI_COMPRESSTYPE 2 /* Compression Type */ +#endif /* IPV6CP_COMP */ + +/* No compression types yet defined. + *#define IPV6CP_COMP 0x004f + */ +typedef struct ipv6cp_options { + unsigned int neg_ifaceid :1; /* Negotiate interface identifier? */ + unsigned int req_ifaceid :1; /* Ask peer to send interface identifier? */ + unsigned int accept_local :1; /* accept peer's value for iface id? */ + unsigned int opt_local :1; /* ourtoken set by option */ + unsigned int opt_remote :1; /* histoken set by option */ + unsigned int use_ip :1; /* use IP as interface identifier */ +#if 0 + unsigned int use_persistent :1; /* use uniquely persistent value for address */ +#endif +#ifdef IPV6CP_COMP + unsigned int neg_vj :1; /* Van Jacobson Compression? 
*/ +#endif /* IPV6CP_COMP */ + +#ifdef IPV6CP_COMP + u_short vj_protocol; /* protocol value to use in VJ option */ +#endif /* IPV6CP_COMP */ + eui64_t ourid, hisid; /* Interface identifiers */ +} ipv6cp_options; + +extern const struct protent ipv6cp_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* IPV6CP_H */ +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h new file mode 100644 index 00000000..18ad1cb2 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h @@ -0,0 +1,179 @@ +/* + * lcp.h - Link Control Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: lcp.h,v 1.20 2004/11/14 22:53:42 carlsonj Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef LCP_H +#define LCP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Options. 
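+ *
+ * These are the LCP configuration option type codes carried in
+ * Configure-Request/Ack/Nak/Reject packets. Which of them this end
+ * actually negotiates is controlled by the lcp_options bit-fields below,
+ * normally set through the ppp_set_* macros in ppp.h. A minimal sketch
+ * for a serial (PPPoS-style) link, pcb being an already created ppp_pcb:
+ *
+ *   ppp_set_asyncmap(pcb, 0x000A0000);  // CI_ASYNCMAP: escape XON (0x11) and XOFF (0x13)
+ *   ppp_set_neg_pcomp(pcb, 1);          // CI_PCOMPRESSION
+ *   ppp_set_neg_accomp(pcb, 1);         // CI_ACCOMPRESSION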
+ */ +#define CI_VENDOR 0 /* Vendor Specific */ +#define CI_MRU 1 /* Maximum Receive Unit */ +#define CI_ASYNCMAP 2 /* Async Control Character Map */ +#define CI_AUTHTYPE 3 /* Authentication Type */ +#define CI_QUALITY 4 /* Quality Protocol */ +#define CI_MAGICNUMBER 5 /* Magic Number */ +#define CI_PCOMPRESSION 7 /* Protocol Field Compression */ +#define CI_ACCOMPRESSION 8 /* Address/Control Field Compression */ +#define CI_FCSALTERN 9 /* FCS-Alternatives */ +#define CI_SDP 10 /* Self-Describing-Pad */ +#define CI_NUMBERED 11 /* Numbered-Mode */ +#define CI_CALLBACK 13 /* callback */ +#define CI_MRRU 17 /* max reconstructed receive unit; multilink */ +#define CI_SSNHF 18 /* short sequence numbers for multilink */ +#define CI_EPDISC 19 /* endpoint discriminator */ +#define CI_MPPLUS 22 /* Multi-Link-Plus-Procedure */ +#define CI_LDISC 23 /* Link-Discriminator */ +#define CI_LCPAUTH 24 /* LCP Authentication */ +#define CI_COBS 25 /* Consistent Overhead Byte Stuffing */ +#define CI_PREFELIS 26 /* Prefix Elision */ +#define CI_MPHDRFMT 27 /* MP Header Format */ +#define CI_I18N 28 /* Internationalization */ +#define CI_SDL 29 /* Simple Data Link */ + +/* + * LCP-specific packet types (code numbers). + */ +#define PROTREJ 8 /* Protocol Reject */ +#define ECHOREQ 9 /* Echo Request */ +#define ECHOREP 10 /* Echo Reply */ +#define DISCREQ 11 /* Discard Request */ +#define IDENTIF 12 /* Identification */ +#define TIMEREM 13 /* Time Remaining */ + +/* Value used as data for CI_CALLBACK option */ +#define CBCP_OPT 6 /* Use callback control protocol */ + +#if 0 /* moved to ppp_opts.h */ +#define DEFMRU 1500 /* Try for this */ +#define MINMRU 128 /* No MRUs below this */ +#define MAXMRU 16384 /* Normally limit MRU to this */ +#endif /* moved to ppp_opts.h */ + +/* An endpoint discriminator, used with multilink. */ +#define MAX_ENDP_LEN 20 /* maximum length of discriminator value */ +struct epdisc { + unsigned char class_; /* -- The word "class" is reserved in C++. */ + unsigned char length; + unsigned char value[MAX_ENDP_LEN]; +}; + +/* + * The state of options is described by an lcp_options structure. + */ +typedef struct lcp_options { + unsigned int passive :1; /* Don't die if we don't get a response */ + unsigned int silent :1; /* Wait for the other end to start first */ +#if 0 /* UNUSED */ + unsigned int restart :1; /* Restart vs. exit after close */ +#endif /* UNUSED */ + unsigned int neg_mru :1; /* Negotiate the MRU? */ + unsigned int neg_asyncmap :1; /* Negotiate the async map? */ +#if PAP_SUPPORT + unsigned int neg_upap :1; /* Ask for UPAP authentication? */ +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + unsigned int neg_chap :1; /* Ask for CHAP authentication? */ +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + unsigned int neg_eap :1; /* Ask for EAP authentication? */ +#endif /* EAP_SUPPORT */ + unsigned int neg_magicnumber :1; /* Ask for magic number? */ + unsigned int neg_pcompression :1; /* HDLC Protocol Field Compression? */ + unsigned int neg_accompression :1; /* HDLC Address/Control Field Compression? 
*/ +#if LQR_SUPPORT + unsigned int neg_lqr :1; /* Negotiate use of Link Quality Reports */ +#endif /* LQR_SUPPORT */ + unsigned int neg_cbcp :1; /* Negotiate use of CBCP */ +#ifdef HAVE_MULTILINK + unsigned int neg_mrru :1; /* negotiate multilink MRRU */ +#endif /* HAVE_MULTILINK */ + unsigned int neg_ssnhf :1; /* negotiate short sequence numbers */ + unsigned int neg_endpoint :1; /* negotiate endpoint discriminator */ + + u16_t mru; /* Value of MRU */ +#ifdef HAVE_MULTILINK + u16_t mrru; /* Value of MRRU, and multilink enable */ +#endif /* MULTILINK */ +#if CHAP_SUPPORT + u8_t chap_mdtype; /* which MD types (hashing algorithm) */ +#endif /* CHAP_SUPPORT */ + u32_t asyncmap; /* Value of async map */ + u32_t magicnumber; + u8_t numloops; /* Number of loops during magic number neg. */ +#if LQR_SUPPORT + u32_t lqr_period; /* Reporting period for LQR 1/100ths second */ +#endif /* LQR_SUPPORT */ + struct epdisc endpoint; /* endpoint discriminator */ +} lcp_options; + +void lcp_open(ppp_pcb *pcb); +void lcp_close(ppp_pcb *pcb, const char *reason); +void lcp_lowerup(ppp_pcb *pcb); +void lcp_lowerdown(ppp_pcb *pcb); +void lcp_sprotrej(ppp_pcb *pcb, u_char *p, int len); /* send protocol reject */ + +extern const struct protent lcp_protent; + +#if 0 /* moved to ppp_opts.h */ +/* Default number of times we receive our magic number from the peer + before deciding the link is looped-back. */ +#define DEFLOOPBACKFAIL 10 +#endif /* moved to ppp_opts.h */ + +#ifdef __cplusplus +} +#endif + +#endif /* LCP_H */ +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h new file mode 100644 index 00000000..a165e18f --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h @@ -0,0 +1,130 @@ +/* + * magic.h - PPP Magic Number definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: magic.h,v 1.5 2003/06/11 23:56:26 paulus Exp $ + */ +/***************************************************************************** +* randm.h - Random number generator header file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* Copyright (c) 1998 Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 98-05-29 Guy Lancaster , Global Election Systems Inc. +* Extracted from avos. +*****************************************************************************/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef MAGIC_H +#define MAGIC_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** +*** PUBLIC FUNCTIONS *** +***********************/ + +/* + * Initialize the random number generator. + */ +void magic_init(void); + +/* + * Randomize our random seed value. To be called for truely random events + * such as user operations and network traffic. + */ +void magic_randomize(void); + +/* + * Return a new random number. + */ +u32_t magic(void); /* Returns the next magic number */ + +/* + * Fill buffer with random bytes + * + * Use the random pool to generate random data. This degrades to pseudo + * random when used faster than randomness is supplied using magic_churnrand(). + * Thus it's important to make sure that the results of this are not + * published directly because one could predict the next result to at + * least some degree. Also, it's important to get a good seed before + * the first use. + */ +void magic_random_bytes(unsigned char *buf, u32_t buf_len); + +/* + * Return a new random number between 0 and (2^pow)-1 included. 
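+ * For example, magic_pow(8) returns a value in [0, 255] and magic_pow(3)
+ * a value in [0, 7]. A tiny illustrative use, e.g. a randomised back-off
+ * of up to about one second:
+ *
+ *   u32_t backoff_ms = magic_pow(10);   // 0..1023 milliseconds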
+ */ +u32_t magic_pow(u8_t pow); + +#ifdef __cplusplus +} +#endif + +#endif /* MAGIC_H */ + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h new file mode 100644 index 00000000..5de11284 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h @@ -0,0 +1,181 @@ +/* + * mppe.h - Definitions for MPPE + * + * Copyright (c) 2008 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MPPE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef MPPE_H +#define MPPE_H + +#include "netif/ppp/pppcrypt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MPPE_PAD 4 /* MPPE growth per frame */ +#define MPPE_MAX_KEY_LEN 16 /* largest key length (128-bit) */ + +/* option bits for ccp_options.mppe */ +#define MPPE_OPT_40 0x01 /* 40 bit */ +#define MPPE_OPT_128 0x02 /* 128 bit */ +#define MPPE_OPT_STATEFUL 0x04 /* stateful mode */ +/* unsupported opts */ +#define MPPE_OPT_56 0x08 /* 56 bit */ +#define MPPE_OPT_MPPC 0x10 /* MPPC compression */ +#define MPPE_OPT_D 0x20 /* Unknown */ +#define MPPE_OPT_UNSUPPORTED (MPPE_OPT_56|MPPE_OPT_MPPC|MPPE_OPT_D) +#define MPPE_OPT_UNKNOWN 0x40 /* Bits !defined in RFC 3078 were set */ + +/* + * This is not nice ... the alternative is a bitfield struct though. + * And unfortunately, we cannot share the same bits for the option + * names above since C and H are the same bit. We could do a u_int32 + * but then we have to do a lwip_htonl() all the time and/or we still need + * to know which octet is which. + */ +#define MPPE_C_BIT 0x01 /* MPPC */ +#define MPPE_D_BIT 0x10 /* Obsolete, usage unknown */ +#define MPPE_L_BIT 0x20 /* 40-bit */ +#define MPPE_S_BIT 0x40 /* 128-bit */ +#define MPPE_M_BIT 0x80 /* 56-bit, not supported */ +#define MPPE_H_BIT 0x01 /* Stateless (in a different byte) */ + +/* Does not include H bit; used for least significant octet only. 
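+ *
+ * Worked example (see the MPPE_OPTS_TO_CI() macro further down): a
+ * stateless 128-bit request, i.e. opts == MPPE_OPT_128 without
+ * MPPE_OPT_STATEFUL, is encoded into the four CI octets as
+ *
+ *   ci[0] = MPPE_H_BIT; ci[1] = 0x00; ci[2] = 0x00; ci[3] = MPPE_S_BIT;
+ *
+ * i.e. the RFC 3078 supported-bits field 0x01 0x00 0x00 0x40.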
*/ +#define MPPE_ALL_BITS (MPPE_D_BIT|MPPE_L_BIT|MPPE_S_BIT|MPPE_M_BIT|MPPE_H_BIT) + +/* Build a CI from mppe opts (see RFC 3078) */ +#define MPPE_OPTS_TO_CI(opts, ci) \ + do { \ + u_char *ptr = ci; /* u_char[4] */ \ + \ + /* H bit */ \ + if (opts & MPPE_OPT_STATEFUL) \ + *ptr++ = 0x0; \ + else \ + *ptr++ = MPPE_H_BIT; \ + *ptr++ = 0; \ + *ptr++ = 0; \ + \ + /* S,L bits */ \ + *ptr = 0; \ + if (opts & MPPE_OPT_128) \ + *ptr |= MPPE_S_BIT; \ + if (opts & MPPE_OPT_40) \ + *ptr |= MPPE_L_BIT; \ + /* M,D,C bits not supported */ \ + } while (/* CONSTCOND */ 0) + +/* The reverse of the above */ +#define MPPE_CI_TO_OPTS(ci, opts) \ + do { \ + const u_char *ptr = ci; /* u_char[4] */ \ + \ + opts = 0; \ + \ + /* H bit */ \ + if (!(ptr[0] & MPPE_H_BIT)) \ + opts |= MPPE_OPT_STATEFUL; \ + \ + /* S,L bits */ \ + if (ptr[3] & MPPE_S_BIT) \ + opts |= MPPE_OPT_128; \ + if (ptr[3] & MPPE_L_BIT) \ + opts |= MPPE_OPT_40; \ + \ + /* M,D,C bits */ \ + if (ptr[3] & MPPE_M_BIT) \ + opts |= MPPE_OPT_56; \ + if (ptr[3] & MPPE_D_BIT) \ + opts |= MPPE_OPT_D; \ + if (ptr[3] & MPPE_C_BIT) \ + opts |= MPPE_OPT_MPPC; \ + \ + /* Other bits */ \ + if (ptr[0] & ~MPPE_H_BIT) \ + opts |= MPPE_OPT_UNKNOWN; \ + if (ptr[1] || ptr[2]) \ + opts |= MPPE_OPT_UNKNOWN; \ + if (ptr[3] & ~MPPE_ALL_BITS) \ + opts |= MPPE_OPT_UNKNOWN; \ + } while (/* CONSTCOND */ 0) + +/* Shared MPPE padding between MSCHAP and MPPE */ +#define SHA1_PAD_SIZE 40 + +static const u8_t mppe_sha1_pad1[SHA1_PAD_SIZE] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8_t mppe_sha1_pad2[SHA1_PAD_SIZE] = { + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2 +}; + +/* + * State for an MPPE (de)compressor. + */ +typedef struct ppp_mppe_state { + lwip_arc4_context arc4; + u8_t master_key[MPPE_MAX_KEY_LEN]; + u8_t session_key[MPPE_MAX_KEY_LEN]; + u8_t keylen; /* key length in bytes */ + /* NB: 128-bit == 16, 40-bit == 8! 
+ * If we want to support 56-bit, the unit has to change to bits + */ + u8_t bits; /* MPPE control bits */ + u16_t ccount; /* 12-bit coherency count (seqno) */ + u16_t sanity_errors; /* take down LCP if too many */ + unsigned int stateful :1; /* stateful mode flag */ + unsigned int discard :1; /* stateful mode packet loss flag */ +} ppp_mppe_state; + +void mppe_set_key(ppp_pcb *pcb, ppp_mppe_state *state, u8_t *key); +void mppe_init(ppp_pcb *pcb, ppp_mppe_state *state, u8_t options); +void mppe_comp_reset(ppp_pcb *pcb, ppp_mppe_state *state); +err_t mppe_compress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb, u16_t protocol); +void mppe_decomp_reset(ppp_pcb *pcb, ppp_mppe_state *state); +err_t mppe_decompress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb); + +#ifdef __cplusplus +} +#endif + +#endif /* MPPE_H */ +#endif /* PPP_SUPPORT && MPPE_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h new file mode 100644 index 00000000..3d73c365 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h @@ -0,0 +1,698 @@ +/***************************************************************************** +* ppp.h - Network Point to Point Protocol header file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1997 Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 97-11-05 Guy Lancaster , Global Election Systems Inc. +* Original derived from BSD codes. 
+*****************************************************************************/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPP_H +#define PPP_H + +#include "lwip/def.h" +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/netif.h" +#include "lwip/sys.h" +#include "lwip/timeouts.h" +#if PPP_IPV6_SUPPORT +#include "lwip/ip6_addr.h" +#endif /* PPP_IPV6_SUPPORT */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Disable non-working or rarely used PPP feature, so rarely that we don't want to bloat ppp_opts.h with them */ +#ifndef PPP_OPTIONS +#define PPP_OPTIONS 0 +#endif + +#ifndef PPP_NOTIFY +#define PPP_NOTIFY 0 +#endif + +#ifndef PPP_REMOTENAME +#define PPP_REMOTENAME 0 +#endif + +#ifndef PPP_IDLETIMELIMIT +#define PPP_IDLETIMELIMIT 0 +#endif + +#ifndef PPP_LCP_ADAPTIVE +#define PPP_LCP_ADAPTIVE 0 +#endif + +#ifndef PPP_MAXCONNECT +#define PPP_MAXCONNECT 0 +#endif + +#ifndef PPP_ALLOWED_ADDRS +#define PPP_ALLOWED_ADDRS 0 +#endif + +#ifndef PPP_PROTOCOLNAME +#define PPP_PROTOCOLNAME 0 +#endif + +#ifndef PPP_STATS_SUPPORT +#define PPP_STATS_SUPPORT 0 +#endif + +#ifndef DEFLATE_SUPPORT +#define DEFLATE_SUPPORT 0 +#endif + +#ifndef BSDCOMPRESS_SUPPORT +#define BSDCOMPRESS_SUPPORT 0 +#endif + +#ifndef PREDICTOR_SUPPORT +#define PREDICTOR_SUPPORT 0 +#endif + +/************************* +*** PUBLIC DEFINITIONS *** +*************************/ + +/* + * The basic PPP frame. + */ +#define PPP_HDRLEN 4 /* octets for standard ppp header */ +#define PPP_FCSLEN 2 /* octets for FCS */ + +/* + * Values for phase. + */ +#define PPP_PHASE_DEAD 0 +#define PPP_PHASE_MASTER 1 +#define PPP_PHASE_HOLDOFF 2 +#define PPP_PHASE_INITIALIZE 3 +#define PPP_PHASE_SERIALCONN 4 +#define PPP_PHASE_DORMANT 5 +#define PPP_PHASE_ESTABLISH 6 +#define PPP_PHASE_AUTHENTICATE 7 +#define PPP_PHASE_CALLBACK 8 +#define PPP_PHASE_NETWORK 9 +#define PPP_PHASE_RUNNING 10 +#define PPP_PHASE_TERMINATE 11 +#define PPP_PHASE_DISCONNECT 12 + +/* Error codes. */ +#define PPPERR_NONE 0 /* No error. */ +#define PPPERR_PARAM 1 /* Invalid parameter. */ +#define PPPERR_OPEN 2 /* Unable to open PPP session. */ +#define PPPERR_DEVICE 3 /* Invalid I/O device for PPP. */ +#define PPPERR_ALLOC 4 /* Unable to allocate resources. */ +#define PPPERR_USER 5 /* User interrupt. */ +#define PPPERR_CONNECT 6 /* Connection lost. */ +#define PPPERR_AUTHFAIL 7 /* Failed authentication challenge. */ +#define PPPERR_PROTOCOL 8 /* Failed to meet protocol. */ +#define PPPERR_PEERDEAD 9 /* Connection timeout */ +#define PPPERR_IDLETIMEOUT 10 /* Idle Timeout */ +#define PPPERR_CONNECTTIME 11 /* Max connect time reached */ +#define PPPERR_LOOPBACK 12 /* Loopback detected */ + +/* Whether auth support is enabled at all */ +#define PPP_AUTH_SUPPORT (PAP_SUPPORT || CHAP_SUPPORT || EAP_SUPPORT) + +/************************ +*** PUBLIC DATA TYPES *** +************************/ + +/* + * Other headers require ppp_pcb definition for prototypes, but ppp_pcb + * require some structure definition from other headers as well, we are + * fixing the dependency loop here by declaring the ppp_pcb type then + * by including headers containing necessary struct definition for ppp_pcb + */ +typedef struct ppp_pcb_s ppp_pcb; + +/* Type definitions for BSD code. 
*/ +#ifndef __u_char_defined +typedef unsigned long u_long; +typedef unsigned int u_int; +typedef unsigned short u_short; +typedef unsigned char u_char; +#endif + +#include "fsm.h" +#include "lcp.h" +#if CCP_SUPPORT +#include "ccp.h" +#endif /* CCP_SUPPORT */ +#if MPPE_SUPPORT +#include "mppe.h" +#endif /* MPPE_SUPPORT */ +#if PPP_IPV4_SUPPORT +#include "ipcp.h" +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT +#include "ipv6cp.h" +#endif /* PPP_IPV6_SUPPORT */ +#if PAP_SUPPORT +#include "upap.h" +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#include "chap-new.h" +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#include "eap.h" +#endif /* EAP_SUPPORT */ +#if VJ_SUPPORT +#include "vj.h" +#endif /* VJ_SUPPORT */ + +/* Link status callback function prototype */ +typedef void (*ppp_link_status_cb_fn)(ppp_pcb *pcb, int err_code, void *ctx); + +/* + * PPP configuration. + */ +typedef struct ppp_settings_s { + +#if PPP_SERVER && PPP_AUTH_SUPPORT + unsigned int auth_required :1; /* Peer is required to authenticate */ + unsigned int null_login :1; /* Username of "" and a password of "" are acceptable */ +#endif /* PPP_SERVER && PPP_AUTH_SUPPORT */ +#if PPP_REMOTENAME + unsigned int explicit_remote :1; /* remote_name specified with remotename opt */ +#endif /* PPP_REMOTENAME */ +#if PAP_SUPPORT + unsigned int refuse_pap :1; /* Don't proceed auth. with PAP */ +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + unsigned int refuse_chap :1; /* Don't proceed auth. with CHAP */ +#endif /* CHAP_SUPPORT */ +#if MSCHAP_SUPPORT + unsigned int refuse_mschap :1; /* Don't proceed auth. with MS-CHAP */ + unsigned int refuse_mschap_v2 :1; /* Don't proceed auth. with MS-CHAPv2 */ +#endif /* MSCHAP_SUPPORT */ +#if EAP_SUPPORT + unsigned int refuse_eap :1; /* Don't proceed auth. with EAP */ +#endif /* EAP_SUPPORT */ +#if LWIP_DNS + unsigned int usepeerdns :1; /* Ask peer for DNS adds */ +#endif /* LWIP_DNS */ + unsigned int persist :1; /* Persist mode, always try to open the connection */ +#if PRINTPKT_SUPPORT + unsigned int hide_password :1; /* Hide password in dumped packets */ +#endif /* PRINTPKT_SUPPORT */ + unsigned int noremoteip :1; /* Let him have no IP address */ + unsigned int lax_recv :1; /* accept control chars in asyncmap */ + unsigned int noendpoint :1; /* don't send/accept endpoint discriminator */ +#if PPP_LCP_ADAPTIVE + unsigned int lcp_echo_adaptive :1; /* request echo only if the link was idle */ +#endif /* PPP_LCP_ADAPTIVE */ +#if MPPE_SUPPORT + unsigned int require_mppe :1; /* Require MPPE (Microsoft Point to Point Encryption) */ + unsigned int refuse_mppe_40 :1; /* Allow MPPE 40-bit mode? */ + unsigned int refuse_mppe_128 :1; /* Allow MPPE 128-bit mode? */ + unsigned int refuse_mppe_stateful :1; /* Allow MPPE stateful mode? */ +#endif /* MPPE_SUPPORT */ + + u16_t listen_time; /* time to listen first (ms), waiting for peer to send LCP packet */ + +#if PPP_IDLETIMELIMIT + u16_t idle_time_limit; /* Disconnect if idle for this many seconds */ +#endif /* PPP_IDLETIMELIMIT */ +#if PPP_MAXCONNECT + u32_t maxconnect; /* Maximum connect time (seconds) */ +#endif /* PPP_MAXCONNECT */ + +#if PPP_AUTH_SUPPORT + /* auth data */ + const char *user; /* Username for PAP */ + const char *passwd; /* Password for PAP, secret for CHAP */ +#if PPP_REMOTENAME + char remote_name[MAXNAMELEN + 1]; /* Peer's name for authentication */ +#endif /* PPP_REMOTENAME */ + +#if PAP_SUPPORT + u8_t pap_timeout_time; /* Timeout (seconds) for auth-req retrans. 
*/ + u8_t pap_max_transmits; /* Number of auth-reqs sent */ +#if PPP_SERVER + u8_t pap_req_timeout; /* Time to wait for auth-req from peer */ +#endif /* PPP_SERVER */ +#endif /* PAP_SUPPPORT */ + +#if CHAP_SUPPORT + u8_t chap_timeout_time; /* Timeout (seconds) for retransmitting req */ + u8_t chap_max_transmits; /* max # times to send challenge */ +#if PPP_SERVER + u8_t chap_rechallenge_time; /* Time to wait for auth-req from peer */ +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPPORT */ + +#if EAP_SUPPORT + u8_t eap_req_time; /* Time to wait (for retransmit/fail) */ + u8_t eap_allow_req; /* Max Requests allowed */ +#if PPP_SERVER + u8_t eap_timeout_time; /* Time to wait (for retransmit/fail) */ + u8_t eap_max_transmits; /* Max Requests allowed */ +#endif /* PPP_SERVER */ +#endif /* EAP_SUPPORT */ + +#endif /* PPP_AUTH_SUPPORT */ + + u8_t fsm_timeout_time; /* Timeout time in seconds */ + u8_t fsm_max_conf_req_transmits; /* Maximum Configure-Request transmissions */ + u8_t fsm_max_term_transmits; /* Maximum Terminate-Request transmissions */ + u8_t fsm_max_nak_loops; /* Maximum number of nak loops tolerated */ + + u8_t lcp_loopbackfail; /* Number of times we receive our magic number from the peer + before deciding the link is looped-back. */ + u8_t lcp_echo_interval; /* Interval between LCP echo-requests */ + u8_t lcp_echo_fails; /* Tolerance to unanswered echo-requests */ + +} ppp_settings; + +#if PPP_SERVER +struct ppp_addrs { +#if PPP_IPV4_SUPPORT + ip4_addr_t our_ipaddr, his_ipaddr, netmask; +#if LWIP_DNS + ip4_addr_t dns1, dns2; +#endif /* LWIP_DNS */ +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + ip6_addr_t our6_ipaddr, his6_ipaddr; +#endif /* PPP_IPV6_SUPPORT */ +}; +#endif /* PPP_SERVER */ + +/* + * PPP interface control block. + */ +struct ppp_pcb_s { + ppp_settings settings; + const struct link_callbacks *link_cb; + void *link_ctx_cb; + void (*link_status_cb)(ppp_pcb *pcb, int err_code, void *ctx); /* Status change callback */ +#if PPP_NOTIFY_PHASE + void (*notify_phase_cb)(ppp_pcb *pcb, u8_t phase, void *ctx); /* Notify phase callback */ +#endif /* PPP_NOTIFY_PHASE */ + void *ctx_cb; /* Callbacks optional pointer */ + struct netif *netif; /* PPP interface */ + u8_t phase; /* where the link is at */ + u8_t err_code; /* Code indicating why interface is down. */ + + /* flags */ +#if PPP_IPV4_SUPPORT + unsigned int ask_for_local :1; /* request our address from peer */ + unsigned int ipcp_is_open :1; /* haven't called np_finished() */ + unsigned int ipcp_is_up :1; /* have called ipcp_up() */ + unsigned int if4_up :1; /* True when the IPv4 interface is up. */ +#if 0 /* UNUSED - PROXY ARP */ + unsigned int proxy_arp_set :1; /* Have created proxy arp entry */ +#endif /* UNUSED - PROXY ARP */ +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + unsigned int ipv6cp_is_up :1; /* have called ip6cp_up() */ + unsigned int if6_up :1; /* True when the IPv6 interface is up. */ +#endif /* PPP_IPV6_SUPPORT */ + unsigned int lcp_echo_timer_running :1; /* set if a timer is running */ +#if VJ_SUPPORT + unsigned int vj_enabled :1; /* Flag indicating VJ compression enabled. */ +#endif /* VJ_SUPPORT */ +#if CCP_SUPPORT + unsigned int ccp_all_rejected :1; /* we rejected all peer's options */ +#endif /* CCP_SUPPORT */ +#if MPPE_SUPPORT + unsigned int mppe_keys_set :1; /* Have the MPPE keys been set? 
*/ +#endif /* MPPE_SUPPORT */ + +#if PPP_AUTH_SUPPORT + /* auth data */ +#if PPP_SERVER && defined(HAVE_MULTILINK) + char peer_authname[MAXNAMELEN + 1]; /* The name by which the peer authenticated itself to us. */ +#endif /* PPP_SERVER && defined(HAVE_MULTILINK) */ + u16_t auth_pending; /* Records which authentication operations haven't completed yet. */ + u16_t auth_done; /* Records which authentication operations have been completed. */ + +#if PAP_SUPPORT + upap_state upap; /* PAP data */ +#endif /* PAP_SUPPORT */ + +#if CHAP_SUPPORT + chap_client_state chap_client; /* CHAP client data */ +#if PPP_SERVER + chap_server_state chap_server; /* CHAP server data */ +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPORT */ + +#if EAP_SUPPORT + eap_state eap; /* EAP data */ +#endif /* EAP_SUPPORT */ +#endif /* PPP_AUTH_SUPPORT */ + + fsm lcp_fsm; /* LCP fsm structure */ + lcp_options lcp_wantoptions; /* Options that we want to request */ + lcp_options lcp_gotoptions; /* Options that peer ack'd */ + lcp_options lcp_allowoptions; /* Options we allow peer to request */ + lcp_options lcp_hisoptions; /* Options that we ack'd */ + u16_t peer_mru; /* currently negotiated peer MRU */ + u8_t lcp_echos_pending; /* Number of outstanding echo msgs */ + u8_t lcp_echo_number; /* ID number of next echo frame */ + + u8_t num_np_open; /* Number of network protocols which we have opened. */ + u8_t num_np_up; /* Number of network protocols which have come up. */ + +#if VJ_SUPPORT + struct vjcompress vj_comp; /* Van Jacobson compression header. */ +#endif /* VJ_SUPPORT */ + +#if CCP_SUPPORT + fsm ccp_fsm; /* CCP fsm structure */ + ccp_options ccp_wantoptions; /* what to request the peer to use */ + ccp_options ccp_gotoptions; /* what the peer agreed to do */ + ccp_options ccp_allowoptions; /* what we'll agree to do */ + ccp_options ccp_hisoptions; /* what we agreed to do */ + u8_t ccp_localstate; /* Local state (mainly for handling reset-reqs and reset-acks). */ + u8_t ccp_receive_method; /* Method chosen on receive path */ + u8_t ccp_transmit_method; /* Method chosen on transmit path */ +#if MPPE_SUPPORT + ppp_mppe_state mppe_comp; /* MPPE "compressor" structure */ + ppp_mppe_state mppe_decomp; /* MPPE "decompressor" structure */ +#endif /* MPPE_SUPPORT */ +#endif /* CCP_SUPPORT */ + +#if PPP_IPV4_SUPPORT + fsm ipcp_fsm; /* IPCP fsm structure */ + ipcp_options ipcp_wantoptions; /* Options that we want to request */ + ipcp_options ipcp_gotoptions; /* Options that peer ack'd */ + ipcp_options ipcp_allowoptions; /* Options we allow peer to request */ + ipcp_options ipcp_hisoptions; /* Options that we ack'd */ +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT + fsm ipv6cp_fsm; /* IPV6CP fsm structure */ + ipv6cp_options ipv6cp_wantoptions; /* Options that we want to request */ + ipv6cp_options ipv6cp_gotoptions; /* Options that peer ack'd */ + ipv6cp_options ipv6cp_allowoptions; /* Options we allow peer to request */ + ipv6cp_options ipv6cp_hisoptions; /* Options that we ack'd */ +#endif /* PPP_IPV6_SUPPORT */ +}; + +/************************ + *** PUBLIC FUNCTIONS *** + ************************/ + +/* + * WARNING: For multi-threads environment, all ppp_set_* functions most + * only be called while the PPP is in the dead phase (i.e. disconnected). + */ + +#if PPP_AUTH_SUPPORT +/* + * Set PPP authentication. + * + * Warning: Using PPPAUTHTYPE_ANY might have security consequences. 
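+ *
+ * A safer, minimal sketch is to pin exactly one method per peer
+ * (credentials are placeholders, pcb an already created ppp_pcb):
+ *
+ *   ppp_set_auth(pcb, PPPAUTHTYPE_CHAP, "username", "secret");
+ *
+ * The rationale for avoiding PPPAUTHTYPE_ANY, in the words of the CHAP
+ * specification, follows.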
+ * RFC 1994 says: + * + * In practice, within or associated with each PPP server, there is a + * database which associates "user" names with authentication + * information ("secrets"). It is not anticipated that a particular + * named user would be authenticated by multiple methods. This would + * make the user vulnerable to attacks which negotiate the least secure + * method from among a set (such as PAP rather than CHAP). If the same + * secret was used, PAP would reveal the secret to be used later with + * CHAP. + * + * Instead, for each user name there should be an indication of exactly + * one method used to authenticate that user name. If a user needs to + * make use of different authentication methods under different + * circumstances, then distinct user names SHOULD be employed, each of + * which identifies exactly one authentication method. + * + * Default is none auth type, unset (NULL) user and passwd. + */ +#define PPPAUTHTYPE_NONE 0x00 +#define PPPAUTHTYPE_PAP 0x01 +#define PPPAUTHTYPE_CHAP 0x02 +#define PPPAUTHTYPE_MSCHAP 0x04 +#define PPPAUTHTYPE_MSCHAP_V2 0x08 +#define PPPAUTHTYPE_EAP 0x10 +#define PPPAUTHTYPE_ANY 0xff +void ppp_set_auth(ppp_pcb *pcb, u8_t authtype, const char *user, const char *passwd); + +/* + * If set, peer is required to authenticate. This is mostly necessary for PPP server support. + * + * Default is false. + */ +#define ppp_set_auth_required(ppp, boolval) (ppp->settings.auth_required = boolval) +#endif /* PPP_AUTH_SUPPORT */ + +#if PPP_IPV4_SUPPORT +/* + * Set PPP interface "our" and "his" IPv4 addresses. This is mostly necessary for PPP server + * support but it can also be used on a PPP link where each side choose its own IP address. + * + * Default is unset (0.0.0.0). + */ +#define ppp_set_ipcp_ouraddr(ppp, addr) do { ppp->ipcp_wantoptions.ouraddr = ip4_addr_get_u32(addr); \ + ppp->ask_for_local = ppp->ipcp_wantoptions.ouraddr != 0; } while(0) +#define ppp_set_ipcp_hisaddr(ppp, addr) (ppp->ipcp_wantoptions.hisaddr = ip4_addr_get_u32(addr)) +#if LWIP_DNS +/* + * Set DNS server addresses that are sent if the peer asks for them. This is mostly necessary + * for PPP server support. + * + * Default is unset (0.0.0.0). + */ +#define ppp_set_ipcp_dnsaddr(ppp, index, addr) (ppp->ipcp_allowoptions.dnsaddr[index] = ip4_addr_get_u32(addr)) + +/* + * If set, we ask the peer for up to 2 DNS server addresses. Received DNS server addresses are + * registered using the dns_setserver() function. + * + * Default is false. + */ +#define ppp_set_usepeerdns(ppp, boolval) (ppp->settings.usepeerdns = boolval) +#endif /* LWIP_DNS */ +#endif /* PPP_IPV4_SUPPORT */ + +#if MPPE_SUPPORT +/* Disable MPPE (Microsoft Point to Point Encryption). This parameter is exclusive. */ +#define PPP_MPPE_DISABLE 0x00 +/* Require the use of MPPE (Microsoft Point to Point Encryption). */ +#define PPP_MPPE_ENABLE 0x01 +/* Allow MPPE to use stateful mode. Stateless mode is still attempted first. */ +#define PPP_MPPE_ALLOW_STATEFUL 0x02 +/* Refuse the use of MPPE with 40-bit encryption. Conflict with PPP_MPPE_REFUSE_128. */ +#define PPP_MPPE_REFUSE_40 0x04 +/* Refuse the use of MPPE with 128-bit encryption. Conflict with PPP_MPPE_REFUSE_40. */ +#define PPP_MPPE_REFUSE_128 0x08 +/* + * Set MPPE configuration + * + * Default is disabled. + */ +void ppp_set_mppe(ppp_pcb *pcb, u8_t flags); +#endif /* MPPE_SUPPORT */ + +/* + * Wait for up to intval milliseconds for a valid PPP packet from the peer. 
+ * At the end of this time, or when a valid PPP packet is received from the + * peer, we commence negotiation by sending our first LCP packet. + * + * Default is 0. + */ +#define ppp_set_listen_time(ppp, intval) (ppp->settings.listen_time = intval) + +/* + * If set, we will attempt to initiate a connection but if no reply is received from + * the peer, we will then just wait passively for a valid LCP packet from the peer. + * + * Default is false. + */ +#define ppp_set_passive(ppp, boolval) (ppp->lcp_wantoptions.passive = boolval) + +/* + * If set, we will not transmit LCP packets to initiate a connection until a valid + * LCP packet is received from the peer. This is what we usually call the server mode. + * + * Default is false. + */ +#define ppp_set_silent(ppp, boolval) (ppp->lcp_wantoptions.silent = boolval) + +/* + * If set, enable protocol field compression negotiation in both the receive and + * the transmit direction. + * + * Default is true. + */ +#define ppp_set_neg_pcomp(ppp, boolval) (ppp->lcp_wantoptions.neg_pcompression = \ + ppp->lcp_allowoptions.neg_pcompression = boolval) + +/* + * If set, enable Address/Control compression in both the receive and the transmit + * direction. + * + * Default is true. + */ +#define ppp_set_neg_accomp(ppp, boolval) (ppp->lcp_wantoptions.neg_accompression = \ + ppp->lcp_allowoptions.neg_accompression = boolval) + +/* + * If set, enable asyncmap negotiation. Otherwise forcing all control characters to + * be escaped for both the transmit and the receive direction. + * + * Default is true. + */ +#define ppp_set_neg_asyncmap(ppp, boolval) (ppp->lcp_wantoptions.neg_asyncmap = \ + ppp->lcp_allowoptions.neg_asyncmap = boolval) + +/* + * This option sets the Async-Control-Character-Map (ACCM) for this end of the link. + * The ACCM is a set of 32 bits, one for each of the ASCII control characters with + * values from 0 to 31, where a 1 bit indicates that the corresponding control + * character should not be used in PPP packets sent to this system. The map is + * an unsigned 32 bits integer where the least significant bit (00000001) represents + * character 0 and the most significant bit (80000000) represents character 31. + * We will then ask the peer to send these characters as a 2-byte escape sequence. + * + * Default is 0. + */ +#define ppp_set_asyncmap(ppp, intval) (ppp->lcp_wantoptions.asyncmap = intval) + +/* + * Set a PPP interface as the default network interface + * (used to output all packets for which no specific route is found). + */ +#define ppp_set_default(ppp) netif_set_default(ppp->netif) + +#if PPP_NOTIFY_PHASE +/* + * Set a PPP notify phase callback. + * + * This can be used for example to set a LED pattern depending on the + * current phase of the PPP session. + */ +typedef void (*ppp_notify_phase_cb_fn)(ppp_pcb *pcb, u8_t phase, void *ctx); +void ppp_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb); +#endif /* PPP_NOTIFY_PHASE */ + +/* + * Initiate a PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * Holdoff is the time to wait (in seconds) before initiating + * the connection. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. + */ +err_t ppp_connect(ppp_pcb *pcb, u16_t holdoff); + +#if PPP_SERVER +/* + * Listen for an incoming PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. 
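+ *
+ * A minimal sketch of the two bring-up styles, assuming pcb was created by
+ * one of the pppos/pppoe/pppol2tp creation functions (declared in their
+ * own headers, not here) with a link status callback already registered:
+ *
+ *   ppp_connect(pcb, 0);      // client: start negotiating now, no holdoff
+ *
+ *   ppp_set_silent(pcb, 1);   // server: wait for the peer to speak first
+ *   ppp_listen(pcb);
+ *
+ * Either call is only valid while the PPP is in the dead phase.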
+ */ +err_t ppp_listen(ppp_pcb *pcb); +#endif /* PPP_SERVER */ + +/* + * Initiate the end of a PPP connection. + * Any outstanding packets in the queues are dropped. + * + * Setting nocarrier to 1 closes the PPP connection without initiating the + * shutdown procedure. Using nocarrier = 0 is still recommended: it takes a + * little longer if the link is already down, but it is the safer choice for + * the PPP state machine. + * + * Return 0 on success, an error code on failure. + */ +err_t ppp_close(ppp_pcb *pcb, u8_t nocarrier); + +/* + * Release the control block. + * + * This can only be called if PPP is in the dead phase. + * + * You must call ppp_close() first if you wish to terminate + * an established PPP session. + * + * Return 0 on success, an error code on failure. + */ +err_t ppp_free(ppp_pcb *pcb); + +/* + * PPP IOCTL commands. + * + * Get the up status - 0 for down, non-zero for up. The argument must + * point to an int. + */ +#define PPPCTLG_UPSTATUS 0 + +/* + * Get the PPP error code. The argument must point to an int. + * Returns a PPPERR_* value. + */ +#define PPPCTLG_ERRCODE 1 + +/* + * Get the fd associated with a PPP-over-serial link. + */ +#define PPPCTLG_FD 2 + +/* + * Get and set parameters for the given connection. + * Return 0 on success, an error code on failure. + */ +err_t ppp_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg); + +/* Get the PPP netif interface */ +#define ppp_netif(ppp) (ppp->netif) + +/* Set an lwIP-style status-callback for the selected PPP device */ +#define ppp_set_netif_statuscallback(ppp, status_cb) \ + netif_set_status_callback(ppp->netif, status_cb); + +/* Set an lwIP-style link-callback for the selected PPP device */ +#define ppp_set_netif_linkcallback(ppp, link_cb) \ + netif_set_link_callback(ppp->netif, link_cb); + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_H */ + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h new file mode 100644 index 00000000..40843d5a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h @@ -0,0 +1,722 @@ +/***************************************************************************** +* ppp.h - Network Point to Point Protocol header file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1997 Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
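The option macros and session calls from ppp.h above are easiest to see in context. The following is a minimal client sketch, assuming a ppp_pcb already created by one of the link-layer constructors (for example pppos_create() from pppos.h, which is not part of this hunk), and assuming the PPPERR_* codes and the ppp_link_status_cb_fn callback prototype declared earlier in ppp.h; names such as link_status_cb and ppp_client_start are illustrative only.

#include "netif/ppp/ppp.h"

/* Status callback registered when the pcb is created (signature assumed from
 * ppp_link_status_cb_fn earlier in ppp.h). */
static void link_status_cb(ppp_pcb *pcb, int err_code, void *ctx)
{
  if (err_code == PPPERR_NONE) {
    /* Session is up: pcb->netif now carries the negotiated addresses. */
    return;
  }
  /* Any other code means the session ended or failed; once PPP is back in
   * the dead phase the pcb may be released with ppp_free(). */
}

/* Configure LCP options and start the client session. */
static void ppp_client_start(ppp_pcb *ppp)
{
  ppp_set_default(ppp);               /* route unmatched traffic through PPP        */
  ppp_set_neg_pcomp(ppp, 1);          /* negotiate protocol field compression       */
  ppp_set_neg_accomp(ppp, 1);         /* negotiate address/control compression      */
  ppp_set_asyncmap(ppp, 0x00000000);  /* no control characters need escaping        */
  ppp_connect(ppp, 0);                /* no holdoff: send the first LCP packet now  */
}

/* Orderly shutdown; nocarrier = 0 runs the normal LCP terminate sequence. */
static void ppp_client_stop(ppp_pcb *ppp)
{
  ppp_close(ppp, 0);
}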
+* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 97-11-05 Guy Lancaster , Global Election Systems Inc. +* Original derived from BSD codes. +*****************************************************************************/ +#ifndef LWIP_HDR_PPP_IMPL_H +#define LWIP_HDR_PPP_IMPL_H + +#include "netif/ppp/ppp_opts.h" + +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifdef PPP_INCLUDE_SETTINGS_HEADER +#include "ppp_settings.h" +#endif + +#include /* formats */ +#include +#include +#include /* strtol() */ + +#include "lwip/netif.h" +#include "lwip/def.h" +#include "lwip/timeouts.h" + +#include "ppp.h" +#include "pppdebug.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Memory used for control packets. + * + * PPP_CTRL_PBUF_MAX_SIZE is the amount of memory we allocate when we + * cannot figure out how much we are going to use before filling the buffer. + */ +#if PPP_USE_PBUF_RAM +#define PPP_CTRL_PBUF_TYPE PBUF_RAM +#define PPP_CTRL_PBUF_MAX_SIZE 512 +#else /* PPP_USE_PBUF_RAM */ +#define PPP_CTRL_PBUF_TYPE PBUF_POOL +#define PPP_CTRL_PBUF_MAX_SIZE PBUF_POOL_BUFSIZE +#endif /* PPP_USE_PBUF_RAM */ + +/* + * The basic PPP frame. + */ +#define PPP_ADDRESS(p) (((u_char *)(p))[0]) +#define PPP_CONTROL(p) (((u_char *)(p))[1]) +#define PPP_PROTOCOL(p) ((((u_char *)(p))[2] << 8) + ((u_char *)(p))[3]) + +/* + * Significant octet values. + */ +#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */ +#define PPP_UI 0x03 /* Unnumbered Information */ +#define PPP_FLAG 0x7e /* Flag Sequence */ +#define PPP_ESCAPE 0x7d /* Asynchronous Control Escape */ +#define PPP_TRANS 0x20 /* Asynchronous transparency modifier */ + +/* + * Protocol field values. 
+ */ +#define PPP_IP 0x21 /* Internet Protocol */ +#if 0 /* UNUSED */ +#define PPP_AT 0x29 /* AppleTalk Protocol */ +#define PPP_IPX 0x2b /* IPX protocol */ +#endif /* UNUSED */ +#if VJ_SUPPORT +#define PPP_VJC_COMP 0x2d /* VJ compressed TCP */ +#define PPP_VJC_UNCOMP 0x2f /* VJ uncompressed TCP */ +#endif /* VJ_SUPPORT */ +#if PPP_IPV6_SUPPORT +#define PPP_IPV6 0x57 /* Internet Protocol Version 6 */ +#endif /* PPP_IPV6_SUPPORT */ +#if CCP_SUPPORT +#define PPP_COMP 0xfd /* compressed packet */ +#endif /* CCP_SUPPORT */ +#define PPP_IPCP 0x8021 /* IP Control Protocol */ +#if 0 /* UNUSED */ +#define PPP_ATCP 0x8029 /* AppleTalk Control Protocol */ +#define PPP_IPXCP 0x802b /* IPX Control Protocol */ +#endif /* UNUSED */ +#if PPP_IPV6_SUPPORT +#define PPP_IPV6CP 0x8057 /* IPv6 Control Protocol */ +#endif /* PPP_IPV6_SUPPORT */ +#if CCP_SUPPORT +#define PPP_CCP 0x80fd /* Compression Control Protocol */ +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT +#define PPP_ECP 0x8053 /* Encryption Control Protocol */ +#endif /* ECP_SUPPORT */ +#define PPP_LCP 0xc021 /* Link Control Protocol */ +#if PAP_SUPPORT +#define PPP_PAP 0xc023 /* Password Authentication Protocol */ +#endif /* PAP_SUPPORT */ +#if LQR_SUPPORT +#define PPP_LQR 0xc025 /* Link Quality Report protocol */ +#endif /* LQR_SUPPORT */ +#if CHAP_SUPPORT +#define PPP_CHAP 0xc223 /* Cryptographic Handshake Auth. Protocol */ +#endif /* CHAP_SUPPORT */ +#if CBCP_SUPPORT +#define PPP_CBCP 0xc029 /* Callback Control Protocol */ +#endif /* CBCP_SUPPORT */ +#if EAP_SUPPORT +#define PPP_EAP 0xc227 /* Extensible Authentication Protocol */ +#endif /* EAP_SUPPORT */ + +/* + * The following struct gives the addresses of procedures to call + * for a particular lower link level protocol. + */ +struct link_callbacks { + /* Start a connection (e.g. Initiate discovery phase) */ + void (*connect) (ppp_pcb *pcb, void *ctx); +#if PPP_SERVER + /* Listen for an incoming connection (Passive mode) */ + void (*listen) (ppp_pcb *pcb, void *ctx); +#endif /* PPP_SERVER */ + /* End a connection (i.e. initiate disconnect phase) */ + void (*disconnect) (ppp_pcb *pcb, void *ctx); + /* Free lower protocol control block */ + err_t (*free) (ppp_pcb *pcb, void *ctx); + /* Write a pbuf to a ppp link, only used from PPP functions to send PPP packets. */ + err_t (*write)(ppp_pcb *pcb, void *ctx, struct pbuf *p); + /* Send a packet from lwIP core (IPv4 or IPv6) */ + err_t (*netif_output)(ppp_pcb *pcb, void *ctx, struct pbuf *p, u_short protocol); + /* configure the transmit-side characteristics of the PPP interface */ + void (*send_config)(ppp_pcb *pcb, void *ctx, u32_t accm, int pcomp, int accomp); + /* confire the receive-side characteristics of the PPP interface */ + void (*recv_config)(ppp_pcb *pcb, void *ctx, u32_t accm, int pcomp, int accomp); +}; + +/* + * What to do with network protocol (NP) packets. + */ +enum NPmode { + NPMODE_PASS, /* pass the packet through */ + NPMODE_DROP, /* silently drop the packet */ + NPMODE_ERROR, /* return an error */ + NPMODE_QUEUE /* save it up for later. */ +}; + +/* + * Statistics. 
+ */ +#if PPP_STATS_SUPPORT +struct pppstat { + unsigned int ppp_ibytes; /* bytes received */ + unsigned int ppp_ipackets; /* packets received */ + unsigned int ppp_ierrors; /* receive errors */ + unsigned int ppp_obytes; /* bytes sent */ + unsigned int ppp_opackets; /* packets sent */ + unsigned int ppp_oerrors; /* transmit errors */ +}; + +#if VJ_SUPPORT +struct vjstat { + unsigned int vjs_packets; /* outbound packets */ + unsigned int vjs_compressed; /* outbound compressed packets */ + unsigned int vjs_searches; /* searches for connection state */ + unsigned int vjs_misses; /* times couldn't find conn. state */ + unsigned int vjs_uncompressedin; /* inbound uncompressed packets */ + unsigned int vjs_compressedin; /* inbound compressed packets */ + unsigned int vjs_errorin; /* inbound unknown type packets */ + unsigned int vjs_tossed; /* inbound packets tossed because of error */ +}; +#endif /* VJ_SUPPORT */ + +struct ppp_stats { + struct pppstat p; /* basic PPP statistics */ +#if VJ_SUPPORT + struct vjstat vj; /* VJ header compression statistics */ +#endif /* VJ_SUPPORT */ +}; + +#if CCP_SUPPORT +struct compstat { + unsigned int unc_bytes; /* total uncompressed bytes */ + unsigned int unc_packets; /* total uncompressed packets */ + unsigned int comp_bytes; /* compressed bytes */ + unsigned int comp_packets; /* compressed packets */ + unsigned int inc_bytes; /* incompressible bytes */ + unsigned int inc_packets; /* incompressible packets */ + unsigned int ratio; /* recent compression ratio << 8 */ +}; + +struct ppp_comp_stats { + struct compstat c; /* packet compression statistics */ + struct compstat d; /* packet decompression statistics */ +}; +#endif /* CCP_SUPPORT */ + +#endif /* PPP_STATS_SUPPORT */ + +#if PPP_IDLETIMELIMIT +/* + * The following structure records the time in seconds since + * the last NP packet was sent or received. + */ +struct ppp_idle { + time_t xmit_idle; /* time since last NP packet sent */ + time_t recv_idle; /* time since last NP packet received */ +}; +#endif /* PPP_IDLETIMELIMIT */ + +/* values for epdisc.class */ +#define EPD_NULL 0 /* null discriminator, no data */ +#define EPD_LOCAL 1 +#define EPD_IP 2 +#define EPD_MAC 3 +#define EPD_MAGIC 4 +#define EPD_PHONENUM 5 + +/* + * Global variables. + */ +#ifdef HAVE_MULTILINK +extern u8_t multilink; /* enable multilink operation */ +extern u8_t doing_multilink; +extern u8_t multilink_master; +extern u8_t bundle_eof; +extern u8_t bundle_terminating; +#endif + +#ifdef MAXOCTETS +extern unsigned int maxoctets; /* Maximum octetes per session (in bytes) */ +extern int maxoctets_dir; /* Direction : + 0 - in+out (default) + 1 - in + 2 - out + 3 - max(in,out) */ +extern int maxoctets_timeout; /* Timeout for check of octets limit */ +#define PPP_OCTETS_DIRECTION_SUM 0 +#define PPP_OCTETS_DIRECTION_IN 1 +#define PPP_OCTETS_DIRECTION_OUT 2 +#define PPP_OCTETS_DIRECTION_MAXOVERAL 3 +/* same as previos, but little different on RADIUS side */ +#define PPP_OCTETS_DIRECTION_MAXSESSION 4 +#endif + +/* Data input may be used by CCP and ECP, remove this entry + * from struct protent to save some flash + */ +#define PPP_DATAINPUT 0 + +/* + * The following struct gives the addresses of procedures to call + * for a particular protocol. 
+ */ +struct protent { + u_short protocol; /* PPP protocol number */ + /* Initialization procedure */ + void (*init) (ppp_pcb *pcb); + /* Process a received packet */ + void (*input) (ppp_pcb *pcb, u_char *pkt, int len); + /* Process a received protocol-reject */ + void (*protrej) (ppp_pcb *pcb); + /* Lower layer has come up */ + void (*lowerup) (ppp_pcb *pcb); + /* Lower layer has gone down */ + void (*lowerdown) (ppp_pcb *pcb); + /* Open the protocol */ + void (*open) (ppp_pcb *pcb); + /* Close the protocol */ + void (*close) (ppp_pcb *pcb, const char *reason); +#if PRINTPKT_SUPPORT + /* Print a packet in readable form */ + int (*printpkt) (const u_char *pkt, int len, + void (*printer) (void *, const char *, ...), + void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + /* Process a received data packet */ + void (*datainput) (ppp_pcb *pcb, u_char *pkt, int len); +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + const char *name; /* Text name of protocol */ + const char *data_name; /* Text name of corresponding data protocol */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + option_t *options; /* List of command-line options */ + /* Check requested options, assign defaults */ + void (*check_options) (void); +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + /* Configure interface for demand-dial */ + int (*demand_conf) (int unit); + /* Say whether to bring up link for this pkt */ + int (*active_pkt) (u_char *pkt, int len); +#endif /* DEMAND_SUPPORT */ +}; + +/* Table of pointers to supported protocols */ +extern const struct protent* const protocols[]; + + +/* Values for auth_pending, auth_done */ +#if PAP_SUPPORT +#define PAP_WITHPEER 0x1 +#define PAP_PEER 0x2 +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#define CHAP_WITHPEER 0x4 +#define CHAP_PEER 0x8 +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#define EAP_WITHPEER 0x10 +#define EAP_PEER 0x20 +#endif /* EAP_SUPPORT */ + +/* Values for auth_done only */ +#if CHAP_SUPPORT +#define CHAP_MD5_WITHPEER 0x40 +#define CHAP_MD5_PEER 0x80 +#if MSCHAP_SUPPORT +#define CHAP_MS_SHIFT 8 /* LSB position for MS auths */ +#define CHAP_MS_WITHPEER 0x100 +#define CHAP_MS_PEER 0x200 +#define CHAP_MS2_WITHPEER 0x400 +#define CHAP_MS2_PEER 0x800 +#endif /* MSCHAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ + +/* Supported CHAP protocols */ +#if CHAP_SUPPORT + +#if MSCHAP_SUPPORT +#define CHAP_MDTYPE_SUPPORTED (MDTYPE_MICROSOFT_V2 | MDTYPE_MICROSOFT | MDTYPE_MD5) +#else /* MSCHAP_SUPPORT */ +#define CHAP_MDTYPE_SUPPORTED (MDTYPE_MD5) +#endif /* MSCHAP_SUPPORT */ + +#else /* CHAP_SUPPORT */ +#define CHAP_MDTYPE_SUPPORTED (MDTYPE_NONE) +#endif /* CHAP_SUPPORT */ + +#if PPP_STATS_SUPPORT +/* + * PPP statistics structure + */ +struct pppd_stats { + unsigned int bytes_in; + unsigned int bytes_out; + unsigned int pkts_in; + unsigned int pkts_out; +}; +#endif /* PPP_STATS_SUPPORT */ + + +/* + * PPP private functions + */ + + +/* + * Functions called from lwIP core. + */ + +/* initialize the PPP subsystem */ +int ppp_init(void); + +/* + * Functions called from PPP link protocols. + */ + +/* Create a new PPP control block */ +ppp_pcb *ppp_new(struct netif *pppif, const struct link_callbacks *callbacks, void *link_ctx_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +/* Initiate LCP open request */ +void ppp_start(ppp_pcb *pcb); + +/* Called when link failed to setup */ +void ppp_link_failed(ppp_pcb *pcb); + +/* Called when link is normally down (i.e. 
it was asked to end) */ +void ppp_link_end(ppp_pcb *pcb); + +/* function called to process input packet */ +void ppp_input(ppp_pcb *pcb, struct pbuf *pb); + + +/* + * Functions called by PPP protocols. + */ + +/* function called by all PPP subsystems to send packets */ +err_t ppp_write(ppp_pcb *pcb, struct pbuf *p); + +/* functions called by auth.c link_terminated() */ +void ppp_link_terminated(ppp_pcb *pcb); + +void new_phase(ppp_pcb *pcb, int p); + +int ppp_send_config(ppp_pcb *pcb, int mtu, u32_t accm, int pcomp, int accomp); +int ppp_recv_config(ppp_pcb *pcb, int mru, u32_t accm, int pcomp, int accomp); + +#if PPP_IPV4_SUPPORT +int sifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr, u32_t netmask); +int cifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr); +#if 0 /* UNUSED - PROXY ARP */ +int sifproxyarp(ppp_pcb *pcb, u32_t his_adr); +int cifproxyarp(ppp_pcb *pcb, u32_t his_adr); +#endif /* UNUSED - PROXY ARP */ +#if LWIP_DNS +int sdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2); +int cdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2); +#endif /* LWIP_DNS */ +#if VJ_SUPPORT +int sifvjcomp(ppp_pcb *pcb, int vjcomp, int cidcomp, int maxcid); +#endif /* VJ_SUPPORT */ +int sifup(ppp_pcb *pcb); +int sifdown (ppp_pcb *pcb); +u32_t get_mask(u32_t addr); +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT +int sif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64); +int cif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64); +int sif6up(ppp_pcb *pcb); +int sif6down (ppp_pcb *pcb); +#endif /* PPP_IPV6_SUPPORT */ + +#if DEMAND_SUPPORT +int sifnpmode(ppp_pcb *pcb, int proto, enum NPmode mode); +#endif /* DEMAND_SUPPORt */ + +void netif_set_mtu(ppp_pcb *pcb, int mtu); +int netif_get_mtu(ppp_pcb *pcb); + +#if CCP_SUPPORT +#if 0 /* unused */ +int ccp_test(ppp_pcb *pcb, u_char *opt_ptr, int opt_len, int for_transmit); +#endif /* unused */ +void ccp_set(ppp_pcb *pcb, u8_t isopen, u8_t isup, u8_t receive_method, u8_t transmit_method); +void ccp_reset_comp(ppp_pcb *pcb); +void ccp_reset_decomp(ppp_pcb *pcb); +#if 0 /* unused */ +int ccp_fatal_error(ppp_pcb *pcb); +#endif /* unused */ +#endif /* CCP_SUPPORT */ + +#if PPP_IDLETIMELIMIT +int get_idle_time(ppp_pcb *pcb, struct ppp_idle *ip); +#endif /* PPP_IDLETIMELIMIT */ + +#if DEMAND_SUPPORT +int get_loop_output(void); +#endif /* DEMAND_SUPPORT */ + +/* Optional protocol names list, to make our messages a little more informative. */ +#if PPP_PROTOCOLNAME +const char * protocol_name(int proto); +#endif /* PPP_PROTOCOLNAME */ + +/* Optional stats support, to get some statistics on the PPP interface */ +#if PPP_STATS_SUPPORT +void print_link_stats(void); /* Print stats, if available */ +void reset_link_stats(int u); /* Reset (init) stats when link goes up */ +void update_link_stats(int u); /* Get stats at link termination */ +#endif /* PPP_STATS_SUPPORT */ + + + +/* + * Inline versions of get/put char/short/long. + * Pointer is advanced; we assume that both arguments + * are lvalues and will already be in registers. + * cp MUST be u_char *. 
+ */ +#define GETCHAR(c, cp) { \ + (c) = *(cp)++; \ +} +#define PUTCHAR(c, cp) { \ + *(cp)++ = (u_char) (c); \ +} +#define GETSHORT(s, cp) { \ + (s) = *(cp)++ << 8; \ + (s) |= *(cp)++; \ +} +#define PUTSHORT(s, cp) { \ + *(cp)++ = (u_char) ((s) >> 8); \ + *(cp)++ = (u_char) (s); \ +} +#define GETLONG(l, cp) { \ + (l) = *(cp)++ << 8; \ + (l) |= *(cp)++; (l) <<= 8; \ + (l) |= *(cp)++; (l) <<= 8; \ + (l) |= *(cp)++; \ +} +#define PUTLONG(l, cp) { \ + *(cp)++ = (u_char) ((l) >> 24); \ + *(cp)++ = (u_char) ((l) >> 16); \ + *(cp)++ = (u_char) ((l) >> 8); \ + *(cp)++ = (u_char) (l); \ +} + +#define INCPTR(n, cp) ((cp) += (n)) +#define DECPTR(n, cp) ((cp) -= (n)) + +/* + * System dependent definitions for user-level 4.3BSD UNIX implementation. + */ +#define TIMEOUT(f, a, t) do { sys_untimeout((f), (a)); sys_timeout((t)*1000, (f), (a)); } while(0) +#define TIMEOUTMS(f, a, t) do { sys_untimeout((f), (a)); sys_timeout((t), (f), (a)); } while(0) +#define UNTIMEOUT(f, a) sys_untimeout((f), (a)) + +#define BZERO(s, n) memset(s, 0, n) +#define BCMP(s1, s2, l) memcmp(s1, s2, l) + +#define PRINTMSG(m, l) { ppp_info("Remote message: %0.*v", l, m); } + +/* + * MAKEHEADER - Add Header fields to a packet. + */ +#define MAKEHEADER(p, t) { \ + PUTCHAR(PPP_ALLSTATIONS, p); \ + PUTCHAR(PPP_UI, p); \ + PUTSHORT(t, p); } + +/* Procedures exported from auth.c */ +void link_required(ppp_pcb *pcb); /* we are starting to use the link */ +void link_terminated(ppp_pcb *pcb); /* we are finished with the link */ +void link_down(ppp_pcb *pcb); /* the LCP layer has left the Opened state */ +void upper_layers_down(ppp_pcb *pcb); /* take all NCPs down */ +void link_established(ppp_pcb *pcb); /* the link is up; authenticate now */ +void start_networks(ppp_pcb *pcb); /* start all the network control protos */ +void continue_networks(ppp_pcb *pcb); /* start network [ip, etc] control protos */ +#if PPP_AUTH_SUPPORT +#if PPP_SERVER +int auth_check_passwd(ppp_pcb *pcb, char *auser, int userlen, char *apasswd, int passwdlen, const char **msg, int *msglen); + /* check the user name and passwd against configuration */ +void auth_peer_fail(ppp_pcb *pcb, int protocol); + /* peer failed to authenticate itself */ +void auth_peer_success(ppp_pcb *pcb, int protocol, int prot_flavor, const char *name, int namelen); + /* peer successfully authenticated itself */ +#endif /* PPP_SERVER */ +void auth_withpeer_fail(ppp_pcb *pcb, int protocol); + /* we failed to authenticate ourselves */ +void auth_withpeer_success(ppp_pcb *pcb, int protocol, int prot_flavor); + /* we successfully authenticated ourselves */ +#endif /* PPP_AUTH_SUPPORT */ +void np_up(ppp_pcb *pcb, int proto); /* a network protocol has come up */ +void np_down(ppp_pcb *pcb, int proto); /* a network protocol has gone down */ +void np_finished(ppp_pcb *pcb, int proto); /* a network protocol no longer needs link */ +#if PPP_AUTH_SUPPORT +int get_secret(ppp_pcb *pcb, const char *client, const char *server, char *secret, int *secret_len, int am_server); + /* get "secret" for chap */ +#endif /* PPP_AUTH_SUPPORT */ + +/* Procedures exported from ipcp.c */ +/* int parse_dotted_ip (char *, u32_t *); */ + +/* Procedures exported from demand.c */ +#if DEMAND_SUPPORT +void demand_conf (void); /* config interface(s) for demand-dial */ +void demand_block (void); /* set all NPs to queue up packets */ +void demand_unblock (void); /* set all NPs to pass packets */ +void demand_discard (void); /* set all NPs to discard packets */ +void demand_rexmit (int, u32_t); /* retransmit saved frames for an NP*/ 
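As a short worked example of the byte-marshalling macros defined above (MAKEHEADER, PUTCHAR, PUTSHORT), the sketch below serialises the header of a control-protocol frame such as an LCP packet into a flat buffer; the function name and buffer handling are illustrative only.

#include "netif/ppp/ppp_impl.h"

/* Write the HDLC header plus the code/id/length fields into `buf'.
 * MAKEHEADER emits 0xff 0x03 (all-stations address, UI control) followed by
 * the 16-bit protocol number, here PPP_LCP (0xc021). */
static u_char *build_lcp_header(u_char *buf, u_char code, u_char id, u16_t datalen)
{
  u_char *outp = buf;

  MAKEHEADER(outp, PPP_LCP);
  PUTCHAR(code, outp);                  /* e.g. an Echo-Request code                 */
  PUTCHAR(id, outp);                    /* identifier echoed back by the peer        */
  PUTSHORT((u16_t)(datalen + 4), outp); /* length covers code, id, length and data   */
  return outp;                          /* now points just past the header           */
}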
+int loop_chars (unsigned char *, int); /* process chars from loopback */ +int loop_frame (unsigned char *, int); /* should we bring link up? */ +#endif /* DEMAND_SUPPORT */ + +/* Procedures exported from multilink.c */ +#ifdef HAVE_MULTILINK +void mp_check_options (void); /* Check multilink-related options */ +int mp_join_bundle (void); /* join our link to an appropriate bundle */ +void mp_exit_bundle (void); /* have disconnected our link from bundle */ +void mp_bundle_terminated (void); +char *epdisc_to_str (struct epdisc *); /* string from endpoint discrim. */ +int str_to_epdisc (struct epdisc *, char *); /* endpt disc. from str */ +#else +#define mp_bundle_terminated() /* nothing */ +#define mp_exit_bundle() /* nothing */ +#define doing_multilink 0 +#define multilink_master 0 +#endif + +/* Procedures exported from utils.c. */ +void ppp_print_string(const u_char *p, int len, void (*printer) (void *, const char *, ...), void *arg); /* Format a string for output */ +int ppp_slprintf(char *buf, int buflen, const char *fmt, ...); /* sprintf++ */ +int ppp_vslprintf(char *buf, int buflen, const char *fmt, va_list args); /* vsprintf++ */ +size_t ppp_strlcpy(char *dest, const char *src, size_t len); /* safe strcpy */ +size_t ppp_strlcat(char *dest, const char *src, size_t len); /* safe strncpy */ +void ppp_dbglog(const char *fmt, ...); /* log a debug message */ +void ppp_info(const char *fmt, ...); /* log an informational message */ +void ppp_notice(const char *fmt, ...); /* log a notice-level message */ +void ppp_warn(const char *fmt, ...); /* log a warning message */ +void ppp_error(const char *fmt, ...); /* log an error message */ +void ppp_fatal(const char *fmt, ...); /* log an error message and die(1) */ +#if PRINTPKT_SUPPORT +void ppp_dump_packet(ppp_pcb *pcb, const char *tag, unsigned char *p, int len); + /* dump packet to debug log if interesting */ +#endif /* PRINTPKT_SUPPORT */ + +/* + * Number of necessary timers analysis. + * + * PPP use at least one timer per each of its protocol, but not all protocols are + * active at the same time, thus the number of necessary timeouts is actually + * lower than enabled protocols. Here is the actual necessary timeouts based + * on code analysis. + * + * Note that many features analysed here are not working at all and are only + * there for a comprehensive analysis of necessary timers in order to prevent + * having to redo that each time we add a feature. + * + * Timer list + * + * | holdoff timeout + * | low level protocol timeout (PPPoE or PPPoL2P) + * | LCP delayed UP + * | LCP retransmit (FSM) + * | LCP Echo timer + * .| PAP or CHAP or EAP authentication + * . | ECP retransmit (FSM) + * . | CCP retransmit (FSM) when MPPE is enabled + * . | CCP retransmit (FSM) when MPPE is NOT enabled + * . | IPCP retransmit (FSM) + * . .| IP6CP retransmit (FSM) + * . . | Idle time limit + * . . | Max connect time + * . . | Max octets + * . . | CCP RACK timeout + * . . . + * PPP_PHASE_DEAD + * PPP_PHASE_HOLDOFF + * | . . . + * PPP_PHASE_INITIALIZE + * | . . . + * PPP_PHASE_ESTABLISH + * | . . . + * |. . . + * | . . + * PPP_PHASE_AUTHENTICATE + * | . . + * || . . + * PPP_PHASE_NETWORK + * | || . . + * | ||| . + * PPP_PHASE_RUNNING + * | .||||| + * | . |||| + * PPP_PHASE_TERMINATE + * | . |||| + * PPP_PHASE_NETWORK + * |. . + * PPP_PHASE_ESTABLISH + * PPP_PHASE_DISCONNECT + * PPP_PHASE_DEAD + * + * Alright, PPP basic retransmission and LCP Echo consume one timer. + * 1 + * + * If authentication is enabled one timer is necessary during authentication. 
+ * 1 + PPP_AUTH_SUPPORT + * + * If ECP is enabled one timer is necessary before IPCP and/or IP6CP, one more + * is necessary if CCP is enabled (only with MPPE support but we don't care much + * up to this detail level). + * 1 + ECP_SUPPORT + CCP_SUPPORT + * + * If CCP is enabled it might consume a timer during IPCP or IP6CP, thus + * we might use IPCP, IP6CP and CCP timers simultaneously. + * 1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT + * + * When entering running phase, IPCP or IP6CP is still running. If idle time limit + * is enabled one more timer is necessary. Same for max connect time and max + * octets features. Furthermore CCP RACK might be used past this point. + * 1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT -1 + PPP_IDLETIMELIMIT + PPP_MAXCONNECT + MAXOCTETS + CCP_SUPPORT + * + * IPv4 or IPv6 must be enabled, therefore we don't need to take care the authentication + * and the CCP + ECP case, thus reducing overall complexity. + * 1 + LWIP_MAX(PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT, PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT -1 + PPP_IDLETIMELIMIT + PPP_MAXCONNECT + MAXOCTETS + CCP_SUPPORT) + * + * We don't support PPP_IDLETIMELIMIT + PPP_MAXCONNECT + MAXOCTETS features + * and adding those defines to ppp_opts.h just for having the value always + * defined to 0 isn't worth it. + * 1 + LWIP_MAX(PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT, PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT -1 + CCP_SUPPORT) + * + * Thus, the following is enough for now. + * 1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT + */ + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_SUPPORT */ +#endif /* LWIP_HDR_PPP_IMPL_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h new file mode 100644 index 00000000..6702bec6 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h @@ -0,0 +1,610 @@ +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#ifndef LWIP_PPP_OPTS_H +#define LWIP_PPP_OPTS_H + +#include "lwip/opt.h" + +/** + * PPP_SUPPORT==1: Enable PPP. 
+ */ +#ifndef PPP_SUPPORT +#define PPP_SUPPORT 0 +#endif + +/** + * PPPOE_SUPPORT==1: Enable PPP Over Ethernet + */ +#ifndef PPPOE_SUPPORT +#define PPPOE_SUPPORT 0 +#endif + +/** + * PPPOL2TP_SUPPORT==1: Enable PPP Over L2TP + */ +#ifndef PPPOL2TP_SUPPORT +#define PPPOL2TP_SUPPORT 0 +#endif + +/** + * PPPOL2TP_AUTH_SUPPORT==1: Enable PPP Over L2TP Auth (enable MD5 support) + */ +#ifndef PPPOL2TP_AUTH_SUPPORT +#define PPPOL2TP_AUTH_SUPPORT PPPOL2TP_SUPPORT +#endif + +/** + * PPPOS_SUPPORT==1: Enable PPP Over Serial + */ +#ifndef PPPOS_SUPPORT +#define PPPOS_SUPPORT PPP_SUPPORT +#endif + +/** + * LWIP_PPP_API==1: Enable PPP API (in pppapi.c) + */ +#ifndef LWIP_PPP_API +#define LWIP_PPP_API (PPP_SUPPORT && (NO_SYS == 0)) +#endif + +#if PPP_SUPPORT + +/** + * MEMP_NUM_PPP_PCB: the number of simultaneously active PPP + * connections (requires the PPP_SUPPORT option) + */ +#ifndef MEMP_NUM_PPP_PCB +#define MEMP_NUM_PPP_PCB 1 +#endif + +/** + * PPP_NUM_TIMEOUTS_PER_PCB: the number of sys_timeouts running in parallel per + * ppp_pcb. See the detailed explanation at the end of ppp_impl.h about simultaneous + * timers analysis. + */ +#ifndef PPP_NUM_TIMEOUTS_PER_PCB +#define PPP_NUM_TIMEOUTS_PER_PCB (1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT) +#endif + +/* The number of sys_timeouts required for the PPP module */ +#define PPP_NUM_TIMEOUTS (PPP_SUPPORT * PPP_NUM_TIMEOUTS_PER_PCB * MEMP_NUM_PPP_PCB) + +/** + * MEMP_NUM_PPPOS_INTERFACES: the number of concurrently active PPPoS + * interfaces (only used with PPPOS_SUPPORT==1) + */ +#ifndef MEMP_NUM_PPPOS_INTERFACES +#define MEMP_NUM_PPPOS_INTERFACES MEMP_NUM_PPP_PCB +#endif + +/** + * MEMP_NUM_PPPOE_INTERFACES: the number of concurrently active PPPoE + * interfaces (only used with PPPOE_SUPPORT==1) + */ +#ifndef MEMP_NUM_PPPOE_INTERFACES +#define MEMP_NUM_PPPOE_INTERFACES 1 +#endif + +/** + * MEMP_NUM_PPPOL2TP_INTERFACES: the number of concurrently active PPPoL2TP + * interfaces (only used with PPPOL2TP_SUPPORT==1) + */ +#ifndef MEMP_NUM_PPPOL2TP_INTERFACES +#define MEMP_NUM_PPPOL2TP_INTERFACES 1 +#endif + +/** + * MEMP_NUM_PPP_API_MSG: Number of concurrent PPP API messages (in pppapi.c) + */ +#ifndef MEMP_NUM_PPP_API_MSG +#define MEMP_NUM_PPP_API_MSG 5 +#endif + +/** + * PPP_DEBUG: Enable debugging for PPP. + */ +#ifndef PPP_DEBUG +#define PPP_DEBUG LWIP_DBG_OFF +#endif + +/** + * PPP_INPROC_IRQ_SAFE==1 call pppos_input() using tcpip_callback(). + * + * Please read the "PPPoS input path" chapter in the PPP documentation about this option. + */ +#ifndef PPP_INPROC_IRQ_SAFE +#define PPP_INPROC_IRQ_SAFE 0 +#endif + +/** + * PRINTPKT_SUPPORT==1: Enable PPP print packet support + * + * Mandatory for debugging, it displays exchanged packet content in debug trace. + */ +#ifndef PRINTPKT_SUPPORT +#define PRINTPKT_SUPPORT 0 +#endif + +/** + * PPP_IPV4_SUPPORT==1: Enable PPP IPv4 support + */ +#ifndef PPP_IPV4_SUPPORT +#define PPP_IPV4_SUPPORT (LWIP_IPV4) +#endif + +/** + * PPP_IPV6_SUPPORT==1: Enable PPP IPv6 support + */ +#ifndef PPP_IPV6_SUPPORT +#define PPP_IPV6_SUPPORT (LWIP_IPV6) +#endif + +/** + * PPP_NOTIFY_PHASE==1: Support PPP notify phase support + * + * PPP notify phase support allows you to set a callback which is + * called on change of the internal PPP state machine. + * + * This can be used for example to set a LED pattern depending on the + * current phase of the PPP session. + */ +#ifndef PPP_NOTIFY_PHASE +#define PPP_NOTIFY_PHASE 0 +#endif + +/** + * pbuf_type PPP is using for LCP, PAP, CHAP, EAP, CCP, IPCP and IP6CP packets. 
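As a worked example of the PPP_NUM_TIMEOUTS_PER_PCB arithmetic defined above, a hypothetical lwipopts.h excerpt for a single PPPoS client with IPv4 only evaluates as shown in the comments.

/* lwipopts.h (hypothetical excerpt) */
#define PPP_SUPPORT        1   /* PPPOS_SUPPORT then defaults to 1 as well */
#define PPP_IPV4_SUPPORT   1
#define PPP_IPV6_SUPPORT   0
#define CCP_SUPPORT        0
#define MEMP_NUM_PPP_PCB   1   /* one simultaneous PPP session             */

/* With these settings ppp_opts.h evaluates to:
 *   PPP_NUM_TIMEOUTS_PER_PCB = 1 + 1 + 0 + 0 = 2
 *   PPP_NUM_TIMEOUTS         = 1 * 2 * 1     = 2
 * i.e. the PPP module needs two additional sys_timeout slots in this
 * configuration. */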
+ * + * Memory allocated must be single buffered for PPP to works, it requires pbuf + * that are not going to be chained when allocated. This requires setting + * PBUF_POOL_BUFSIZE to at least 512 bytes, which is quite huge for small systems. + * + * Setting PPP_USE_PBUF_RAM to 1 makes PPP use memory from heap where continuous + * buffers are required, allowing you to use a smaller PBUF_POOL_BUFSIZE. + */ +#ifndef PPP_USE_PBUF_RAM +#define PPP_USE_PBUF_RAM 0 +#endif + +/** + * PPP_FCS_TABLE: Keep a 256*2 byte table to speed up FCS calculation for PPPoS + */ +#ifndef PPP_FCS_TABLE +#define PPP_FCS_TABLE 1 +#endif + +/** + * PAP_SUPPORT==1: Support PAP. + */ +#ifndef PAP_SUPPORT +#define PAP_SUPPORT 0 +#endif + +/** + * CHAP_SUPPORT==1: Support CHAP. + */ +#ifndef CHAP_SUPPORT +#define CHAP_SUPPORT 0 +#endif + +/** + * MSCHAP_SUPPORT==1: Support MSCHAP. + */ +#ifndef MSCHAP_SUPPORT +#define MSCHAP_SUPPORT 0 +#endif +#if MSCHAP_SUPPORT +/* MSCHAP requires CHAP support */ +#undef CHAP_SUPPORT +#define CHAP_SUPPORT 1 +#endif /* MSCHAP_SUPPORT */ + +/** + * EAP_SUPPORT==1: Support EAP. + */ +#ifndef EAP_SUPPORT +#define EAP_SUPPORT 0 +#endif + +/** + * CCP_SUPPORT==1: Support CCP. + */ +#ifndef CCP_SUPPORT +#define CCP_SUPPORT 0 +#endif + +/** + * MPPE_SUPPORT==1: Support MPPE. + */ +#ifndef MPPE_SUPPORT +#define MPPE_SUPPORT 0 +#endif +#if MPPE_SUPPORT +/* MPPE requires CCP support */ +#undef CCP_SUPPORT +#define CCP_SUPPORT 1 +/* MPPE requires MSCHAP support */ +#undef MSCHAP_SUPPORT +#define MSCHAP_SUPPORT 1 +/* MSCHAP requires CHAP support */ +#undef CHAP_SUPPORT +#define CHAP_SUPPORT 1 +#endif /* MPPE_SUPPORT */ + +/** + * CBCP_SUPPORT==1: Support CBCP. CURRENTLY NOT SUPPORTED! DO NOT SET! + */ +#ifndef CBCP_SUPPORT +#define CBCP_SUPPORT 0 +#endif + +/** + * ECP_SUPPORT==1: Support ECP. CURRENTLY NOT SUPPORTED! DO NOT SET! + */ +#ifndef ECP_SUPPORT +#define ECP_SUPPORT 0 +#endif + +/** + * DEMAND_SUPPORT==1: Support dial on demand. CURRENTLY NOT SUPPORTED! DO NOT SET! + */ +#ifndef DEMAND_SUPPORT +#define DEMAND_SUPPORT 0 +#endif + +/** + * LQR_SUPPORT==1: Support Link Quality Report. Do nothing except exchanging some LCP packets. + */ +#ifndef LQR_SUPPORT +#define LQR_SUPPORT 0 +#endif + +/** + * PPP_SERVER==1: Enable PPP server support (waiting for incoming PPP session). + * + * Currently only supported for PPPoS. + */ +#ifndef PPP_SERVER +#define PPP_SERVER 0 +#endif + +#if PPP_SERVER +/* + * PPP_OUR_NAME: Our name for authentication purposes + */ +#ifndef PPP_OUR_NAME +#define PPP_OUR_NAME "lwIP" +#endif +#endif /* PPP_SERVER */ + +/** + * VJ_SUPPORT==1: Support VJ header compression. + */ +#ifndef VJ_SUPPORT +#define VJ_SUPPORT 1 +#endif +/* VJ compression is only supported for TCP over IPv4 over PPPoS. */ +#if !PPPOS_SUPPORT || !PPP_IPV4_SUPPORT || !LWIP_TCP +#undef VJ_SUPPORT +#define VJ_SUPPORT 0 +#endif /* !PPPOS_SUPPORT */ + +/** + * PPP_MD5_RANDM==1: Use MD5 for better randomness. + * Enabled by default if CHAP, EAP, or L2TP AUTH support is enabled. + */ +#ifndef PPP_MD5_RANDM +#define PPP_MD5_RANDM (CHAP_SUPPORT || EAP_SUPPORT || PPPOL2TP_AUTH_SUPPORT) +#endif + +/** + * PolarSSL embedded library + * + * + * lwIP contains some files fetched from the latest BSD release of + * the PolarSSL project (PolarSSL 0.10.1-bsd) for ciphers and encryption + * methods we need for lwIP PPP support. + * + * The PolarSSL files were cleaned to contain only the necessary struct + * fields and functions needed for lwIP. 
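The forced-enable chain a few lines up (MPPE pulls in CCP and MS-CHAP, and MS-CHAP pulls in CHAP) means a single option is enough to request an encrypted, MS-CHAP-authenticated link; a hypothetical lwipopts.h excerpt:

/* lwipopts.h (hypothetical excerpt) */
#define PPP_SUPPORT    1
#define MPPE_SUPPORT   1   /* ppp_opts.h then forces CCP_SUPPORT, MSCHAP_SUPPORT
                            * and CHAP_SUPPORT to 1, and CHAP_MDTYPE_SUPPORTED
                            * becomes (MDTYPE_MICROSOFT_V2 | MDTYPE_MICROSOFT | MDTYPE_MD5). */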
+ * + * The PolarSSL API was not changed at all, so if you are already using + * PolarSSL you can choose to skip the compilation of the included PolarSSL + * library into lwIP. + * + * If you are not using the embedded copy you must include external + * libraries into your arch/cc.h port file. + * + * Beware of the stack requirements which can be a lot larger if you are not + * using our cleaned PolarSSL library. + */ + +/** + * LWIP_USE_EXTERNAL_POLARSSL: Use external PolarSSL library + */ +#ifndef LWIP_USE_EXTERNAL_POLARSSL +#define LWIP_USE_EXTERNAL_POLARSSL 0 +#endif + +/** + * LWIP_USE_EXTERNAL_MBEDTLS: Use external mbed TLS library + */ +#ifndef LWIP_USE_EXTERNAL_MBEDTLS +#define LWIP_USE_EXTERNAL_MBEDTLS 0 +#endif + +/* + * PPP Timeouts + */ + +/** + * FSM_DEFTIMEOUT: Timeout time in seconds + */ +#ifndef FSM_DEFTIMEOUT +#define FSM_DEFTIMEOUT 6 +#endif + +/** + * FSM_DEFMAXTERMREQS: Maximum Terminate-Request transmissions + */ +#ifndef FSM_DEFMAXTERMREQS +#define FSM_DEFMAXTERMREQS 2 +#endif + +/** + * FSM_DEFMAXCONFREQS: Maximum Configure-Request transmissions + */ +#ifndef FSM_DEFMAXCONFREQS +#define FSM_DEFMAXCONFREQS 10 +#endif + +/** + * FSM_DEFMAXNAKLOOPS: Maximum number of nak loops + */ +#ifndef FSM_DEFMAXNAKLOOPS +#define FSM_DEFMAXNAKLOOPS 5 +#endif + +/** + * UPAP_DEFTIMEOUT: Timeout (seconds) for retransmitting req + */ +#ifndef UPAP_DEFTIMEOUT +#define UPAP_DEFTIMEOUT 6 +#endif + +/** + * UPAP_DEFTRANSMITS: Maximum number of auth-reqs to send + */ +#ifndef UPAP_DEFTRANSMITS +#define UPAP_DEFTRANSMITS 10 +#endif + +#if PPP_SERVER +/** + * UPAP_DEFREQTIME: Time to wait for auth-req from peer + */ +#ifndef UPAP_DEFREQTIME +#define UPAP_DEFREQTIME 30 +#endif +#endif /* PPP_SERVER */ + +/** + * CHAP_DEFTIMEOUT: Timeout (seconds) for retransmitting req + */ +#ifndef CHAP_DEFTIMEOUT +#define CHAP_DEFTIMEOUT 6 +#endif + +/** + * CHAP_DEFTRANSMITS: max # times to send challenge + */ +#ifndef CHAP_DEFTRANSMITS +#define CHAP_DEFTRANSMITS 10 +#endif + +#if PPP_SERVER +/** + * CHAP_DEFRECHALLENGETIME: If this option is > 0, rechallenge the peer every n seconds + */ +#ifndef CHAP_DEFRECHALLENGETIME +#define CHAP_DEFRECHALLENGETIME 0 +#endif +#endif /* PPP_SERVER */ + +/** + * EAP_DEFREQTIME: Time to wait for peer request + */ +#ifndef EAP_DEFREQTIME +#define EAP_DEFREQTIME 6 +#endif + +/** + * EAP_DEFALLOWREQ: max # times to accept requests + */ +#ifndef EAP_DEFALLOWREQ +#define EAP_DEFALLOWREQ 10 +#endif + +#if PPP_SERVER +/** + * EAP_DEFTIMEOUT: Timeout (seconds) for rexmit + */ +#ifndef EAP_DEFTIMEOUT +#define EAP_DEFTIMEOUT 6 +#endif + +/** + * EAP_DEFTRANSMITS: max # times to transmit + */ +#ifndef EAP_DEFTRANSMITS +#define EAP_DEFTRANSMITS 10 +#endif +#endif /* PPP_SERVER */ + +/** + * LCP_DEFLOOPBACKFAIL: Default number of times we receive our magic number from the peer + * before deciding the link is looped-back. + */ +#ifndef LCP_DEFLOOPBACKFAIL +#define LCP_DEFLOOPBACKFAIL 10 +#endif + +/** + * LCP_ECHOINTERVAL: Interval in seconds between keepalive echo requests, 0 to disable. + */ +#ifndef LCP_ECHOINTERVAL +#define LCP_ECHOINTERVAL 0 +#endif + +/** + * LCP_MAXECHOFAILS: Number of unanswered echo requests before failure. + */ +#ifndef LCP_MAXECHOFAILS +#define LCP_MAXECHOFAILS 3 +#endif + +/** + * PPP_MAXIDLEFLAG: Max Xmit idle time (in ms) before resend flag char. 
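LCP_ECHOINTERVAL and LCP_MAXECHOFAILS above are the knobs for dead-link detection; for example:

/* lwipopts.h (hypothetical excerpt) */
#define LCP_ECHOINTERVAL   10  /* send an LCP Echo-Request every 10 seconds          */
#define LCP_MAXECHOFAILS   3   /* consider the link dead after 3 unanswered requests */
/* Worst-case detection time is therefore on the order of 10 s * 3 = 30 s. */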
+ */ +#ifndef PPP_MAXIDLEFLAG +#define PPP_MAXIDLEFLAG 100 +#endif + +/** + * PPP Packet sizes + */ + +/** + * PPP_MRU: Default MRU + */ +#ifndef PPP_MRU +#define PPP_MRU 1500 +#endif + +/** + * PPP_DEFMRU: Default MRU to try + */ +#ifndef PPP_DEFMRU +#define PPP_DEFMRU 1500 +#endif + +/** + * PPP_MAXMRU: Normally limit MRU to this (pppd default = 16384) + */ +#ifndef PPP_MAXMRU +#define PPP_MAXMRU 1500 +#endif + +/** + * PPP_MINMRU: No MRUs below this + */ +#ifndef PPP_MINMRU +#define PPP_MINMRU 128 +#endif + +/** + * PPPOL2TP_DEFMRU: Default MTU and MRU for L2TP + * Default = 1500 - PPPoE(6) - PPP Protocol(2) - IPv4 header(20) - UDP Header(8) + * - L2TP Header(6) - HDLC Header(2) - PPP Protocol(2) - MPPE Header(2) - PPP Protocol(2) + */ +#if PPPOL2TP_SUPPORT +#ifndef PPPOL2TP_DEFMRU +#define PPPOL2TP_DEFMRU 1450 +#endif +#endif /* PPPOL2TP_SUPPORT */ + +/** + * MAXNAMELEN: max length of hostname or name for auth + */ +#ifndef MAXNAMELEN +#define MAXNAMELEN 256 +#endif + +/** + * MAXSECRETLEN: max length of password or secret + */ +#ifndef MAXSECRETLEN +#define MAXSECRETLEN 256 +#endif + +/* ------------------------------------------------------------------------- */ + +/* + * Build triggers for embedded PolarSSL + */ +#if !LWIP_USE_EXTERNAL_POLARSSL && !LWIP_USE_EXTERNAL_MBEDTLS + +/* CHAP, EAP, L2TP AUTH and MD5 Random require MD5 support */ +#if CHAP_SUPPORT || EAP_SUPPORT || PPPOL2TP_AUTH_SUPPORT || PPP_MD5_RANDM +#define LWIP_INCLUDED_POLARSSL_MD5 1 +#endif /* CHAP_SUPPORT || EAP_SUPPORT || PPPOL2TP_AUTH_SUPPORT || PPP_MD5_RANDM */ + +#if MSCHAP_SUPPORT + +/* MSCHAP require MD4 support */ +#define LWIP_INCLUDED_POLARSSL_MD4 1 +/* MSCHAP require SHA1 support */ +#define LWIP_INCLUDED_POLARSSL_SHA1 1 +/* MSCHAP require DES support */ +#define LWIP_INCLUDED_POLARSSL_DES 1 + +/* MS-CHAP support is required for MPPE */ +#if MPPE_SUPPORT +/* MPPE require ARC4 support */ +#define LWIP_INCLUDED_POLARSSL_ARC4 1 +#endif /* MPPE_SUPPORT */ + +#endif /* MSCHAP_SUPPORT */ + +#endif /* !LWIP_USE_EXTERNAL_POLARSSL && !LWIP_USE_EXTERNAL_MBEDTLS */ + +/* Default value if unset */ +#ifndef LWIP_INCLUDED_POLARSSL_MD4 +#define LWIP_INCLUDED_POLARSSL_MD4 0 +#endif /* LWIP_INCLUDED_POLARSSL_MD4 */ +#ifndef LWIP_INCLUDED_POLARSSL_MD5 +#define LWIP_INCLUDED_POLARSSL_MD5 0 +#endif /* LWIP_INCLUDED_POLARSSL_MD5 */ +#ifndef LWIP_INCLUDED_POLARSSL_SHA1 +#define LWIP_INCLUDED_POLARSSL_SHA1 0 +#endif /* LWIP_INCLUDED_POLARSSL_SHA1 */ +#ifndef LWIP_INCLUDED_POLARSSL_DES +#define LWIP_INCLUDED_POLARSSL_DES 0 +#endif /* LWIP_INCLUDED_POLARSSL_DES */ +#ifndef LWIP_INCLUDED_POLARSSL_ARC4 +#define LWIP_INCLUDED_POLARSSL_ARC4 0 +#endif /* LWIP_INCLUDED_POLARSSL_ARC4 */ + +#endif /* PPP_SUPPORT */ + +/* Default value if unset */ +#ifndef PPP_NUM_TIMEOUTS +#define PPP_NUM_TIMEOUTS 0 +#endif /* PPP_NUM_TIMEOUTS */ + +#endif /* LWIP_PPP_OPTS_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h new file mode 100644 index 00000000..913d93f7 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h @@ -0,0 +1,137 @@ +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#ifndef LWIP_PPPAPI_H +#define LWIP_PPPAPI_H + +#include "netif/ppp/ppp_opts.h" + +#if LWIP_PPP_API /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/sys.h" +#include "lwip/netif.h" +#include "lwip/priv/tcpip_priv.h" +#include "netif/ppp/ppp.h" +#if PPPOS_SUPPORT +#include "netif/ppp/pppos.h" +#endif /* PPPOS_SUPPORT */ + +#ifdef __cplusplus +extern "C" { +#endif + +struct pppapi_msg_msg { + ppp_pcb *ppp; + union { +#if PPP_NOTIFY_PHASE + struct { + ppp_notify_phase_cb_fn notify_phase_cb; + } setnotifyphasecb; +#endif /* PPP_NOTIFY_PHASE */ +#if PPPOS_SUPPORT + struct { + struct netif *pppif; + pppos_output_cb_fn output_cb; + ppp_link_status_cb_fn link_status_cb; + void *ctx_cb; + } serialcreate; +#endif /* PPPOS_SUPPORT */ +#if PPPOE_SUPPORT + struct { + struct netif *pppif; + struct netif *ethif; + const char *service_name; + const char *concentrator_name; + ppp_link_status_cb_fn link_status_cb; + void *ctx_cb; + } ethernetcreate; +#endif /* PPPOE_SUPPORT */ +#if PPPOL2TP_SUPPORT + struct { + struct netif *pppif; + struct netif *netif; + API_MSG_M_DEF_C(ip_addr_t, ipaddr); + u16_t port; +#if PPPOL2TP_AUTH_SUPPORT + const u8_t *secret; + u8_t secret_len; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + ppp_link_status_cb_fn link_status_cb; + void *ctx_cb; + } l2tpcreate; +#endif /* PPPOL2TP_SUPPORT */ + struct { + u16_t holdoff; + } connect; + struct { + u8_t nocarrier; + } close; + struct { + u8_t cmd; + void *arg; + } ioctl; + } msg; +}; + +struct pppapi_msg { + struct tcpip_api_call_data call; + struct pppapi_msg_msg msg; +}; + +/* API for application */ +err_t pppapi_set_default(ppp_pcb *pcb); +#if PPP_NOTIFY_PHASE +err_t pppapi_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb); +#endif /* PPP_NOTIFY_PHASE */ +#if PPPOS_SUPPORT +ppp_pcb *pppapi_pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, ppp_link_status_cb_fn link_status_cb, void *ctx_cb); +#endif /* PPPOS_SUPPORT */ +#if PPPOE_SUPPORT +ppp_pcb *pppapi_pppoe_create(struct netif *pppif, struct netif *ethif, const char *service_name, + const char *concentrator_name, ppp_link_status_cb_fn link_status_cb, + void *ctx_cb); +#endif /* PPPOE_SUPPORT */ +#if PPPOL2TP_SUPPORT +ppp_pcb *pppapi_pppol2tp_create(struct netif *pppif, struct netif *netif, ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + 
ppp_link_status_cb_fn link_status_cb, void *ctx_cb); +#endif /* PPPOL2TP_SUPPORT */ +err_t pppapi_connect(ppp_pcb *pcb, u16_t holdoff); +#if PPP_SERVER +err_t pppapi_listen(ppp_pcb *pcb); +#endif /* PPP_SERVER */ +err_t pppapi_close(ppp_pcb *pcb, u8_t nocarrier); +err_t pppapi_free(ppp_pcb *pcb); +err_t pppapi_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_PPP_API */ + +#endif /* LWIP_PPPAPI_H */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h new file mode 100644 index 00000000..c0230bbc --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h @@ -0,0 +1,144 @@ +/* + * pppcrypt.c - PPP/DES linkage for MS-CHAP and EAP SRP-SHA1 + * + * Extracted from chap_ms.c by James Carlson. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* This header file is included in all PPP modules needing hashes and/or ciphers */ + +#ifndef PPPCRYPT_H +#define PPPCRYPT_H + +/* + * If included PolarSSL copy is not used, user is expected to include + * external libraries in arch/cc.h (which is included by lwip/arch.h). 
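When the stack runs with an operating system (LWIP_PPP_API, i.e. PPP_SUPPORT with NO_SYS==0), the pppapi_* wrappers declared above marshal each call onto the tcpip thread. A minimal sketch, assuming an application-provided serial output function matching pppos_output_cb_fn from pppos.h and the status callback from the earlier ppp.h sketch; all names are illustrative.

#include "netif/ppp/pppapi.h"

static struct netif ppp_netif;

/* Assumed application functions; signatures follow the callback typedefs. */
extern u32_t uart_output_cb(ppp_pcb *pcb, u8_t *data, u32_t len, void *ctx);
extern void link_status_cb(ppp_pcb *pcb, int err_code, void *ctx);

void ppp_app_start(void)
{
  ppp_pcb *ppp = pppapi_pppos_create(&ppp_netif, uart_output_cb,
                                     link_status_cb, NULL);
  if (ppp == NULL) {
    return;
  }
  pppapi_set_default(ppp);
  pppapi_connect(ppp, 0);
}

void ppp_app_stop(ppp_pcb *ppp)
{
  pppapi_close(ppp, 0);   /* thread-safe equivalent of ppp_close(ppp, 0) */
}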
+ */ +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Map hashes and ciphers functions to PolarSSL + */ +#if !LWIP_USE_EXTERNAL_MBEDTLS + +#include "netif/ppp/polarssl/md4.h" +#define lwip_md4_context md4_context +#define lwip_md4_init(context) +#define lwip_md4_starts md4_starts +#define lwip_md4_update md4_update +#define lwip_md4_finish md4_finish +#define lwip_md4_free(context) + +#include "netif/ppp/polarssl/md5.h" +#define lwip_md5_context md5_context +#define lwip_md5_init(context) +#define lwip_md5_starts md5_starts +#define lwip_md5_update md5_update +#define lwip_md5_finish md5_finish +#define lwip_md5_free(context) + +#include "netif/ppp/polarssl/sha1.h" +#define lwip_sha1_context sha1_context +#define lwip_sha1_init(context) +#define lwip_sha1_starts sha1_starts +#define lwip_sha1_update sha1_update +#define lwip_sha1_finish sha1_finish +#define lwip_sha1_free(context) + +#include "netif/ppp/polarssl/des.h" +#define lwip_des_context des_context +#define lwip_des_init(context) +#define lwip_des_setkey_enc des_setkey_enc +#define lwip_des_crypt_ecb des_crypt_ecb +#define lwip_des_free(context) + +#include "netif/ppp/polarssl/arc4.h" +#define lwip_arc4_context arc4_context +#define lwip_arc4_init(context) +#define lwip_arc4_setup arc4_setup +#define lwip_arc4_crypt arc4_crypt +#define lwip_arc4_free(context) + +#endif /* !LWIP_USE_EXTERNAL_MBEDTLS */ + +/* + * Map hashes and ciphers functions to mbed TLS + */ +#if LWIP_USE_EXTERNAL_MBEDTLS + +#define lwip_md4_context mbedtls_md4_context +#define lwip_md4_init mbedtls_md4_init +#define lwip_md4_starts mbedtls_md4_starts +#define lwip_md4_update mbedtls_md4_update +#define lwip_md4_finish mbedtls_md4_finish +#define lwip_md4_free mbedtls_md4_free + +#define lwip_md5_context mbedtls_md5_context +#define lwip_md5_init mbedtls_md5_init +#define lwip_md5_starts mbedtls_md5_starts +#define lwip_md5_update mbedtls_md5_update +#define lwip_md5_finish mbedtls_md5_finish +#define lwip_md5_free mbedtls_md5_free + +#define lwip_sha1_context mbedtls_sha1_context +#define lwip_sha1_init mbedtls_sha1_init +#define lwip_sha1_starts mbedtls_sha1_starts +#define lwip_sha1_update mbedtls_sha1_update +#define lwip_sha1_finish mbedtls_sha1_finish +#define lwip_sha1_free mbedtls_sha1_free + +#define lwip_des_context mbedtls_des_context +#define lwip_des_init mbedtls_des_init +#define lwip_des_setkey_enc mbedtls_des_setkey_enc +#define lwip_des_crypt_ecb mbedtls_des_crypt_ecb +#define lwip_des_free mbedtls_des_free + +#define lwip_arc4_context mbedtls_arc4_context +#define lwip_arc4_init mbedtls_arc4_init +#define lwip_arc4_setup mbedtls_arc4_setup +#define lwip_arc4_crypt(context, buffer, length) mbedtls_arc4_crypt(context, length, buffer, buffer) +#define lwip_arc4_free mbedtls_arc4_free + +#endif /* LWIP_USE_EXTERNAL_MBEDTLS */ + +void pppcrypt_56_to_64_bit_key(u_char *key, u_char *des_key); + +#ifdef __cplusplus +} +#endif + +#endif /* PPPCRYPT_H */ + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h new file mode 100644 index 00000000..36ee4f9b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h @@ -0,0 +1,88 @@ +/***************************************************************************** +* pppdebug.h - System debugging utilities. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1998 Global Election Systems Inc. 
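To illustrate the aliasing above: the same calling sequence works whether the bundled PolarSSL copy or an external mbed TLS build provides the primitives. This is only a sketch of the classic CHAP MD5 response computation (MD5 over identifier, secret and challenge), not the actual chap-md5.c code; argument types are kept non-const to stay compatible with either backend.

#include "netif/ppp/pppcrypt.h"

static void chap_md5_response(u_char id,
                              u_char *secret, int secret_len,
                              u_char *challenge, int challenge_len,
                              u_char digest[16])
{
  lwip_md5_context ctx;

  lwip_md5_init(&ctx);
  lwip_md5_starts(&ctx);
  lwip_md5_update(&ctx, &id, 1);                   /* CHAP identifier  */
  lwip_md5_update(&ctx, secret, secret_len);        /* shared secret    */
  lwip_md5_update(&ctx, challenge, challenge_len);  /* peer's challenge */
  lwip_md5_finish(&ctx, digest);
  lwip_md5_free(&ctx);
}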
+* portions Copyright (c) 2001 by Cognizant Pty Ltd. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY (please don't use tabs!) +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 98-07-29 Guy Lancaster , Global Election Systems Inc. +* Original. +* +***************************************************************************** +*/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPPDEBUG_H +#define PPPDEBUG_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Trace levels. */ +#define LOG_CRITICAL (PPP_DEBUG | LWIP_DBG_LEVEL_SEVERE) +#define LOG_ERR (PPP_DEBUG | LWIP_DBG_LEVEL_SEVERE) +#define LOG_NOTICE (PPP_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define LOG_WARNING (PPP_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define LOG_INFO (PPP_DEBUG) +#define LOG_DETAIL (PPP_DEBUG) +#define LOG_DEBUG (PPP_DEBUG) + +#if PPP_DEBUG + +#define MAINDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define SYSDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define FSMDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define LCPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define IPCPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define IPV6CPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define UPAPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define CHAPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define PPPDEBUG(a, b) LWIP_DEBUGF(a, b) + +#else /* PPP_DEBUG */ + +#define MAINDEBUG(a) +#define SYSDEBUG(a) +#define FSMDEBUG(a) +#define LCPDEBUG(a) +#define IPCPDEBUG(a) +#define IPV6CPDEBUG(a) +#define UPAPDEBUG(a) +#define CHAPDEBUG(a) +#define PPPDEBUG(a, b) + +#endif /* PPP_DEBUG */ + +#ifdef __cplusplus +} +#endif + +#endif /* PPPDEBUG_H */ + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h new file mode 100644 index 00000000..08ab7ab5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h @@ -0,0 +1,187 @@ +/***************************************************************************** +* pppoe.h - PPP Over Ethernet implementation for lwIP. +* +* Copyright (c) 2006 by Marc Boucher, Services Informatiques (MBSI) inc. 
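The trace macros above only produce output when PPP_DEBUG is switched on in lwipopts.h and LWIP_DEBUGF is wired to a printf-like function in the port. A hypothetical use:

/* lwipopts.h (hypothetical excerpt) */
#define PPP_DEBUG   LWIP_DBG_ON

/* In application or port code: */
#include "netif/ppp/pppdebug.h"

void trace_ppp_phase(int unit, int phase)
{
  PPPDEBUG(LOG_INFO, ("ppp phase[%d]: %d\n", unit, phase));
}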
+* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 06-01-01 Marc Boucher +* Ported to lwIP. +*****************************************************************************/ + + + +/* based on NetBSD: if_pppoe.c,v 1.64 2006/01/31 23:50:15 martin Exp */ + +/*- + * Copyright (c) 2002 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Martin Husemann . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPP_OE_H +#define PPP_OE_H + +#include "ppp.h" +#include "lwip/etharp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct pppoehdr { + PACK_STRUCT_FLD_8(u8_t vertype); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t session); + PACK_STRUCT_FIELD(u16_t plen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct pppoetag { + PACK_STRUCT_FIELD(u16_t tag); + PACK_STRUCT_FIELD(u16_t len); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + + +#define PPPOE_STATE_INITIAL 0 +#define PPPOE_STATE_PADI_SENT 1 +#define PPPOE_STATE_PADR_SENT 2 +#define PPPOE_STATE_SESSION 3 +/* passive */ +#define PPPOE_STATE_PADO_SENT 1 + +#define PPPOE_HEADERLEN sizeof(struct pppoehdr) +#define PPPOE_VERTYPE 0x11 /* VER=1, TYPE = 1 */ + +#define PPPOE_TAG_EOL 0x0000 /* end of list */ +#define PPPOE_TAG_SNAME 0x0101 /* service name */ +#define PPPOE_TAG_ACNAME 0x0102 /* access concentrator name */ +#define PPPOE_TAG_HUNIQUE 0x0103 /* host unique */ +#define PPPOE_TAG_ACCOOKIE 0x0104 /* AC cookie */ +#define PPPOE_TAG_VENDOR 0x0105 /* vendor specific */ +#define PPPOE_TAG_RELAYSID 0x0110 /* relay session id */ +#define PPPOE_TAG_SNAME_ERR 0x0201 /* service name error */ +#define PPPOE_TAG_ACSYS_ERR 0x0202 /* AC system error */ +#define PPPOE_TAG_GENERIC_ERR 0x0203 /* gerneric error */ + +#define PPPOE_CODE_PADI 0x09 /* Active Discovery Initiation */ +#define PPPOE_CODE_PADO 0x07 /* Active Discovery Offer */ +#define PPPOE_CODE_PADR 0x19 /* Active Discovery Request */ +#define PPPOE_CODE_PADS 0x65 /* Active Discovery Session confirmation */ +#define PPPOE_CODE_PADT 0xA7 /* Active Discovery Terminate */ + +#ifndef PPPOE_MAX_AC_COOKIE_LEN +#define PPPOE_MAX_AC_COOKIE_LEN 64 +#endif + +struct pppoe_softc { + struct pppoe_softc *next; + struct netif *sc_ethif; /* ethernet interface we are using */ + ppp_pcb *pcb; /* PPP PCB */ + + struct eth_addr sc_dest; /* hardware address of concentrator */ + u16_t sc_session; /* PPPoE session id */ + u8_t sc_state; /* discovery phase or session connected */ + +#ifdef PPPOE_TODO + u8_t *sc_service_name; /* if != NULL: requested name of service */ + u8_t *sc_concentrator_name; /* if != NULL: requested concentrator id */ +#endif /* PPPOE_TODO */ + u8_t sc_ac_cookie[PPPOE_MAX_AC_COOKIE_LEN]; /* content of AC cookie we must echo back */ + u8_t sc_ac_cookie_len; /* length of cookie data */ +#ifdef PPPOE_SERVER + u8_t *sc_hunique; /* content of host unique we must echo back */ + u8_t sc_hunique_len; /* length of host unique */ +#endif + u8_t sc_padi_retried; /* number of PADI retries already done */ + u8_t sc_padr_retried; /* number of PADR retries already done */ +}; + + +#define pppoe_init() /* compatibility define, no initialization needed */ + +ppp_pcb *pppoe_create(struct netif *pppif, + struct netif *ethif, + const char *service_name, const char *concentrator_name, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +/* + * Functions called from lwIP + * DO NOT CALL FROM lwIP USER APPLICATION. 
+ */ +void pppoe_disc_input(struct netif *netif, struct pbuf *p); +void pppoe_data_input(struct netif *netif, struct pbuf *p); + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_OE_H */ + +#endif /* PPP_SUPPORT && PPPOE_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h new file mode 100644 index 00000000..1221ca15 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h @@ -0,0 +1,209 @@ +/** + * @file + * Network Point to Point Protocol over Layer 2 Tunneling Protocol header file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOL2TP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPPOL2TP_H +#define PPPOL2TP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Timeout */ +#define PPPOL2TP_CONTROL_TIMEOUT (5*1000) /* base for quick timeout calculation */ +#define PPPOL2TP_SLOW_RETRY (60*1000) /* persistent retry interval */ + +#define PPPOL2TP_MAXSCCRQ 4 /* retry SCCRQ four times (quickly) */ +#define PPPOL2TP_MAXICRQ 4 /* retry IRCQ four times */ +#define PPPOL2TP_MAXICCN 4 /* retry ICCN four times */ + +/* L2TP header flags */ +#define PPPOL2TP_HEADERFLAG_CONTROL 0x8000 +#define PPPOL2TP_HEADERFLAG_LENGTH 0x4000 +#define PPPOL2TP_HEADERFLAG_SEQUENCE 0x0800 +#define PPPOL2TP_HEADERFLAG_OFFSET 0x0200 +#define PPPOL2TP_HEADERFLAG_PRIORITY 0x0100 +#define PPPOL2TP_HEADERFLAG_VERSION 0x0002 + +/* Mandatory bits for control: Control, Length, Sequence, Version 2 */ +#define PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY (PPPOL2TP_HEADERFLAG_CONTROL|PPPOL2TP_HEADERFLAG_LENGTH|PPPOL2TP_HEADERFLAG_SEQUENCE|PPPOL2TP_HEADERFLAG_VERSION) +/* Forbidden bits for control: Offset, Priority */ +#define PPPOL2TP_HEADERFLAG_CONTROL_FORBIDDEN (PPPOL2TP_HEADERFLAG_OFFSET|PPPOL2TP_HEADERFLAG_PRIORITY) + +/* Mandatory bits for data: Version 2 */ +#define PPPOL2TP_HEADERFLAG_DATA_MANDATORY (PPPOL2TP_HEADERFLAG_VERSION) + +/* AVP (Attribute Value Pair) header */ +#define PPPOL2TP_AVPHEADERFLAG_MANDATORY 0x8000 +#define PPPOL2TP_AVPHEADERFLAG_HIDDEN 0x4000 +#define PPPOL2TP_AVPHEADERFLAG_LENGTHMASK 0x03ff + +/* -- AVP - Message type */ +#define PPPOL2TP_AVPTYPE_MESSAGE 0 /* Message type */ + +/* Control Connection Management */ +#define PPPOL2TP_MESSAGETYPE_SCCRQ 1 /* Start Control Connection Request */ +#define PPPOL2TP_MESSAGETYPE_SCCRP 2 /* Start Control Connection Reply */ +#define PPPOL2TP_MESSAGETYPE_SCCCN 3 /* Start Control Connection Connected */ +#define PPPOL2TP_MESSAGETYPE_STOPCCN 4 /* Stop Control Connection Notification */ +#define PPPOL2TP_MESSAGETYPE_HELLO 6 /* Hello */ +/* Call Management */ +#define PPPOL2TP_MESSAGETYPE_OCRQ 7 /* Outgoing Call Request */ +#define PPPOL2TP_MESSAGETYPE_OCRP 8 /* Outgoing Call Reply */ +#define PPPOL2TP_MESSAGETYPE_OCCN 9 /* Outgoing Call Connected */ +#define PPPOL2TP_MESSAGETYPE_ICRQ 10 /* Incoming Call Request */ +#define PPPOL2TP_MESSAGETYPE_ICRP 11 /* Incoming Call Reply */ +#define PPPOL2TP_MESSAGETYPE_ICCN 12 /* Incoming Call Connected */ +#define PPPOL2TP_MESSAGETYPE_CDN 14 /* Call Disconnect Notify */ +/* Error reporting */ +#define PPPOL2TP_MESSAGETYPE_WEN 15 /* WAN Error Notify */ +/* PPP Session Control */ +#define PPPOL2TP_MESSAGETYPE_SLI 16 /* Set Link Info */ + +/* -- AVP - Result code */ +#define PPPOL2TP_AVPTYPE_RESULTCODE 1 /* Result code */ +#define PPPOL2TP_RESULTCODE 1 /* General request to clear control connection */ + +/* -- AVP - Protocol version (!= L2TP Header version) */ +#define PPPOL2TP_AVPTYPE_VERSION 2 +#define PPPOL2TP_VERSION 0x0100 /* L2TP Protocol version 1, revision 0 */ + +/* -- AVP - Framing capabilities */ +#define PPPOL2TP_AVPTYPE_FRAMINGCAPABILITIES 3 /* Bearer capabilities */ +#define PPPOL2TP_FRAMINGCAPABILITIES 0x00000003 /* Async + Sync framing */ + +/* -- AVP - Bearer capabilities */ +#define PPPOL2TP_AVPTYPE_BEARERCAPABILITIES 4 /* Bearer capabilities */ +#define PPPOL2TP_BEARERCAPABILITIES 0x00000003 /* Analog + Digital Access */ + +/* -- AVP - Tie breaker */ +#define PPPOL2TP_AVPTYPE_TIEBREAKER 5 + +/* -- AVP - Host name */ +#define 
PPPOL2TP_AVPTYPE_HOSTNAME 7 /* Host name */ +#define PPPOL2TP_HOSTNAME "lwIP" /* FIXME: make it configurable */ + +/* -- AVP - Vendor name */ +#define PPPOL2TP_AVPTYPE_VENDORNAME 8 /* Vendor name */ +#define PPPOL2TP_VENDORNAME "lwIP" /* FIXME: make it configurable */ + +/* -- AVP - Assign tunnel ID */ +#define PPPOL2TP_AVPTYPE_TUNNELID 9 /* Assign Tunnel ID */ + +/* -- AVP - Receive window size */ +#define PPPOL2TP_AVPTYPE_RECEIVEWINDOWSIZE 10 /* Receive window size */ +#define PPPOL2TP_RECEIVEWINDOWSIZE 8 /* FIXME: make it configurable */ + +/* -- AVP - Challenge */ +#define PPPOL2TP_AVPTYPE_CHALLENGE 11 /* Challenge */ + +/* -- AVP - Cause code */ +#define PPPOL2TP_AVPTYPE_CAUSECODE 12 /* Cause code*/ + +/* -- AVP - Challenge response */ +#define PPPOL2TP_AVPTYPE_CHALLENGERESPONSE 13 /* Challenge response */ +#define PPPOL2TP_AVPTYPE_CHALLENGERESPONSE_SIZE 16 + +/* -- AVP - Assign session ID */ +#define PPPOL2TP_AVPTYPE_SESSIONID 14 /* Assign Session ID */ + +/* -- AVP - Call serial number */ +#define PPPOL2TP_AVPTYPE_CALLSERIALNUMBER 15 /* Call Serial Number */ + +/* -- AVP - Framing type */ +#define PPPOL2TP_AVPTYPE_FRAMINGTYPE 19 /* Framing Type */ +#define PPPOL2TP_FRAMINGTYPE 0x00000001 /* Sync framing */ + +/* -- AVP - TX Connect Speed */ +#define PPPOL2TP_AVPTYPE_TXCONNECTSPEED 24 /* TX Connect Speed */ +#define PPPOL2TP_TXCONNECTSPEED 100000000 /* Connect speed: 100 Mbits/s */ + +/* L2TP Session state */ +#define PPPOL2TP_STATE_INITIAL 0 +#define PPPOL2TP_STATE_SCCRQ_SENT 1 +#define PPPOL2TP_STATE_ICRQ_SENT 2 +#define PPPOL2TP_STATE_ICCN_SENT 3 +#define PPPOL2TP_STATE_DATA 4 + +#define PPPOL2TP_OUTPUT_DATA_HEADER_LEN 6 /* Our data header len */ + +/* + * PPPoL2TP interface control block. + */ +typedef struct pppol2tp_pcb_s pppol2tp_pcb; +struct pppol2tp_pcb_s { + ppp_pcb *ppp; /* PPP PCB */ + u8_t phase; /* L2TP phase */ + struct udp_pcb *udp; /* UDP L2TP Socket */ + struct netif *netif; /* Output interface, used as a default route */ + ip_addr_t remote_ip; /* LNS IP Address */ + u16_t remote_port; /* LNS port */ +#if PPPOL2TP_AUTH_SUPPORT + const u8_t *secret; /* Secret string */ + u8_t secret_len; /* Secret string length */ + u8_t secret_rv[16]; /* Random vector */ + u8_t challenge_hash[16]; /* Challenge response */ + u8_t send_challenge; /* Boolean whether the next sent packet should contains a challenge response */ +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + u16_t tunnel_port; /* Tunnel port */ + u16_t our_ns; /* NS to peer */ + u16_t peer_nr; /* NR from peer */ + u16_t peer_ns; /* Expected NS from peer */ + u16_t source_tunnel_id; /* Tunnel ID assigned by peer */ + u16_t remote_tunnel_id; /* Tunnel ID assigned to peer */ + u16_t source_session_id; /* Session ID assigned by peer */ + u16_t remote_session_id; /* Session ID assigned to peer */ + + u8_t sccrq_retried; /* number of SCCRQ retries already done */ + u8_t icrq_retried; /* number of ICRQ retries already done */ + u8_t iccn_retried; /* number of ICCN retries already done */ +}; + + +/* Create a new L2TP session. 
*/ +ppp_pcb *pppol2tp_create(struct netif *pppif, + struct netif *netif, const ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +#ifdef __cplusplus +} +#endif + +#endif /* PPPOL2TP_H */ +#endif /* PPP_SUPPORT && PPPOL2TP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h new file mode 100644 index 00000000..380a965c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h @@ -0,0 +1,126 @@ +/** + * @file + * Network Point to Point Protocol over Serial header file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOS_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPPOS_H +#define PPPOS_H + +#include "lwip/sys.h" + +#include "ppp.h" +#include "vj.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* PPP packet parser states. Current state indicates operation yet to be + * completed. */ +enum { + PDIDLE = 0, /* Idle state - waiting. */ + PDSTART, /* Process start flag. */ + PDADDRESS, /* Process address field. */ + PDCONTROL, /* Process control field. */ + PDPROTOCOL1, /* Process protocol field 1. */ + PDPROTOCOL2, /* Process protocol field 2. */ + PDDATA /* Process data byte. */ +}; + +/* PPPoS serial output callback function prototype */ +typedef u32_t (*pppos_output_cb_fn)(ppp_pcb *pcb, u8_t *data, u32_t len, void *ctx); + +/* + * Extended asyncmap - allows any character to be escaped. + */ +typedef u8_t ext_accm[32]; + +/* + * PPPoS interface control block. + */ +typedef struct pppos_pcb_s pppos_pcb; +struct pppos_pcb_s { + /* -- below are data that will NOT be cleared between two sessions */ + ppp_pcb *ppp; /* PPP PCB */ + pppos_output_cb_fn output_cb; /* PPP serial output callback */ + + /* -- below are data that will be cleared between two sessions + * + * last_xmit must be the first member of cleared members, because it is + * used to know which part must not be cleared. 
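+ * (Illustrative sketch only, not necessarily the exact implementation: this layout lets + * the session code wipe all per-session state in a single call such as + * memset(&pppos->last_xmit, 0, sizeof(pppos_pcb) - offsetof(pppos_pcb, last_xmit)); + * so any member declared above last_xmit survives from one session to the next.)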
+ */ + u32_t last_xmit; /* Time of last transmission. */ + ext_accm out_accm; /* Async-Ctl-Char-Map for output. */ + + /* flags */ + unsigned int open :1; /* Set if PPPoS is open */ + unsigned int pcomp :1; /* Does peer accept protocol compression? */ + unsigned int accomp :1; /* Does peer accept addr/ctl compression? */ + + /* PPPoS rx */ + ext_accm in_accm; /* Async-Ctl-Char-Map for input. */ + struct pbuf *in_head, *in_tail; /* The input packet. */ + u16_t in_protocol; /* The input protocol code. */ + u16_t in_fcs; /* Input Frame Check Sequence value. */ + u8_t in_state; /* The input process state. */ + u8_t in_escaped; /* Escape next character. */ +}; + +/* Create a new PPPoS session. */ +ppp_pcb *pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +#if !NO_SYS && !PPP_INPROC_IRQ_SAFE +/* Pass received raw characters to PPPoS to be decoded through lwIP TCPIP thread. */ +err_t pppos_input_tcpip(ppp_pcb *ppp, u8_t *s, int l); +#endif /* !NO_SYS && !PPP_INPROC_IRQ_SAFE */ + +/* PPP over Serial: this is the input function to be called for received data. */ +void pppos_input(ppp_pcb *ppp, u8_t* data, int len); + + +/* + * Functions called from lwIP + * DO NOT CALL FROM lwIP USER APPLICATION. + */ +#if !NO_SYS && !PPP_INPROC_IRQ_SAFE +err_t pppos_input_sys(struct pbuf *p, struct netif *inp); +#endif /* !NO_SYS && !PPP_INPROC_IRQ_SAFE */ + +#ifdef __cplusplus +} +#endif + +#endif /* PPPOS_H */ +#endif /* PPP_SUPPORT && PPPOL2TP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h new file mode 100644 index 00000000..540d9814 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h @@ -0,0 +1,131 @@ +/* + * upap.h - User/Password Authentication Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: upap.h,v 1.8 2002/12/04 23:03:33 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef UPAP_H +#define UPAP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Packet header = Code, id, length. + */ +#define UPAP_HEADERLEN 4 + + +/* + * UPAP codes. + */ +#define UPAP_AUTHREQ 1 /* Authenticate-Request */ +#define UPAP_AUTHACK 2 /* Authenticate-Ack */ +#define UPAP_AUTHNAK 3 /* Authenticate-Nak */ + + +/* + * Client states. + */ +#define UPAPCS_INITIAL 0 /* Connection down */ +#define UPAPCS_CLOSED 1 /* Connection up, haven't requested auth */ +#define UPAPCS_PENDING 2 /* Connection down, have requested auth */ +#define UPAPCS_AUTHREQ 3 /* We've sent an Authenticate-Request */ +#define UPAPCS_OPEN 4 /* We've received an Ack */ +#define UPAPCS_BADAUTH 5 /* We've received a Nak */ + +/* + * Server states. + */ +#define UPAPSS_INITIAL 0 /* Connection down */ +#define UPAPSS_CLOSED 1 /* Connection up, haven't requested auth */ +#define UPAPSS_PENDING 2 /* Connection down, have requested auth */ +#define UPAPSS_LISTEN 3 /* Listening for an Authenticate */ +#define UPAPSS_OPEN 4 /* We've sent an Ack */ +#define UPAPSS_BADAUTH 5 /* We've sent a Nak */ + + +/* + * Timeouts. + */ +#if 0 /* moved to ppp_opts.h */ +#define UPAP_DEFTIMEOUT 3 /* Timeout (seconds) for retransmitting req */ +#define UPAP_DEFREQTIME 30 /* Time to wait for auth-req from peer */ +#endif /* moved to ppp_opts.h */ + +/* + * Each interface is described by upap structure. + */ +#if PAP_SUPPORT +typedef struct upap_state { + const char *us_user; /* User */ + u8_t us_userlen; /* User length */ + const char *us_passwd; /* Password */ + u8_t us_passwdlen; /* Password length */ + u8_t us_clientstate; /* Client state */ +#if PPP_SERVER + u8_t us_serverstate; /* Server state */ +#endif /* PPP_SERVER */ + u8_t us_id; /* Current id */ + u8_t us_transmits; /* Number of auth-reqs sent */ +} upap_state; +#endif /* PAP_SUPPORT */ + + +void upap_authwithpeer(ppp_pcb *pcb, const char *user, const char *password); +#if PPP_SERVER +void upap_authpeer(ppp_pcb *pcb); +#endif /* PPP_SERVER */ + +extern const struct protent pap_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* UPAP_H */ +#endif /* PPP_SUPPORT && PAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h new file mode 100644 index 00000000..77d9976c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h @@ -0,0 +1,169 @@ +/* + * Definitions for tcp compression routines. + * + * $Id: vj.h,v 1.7 2010/02/22 17:52:09 goldsimon Exp $ + * + * Copyright (c) 1989 Regents of the University of California. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by the University of California, Berkeley. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989: + * - Initial distribution. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && VJ_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef VJ_H +#define VJ_H + +#include "lwip/ip.h" +#include "lwip/priv/tcp_priv.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAX_SLOTS 16 /* must be > 2 and < 256 */ +#define MAX_HDR 128 + +/* + * Compressed packet format: + * + * The first octet contains the packet type (top 3 bits), TCP + * 'push' bit, and flags that indicate which of the 4 TCP sequence + * numbers have changed (bottom 5 bits). The next octet is a + * conversation number that associates a saved IP/TCP header with + * the compressed packet. The next two octets are the TCP checksum + * from the original datagram. The next 0 to 15 octets are + * sequence number changes, one change per bit set in the header + * (there may be no changes and there are two special cases where + * the receiver implicitly knows what changed -- see below). + * + * There are 5 numbers which can change (they are always inserted + * in the following order): TCP urgent pointer, window, + * acknowlegement, sequence number and IP ID. (The urgent pointer + * is different from the others in that its value is sent, not the + * change in value.) Since typical use of SLIP links is biased + * toward small packets (see comments on MTU/MSS below), changes + * use a variable length coding with one octet for numbers in the + * range 1 - 255 and 3 octets (0, MSB, LSB) for numbers in the + * range 256 - 65535 or 0. (If the change in sequence number or + * ack is more than 65535, an uncompressed packet is sent.) + */ + +/* + * Packet types (must not conflict with IP protocol version) + * + * The top nibble of the first octet is the packet type. There are + * three possible types: IP (not proto TCP or tcp with one of the + * control flags set); uncompressed TCP (a normal IP/TCP packet but + * with the 8-bit protocol field replaced by an 8-bit connection id -- + * this type of packet syncs the sender & receiver); and compressed + * TCP (described above). + * + * LSB of 4-bit field is TCP "PUSH" bit (a worthless anachronism) and + * is logically part of the 4-bit "changes" field that follows. Top + * three bits are actual packet type. For backward compatibility + * and in the interest of conserving bits, numbers are chosen so the + * IP protocol version number (4) which normally appears in this nibble + * means "IP packet". 
+ */ + +/* packet types */ +#define TYPE_IP 0x40 +#define TYPE_UNCOMPRESSED_TCP 0x70 +#define TYPE_COMPRESSED_TCP 0x80 +#define TYPE_ERROR 0x00 + +/* Bits in first octet of compressed packet */ +#define NEW_C 0x40 /* flag bits for what changed in a packet */ +#define NEW_I 0x20 +#define NEW_S 0x08 +#define NEW_A 0x04 +#define NEW_W 0x02 +#define NEW_U 0x01 + +/* reserved, special-case values of above */ +#define SPECIAL_I (NEW_S|NEW_W|NEW_U) /* echoed interactive traffic */ +#define SPECIAL_D (NEW_S|NEW_A|NEW_W|NEW_U) /* unidirectional data */ +#define SPECIALS_MASK (NEW_S|NEW_A|NEW_W|NEW_U) + +#define TCP_PUSH_BIT 0x10 + + +/* + * "state" data for each active tcp conversation on the wire. This is + * basically a copy of the entire IP/TCP header from the last packet + * we saw from the conversation together with a small identifier + * the transmit & receive ends of the line use to locate saved header. + */ +struct cstate { + struct cstate *cs_next; /* next most recently used state (xmit only) */ + u16_t cs_hlen; /* size of hdr (receive only) */ + u8_t cs_id; /* connection # associated with this state */ + u8_t cs_filler; + union { + char csu_hdr[MAX_HDR]; + struct ip_hdr csu_ip; /* ip/tcp hdr from most recent packet */ + } vjcs_u; +}; +#define cs_ip vjcs_u.csu_ip +#define cs_hdr vjcs_u.csu_hdr + + +struct vjstat { + u32_t vjs_packets; /* outbound packets */ + u32_t vjs_compressed; /* outbound compressed packets */ + u32_t vjs_searches; /* searches for connection state */ + u32_t vjs_misses; /* times couldn't find conn. state */ + u32_t vjs_uncompressedin; /* inbound uncompressed packets */ + u32_t vjs_compressedin; /* inbound compressed packets */ + u32_t vjs_errorin; /* inbound unknown type packets */ + u32_t vjs_tossed; /* inbound packets tossed because of error */ +}; + +/* + * all the state data for one serial line (we need one of these per line). + */ +struct vjcompress { + struct cstate *last_cs; /* most recently used tstate */ + u8_t last_recv; /* last rcvd conn. id */ + u8_t last_xmit; /* last sent conn. id */ + u16_t flags; + u8_t maxSlotIndex; + u8_t compressSlot; /* Flag indicating OK to compress slot ID. */ +#if LINK_STATS + struct vjstat stats; +#endif + struct cstate tstate[MAX_SLOTS]; /* xmit connection states */ + struct cstate rstate[MAX_SLOTS]; /* receive connection states */ +}; + +/* flag values */ +#define VJF_TOSS 1U /* tossing rcvd frames because of input err */ + +extern void vj_compress_init (struct vjcompress *comp); +extern u8_t vj_compress_tcp (struct vjcompress *comp, struct pbuf **pb); +extern void vj_uncompress_err (struct vjcompress *comp); +extern int vj_uncompress_uncomp(struct pbuf *nb, struct vjcompress *comp); +extern int vj_uncompress_tcp (struct pbuf **nb, struct vjcompress *comp); + +#ifdef __cplusplus +} +#endif + +#endif /* VJ_H */ + +#endif /* PPP_SUPPORT && VJ_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/slipif.h b/Middlewares/Third_Party/LwIP/src/include/netif/slipif.h new file mode 100644 index 00000000..65ba31f8 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/slipif.h @@ -0,0 +1,87 @@ +/** + * @file + * + * SLIP netif API + */ + +/* + * Copyright (c) 2001, Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Institute nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_NETIF_SLIPIF_H +#define LWIP_HDR_NETIF_SLIPIF_H + +#include "lwip/opt.h" +#include "lwip/netif.h" + +/** Set this to 1 to start a thread that blocks reading on the serial line + * (using sio_read()). + */ +#ifndef SLIP_USE_RX_THREAD +#define SLIP_USE_RX_THREAD !NO_SYS +#endif + +/** Set this to 1 to enable functions to pass in RX bytes from ISR context. + * If enabled, slipif_received_byte[s]() process incoming bytes and put assembled + * packets on a queue, which is fed into lwIP from slipif_poll(). + * If disabled, slipif_poll() polls the serial line (using sio_tryread()). + */ +#ifndef SLIP_RX_FROM_ISR +#define SLIP_RX_FROM_ISR 0 +#endif + +/** Set this to 1 (default for SLIP_RX_FROM_ISR) to queue incoming packets + * received by slipif_received_byte[s]() as long as PBUF_POOL pbufs are available. + * If disabled, packets will be dropped if more than one packet is received. + */ +#ifndef SLIP_RX_QUEUE +#define SLIP_RX_QUEUE SLIP_RX_FROM_ISR +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +err_t slipif_init(struct netif * netif); +void slipif_poll(struct netif *netif); +#if SLIP_RX_FROM_ISR +void slipif_process_rxqueue(struct netif *netif); +void slipif_received_byte(struct netif *netif, u8_t data); +void slipif_received_bytes(struct netif *netif, u8_t *data, u8_t len); +#endif /* SLIP_RX_FROM_ISR */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_SLIPIF_H */ + diff --git a/Middlewares/Third_Party/LwIP/src/include/netif/zepif.h b/Middlewares/Third_Party/LwIP/src/include/netif/zepif.h new file mode 100644 index 00000000..2a801b4c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/include/netif/zepif.h @@ -0,0 +1,81 @@ +/** + * @file + * + * A netif implementing the ZigBee Encapsulation Protocol (ZEP). + * This is used to tunnel 6LowPAN over UDP. + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1.
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_ZEPIF_H +#define LWIP_HDR_ZEPIF_H + +#include "lwip/opt.h" +#include "netif/lowpan6.h" + +#if LWIP_IPV6 && LWIP_UDP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZEPIF_DEFAULT_UDP_PORT 17754 + +/** Pass this struct as 'state' to netif_add to control the behaviour + * of this netif. If NULL is passed, default behaviour is chosen */ +struct zepif_init { + /** The UDP port used to send ZEP frames from (0 = default) */ + u16_t zep_src_udp_port; + /** The UDP port used to send ZEP frames to (0 = default) */ + u16_t zep_dst_udp_port; + /** The IP address to send ZEP frames from (NULL = ANY) */ + const ip_addr_t *zep_src_ip_addr; + /** The IP address to send ZEP frames to (NULL = BROADCAST) */ + const ip_addr_t *zep_dst_ip_addr; + /** If != NULL, the udp pcb is bound to this netif */ + const struct netif *zep_netif; + /** MAC address of the 6LowPAN device */ + u8_t addr[6]; +}; + +err_t zepif_init(struct netif *netif); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 && LWIP_UDP */ + +#endif /* LWIP_HDR_ZEPIF_H */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/bridgeif.c b/Middlewares/Third_Party/LwIP/src/netif/bridgeif.c new file mode 100644 index 00000000..8a97bce3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/bridgeif.c @@ -0,0 +1,563 @@ +/** + * @file + * lwIP netif implementing an IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission.
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +/** + * @defgroup bridgeif IEEE 802.1D bridge + * @ingroup netifs + * This file implements an IEEE 802.1D bridge by using a multilayer netif approach + * (one hardware-independent netif for the bridge that uses hardware netifs for its ports). + * On transmit, the bridge selects the outgoing port(s). + * On receive, the port netif calls into the bridge (via its netif->input function) and + * the bridge selects the port(s) (and/or its netif->input function) to pass the received pbuf to. + * + * Usage: + * - add the port netifs just like you would when using them as dedicated netif without a bridge + * - only NETIF_FLAG_ETHARP/NETIF_FLAG_ETHERNET netifs are supported as bridge ports + * - add the bridge port netifs without IPv4 addresses (i.e. pass 'NULL, NULL, NULL') + * - don't add IPv6 addresses to the port netifs! + * - set up the bridge configuration in a global variable of type 'bridgeif_initdata_t' that contains + * - the MAC address of the bridge + * - some configuration options controlling the memory consumption (maximum number of ports + * and FDB entries) + * - e.g. for a bridge MAC address 00-01-02-03-04-05, 2 bridge ports, 1024 FDB entries + 16 static MAC entries: + * bridgeif_initdata_t mybridge_initdata = BRIDGEIF_INITDATA1(2, 1024, 16, ETH_ADDR(0, 1, 2, 3, 4, 5)); + * - add the bridge netif (with IPv4 config): + * struct netif bridge_netif; + * netif_add(&bridge_netif, &my_ip, &my_netmask, &my_gw, &mybridge_initdata, bridgeif_init, tcpip_input); + * NOTE: the passed 'input' function depends on BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT setting, + * which controls where the forwarding is done (netif low level input context vs. tcpip_thread) + * - set up all ports netifs and the bridge netif + * + * - When adding a port netif, NETIF_FLAG_ETHARP flag will be removed from a port + * to prevent ETHARP working on that port netif (we only want one IP per bridge not per port). + * - When adding a port netif, its input function is changed to call into the bridge. + * + * + * @todo: + * - compact static FDB entries (instead of walking the whole array) + * - add FDB query/read access + * - add FDB change callback (when learning or dropping auto-learned entries) + * - prefill FDB with MAC classes that should never be forwarded + * - multicast snooping? (and only forward group addresses to interested ports) + * - support removing ports + * - check SNMP integration + * - VLAN handling / trunk ports + * - priority handling? 
(although that largely depends on TX queue limitations and lwIP doesn't provide tx-done handling) + */ + +#include "netif/bridgeif.h" +#include "lwip/netif.h" +#include "lwip/sys.h" +#include "lwip/etharp.h" +#include "lwip/ethip6.h" +#include "lwip/snmp.h" +#include "lwip/timeouts.h" +#include + +#if LWIP_NUM_NETIF_CLIENT_DATA + +/* Define those to better describe your network interface. */ +#define IFNAME0 'b' +#define IFNAME1 'r' + +struct bridgeif_private_s; +typedef struct bridgeif_port_private_s { + struct bridgeif_private_s *bridge; + struct netif *port_netif; + u8_t port_num; +} bridgeif_port_t; + +typedef struct bridgeif_fdb_static_entry_s { + u8_t used; + bridgeif_portmask_t dst_ports; + struct eth_addr addr; +} bridgeif_fdb_static_entry_t; + +typedef struct bridgeif_private_s { + struct netif *netif; + struct eth_addr ethaddr; + u8_t max_ports; + u8_t num_ports; + bridgeif_port_t *ports; + u16_t max_fdbs_entries; + bridgeif_fdb_static_entry_t *fdbs; + u16_t max_fdbd_entries; + void *fdbd; +} bridgeif_private_t; + +/* netif data index to get the bridge on input */ +u8_t bridgeif_netif_client_id = 0xff; + +/** + * @ingroup bridgeif + * Add a static entry to the forwarding database. + * A static entry marks where frames to a specific eth address (unicast or group address) are + * forwarded. + * bits [0..(BRIDGEIF_MAX_PORTS-1)]: hw ports + * bit [BRIDGEIF_MAX_PORTS]: cpu port + * 0: drop + */ +err_t +bridgeif_fdb_add(struct netif *bridgeif, const struct eth_addr *addr, bridgeif_portmask_t ports) +{ + int i; + bridgeif_private_t *br; + BRIDGEIF_DECL_PROTECT(lev); + LWIP_ASSERT("invalid netif", bridgeif != NULL); + br = (bridgeif_private_t *)bridgeif->state; + LWIP_ASSERT("invalid state", br != NULL); + + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < br->max_fdbs_entries; i++) { + if (!br->fdbs[i].used) { + BRIDGEIF_WRITE_PROTECT(lev); + if (!br->fdbs[i].used) { + br->fdbs[i].used = 1; + br->fdbs[i].dst_ports = ports; + memcpy(&br->fdbs[i].addr, addr, sizeof(struct eth_addr)); + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_OK; + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_MEM; +} + +/** + * @ingroup bridgeif + * Remove a static entry from the forwarding database + */ +err_t +bridgeif_fdb_remove(struct netif *bridgeif, const struct eth_addr *addr) +{ + int i; + bridgeif_private_t *br; + BRIDGEIF_DECL_PROTECT(lev); + LWIP_ASSERT("invalid netif", bridgeif != NULL); + br = (bridgeif_private_t *)bridgeif->state; + LWIP_ASSERT("invalid state", br != NULL); + + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < br->max_fdbs_entries; i++) { + if (br->fdbs[i].used && !memcmp(&br->fdbs[i].addr, addr, sizeof(struct eth_addr))) { + BRIDGEIF_WRITE_PROTECT(lev); + if (br->fdbs[i].used && !memcmp(&br->fdbs[i].addr, addr, sizeof(struct eth_addr))) { + memset(&br->fdbs[i], 0, sizeof(bridgeif_fdb_static_entry_t)); + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_OK; + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_VAL; +} + +/** Get the forwarding port(s) (as bit mask) for the specified destination mac address */ +static bridgeif_portmask_t +bridgeif_find_dst_ports(bridgeif_private_t *br, struct eth_addr *dst_addr) +{ + int i; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + /* first check for static entries */ + for (i = 0; i < br->max_fdbs_entries; i++) { + if (br->fdbs[i].used) { + if (!memcmp(&br->fdbs[i].addr, dst_addr, sizeof(struct eth_addr))) { 
+ bridgeif_portmask_t ret = br->fdbs[i].dst_ports; + BRIDGEIF_READ_UNPROTECT(lev); + return ret; + } + } + } + if (dst_addr->addr[0] & 1) { + /* no match found: flood remaining group address */ + BRIDGEIF_READ_UNPROTECT(lev); + return BR_FLOOD; + } + BRIDGEIF_READ_UNPROTECT(lev); + /* no match found: check dynamic fdb for port or fall back to flooding */ + return bridgeif_fdb_get_dst_ports(br->fdbd, dst_addr); +} + +/** Helper function to see if a destination mac belongs to the bridge + * (bridge netif or one of the port netifs), in which case the frame + * is sent to the cpu only. + */ +static int +bridgeif_is_local_mac(bridgeif_private_t *br, struct eth_addr *addr) +{ + int i; + BRIDGEIF_DECL_PROTECT(lev); + if (!memcmp(br->netif->hwaddr, addr, sizeof(struct eth_addr))) { + return 1; + } + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < br->num_ports; i++) { + struct netif *portif = br->ports[i].port_netif; + if (portif != NULL) { + if (!memcmp(portif->hwaddr, addr, sizeof(struct eth_addr))) { + BRIDGEIF_READ_UNPROTECT(lev); + return 1; + } + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return 0; +} + +/* Output helper function */ +static err_t +bridgeif_send_to_port(bridgeif_private_t *br, struct pbuf *p, u8_t dstport_idx) +{ + if (dstport_idx < BRIDGEIF_MAX_PORTS) { + /* possibly an external port */ + if (dstport_idx < br->max_ports) { + struct netif *portif = br->ports[dstport_idx].port_netif; + if ((portif != NULL) && (portif->linkoutput != NULL)) { + /* prevent sending out to rx port */ + if (netif_get_index(portif) != p->if_idx) { + if (netif_is_link_up(portif)) { + LWIP_DEBUGF(BRIDGEIF_FW_DEBUG, ("br -> flood(%p:%d) -> %d\n", (void *)p, p->if_idx, netif_get_index(portif))); + return portif->linkoutput(portif, p); + } + } + } + } + } else { + LWIP_ASSERT("invalid port index", dstport_idx == BRIDGEIF_MAX_PORTS); + } + return ERR_OK; +} + +/** Helper function to pass a pbuf to all ports marked in 'dstports' + */ +static err_t +bridgeif_send_to_ports(bridgeif_private_t *br, struct pbuf *p, bridgeif_portmask_t dstports) +{ + err_t err, ret_err = ERR_OK; + u8_t i; + bridgeif_portmask_t mask = 1; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < BRIDGEIF_MAX_PORTS; i++, mask = (bridgeif_portmask_t)(mask << 1)) { + if (dstports & mask) { + err = bridgeif_send_to_port(br, p, i); + if (err != ERR_OK) { + ret_err = err; + } + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return ret_err; +} + +/** Output function of the application port of the bridge (the one with an ip address). + * The forwarding port(s) where this pbuf is sent on is/are automatically selected + * from the FDB. + */ +static err_t +bridgeif_output(struct netif *netif, struct pbuf *p) +{ + err_t err; + bridgeif_private_t *br = (bridgeif_private_t *)netif->state; + struct eth_addr *dst = (struct eth_addr *)(p->payload); + + bridgeif_portmask_t dstports = bridgeif_find_dst_ports(br, dst); + err = bridgeif_send_to_ports(br, p, dstports); + + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p->tot_len); + if (((u8_t *)p->payload)[0] & 1) { + /* broadcast or multicast packet*/ + MIB2_STATS_NETIF_INC(netif, ifoutnucastpkts); + } else { + /* unicast packet */ + MIB2_STATS_NETIF_INC(netif, ifoutucastpkts); + } + /* increase ifoutdiscards or ifouterrors on error */ + + LINK_STATS_INC(link.xmit); + + return err; +} + +/** The actual bridge input function. Port netif's input is changed to call + * here. This function decides where the frame is forwarded. 
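+ * In short, as implemented below: the source address of non-group frames is first + * learned into the dynamic FDB; frames to a group/multicast destination are sent to + * the port(s) returned by the FDB lookup and, if the CPU bit of that port mask is set, + * also passed up to the bridge netif's input function; frames whose destination MAC + * belongs to the bridge or to one of its ports go to the CPU only; all remaining + * unicast frames are forwarded to the port(s) found in the static or learned FDB + * (flooded to all ports when unknown).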
+ */ +static err_t +bridgeif_input(struct pbuf *p, struct netif *netif) +{ + u8_t rx_idx; + bridgeif_portmask_t dstports; + struct eth_addr *src, *dst; + bridgeif_private_t *br; + bridgeif_port_t *port; + if (p == NULL || netif == NULL) { + return ERR_VAL; + } + port = (bridgeif_port_t *)netif_get_client_data(netif, bridgeif_netif_client_id); + LWIP_ASSERT("port data not set", port != NULL); + if (port == NULL || port->bridge == NULL) { + return ERR_VAL; + } + br = (bridgeif_private_t *)port->bridge; + rx_idx = netif_get_index(netif); + /* store receive index in pbuf */ + p->if_idx = rx_idx; + + dst = (struct eth_addr *)p->payload; + src = (struct eth_addr *)(((u8_t *)p->payload) + sizeof(struct eth_addr)); + + if ((src->addr[0] & 1) == 0) { + /* update src for all non-group addresses */ + bridgeif_fdb_update_src(br->fdbd, src, port->port_num); + } + + if (dst->addr[0] & 1) { + /* group address -> flood + cpu? */ + dstports = bridgeif_find_dst_ports(br, dst); + bridgeif_send_to_ports(br, p, dstports); + if (dstports & (1 << BRIDGEIF_MAX_PORTS)) { + /* we pass the reference to ->input or have to free it */ + LWIP_DEBUGF(BRIDGEIF_FW_DEBUG, ("br -> input(%p)\n", (void *)p)); + if (br->netif->input(p, br->netif) != ERR_OK) { + pbuf_free(p); + } + } else { + /* all references done */ + pbuf_free(p); + } + /* always return ERR_OK here to prevent the caller freeing the pbuf */ + return ERR_OK; + } else { + /* is this for one of the local ports? */ + if (bridgeif_is_local_mac(br, dst)) { + /* yes, send to cpu port only */ + LWIP_DEBUGF(BRIDGEIF_FW_DEBUG, ("br -> input(%p)\n", (void *)p)); + return br->netif->input(p, br->netif); + } + + /* get dst port */ + dstports = bridgeif_find_dst_ports(br, dst); + bridgeif_send_to_ports(br, p, dstports); + /* no need to send to cpu, flooding is for external ports only */ + /* by this, we consumed the pbuf */ + pbuf_free(p); + /* always return ERR_OK here to prevent the caller freeing the pbuf */ + return ERR_OK; + } +} + +#if !BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT +/** Input function for port netifs used to synchronize into tcpip_thread. + */ +static err_t +bridgeif_tcpip_input(struct pbuf *p, struct netif *netif) +{ + return tcpip_inpkt(p, netif, bridgeif_input); +} +#endif /* BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT */ + +/** + * @ingroup bridgeif + * Initialization function passed to netif_add(). + * + * ATTENTION: A pointer to a @ref bridgeif_initdata_t must be passed as 'state' + * to @ref netif_add when adding the bridge. It supplies the MAC address + * and controls memory allocation (number of ports, FDB size).
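+ * + * A compact usage sketch (illustrative only; the 'my_*' names, addresses and port + * netifs are placeholders, see the module documentation above for details): + * bridgeif_initdata_t my_init = BRIDGEIF_INITDATA1(2, 1024, 16, ETH_ADDR(0, 1, 2, 3, 4, 5)); + * netif_add(&my_bridge_netif, &my_ip, &my_netmask, &my_gw, &my_init, bridgeif_init, tcpip_input); + * bridgeif_add_port(&my_bridge_netif, &my_port1_netif); + * bridgeif_add_port(&my_bridge_netif, &my_port2_netif);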
+ * + * @param netif the lwip network interface structure for this ethernetif + * @return ERR_OK if the loopif is initialized + * ERR_MEM if private data couldn't be allocated + * any other err_t on error + */ +err_t +bridgeif_init(struct netif *netif) +{ + bridgeif_initdata_t *init_data; + bridgeif_private_t *br; + size_t alloc_len_sizet; + mem_size_t alloc_len; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("bridgeif needs an input callback", (netif->input != NULL)); +#if !BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT + if (netif->input == tcpip_input) { + LWIP_DEBUGF(BRIDGEIF_DEBUG | LWIP_DBG_ON, ("bridgeif does not need tcpip_input, use netif_input/ethernet_input instead")); + } +#endif + + if (bridgeif_netif_client_id == 0xFF) { + bridgeif_netif_client_id = netif_alloc_client_data_id(); + } + + init_data = (bridgeif_initdata_t *)netif->state; + LWIP_ASSERT("init_data != NULL", (init_data != NULL)); + LWIP_ASSERT("init_data->max_ports <= BRIDGEIF_MAX_PORTS", + init_data->max_ports <= BRIDGEIF_MAX_PORTS); + + alloc_len_sizet = sizeof(bridgeif_private_t) + (init_data->max_ports * sizeof(bridgeif_port_t) + (init_data->max_fdb_static_entries * sizeof(bridgeif_fdb_static_entry_t))); + alloc_len = (mem_size_t)alloc_len_sizet; + LWIP_ASSERT("alloc_len == alloc_len_sizet", alloc_len == alloc_len_sizet); + LWIP_DEBUGF(BRIDGEIF_DEBUG, ("bridgeif_init: allocating %d bytes for private data\n", (int)alloc_len)); + br = (bridgeif_private_t *)mem_calloc(1, alloc_len); + if (br == NULL) { + LWIP_DEBUGF(NETIF_DEBUG, ("bridgeif_init: out of memory\n")); + return ERR_MEM; + } + memcpy(&br->ethaddr, &init_data->ethaddr, sizeof(br->ethaddr)); + br->netif = netif; + + br->max_ports = init_data->max_ports; + br->ports = (bridgeif_port_t *)(br + 1); + + br->max_fdbs_entries = init_data->max_fdb_static_entries; + br->fdbs = (bridgeif_fdb_static_entry_t *)(((u8_t *)(br + 1)) + (init_data->max_ports * sizeof(bridgeif_port_t))); + + br->max_fdbd_entries = init_data->max_fdb_dynamic_entries; + br->fdbd = bridgeif_fdb_init(init_data->max_fdb_dynamic_entries); + if (br->fdbd == NULL) { + LWIP_DEBUGF(NETIF_DEBUG, ("bridgeif_init: out of memory in fdb_init\n")); + mem_free(br); + return ERR_MEM; + } + +#if LWIP_NETIF_HOSTNAME + /* Initialize interface hostname */ + netif->hostname = "lwip"; +#endif /* LWIP_NETIF_HOSTNAME */ + + /* + * Initialize the snmp variables and counters inside the struct netif. + * The last argument should be replaced with your link speed, in units + * of bits per second. + */ + MIB2_INIT_NETIF(netif, snmp_ifType_ethernet_csmacd, 0); + + netif->state = br; + netif->name[0] = IFNAME0; + netif->name[1] = IFNAME1; + /* We directly use etharp_output() here to save a function call. + * You can instead declare your own function an call etharp_output() + * from it if you have to do some checks before sending (e.g. if link + * is available...) 
*/ +#if LWIP_IPV4 + netif->output = etharp_output; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + netif->output_ip6 = ethip6_output; +#endif /* LWIP_IPV6 */ + netif->linkoutput = bridgeif_output; + + /* set MAC hardware address length */ + netif->hwaddr_len = ETH_HWADDR_LEN; + + /* set MAC hardware address */ + memcpy(netif->hwaddr, &br->ethaddr, ETH_HWADDR_LEN); + + /* maximum transfer unit */ + netif->mtu = 1500; + + /* device capabilities */ + /* don't set NETIF_FLAG_ETHARP if this device is not an ethernet one */ + netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET | NETIF_FLAG_IGMP | NETIF_FLAG_MLD6 | NETIF_FLAG_LINK_UP; + +#if LWIP_IPV6 && LWIP_IPV6_MLD + /* + * For hardware/netifs that implement MAC filtering. + * All-nodes link-local is handled by default, so we must let the hardware know + * to allow multicast packets in. + * Should set mld_mac_filter previously. */ + if (netif->mld_mac_filter != NULL) { + ip6_addr_t ip6_allnodes_ll; + ip6_addr_set_allnodes_linklocal(&ip6_allnodes_ll); + netif->mld_mac_filter(netif, &ip6_allnodes_ll, NETIF_ADD_MAC_FILTER); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + + return ERR_OK; +} + +/** + * @ingroup bridgeif + * Add a port to the bridge + */ +err_t +bridgeif_add_port(struct netif *bridgeif, struct netif *portif) +{ + bridgeif_private_t *br; + bridgeif_port_t *port; + + LWIP_ASSERT("bridgeif != NULL", bridgeif != NULL); + LWIP_ASSERT("bridgeif->state != NULL", bridgeif->state != NULL); + LWIP_ASSERT("portif != NULL", portif != NULL); + + if (!(portif->flags & NETIF_FLAG_ETHARP) || !(portif->flags & NETIF_FLAG_ETHERNET)) { + /* can only add ETHERNET/ETHARP interfaces */ + return ERR_VAL; + } + + br = (bridgeif_private_t *)bridgeif->state; + + if (br->num_ports >= br->max_ports) { + return ERR_VAL; + } + port = &br->ports[br->num_ports]; + port->port_netif = portif; + port->port_num = br->num_ports; + port->bridge = br; + br->num_ports++; + + /* let the port call us on input */ +#if BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT + portif->input = bridgeif_input; +#else + portif->input = bridgeif_tcpip_input; +#endif + /* store pointer to bridge in netif */ + netif_set_client_data(portif, bridgeif_netif_client_id, port); + /* remove ETHARP flag to prevent sending report events on netif-up */ + netif_clear_flags(portif, NETIF_FLAG_ETHARP); + + return ERR_OK; +} + +#endif /* LWIP_NUM_NETIF_CLIENT_DATA */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c b/Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c new file mode 100644 index 00000000..6739fc24 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c @@ -0,0 +1,212 @@ +/** + * @file + * lwIP netif implementing an FDB for IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +/** + * @defgroup bridgeif_fdb FDB example code + * @ingroup bridgeif + * This file implements an example for an FDB (Forwarding DataBase) + */ + +#include "netif/bridgeif.h" +#include "lwip/sys.h" +#include "lwip/mem.h" +#include "lwip/timeouts.h" +#include + +#define BRIDGEIF_AGE_TIMER_MS 1000 + +#define BR_FDB_TIMEOUT_SEC (60*5) /* 5 minutes FDB timeout */ + +typedef struct bridgeif_dfdb_entry_s { + u8_t used; + u8_t port; + u32_t ts; + struct eth_addr addr; +} bridgeif_dfdb_entry_t; + +typedef struct bridgeif_dfdb_s { + u16_t max_fdb_entries; + bridgeif_dfdb_entry_t *fdb; +} bridgeif_dfdb_t; + +/** + * @ingroup bridgeif_fdb + * A real simple and slow implementation of an auto-learning forwarding database that + * remembers known src mac addresses to know which port to send frames destined for that + * mac address. + * + * ATTENTION: This is meant as an example only, in real-world use, you should + * provide a better implementation :-) + */ +void +bridgeif_fdb_update_src(void *fdb_ptr, struct eth_addr *src_addr, u8_t port_idx) +{ + int i; + bridgeif_dfdb_t *fdb = (bridgeif_dfdb_t *)fdb_ptr; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (e->used && e->ts) { + if (!memcmp(&e->addr, src_addr, sizeof(struct eth_addr))) { + LWIP_DEBUGF(BRIDGEIF_FDB_DEBUG, ("br: update src %02x:%02x:%02x:%02x:%02x:%02x (from %d) @ idx %d\n", + src_addr->addr[0], src_addr->addr[1], src_addr->addr[2], src_addr->addr[3], src_addr->addr[4], src_addr->addr[5], + port_idx, i)); + BRIDGEIF_WRITE_PROTECT(lev); + e->ts = BR_FDB_TIMEOUT_SEC; + e->port = port_idx; + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return; + } + } + } + /* not found, allocate new entry from free */ + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (!e->used || !e->ts) { + BRIDGEIF_WRITE_PROTECT(lev); + /* check again when protected */ + if (!e->used || !e->ts) { + LWIP_DEBUGF(BRIDGEIF_FDB_DEBUG, ("br: create src %02x:%02x:%02x:%02x:%02x:%02x (from %d) @ idx %d\n", + src_addr->addr[0], src_addr->addr[1], src_addr->addr[2], src_addr->addr[3], src_addr->addr[4], src_addr->addr[5], + port_idx, i)); + memcpy(&e->addr, src_addr, sizeof(struct eth_addr)); + e->ts = BR_FDB_TIMEOUT_SEC; + e->port = port_idx; + e->used = 1; + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return; + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); + /* not found, no free entry -> flood */ +} + +/** + * @ingroup bridgeif_fdb + * Walk our list of auto-learnt fdb entries and return a port to forward or BR_FLOOD if unknown + */ 
+bridgeif_portmask_t +bridgeif_fdb_get_dst_ports(void *fdb_ptr, struct eth_addr *dst_addr) +{ + int i; + bridgeif_dfdb_t *fdb = (bridgeif_dfdb_t *)fdb_ptr; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (e->used && e->ts) { + if (!memcmp(&e->addr, dst_addr, sizeof(struct eth_addr))) { + bridgeif_portmask_t ret = (bridgeif_portmask_t)(1 << e->port); + BRIDGEIF_READ_UNPROTECT(lev); + return ret; + } + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return BR_FLOOD; +} + +/** + * @ingroup bridgeif_fdb + * Aging implementation of our simple fdb + */ +static void +bridgeif_fdb_age_one_second(void *fdb_ptr) +{ + int i; + bridgeif_dfdb_t *fdb; + BRIDGEIF_DECL_PROTECT(lev); + + fdb = (bridgeif_dfdb_t *)fdb_ptr; + BRIDGEIF_READ_PROTECT(lev); + + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (e->used && e->ts) { + BRIDGEIF_WRITE_PROTECT(lev); + /* check again when protected */ + if (e->used && e->ts) { + if (--e->ts == 0) { + e->used = 0; + } + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); +} + +/** Timer callback for fdb aging, called once per second */ +static void +bridgeif_age_tmr(void *arg) +{ + bridgeif_dfdb_t *fdb = (bridgeif_dfdb_t *)arg; + + LWIP_ASSERT("invalid arg", arg != NULL); + + bridgeif_fdb_age_one_second(fdb); + sys_timeout(BRIDGEIF_AGE_TIMER_MS, bridgeif_age_tmr, arg); +} + +/** + * @ingroup bridgeif_fdb + * Init our simple fdb list + */ +void * +bridgeif_fdb_init(u16_t max_fdb_entries) +{ + bridgeif_dfdb_t *fdb; + size_t alloc_len_sizet = sizeof(bridgeif_dfdb_t) + (max_fdb_entries * sizeof(bridgeif_dfdb_entry_t)); + mem_size_t alloc_len = (mem_size_t)alloc_len_sizet; + LWIP_ASSERT("alloc_len == alloc_len_sizet", alloc_len == alloc_len_sizet); + LWIP_DEBUGF(BRIDGEIF_DEBUG, ("bridgeif_fdb_init: allocating %d bytes for private FDB data\n", (int)alloc_len)); + fdb = (bridgeif_dfdb_t *)mem_calloc(1, alloc_len); + if (fdb == NULL) { + return NULL; + } + fdb->max_fdb_entries = max_fdb_entries; + fdb->fdb = (bridgeif_dfdb_entry_t *)(fdb + 1); + + sys_timeout(BRIDGEIF_AGE_TIMER_MS, bridgeif_age_tmr, fdb); + + return fdb; +} diff --git a/Middlewares/Third_Party/LwIP/src/netif/ethernet.c b/Middlewares/Third_Party/LwIP/src/netif/ethernet.c new file mode 100644 index 00000000..dd171e28 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ethernet.c @@ -0,0 +1,321 @@ +/** + * @file + * Ethernet common functions + * + * @defgroup ethernet Ethernet + * @ingroup callbackstyle_api + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/opt.h" + +#if LWIP_ARP || LWIP_ETHERNET + +#include "netif/ethernet.h" +#include "lwip/def.h" +#include "lwip/stats.h" +#include "lwip/etharp.h" +#include "lwip/ip.h" +#include "lwip/snmp.h" + +#include + +#include "netif/ppp/ppp_opts.h" +#if PPPOE_SUPPORT +#include "netif/ppp/pppoe.h" +#endif /* PPPOE_SUPPORT */ + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +const struct eth_addr ethbroadcast = {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}; +const struct eth_addr ethzero = {{0, 0, 0, 0, 0, 0}}; + +/** + * @ingroup lwip_nosys + * Process received ethernet frames. Using this function instead of directly + * calling ip_input and passing ARP frames through etharp in ethernetif_input, + * the ARP cache is protected from concurrent access.\n + * Don't call directly, pass to netif_add() and call netif->input(). + * + * @param p the received packet, p->payload pointing to the ethernet header + * @param netif the network interface on which the packet was received + * + * @see LWIP_HOOK_UNKNOWN_ETH_PROTOCOL + * @see ETHARP_SUPPORT_VLAN + * @see LWIP_HOOK_VLAN_CHECK + */ +err_t +ethernet_input(struct pbuf *p, struct netif *netif) +{ + struct eth_hdr *ethhdr; + u16_t type; +#if LWIP_ARP || ETHARP_SUPPORT_VLAN || LWIP_IPV6 + u16_t next_hdr_offset = SIZEOF_ETH_HDR; +#endif /* LWIP_ARP || ETHARP_SUPPORT_VLAN */ + + LWIP_ASSERT_CORE_LOCKED(); + + if (p->len <= SIZEOF_ETH_HDR) { + /* a packet with only an ethernet header (or less) is not valid for us */ + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + MIB2_STATS_NETIF_INC(netif, ifinerrors); + goto free_and_return; + } + + if (p->if_idx == NETIF_NO_INDEX) { + p->if_idx = netif_get_index(netif); + } + + /* points to packet payload, which starts with an Ethernet header */ + ethhdr = (struct eth_hdr *)p->payload; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, + ("ethernet_input: dest:%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F", src:%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F", type:%"X16_F"\n", + (unsigned char)ethhdr->dest.addr[0], (unsigned char)ethhdr->dest.addr[1], (unsigned char)ethhdr->dest.addr[2], + (unsigned char)ethhdr->dest.addr[3], (unsigned char)ethhdr->dest.addr[4], (unsigned char)ethhdr->dest.addr[5], + (unsigned char)ethhdr->src.addr[0], (unsigned char)ethhdr->src.addr[1], (unsigned char)ethhdr->src.addr[2], + (unsigned char)ethhdr->src.addr[3], (unsigned char)ethhdr->src.addr[4], (unsigned char)ethhdr->src.addr[5], + lwip_htons(ethhdr->type))); + + type = ethhdr->type; +#if ETHARP_SUPPORT_VLAN + if (type == PP_HTONS(ETHTYPE_VLAN)) { + struct eth_vlan_hdr *vlan = (struct eth_vlan_hdr *)(((char *)ethhdr) + SIZEOF_ETH_HDR); + next_hdr_offset = SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR; + if (p->len <= 
SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR) { + /* a packet with only an ethernet/vlan header (or less) is not valid for us */ + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + MIB2_STATS_NETIF_INC(netif, ifinerrors); + goto free_and_return; + } +#if defined(LWIP_HOOK_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK_FN) /* if not, allow all VLANs */ +#ifdef LWIP_HOOK_VLAN_CHECK + if (!LWIP_HOOK_VLAN_CHECK(netif, ethhdr, vlan)) { +#elif defined(ETHARP_VLAN_CHECK_FN) + if (!ETHARP_VLAN_CHECK_FN(ethhdr, vlan)) { +#elif defined(ETHARP_VLAN_CHECK) + if (VLAN_ID(vlan) != ETHARP_VLAN_CHECK) { +#endif + /* silently ignore this packet: not for our VLAN */ + pbuf_free(p); + return ERR_OK; + } +#endif /* defined(LWIP_HOOK_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK_FN) */ + type = vlan->tpid; + } +#endif /* ETHARP_SUPPORT_VLAN */ + +#if LWIP_ARP_FILTER_NETIF + netif = LWIP_ARP_FILTER_NETIF_FN(p, netif, lwip_htons(type)); +#endif /* LWIP_ARP_FILTER_NETIF*/ + + if (ethhdr->dest.addr[0] & 1) { + /* this might be a multicast or broadcast packet */ + if (ethhdr->dest.addr[0] == LL_IP4_MULTICAST_ADDR_0) { +#if LWIP_IPV4 + if ((ethhdr->dest.addr[1] == LL_IP4_MULTICAST_ADDR_1) && + (ethhdr->dest.addr[2] == LL_IP4_MULTICAST_ADDR_2)) { + /* mark the pbuf as link-layer multicast */ + p->flags |= PBUF_FLAG_LLMCAST; + } +#endif /* LWIP_IPV4 */ + } +#if LWIP_IPV6 + else if ((ethhdr->dest.addr[0] == LL_IP6_MULTICAST_ADDR_0) && + (ethhdr->dest.addr[1] == LL_IP6_MULTICAST_ADDR_1)) { + /* mark the pbuf as link-layer multicast */ + p->flags |= PBUF_FLAG_LLMCAST; + } +#endif /* LWIP_IPV6 */ + else if (eth_addr_cmp(ðhdr->dest, ðbroadcast)) { + /* mark the pbuf as link-layer broadcast */ + p->flags |= PBUF_FLAG_LLBCAST; + } + } + + switch (type) { +#if LWIP_IPV4 && LWIP_ARP + /* IP packet? */ + case PP_HTONS(ETHTYPE_IP): + if (!(netif->flags & NETIF_FLAG_ETHARP)) { + goto free_and_return; + } + /* skip Ethernet header (min. size checked above) */ + if (pbuf_remove_header(p, next_hdr_offset)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("ethernet_input: IPv4 packet dropped, too short (%"U16_F"/%"U16_F")\n", + p->tot_len, next_hdr_offset)); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("Can't move over header in packet")); + goto free_and_return; + } else { + /* pass to IP layer */ + ip4_input(p, netif); + } + break; + + case PP_HTONS(ETHTYPE_ARP): + if (!(netif->flags & NETIF_FLAG_ETHARP)) { + goto free_and_return; + } + /* skip Ethernet header (min. 
size checked above) */ + if (pbuf_remove_header(p, next_hdr_offset)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("ethernet_input: ARP response packet dropped, too short (%"U16_F"/%"U16_F")\n", + p->tot_len, next_hdr_offset)); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("Can't move over header in packet")); + ETHARP_STATS_INC(etharp.lenerr); + ETHARP_STATS_INC(etharp.drop); + goto free_and_return; + } else { + /* pass p to ARP module */ + etharp_input(p, netif); + } + break; +#endif /* LWIP_IPV4 && LWIP_ARP */ +#if PPPOE_SUPPORT + case PP_HTONS(ETHTYPE_PPPOEDISC): /* PPP Over Ethernet Discovery Stage */ + pppoe_disc_input(netif, p); + break; + + case PP_HTONS(ETHTYPE_PPPOE): /* PPP Over Ethernet Session Stage */ + pppoe_data_input(netif, p); + break; +#endif /* PPPOE_SUPPORT */ + +#if LWIP_IPV6 + case PP_HTONS(ETHTYPE_IPV6): /* IPv6 */ + /* skip Ethernet header */ + if ((p->len < next_hdr_offset) || pbuf_remove_header(p, next_hdr_offset)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("ethernet_input: IPv6 packet dropped, too short (%"U16_F"/%"U16_F")\n", + p->tot_len, next_hdr_offset)); + goto free_and_return; + } else { + /* pass to IPv6 layer */ + ip6_input(p, netif); + } + break; +#endif /* LWIP_IPV6 */ + + default: +#ifdef LWIP_HOOK_UNKNOWN_ETH_PROTOCOL + if (LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(p, netif) == ERR_OK) { + break; + } +#endif + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + MIB2_STATS_NETIF_INC(netif, ifinunknownprotos); + goto free_and_return; + } + + /* This means the pbuf is freed or consumed, + so the caller doesn't have to free it again */ + return ERR_OK; + +free_and_return: + pbuf_free(p); + return ERR_OK; +} + +/** + * @ingroup ethernet + * Send an ethernet packet on the network using netif->linkoutput(). + * The ethernet header is filled in before sending. + * + * @see LWIP_HOOK_VLAN_SET + * + * @param netif the lwIP network interface on which to send the packet + * @param p the packet to send. pbuf layer must be @ref PBUF_LINK. 
+ * @param src the source MAC address to be copied into the ethernet header + * @param dst the destination MAC address to be copied into the ethernet header + * @param eth_type ethernet type (@ref lwip_ieee_eth_type) + * @return ERR_OK if the packet was sent, any other err_t on failure + */ +err_t +ethernet_output(struct netif * netif, struct pbuf * p, + const struct eth_addr * src, const struct eth_addr * dst, + u16_t eth_type) { + struct eth_hdr *ethhdr; + u16_t eth_type_be = lwip_htons(eth_type); + +#if ETHARP_SUPPORT_VLAN && defined(LWIP_HOOK_VLAN_SET) + s32_t vlan_prio_vid = LWIP_HOOK_VLAN_SET(netif, p, src, dst, eth_type); + if (vlan_prio_vid >= 0) { + struct eth_vlan_hdr *vlanhdr; + + LWIP_ASSERT("prio_vid must be <= 0xFFFF", vlan_prio_vid <= 0xFFFF); + + if (pbuf_add_header(p, SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR) != 0) { + goto pbuf_header_failed; + } + vlanhdr = (struct eth_vlan_hdr *)(((u8_t *)p->payload) + SIZEOF_ETH_HDR); + vlanhdr->tpid = eth_type_be; + vlanhdr->prio_vid = lwip_htons((u16_t)vlan_prio_vid); + + eth_type_be = PP_HTONS(ETHTYPE_VLAN); + } else +#endif /* ETHARP_SUPPORT_VLAN && defined(LWIP_HOOK_VLAN_SET) */ + { + if (pbuf_add_header(p, SIZEOF_ETH_HDR) != 0) { + goto pbuf_header_failed; + } + } + + LWIP_ASSERT_CORE_LOCKED(); + + ethhdr = (struct eth_hdr *)p->payload; + ethhdr->type = eth_type_be; + SMEMCPY(ðhdr->dest, dst, ETH_HWADDR_LEN); + SMEMCPY(ðhdr->src, src, ETH_HWADDR_LEN); + + LWIP_ASSERT("netif->hwaddr_len must be 6 for ethernet_output!", + (netif->hwaddr_len == ETH_HWADDR_LEN)); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, + ("ethernet_output: sending packet %p\n", (void *)p)); + + /* send the packet */ + return netif->linkoutput(netif, p); + +pbuf_header_failed: + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("ethernet_output: could not allocate room for header.\n")); + LINK_STATS_INC(link.lenerr); + return ERR_BUF; +} + +#endif /* LWIP_ARP || LWIP_ETHERNET */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/lowpan6.c b/Middlewares/Third_Party/LwIP/src/netif/lowpan6.c new file mode 100644 index 00000000..7f0d2767 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/lowpan6.c @@ -0,0 +1,920 @@ +/** + * @file + * + * 6LowPAN output for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + * + * This implementation aims to conform to IEEE 802.15.4(-2015), RFC 4944 and RFC 6282. + * @todo: RFC 6775. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +/** + * @defgroup sixlowpan 6LoWPAN (RFC4944) + * @ingroup netifs + * 6LowPAN netif implementation + */ + +#include "netif/lowpan6.h" + +#if LWIP_IPV6 + +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/nd6.h" +#include "lwip/mem.h" +#include "lwip/udp.h" +#include "lwip/tcpip.h" +#include "lwip/snmp.h" +#include "netif/ieee802154.h" + +#include + +#if LWIP_6LOWPAN_802154_HW_CRC +#define LWIP_6LOWPAN_DO_CALC_CRC(buf, len) 0 +#else +#define LWIP_6LOWPAN_DO_CALC_CRC(buf, len) LWIP_6LOWPAN_CALC_CRC(buf, len) +#endif + +/** This is a helper struct for reassembly of fragments + * (IEEE 802.15.4 limits to 127 bytes) + */ +struct lowpan6_reass_helper { + struct lowpan6_reass_helper *next_packet; + struct pbuf *reass; + struct pbuf *frags; + u8_t timer; + struct lowpan6_link_addr sender_addr; + u16_t datagram_size; + u16_t datagram_tag; +}; + +/** This struct keeps track of per-netif state */ +struct lowpan6_ieee802154_data { + /** fragment reassembly list */ + struct lowpan6_reass_helper *reass_list; +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + /** address context for compression */ + ip6_addr_t lowpan6_context[LWIP_6LOWPAN_NUM_CONTEXTS]; +#endif + /** Datagram Tag for fragmentation */ + u16_t tx_datagram_tag; + /** local PAN ID for IEEE 802.15.4 header */ + u16_t ieee_802154_pan_id; + /** Sequence Number for IEEE 802.15.4 transmission */ + u8_t tx_frame_seq_num; +}; + +/* Maximum frame size is 127 bytes minus CRC size */ +#define LOWPAN6_MAX_PAYLOAD (127 - 2) + +/** Currently, this state is global, since there's only one 6LoWPAN netif */ +static struct lowpan6_ieee802154_data lowpan6_data; + +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 +#define LWIP_6LOWPAN_CONTEXTS(netif) lowpan6_data.lowpan6_context +#else +#define LWIP_6LOWPAN_CONTEXTS(netif) NULL +#endif + +static const struct lowpan6_link_addr ieee_802154_broadcast = {2, {0xff, 0xff}}; + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS +static struct lowpan6_link_addr short_mac_addr = {2, {0, 0}}; +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + +/* IEEE 802.15.4 specific functions: */ + +/** Write the IEEE 802.15.4 header that encapsulates the 6LoWPAN frame. + * Src and dst PAN IDs are filled with the ID set by @ref lowpan6_set_pan_id. + * + * Since the length is variable: + * @returns the header length + */ +static u8_t +lowpan6_write_iee802154_header(struct ieee_802154_hdr *hdr, const struct lowpan6_link_addr *src, + const struct lowpan6_link_addr *dst) +{ + u8_t ieee_header_len; + u8_t *buffer; + u8_t i; + u16_t fc; + + fc = IEEE_802154_FC_FT_DATA; /* send data packet (2003 frame version) */ + fc |= IEEE_802154_FC_PANID_COMPR; /* set PAN ID compression, for now src and dst PANs are equal */ + if (dst != &ieee_802154_broadcast) { + fc |= IEEE_802154_FC_ACK_REQ; /* data packet, no broadcast: ack required. 
*/ + } + if (dst->addr_len == 2) { + fc |= IEEE_802154_FC_DST_ADDR_MODE_SHORT; + } else { + LWIP_ASSERT("invalid dst address length", dst->addr_len == 8); + fc |= IEEE_802154_FC_DST_ADDR_MODE_EXT; + } + if (src->addr_len == 2) { + fc |= IEEE_802154_FC_SRC_ADDR_MODE_SHORT; + } else { + LWIP_ASSERT("invalid src address length", src->addr_len == 8); + fc |= IEEE_802154_FC_SRC_ADDR_MODE_EXT; + } + hdr->frame_control = fc; + hdr->sequence_number = lowpan6_data.tx_frame_seq_num++; + hdr->destination_pan_id = lowpan6_data.ieee_802154_pan_id; /* pan id */ + + buffer = (u8_t *)hdr; + ieee_header_len = 5; + i = dst->addr_len; + /* reverse memcpy of dst addr */ + while (i-- > 0) { + buffer[ieee_header_len++] = dst->addr[i]; + } + /* Source PAN ID skipped due to PAN ID Compression */ + i = src->addr_len; + /* reverse memcpy of src addr */ + while (i-- > 0) { + buffer[ieee_header_len++] = src->addr[i]; + } + return ieee_header_len; +} + +/** Parse the IEEE 802.15.4 header from a pbuf. + * If successful, the header is hidden from the pbuf. + * + * PAN IDs and seuqence number are not checked + * + * @param p input pbuf, p->payload pointing at the IEEE 802.15.4 header + * @param src pointer to source address filled from the header + * @param dest pointer to destination address filled from the header + * @returns ERR_OK if successful + */ +static err_t +lowpan6_parse_iee802154_header(struct pbuf *p, struct lowpan6_link_addr *src, + struct lowpan6_link_addr *dest) +{ + u8_t *puc; + s8_t i; + u16_t frame_control, addr_mode; + u16_t datagram_offset; + + /* Parse IEEE 802.15.4 header */ + puc = (u8_t *)p->payload; + frame_control = puc[0] | (puc[1] << 8); + datagram_offset = 2; + if (frame_control & IEEE_802154_FC_SEQNO_SUPPR) { + if (IEEE_802154_FC_FRAME_VERSION_GET(frame_control) <= 1) { + /* sequence number suppressed, this is not valid for versions 0/1 */ + return ERR_VAL; + } + } else { + datagram_offset++; + } + datagram_offset += 2; /* Skip destination PAN ID */ + addr_mode = frame_control & IEEE_802154_FC_DST_ADDR_MODE_MASK; + if (addr_mode == IEEE_802154_FC_DST_ADDR_MODE_EXT) { + /* extended address (64 bit) */ + dest->addr_len = 8; + /* reverse memcpy: */ + for (i = 0; i < 8; i++) { + dest->addr[i] = puc[datagram_offset + 7 - i]; + } + datagram_offset += 8; + } else if (addr_mode == IEEE_802154_FC_DST_ADDR_MODE_SHORT) { + /* short address (16 bit) */ + dest->addr_len = 2; + /* reverse memcpy: */ + dest->addr[0] = puc[datagram_offset + 1]; + dest->addr[1] = puc[datagram_offset]; + datagram_offset += 2; + } else { + /* unsupported address mode (do we need "no address"?) */ + return ERR_VAL; + } + + if (!(frame_control & IEEE_802154_FC_PANID_COMPR)) { + /* No PAN ID compression, skip source PAN ID */ + datagram_offset += 2; + } + + addr_mode = frame_control & IEEE_802154_FC_SRC_ADDR_MODE_MASK; + if (addr_mode == IEEE_802154_FC_SRC_ADDR_MODE_EXT) { + /* extended address (64 bit) */ + src->addr_len = 8; + /* reverse memcpy: */ + for (i = 0; i < 8; i++) { + src->addr[i] = puc[datagram_offset + 7 - i]; + } + datagram_offset += 8; + } else if (addr_mode == IEEE_802154_FC_DST_ADDR_MODE_SHORT) { + /* short address (16 bit) */ + src->addr_len = 2; + src->addr[0] = puc[datagram_offset + 1]; + src->addr[1] = puc[datagram_offset]; + datagram_offset += 2; + } else { + /* unsupported address mode (do we need "no address"?) */ + return ERR_VAL; + } + + /* hide IEEE802.15.4 header. 
*/ + if (pbuf_remove_header(p, datagram_offset)) { + return ERR_VAL; + } + return ERR_OK; +} + +/** Calculate the 16-bit CRC as required by IEEE 802.15.4 */ +u16_t +lowpan6_calc_crc(const void* buf, u16_t len) +{ +#define CCITT_POLY_16 0x8408U + u16_t i; + u8_t b; + u16_t crc = 0; + const u8_t* p = (const u8_t*)buf; + + for (i = 0; i < len; i++) { + u8_t data = *p; + for (b = 0U; b < 8U; b++) { + if (((data ^ crc) & 1) != 0) { + crc = (u16_t)((crc >> 1) ^ CCITT_POLY_16); + } else { + crc = (u16_t)(crc >> 1); + } + data = (u8_t)(data >> 1); + } + p++; + } + return crc; +} + +/* Fragmentation specific functions: */ + +static void +free_reass_datagram(struct lowpan6_reass_helper *lrh) +{ + if (lrh->reass) { + pbuf_free(lrh->reass); + } + if (lrh->frags) { + pbuf_free(lrh->frags); + } + mem_free(lrh); +} + +/** + * Removes a datagram from the reassembly queue. + **/ +static void +dequeue_datagram(struct lowpan6_reass_helper *lrh, struct lowpan6_reass_helper *prev) +{ + if (lowpan6_data.reass_list == lrh) { + lowpan6_data.reass_list = lowpan6_data.reass_list->next_packet; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", prev != NULL); + prev->next_packet = lrh->next_packet; + } +} + +/** + * Periodic timer for 6LowPAN functions: + * + * - Remove incomplete/old packets + */ +void +lowpan6_tmr(void) +{ + struct lowpan6_reass_helper *lrh, *lrh_next, *lrh_prev = NULL; + + lrh = lowpan6_data.reass_list; + while (lrh != NULL) { + lrh_next = lrh->next_packet; + if ((--lrh->timer) == 0) { + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + } else { + lrh_prev = lrh; + } + lrh = lrh_next; + } +} + +/* + * Encapsulates data into IEEE 802.15.4 frames. + * Fragments an IPv6 datagram into 6LowPAN units, which fit into IEEE 802.15.4 frames. + * If configured, will compress IPv6 and or UDP headers. + * */ +static err_t +lowpan6_frag(struct netif *netif, struct pbuf *p, const struct lowpan6_link_addr *src, const struct lowpan6_link_addr *dst) +{ + struct pbuf *p_frag; + u16_t frag_len, remaining_len, max_data_len; + u8_t *buffer; + u8_t ieee_header_len; + u8_t lowpan6_header_len; + u8_t hidden_header_len; + u16_t crc; + u16_t datagram_offset; + err_t err = ERR_IF; + + LWIP_ASSERT("lowpan6_frag: netif->linkoutput not set", netif->linkoutput != NULL); + + /* We'll use a dedicated pbuf for building 6LowPAN fragments. */ + p_frag = pbuf_alloc(PBUF_RAW, 127, PBUF_RAM); + if (p_frag == NULL) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece", p_frag->len == p_frag->tot_len); + + /* Write IEEE 802.15.4 header. */ + buffer = (u8_t *)p_frag->payload; + ieee_header_len = lowpan6_write_iee802154_header((struct ieee_802154_hdr *)buffer, src, dst); + LWIP_ASSERT("ieee_header_len < p_frag->len", ieee_header_len < p_frag->len); + +#if LWIP_6LOWPAN_IPHC + /* Perform 6LowPAN IPv6 header compression according to RFC 6282 */ + /* do the header compression (this does NOT copy any non-compressed data) */ + err = lowpan6_compress_headers(netif, (u8_t *)p->payload, p->len, + &buffer[ieee_header_len], p_frag->len - ieee_header_len, &lowpan6_header_len, + &hidden_header_len, LWIP_6LOWPAN_CONTEXTS(netif), src, dst); + if (err != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + pbuf_free(p_frag); + return err; + } + pbuf_remove_header(p, hidden_header_len); + +#else /* LWIP_6LOWPAN_IPHC */ + /* Send uncompressed IPv6 header with appropriate dispatch byte. 
*/ + lowpan6_header_len = 1; + buffer[ieee_header_len] = 0x41; /* IPv6 dispatch */ +#endif /* LWIP_6LOWPAN_IPHC */ + + /* Calculate remaining packet length */ + remaining_len = p->tot_len; + + if (remaining_len > 0x7FF) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + /* datagram_size must fit into 11 bit */ + pbuf_free(p_frag); + return ERR_VAL; + } + + /* Fragment, or 1 packet? */ + max_data_len = LOWPAN6_MAX_PAYLOAD - ieee_header_len - lowpan6_header_len; + if (remaining_len > max_data_len) { + u16_t data_len; + /* We must move the 6LowPAN header to make room for the FRAG header. */ + memmove(&buffer[ieee_header_len + 4], &buffer[ieee_header_len], lowpan6_header_len); + + /* Now we need to fragment the packet. FRAG1 header first */ + buffer[ieee_header_len] = 0xc0 | (((p->tot_len + hidden_header_len) >> 8) & 0x7); + buffer[ieee_header_len + 1] = (p->tot_len + hidden_header_len) & 0xff; + + lowpan6_data.tx_datagram_tag++; + buffer[ieee_header_len + 2] = (lowpan6_data.tx_datagram_tag >> 8) & 0xff; + buffer[ieee_header_len + 3] = lowpan6_data.tx_datagram_tag & 0xff; + + /* Fragment follows. */ + data_len = (max_data_len - 4) & 0xf8; + frag_len = data_len + lowpan6_header_len; + + pbuf_copy_partial(p, buffer + ieee_header_len + lowpan6_header_len + 4, frag_len - lowpan6_header_len, 0); + remaining_len -= frag_len - lowpan6_header_len; + /* datagram offset holds the offset before compression */ + datagram_offset = frag_len - lowpan6_header_len + hidden_header_len; + LWIP_ASSERT("datagram offset must be a multiple of 8", (datagram_offset & 7) == 0); + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = ieee_header_len + 4 + frag_len + 2; /* add 2 bytes for crc*/ + + /* 2 bytes CRC */ + crc = LWIP_6LOWPAN_DO_CALC_CRC(p_frag->payload, p_frag->len - 2); + pbuf_take_at(p_frag, &crc, 2, p_frag->len - 2); + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG | LWIP_DBG_TRACE, ("lowpan6_send: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + + while ((remaining_len > 0) && (err == ERR_OK)) { + struct ieee_802154_hdr *hdr = (struct ieee_802154_hdr *)buffer; + /* new frame, new seq num for ACK */ + hdr->sequence_number = lowpan6_data.tx_frame_seq_num++; + + buffer[ieee_header_len] |= 0x20; /* Change FRAG1 to FRAGN */ + + LWIP_ASSERT("datagram offset must be a multiple of 8", (datagram_offset & 7) == 0); + buffer[ieee_header_len + 4] = (u8_t)(datagram_offset >> 3); /* datagram offset in FRAGN header (datagram_offset is max. 11 bit) */ + + frag_len = (127 - ieee_header_len - 5 - 2) & 0xf8; + if (frag_len > remaining_len) { + frag_len = remaining_len; + } + + pbuf_copy_partial(p, buffer + ieee_header_len + 5, frag_len, p->tot_len - remaining_len); + remaining_len -= frag_len; + datagram_offset += frag_len; + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = frag_len + 5 + ieee_header_len + 2; + + /* 2 bytes CRC */ + crc = LWIP_6LOWPAN_DO_CALC_CRC(p_frag->payload, p_frag->len - 2); + pbuf_take_at(p_frag, &crc, 2, p_frag->len - 2); + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG | LWIP_DBG_TRACE, ("lowpan6_send: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + } + } else { + /* It fits in one frame. 
*/ + frag_len = remaining_len; + + /* Copy IPv6 packet */ + pbuf_copy_partial(p, buffer + ieee_header_len + lowpan6_header_len, frag_len, 0); + remaining_len = 0; + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = frag_len + lowpan6_header_len + ieee_header_len + 2; + LWIP_ASSERT("", p_frag->len <= 127); + + /* 2 bytes CRC */ + crc = LWIP_6LOWPAN_DO_CALC_CRC(p_frag->payload, p_frag->len - 2); + pbuf_take_at(p_frag, &crc, 2, p_frag->len - 2); + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG | LWIP_DBG_TRACE, ("lowpan6_send: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + } + + pbuf_free(p_frag); + + return err; +} + +/** + * @ingroup sixlowpan + * Set context + */ +err_t +lowpan6_set_context(u8_t idx, const ip6_addr_t *context) +{ +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + if (idx >= LWIP_6LOWPAN_NUM_CONTEXTS) { + return ERR_ARG; + } + + IP6_ADDR_ZONECHECK(context); + + ip6_addr_set(&lowpan6_data.lowpan6_context[idx], context); + + return ERR_OK; +#else + LWIP_UNUSED_ARG(idx); + LWIP_UNUSED_ARG(context); + return ERR_ARG; +#endif +} + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS +/** + * @ingroup sixlowpan + * Set short address + */ +err_t +lowpan6_set_short_addr(u8_t addr_high, u8_t addr_low) +{ + short_mac_addr.addr[0] = addr_high; + short_mac_addr.addr[1] = addr_low; + + return ERR_OK; +} +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + +/* Create IEEE 802.15.4 address from netif address */ +static err_t +lowpan6_hwaddr_to_addr(struct netif *netif, struct lowpan6_link_addr *addr) +{ + addr->addr_len = 8; + if (netif->hwaddr_len == 8) { + LWIP_ERROR("NETIF_MAX_HWADDR_LEN >= 8 required", sizeof(netif->hwaddr) >= 8, return ERR_VAL;); + SMEMCPY(addr->addr, netif->hwaddr, 8); + } else if (netif->hwaddr_len == 6) { + /* Copy from MAC-48 */ + SMEMCPY(addr->addr, netif->hwaddr, 3); + addr->addr[3] = addr->addr[4] = 0xff; + SMEMCPY(&addr->addr[5], &netif->hwaddr[3], 3); + } else { + /* Invalid address length, don't know how to convert this */ + return ERR_VAL; + } + return ERR_OK; +} + +/** + * @ingroup sixlowpan + * Resolve and fill-in IEEE 802.15.4 address header for outgoing IPv6 packet. + * + * Perform Header Compression and fragment if necessary. + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The IP address of the packet destination. + * + * @return err_t + */ +err_t +lowpan6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) +{ + err_t result; + const u8_t *hwaddr; + struct lowpan6_link_addr src, dest; +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS + ip6_addr_t ip6_src; + struct ip6_hdr *ip6_hdr; +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS + /* Check if we can compress source address (use aligned copy) */ + ip6_hdr = (struct ip6_hdr *)q->payload; + ip6_addr_copy_from_packed(ip6_src, ip6_hdr->src); + ip6_addr_assign_zone(&ip6_src, IP6_UNICAST, netif); + if (lowpan6_get_address_mode(&ip6_src, &short_mac_addr) == 3) { + src.addr_len = 2; + src.addr[0] = short_mac_addr.addr[0]; + src.addr[1] = short_mac_addr.addr[1]; + } else +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + { + result = lowpan6_hwaddr_to_addr(netif, &src); + if (result != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return result; + } + } + + /* multicast destination IP address? 
*/ + if (ip6_addr_ismulticast(ip6addr)) { + MIB2_STATS_NETIF_INC(netif, ifoutnucastpkts); + /* We need to send to the broadcast address.*/ + return lowpan6_frag(netif, q, &src, &ieee_802154_broadcast); + } + + /* We have a unicast destination IP address */ + /* @todo anycast? */ + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS + if (src.addr_len == 2) { + /* If source address was compressable to short_mac_addr, and dest has same subnet and + * is also compressable to 2-bytes, assume we can infer dest as a short address too. */ + dest.addr_len = 2; + dest.addr[0] = ((u8_t *)q->payload)[38]; + dest.addr[1] = ((u8_t *)q->payload)[39]; + if ((src.addr_len == 2) && (ip6_addr_netcmp_zoneless(&ip6_hdr->src, &ip6_hdr->dest)) && + (lowpan6_get_address_mode(ip6addr, &dest) == 3)) { + MIB2_STATS_NETIF_INC(netif, ifoutucastpkts); + return lowpan6_frag(netif, q, &src, &dest); + } + } +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + + /* Ask ND6 what to do with the packet. */ + result = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr); + if (result != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return result; + } + + /* If no hardware address is returned, nd6 has queued the packet for later. */ + if (hwaddr == NULL) { + return ERR_OK; + } + + /* Send out the packet using the returned hardware address. */ + dest.addr_len = netif->hwaddr_len; + /* XXX: Inferring the length of the source address from the destination address + * is not correct for IEEE 802.15.4, but currently we don't get this information + * from the neighbor cache */ + SMEMCPY(dest.addr, hwaddr, netif->hwaddr_len); + MIB2_STATS_NETIF_INC(netif, ifoutucastpkts); + return lowpan6_frag(netif, q, &src, &dest); +} +/** + * @ingroup sixlowpan + * NETIF input function: don't free the input pbuf when returning != ERR_OK! + */ +err_t +lowpan6_input(struct pbuf *p, struct netif *netif) +{ + u8_t *puc, b; + s8_t i; + struct lowpan6_link_addr src, dest; + u16_t datagram_size = 0; + u16_t datagram_offset, datagram_tag; + struct lowpan6_reass_helper *lrh, *lrh_next, *lrh_prev = NULL; + + if (p == NULL) { + return ERR_OK; + } + + MIB2_STATS_NETIF_ADD(netif, ifinoctets, p->tot_len); + + if (p->len != p->tot_len) { + /* for now, this needs a pbuf in one piece */ + goto lowpan6_input_discard; + } + + if (lowpan6_parse_iee802154_header(p, &src, &dest) != ERR_OK) { + goto lowpan6_input_discard; + } + + /* Check dispatch. */ + puc = (u8_t *)p->payload; + + b = *puc; + if ((b & 0xf8) == 0xc0) { + /* FRAG1 dispatch. add this packet to reassembly list. */ + datagram_size = ((u16_t)(puc[0] & 0x07) << 8) | (u16_t)puc[1]; + datagram_tag = ((u16_t)puc[2] << 8) | (u16_t)puc[3]; + + /* check for duplicate */ + lrh = lowpan6_data.reass_list; + while (lrh != NULL) { + uint8_t discard = 0; + lrh_next = lrh->next_packet; + if ((lrh->sender_addr.addr_len == src.addr_len) && + (memcmp(lrh->sender_addr.addr, src.addr, src.addr_len) == 0)) { + /* address match with packet in reassembly. */ + if ((datagram_tag == lrh->datagram_tag) && (datagram_size == lrh->datagram_size)) { + /* duplicate fragment. */ + goto lowpan6_input_discard; + } else { + /* We are receiving the start of a new datagram. Discard old one (incomplete). */ + discard = 1; + } + } + if (discard) { + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + } else { + lrh_prev = lrh; + } + /* Check next datagram in queue. 
*/ + lrh = lrh_next; + } + + pbuf_remove_header(p, 4); /* hide frag1 dispatch */ + + lrh = (struct lowpan6_reass_helper *) mem_malloc(sizeof(struct lowpan6_reass_helper)); + if (lrh == NULL) { + goto lowpan6_input_discard; + } + + lrh->sender_addr.addr_len = src.addr_len; + for (i = 0; i < src.addr_len; i++) { + lrh->sender_addr.addr[i] = src.addr[i]; + } + lrh->datagram_size = datagram_size; + lrh->datagram_tag = datagram_tag; + lrh->frags = NULL; + if (*(u8_t *)p->payload == 0x41) { + /* This is a complete IPv6 packet, just skip dispatch byte. */ + pbuf_remove_header(p, 1); /* hide dispatch byte. */ + lrh->reass = p; + } else if ((*(u8_t *)p->payload & 0xe0 ) == 0x60) { + lrh->reass = lowpan6_decompress(p, datagram_size, LWIP_6LOWPAN_CONTEXTS(netif), &src, &dest); + if (lrh->reass == NULL) { + /* decompression failed */ + mem_free(lrh); + goto lowpan6_input_discard; + } + } + /* TODO: handle the case where we already have FRAGN received */ + lrh->next_packet = lowpan6_data.reass_list; + lrh->timer = 2; + lowpan6_data.reass_list = lrh; + + return ERR_OK; + } else if ((b & 0xf8) == 0xe0) { + /* FRAGN dispatch, find packet being reassembled. */ + datagram_size = ((u16_t)(puc[0] & 0x07) << 8) | (u16_t)puc[1]; + datagram_tag = ((u16_t)puc[2] << 8) | (u16_t)puc[3]; + datagram_offset = (u16_t)puc[4] << 3; + pbuf_remove_header(p, 4); /* hide frag1 dispatch but keep datagram offset for reassembly */ + + for (lrh = lowpan6_data.reass_list; lrh != NULL; lrh_prev = lrh, lrh = lrh->next_packet) { + if ((lrh->sender_addr.addr_len == src.addr_len) && + (memcmp(lrh->sender_addr.addr, src.addr, src.addr_len) == 0) && + (datagram_tag == lrh->datagram_tag) && + (datagram_size == lrh->datagram_size)) { + break; + } + } + if (lrh == NULL) { + /* rogue fragment */ + goto lowpan6_input_discard; + } + /* Insert new pbuf into list of fragments. Each fragment is a pbuf, + this only works for unchained pbufs. 
*/ + LWIP_ASSERT("p->next == NULL", p->next == NULL); + if (lrh->reass != NULL) { + /* FRAG1 already received, check this offset against first len */ + if (datagram_offset < lrh->reass->len) { + /* fragment overlap, discard old fragments */ + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + goto lowpan6_input_discard; + } + } + if (lrh->frags == NULL) { + /* first FRAGN */ + lrh->frags = p; + } else { + /* find the correct place to insert */ + struct pbuf *q, *last; + u16_t new_frag_len = p->len - 1; /* p->len includes datagram_offset byte */ + for (q = lrh->frags, last = NULL; q != NULL; last = q, q = q->next) { + u16_t q_datagram_offset = ((u8_t *)q->payload)[0] << 3; + u16_t q_frag_len = q->len - 1; + if (datagram_offset < q_datagram_offset) { + if (datagram_offset + new_frag_len > q_datagram_offset) { + /* overlap, discard old fragments */ + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + goto lowpan6_input_discard; + } + /* insert here */ + break; + } else if (datagram_offset == q_datagram_offset) { + if (q_frag_len != new_frag_len) { + /* fragment mismatch, discard old fragments */ + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + goto lowpan6_input_discard; + } + /* duplicate, ignore */ + pbuf_free(p); + return ERR_OK; + } + } + /* insert fragment */ + if (last == NULL) { + lrh->frags = p; + } else { + last->next = p; + p->next = q; + } + } + /* check if all fragments were received */ + if (lrh->reass) { + u16_t offset = lrh->reass->len; + struct pbuf *q; + for (q = lrh->frags; q != NULL; q = q->next) { + u16_t q_datagram_offset = ((u8_t *)q->payload)[0] << 3; + if (q_datagram_offset != offset) { + /* not complete, wait for more fragments */ + return ERR_OK; + } + offset += q->len - 1; + } + if (offset == datagram_size) { + /* all fragments received, combine pbufs */ + u16_t datagram_left = datagram_size - lrh->reass->len; + for (q = lrh->frags; q != NULL; q = q->next) { + /* hide datagram_offset byte now */ + pbuf_remove_header(q, 1); + q->tot_len = datagram_left; + datagram_left -= q->len; + } + LWIP_ASSERT("datagram_left == 0", datagram_left == 0); + q = lrh->reass; + q->tot_len = datagram_size; + q->next = lrh->frags; + lrh->frags = NULL; + lrh->reass = NULL; + dequeue_datagram(lrh, lrh_prev); + mem_free(lrh); + + /* @todo: distinguish unicast/multicast */ + MIB2_STATS_NETIF_INC(netif, ifinucastpkts); + return ip6_input(q, netif); + } + } + /* pbuf enqueued, waiting for more fragments */ + return ERR_OK; + } else { + if (b == 0x41) { + /* This is a complete IPv6 packet, just skip dispatch byte. */ + pbuf_remove_header(p, 1); /* hide dispatch byte. */ + } else if ((b & 0xe0 ) == 0x60) { + /* IPv6 headers are compressed using IPHC. 
*/ + p = lowpan6_decompress(p, datagram_size, LWIP_6LOWPAN_CONTEXTS(netif), &src, &dest); + if (p == NULL) { + MIB2_STATS_NETIF_INC(netif, ifindiscards); + return ERR_OK; + } + } else { + goto lowpan6_input_discard; + } + + /* @todo: distinguish unicast/multicast */ + MIB2_STATS_NETIF_INC(netif, ifinucastpkts); + + return ip6_input(p, netif); + } +lowpan6_input_discard: + MIB2_STATS_NETIF_INC(netif, ifindiscards); + pbuf_free(p); + /* always return ERR_OK here to prevent the caller freeing the pbuf */ + return ERR_OK; +} + +/** + * @ingroup sixlowpan + */ +err_t +lowpan6_if_init(struct netif *netif) +{ + netif->name[0] = 'L'; + netif->name[1] = '6'; + netif->output_ip6 = lowpan6_output; + + MIB2_INIT_NETIF(netif, snmp_ifType_other, 0); + + /* maximum transfer unit */ + netif->mtu = 1280; + + /* broadcast capability */ + netif->flags = NETIF_FLAG_BROADCAST /* | NETIF_FLAG_LOWPAN6 */; + + return ERR_OK; +} + +/** + * @ingroup sixlowpan + * Set PAN ID + */ +err_t +lowpan6_set_pan_id(u16_t pan_id) +{ + lowpan6_data.ieee_802154_pan_id = pan_id; + + return ERR_OK; +} + +#if !NO_SYS +/** + * @ingroup sixlowpan + * Pass a received packet to tcpip_thread for input processing + * + * @param p the received packet, p->payload pointing to the + * IEEE 802.15.4 header. + * @param inp the network interface on which the packet was received + */ +err_t +tcpip_6lowpan_input(struct pbuf *p, struct netif *inp) +{ + return tcpip_inpkt(p, inp, lowpan6_input); +} +#endif /* !NO_SYS */ + +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c b/Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c new file mode 100644 index 00000000..d89816d3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c @@ -0,0 +1,447 @@ +/** + * @file + * 6LowPAN over BLE output for IPv6 (RFC7668). +*/ + +/* + * Copyright (c) 2017 Benjamin Aigner + * Copyright (c) 2015 Inico Technologies Ltd. , Author: Ivan Delamer + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * Author: Benjamin Aigner + * + * Based on the original 6lowpan implementation of lwIP ( @see 6lowpan.c) + */ + + +/** + * @defgroup rfc7668if 6LoWPAN over BLE (RFC7668) + * @ingroup netifs + * This file implements a RFC7668 implementation for 6LoWPAN over + * Bluetooth Low Energy. The specification is very similar to 6LoWPAN, + * so most of the code is re-used. + * Compared to 6LoWPAN, much functionality is already implemented in + * lower BLE layers (fragmenting, session management,...). + * + * Usage: + * - add this netif + * - don't add IPv4 addresses (no IPv4 support in RFC7668), pass 'NULL','NULL','NULL' + * - use the BLE to EUI64 conversation util to create an IPv6 link-local address from the BLE MAC (@ref ble_addr_to_eui64) + * - input function: @ref rfc7668_input + * - set the link output function, which transmits output data to an established L2CAP channel + * - If data arrives (HCI event "L2CAP_DATA_PACKET"): + * - allocate a @ref PBUF_RAW buffer + * - let the pbuf struct point to the incoming data or copy it to the buffer + * - call netif->input + * + * @todo: + * - further testing + * - support compression contexts + * - support multiple addresses + * - support multicast + * - support neighbor discovery + */ + + +#include "netif/lowpan6_ble.h" + +#if LWIP_IPV6 + +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/nd6.h" +#include "lwip/mem.h" +#include "lwip/udp.h" +#include "lwip/tcpip.h" +#include "lwip/snmp.h" + +#include + +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 +/** context memory, containing IPv6 addresses */ +static ip6_addr_t rfc7668_context[LWIP_6LOWPAN_NUM_CONTEXTS]; +#else +#define rfc7668_context NULL +#endif + +static struct lowpan6_link_addr rfc7668_local_addr; +static struct lowpan6_link_addr rfc7668_peer_addr; + +/** + * @ingroup rfc7668if + * convert BT address to EUI64 addr + * + * This method converts a Bluetooth MAC address to an EUI64 address, + * which is used within IPv6 communication + * + * @param dst IPv6 destination space + * @param src BLE MAC address source + * @param public_addr If the LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS + * option is set, bit 0x02 will be set if param=0 (no public addr); cleared otherwise + * + * @see LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS + */ +void +ble_addr_to_eui64(uint8_t *dst, const uint8_t *src, int public_addr) +{ + /* according to RFC7668 ch 3.2.2. */ + memcpy(dst, src, 3); + dst[3] = 0xFF; + dst[4] = 0xFE; + memcpy(&dst[5], &src[3], 3); +#if LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS + if(public_addr) { + dst[0] &= ~0x02; + } else { + dst[0] |= 0x02; + } +#else + LWIP_UNUSED_ARG(public_addr); +#endif +} + +/** + * @ingroup rfc7668if + * convert EUI64 address to Bluetooth MAC addr + * + * This method converts an EUI64 address to a Bluetooth MAC address, + * + * @param dst BLE MAC address destination + * @param src IPv6 source + * + */ +void +eui64_to_ble_addr(uint8_t *dst, const uint8_t *src) +{ + /* according to RFC7668 ch 3.2.2. */ + memcpy(dst,src,3); + memcpy(&dst[3],&src[5],3); +} + +/** Set an address used for stateful compression. + * This expects an address of 6 or 8 bytes. 
+ */ +static err_t +rfc7668_set_addr(struct lowpan6_link_addr *addr, const u8_t *in_addr, size_t in_addr_len, int is_mac_48, int is_public_addr) +{ + if ((in_addr == NULL) || (addr == NULL)) { + return ERR_VAL; + } + if (is_mac_48) { + if (in_addr_len != 6) { + return ERR_VAL; + } + addr->addr_len = 8; + ble_addr_to_eui64(addr->addr, in_addr, is_public_addr); + } else { + if (in_addr_len != 8) { + return ERR_VAL; + } + addr->addr_len = 8; + memcpy(addr->addr, in_addr, 8); + } + return ERR_OK; +} + + +/** Set the local address used for stateful compression. + * This expects an address of 8 bytes. + */ +err_t +rfc7668_set_local_addr_eui64(struct netif *netif, const u8_t *local_addr, size_t local_addr_len) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_local_addr, local_addr, local_addr_len, 0, 0); +} + +/** Set the local address used for stateful compression. + * This expects an address of 6 bytes. + */ +err_t +rfc7668_set_local_addr_mac48(struct netif *netif, const u8_t *local_addr, size_t local_addr_len, int is_public_addr) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_local_addr, local_addr, local_addr_len, 1, is_public_addr); +} + +/** Set the peer address used for stateful compression. + * This expects an address of 8 bytes. + */ +err_t +rfc7668_set_peer_addr_eui64(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_peer_addr, peer_addr, peer_addr_len, 0, 0); +} + +/** Set the peer address used for stateful compression. + * This expects an address of 6 bytes. + */ +err_t +rfc7668_set_peer_addr_mac48(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len, int is_public_addr) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_peer_addr, peer_addr, peer_addr_len, 1, is_public_addr); +} + +/** Encapsulate IPv6 frames for BLE transmission + * + * This method implements the IPv6 header compression: + * *) According to RFC6282 + * *) See Figure 2, contains base format of bit positions + * *) Fragmentation not necessary (done at L2CAP layer of BLE) + * @note Currently the pbuf allocation uses 256 bytes. If longer packets are used (possible due to MTU=1480Bytes), increase it here! + * + * @param p Pbuf struct, containing the payload data + * @param netif Output network interface. Should be of RFC7668 type + * + * @return Same as netif->output. + */ +static err_t +rfc7668_compress(struct netif *netif, struct pbuf *p) +{ + struct pbuf *p_frag; + u16_t remaining_len; + u8_t *buffer; + u8_t lowpan6_header_len; + u8_t hidden_header_len; + err_t err; + + LWIP_ASSERT("lowpan6_frag: netif->linkoutput not set", netif->linkoutput != NULL); + +#if LWIP_6LOWPAN_IPHC + + /* We'll use a dedicated pbuf for building BLE fragments. + * We'll over-allocate it by the bytes saved for header compression. + */ + p_frag = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM); + if (p_frag == NULL) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece", p_frag->len == p_frag->tot_len); + + /* Write IP6 header (with IPHC). 
*/ + buffer = (u8_t*)p_frag->payload; + + err = lowpan6_compress_headers(netif, (u8_t *)p->payload, p->len, buffer, p_frag->len, + &lowpan6_header_len, &hidden_header_len, rfc7668_context, &rfc7668_local_addr, &rfc7668_peer_addr); + if (err != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + pbuf_free(p_frag); + return err; + } + pbuf_remove_header(p, hidden_header_len); + + /* Calculate remaining packet length */ + remaining_len = p->tot_len; + + /* Copy IPv6 packet */ + pbuf_copy_partial(p, buffer + lowpan6_header_len, remaining_len, 0); + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = remaining_len + lowpan6_header_len; + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG|LWIP_DBG_TRACE, ("rfc7668_output: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + + pbuf_free(p_frag); + + return err; +#else /* LWIP_6LOWPAN_IPHC */ + /* 6LoWPAN over BLE requires IPHC! */ + return ERR_IF; +#endif/* LWIP_6LOWPAN_IPHC */ +} + +/** + * @ingroup rfc7668if + * Set context id IPv6 address + * + * Store one IPv6 address to a given context id. + * + * @param idx Context id + * @param context IPv6 addr for this context + * + * @return ERR_OK (if everything is fine), ERR_ARG (if the context id is out of range), ERR_VAL (if contexts disabled) + */ +err_t +rfc7668_set_context(u8_t idx, const ip6_addr_t *context) +{ +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + /* check if the ID is possible */ + if (idx >= LWIP_6LOWPAN_NUM_CONTEXTS) { + return ERR_ARG; + } + /* copy IPv6 address to context storage */ + ip6_addr_set(&rfc7668_context[idx], context); + return ERR_OK; +#else + LWIP_UNUSED_ARG(idx); + LWIP_UNUSED_ARG(context); + return ERR_VAL; +#endif +} + +/** + * @ingroup rfc7668if + * Compress outgoing IPv6 packet and pass it on to netif->linkoutput + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The IP address of the packet destination. + * + * @return See rfc7668_compress + */ +err_t +rfc7668_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) +{ + /* dst ip6addr is not used here, we only have one peer */ + LWIP_UNUSED_ARG(ip6addr); + + return rfc7668_compress(netif, q); +} + +/** + * @ingroup rfc7668if + * Process a received raw payload from an L2CAP channel + * + * @param p the received packet, p->payload pointing to the + * IPv6 header (maybe compressed) + * @param netif the network interface on which the packet was received + * + * @return ERR_OK if everything was fine + */ +err_t +rfc7668_input(struct pbuf * p, struct netif *netif) +{ + u8_t * puc; + + MIB2_STATS_NETIF_ADD(netif, ifinoctets, p->tot_len); + + /* Load first header byte */ + puc = (u8_t*)p->payload; + + /* no IP header compression */ + if (*puc == 0x41) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Completed packet, removing dispatch: 0x%2x \n", *puc)); + /* This is a complete IPv6 packet, just skip header byte. */ + pbuf_remove_header(p, 1); + /* IPHC header compression */ + } else if ((*puc & 0xe0 )== 0x60) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Completed packet, decompress dispatch: 0x%2x \n", *puc)); + /* IPv6 headers are compressed using IPHC. 
*/ + p = lowpan6_decompress(p, 0, rfc7668_context, &rfc7668_peer_addr, &rfc7668_local_addr); + /* if no pbuf is returned, handle as discarded packet */ + if (p == NULL) { + MIB2_STATS_NETIF_INC(netif, ifindiscards); + return ERR_OK; + } + /* invalid header byte, discard */ + } else { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Completed packet, discarding: 0x%2x \n", *puc)); + MIB2_STATS_NETIF_INC(netif, ifindiscards); + pbuf_free(p); + return ERR_OK; + } + /* @todo: distinguish unicast/multicast */ + MIB2_STATS_NETIF_INC(netif, ifinucastpkts); + +#if LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG + { + u16_t i; + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("IPv6 payload:\n")); + for (i = 0; i < p->len; i++) { + if ((i%4)==0) { + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("\n")); + } + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("%2X ", *((uint8_t *)p->payload+i))); + } + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("\np->len: %d\n", p->len)); + } +#endif + /* pass data to ip6_input */ + return ip6_input(p, netif); +} + +/** + * @ingroup rfc7668if + * Initialize the netif + * + * No flags are used (broadcast not possible, not ethernet, ...) + * The shortname for this netif is "BT" + * + * @param netif the network interface to be initialized as RFC7668 netif + * + * @return ERR_OK if everything went fine + */ +err_t +rfc7668_if_init(struct netif *netif) +{ + netif->name[0] = 'b'; + netif->name[1] = 't'; + /* local function as IPv6 output */ + netif->output_ip6 = rfc7668_output; + + MIB2_INIT_NETIF(netif, snmp_ifType_other, 0); + + /* maximum transfer unit, set according to RFC7668 ch2.4 */ + netif->mtu = 1280; + + /* no flags set (no broadcast, ethernet,...)*/ + netif->flags = 0; + + /* everything fine */ + return ERR_OK; +} + +#if !NO_SYS +/** + * Pass a received packet to tcpip_thread for input processing + * + * @param p the received packet, p->payload pointing to the + * IEEE 802.15.4 header. + * @param inp the network interface on which the packet was received + * + * @return see @ref tcpip_inpkt, same return values + */ +err_t +tcpip_rfc7668_input(struct pbuf *p, struct netif *inp) +{ + /* send data to upper layer, return the result */ + return tcpip_inpkt(p, inp, rfc7668_input); +} +#endif /* !NO_SYS */ + +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c b/Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c new file mode 100644 index 00000000..baea206a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c @@ -0,0 +1,841 @@ +/** + * @file + * + * Common 6LowPAN routines for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + * + * This implementation aims to conform to IEEE 802.15.4(-2015), RFC 4944 and RFC 6282. + * @todo: RFC 6775. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +/** + * @defgroup sixlowpan 6LoWPAN (RFC4944) + * @ingroup netifs + * 6LowPAN netif implementation + */ + +#include "netif/lowpan6_common.h" + +#if LWIP_IPV6 + +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/udp.h" + +#include + +/* Determine compression mode for unicast address. */ +s8_t +lowpan6_get_address_mode(const ip6_addr_t *ip6addr, const struct lowpan6_link_addr *mac_addr) +{ + if (mac_addr->addr_len == 2) { + if ((ip6addr->addr[2] == (u32_t)PP_HTONL(0x000000ff)) && + ((ip6addr->addr[3] & PP_HTONL(0xffff0000)) == PP_NTOHL(0xfe000000))) { + if ((ip6addr->addr[3] & PP_HTONL(0x0000ffff)) == lwip_ntohl((mac_addr->addr[0] << 8) | mac_addr->addr[1])) { + return 3; + } + } + } else if (mac_addr->addr_len == 8) { + if ((ip6addr->addr[2] == lwip_ntohl(((mac_addr->addr[0] ^ 2) << 24) | (mac_addr->addr[1] << 16) | mac_addr->addr[2] << 8 | mac_addr->addr[3])) && + (ip6addr->addr[3] == lwip_ntohl((mac_addr->addr[4] << 24) | (mac_addr->addr[5] << 16) | mac_addr->addr[6] << 8 | mac_addr->addr[7]))) { + return 3; + } + } + + if ((ip6addr->addr[2] == PP_HTONL(0x000000ffUL)) && + ((ip6addr->addr[3] & PP_HTONL(0xffff0000)) == PP_NTOHL(0xfe000000UL))) { + return 2; + } + + return 1; +} + +#if LWIP_6LOWPAN_IPHC + +/* Determine compression mode for multicast address. */ +static s8_t +lowpan6_get_address_mode_mc(const ip6_addr_t *ip6addr) +{ + if ((ip6addr->addr[0] == PP_HTONL(0xff020000)) && + (ip6addr->addr[1] == 0) && + (ip6addr->addr[2] == 0) && + ((ip6addr->addr[3] & PP_HTONL(0xffffff00)) == 0)) { + return 3; + } else if (((ip6addr->addr[0] & PP_HTONL(0xff00ffff)) == PP_HTONL(0xff000000)) && + (ip6addr->addr[1] == 0)) { + if ((ip6addr->addr[2] == 0) && + ((ip6addr->addr[3] & PP_HTONL(0xff000000)) == 0)) { + return 2; + } else if ((ip6addr->addr[2] & PP_HTONL(0xffffff00)) == 0) { + return 1; + } + } + + return 0; +} + +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 +static s8_t +lowpan6_context_lookup(const ip6_addr_t *lowpan6_contexts, const ip6_addr_t *ip6addr) +{ + s8_t i; + + for (i = 0; i < LWIP_6LOWPAN_NUM_CONTEXTS; i++) { + if (ip6_addr_netcmp(&lowpan6_contexts[i], ip6addr)) { + return i; + } + } + return -1; +} +#endif /* LWIP_6LOWPAN_NUM_CONTEXTS > 0 */ + +/* + * Compress IPv6 and/or UDP headers. 
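+ * The compressed form written to outbuf is: the 2-byte IPHC base header (dispatch 011 plus the TF/NH/HLIM and CID/SAC/SAM/M/DAC/DAM fields of RFC 6282), an optional context identifier byte, then whatever fields could not be elided, carried inline.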
+ * */ +err_t +lowpan6_compress_headers(struct netif *netif, u8_t *inbuf, size_t inbuf_size, u8_t *outbuf, size_t outbuf_size, + u8_t *lowpan6_header_len_out, u8_t *hidden_header_len_out, ip6_addr_t *lowpan6_contexts, + const struct lowpan6_link_addr *src, const struct lowpan6_link_addr *dst) +{ + u8_t *buffer, *inptr; + u8_t lowpan6_header_len; + u8_t hidden_header_len = 0; + s8_t i; + struct ip6_hdr *ip6hdr; + ip_addr_t ip6src, ip6dst; + + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("inbuf != NULL", inbuf != NULL); + LWIP_ASSERT("outbuf != NULL", outbuf != NULL); + LWIP_ASSERT("lowpan6_header_len_out != NULL", lowpan6_header_len_out != NULL); + LWIP_ASSERT("hidden_header_len_out != NULL", hidden_header_len_out != NULL); + + /* Perform 6LowPAN IPv6 header compression according to RFC 6282 */ + buffer = outbuf; + inptr = inbuf; + + if (inbuf_size < IP6_HLEN) { + /* input buffer too short */ + return ERR_VAL; + } + if (outbuf_size < IP6_HLEN) { + /* output buffer too short for worst case */ + return ERR_MEM; + } + + /* Point to ip6 header and align copies of src/dest addresses. */ + ip6hdr = (struct ip6_hdr *)inptr; + ip_addr_copy_from_ip6_packed(ip6dst, ip6hdr->dest); + ip6_addr_assign_zone(ip_2_ip6(&ip6dst), IP6_UNKNOWN, netif); + ip_addr_copy_from_ip6_packed(ip6src, ip6hdr->src); + ip6_addr_assign_zone(ip_2_ip6(&ip6src), IP6_UNKNOWN, netif); + + /* Basic length of 6LowPAN header, set dispatch and clear fields. */ + lowpan6_header_len = 2; + buffer[0] = 0x60; + buffer[1] = 0; + + /* Determine whether there will be a Context Identifier Extension byte or not. + * If so, set it already. */ +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + buffer[2] = 0; + + i = lowpan6_context_lookup(lowpan6_contexts, ip_2_ip6(&ip6src)); + if (i >= 0) { + /* Stateful source address compression. */ + buffer[1] |= 0x40; + buffer[2] |= (i & 0x0f) << 4; + } + + i = lowpan6_context_lookup(lowpan6_contexts, ip_2_ip6(&ip6dst)); + if (i >= 0) { + /* Stateful destination address compression. */ + buffer[1] |= 0x04; + buffer[2] |= i & 0x0f; + } + + if (buffer[2] != 0x00) { + /* Context identifier extension byte is appended. */ + buffer[1] |= 0x80; + lowpan6_header_len++; + } +#else /* LWIP_6LOWPAN_NUM_CONTEXTS > 0 */ + LWIP_UNUSED_ARG(lowpan6_contexts); +#endif /* LWIP_6LOWPAN_NUM_CONTEXTS > 0 */ + + /* Determine TF field: Traffic Class, Flow Label */ + if (IP6H_FL(ip6hdr) == 0) { + /* Flow label is elided. */ + buffer[0] |= 0x10; + if (IP6H_TC(ip6hdr) == 0) { + /* Traffic class (ECN+DSCP) elided too. */ + buffer[0] |= 0x08; + } else { + /* Traffic class (ECN+DSCP) appended. */ + buffer[lowpan6_header_len++] = IP6H_TC(ip6hdr); + } + } else { + if (((IP6H_TC(ip6hdr) & 0x3f) == 0)) { + /* DSCP portion of Traffic Class is elided, ECN and FL are appended (3 bytes) */ + buffer[0] |= 0x08; + + buffer[lowpan6_header_len] = IP6H_TC(ip6hdr) & 0xc0; + buffer[lowpan6_header_len++] |= (IP6H_FL(ip6hdr) >> 16) & 0x0f; + buffer[lowpan6_header_len++] = (IP6H_FL(ip6hdr) >> 8) & 0xff; + buffer[lowpan6_header_len++] = IP6H_FL(ip6hdr) & 0xff; + } else { + /* Traffic class and flow label are appended (4 bytes) */ + buffer[lowpan6_header_len++] = IP6H_TC(ip6hdr); + buffer[lowpan6_header_len++] = (IP6H_FL(ip6hdr) >> 16) & 0x0f; + buffer[lowpan6_header_len++] = (IP6H_FL(ip6hdr) >> 8) & 0xff; + buffer[lowpan6_header_len++] = IP6H_FL(ip6hdr) & 0xff; + } + } + + /* Compress NH? + * Only if UDP for now. @todo support other NH compression. */ + if (IP6H_NEXTH(ip6hdr) == IP6_NEXTH_UDP) { + buffer[0] |= 0x04; + } else { + /* append nexth. 
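The NH bit in byte 0 stays clear for anything other than UDP, so the full Next Header value is carried inline here; for UDP the bit was set above and the header is encoded later with NHC.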
*/ + buffer[lowpan6_header_len++] = IP6H_NEXTH(ip6hdr); + } + + /* Compress hop limit? */ + if (IP6H_HOPLIM(ip6hdr) == 255) { + buffer[0] |= 0x03; + } else if (IP6H_HOPLIM(ip6hdr) == 64) { + buffer[0] |= 0x02; + } else if (IP6H_HOPLIM(ip6hdr) == 1) { + buffer[0] |= 0x01; + } else { + /* append hop limit */ + buffer[lowpan6_header_len++] = IP6H_HOPLIM(ip6hdr); + } + + /* Compress source address */ + if (((buffer[1] & 0x40) != 0) || + (ip6_addr_islinklocal(ip_2_ip6(&ip6src)))) { + /* Context-based or link-local source address compression. */ + i = lowpan6_get_address_mode(ip_2_ip6(&ip6src), src); + buffer[1] |= (i & 0x03) << 4; + if (i == 1) { + MEMCPY(buffer + lowpan6_header_len, inptr + 16, 8); + lowpan6_header_len += 8; + } else if (i == 2) { + MEMCPY(buffer + lowpan6_header_len, inptr + 22, 2); + lowpan6_header_len += 2; + } + } else if (ip6_addr_isany(ip_2_ip6(&ip6src))) { + /* Special case: mark SAC and leave SAM=0 */ + buffer[1] |= 0x40; + } else { + /* Append full address. */ + MEMCPY(buffer + lowpan6_header_len, inptr + 8, 16); + lowpan6_header_len += 16; + } + + /* Compress destination address */ + if (ip6_addr_ismulticast(ip_2_ip6(&ip6dst))) { + /* @todo support stateful multicast address compression */ + + buffer[1] |= 0x08; + + i = lowpan6_get_address_mode_mc(ip_2_ip6(&ip6dst)); + buffer[1] |= i & 0x03; + if (i == 0) { + MEMCPY(buffer + lowpan6_header_len, inptr + 24, 16); + lowpan6_header_len += 16; + } else if (i == 1) { + buffer[lowpan6_header_len++] = inptr[25]; + MEMCPY(buffer + lowpan6_header_len, inptr + 35, 5); + lowpan6_header_len += 5; + } else if (i == 2) { + buffer[lowpan6_header_len++] = inptr[25]; + MEMCPY(buffer + lowpan6_header_len, inptr + 37, 3); + lowpan6_header_len += 3; + } else if (i == 3) { + buffer[lowpan6_header_len++] = (inptr)[39]; + } + } else if (((buffer[1] & 0x04) != 0) || + (ip6_addr_islinklocal(ip_2_ip6(&ip6dst)))) { + /* Context-based or link-local destination address compression. */ + i = lowpan6_get_address_mode(ip_2_ip6(&ip6dst), dst); + buffer[1] |= i & 0x03; + if (i == 1) { + MEMCPY(buffer + lowpan6_header_len, inptr + 32, 8); + lowpan6_header_len += 8; + } else if (i == 2) { + MEMCPY(buffer + lowpan6_header_len, inptr + 38, 2); + lowpan6_header_len += 2; + } + } else { + /* Append full address. */ + MEMCPY(buffer + lowpan6_header_len, inptr + 24, 16); + lowpan6_header_len += 16; + } + + /* Move to payload. */ + inptr += IP6_HLEN; + hidden_header_len += IP6_HLEN; + +#if LWIP_UDP + /* Compress UDP header? */ + if (IP6H_NEXTH(ip6hdr) == IP6_NEXTH_UDP) { + /* @todo support optional checksum compression */ + + if (inbuf_size < IP6_HLEN + UDP_HLEN) { + /* input buffer too short */ + return ERR_VAL; + } + if (outbuf_size < (size_t)(hidden_header_len + 7)) { + /* output buffer too short for worst case */ + return ERR_MEM; + } + + buffer[lowpan6_header_len] = 0xf0; + + /* determine port compression mode. */ + if ((inptr[0] == 0xf0) && ((inptr[1] & 0xf0) == 0xb0) && + (inptr[2] == 0xf0) && ((inptr[3] & 0xf0) == 0xb0)) { + /* Compress source and dest ports. */ + buffer[lowpan6_header_len++] |= 0x03; + buffer[lowpan6_header_len++] = ((inptr[1] & 0x0f) << 4) | (inptr[3] & 0x0f); + } else if (inptr[0] == 0xf0) { + /* Compress source port. */ + buffer[lowpan6_header_len++] |= 0x02; + buffer[lowpan6_header_len++] = inptr[1]; + buffer[lowpan6_header_len++] = inptr[2]; + buffer[lowpan6_header_len++] = inptr[3]; + } else if (inptr[2] == 0xf0) { + /* Compress dest port. 
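Destination port in the 0xf000-0xf0ff range: the source port is carried in full and only the low byte of the destination port is sent, e.g. 0xf017 is encoded as the single byte 0x17.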
*/ + buffer[lowpan6_header_len++] |= 0x01; + buffer[lowpan6_header_len++] = inptr[0]; + buffer[lowpan6_header_len++] = inptr[1]; + buffer[lowpan6_header_len++] = inptr[3]; + } else { + /* append full ports. */ + lowpan6_header_len++; + buffer[lowpan6_header_len++] = inptr[0]; + buffer[lowpan6_header_len++] = inptr[1]; + buffer[lowpan6_header_len++] = inptr[2]; + buffer[lowpan6_header_len++] = inptr[3]; + } + + /* elide length and copy checksum */ + buffer[lowpan6_header_len++] = inptr[6]; + buffer[lowpan6_header_len++] = inptr[7]; + + hidden_header_len += UDP_HLEN; + } +#endif /* LWIP_UDP */ + + *lowpan6_header_len_out = lowpan6_header_len; + *hidden_header_len_out = hidden_header_len; + + return ERR_OK; +} + +/** Decompress IPv6 and UDP headers compressed according to RFC 6282 + * + * @param lowpan6_buffer compressed headers, first byte is the dispatch byte + * @param lowpan6_bufsize size of lowpan6_buffer (may include data after headers) + * @param decomp_buffer buffer where the decompressed headers are stored + * @param decomp_bufsize size of decomp_buffer + * @param hdr_size_comp returns the size of the compressed headers (skip to get to data) + * @param hdr_size_decomp returns the size of the decompressed headers (IPv6 + UDP) + * @param datagram_size datagram size from fragments or 0 if unfragmented + * @param compressed_size compressed datagram size (for unfragmented rx) + * @param lowpan6_contexts context addresses + * @param src source address of the outer layer, used for address compression + * @param dest destination address of the outer layer, used for address compression + * @return ERR_OK if decompression succeeded, an error otherwise + */ +static err_t +lowpan6_decompress_hdr(u8_t *lowpan6_buffer, size_t lowpan6_bufsize, + u8_t *decomp_buffer, size_t decomp_bufsize, + u16_t *hdr_size_comp, u16_t *hdr_size_decomp, + u16_t datagram_size, u16_t compressed_size, + ip6_addr_t *lowpan6_contexts, + struct lowpan6_link_addr *src, struct lowpan6_link_addr *dest) +{ + u16_t lowpan6_offset; + struct ip6_hdr *ip6hdr; + s8_t i; + u32_t header_temp; + u16_t ip6_offset = IP6_HLEN; + + LWIP_ASSERT("lowpan6_buffer != NULL", lowpan6_buffer != NULL); + LWIP_ASSERT("decomp_buffer != NULL", decomp_buffer != NULL); + LWIP_ASSERT("src != NULL", src != NULL); + LWIP_ASSERT("dest != NULL", dest != NULL); + LWIP_ASSERT("hdr_size_comp != NULL", hdr_size_comp != NULL); + LWIP_ASSERT("dehdr_size_decompst != NULL", hdr_size_decomp != NULL); + + ip6hdr = (struct ip6_hdr *)decomp_buffer; + if (decomp_bufsize < IP6_HLEN) { + return ERR_MEM; + } + + /* output the full compressed packet, if set in @see lowpan6_opts.h */ +#if LWIP_LOWPAN6_IP_COMPRESSED_DEBUG + { + u16_t j; + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("lowpan6_decompress_hdr: IP6 payload (compressed): \n")); + for (j = 0; j < lowpan6_bufsize; j++) { + if ((j % 4) == 0) { + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("\n")); + } + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("%2X ", lowpan6_buffer[j])); + } + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("\np->len: %d", lowpan6_bufsize)); + } +#endif + + /* offset for inline IP headers (RFC 6282 ch3)*/ + lowpan6_offset = 2; + /* if CID is set (context identifier), the context byte + * follows immediately after the header, so other IPHC fields are @+3 */ + if (lowpan6_buffer[1] & 0x80) { + lowpan6_offset++; + } + + /* Set IPv6 version, traffic class and flow label. 
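The TF bits in byte 0 select how much is carried inline: 00 = ECN+DSCP+Flow Label (4 bytes), 01 = ECN+Flow Label (3 bytes, DSCP elided), 10 = ECN+DSCP only (1 byte), 11 = everything elided.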
(RFC6282, ch 3.1.1.)*/ + if ((lowpan6_buffer[0] & 0x18) == 0x00) { + header_temp = ((lowpan6_buffer[lowpan6_offset+1] & 0x0f) << 16) | \ + (lowpan6_buffer[lowpan6_offset + 2] << 8) | lowpan6_buffer[lowpan6_offset+3]; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 00, ECN: 0x%2x, Flowlabel+DSCP: 0x%8X\n", \ + lowpan6_buffer[lowpan6_offset],header_temp)); + IP6H_VTCFL_SET(ip6hdr, 6, lowpan6_buffer[lowpan6_offset], header_temp); + /* increase offset, processed 4 bytes here: + * TF=00: ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)*/ + lowpan6_offset += 4; + } else if ((lowpan6_buffer[0] & 0x18) == 0x08) { + header_temp = ((lowpan6_buffer[lowpan6_offset] & 0x0f) << 16) | (lowpan6_buffer[lowpan6_offset + 1] << 8) | lowpan6_buffer[lowpan6_offset+2]; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 01, ECN: 0x%2x, Flowlabel: 0x%2X, DSCP ignored\n", \ + lowpan6_buffer[lowpan6_offset] & 0xc0,header_temp)); + IP6H_VTCFL_SET(ip6hdr, 6, lowpan6_buffer[lowpan6_offset] & 0xc0, header_temp); + /* increase offset, processed 3 bytes here: + * TF=01: ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided.*/ + lowpan6_offset += 3; + } else if ((lowpan6_buffer[0] & 0x18) == 0x10) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 10, DCSP+ECN: 0x%2x, Flowlabel ignored\n", lowpan6_buffer[lowpan6_offset])); + IP6H_VTCFL_SET(ip6hdr, 6, lowpan6_buffer[lowpan6_offset],0); + /* increase offset, processed 1 byte here: + * ECN + DSCP (1 byte), Flow Label is elided.*/ + lowpan6_offset += 1; + } else if ((lowpan6_buffer[0] & 0x18) == 0x18) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 11, DCSP/ECN & Flowlabel ignored\n")); + /* don't increase offset, no bytes processed here */ + IP6H_VTCFL_SET(ip6hdr, 6, 0, 0); + } + + /* Set Next Header (NH) */ + if ((lowpan6_buffer[0] & 0x04) == 0x00) { + /* 0: full next header byte carried inline (increase offset)*/ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NH: 0x%2X\n", lowpan6_buffer[lowpan6_offset+1])); + IP6H_NEXTH_SET(ip6hdr, lowpan6_buffer[lowpan6_offset++]); + } else { + /* 1: NH compression, LOWPAN_NHC (RFC6282, ch 4.1) */ + /* We should fill this later with NHC decoding */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NH: skipped, later done with NHC\n")); + IP6H_NEXTH_SET(ip6hdr, 0); + } + + /* Set Hop Limit, either carried inline or 3 different hops (1,64,255) */ + if ((lowpan6_buffer[0] & 0x03) == 0x00) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: full value: %d\n", lowpan6_buffer[lowpan6_offset+1])); + IP6H_HOPLIM_SET(ip6hdr, lowpan6_buffer[lowpan6_offset++]); + } else if ((lowpan6_buffer[0] & 0x03) == 0x01) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: compressed: 1\n")); + IP6H_HOPLIM_SET(ip6hdr, 1); + } else if ((lowpan6_buffer[0] & 0x03) == 0x02) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: compressed: 64\n")); + IP6H_HOPLIM_SET(ip6hdr, 64); + } else if ((lowpan6_buffer[0] & 0x03) == 0x03) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: compressed: 255\n")); + IP6H_HOPLIM_SET(ip6hdr, 255); + } + + /* Source address decoding. 
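SAC (byte 1, bit 0x40) selects stateless vs. context-based compression and SAM (bits 0x30) how many source address bits follow inline: 128, 64, 16 or none (derived from the link-layer address).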
*/ + if ((lowpan6_buffer[1] & 0x40) == 0x00) { + /* Source address compression (SAC) = 0 -> stateless compression */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAC == 0, no context byte\n")); + /* Stateless compression */ + if ((lowpan6_buffer[1] & 0x30) == 0x00) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 00, no src compression, fetching 128bits inline\n")); + /* copy full address, increase offset by 16 Bytes */ + MEMCPY(&ip6hdr->src.addr[0], lowpan6_buffer + lowpan6_offset, 16); + lowpan6_offset += 16; + } else if ((lowpan6_buffer[1] & 0x30) == 0x10) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 01, src compression, 64bits inline\n")); + /* set 64 bits to link local */ + ip6hdr->src.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->src.addr[1] = 0; + /* copy 8 Bytes, increase offset */ + MEMCPY(&ip6hdr->src.addr[2], lowpan6_buffer + lowpan6_offset, 8); + lowpan6_offset += 8; + } else if ((lowpan6_buffer[1] & 0x30) == 0x20) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 10, src compression, 16bits inline\n")); + /* set 96 bits to link local */ + ip6hdr->src.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->src.addr[1] = 0; + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + /* extract remaining 16bits from inline bytes, increase offset */ + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (lowpan6_buffer[lowpan6_offset] << 8) | + lowpan6_buffer[lowpan6_offset + 1]); + lowpan6_offset += 2; + } else if ((lowpan6_buffer[1] & 0x30) == 0x30) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 11, src compression, 0bits inline, using other headers\n")); + /* no information avalaible, using other layers, see RFC6282 ch 3.2.2 */ + ip6hdr->src.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->src.addr[1] = 0; + if (src->addr_len == 2) { + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (src->addr[0] << 8) | src->addr[1]); + } else if (src->addr_len == 8) { + ip6hdr->src.addr[2] = lwip_htonl(((src->addr[0] ^ 2) << 24) | (src->addr[1] << 16) | + (src->addr[2] << 8) | src->addr[3]); + ip6hdr->src.addr[3] = lwip_htonl((src->addr[4] << 24) | (src->addr[5] << 16) | + (src->addr[6] << 8) | src->addr[7]); + } else { + /* invalid source address length */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Invalid source address length\n")); + return ERR_VAL; + } + } + } else { + /* Source address compression (SAC) = 1 -> stateful/context-based compression */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAC == 1, additional context byte\n")); + if ((lowpan6_buffer[1] & 0x30) == 0x00) { + /* SAM=00, address=> :: (ANY) */ + ip6hdr->src.addr[0] = 0; + ip6hdr->src.addr[1] = 0; + ip6hdr->src.addr[2] = 0; + ip6hdr->src.addr[3] = 0; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 00, context compression, ANY (::)\n")); + } else { + /* Set prefix from context info */ + if (lowpan6_buffer[1] & 0x80) { + i = (lowpan6_buffer[2] >> 4) & 0x0f; + } else { + i = 0; + } + if (i >= LWIP_6LOWPAN_NUM_CONTEXTS) { + /* Error */ + return ERR_VAL; + } +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + ip6hdr->src.addr[0] = lowpan6_contexts[i].addr[0]; + ip6hdr->src.addr[1] = lowpan6_contexts[i].addr[1]; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == xx, context compression found @%d: %8X, %8X\n", (int)i, ip6hdr->src.addr[0], ip6hdr->src.addr[1])); +#else + LWIP_UNUSED_ARG(lowpan6_contexts); +#endif + } + + /* determine further address bits */ + if ((lowpan6_buffer[1] & 0x30) == 0x10) { + /* SAM=01, load additional 64bits */ + 
MEMCPY(&ip6hdr->src.addr[2], lowpan6_buffer + lowpan6_offset, 8); + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 01, context compression, 64bits inline\n")); + lowpan6_offset += 8; + } else if ((lowpan6_buffer[1] & 0x30) == 0x20) { + /* SAM=01, load additional 16bits */ + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (lowpan6_buffer[lowpan6_offset] << 8) | lowpan6_buffer[lowpan6_offset + 1]); + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 10, context compression, 16bits inline\n")); + lowpan6_offset += 2; + } else if ((lowpan6_buffer[1] & 0x30) == 0x30) { + /* SAM=11, address is fully elided, load from other layers */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 11, context compression, 0bits inline, using other headers\n")); + if (src->addr_len == 2) { + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (src->addr[0] << 8) | src->addr[1]); + } else if (src->addr_len == 8) { + ip6hdr->src.addr[2] = lwip_htonl(((src->addr[0] ^ 2) << 24) | (src->addr[1] << 16) | (src->addr[2] << 8) | src->addr[3]); + ip6hdr->src.addr[3] = lwip_htonl((src->addr[4] << 24) | (src->addr[5] << 16) | (src->addr[6] << 8) | src->addr[7]); + } else { + /* invalid source address length */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Invalid source address length\n")); + return ERR_VAL; + } + } + } + + /* Destination address decoding. */ + if (lowpan6_buffer[1] & 0x08) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("M=1: multicast\n")); + /* Multicast destination */ + if (lowpan6_buffer[1] & 0x04) { + LWIP_DEBUGF(LWIP_DBG_ON,("DAC == 1, context multicast: unsupported!!!\n")); + /* @todo support stateful multicast addressing */ + return ERR_VAL; + } + + if ((lowpan6_buffer[1] & 0x03) == 0x00) { + /* DAM = 00, copy full address (128bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 00, no dst compression, fetching 128bits inline\n")); + MEMCPY(&ip6hdr->dest.addr[0], lowpan6_buffer + lowpan6_offset, 16); + lowpan6_offset += 16; + } else if ((lowpan6_buffer[1] & 0x03) == 0x01) { + /* DAM = 01, copy 4 bytes (32bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 01, dst address form (48bits): ffXX::00XX:XXXX:XXXX\n")); + ip6hdr->dest.addr[0] = lwip_htonl(0xff000000UL | (lowpan6_buffer[lowpan6_offset++] << 16)); + ip6hdr->dest.addr[1] = 0; + ip6hdr->dest.addr[2] = lwip_htonl(lowpan6_buffer[lowpan6_offset++]); + ip6hdr->dest.addr[3] = lwip_htonl((lowpan6_buffer[lowpan6_offset] << 24) | (lowpan6_buffer[lowpan6_offset + 1] << 16) | (lowpan6_buffer[lowpan6_offset + 2] << 8) | lowpan6_buffer[lowpan6_offset + 3]); + lowpan6_offset += 4; + } else if ((lowpan6_buffer[1] & 0x03) == 0x02) { + /* DAM = 10, copy 3 bytes (24bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 10, dst address form (32bits): ffXX::00XX:XXXX\n")); + ip6hdr->dest.addr[0] = lwip_htonl(0xff000000UL | (lowpan6_buffer[lowpan6_offset++] << 16)); + ip6hdr->dest.addr[1] = 0; + ip6hdr->dest.addr[2] = 0; + ip6hdr->dest.addr[3] = lwip_htonl((lowpan6_buffer[lowpan6_offset] << 16) | (lowpan6_buffer[lowpan6_offset + 1] << 8) | lowpan6_buffer[lowpan6_offset + 2]); + lowpan6_offset += 3; + } else if ((lowpan6_buffer[1] & 0x03) == 0x03) { + /* DAM = 11, copy 1 byte (8bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 11, dst address form (8bits): ff02::00XX\n")); + ip6hdr->dest.addr[0] = PP_HTONL(0xff020000UL); + ip6hdr->dest.addr[1] = 0; + ip6hdr->dest.addr[2] = 0; + 
ip6hdr->dest.addr[3] = lwip_htonl(lowpan6_buffer[lowpan6_offset++]); + } + + } else { + /* no Multicast (M=0) */ + if (lowpan6_buffer[1] & 0x04) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAC == 1, stateful compression\n")); + /* Stateful destination compression */ + /* Set prefix from context info */ + if (lowpan6_buffer[1] & 0x80) { + i = lowpan6_buffer[2] & 0x0f; + } else { + i = 0; + } + if (i >= LWIP_6LOWPAN_NUM_CONTEXTS) { + /* Error */ + return ERR_VAL; + } +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + ip6hdr->dest.addr[0] = lowpan6_contexts[i].addr[0]; + ip6hdr->dest.addr[1] = lowpan6_contexts[i].addr[1]; +#endif + } else { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAC == 0, stateless compression, setting link local prefix\n")); + /* Link local address compression */ + ip6hdr->dest.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->dest.addr[1] = 0; + } + + /* M=0, DAC=0, determining destination address length via DAM=xx */ + if ((lowpan6_buffer[1] & 0x03) == 0x00) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 00, no dst compression, fetching 128bits inline")); + /* DAM=00, copy full address */ + MEMCPY(&ip6hdr->dest.addr[0], lowpan6_buffer + lowpan6_offset, 16); + lowpan6_offset += 16; + } else if ((lowpan6_buffer[1] & 0x03) == 0x01) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 01, dst compression, 64bits inline\n")); + /* DAM=01, copy 64 inline bits, increase offset */ + MEMCPY(&ip6hdr->dest.addr[2], lowpan6_buffer + lowpan6_offset, 8); + lowpan6_offset += 8; + } else if ((lowpan6_buffer[1] & 0x03) == 0x02) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 01, dst compression, 16bits inline\n")); + /* DAM=10, copy 16 inline bits, increase offset */ + ip6hdr->dest.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->dest.addr[3] = lwip_htonl(0xfe000000UL | (lowpan6_buffer[lowpan6_offset] << 8) | lowpan6_buffer[lowpan6_offset + 1]); + lowpan6_offset += 2; + } else if ((lowpan6_buffer[1] & 0x03) == 0x03) { + /* DAM=11, no bits available, use other headers (not done here) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG,("DAM == 01, dst compression, 0bits inline, using other headers\n")); + if (dest->addr_len == 2) { + ip6hdr->dest.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->dest.addr[3] = lwip_htonl(0xfe000000UL | (dest->addr[0] << 8) | dest->addr[1]); + } else if (dest->addr_len == 8) { + ip6hdr->dest.addr[2] = lwip_htonl(((dest->addr[0] ^ 2) << 24) | (dest->addr[1] << 16) | dest->addr[2] << 8 | dest->addr[3]); + ip6hdr->dest.addr[3] = lwip_htonl((dest->addr[4] << 24) | (dest->addr[5] << 16) | dest->addr[6] << 8 | dest->addr[7]); + } else { + /* invalid destination address length */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Invalid destination address length\n")); + return ERR_VAL; + } + } + } + + + /* Next Header Compression (NHC) decoding? 
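Only the UDP NHC encoding of RFC 6282 ch 4.3 is handled: an octet of the form 11110CPP, where C flags an elided checksum and PP gives the port compression mode.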
*/ + if (lowpan6_buffer[0] & 0x04) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NHC decoding\n")); +#if LWIP_UDP + if ((lowpan6_buffer[lowpan6_offset] & 0xf8) == 0xf0) { + /* NHC: UDP */ + struct udp_hdr *udphdr; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NHC: UDP\n")); + + /* UDP compression */ + IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_UDP); + udphdr = (struct udp_hdr *)((u8_t *)decomp_buffer + ip6_offset); + if (decomp_bufsize < IP6_HLEN + UDP_HLEN) { + return ERR_MEM; + } + + /* Checksum decompression */ + if (lowpan6_buffer[lowpan6_offset] & 0x04) { + /* @todo support checksum decompress */ + LWIP_DEBUGF(LWIP_DBG_ON, ("NHC: UDP chechsum decompression UNSUPPORTED\n")); + return ERR_VAL; + } + + /* Decompress ports, according to RFC4944 */ + i = lowpan6_buffer[lowpan6_offset++] & 0x03; + if (i == 0) { + udphdr->src = lwip_htons(lowpan6_buffer[lowpan6_offset] << 8 | lowpan6_buffer[lowpan6_offset + 1]); + udphdr->dest = lwip_htons(lowpan6_buffer[lowpan6_offset + 2] << 8 | lowpan6_buffer[lowpan6_offset + 3]); + lowpan6_offset += 4; + } else if (i == 0x01) { + udphdr->src = lwip_htons(lowpan6_buffer[lowpan6_offset] << 8 | lowpan6_buffer[lowpan6_offset + 1]); + udphdr->dest = lwip_htons(0xf000 | lowpan6_buffer[lowpan6_offset + 2]); + lowpan6_offset += 3; + } else if (i == 0x02) { + udphdr->src = lwip_htons(0xf000 | lowpan6_buffer[lowpan6_offset]); + udphdr->dest = lwip_htons(lowpan6_buffer[lowpan6_offset + 1] << 8 | lowpan6_buffer[lowpan6_offset + 2]); + lowpan6_offset += 3; + } else if (i == 0x03) { + udphdr->src = lwip_htons(0xf0b0 | ((lowpan6_buffer[lowpan6_offset] >> 4) & 0x0f)); + udphdr->dest = lwip_htons(0xf0b0 | (lowpan6_buffer[lowpan6_offset] & 0x0f)); + lowpan6_offset += 1; + } + + udphdr->chksum = lwip_htons(lowpan6_buffer[lowpan6_offset] << 8 | lowpan6_buffer[lowpan6_offset + 1]); + lowpan6_offset += 2; + ip6_offset += UDP_HLEN; + if (datagram_size == 0) { + datagram_size = compressed_size - lowpan6_offset + ip6_offset; + } + udphdr->len = lwip_htons(datagram_size - IP6_HLEN); + + } else +#endif /* LWIP_UDP */ + { + LWIP_DEBUGF(LWIP_DBG_ON,("NHC: unsupported protocol!\n")); + /* @todo support NHC other than UDP */ + return ERR_VAL; + } + } + if (datagram_size == 0) { + datagram_size = compressed_size - lowpan6_offset + ip6_offset; + } + /* Infer IPv6 payload length for header */ + IP6H_PLEN_SET(ip6hdr, datagram_size - IP6_HLEN); + + if (lowpan6_offset > lowpan6_bufsize) { + /* input buffer overflow */ + return ERR_VAL; + } + *hdr_size_comp = lowpan6_offset; + *hdr_size_decomp = ip6_offset; + + return ERR_OK; +} + +struct pbuf * +lowpan6_decompress(struct pbuf *p, u16_t datagram_size, ip6_addr_t *lowpan6_contexts, + struct lowpan6_link_addr *src, struct lowpan6_link_addr *dest) +{ + struct pbuf *q; + u16_t lowpan6_offset, ip6_offset; + err_t err; + +#if LWIP_UDP +#define UDP_HLEN_ALLOC UDP_HLEN +#else +#define UDP_HLEN_ALLOC 0 +#endif + + /* Allocate a buffer for decompression. This buffer will be too big and will be + trimmed once the final size is known. 
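In the worst case the decompressed headers are IP6_HLEN (plus UDP_HLEN when UDP is enabled) longer than the compressed input, which is what the allocation below provides for.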
*/ + q = pbuf_alloc(PBUF_IP, p->len + IP6_HLEN + UDP_HLEN_ALLOC, PBUF_POOL); + if (q == NULL) { + pbuf_free(p); + return NULL; + } + if (q->len < IP6_HLEN + UDP_HLEN_ALLOC) { + /* The headers need to fit into the first pbuf */ + pbuf_free(p); + pbuf_free(q); + return NULL; + } + + /* Decompress the IPv6 (and possibly UDP) header(s) into the new pbuf */ + err = lowpan6_decompress_hdr((u8_t *)p->payload, p->len, (u8_t *)q->payload, q->len, + &lowpan6_offset, &ip6_offset, datagram_size, p->tot_len, lowpan6_contexts, src, dest); + if (err != ERR_OK) { + pbuf_free(p); + pbuf_free(q); + return NULL; + } + + /* Now we copy leftover contents from p to q, so we have all L2 and L3 headers + (and L4?) in a single pbuf: */ + + /* Hide the compressed headers in p */ + pbuf_remove_header(p, lowpan6_offset); + /* Temporarily hide the headers in q... */ + pbuf_remove_header(q, ip6_offset); + /* ... copy the rest of p into q... */ + pbuf_copy(q, p); + /* ... and reveal the headers again... */ + pbuf_add_header_force(q, ip6_offset); + /* ... trim the pbuf to its correct size... */ + pbuf_realloc(q, ip6_offset + p->len); + /* ... and cat possibly remaining (data-only) pbufs */ + if (p->next != NULL) { + pbuf_cat(q, p->next); + } + /* the original (first) pbuf can now be freed */ + p->next = NULL; + pbuf_free(p); + + /* all done */ + return q; +} + +#endif /* LWIP_6LOWPAN_IPHC */ +#endif /* LWIP_IPV6 */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c new file mode 100644 index 00000000..c8673ad0 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c @@ -0,0 +1,2510 @@ +/* + * auth.c - PPP authentication and phase control. + * + * Copyright (c) 1993-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Derived from main.c, which is: + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(_PATH_LASTLOG) && defined(__linux__) +#include +#endif + +#include +#include +#include + +#ifdef HAS_SHADOW +#include +#ifndef PW_PPP +#define PW_PPP PW_LOGIN +#endif +#endif + +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#if CCP_SUPPORT +#include "netif/ppp/ccp.h" +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT +#include "netif/ppp/ecp.h" +#endif /* ECP_SUPPORT */ +#include "netif/ppp/ipcp.h" +#if PAP_SUPPORT +#include "netif/ppp/upap.h" +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#include "netif/ppp/chap-new.h" +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#include "netif/ppp/eap.h" +#endif /* EAP_SUPPORT */ +#if CBCP_SUPPORT +#include "netif/ppp/cbcp.h" +#endif + +#if 0 /* UNUSED */ +#include "session.h" +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* Bits in scan_authfile return value */ +#define NONWILD_SERVER 1 +#define NONWILD_CLIENT 2 + +#define ISWILD(word) (word[0] == '*' && word[1] == 0) +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* List of addresses which the peer may use. */ +static struct permitted_ip *addresses[NUM_PPP]; + +/* Wordlist giving addresses which the peer may use + without authenticating itself. */ +static struct wordlist *noauth_addrs; + +/* Remote telephone number, if available */ +char remote_number[MAXNAMELEN]; + +/* Wordlist giving remote telephone numbers which may connect. */ +static struct wordlist *permitted_numbers; + +/* Extra options to apply, from the secrets file entry for the peer. */ +static struct wordlist *extra_options; +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* Set if we require authentication only because we have a default route. 
*/ +static bool default_auth; + +/* Hook to enable a plugin to control the idle time limit */ +int (*idle_time_hook) (struct ppp_idle *) = NULL; + +/* Hook for a plugin to say whether we can possibly authenticate any peer */ +int (*pap_check_hook) (void) = NULL; + +/* Hook for a plugin to check the PAP user and password */ +int (*pap_auth_hook) (char *user, char *passwd, char **msgp, + struct wordlist **paddrs, + struct wordlist **popts) = NULL; + +/* Hook for a plugin to know about the PAP user logout */ +void (*pap_logout_hook) (void) = NULL; + +/* Hook for a plugin to get the PAP password for authenticating us */ +int (*pap_passwd_hook) (char *user, char *passwd) = NULL; + +/* Hook for a plugin to say if we can possibly authenticate a peer using CHAP */ +int (*chap_check_hook) (void) = NULL; + +/* Hook for a plugin to get the CHAP password for authenticating us */ +int (*chap_passwd_hook) (char *user, char *passwd) = NULL; + +/* Hook for a plugin to say whether it is OK if the peer + refuses to authenticate. */ +int (*null_auth_hook) (struct wordlist **paddrs, + struct wordlist **popts) = NULL; + +int (*allowed_address_hook) (u32_t addr) = NULL; +#endif /* UNUSED */ + +#ifdef HAVE_MULTILINK +/* Hook for plugin to hear when an interface joins a multilink bundle */ +void (*multilink_join_hook) (void) = NULL; +#endif + +#if PPP_NOTIFY +/* A notifier for when the peer has authenticated itself, + and we are proceeding to the network phase. */ +struct notifier *auth_up_notifier = NULL; + +/* A notifier for when the link goes down. */ +struct notifier *link_down_notifier = NULL; +#endif /* PPP_NOTIFY */ + +/* + * Option variables. + */ +#if 0 /* MOVED TO ppp_settings */ +bool uselogin = 0; /* Use /etc/passwd for checking PAP */ +bool session_mgmt = 0; /* Do session management (login records) */ +bool cryptpap = 0; /* Passwords in pap-secrets are encrypted */ +bool refuse_pap = 0; /* Don't wanna auth. ourselves with PAP */ +bool refuse_chap = 0; /* Don't wanna auth. ourselves with CHAP */ +bool refuse_eap = 0; /* Don't wanna auth. ourselves with EAP */ +#if MSCHAP_SUPPORT +bool refuse_mschap = 0; /* Don't wanna auth. ourselves with MS-CHAP */ +bool refuse_mschap_v2 = 0; /* Don't wanna auth. ourselves with MS-CHAPv2 */ +#else /* MSCHAP_SUPPORT */ +bool refuse_mschap = 1; /* Don't wanna auth. ourselves with MS-CHAP */ +bool refuse_mschap_v2 = 1; /* Don't wanna auth. ourselves with MS-CHAPv2 */ +#endif /* MSCHAP_SUPPORT */ +bool usehostname = 0; /* Use hostname for our_name */ +bool auth_required = 0; /* Always require authentication from peer */ +bool allow_any_ip = 0; /* Allow peer to use any IP address */ +bool explicit_remote = 0; /* User specified explicit remote name */ +bool explicit_user = 0; /* Set if "user" option supplied */ +bool explicit_passwd = 0; /* Set if "password" option supplied */ +char remote_name[MAXNAMELEN]; /* Peer's name for authentication */ +static char *uafname; /* name of most recent +ua file */ + +extern char *crypt (const char *, const char *); +#endif /* UNUSED */ +/* Prototypes for procedures local to this file. 
*/ + +static void network_phase(ppp_pcb *pcb); +#if PPP_IDLETIMELIMIT +static void check_idle(void *arg); +#endif /* PPP_IDLETIMELIMIT */ +#if PPP_MAXCONNECT +static void connect_time_expired(void *arg); +#endif /* PPP_MAXCONNECT */ +#if 0 /* UNUSED */ +static int null_login (int); +/* static int get_pap_passwd (char *); */ +static int have_pap_secret (int *); +static int have_chap_secret (char *, char *, int, int *); +static int have_srp_secret (char *client, char *server, int need_ip, + int *lacks_ipp); +static int ip_addr_check (u32_t, struct permitted_ip *); +static int scan_authfile (FILE *, char *, char *, char *, + struct wordlist **, struct wordlist **, + char *, int); +static void free_wordlist (struct wordlist *); +static void set_allowed_addrs (int, struct wordlist *, struct wordlist *); +static int some_ip_ok (struct wordlist *); +static int setupapfile (char **); +static int privgroup (char **); +static int set_noauth_addr (char **); +static int set_permitted_number (char **); +static void check_access (FILE *, char *); +static int wordlist_count (struct wordlist *); +#endif /* UNUSED */ + +#ifdef MAXOCTETS +static void check_maxoctets (void *); +#endif + +#if PPP_OPTIONS +/* + * Authentication-related options. + */ +option_t auth_options[] = { + { "auth", o_bool, &auth_required, + "Require authentication from peer", OPT_PRIO | 1 }, + { "noauth", o_bool, &auth_required, + "Don't require peer to authenticate", OPT_PRIOSUB | OPT_PRIV, + &allow_any_ip }, + { "require-pap", o_bool, &lcp_wantoptions[0].neg_upap, + "Require PAP authentication from peer", + OPT_PRIOSUB | 1, &auth_required }, + { "+pap", o_bool, &lcp_wantoptions[0].neg_upap, + "Require PAP authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | 1, &auth_required }, + { "require-chap", o_bool, &auth_required, + "Require CHAP authentication from peer", + OPT_PRIOSUB | OPT_A2OR | MDTYPE_MD5, + &lcp_wantoptions[0].chap_mdtype }, + { "+chap", o_bool, &auth_required, + "Require CHAP authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2OR | MDTYPE_MD5, + &lcp_wantoptions[0].chap_mdtype }, +#if MSCHAP_SUPPORT + { "require-mschap", o_bool, &auth_required, + "Require MS-CHAP authentication from peer", + OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT, + &lcp_wantoptions[0].chap_mdtype }, + { "+mschap", o_bool, &auth_required, + "Require MS-CHAP authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT, + &lcp_wantoptions[0].chap_mdtype }, + { "require-mschap-v2", o_bool, &auth_required, + "Require MS-CHAPv2 authentication from peer", + OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT_V2, + &lcp_wantoptions[0].chap_mdtype }, + { "+mschap-v2", o_bool, &auth_required, + "Require MS-CHAPv2 authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT_V2, + &lcp_wantoptions[0].chap_mdtype }, +#endif /* MSCHAP_SUPPORT */ +#if 0 + { "refuse-pap", o_bool, &refuse_pap, + "Don't agree to auth to peer with PAP", 1 }, + { "-pap", o_bool, &refuse_pap, + "Don't allow PAP authentication with peer", OPT_ALIAS | 1 }, + { "refuse-chap", o_bool, &refuse_chap, + "Don't agree to auth to peer with CHAP", + OPT_A2CLRB | MDTYPE_MD5, + &lcp_allowoptions[0].chap_mdtype }, + { "-chap", o_bool, &refuse_chap, + "Don't allow CHAP authentication with peer", + OPT_ALIAS | OPT_A2CLRB | MDTYPE_MD5, + &lcp_allowoptions[0].chap_mdtype }, +#endif +#if MSCHAP_SUPPORT +#if 0 + { "refuse-mschap", o_bool, &refuse_mschap, + "Don't agree to auth to peer with MS-CHAP", + OPT_A2CLRB | MDTYPE_MICROSOFT, + 
&lcp_allowoptions[0].chap_mdtype }, + { "-mschap", o_bool, &refuse_mschap, + "Don't allow MS-CHAP authentication with peer", + OPT_ALIAS | OPT_A2CLRB | MDTYPE_MICROSOFT, + &lcp_allowoptions[0].chap_mdtype }, + { "refuse-mschap-v2", o_bool, &refuse_mschap_v2, + "Don't agree to auth to peer with MS-CHAPv2", + OPT_A2CLRB | MDTYPE_MICROSOFT_V2, + &lcp_allowoptions[0].chap_mdtype }, + { "-mschap-v2", o_bool, &refuse_mschap_v2, + "Don't allow MS-CHAPv2 authentication with peer", + OPT_ALIAS | OPT_A2CLRB | MDTYPE_MICROSOFT_V2, + &lcp_allowoptions[0].chap_mdtype }, +#endif +#endif /* MSCHAP_SUPPORT*/ +#if EAP_SUPPORT + { "require-eap", o_bool, &lcp_wantoptions[0].neg_eap, + "Require EAP authentication from peer", OPT_PRIOSUB | 1, + &auth_required }, +#if 0 + { "refuse-eap", o_bool, &refuse_eap, + "Don't agree to authenticate to peer with EAP", 1 }, +#endif +#endif /* EAP_SUPPORT */ + { "name", o_string, our_name, + "Set local name for authentication", + OPT_PRIO | OPT_PRIV | OPT_STATIC, NULL, MAXNAMELEN }, + + { "+ua", o_special, (void *)setupapfile, + "Get PAP user and password from file", + OPT_PRIO | OPT_A2STRVAL, &uafname }, + +#if 0 + { "user", o_string, user, + "Set name for auth with peer", OPT_PRIO | OPT_STATIC, + &explicit_user, MAXNAMELEN }, + + { "password", o_string, passwd, + "Password for authenticating us to the peer", + OPT_PRIO | OPT_STATIC | OPT_HIDE, + &explicit_passwd, MAXSECRETLEN }, +#endif + + { "usehostname", o_bool, &usehostname, + "Must use hostname for authentication", 1 }, + + { "remotename", o_string, remote_name, + "Set remote name for authentication", OPT_PRIO | OPT_STATIC, + &explicit_remote, MAXNAMELEN }, + + { "login", o_bool, &uselogin, + "Use system password database for PAP", OPT_A2COPY | 1 , + &session_mgmt }, + { "enable-session", o_bool, &session_mgmt, + "Enable session accounting for remote peers", OPT_PRIV | 1 }, + + { "papcrypt", o_bool, &cryptpap, + "PAP passwords are encrypted", 1 }, + + { "privgroup", o_special, (void *)privgroup, + "Allow group members to use privileged options", OPT_PRIV | OPT_A2LIST }, + + { "allow-ip", o_special, (void *)set_noauth_addr, + "Set IP address(es) which can be used without authentication", + OPT_PRIV | OPT_A2LIST }, + + { "remotenumber", o_string, remote_number, + "Set remote telephone number for authentication", OPT_PRIO | OPT_STATIC, + NULL, MAXNAMELEN }, + + { "allow-number", o_special, (void *)set_permitted_number, + "Set telephone number(s) which are allowed to connect", + OPT_PRIV | OPT_A2LIST }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +#if 0 /* UNUSED */ +/* + * setupapfile - specifies UPAP info for authenticating with peer. 
+ */ +static int +setupapfile(argv) + char **argv; +{ + FILE *ufile; + int l; + uid_t euid; + char u[MAXNAMELEN], p[MAXSECRETLEN]; + char *fname; + + lcp_allowoptions[0].neg_upap = 1; + + /* open user info file */ + fname = strdup(*argv); + if (fname == NULL) + novm("+ua file name"); + euid = geteuid(); + if (seteuid(getuid()) == -1) { + option_error("unable to reset uid before opening %s: %m", fname); + return 0; + } + ufile = fopen(fname, "r"); + if (seteuid(euid) == -1) + fatal("unable to regain privileges: %m"); + if (ufile == NULL) { + option_error("unable to open user login data file %s", fname); + return 0; + } + check_access(ufile, fname); + uafname = fname; + + /* get username */ + if (fgets(u, MAXNAMELEN - 1, ufile) == NULL + || fgets(p, MAXSECRETLEN - 1, ufile) == NULL) { + fclose(ufile); + option_error("unable to read user login data file %s", fname); + return 0; + } + fclose(ufile); + + /* get rid of newlines */ + l = strlen(u); + if (l > 0 && u[l-1] == '\n') + u[l-1] = 0; + l = strlen(p); + if (l > 0 && p[l-1] == '\n') + p[l-1] = 0; + + if (override_value("user", option_priority, fname)) { + strlcpy(ppp_settings.user, u, sizeof(ppp_settings.user)); + explicit_user = 1; + } + if (override_value("passwd", option_priority, fname)) { + strlcpy(ppp_settings.passwd, p, sizeof(ppp_settings.passwd)); + explicit_passwd = 1; + } + + return (1); +} + +/* + * privgroup - allow members of the group to have privileged access. + */ +static int +privgroup(argv) + char **argv; +{ + struct group *g; + int i; + + g = getgrnam(*argv); + if (g == 0) { + option_error("group %s is unknown", *argv); + return 0; + } + for (i = 0; i < ngroups; ++i) { + if (groups[i] == g->gr_gid) { + privileged = 1; + break; + } + } + return 1; +} + + +/* + * set_noauth_addr - set address(es) that can be used without authentication. + * Equivalent to specifying an entry like `"" * "" addr' in pap-secrets. + */ +static int +set_noauth_addr(argv) + char **argv; +{ + char *addr = *argv; + int l = strlen(addr) + 1; + struct wordlist *wp; + + wp = (struct wordlist *) malloc(sizeof(struct wordlist) + l); + if (wp == NULL) + novm("allow-ip argument"); + wp->word = (char *) (wp + 1); + wp->next = noauth_addrs; + MEMCPY(wp->word, addr, l); + noauth_addrs = wp; + return 1; +} + + +/* + * set_permitted_number - set remote telephone number(s) that may connect. + */ +static int +set_permitted_number(argv) + char **argv; +{ + char *number = *argv; + int l = strlen(number) + 1; + struct wordlist *wp; + + wp = (struct wordlist *) malloc(sizeof(struct wordlist) + l); + if (wp == NULL) + novm("allow-number argument"); + wp->word = (char *) (wp + 1); + wp->next = permitted_numbers; + MEMCPY(wp->word, number, l); + permitted_numbers = wp; + return 1; +} +#endif + +/* + * An Open on LCP has requested a change from Dead to Establish phase. + */ +void link_required(ppp_pcb *pcb) { + LWIP_UNUSED_ARG(pcb); +} + +#if 0 +/* + * Bring the link up to the point of being able to do ppp. + */ +void start_link(unit) + int unit; +{ + ppp_pcb *pcb = &ppp_pcb_list[unit]; + char *msg; + + status = EXIT_NEGOTIATION_FAILED; + new_phase(pcb, PPP_PHASE_SERIALCONN); + + hungup = 0; + devfd = the_channel->connect(); + msg = "Connect script failed"; + if (devfd < 0) + goto fail; + + /* set up the serial device as a ppp interface */ + /* + * N.B. we used to do tdb_writelock/tdb_writeunlock around this + * (from establish_ppp to set_ifunit). 
However, we won't be + * doing the set_ifunit in multilink mode, which is the only time + * we need the atomicity that the tdb_writelock/tdb_writeunlock + * gives us. Thus we don't need the tdb_writelock/tdb_writeunlock. + */ + fd_ppp = the_channel->establish_ppp(devfd); + msg = "ppp establishment failed"; + if (fd_ppp < 0) { + status = EXIT_FATAL_ERROR; + goto disconnect; + } + + if (!demand && ifunit >= 0) + set_ifunit(1); + + /* + * Start opening the connection and wait for + * incoming events (reply, timeout, etc.). + */ + if (ifunit >= 0) + ppp_notice("Connect: %s <--> %s", ifname, ppp_devnam); + else + ppp_notice("Starting negotiation on %s", ppp_devnam); + add_fd(fd_ppp); + + new_phase(pcb, PPP_PHASE_ESTABLISH); + + lcp_lowerup(pcb); + return; + + disconnect: + new_phase(pcb, PPP_PHASE_DISCONNECT); + if (the_channel->disconnect) + the_channel->disconnect(); + + fail: + new_phase(pcb, PPP_PHASE_DEAD); + if (the_channel->cleanup) + (*the_channel->cleanup)(); +} +#endif + +/* + * LCP has terminated the link; go to the Dead phase and take the + * physical layer down. + */ +void link_terminated(ppp_pcb *pcb) { + if (pcb->phase == PPP_PHASE_DEAD +#ifdef HAVE_MULTILINK + || pcb->phase == PPP_PHASE_MASTER +#endif /* HAVE_MULTILINK */ + ) + return; + new_phase(pcb, PPP_PHASE_DISCONNECT); + +#if 0 /* UNUSED */ + if (pap_logout_hook) { + pap_logout_hook(); + } + session_end(devnam); +#endif /* UNUSED */ + + if (!doing_multilink) { + ppp_notice("Connection terminated."); +#if PPP_STATS_SUPPORT + print_link_stats(); +#endif /* PPP_STATS_SUPPORT */ + } else + ppp_notice("Link terminated."); + + lcp_lowerdown(pcb); + + ppp_link_terminated(pcb); +#if 0 + /* + * Delete pid files before disestablishing ppp. Otherwise it + * can happen that another pppd gets the same unit and then + * we delete its pid file. + */ + if (!doing_multilink && !demand) + remove_pidfiles(); + + /* + * If we may want to bring the link up again, transfer + * the ppp unit back to the loopback. Set the + * real serial device back to its normal mode of operation. + */ + if (fd_ppp >= 0) { + remove_fd(fd_ppp); + clean_check(); + the_channel->disestablish_ppp(devfd); + if (doing_multilink) + mp_exit_bundle(); + fd_ppp = -1; + } + if (!hungup) + lcp_lowerdown(pcb); + if (!doing_multilink && !demand) + script_unsetenv("IFNAME"); + + /* + * Run disconnector script, if requested. + * XXX we may not be able to do this if the line has hung up! + */ + if (devfd >= 0 && the_channel->disconnect) { + the_channel->disconnect(); + devfd = -1; + } + if (the_channel->cleanup) + (*the_channel->cleanup)(); + + if (doing_multilink && multilink_master) { + if (!bundle_terminating) + new_phase(pcb, PPP_PHASE_MASTER); + else + mp_bundle_terminated(); + } else + new_phase(pcb, PPP_PHASE_DEAD); +#endif +} + +/* + * LCP has gone down; it will either die or try to re-establish. 
+ */ +void link_down(ppp_pcb *pcb) { +#if PPP_NOTIFY + notify(link_down_notifier, 0); +#endif /* PPP_NOTIFY */ + + if (!doing_multilink) { + upper_layers_down(pcb); + if (pcb->phase != PPP_PHASE_DEAD +#ifdef HAVE_MULTILINK + && pcb->phase != PPP_PHASE_MASTER +#endif /* HAVE_MULTILINK */ + ) + new_phase(pcb, PPP_PHASE_ESTABLISH); + } + /* XXX if doing_multilink, should do something to stop + network-layer traffic on the link */ +} + +void upper_layers_down(ppp_pcb *pcb) { + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + if (protp->protocol != PPP_LCP && protp->lowerdown != NULL) + (*protp->lowerdown)(pcb); + if (protp->protocol < 0xC000 && protp->close != NULL) + (*protp->close)(pcb, "LCP down"); + } + pcb->num_np_open = 0; + pcb->num_np_up = 0; +} + +/* + * The link is established. + * Proceed to the Dead, Authenticate or Network phase as appropriate. + */ +void link_established(ppp_pcb *pcb) { +#if PPP_AUTH_SUPPORT + int auth; +#if PPP_SERVER +#if PAP_SUPPORT + lcp_options *wo = &pcb->lcp_wantoptions; +#endif /* PAP_SUPPORT */ + lcp_options *go = &pcb->lcp_gotoptions; +#endif /* PPP_SERVER */ + lcp_options *ho = &pcb->lcp_hisoptions; +#endif /* PPP_AUTH_SUPPORT */ + int i; + const struct protent *protp; + + /* + * Tell higher-level protocols that LCP is up. + */ + if (!doing_multilink) { + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->protocol != PPP_LCP + && protp->lowerup != NULL) + (*protp->lowerup)(pcb); + } + +#if PPP_AUTH_SUPPORT +#if PPP_SERVER +#if PPP_ALLOWED_ADDRS + if (!auth_required && noauth_addrs != NULL) + set_allowed_addrs(unit, NULL, NULL); +#endif /* PPP_ALLOWED_ADDRS */ + + if (pcb->settings.auth_required && !(0 +#if PAP_SUPPORT + || go->neg_upap +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + || go->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap +#endif /* EAP_SUPPORT */ + )) { + +#if PPP_ALLOWED_ADDRS + /* + * We wanted the peer to authenticate itself, and it refused: + * if we have some address(es) it can use without auth, fine, + * otherwise treat it as though it authenticated with PAP using + * a username of "" and a password of "". If that's not OK, + * boot it out. 
+ */ + if (noauth_addrs != NULL) { + set_allowed_addrs(unit, NULL, NULL); + } else +#endif /* PPP_ALLOWED_ADDRS */ + if (!pcb->settings.null_login +#if PAP_SUPPORT + || !wo->neg_upap +#endif /* PAP_SUPPORT */ + ) { + ppp_warn("peer refused to authenticate: terminating link"); +#if 0 /* UNUSED */ + status = EXIT_PEER_AUTH_FAILED; +#endif /* UNUSED */ + pcb->err_code = PPPERR_AUTHFAIL; + lcp_close(pcb, "peer refused to authenticate"); + return; + } + } +#endif /* PPP_SERVER */ + + new_phase(pcb, PPP_PHASE_AUTHENTICATE); + auth = 0; +#if PPP_SERVER +#if EAP_SUPPORT + if (go->neg_eap) { + eap_authpeer(pcb, PPP_OUR_NAME); + auth |= EAP_PEER; + } else +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + if (go->neg_chap) { + chap_auth_peer(pcb, PPP_OUR_NAME, CHAP_DIGEST(go->chap_mdtype)); + auth |= CHAP_PEER; + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if (go->neg_upap) { + upap_authpeer(pcb); + auth |= PAP_PEER; + } else +#endif /* PAP_SUPPORT */ + {} +#endif /* PPP_SERVER */ + +#if EAP_SUPPORT + if (ho->neg_eap) { + eap_authwithpeer(pcb, pcb->settings.user); + auth |= EAP_WITHPEER; + } else +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + if (ho->neg_chap) { + chap_auth_with_peer(pcb, pcb->settings.user, CHAP_DIGEST(ho->chap_mdtype)); + auth |= CHAP_WITHPEER; + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if (ho->neg_upap) { + upap_authwithpeer(pcb, pcb->settings.user, pcb->settings.passwd); + auth |= PAP_WITHPEER; + } else +#endif /* PAP_SUPPORT */ + {} + + pcb->auth_pending = auth; + pcb->auth_done = 0; + + if (!auth) +#endif /* PPP_AUTH_SUPPORT */ + network_phase(pcb); +} + +/* + * Proceed to the network phase. + */ +static void network_phase(ppp_pcb *pcb) { +#if CBCP_SUPPORT + ppp_pcb *pcb = &ppp_pcb_list[unit]; +#endif +#if 0 /* UNUSED */ + lcp_options *go = &lcp_gotoptions[unit]; +#endif /* UNUSED */ + +#if 0 /* UNUSED */ + /* Log calling number. */ + if (*remote_number) + ppp_notice("peer from calling number %q authorized", remote_number); +#endif /* UNUSED */ + +#if PPP_NOTIFY + /* + * If the peer had to authenticate, notify it now. + */ + if (0 +#if CHAP_SUPPORT + || go->neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + || go->neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap +#endif /* EAP_SUPPORT */ + ) { + notify(auth_up_notifier, 0); + } +#endif /* PPP_NOTIFY */ + +#if CBCP_SUPPORT + /* + * If we negotiated callback, do it now. 
+ */ + if (go->neg_cbcp) { + new_phase(pcb, PPP_PHASE_CALLBACK); + (*cbcp_protent.open)(pcb); + return; + } +#endif + +#if PPP_OPTIONS + /* + * Process extra options from the secrets file + */ + if (extra_options) { + options_from_list(extra_options, 1); + free_wordlist(extra_options); + extra_options = 0; + } +#endif /* PPP_OPTIONS */ + start_networks(pcb); +} + +void start_networks(ppp_pcb *pcb) { +#if CCP_SUPPORT || ECP_SUPPORT + int i; + const struct protent *protp; +#endif /* CCP_SUPPORT || ECP_SUPPORT */ + + new_phase(pcb, PPP_PHASE_NETWORK); + +#ifdef HAVE_MULTILINK + if (multilink) { + if (mp_join_bundle()) { + if (multilink_join_hook) + (*multilink_join_hook)(); + if (updetach && !nodetach) + detach(); + return; + } + } +#endif /* HAVE_MULTILINK */ + +#ifdef PPP_FILTER + if (!demand) + set_filters(&pass_filter, &active_filter); +#endif +#if CCP_SUPPORT || ECP_SUPPORT + /* Start CCP and ECP */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if ( + (0 +#if ECP_SUPPORT + || protp->protocol == PPP_ECP +#endif /* ECP_SUPPORT */ +#if CCP_SUPPORT + || protp->protocol == PPP_CCP +#endif /* CCP_SUPPORT */ + ) + && protp->open != NULL) + (*protp->open)(pcb); +#endif /* CCP_SUPPORT || ECP_SUPPORT */ + + /* + * Bring up other network protocols iff encryption is not required. + */ + if (1 +#if ECP_SUPPORT + && !ecp_gotoptions[unit].required +#endif /* ECP_SUPPORT */ +#if MPPE_SUPPORT + && !pcb->ccp_gotoptions.mppe +#endif /* MPPE_SUPPORT */ + ) + continue_networks(pcb); +} + +void continue_networks(ppp_pcb *pcb) { + int i; + const struct protent *protp; + + /* + * Start the "real" network protocols. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->protocol < 0xC000 +#if CCP_SUPPORT + && protp->protocol != PPP_CCP +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT + && protp->protocol != PPP_ECP +#endif /* ECP_SUPPORT */ + && protp->open != NULL) { + (*protp->open)(pcb); + ++pcb->num_np_open; + } + + if (pcb->num_np_open == 0) + /* nothing to do */ + lcp_close(pcb, "No network protocols running"); +} + +#if PPP_AUTH_SUPPORT +#if PPP_SERVER +/* + * auth_check_passwd - Check the user name and passwd against configuration. + * + * returns: + * 0: Authentication failed. + * 1: Authentication succeeded. + * In either case, msg points to an appropriate message and msglen to the message len. + */ +int auth_check_passwd(ppp_pcb *pcb, char *auser, int userlen, char *apasswd, int passwdlen, const char **msg, int *msglen) { + int secretuserlen; + int secretpasswdlen; + + if (pcb->settings.user && pcb->settings.passwd) { + secretuserlen = (int)strlen(pcb->settings.user); + secretpasswdlen = (int)strlen(pcb->settings.passwd); + if (secretuserlen == userlen + && secretpasswdlen == passwdlen + && !memcmp(auser, pcb->settings.user, userlen) + && !memcmp(apasswd, pcb->settings.passwd, passwdlen) ) { + *msg = "Login ok"; + *msglen = sizeof("Login ok")-1; + return 1; + } + } + + *msg = "Login incorrect"; + *msglen = sizeof("Login incorrect")-1; + return 0; +} + +/* + * The peer has failed to authenticate himself using `protocol'. + */ +void auth_peer_fail(ppp_pcb *pcb, int protocol) { + LWIP_UNUSED_ARG(protocol); + /* + * Authentication failure: take the link down + */ +#if 0 /* UNUSED */ + status = EXIT_PEER_AUTH_FAILED; +#endif /* UNUSED */ + pcb->err_code = PPPERR_AUTHFAIL; + lcp_close(pcb, "Authentication failed"); +} + +/* + * The peer has been successfully authenticated using `protocol'. 
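 * Each method that was started set a bit in auth_pending; this clears that bit, records the method in auth_done, and once auth_pending reaches zero the link moves on to the network (or callback) phase.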
+ */ +void auth_peer_success(ppp_pcb *pcb, int protocol, int prot_flavor, const char *name, int namelen) { + int bit; +#ifndef HAVE_MULTILINK + LWIP_UNUSED_ARG(name); + LWIP_UNUSED_ARG(namelen); +#endif /* HAVE_MULTILINK */ + + switch (protocol) { +#if CHAP_SUPPORT + case PPP_CHAP: + bit = CHAP_PEER; + switch (prot_flavor) { + case CHAP_MD5: + bit |= CHAP_MD5_PEER; + break; +#if MSCHAP_SUPPORT + case CHAP_MICROSOFT: + bit |= CHAP_MS_PEER; + break; + case CHAP_MICROSOFT_V2: + bit |= CHAP_MS2_PEER; + break; +#endif /* MSCHAP_SUPPORT */ + default: + break; + } + break; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + case PPP_PAP: + bit = PAP_PEER; + break; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + case PPP_EAP: + bit = EAP_PEER; + break; +#endif /* EAP_SUPPORT */ + default: + ppp_warn("auth_peer_success: unknown protocol %x", protocol); + return; + } + +#ifdef HAVE_MULTILINK + /* + * Save the authenticated name of the peer for later. + */ + if (namelen > (int)sizeof(pcb->peer_authname) - 1) + namelen = (int)sizeof(pcb->peer_authname) - 1; + MEMCPY(pcb->peer_authname, name, namelen); + pcb->peer_authname[namelen] = 0; +#endif /* HAVE_MULTILINK */ +#if 0 /* UNUSED */ + script_setenv("PEERNAME", , 0); +#endif /* UNUSED */ + + /* Save the authentication method for later. */ + pcb->auth_done |= bit; + + /* + * If there is no more authentication still to be done, + * proceed to the network (or callback) phase. + */ + if ((pcb->auth_pending &= ~bit) == 0) + network_phase(pcb); +} +#endif /* PPP_SERVER */ + +/* + * We have failed to authenticate ourselves to the peer using `protocol'. + */ +void auth_withpeer_fail(ppp_pcb *pcb, int protocol) { + LWIP_UNUSED_ARG(protocol); + /* + * We've failed to authenticate ourselves to our peer. + * + * Some servers keep sending CHAP challenges, but there + * is no point in persisting without any way to get updated + * authentication secrets. + * + * He'll probably take the link down, and there's not much + * we can do except wait for that. + */ + pcb->err_code = PPPERR_AUTHFAIL; + lcp_close(pcb, "Failed to authenticate ourselves to peer"); +} + +/* + * We have successfully authenticated ourselves with the peer using `protocol'. + */ +void auth_withpeer_success(ppp_pcb *pcb, int protocol, int prot_flavor) { + int bit; + const char *prot = ""; + + switch (protocol) { +#if CHAP_SUPPORT + case PPP_CHAP: + bit = CHAP_WITHPEER; + prot = "CHAP"; + switch (prot_flavor) { + case CHAP_MD5: + bit |= CHAP_MD5_WITHPEER; + break; +#if MSCHAP_SUPPORT + case CHAP_MICROSOFT: + bit |= CHAP_MS_WITHPEER; + break; + case CHAP_MICROSOFT_V2: + bit |= CHAP_MS2_WITHPEER; + break; +#endif /* MSCHAP_SUPPORT */ + default: + break; + } + break; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + case PPP_PAP: + bit = PAP_WITHPEER; + prot = "PAP"; + break; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + case PPP_EAP: + bit = EAP_WITHPEER; + prot = "EAP"; + break; +#endif /* EAP_SUPPORT */ + default: + ppp_warn("auth_withpeer_success: unknown protocol %x", protocol); + bit = 0; + /* no break */ + } + + ppp_notice("%s authentication succeeded", prot); + + /* Save the authentication method for later. */ + pcb->auth_done |= bit; + + /* + * If there is no more authentication still being done, + * proceed to the network (or callback) phase. + */ + if ((pcb->auth_pending &= ~bit) == 0) + network_phase(pcb); +} +#endif /* PPP_AUTH_SUPPORT */ + + +/* + * np_up - a network protocol has come up. 
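Both success handlers above end the same way: every authentication exchange owns one bit in auth_pending, that bit is recorded in auth_done on success, and the link only advances to the network (or callback) phase once the pending mask reaches zero. A self-contained sketch of that bookkeeping, with made-up bit names (the real *_PEER / *_WITHPEER values live in the PPP auth headers):

#include <stdio.h>

#define AUTH_PAP_PEER      0x01  /* we are authenticating the peer with PAP */
#define AUTH_CHAP_WITHPEER 0x02  /* we are authenticating ourselves with CHAP */

static unsigned auth_pending;
static unsigned auth_done;

static void start_auth(void)
{
  /* both directions negotiated: two bits outstanding */
  auth_pending = AUTH_PAP_PEER | AUTH_CHAP_WITHPEER;
  auth_done = 0;
}

static void one_auth_succeeded(unsigned bit)
{
  auth_done |= bit;                 /* remember how this side authenticated */
  auth_pending &= ~bit;
  if (auth_pending == 0) {
    puts("all authentication finished -> enter network phase");
  } else {
    puts("still waiting for the other authentication exchange");
  }
}

int main(void)
{
  start_auth();
  one_auth_succeeded(AUTH_CHAP_WITHPEER); /* still waiting */
  one_auth_succeeded(AUTH_PAP_PEER);      /* -> network phase */
  printf("auth_done mask = 0x%02x\n", auth_done);
  return 0;
}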
+ */ +void np_up(ppp_pcb *pcb, int proto) { +#if PPP_IDLETIMELIMIT + int tlim; +#endif /* PPP_IDLETIMELIMIT */ + LWIP_UNUSED_ARG(proto); + + if (pcb->num_np_up == 0) { + /* + * At this point we consider that the link has come up successfully. + */ + new_phase(pcb, PPP_PHASE_RUNNING); + +#if PPP_IDLETIMELIMIT +#if 0 /* UNUSED */ + if (idle_time_hook != 0) + tlim = (*idle_time_hook)(NULL); + else +#endif /* UNUSED */ + tlim = pcb->settings.idle_time_limit; + if (tlim > 0) + TIMEOUT(check_idle, (void*)pcb, tlim); +#endif /* PPP_IDLETIMELIMIT */ + +#if PPP_MAXCONNECT + /* + * Set a timeout to close the connection once the maximum + * connect time has expired. + */ + if (pcb->settings.maxconnect > 0) + TIMEOUT(connect_time_expired, (void*)pcb, pcb->settings.maxconnect); +#endif /* PPP_MAXCONNECT */ + +#ifdef MAXOCTETS + if (maxoctets > 0) + TIMEOUT(check_maxoctets, NULL, maxoctets_timeout); +#endif + +#if 0 /* Unused */ + /* + * Detach now, if the updetach option was given. + */ + if (updetach && !nodetach) + detach(); +#endif /* Unused */ + } + ++pcb->num_np_up; +} + +/* + * np_down - a network protocol has gone down. + */ +void np_down(ppp_pcb *pcb, int proto) { + LWIP_UNUSED_ARG(proto); + if (--pcb->num_np_up == 0) { +#if PPP_IDLETIMELIMIT + UNTIMEOUT(check_idle, (void*)pcb); +#endif /* PPP_IDLETIMELIMIT */ +#if PPP_MAXCONNECT + UNTIMEOUT(connect_time_expired, NULL); +#endif /* PPP_MAXCONNECT */ +#ifdef MAXOCTETS + UNTIMEOUT(check_maxoctets, NULL); +#endif + new_phase(pcb, PPP_PHASE_NETWORK); + } +} + +/* + * np_finished - a network protocol has finished using the link. + */ +void np_finished(ppp_pcb *pcb, int proto) { + LWIP_UNUSED_ARG(proto); + if (--pcb->num_np_open <= 0) { + /* no further use for the link: shut up shop. */ + lcp_close(pcb, "No network protocols running"); + } +} + +#ifdef MAXOCTETS +static void +check_maxoctets(arg) + void *arg; +{ +#if PPP_STATS_SUPPORT + unsigned int used; + + update_link_stats(ifunit); + link_stats_valid=0; + + switch(maxoctets_dir) { + case PPP_OCTETS_DIRECTION_IN: + used = link_stats.bytes_in; + break; + case PPP_OCTETS_DIRECTION_OUT: + used = link_stats.bytes_out; + break; + case PPP_OCTETS_DIRECTION_MAXOVERAL: + case PPP_OCTETS_DIRECTION_MAXSESSION: + used = (link_stats.bytes_in > link_stats.bytes_out) ? link_stats.bytes_in : link_stats.bytes_out; + break; + default: + used = link_stats.bytes_in+link_stats.bytes_out; + break; + } + if (used > maxoctets) { + ppp_notice("Traffic limit reached. Limit: %u Used: %u", maxoctets, used); + status = EXIT_TRAFFIC_LIMIT; + lcp_close(pcb, "Traffic limit"); +#if 0 /* UNUSED */ + need_holdoff = 0; +#endif /* UNUSED */ + } else { + TIMEOUT(check_maxoctets, NULL, maxoctets_timeout); + } +#endif /* PPP_STATS_SUPPORT */ +} +#endif /* MAXOCTETS */ + +#if PPP_IDLETIMELIMIT +/* + * check_idle - check whether the link has been idle for long + * enough that we can shut it down. + */ +static void check_idle(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + struct ppp_idle idle; + time_t itime; + int tlim; + + if (!get_idle_time(pcb, &idle)) + return; +#if 0 /* UNUSED */ + if (idle_time_hook != 0) { + tlim = idle_time_hook(&idle); + } else { +#endif /* UNUSED */ + itime = LWIP_MIN(idle.xmit_idle, idle.recv_idle); + tlim = pcb->settings.idle_time_limit - itime; +#if 0 /* UNUSED */ + } +#endif /* UNUSED */ + if (tlim <= 0) { + /* link is idle: shut it down. 
*/ + ppp_notice("Terminating connection due to lack of activity."); + pcb->err_code = PPPERR_IDLETIMEOUT; + lcp_close(pcb, "Link inactive"); +#if 0 /* UNUSED */ + need_holdoff = 0; +#endif /* UNUSED */ + } else { + TIMEOUT(check_idle, (void*)pcb, tlim); + } +} +#endif /* PPP_IDLETIMELIMIT */ + +#if PPP_MAXCONNECT +/* + * connect_time_expired - log a message and close the connection. + */ +static void connect_time_expired(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + ppp_info("Connect time expired"); + pcb->err_code = PPPERR_CONNECTTIME; + lcp_close(pcb, "Connect time expired"); /* Close connection */ +} +#endif /* PPP_MAXCONNECT */ + +#if PPP_OPTIONS +/* + * auth_check_options - called to check authentication options. + */ +void +auth_check_options() +{ + lcp_options *wo = &lcp_wantoptions[0]; + int can_auth; + int lacks_ip; + + /* Default our_name to hostname, and user to our_name */ + if (our_name[0] == 0 || usehostname) + strlcpy(our_name, hostname, sizeof(our_name)); + /* If a blank username was explicitly given as an option, trust + the user and don't use our_name */ + if (ppp_settings.user[0] == 0 && !explicit_user) + strlcpy(ppp_settings.user, our_name, sizeof(ppp_settings.user)); + + /* + * If we have a default route, require the peer to authenticate + * unless the noauth option was given or the real user is root. + */ + if (!auth_required && !allow_any_ip && have_route_to(0) && !privileged) { + auth_required = 1; + default_auth = 1; + } + +#if CHAP_SUPPORT + /* If we selected any CHAP flavors, we should probably negotiate it. :-) */ + if (wo->chap_mdtype) + wo->neg_chap = 1; +#endif /* CHAP_SUPPORT */ + + /* If authentication is required, ask peer for CHAP, PAP, or EAP. */ + if (auth_required) { + allow_any_ip = 0; + if (1 +#if CHAP_SUPPORT + && !wo->neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + && !wo->neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + && !wo->neg_eap +#endif /* EAP_SUPPORT */ + ) { +#if CHAP_SUPPORT + wo->neg_chap = CHAP_MDTYPE_SUPPORTED != MDTYPE_NONE; + wo->chap_mdtype = CHAP_MDTYPE_SUPPORTED; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + wo->neg_upap = 1; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + wo->neg_eap = 1; +#endif /* EAP_SUPPORT */ + } + } else { +#if CHAP_SUPPORT + wo->neg_chap = 0; + wo->chap_mdtype = MDTYPE_NONE; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + wo->neg_upap = 0; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + wo->neg_eap = 0; +#endif /* EAP_SUPPORT */ + } + + /* + * Check whether we have appropriate secrets to use + * to authenticate the peer. Note that EAP can authenticate by way + * of a CHAP-like exchanges as well as SRP. + */ + lacks_ip = 0; +#if PAP_SUPPORT + can_auth = wo->neg_upap && (uselogin || have_pap_secret(&lacks_ip)); +#else + can_auth = 0; +#endif /* PAP_SUPPORT */ + if (!can_auth && (0 +#if CHAP_SUPPORT + || wo->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || wo->neg_eap +#endif /* EAP_SUPPORT */ + )) { +#if CHAP_SUPPORT + can_auth = have_chap_secret((explicit_remote? remote_name: NULL), + our_name, 1, &lacks_ip); +#else + can_auth = 0; +#endif + } + if (!can_auth +#if EAP_SUPPORT + && wo->neg_eap +#endif /* EAP_SUPPORT */ + ) { + can_auth = have_srp_secret((explicit_remote? 
remote_name: NULL), + our_name, 1, &lacks_ip); + } + + if (auth_required && !can_auth && noauth_addrs == NULL) { + if (default_auth) { + option_error( +"By default the remote system is required to authenticate itself"); + option_error( +"(because this system has a default route to the internet)"); + } else if (explicit_remote) + option_error( +"The remote system (%s) is required to authenticate itself", + remote_name); + else + option_error( +"The remote system is required to authenticate itself"); + option_error( +"but I couldn't find any suitable secret (password) for it to use to do so."); + if (lacks_ip) + option_error( +"(None of the available passwords would let it use an IP address.)"); + + exit(1); + } + + /* + * Early check for remote number authorization. + */ + if (!auth_number()) { + ppp_warn("calling number %q is not authorized", remote_number); + exit(EXIT_CNID_AUTH_FAILED); + } +} +#endif /* PPP_OPTIONS */ + +#if 0 /* UNUSED */ +/* + * auth_reset - called when LCP is starting negotiations to recheck + * authentication options, i.e. whether we have appropriate secrets + * to use for authenticating ourselves and/or the peer. + */ +void +auth_reset(unit) + int unit; +{ + lcp_options *go = &lcp_gotoptions[unit]; + lcp_options *ao = &lcp_allowoptions[unit]; + int hadchap; + + hadchap = -1; + ao->neg_upap = !refuse_pap && (passwd[0] != 0 || get_pap_passwd(NULL)); + ao->neg_chap = (!refuse_chap || !refuse_mschap || !refuse_mschap_v2) + && (passwd[0] != 0 || + (hadchap = have_chap_secret(user, (explicit_remote? remote_name: + NULL), 0, NULL))); + ao->neg_eap = !refuse_eap && ( + passwd[0] != 0 || + (hadchap == 1 || (hadchap == -1 && have_chap_secret(user, + (explicit_remote? remote_name: NULL), 0, NULL))) || + have_srp_secret(user, (explicit_remote? remote_name: NULL), 0, NULL)); + + hadchap = -1; + if (go->neg_upap && !uselogin && !have_pap_secret(NULL)) + go->neg_upap = 0; + if (go->neg_chap) { + if (!(hadchap = have_chap_secret((explicit_remote? remote_name: NULL), + our_name, 1, NULL))) + go->neg_chap = 0; + } + if (go->neg_eap && + (hadchap == 0 || (hadchap == -1 && + !have_chap_secret((explicit_remote? remote_name: NULL), our_name, + 1, NULL))) && + !have_srp_secret((explicit_remote? remote_name: NULL), our_name, 1, + NULL)) + go->neg_eap = 0; +} + +/* + * check_passwd - Check the user name and passwd against the PAP secrets + * file. If requested, also check against the system password database, + * and login the user if OK. + * + * returns: + * UPAP_AUTHNAK: Authentication failed. + * UPAP_AUTHACK: Authentication succeeded. + * In either case, msg points to an appropriate message. + */ +int +check_passwd(unit, auser, userlen, apasswd, passwdlen, msg) + int unit; + char *auser; + int userlen; + char *apasswd; + int passwdlen; + char **msg; +{ + return UPAP_AUTHNAK; + int ret; + char *filename; + FILE *f; + struct wordlist *addrs = NULL, *opts = NULL; + char passwd[256], user[256]; + char secret[MAXWORDLEN]; + static int attempts = 0; + + /* + * Make copies of apasswd and auser, then null-terminate them. + * If there are unprintable characters in the password, make + * them visible. + */ + slprintf(ppp_settings.passwd, sizeof(ppp_settings.passwd), "%.*v", passwdlen, apasswd); + slprintf(ppp_settings.user, sizeof(ppp_settings.user), "%.*v", userlen, auser); + *msg = ""; + + /* + * Check if a plugin wants to handle this. 
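Every #if PAP_SUPPORT / CHAP_SUPPORT / EAP_SUPPORT / PPP_SERVER branch in this file is resolved at build time from ppp_opts.h defaults that lwipopts.h may override. A typical client-side fragment, using the option names as they appear in lwIP 2.1's ppp_opts.h (verify against the copy in this tree):

/* lwipopts.h -- excerpt: authentication-related PPP build switches */
#define PPP_SUPPORT     1   /* build the PPP stack at all */
#define PPPOS_SUPPORT   1   /* PPP over serial */
#define PPP_SERVER      0   /* we only authenticate ourselves, not the peer */
#define PAP_SUPPORT     1   /* allow PAP */
#define CHAP_SUPPORT    1   /* allow CHAP (MD5) */
#define MSCHAP_SUPPORT  0   /* MS-CHAP / MS-CHAPv2 (needed for MPPE) */
#define EAP_SUPPORT     0
/* PPP_AUTH_SUPPORT is normally derived from the switches above and does not
 * need to be set by hand. */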
+ */ + if (pap_auth_hook) { + ret = (*pap_auth_hook)(ppp_settings.user, ppp_settings.passwd, msg, &addrs, &opts); + if (ret >= 0) { + /* note: set_allowed_addrs() saves opts (but not addrs): + don't free it! */ + if (ret) + set_allowed_addrs(unit, addrs, opts); + else if (opts != 0) + free_wordlist(opts); + if (addrs != 0) + free_wordlist(addrs); + BZERO(ppp_settings.passwd, sizeof(ppp_settings.passwd)); + return ret? UPAP_AUTHACK: UPAP_AUTHNAK; + } + } + + /* + * Open the file of pap secrets and scan for a suitable secret + * for authenticating this user. + */ + filename = _PATH_UPAPFILE; + addrs = opts = NULL; + ret = UPAP_AUTHNAK; + f = fopen(filename, "r"); + if (f == NULL) { + ppp_error("Can't open PAP password file %s: %m", filename); + + } else { + check_access(f, filename); + if (scan_authfile(f, ppp_settings.user, our_name, secret, &addrs, &opts, filename, 0) < 0) { + ppp_warn("no PAP secret found for %s", user); + } else { + /* + * If the secret is "@login", it means to check + * the password against the login database. + */ + int login_secret = strcmp(secret, "@login") == 0; + ret = UPAP_AUTHACK; + if (uselogin || login_secret) { + /* login option or secret is @login */ + if (session_full(ppp_settings.user, ppp_settings.passwd, devnam, msg) == 0) { + ret = UPAP_AUTHNAK; + } + } else if (session_mgmt) { + if (session_check(ppp_settings.user, NULL, devnam, NULL) == 0) { + ppp_warn("Peer %q failed PAP Session verification", user); + ret = UPAP_AUTHNAK; + } + } + if (secret[0] != 0 && !login_secret) { + /* password given in pap-secrets - must match */ + if ((cryptpap || strcmp(ppp_settings.passwd, secret) != 0) + && strcmp(crypt(ppp_settings.passwd, secret), secret) != 0) + ret = UPAP_AUTHNAK; + } + } + fclose(f); + } + + if (ret == UPAP_AUTHNAK) { + if (**msg == 0) + *msg = "Login incorrect"; + /* + * XXX can we ever get here more than once?? + * Frustrate passwd stealer programs. + * Allow 10 tries, but start backing off after 3 (stolen from login). + * On 10'th, drop the connection. + */ + if (attempts++ >= 10) { + ppp_warn("%d LOGIN FAILURES ON %s, %s", attempts, devnam, user); + lcp_close(pcb, "login failed"); + } + if (attempts > 3) + sleep((u_int) (attempts - 3) * 5); + if (opts != NULL) + free_wordlist(opts); + + } else { + attempts = 0; /* Reset count */ + if (**msg == 0) + *msg = "Login ok"; + set_allowed_addrs(unit, addrs, opts); + } + + if (addrs != NULL) + free_wordlist(addrs); + BZERO(ppp_settings.passwd, sizeof(ppp_settings.passwd)); + BZERO(secret, sizeof(secret)); + + return ret; +} + +/* + * null_login - Check if a username of "" and a password of "" are + * acceptable, and iff so, set the list of acceptable IP addresses + * and return 1. + */ +static int +null_login(unit) + int unit; +{ + char *filename; + FILE *f; + int i, ret; + struct wordlist *addrs, *opts; + char secret[MAXWORDLEN]; + + /* + * Check if a plugin wants to handle this. + */ + ret = -1; + if (null_auth_hook) + ret = (*null_auth_hook)(&addrs, &opts); + + /* + * Open the file of pap secrets and scan for a suitable secret. 
+ */ + if (ret <= 0) { + filename = _PATH_UPAPFILE; + addrs = NULL; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + check_access(f, filename); + + i = scan_authfile(f, "", our_name, secret, &addrs, &opts, filename, 0); + ret = i >= 0 && secret[0] == 0; + BZERO(secret, sizeof(secret)); + fclose(f); + } + + if (ret) + set_allowed_addrs(unit, addrs, opts); + else if (opts != 0) + free_wordlist(opts); + if (addrs != 0) + free_wordlist(addrs); + + return ret; +} + +/* + * get_pap_passwd - get a password for authenticating ourselves with + * our peer using PAP. Returns 1 on success, 0 if no suitable password + * could be found. + * Assumes passwd points to MAXSECRETLEN bytes of space (if non-null). + */ +static int +get_pap_passwd(passwd) + char *passwd; +{ + char *filename; + FILE *f; + int ret; + char secret[MAXWORDLEN]; + + /* + * Check whether a plugin wants to supply this. + */ + if (pap_passwd_hook) { + ret = (*pap_passwd_hook)(ppp_settings,user, ppp_settings.passwd); + if (ret >= 0) + return ret; + } + + filename = _PATH_UPAPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + check_access(f, filename); + ret = scan_authfile(f, user, + (remote_name[0]? remote_name: NULL), + secret, NULL, NULL, filename, 0); + fclose(f); + if (ret < 0) + return 0; + if (passwd != NULL) + strlcpy(passwd, secret, MAXSECRETLEN); + BZERO(secret, sizeof(secret)); + return 1; +} + +/* + * have_pap_secret - check whether we have a PAP file with any + * secrets that we could possibly use for authenticating the peer. + */ +static int +have_pap_secret(lacks_ipp) + int *lacks_ipp; +{ + FILE *f; + int ret; + char *filename; + struct wordlist *addrs; + + /* let the plugin decide, if there is one */ + if (pap_check_hook) { + ret = (*pap_check_hook)(); + if (ret >= 0) + return ret; + } + + filename = _PATH_UPAPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + + ret = scan_authfile(f, (explicit_remote? remote_name: NULL), our_name, + NULL, &addrs, NULL, filename, 0); + fclose(f); + if (ret >= 0 && !some_ip_ok(addrs)) { + if (lacks_ipp != 0) + *lacks_ipp = 1; + ret = -1; + } + if (addrs != 0) + free_wordlist(addrs); + + return ret >= 0; +} + +/* + * have_chap_secret - check whether we have a CHAP file with a + * secret that we could possibly use for authenticating `client' + * on `server'. Either can be the null string, meaning we don't + * know the identity yet. + */ +static int +have_chap_secret(client, server, need_ip, lacks_ipp) + char *client; + char *server; + int need_ip; + int *lacks_ipp; +{ + FILE *f; + int ret; + char *filename; + struct wordlist *addrs; + + if (chap_check_hook) { + ret = (*chap_check_hook)(); + if (ret >= 0) { + return ret; + } + } + + filename = _PATH_CHAPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + + if (client != NULL && client[0] == 0) + client = NULL; + else if (server != NULL && server[0] == 0) + server = NULL; + + ret = scan_authfile(f, client, server, NULL, &addrs, NULL, filename, 0); + fclose(f); + if (ret >= 0 && need_ip && !some_ip_ok(addrs)) { + if (lacks_ipp != 0) + *lacks_ipp = 1; + ret = -1; + } + if (addrs != 0) + free_wordlist(addrs); + + return ret >= 0; +} + +/* + * have_srp_secret - check whether we have a SRP file with a + * secret that we could possibly use for authenticating `client' + * on `server'. Either can be the null string, meaning we don't + * know the identity yet. 
+ */ +static int +have_srp_secret(client, server, need_ip, lacks_ipp) + char *client; + char *server; + int need_ip; + int *lacks_ipp; +{ + FILE *f; + int ret; + char *filename; + struct wordlist *addrs; + + filename = _PATH_SRPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + + if (client != NULL && client[0] == 0) + client = NULL; + else if (server != NULL && server[0] == 0) + server = NULL; + + ret = scan_authfile(f, client, server, NULL, &addrs, NULL, filename, 0); + fclose(f); + if (ret >= 0 && need_ip && !some_ip_ok(addrs)) { + if (lacks_ipp != 0) + *lacks_ipp = 1; + ret = -1; + } + if (addrs != 0) + free_wordlist(addrs); + + return ret >= 0; +} +#endif /* UNUSED */ + +#if PPP_AUTH_SUPPORT +/* + * get_secret - open the CHAP secret file and return the secret + * for authenticating the given client on the given server. + * (We could be either client or server). + */ +int get_secret(ppp_pcb *pcb, const char *client, const char *server, char *secret, int *secret_len, int am_server) { + int len; + LWIP_UNUSED_ARG(server); + LWIP_UNUSED_ARG(am_server); + + if (!client || !client[0] || !pcb->settings.user || !pcb->settings.passwd || strcmp(client, pcb->settings.user)) { + return 0; + } + + len = (int)strlen(pcb->settings.passwd); + if (len > MAXSECRETLEN) { + ppp_error("Secret for %s on %s is too long", client, server); + len = MAXSECRETLEN; + } + + MEMCPY(secret, pcb->settings.passwd, len); + *secret_len = len; + return 1; + +#if 0 /* UNUSED */ + FILE *f; + int ret, len; + char *filename; + struct wordlist *addrs, *opts; + char secbuf[MAXWORDLEN]; + struct wordlist *addrs; + addrs = NULL; + + if (!am_server && ppp_settings.passwd[0] != 0) { + strlcpy(secbuf, ppp_settings.passwd, sizeof(secbuf)); + } else if (!am_server && chap_passwd_hook) { + if ( (*chap_passwd_hook)(client, secbuf) < 0) { + ppp_error("Unable to obtain CHAP password for %s on %s from plugin", + client, server); + return 0; + } + } else { + filename = _PATH_CHAPFILE; + addrs = NULL; + secbuf[0] = 0; + + f = fopen(filename, "r"); + if (f == NULL) { + ppp_error("Can't open chap secret file %s: %m", filename); + return 0; + } + check_access(f, filename); + + ret = scan_authfile(f, client, server, secbuf, &addrs, &opts, filename, 0); + fclose(f); + if (ret < 0) + return 0; + + if (am_server) + set_allowed_addrs(unit, addrs, opts); + else if (opts != 0) + free_wordlist(opts); + if (addrs != 0) + free_wordlist(addrs); + } + + len = strlen(secbuf); + if (len > MAXSECRETLEN) { + ppp_error("Secret for %s on %s is too long", client, server); + len = MAXSECRETLEN; + } + MEMCPY(secret, secbuf, len); + BZERO(secbuf, sizeof(secbuf)); + *secret_len = len; + + return 1; +#endif /* UNUSED */ +} +#endif /* PPP_AUTH_SUPPORT */ + + +#if 0 /* UNUSED */ +/* + * get_srp_secret - open the SRP secret file and return the secret + * for authenticating the given client on the given server. + * (We could be either client or server). 
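In this port, get_secret() above simply hands back the password configured on the pcb (installed via ppp_set_auth()); the CHAP code then uses it as RFC 1994 prescribes, Response = MD5(Identifier || secret || Challenge). A sketch of that derivation follows; md5() here is a hypothetical one-shot helper standing in for the digest implementation the CHAP code actually links against, and the fixed buffer sizes are for illustration only.

#include <string.h>

/* Hypothetical one-shot MD5 helper (stand-in for the real digest code). */
extern void md5(const unsigned char *msg, size_t len, unsigned char out[16]);

/* RFC 1994: Response = MD5(Identifier || secret || Challenge). */
int chap_md5_response(unsigned char id,
                      const char *secret,                 /* from get_secret() */
                      const unsigned char *challenge, size_t challenge_len,
                      unsigned char response[16])
{
  unsigned char buf[1 + 64 + 64];
  size_t slen = strlen(secret), n = 0;

  if (slen > 64 || challenge_len > 64)
    return 0;       /* keep the sketch simple; the real code sizes its
                       buffers from MAXSECRETLEN */

  buf[n++] = id;                                 /* identifier octet */
  memcpy(&buf[n], secret, slen);                 n += slen;
  memcpy(&buf[n], challenge, challenge_len);     n += challenge_len;
  md5(buf, n, response);
  return 1;
}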
+ */ +int +get_srp_secret(unit, client, server, secret, am_server) + int unit; + char *client; + char *server; + char *secret; + int am_server; +{ + FILE *fp; + int ret; + char *filename; + struct wordlist *addrs, *opts; + + if (!am_server && ppp_settings.passwd[0] != '\0') { + strlcpy(secret, ppp_settings.passwd, MAXWORDLEN); + } else { + filename = _PATH_SRPFILE; + addrs = NULL; + + fp = fopen(filename, "r"); + if (fp == NULL) { + ppp_error("Can't open srp secret file %s: %m", filename); + return 0; + } + check_access(fp, filename); + + secret[0] = '\0'; + ret = scan_authfile(fp, client, server, secret, &addrs, &opts, + filename, am_server); + fclose(fp); + if (ret < 0) + return 0; + + if (am_server) + set_allowed_addrs(unit, addrs, opts); + else if (opts != NULL) + free_wordlist(opts); + if (addrs != NULL) + free_wordlist(addrs); + } + + return 1; +} + +/* + * set_allowed_addrs() - set the list of allowed addresses. + * Also looks for `--' indicating options to apply for this peer + * and leaves the following words in extra_options. + */ +static void +set_allowed_addrs(unit, addrs, opts) + int unit; + struct wordlist *addrs; + struct wordlist *opts; +{ + int n; + struct wordlist *ap, **plink; + struct permitted_ip *ip; + char *ptr_word, *ptr_mask; + struct hostent *hp; + struct netent *np; + u32_t a, mask, ah, offset; + struct ipcp_options *wo = &ipcp_wantoptions[unit]; + u32_t suggested_ip = 0; + + if (addresses[unit] != NULL) + free(addresses[unit]); + addresses[unit] = NULL; + if (extra_options != NULL) + free_wordlist(extra_options); + extra_options = opts; + + /* + * Count the number of IP addresses given. + */ + n = wordlist_count(addrs) + wordlist_count(noauth_addrs); + if (n == 0) + return; + ip = (struct permitted_ip *) malloc((n + 1) * sizeof(struct permitted_ip)); + if (ip == 0) + return; + + /* temporarily append the noauth_addrs list to addrs */ + for (plink = &addrs; *plink != NULL; plink = &(*plink)->next) + ; + *plink = noauth_addrs; + + n = 0; + for (ap = addrs; ap != NULL; ap = ap->next) { + /* "-" means no addresses authorized, "*" means any address allowed */ + ptr_word = ap->word; + if (strcmp(ptr_word, "-") == 0) + break; + if (strcmp(ptr_word, "*") == 0) { + ip[n].permit = 1; + ip[n].base = ip[n].mask = 0; + ++n; + break; + } + + ip[n].permit = 1; + if (*ptr_word == '!') { + ip[n].permit = 0; + ++ptr_word; + } + + mask = ~ (u32_t) 0; + offset = 0; + ptr_mask = strchr (ptr_word, '/'); + if (ptr_mask != NULL) { + int bit_count; + char *endp; + + bit_count = (int) strtol (ptr_mask+1, &endp, 10); + if (bit_count <= 0 || bit_count > 32) { + ppp_warn("invalid address length %v in auth. 
address list", + ptr_mask+1); + continue; + } + bit_count = 32 - bit_count; /* # bits in host part */ + if (*endp == '+') { + offset = ifunit + 1; + ++endp; + } + if (*endp != 0) { + ppp_warn("invalid address length syntax: %v", ptr_mask+1); + continue; + } + *ptr_mask = '\0'; + mask <<= bit_count; + } + + hp = gethostbyname(ptr_word); + if (hp != NULL && hp->h_addrtype == AF_INET) { + a = *(u32_t *)hp->h_addr; + } else { + np = getnetbyname (ptr_word); + if (np != NULL && np->n_addrtype == AF_INET) { + a = lwip_htonl ((u32_t)np->n_net); + if (ptr_mask == NULL) { + /* calculate appropriate mask for net */ + ah = lwip_ntohl(a); + if (IN_CLASSA(ah)) + mask = IN_CLASSA_NET; + else if (IN_CLASSB(ah)) + mask = IN_CLASSB_NET; + else if (IN_CLASSC(ah)) + mask = IN_CLASSC_NET; + } + } else { + a = inet_addr (ptr_word); + } + } + + if (ptr_mask != NULL) + *ptr_mask = '/'; + + if (a == (u32_t)-1L) { + ppp_warn("unknown host %s in auth. address list", ap->word); + continue; + } + if (offset != 0) { + if (offset >= ~mask) { + ppp_warn("interface unit %d too large for subnet %v", + ifunit, ptr_word); + continue; + } + a = lwip_htonl((lwip_ntohl(a) & mask) + offset); + mask = ~(u32_t)0; + } + ip[n].mask = lwip_htonl(mask); + ip[n].base = a & ip[n].mask; + ++n; + if (~mask == 0 && suggested_ip == 0) + suggested_ip = a; + } + *plink = NULL; + + ip[n].permit = 0; /* make the last entry forbid all addresses */ + ip[n].base = 0; /* to terminate the list */ + ip[n].mask = 0; + + addresses[unit] = ip; + + /* + * If the address given for the peer isn't authorized, or if + * the user hasn't given one, AND there is an authorized address + * which is a single host, then use that if we find one. + */ + if (suggested_ip != 0 + && (wo->hisaddr == 0 || !auth_ip_addr(unit, wo->hisaddr))) { + wo->hisaddr = suggested_ip; + /* + * Do we insist on this address? No, if there are other + * addresses authorized than the suggested one. + */ + if (n > 1) + wo->accept_remote = 1; + } +} + +/* + * auth_ip_addr - check whether the peer is authorized to use + * a given IP address. Returns 1 if authorized, 0 otherwise. + */ +int +auth_ip_addr(unit, addr) + int unit; + u32_t addr; +{ + int ok; + + /* don't allow loopback or multicast address */ + if (bad_ip_adrs(addr)) + return 0; + + if (allowed_address_hook) { + ok = allowed_address_hook(addr); + if (ok >= 0) return ok; + } + + if (addresses[unit] != NULL) { + ok = ip_addr_check(addr, addresses[unit]); + if (ok >= 0) + return ok; + } + + if (auth_required) + return 0; /* no addresses authorized */ + return allow_any_ip || privileged || !have_route_to(addr); +} + +static int +ip_addr_check(addr, addrs) + u32_t addr; + struct permitted_ip *addrs; +{ + for (; ; ++addrs) + if ((addr & addrs->mask) == addrs->base) + return addrs->permit; +} + +/* + * bad_ip_adrs - return 1 if the IP address is one we don't want + * to use, such as an address in the loopback net or a multicast address. + * addr is in network byte order. + */ +int +bad_ip_adrs(addr) + u32_t addr; +{ + addr = lwip_ntohl(addr); + return (addr >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET + || IN_MULTICAST(addr) || IN_BADCLASS(addr); +} + +/* + * some_ip_ok - check a wordlist to see if it authorizes any + * IP address(es). 
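All of the peer-address checks above reduce to one comparison: each permitted_ip entry stores a base and a mask, and a candidate address matches when (addr & mask) == base. A standalone sketch of the same arithmetic for an "a.b.c.d/nn" entry, kept in host byte order for readability:

#include <stdint.h>
#include <stdio.h>

/* Build the netmask for a /prefix entry (host byte order). */
static uint32_t prefix_to_mask(int prefix)
{
  return (prefix <= 0)  ? 0u
       : (prefix >= 32) ? 0xFFFFFFFFu
       : 0xFFFFFFFFu << (32 - prefix);
}

/* Same test as ip_addr_check(): the address matches when the masked bits agree. */
static int addr_matches(uint32_t addr, uint32_t base, uint32_t mask)
{
  return (addr & mask) == (base & mask);
}

int main(void)
{
  uint32_t base = 0xC0A80100u;            /* 192.168.1.0 */
  uint32_t mask = prefix_to_mask(24);     /* 255.255.255.0 */

  printf("192.168.1.42 -> %d\n", addr_matches(0xC0A8012Au, base, mask)); /* 1 */
  printf("192.168.2.42 -> %d\n", addr_matches(0xC0A8022Au, base, mask)); /* 0 */
  return 0;
}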
+ */ +static int +some_ip_ok(addrs) + struct wordlist *addrs; +{ + for (; addrs != 0; addrs = addrs->next) { + if (addrs->word[0] == '-') + break; + if (addrs->word[0] != '!') + return 1; /* some IP address is allowed */ + } + return 0; +} + +/* + * auth_number - check whether the remote number is allowed to connect. + * Returns 1 if authorized, 0 otherwise. + */ +int +auth_number() +{ + struct wordlist *wp = permitted_numbers; + int l; + + /* Allow all if no authorization list. */ + if (!wp) + return 1; + + /* Allow if we have a match in the authorization list. */ + while (wp) { + /* trailing '*' wildcard */ + l = strlen(wp->word); + if ((wp->word)[l - 1] == '*') + l--; + if (!strncasecmp(wp->word, remote_number, l)) + return 1; + wp = wp->next; + } + + return 0; +} + +/* + * check_access - complain if a secret file has too-liberal permissions. + */ +static void +check_access(f, filename) + FILE *f; + char *filename; +{ + struct stat sbuf; + + if (fstat(fileno(f), &sbuf) < 0) { + ppp_warn("cannot stat secret file %s: %m", filename); + } else if ((sbuf.st_mode & (S_IRWXG | S_IRWXO)) != 0) { + ppp_warn("Warning - secret file %s has world and/or group access", + filename); + } +} + +/* + * scan_authfile - Scan an authorization file for a secret suitable + * for authenticating `client' on `server'. The return value is -1 + * if no secret is found, otherwise >= 0. The return value has + * NONWILD_CLIENT set if the secret didn't have "*" for the client, and + * NONWILD_SERVER set if the secret didn't have "*" for the server. + * Any following words on the line up to a "--" (i.e. address authorization + * info) are placed in a wordlist and returned in *addrs. Any + * following words (extra options) are placed in a wordlist and + * returned in *opts. + * We assume secret is NULL or points to MAXWORDLEN bytes of space. + * Flags are non-zero if we need two colons in the secret in order to + * match. + */ +static int +scan_authfile(f, client, server, secret, addrs, opts, filename, flags) + FILE *f; + char *client; + char *server; + char *secret; + struct wordlist **addrs; + struct wordlist **opts; + char *filename; + int flags; +{ + int newline, xxx; + int got_flag, best_flag; + FILE *sf; + struct wordlist *ap, *addr_list, *alist, **app; + char word[MAXWORDLEN]; + char atfile[MAXWORDLEN]; + char lsecret[MAXWORDLEN]; + char *cp; + + if (addrs != NULL) + *addrs = NULL; + if (opts != NULL) + *opts = NULL; + addr_list = NULL; + if (!getword(f, word, &newline, filename)) + return -1; /* file is empty??? */ + newline = 1; + best_flag = -1; + for (;;) { + /* + * Skip until we find a word at the start of a line. + */ + while (!newline && getword(f, word, &newline, filename)) + ; + if (!newline) + break; /* got to end of file */ + + /* + * Got a client - check if it's a match or a wildcard. + */ + got_flag = 0; + if (client != NULL && strcmp(word, client) != 0 && !ISWILD(word)) { + newline = 0; + continue; + } + if (!ISWILD(word)) + got_flag = NONWILD_CLIENT; + + /* + * Now get a server and check if it matches. + */ + if (!getword(f, word, &newline, filename)) + break; + if (newline) + continue; + if (!ISWILD(word)) { + if (server != NULL && strcmp(word, server) != 0) + continue; + got_flag |= NONWILD_SERVER; + } + + /* + * Got some sort of a match - see if it's better than what + * we have already. + */ + if (got_flag <= best_flag) + continue; + + /* + * Get the secret. 
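Further down, scan_authfile() collects the words that follow the secret into a wordlist, allocating each node and its string in a single malloc() and pointing ap->word just past the header. An isolated sketch of that build-and-free pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct wordnode {            /* simplified stand-in for struct wordlist */
  struct wordnode *next;
  char *word;                /* points just past the header, see below */
};

int main(void)
{
  struct wordnode *head = NULL, **app = &head, *ap, *next;
  const char *words[] = { "192.168.1.2", "!192.168.1.3", "--" };
  size_t i;

  for (i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
    /* one allocation holds both the node and its copy of the string */
    ap = malloc(sizeof(*ap) + strlen(words[i]) + 1);
    if (ap == NULL)
      break;
    ap->word = (char *)(ap + 1);      /* string lives right after the header */
    strcpy(ap->word, words[i]);
    ap->next = NULL;
    *app = ap;                        /* link at the tail, like scan_authfile */
    app = &ap->next;
  }

  for (ap = head; ap != NULL; ap = ap->next)
    puts(ap->word);

  for (ap = head; ap != NULL; ap = next) {  /* free_wordlist() equivalent */
    next = ap->next;
    free(ap);                               /* one free() releases the string too */
  }
  return 0;
}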
+ */ + if (!getword(f, word, &newline, filename)) + break; + if (newline) + continue; + + /* + * SRP-SHA1 authenticator should never be reading secrets from + * a file. (Authenticatee may, though.) + */ + if (flags && ((cp = strchr(word, ':')) == NULL || + strchr(cp + 1, ':') == NULL)) + continue; + + if (secret != NULL) { + /* + * Special syntax: @/pathname means read secret from file. + */ + if (word[0] == '@' && word[1] == '/') { + strlcpy(atfile, word+1, sizeof(atfile)); + if ((sf = fopen(atfile, "r")) == NULL) { + ppp_warn("can't open indirect secret file %s", atfile); + continue; + } + check_access(sf, atfile); + if (!getword(sf, word, &xxx, atfile)) { + ppp_warn("no secret in indirect secret file %s", atfile); + fclose(sf); + continue; + } + fclose(sf); + } + strlcpy(lsecret, word, sizeof(lsecret)); + } + + /* + * Now read address authorization info and make a wordlist. + */ + app = &alist; + for (;;) { + if (!getword(f, word, &newline, filename) || newline) + break; + ap = (struct wordlist *) + malloc(sizeof(struct wordlist) + strlen(word) + 1); + if (ap == NULL) + novm("authorized addresses"); + ap->word = (char *) (ap + 1); + strcpy(ap->word, word); + *app = ap; + app = &ap->next; + } + *app = NULL; + + /* + * This is the best so far; remember it. + */ + best_flag = got_flag; + if (addr_list) + free_wordlist(addr_list); + addr_list = alist; + if (secret != NULL) + strlcpy(secret, lsecret, MAXWORDLEN); + + if (!newline) + break; + } + + /* scan for a -- word indicating the start of options */ + for (app = &addr_list; (ap = *app) != NULL; app = &ap->next) + if (strcmp(ap->word, "--") == 0) + break; + /* ap = start of options */ + if (ap != NULL) { + ap = ap->next; /* first option */ + free(*app); /* free the "--" word */ + *app = NULL; /* terminate addr list */ + } + if (opts != NULL) + *opts = ap; + else if (ap != NULL) + free_wordlist(ap); + if (addrs != NULL) + *addrs = addr_list; + else if (addr_list != NULL) + free_wordlist(addr_list); + + return best_flag; +} + +/* + * wordlist_count - return the number of items in a wordlist + */ +static int +wordlist_count(wp) + struct wordlist *wp; +{ + int n; + + for (n = 0; wp != NULL; wp = wp->next) + ++n; + return n; +} + +/* + * free_wordlist - release memory allocated for a wordlist. + */ +static void +free_wordlist(wp) + struct wordlist *wp; +{ + struct wordlist *next; + + while (wp != NULL) { + next = wp->next; + free(wp); + wp = next; + } +} +#endif /* UNUSED */ + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c new file mode 100644 index 00000000..f8519ebe --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c @@ -0,0 +1,1740 @@ +/* + * ccp.c - PPP Compression Control Protocol. + * + * Copyright (c) 1994-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". 
+ * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CCP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include +#include + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ccp.h" + +#if MPPE_SUPPORT +#include "netif/ppp/lcp.h" /* lcp_close(), lcp_fsm */ +#include "netif/ppp/mppe.h" /* mppe_init() */ +#endif /* MPPE_SUPPORT */ + +/* + * Unfortunately there is a bug in zlib which means that using a + * size of 8 (window size = 256) for Deflate compression will cause + * buffer overruns and kernel crashes in the deflate module. + * Until this is fixed we only accept sizes in the range 9 .. 15. + * Thanks to James Carlson for pointing this out. + */ +#define DEFLATE_MIN_WORKS 9 + +/* + * Command-line options. + */ +#if PPP_OPTIONS +static int setbsdcomp (char **); +static int setdeflate (char **); +static char bsd_value[8]; +static char deflate_value[8]; + +/* + * Option variables. + */ +#if MPPE_SUPPORT +bool refuse_mppe_stateful = 1; /* Allow stateful mode? */ +#endif /* MPPE_SUPPORT */ + +static option_t ccp_option_list[] = { + { "noccp", o_bool, &ccp_protent.enabled_flag, + "Disable CCP negotiation" }, + { "-ccp", o_bool, &ccp_protent.enabled_flag, + "Disable CCP negotiation", OPT_ALIAS }, + + { "bsdcomp", o_special, (void *)setbsdcomp, + "Request BSD-Compress packet compression", + OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, bsd_value }, + { "nobsdcomp", o_bool, &ccp_wantoptions[0].bsd_compress, + "don't allow BSD-Compress", OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].bsd_compress }, + { "-bsdcomp", o_bool, &ccp_wantoptions[0].bsd_compress, + "don't allow BSD-Compress", OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].bsd_compress }, + + { "deflate", o_special, (void *)setdeflate, + "request Deflate compression", + OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, deflate_value }, + { "nodeflate", o_bool, &ccp_wantoptions[0].deflate, + "don't allow Deflate compression", OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].deflate }, + { "-deflate", o_bool, &ccp_wantoptions[0].deflate, + "don't allow Deflate compression", OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].deflate }, + + { "nodeflatedraft", o_bool, &ccp_wantoptions[0].deflate_draft, + "don't use draft deflate #", OPT_A2COPY, + &ccp_allowoptions[0].deflate_draft }, + + { "predictor1", o_bool, &ccp_wantoptions[0].predictor_1, + "request Predictor-1", OPT_PRIO | 1 }, + { "nopredictor1", o_bool, &ccp_wantoptions[0].predictor_1, + "don't allow Predictor-1", OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].predictor_1 }, + { "-predictor1", o_bool, &ccp_wantoptions[0].predictor_1, + "don't allow Predictor-1", OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].predictor_1 }, + +#if MPPE_SUPPORT + /* MPPE options are symmetrical ... 
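The command-line option table in this block only exists when PPP_OPTIONS is set (it is pppd heritage); what always matters for this file are the build switches it is guarded by. A typical lwipopts.h fragment, using the option names referenced by the #if guards in this file (defaults as in stock lwIP; verify against the ppp_opts.h in this tree):

/* lwipopts.h -- excerpt: compression / encryption switches for ccp.c */
#define CCP_SUPPORT         1   /* compile ccp.c at all */
#define MPPE_SUPPORT        1   /* MPPE encryption (needs CCP and MS-CHAP keys) */
#define MSCHAP_SUPPORT      1   /* provides the key material MPPE requires */
/* the software compressors are separate switches, off by default in stock lwIP */
#define DEFLATE_SUPPORT     0
#define BSDCOMPRESS_SUPPORT 0
#define PREDICTOR_SUPPORT   0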
we only set wantoptions here */ + { "require-mppe", o_bool, &ccp_wantoptions[0].mppe, + "require MPPE encryption", + OPT_PRIO | MPPE_OPT_40 | MPPE_OPT_128 }, + { "+mppe", o_bool, &ccp_wantoptions[0].mppe, + "require MPPE encryption", + OPT_ALIAS | OPT_PRIO | MPPE_OPT_40 | MPPE_OPT_128 }, + { "nomppe", o_bool, &ccp_wantoptions[0].mppe, + "don't allow MPPE encryption", OPT_PRIO }, + { "-mppe", o_bool, &ccp_wantoptions[0].mppe, + "don't allow MPPE encryption", OPT_ALIAS | OPT_PRIO }, + + /* We use ccp_allowoptions[0].mppe as a junk var ... it is reset later */ + { "require-mppe-40", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 40-bit encryption", OPT_PRIO | OPT_A2OR | MPPE_OPT_40, + &ccp_wantoptions[0].mppe }, + { "+mppe-40", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 40-bit encryption", OPT_PRIO | OPT_A2OR | MPPE_OPT_40, + &ccp_wantoptions[0].mppe }, + { "nomppe-40", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 40-bit encryption", + OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_40, &ccp_wantoptions[0].mppe }, + { "-mppe-40", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 40-bit encryption", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_40, + &ccp_wantoptions[0].mppe }, + + { "require-mppe-128", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 128-bit encryption", OPT_PRIO | OPT_A2OR | MPPE_OPT_128, + &ccp_wantoptions[0].mppe }, + { "+mppe-128", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 128-bit encryption", + OPT_ALIAS | OPT_PRIO | OPT_A2OR | MPPE_OPT_128, + &ccp_wantoptions[0].mppe }, + { "nomppe-128", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 128-bit encryption", + OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_128, &ccp_wantoptions[0].mppe }, + { "-mppe-128", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 128-bit encryption", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_128, + &ccp_wantoptions[0].mppe }, + + /* strange one; we always request stateless, but will we allow stateful? */ + { "mppe-stateful", o_bool, &refuse_mppe_stateful, + "allow MPPE stateful mode", OPT_PRIO }, + { "nomppe-stateful", o_bool, &refuse_mppe_stateful, + "disallow MPPE stateful mode", OPT_PRIO | 1 }, +#endif /* MPPE_SUPPORT */ + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. + */ +static void ccp_init(ppp_pcb *pcb); +static void ccp_open(ppp_pcb *pcb); +static void ccp_close(ppp_pcb *pcb, const char *reason); +static void ccp_lowerup(ppp_pcb *pcb); +static void ccp_lowerdown(ppp_pcb *pcb); +static void ccp_input(ppp_pcb *pcb, u_char *pkt, int len); +static void ccp_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int ccp_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT +static void ccp_datainput(ppp_pcb *pcb, u_char *pkt, int len); +#endif /* PPP_DATAINPUT */ + +const struct protent ccp_protent = { + PPP_CCP, + ccp_init, + ccp_input, + ccp_protrej, + ccp_lowerup, + ccp_lowerdown, + ccp_open, + ccp_close, +#if PRINTPKT_SUPPORT + ccp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + ccp_datainput, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "CCP", + "Compressed", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ccp_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +/* + * Callbacks for fsm code. 
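The require-mppe / nomppe-40 / mppe-stateful entries above map, in the lwIP runtime API, onto per-pcb flags rather than command-line options. A sketch, assuming the lwIP 2.1-style ppp_set_mppe() helper and PPP_MPPE_* flag macros are present in this tree's ppp.h:

#include "netif/ppp/ppp.h"

/* Require MPPE with 128-bit keys only; stateless mode is the default
 * because PPP_MPPE_ALLOW_STATEFUL is not passed. */
void require_mppe_128(ppp_pcb *ppp)
{
  ppp_set_mppe(ppp, PPP_MPPE_ENABLE | PPP_MPPE_REFUSE_40);
}

MPPE key material only exists after MS-CHAP[v2] authentication, which ccp_resetci() below checks before letting the option be negotiated.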
+ */ +static void ccp_resetci (fsm *); +static int ccp_cilen (fsm *); +static void ccp_addci (fsm *, u_char *, int *); +static int ccp_ackci (fsm *, u_char *, int); +static int ccp_nakci (fsm *, u_char *, int, int); +static int ccp_rejci (fsm *, u_char *, int); +static int ccp_reqci (fsm *, u_char *, int *, int); +static void ccp_up (fsm *); +static void ccp_down (fsm *); +static int ccp_extcode (fsm *, int, int, u_char *, int); +static void ccp_rack_timeout (void *); +static const char *method_name (ccp_options *, ccp_options *); + +static const fsm_callbacks ccp_callbacks = { + ccp_resetci, + ccp_cilen, + ccp_addci, + ccp_ackci, + ccp_nakci, + ccp_rejci, + ccp_reqci, + ccp_up, + ccp_down, + NULL, + NULL, + NULL, + NULL, + ccp_extcode, + "CCP" +}; + +/* + * Do we want / did we get any compression? + */ +static int ccp_anycompress(ccp_options *opt) { + return (0 +#if DEFLATE_SUPPORT + || (opt)->deflate +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + || (opt)->bsd_compress +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + || (opt)->predictor_1 || (opt)->predictor_2 +#endif /* PREDICTOR_SUPPORT */ +#if MPPE_SUPPORT + || (opt)->mppe +#endif /* MPPE_SUPPORT */ + ); +} + +/* + * Local state (mainly for handling reset-reqs and reset-acks). + */ +#define RACK_PENDING 1 /* waiting for reset-ack */ +#define RREQ_REPEAT 2 /* send another reset-req if no reset-ack */ + +#define RACKTIMEOUT 1 /* second */ + +#if PPP_OPTIONS +/* + * Option parsing + */ +static int +setbsdcomp(argv) + char **argv; +{ + int rbits, abits; + char *str, *endp; + + str = *argv; + abits = rbits = strtol(str, &endp, 0); + if (endp != str && *endp == ',') { + str = endp + 1; + abits = strtol(str, &endp, 0); + } + if (*endp != 0 || endp == str) { + option_error("invalid parameter '%s' for bsdcomp option", *argv); + return 0; + } + if ((rbits != 0 && (rbits < BSD_MIN_BITS || rbits > BSD_MAX_BITS)) + || (abits != 0 && (abits < BSD_MIN_BITS || abits > BSD_MAX_BITS))) { + option_error("bsdcomp option values must be 0 or %d .. %d", + BSD_MIN_BITS, BSD_MAX_BITS); + return 0; + } + if (rbits > 0) { + ccp_wantoptions[0].bsd_compress = 1; + ccp_wantoptions[0].bsd_bits = rbits; + } else + ccp_wantoptions[0].bsd_compress = 0; + if (abits > 0) { + ccp_allowoptions[0].bsd_compress = 1; + ccp_allowoptions[0].bsd_bits = abits; + } else + ccp_allowoptions[0].bsd_compress = 0; + ppp_slprintf(bsd_value, sizeof(bsd_value), + rbits == abits? "%d": "%d,%d", rbits, abits); + + return 1; +} + +static int +setdeflate(argv) + char **argv; +{ + int rbits, abits; + char *str, *endp; + + str = *argv; + abits = rbits = strtol(str, &endp, 0); + if (endp != str && *endp == ',') { + str = endp + 1; + abits = strtol(str, &endp, 0); + } + if (*endp != 0 || endp == str) { + option_error("invalid parameter '%s' for deflate option", *argv); + return 0; + } + if ((rbits != 0 && (rbits < DEFLATE_MIN_SIZE || rbits > DEFLATE_MAX_SIZE)) + || (abits != 0 && (abits < DEFLATE_MIN_SIZE + || abits > DEFLATE_MAX_SIZE))) { + option_error("deflate option values must be 0 or %d .. 
%d", + DEFLATE_MIN_SIZE, DEFLATE_MAX_SIZE); + return 0; + } + if (rbits == DEFLATE_MIN_SIZE || abits == DEFLATE_MIN_SIZE) { + if (rbits == DEFLATE_MIN_SIZE) + rbits = DEFLATE_MIN_WORKS; + if (abits == DEFLATE_MIN_SIZE) + abits = DEFLATE_MIN_WORKS; + warn("deflate option value of %d changed to %d to avoid zlib bug", + DEFLATE_MIN_SIZE, DEFLATE_MIN_WORKS); + } + if (rbits > 0) { + ccp_wantoptions[0].deflate = 1; + ccp_wantoptions[0].deflate_size = rbits; + } else + ccp_wantoptions[0].deflate = 0; + if (abits > 0) { + ccp_allowoptions[0].deflate = 1; + ccp_allowoptions[0].deflate_size = abits; + } else + ccp_allowoptions[0].deflate = 0; + ppp_slprintf(deflate_value, sizeof(deflate_value), + rbits == abits? "%d": "%d,%d", rbits, abits); + + return 1; +} +#endif /* PPP_OPTIONS */ + +/* + * ccp_init - initialize CCP. + */ +static void ccp_init(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + + f->pcb = pcb; + f->protocol = PPP_CCP; + f->callbacks = &ccp_callbacks; + fsm_init(f); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(wo, 0, sizeof(*wo)); + memset(go, 0, sizeof(*go)); + memset(ao, 0, sizeof(*ao)); + memset(ho, 0, sizeof(*ho)); +#endif /* 0 */ + +#if DEFLATE_SUPPORT + wo->deflate = 1; + wo->deflate_size = DEFLATE_MAX_SIZE; + wo->deflate_correct = 1; + wo->deflate_draft = 1; + ao->deflate = 1; + ao->deflate_size = DEFLATE_MAX_SIZE; + ao->deflate_correct = 1; + ao->deflate_draft = 1; +#endif /* DEFLATE_SUPPORT */ + +#if BSDCOMPRESS_SUPPORT + wo->bsd_compress = 1; + wo->bsd_bits = BSD_MAX_BITS; + ao->bsd_compress = 1; + ao->bsd_bits = BSD_MAX_BITS; +#endif /* BSDCOMPRESS_SUPPORT */ + +#if PREDICTOR_SUPPORT + ao->predictor_1 = 1; +#endif /* PREDICTOR_SUPPORT */ +} + +/* + * ccp_open - CCP is allowed to come up. + */ +static void ccp_open(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + ccp_options *go = &pcb->ccp_gotoptions; + + if (f->state != PPP_FSM_OPENED) + ccp_set(pcb, 1, 0, 0, 0); + + /* + * Find out which compressors the kernel supports before + * deciding whether to open in silent mode. + */ + ccp_resetci(f); + if (!ccp_anycompress(go)) + f->flags |= OPT_SILENT; + + fsm_open(f); +} + +/* + * ccp_close - Terminate CCP. + */ +static void ccp_close(ppp_pcb *pcb, const char *reason) { + fsm *f = &pcb->ccp_fsm; + ccp_set(pcb, 0, 0, 0, 0); + fsm_close(f, reason); +} + +/* + * ccp_lowerup - we may now transmit CCP packets. + */ +static void ccp_lowerup(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + fsm_lowerup(f); +} + +/* + * ccp_lowerdown - we may not transmit CCP packets. + */ +static void ccp_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + fsm_lowerdown(f); +} + +/* + * ccp_input - process a received CCP packet. + */ +static void ccp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->ccp_fsm; + ccp_options *go = &pcb->ccp_gotoptions; + int oldstate; + + /* + * Check for a terminate-request so we can print a message. + */ + oldstate = f->state; + fsm_input(f, p, len); + if (oldstate == PPP_FSM_OPENED && p[0] == TERMREQ && f->state != PPP_FSM_OPENED) { + ppp_notice("Compression disabled by peer."); +#if MPPE_SUPPORT + if (go->mppe) { + ppp_error("MPPE disabled, closing LCP"); + lcp_close(pcb, "MPPE disabled by peer"); + } +#endif /* MPPE_SUPPORT */ + } + + /* + * If we get a terminate-ack and we're not asking for compression, + * close CCP. + */ + if (oldstate == PPP_FSM_REQSENT && p[0] == TERMACK + && !ccp_anycompress(go)) + ccp_close(pcb, "No compression negotiated"); +} + +/* + * Handle a CCP-specific code. 
+ */ +static int ccp_extcode(fsm *f, int code, int id, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(len); + + switch (code) { + case CCP_RESETREQ: + if (f->state != PPP_FSM_OPENED) + break; + ccp_reset_comp(pcb); + /* send a reset-ack, which the transmitter will see and + reset its compression state. */ + fsm_sdata(f, CCP_RESETACK, id, NULL, 0); + break; + + case CCP_RESETACK: + if ((pcb->ccp_localstate & RACK_PENDING) && id == f->reqid) { + pcb->ccp_localstate &= ~(RACK_PENDING | RREQ_REPEAT); + UNTIMEOUT(ccp_rack_timeout, f); + ccp_reset_decomp(pcb); + } + break; + + default: + return 0; + } + + return 1; +} + +/* + * ccp_protrej - peer doesn't talk CCP. + */ +static void ccp_protrej(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; +#if MPPE_SUPPORT + ccp_options *go = &pcb->ccp_gotoptions; +#endif /* MPPE_SUPPORT */ + + ccp_set(pcb, 0, 0, 0, 0); + fsm_lowerdown(f); + +#if MPPE_SUPPORT + if (go->mppe) { + ppp_error("MPPE required but peer negotiation failed"); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + } +#endif /* MPPE_SUPPORT */ + +} + +/* + * ccp_resetci - initialize at start of negotiation. + */ +static void ccp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options *wo = &pcb->ccp_wantoptions; +#if MPPE_SUPPORT + ccp_options *ao = &pcb->ccp_allowoptions; +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT + u_char opt_buf[CCP_MAX_OPTION_LENGTH]; +#endif /* DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT */ +#if DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT + int res; +#endif /* DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT */ + +#if MPPE_SUPPORT + if (pcb->settings.require_mppe) { + wo->mppe = ao->mppe = + (pcb->settings.refuse_mppe_40 ? 0 : MPPE_OPT_40) + | (pcb->settings.refuse_mppe_128 ? 0 : MPPE_OPT_128); + } +#endif /* MPPE_SUPPORT */ + + *go = *wo; + pcb->ccp_all_rejected = 0; + +#if MPPE_SUPPORT + if (go->mppe) { + int auth_mschap_bits = pcb->auth_done; + int numbits; + + /* + * Start with a basic sanity check: mschap[v2] auth must be in + * exactly one direction. RFC 3079 says that the keys are + * 'derived from the credentials of the peer that initiated the call', + * however the PPP protocol doesn't have such a concept, and pppd + * cannot get this info externally. Instead we do the best we can. + * NB: If MPPE is required, all other compression opts are invalid. + * So, we return right away if we can't do it. + */ + + /* Leave only the mschap auth bits set */ + auth_mschap_bits &= (CHAP_MS_WITHPEER | CHAP_MS_PEER | + CHAP_MS2_WITHPEER | CHAP_MS2_PEER); + /* Count the mschap auths */ + auth_mschap_bits >>= CHAP_MS_SHIFT; + numbits = 0; + do { + numbits += auth_mschap_bits & 1; + auth_mschap_bits >>= 1; + } while (auth_mschap_bits); + if (numbits > 1) { + ppp_error("MPPE required, but auth done in both directions."); + lcp_close(pcb, "MPPE required but not available"); + return; + } + if (!numbits) { + ppp_error("MPPE required, but MS-CHAP[v2] auth not performed."); + lcp_close(pcb, "MPPE required but not available"); + return; + } + + /* A plugin (eg radius) may not have obtained key material. */ + if (!pcb->mppe_keys_set) { + ppp_error("MPPE required, but keys are not available. 
" + "Possible plugin problem?"); + lcp_close(pcb, "MPPE required but not available"); + return; + } + + /* LM auth not supported for MPPE */ + if (pcb->auth_done & (CHAP_MS_WITHPEER | CHAP_MS_PEER)) { + /* This might be noise */ + if (go->mppe & MPPE_OPT_40) { + ppp_notice("Disabling 40-bit MPPE; MS-CHAP LM not supported"); + go->mppe &= ~MPPE_OPT_40; + wo->mppe &= ~MPPE_OPT_40; + } + } + + /* Last check: can we actually negotiate something? */ + if (!(go->mppe & (MPPE_OPT_40 | MPPE_OPT_128))) { + /* Could be misconfig, could be 40-bit disabled above. */ + ppp_error("MPPE required, but both 40-bit and 128-bit disabled."); + lcp_close(pcb, "MPPE required but not available"); + return; + } + + /* sync options */ + ao->mppe = go->mppe; + /* MPPE is not compatible with other compression types */ +#if BSDCOMPRESS_SUPPORT + ao->bsd_compress = go->bsd_compress = 0; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + ao->predictor_1 = go->predictor_1 = 0; + ao->predictor_2 = go->predictor_2 = 0; +#endif /* PREDICTOR_SUPPORT */ +#if DEFLATE_SUPPORT + ao->deflate = go->deflate = 0; +#endif /* DEFLATE_SUPPORT */ + } +#endif /* MPPE_SUPPORT */ + + /* + * Check whether the kernel knows about the various + * compression methods we might request. + */ +#if BSDCOMPRESS_SUPPORT + /* FIXME: we don't need to test if BSD compress is available + * if BSDCOMPRESS_SUPPORT is set, it is. + */ + if (go->bsd_compress) { + opt_buf[0] = CI_BSD_COMPRESS; + opt_buf[1] = CILEN_BSD_COMPRESS; + for (;;) { + if (go->bsd_bits < BSD_MIN_BITS) { + go->bsd_compress = 0; + break; + } + opt_buf[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits); + res = ccp_test(pcb, opt_buf, CILEN_BSD_COMPRESS, 0); + if (res > 0) { + break; + } else if (res < 0) { + go->bsd_compress = 0; + break; + } + go->bsd_bits--; + } + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if DEFLATE_SUPPORT + /* FIXME: we don't need to test if deflate is available + * if DEFLATE_SUPPORT is set, it is. + */ + if (go->deflate) { + if (go->deflate_correct) { + opt_buf[0] = CI_DEFLATE; + opt_buf[1] = CILEN_DEFLATE; + opt_buf[3] = DEFLATE_CHK_SEQUENCE; + for (;;) { + if (go->deflate_size < DEFLATE_MIN_WORKS) { + go->deflate_correct = 0; + break; + } + opt_buf[2] = DEFLATE_MAKE_OPT(go->deflate_size); + res = ccp_test(pcb, opt_buf, CILEN_DEFLATE, 0); + if (res > 0) { + break; + } else if (res < 0) { + go->deflate_correct = 0; + break; + } + go->deflate_size--; + } + } + if (go->deflate_draft) { + opt_buf[0] = CI_DEFLATE_DRAFT; + opt_buf[1] = CILEN_DEFLATE; + opt_buf[3] = DEFLATE_CHK_SEQUENCE; + for (;;) { + if (go->deflate_size < DEFLATE_MIN_WORKS) { + go->deflate_draft = 0; + break; + } + opt_buf[2] = DEFLATE_MAKE_OPT(go->deflate_size); + res = ccp_test(pcb, opt_buf, CILEN_DEFLATE, 0); + if (res > 0) { + break; + } else if (res < 0) { + go->deflate_draft = 0; + break; + } + go->deflate_size--; + } + } + if (!go->deflate_correct && !go->deflate_draft) + go->deflate = 0; + } +#endif /* DEFLATE_SUPPORT */ +#if PREDICTOR_SUPPORT + /* FIXME: we don't need to test if predictor is available, + * if PREDICTOR_SUPPORT is set, it is. 
+ */ + if (go->predictor_1) { + opt_buf[0] = CI_PREDICTOR_1; + opt_buf[1] = CILEN_PREDICTOR_1; + if (ccp_test(pcb, opt_buf, CILEN_PREDICTOR_1, 0) <= 0) + go->predictor_1 = 0; + } + if (go->predictor_2) { + opt_buf[0] = CI_PREDICTOR_2; + opt_buf[1] = CILEN_PREDICTOR_2; + if (ccp_test(pcb, opt_buf, CILEN_PREDICTOR_2, 0) <= 0) + go->predictor_2 = 0; + } +#endif /* PREDICTOR_SUPPORT */ +} + +/* + * ccp_cilen - Return total length of our configuration info. + */ +static int ccp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + + return 0 +#if BSDCOMPRESS_SUPPORT + + (go->bsd_compress? CILEN_BSD_COMPRESS: 0) +#endif /* BSDCOMPRESS_SUPPORT */ +#if DEFLATE_SUPPORT + + (go->deflate && go->deflate_correct? CILEN_DEFLATE: 0) + + (go->deflate && go->deflate_draft? CILEN_DEFLATE: 0) +#endif /* DEFLATE_SUPPORT */ +#if PREDICTOR_SUPPORT + + (go->predictor_1? CILEN_PREDICTOR_1: 0) + + (go->predictor_2? CILEN_PREDICTOR_2: 0) +#endif /* PREDICTOR_SUPPORT */ +#if MPPE_SUPPORT + + (go->mppe? CILEN_MPPE: 0) +#endif /* MPPE_SUPPORT */ + ; +} + +/* + * ccp_addci - put our requests in a packet. + */ +static void ccp_addci(fsm *f, u_char *p, int *lenp) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + u_char *p0 = p; + + /* + * Add the compression types that we can receive, in decreasing + * preference order. + */ +#if MPPE_SUPPORT + if (go->mppe) { + p[0] = CI_MPPE; + p[1] = CILEN_MPPE; + MPPE_OPTS_TO_CI(go->mppe, &p[2]); + mppe_init(pcb, &pcb->mppe_decomp, go->mppe); + p += CILEN_MPPE; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate) { + if (go->deflate_correct) { + p[0] = CI_DEFLATE; + p[1] = CILEN_DEFLATE; + p[2] = DEFLATE_MAKE_OPT(go->deflate_size); + p[3] = DEFLATE_CHK_SEQUENCE; + p += CILEN_DEFLATE; + } + if (go->deflate_draft) { + p[0] = CI_DEFLATE_DRAFT; + p[1] = CILEN_DEFLATE; + p[2] = p[2 - CILEN_DEFLATE]; + p[3] = DEFLATE_CHK_SEQUENCE; + p += CILEN_DEFLATE; + } + } +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress) { + p[0] = CI_BSD_COMPRESS; + p[1] = CILEN_BSD_COMPRESS; + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits); + p += CILEN_BSD_COMPRESS; + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + /* XXX Should Predictor 2 be preferable to Predictor 1? */ + if (go->predictor_1) { + p[0] = CI_PREDICTOR_1; + p[1] = CILEN_PREDICTOR_1; + p += CILEN_PREDICTOR_1; + } + if (go->predictor_2) { + p[0] = CI_PREDICTOR_2; + p[1] = CILEN_PREDICTOR_2; + p += CILEN_PREDICTOR_2; + } +#endif /* PREDICTOR_SUPPORT */ + + go->method = (p > p0)? p0[0]: 0; + + *lenp = p - p0; +} + +/* + * ccp_ackci - process a received configure-ack, and return + * 1 iff the packet was OK. + */ +static int ccp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; +#if BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT + u_char *p0 = p; +#endif /* BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT */ + +#if MPPE_SUPPORT + if (go->mppe) { + u_char opt_buf[CILEN_MPPE]; + + opt_buf[0] = CI_MPPE; + opt_buf[1] = CILEN_MPPE; + MPPE_OPTS_TO_CI(go->mppe, &opt_buf[2]); + if (len < CILEN_MPPE || memcmp(opt_buf, p, CILEN_MPPE)) + return 0; + p += CILEN_MPPE; + len -= CILEN_MPPE; + /* XXX Cope with first/fast ack */ + if (len == 0) + return 1; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate) { + if (len < CILEN_DEFLATE + || p[0] != (go->deflate_correct? 
CI_DEFLATE: CI_DEFLATE_DRAFT) + || p[1] != CILEN_DEFLATE + || p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + /* XXX Cope with first/fast ack */ + if (len == 0) + return 1; + if (go->deflate_correct && go->deflate_draft) { + if (len < CILEN_DEFLATE + || p[0] != CI_DEFLATE_DRAFT + || p[1] != CILEN_DEFLATE + || p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + } +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress) { + if (len < CILEN_BSD_COMPRESS + || p[0] != CI_BSD_COMPRESS || p[1] != CILEN_BSD_COMPRESS + || p[2] != BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits)) + return 0; + p += CILEN_BSD_COMPRESS; + len -= CILEN_BSD_COMPRESS; + /* XXX Cope with first/fast ack */ + if (p == p0 && len == 0) + return 1; + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + if (go->predictor_1) { + if (len < CILEN_PREDICTOR_1 + || p[0] != CI_PREDICTOR_1 || p[1] != CILEN_PREDICTOR_1) + return 0; + p += CILEN_PREDICTOR_1; + len -= CILEN_PREDICTOR_1; + /* XXX Cope with first/fast ack */ + if (p == p0 && len == 0) + return 1; + } + if (go->predictor_2) { + if (len < CILEN_PREDICTOR_2 + || p[0] != CI_PREDICTOR_2 || p[1] != CILEN_PREDICTOR_2) + return 0; + p += CILEN_PREDICTOR_2; + len -= CILEN_PREDICTOR_2; + /* XXX Cope with first/fast ack */ + if (p == p0 && len == 0) + return 1; + } +#endif /* PREDICTOR_SUPPORT */ + + if (len != 0) + return 0; + return 1; +} + +/* + * ccp_nakci - process received configure-nak. + * Returns 1 iff the nak was OK. + */ +static int ccp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options no; /* options we've seen already */ + ccp_options try_; /* options to ask for next time */ + LWIP_UNUSED_ARG(treat_as_reject); +#if !MPPE_SUPPORT && !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(len); +#endif /* !MPPE_SUPPORT && !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT */ + + memset(&no, 0, sizeof(no)); + try_ = *go; + +#if MPPE_SUPPORT + if (go->mppe && len >= CILEN_MPPE + && p[0] == CI_MPPE && p[1] == CILEN_MPPE) { + no.mppe = 1; + /* + * Peer wants us to use a different strength or other setting. + * Fail if we aren't willing to use his suggestion. + */ + MPPE_CI_TO_OPTS(&p[2], try_.mppe); + if ((try_.mppe & MPPE_OPT_STATEFUL) && pcb->settings.refuse_mppe_stateful) { + ppp_error("Refusing MPPE stateful mode offered by peer"); + try_.mppe = 0; + } else if (((go->mppe | MPPE_OPT_STATEFUL) & try_.mppe) != try_.mppe) { + /* Peer must have set options we didn't request (suggest) */ + try_.mppe = 0; + } + + if (!try_.mppe) { + ppp_error("MPPE required but peer negotiation failed"); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + } + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate && len >= CILEN_DEFLATE + && p[0] == (go->deflate_correct? CI_DEFLATE: CI_DEFLATE_DRAFT) + && p[1] == CILEN_DEFLATE) { + no.deflate = 1; + /* + * Peer wants us to use a different code size or something. + * Stop asking for Deflate if we don't understand his suggestion. 
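+ *
+ * (Worked example, window sizes assumed for illustration: if we asked for a
+ *  15-bit window and the nak carries DEFLATE_SIZE(p[2]) == 12, then
+ *  try_.deflate_size drops to 12 for the next configure-request; a nak with
+ *  a method other than DEFLATE_METHOD_VAL, a window below DEFLATE_MIN_WORKS,
+ *  or a bad check byte turns Deflate off instead.)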
+ */ + if (DEFLATE_METHOD(p[2]) != DEFLATE_METHOD_VAL + || DEFLATE_SIZE(p[2]) < DEFLATE_MIN_WORKS + || p[3] != DEFLATE_CHK_SEQUENCE) + try_.deflate = 0; + else if (DEFLATE_SIZE(p[2]) < go->deflate_size) + try_.deflate_size = DEFLATE_SIZE(p[2]); + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + if (go->deflate_correct && go->deflate_draft + && len >= CILEN_DEFLATE && p[0] == CI_DEFLATE_DRAFT + && p[1] == CILEN_DEFLATE) { + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + } +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress && len >= CILEN_BSD_COMPRESS + && p[0] == CI_BSD_COMPRESS && p[1] == CILEN_BSD_COMPRESS) { + no.bsd_compress = 1; + /* + * Peer wants us to use a different number of bits + * or a different version. + */ + if (BSD_VERSION(p[2]) != BSD_CURRENT_VERSION) + try_.bsd_compress = 0; + else if (BSD_NBITS(p[2]) < go->bsd_bits) + try_.bsd_bits = BSD_NBITS(p[2]); + p += CILEN_BSD_COMPRESS; + len -= CILEN_BSD_COMPRESS; + } +#endif /* BSDCOMPRESS_SUPPORT */ + + /* + * Predictor-1 and 2 have no options, so they can't be Naked. + * + * There may be remaining options but we ignore them. + */ + + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; +} + +/* + * ccp_rejci - reject some of our suggested compression methods. + */ +static int ccp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options try_; /* options to request next time */ + + try_ = *go; + + /* + * Cope with empty configure-rejects by ceasing to send + * configure-requests. + */ + if (len == 0 && pcb->ccp_all_rejected) + return -1; + +#if MPPE_SUPPORT + if (go->mppe && len >= CILEN_MPPE + && p[0] == CI_MPPE && p[1] == CILEN_MPPE) { + ppp_error("MPPE required but peer refused"); + lcp_close(pcb, "MPPE required but peer refused"); + p += CILEN_MPPE; + len -= CILEN_MPPE; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate_correct && len >= CILEN_DEFLATE + && p[0] == CI_DEFLATE && p[1] == CILEN_DEFLATE) { + if (p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; /* Rej is bad */ + try_.deflate_correct = 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + if (go->deflate_draft && len >= CILEN_DEFLATE + && p[0] == CI_DEFLATE_DRAFT && p[1] == CILEN_DEFLATE) { + if (p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; /* Rej is bad */ + try_.deflate_draft = 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + if (!try_.deflate_correct && !try_.deflate_draft) + try_.deflate = 0; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress && len >= CILEN_BSD_COMPRESS + && p[0] == CI_BSD_COMPRESS && p[1] == CILEN_BSD_COMPRESS) { + if (p[2] != BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits)) + return 0; + try_.bsd_compress = 0; + p += CILEN_BSD_COMPRESS; + len -= CILEN_BSD_COMPRESS; + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + if (go->predictor_1 && len >= CILEN_PREDICTOR_1 + && p[0] == CI_PREDICTOR_1 && p[1] == CILEN_PREDICTOR_1) { + try_.predictor_1 = 0; + p += CILEN_PREDICTOR_1; + len -= CILEN_PREDICTOR_1; + } + if (go->predictor_2 && len >= CILEN_PREDICTOR_2 + && p[0] == CI_PREDICTOR_2 && p[1] == CILEN_PREDICTOR_2) { + try_.predictor_2 = 0; + p += CILEN_PREDICTOR_2; + len -= CILEN_PREDICTOR_2; + } +#endif /* PREDICTOR_SUPPORT */ + + if (len != 0) + return 0; + + if (f->state != PPP_FSM_OPENED) + *go = try_; + + return 1; +} + +/* + * ccp_reqci - processed a received configure-request. 
+ * Returns CONFACK, CONFNAK or CONFREJ and the packet modified + * appropriately. + */ +static int ccp_reqci(fsm *f, u_char *p, int *lenp, int dont_nak) { + ppp_pcb *pcb = f->pcb; + ccp_options *ho = &pcb->ccp_hisoptions; + ccp_options *ao = &pcb->ccp_allowoptions; + int ret, newret; +#if DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT + int res; + int nb; +#endif /* DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT */ + u_char *p0, *retp; + int len, clen, type; +#if MPPE_SUPPORT + u8_t rej_for_ci_mppe = 1; /* Are we rejecting based on a bad/missing */ + /* CI_MPPE, or due to other options? */ +#endif /* MPPE_SUPPORT */ + + ret = CONFACK; + retp = p0 = p; + len = *lenp; + + memset(ho, 0, sizeof(ccp_options)); + ho->method = (len > 0)? p[0]: 0; + + while (len > 0) { + newret = CONFACK; + if (len < 2 || p[1] < 2 || p[1] > len) { + /* length is bad */ + clen = len; + newret = CONFREJ; + + } else { + type = p[0]; + clen = p[1]; + + switch (type) { +#if MPPE_SUPPORT + case CI_MPPE: + if (!ao->mppe || clen != CILEN_MPPE) { + newret = CONFREJ; + break; + } + MPPE_CI_TO_OPTS(&p[2], ho->mppe); + + /* Nak if anything unsupported or unknown are set. */ + if (ho->mppe & MPPE_OPT_UNSUPPORTED) { + newret = CONFNAK; + ho->mppe &= ~MPPE_OPT_UNSUPPORTED; + } + if (ho->mppe & MPPE_OPT_UNKNOWN) { + newret = CONFNAK; + ho->mppe &= ~MPPE_OPT_UNKNOWN; + } + + /* Check state opt */ + if (ho->mppe & MPPE_OPT_STATEFUL) { + /* + * We can Nak and request stateless, but it's a + * lot easier to just assume the peer will request + * it if he can do it; stateful mode is bad over + * the Internet -- which is where we expect MPPE. + */ + if (pcb->settings.refuse_mppe_stateful) { + ppp_error("Refusing MPPE stateful mode offered by peer"); + newret = CONFREJ; + break; + } + } + + /* Find out which of {S,L} are set. */ + if ((ho->mppe & MPPE_OPT_128) + && (ho->mppe & MPPE_OPT_40)) { + /* Both are set, negotiate the strongest. */ + newret = CONFNAK; + if (ao->mppe & MPPE_OPT_128) + ho->mppe &= ~MPPE_OPT_40; + else if (ao->mppe & MPPE_OPT_40) + ho->mppe &= ~MPPE_OPT_128; + else { + newret = CONFREJ; + break; + } + } else if (ho->mppe & MPPE_OPT_128) { + if (!(ao->mppe & MPPE_OPT_128)) { + newret = CONFREJ; + break; + } + } else if (ho->mppe & MPPE_OPT_40) { + if (!(ao->mppe & MPPE_OPT_40)) { + newret = CONFREJ; + break; + } + } else { + /* Neither are set. */ + /* We cannot accept this. */ + newret = CONFNAK; + /* Give the peer our idea of what can be used, + so it can choose and confirm */ + ho->mppe = ao->mppe; + } + + /* rebuild the opts */ + MPPE_OPTS_TO_CI(ho->mppe, &p[2]); + if (newret == CONFACK) { + int mtu; + + mppe_init(pcb, &pcb->mppe_comp, ho->mppe); + /* + * We need to decrease the interface MTU by MPPE_PAD + * because MPPE frames **grow**. The kernel [must] + * allocate MPPE_PAD extra bytes in xmit buffers. + */ + mtu = netif_get_mtu(pcb); + if (mtu) + netif_set_mtu(pcb, mtu - MPPE_PAD); + else + newret = CONFREJ; + } + + /* + * We have accepted MPPE or are willing to negotiate + * MPPE parameters. A CONFREJ is due to subsequent + * (non-MPPE) processing. 
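+ *
+ * (Illustration of the MTU adjustment above, numbers assumed: MPPE frames
+ *  grow by MPPE_PAD octets per packet -- typically 4 for the MPPE header --
+ *  so a 1500-byte link MTU would be reported as 1496 once the option is
+ *  acked; if no MTU can be read back, the option is rejected instead.)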
+ */ + rej_for_ci_mppe = 0; + break; +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + case CI_DEFLATE: + case CI_DEFLATE_DRAFT: + if (!ao->deflate || clen != CILEN_DEFLATE + || (!ao->deflate_correct && type == CI_DEFLATE) + || (!ao->deflate_draft && type == CI_DEFLATE_DRAFT)) { + newret = CONFREJ; + break; + } + + ho->deflate = 1; + ho->deflate_size = nb = DEFLATE_SIZE(p[2]); + if (DEFLATE_METHOD(p[2]) != DEFLATE_METHOD_VAL + || p[3] != DEFLATE_CHK_SEQUENCE + || nb > ao->deflate_size || nb < DEFLATE_MIN_WORKS) { + newret = CONFNAK; + if (!dont_nak) { + p[2] = DEFLATE_MAKE_OPT(ao->deflate_size); + p[3] = DEFLATE_CHK_SEQUENCE; + /* fall through to test this #bits below */ + } else + break; + } + + /* + * Check whether we can do Deflate with the window + * size they want. If the window is too big, reduce + * it until the kernel can cope and nak with that. + * We only check this for the first option. + */ + if (p == p0) { + for (;;) { + res = ccp_test(pcb, p, CILEN_DEFLATE, 1); + if (res > 0) + break; /* it's OK now */ + if (res < 0 || nb == DEFLATE_MIN_WORKS || dont_nak) { + newret = CONFREJ; + p[2] = DEFLATE_MAKE_OPT(ho->deflate_size); + break; + } + newret = CONFNAK; + --nb; + p[2] = DEFLATE_MAKE_OPT(nb); + } + } + break; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + case CI_BSD_COMPRESS: + if (!ao->bsd_compress || clen != CILEN_BSD_COMPRESS) { + newret = CONFREJ; + break; + } + + ho->bsd_compress = 1; + ho->bsd_bits = nb = BSD_NBITS(p[2]); + if (BSD_VERSION(p[2]) != BSD_CURRENT_VERSION + || nb > ao->bsd_bits || nb < BSD_MIN_BITS) { + newret = CONFNAK; + if (!dont_nak) { + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, ao->bsd_bits); + /* fall through to test this #bits below */ + } else + break; + } + + /* + * Check whether we can do BSD-Compress with the code + * size they want. If the code size is too big, reduce + * it until the kernel can cope and nak with that. + * We only check this for the first option. 
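+ *
+ * (Example with assumed numbers: the peer asks for 15-bit codes but
+ *  ccp_test() only accepts 12; the loop below steps nb down until the test
+ *  passes and naks with BSD_MAKE_OPT(BSD_CURRENT_VERSION, 12).  If even
+ *  BSD_MIN_BITS fails, the option is rejected and p[2] is put back to the
+ *  value reflecting the peer's original request.)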
+ */ + if (p == p0) { + for (;;) { + res = ccp_test(pcb, p, CILEN_BSD_COMPRESS, 1); + if (res > 0) + break; + if (res < 0 || nb == BSD_MIN_BITS || dont_nak) { + newret = CONFREJ; + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, + ho->bsd_bits); + break; + } + newret = CONFNAK; + --nb; + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, nb); + } + } + break; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + case CI_PREDICTOR_1: + if (!ao->predictor_1 || clen != CILEN_PREDICTOR_1) { + newret = CONFREJ; + break; + } + + ho->predictor_1 = 1; + if (p == p0 + && ccp_test(pcb, p, CILEN_PREDICTOR_1, 1) <= 0) { + newret = CONFREJ; + } + break; + + case CI_PREDICTOR_2: + if (!ao->predictor_2 || clen != CILEN_PREDICTOR_2) { + newret = CONFREJ; + break; + } + + ho->predictor_2 = 1; + if (p == p0 + && ccp_test(pcb, p, CILEN_PREDICTOR_2, 1) <= 0) { + newret = CONFREJ; + } + break; +#endif /* PREDICTOR_SUPPORT */ + + default: + newret = CONFREJ; + } + } + + if (newret == CONFNAK && dont_nak) + newret = CONFREJ; + if (!(newret == CONFACK || (newret == CONFNAK && ret == CONFREJ))) { + /* we're returning this option */ + if (newret == CONFREJ && ret == CONFNAK) + retp = p0; + ret = newret; + if (p != retp) + MEMCPY(retp, p, clen); + retp += clen; + } + + p += clen; + len -= clen; + } + + if (ret != CONFACK) { + if (ret == CONFREJ && *lenp == retp - p0) + pcb->ccp_all_rejected = 1; + else + *lenp = retp - p0; + } +#if MPPE_SUPPORT + if (ret == CONFREJ && ao->mppe && rej_for_ci_mppe) { + ppp_error("MPPE required but peer negotiation failed"); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + } +#endif /* MPPE_SUPPORT */ + return ret; +} + +/* + * Make a string name for a compression method (or 2). + */ +static const char *method_name(ccp_options *opt, ccp_options *opt2) { + static char result[64]; +#if !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT + LWIP_UNUSED_ARG(opt2); +#endif /* !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT */ + + if (!ccp_anycompress(opt)) + return "(none)"; + switch (opt->method) { +#if MPPE_SUPPORT + case CI_MPPE: + { + char *p = result; + char *q = result + sizeof(result); /* 1 past result */ + + ppp_slprintf(p, q - p, "MPPE "); + p += 5; + if (opt->mppe & MPPE_OPT_128) { + ppp_slprintf(p, q - p, "128-bit "); + p += 8; + } + if (opt->mppe & MPPE_OPT_40) { + ppp_slprintf(p, q - p, "40-bit "); + p += 7; + } + if (opt->mppe & MPPE_OPT_STATEFUL) + ppp_slprintf(p, q - p, "stateful"); + else + ppp_slprintf(p, q - p, "stateless"); + + break; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + case CI_DEFLATE: + case CI_DEFLATE_DRAFT: + if (opt2 != NULL && opt2->deflate_size != opt->deflate_size) + ppp_slprintf(result, sizeof(result), "Deflate%s (%d/%d)", + (opt->method == CI_DEFLATE_DRAFT? "(old#)": ""), + opt->deflate_size, opt2->deflate_size); + else + ppp_slprintf(result, sizeof(result), "Deflate%s (%d)", + (opt->method == CI_DEFLATE_DRAFT? 
"(old#)": ""), + opt->deflate_size); + break; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + case CI_BSD_COMPRESS: + if (opt2 != NULL && opt2->bsd_bits != opt->bsd_bits) + ppp_slprintf(result, sizeof(result), "BSD-Compress (%d/%d)", + opt->bsd_bits, opt2->bsd_bits); + else + ppp_slprintf(result, sizeof(result), "BSD-Compress (%d)", + opt->bsd_bits); + break; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + case CI_PREDICTOR_1: + return "Predictor 1"; + case CI_PREDICTOR_2: + return "Predictor 2"; +#endif /* PREDICTOR_SUPPORT */ + default: + ppp_slprintf(result, sizeof(result), "Method %d", opt->method); + } + return result; +} + +/* + * CCP has come up - inform the kernel driver and log a message. + */ +static void ccp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options *ho = &pcb->ccp_hisoptions; + char method1[64]; + + ccp_set(pcb, 1, 1, go->method, ho->method); + if (ccp_anycompress(go)) { + if (ccp_anycompress(ho)) { + if (go->method == ho->method) { + ppp_notice("%s compression enabled", method_name(go, ho)); + } else { + ppp_strlcpy(method1, method_name(go, NULL), sizeof(method1)); + ppp_notice("%s / %s compression enabled", + method1, method_name(ho, NULL)); + } + } else + ppp_notice("%s receive compression enabled", method_name(go, NULL)); + } else if (ccp_anycompress(ho)) + ppp_notice("%s transmit compression enabled", method_name(ho, NULL)); +#if MPPE_SUPPORT + if (go->mppe) { + continue_networks(pcb); /* Bring up IP et al */ + } +#endif /* MPPE_SUPPORT */ +} + +/* + * CCP has gone down - inform the kernel driver. + */ +static void ccp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; +#if MPPE_SUPPORT + ccp_options *go = &pcb->ccp_gotoptions; +#endif /* MPPE_SUPPORT */ + + if (pcb->ccp_localstate & RACK_PENDING) + UNTIMEOUT(ccp_rack_timeout, f); + pcb->ccp_localstate = 0; + ccp_set(pcb, 1, 0, 0, 0); +#if MPPE_SUPPORT + if (go->mppe) { + go->mppe = 0; + if (pcb->lcp_fsm.state == PPP_FSM_OPENED) { + /* If LCP is not already going down, make sure it does. */ + ppp_error("MPPE disabled"); + lcp_close(pcb, "MPPE disabled"); + } + } +#endif /* MPPE_SUPPORT */ +} + +#if PRINTPKT_SUPPORT +/* + * Print the contents of a CCP packet. + */ +static const char* const ccp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej", + NULL, NULL, NULL, NULL, NULL, NULL, + "ResetReq", "ResetAck", +}; + +static int ccp_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg) { + const u_char *p0, *optend; + int code, id, len; + int optlen; + + p0 = p; + if (plen < HEADERLEN) + return 0; + code = p[0]; + id = p[1]; + len = (p[2] << 8) + p[3]; + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(ccp_codenames) && ccp_codenames[code-1] != NULL) + printer(arg, " %s", ccp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + p += HEADERLEN; + + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print list of possible compression methods */ + while (len >= 2) { + code = p[0]; + optlen = p[1]; + if (optlen < 2 || optlen > len) + break; + printer(arg, " <"); + len -= optlen; + optend = p + optlen; + switch (code) { +#if MPPE_SUPPORT + case CI_MPPE: + if (optlen >= CILEN_MPPE) { + u_char mppe_opts; + + MPPE_CI_TO_OPTS(&p[2], mppe_opts); + printer(arg, "mppe %s %s %s %s %s %s%s", + (p[2] & MPPE_H_BIT)? "+H": "-H", + (p[5] & MPPE_M_BIT)? 
"+M": "-M", + (p[5] & MPPE_S_BIT)? "+S": "-S", + (p[5] & MPPE_L_BIT)? "+L": "-L", + (p[5] & MPPE_D_BIT)? "+D": "-D", + (p[5] & MPPE_C_BIT)? "+C": "-C", + (mppe_opts & MPPE_OPT_UNKNOWN)? " +U": ""); + if (mppe_opts & MPPE_OPT_UNKNOWN) + printer(arg, " (%.2x %.2x %.2x %.2x)", + p[2], p[3], p[4], p[5]); + p += CILEN_MPPE; + } + break; +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + case CI_DEFLATE: + case CI_DEFLATE_DRAFT: + if (optlen >= CILEN_DEFLATE) { + printer(arg, "deflate%s %d", + (code == CI_DEFLATE_DRAFT? "(old#)": ""), + DEFLATE_SIZE(p[2])); + if (DEFLATE_METHOD(p[2]) != DEFLATE_METHOD_VAL) + printer(arg, " method %d", DEFLATE_METHOD(p[2])); + if (p[3] != DEFLATE_CHK_SEQUENCE) + printer(arg, " check %d", p[3]); + p += CILEN_DEFLATE; + } + break; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + case CI_BSD_COMPRESS: + if (optlen >= CILEN_BSD_COMPRESS) { + printer(arg, "bsd v%d %d", BSD_VERSION(p[2]), + BSD_NBITS(p[2])); + p += CILEN_BSD_COMPRESS; + } + break; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + case CI_PREDICTOR_1: + if (optlen >= CILEN_PREDICTOR_1) { + printer(arg, "predictor 1"); + p += CILEN_PREDICTOR_1; + } + break; + case CI_PREDICTOR_2: + if (optlen >= CILEN_PREDICTOR_2) { + printer(arg, "predictor 2"); + p += CILEN_PREDICTOR_2; + } + break; +#endif /* PREDICTOR_SUPPORT */ + default: + break; + } + while (p < optend) + printer(arg, " %.2x", *p++); + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* dump out the rest of the packet in hex */ + while (--len >= 0) + printer(arg, " %.2x", *p++); + + return p - p0; +} +#endif /* PRINTPKT_SUPPORT */ + +#if PPP_DATAINPUT +/* + * We have received a packet that the decompressor failed to + * decompress. Here we would expect to issue a reset-request, but + * Motorola has a patent on resetting the compressor as a result of + * detecting an error in the decompressed data after decompression. + * (See US patent 5,130,993; international patent publication number + * WO 91/10289; Australian patent 73296/91.) + * + * So we ask the kernel whether the error was detected after + * decompression; if it was, we take CCP down, thus disabling + * compression :-(, otherwise we issue the reset-request. + */ +static void ccp_datainput(ppp_pcb *pcb, u_char *pkt, int len) { + fsm *f; +#if MPPE_SUPPORT + ccp_options *go = &pcb->ccp_gotoptions; +#endif /* MPPE_SUPPORT */ + LWIP_UNUSED_ARG(pkt); + LWIP_UNUSED_ARG(len); + + f = &pcb->ccp_fsm; + if (f->state == PPP_FSM_OPENED) { + if (ccp_fatal_error(pcb)) { + /* + * Disable compression by taking CCP down. + */ + ppp_error("Lost compression sync: disabling compression"); + ccp_close(pcb, "Lost compression sync"); +#if MPPE_SUPPORT + /* + * If we were doing MPPE, we must also take the link down. + */ + if (go->mppe) { + ppp_error("Too many MPPE errors, closing LCP"); + lcp_close(pcb, "Too many MPPE errors"); + } +#endif /* MPPE_SUPPORT */ + } else { + /* + * Send a reset-request to reset the peer's compressor. + * We don't do that if we are still waiting for an + * acknowledgement to a previous reset-request. 
+ */ + if (!(pcb->ccp_localstate & RACK_PENDING)) { + fsm_sdata(f, CCP_RESETREQ, f->reqid = ++f->id, NULL, 0); + TIMEOUT(ccp_rack_timeout, f, RACKTIMEOUT); + pcb->ccp_localstate |= RACK_PENDING; + } else + pcb->ccp_localstate |= RREQ_REPEAT; + } + } +} +#endif /* PPP_DATAINPUT */ + +/* + * We have received a packet that the decompressor failed to + * decompress. Issue a reset-request. + */ +void ccp_resetrequest(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + + if (f->state != PPP_FSM_OPENED) + return; + + /* + * Send a reset-request to reset the peer's compressor. + * We don't do that if we are still waiting for an + * acknowledgement to a previous reset-request. + */ + if (!(pcb->ccp_localstate & RACK_PENDING)) { + fsm_sdata(f, CCP_RESETREQ, f->reqid = ++f->id, NULL, 0); + TIMEOUT(ccp_rack_timeout, f, RACKTIMEOUT); + pcb->ccp_localstate |= RACK_PENDING; + } else + pcb->ccp_localstate |= RREQ_REPEAT; +} + +/* + * Timeout waiting for reset-ack. + */ +static void ccp_rack_timeout(void *arg) { + fsm *f = (fsm*)arg; + ppp_pcb *pcb = f->pcb; + + if (f->state == PPP_FSM_OPENED && (pcb->ccp_localstate & RREQ_REPEAT)) { + fsm_sdata(f, CCP_RESETREQ, f->reqid, NULL, 0); + TIMEOUT(ccp_rack_timeout, f, RACKTIMEOUT); + pcb->ccp_localstate &= ~RREQ_REPEAT; + } else + pcb->ccp_localstate &= ~RACK_PENDING; +} + +#endif /* PPP_SUPPORT && CCP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c new file mode 100644 index 00000000..88f069f0 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c @@ -0,0 +1,126 @@ +/* + * chap-md5.c - New CHAP/MD5 implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/chap-new.h" +#include "netif/ppp/chap-md5.h" +#include "netif/ppp/magic.h" +#include "netif/ppp/pppcrypt.h" + +#define MD5_HASH_SIZE 16 +#define MD5_MIN_CHALLENGE 17 +#define MD5_MAX_CHALLENGE 24 +#define MD5_MIN_MAX_POWER_OF_TWO_CHALLENGE 3 /* 2^3-1 = 7, 17+7 = 24 */ + +#if PPP_SERVER +static void chap_md5_generate_challenge(ppp_pcb *pcb, unsigned char *cp) { + int clen; + LWIP_UNUSED_ARG(pcb); + + clen = MD5_MIN_CHALLENGE + magic_pow(MD5_MIN_MAX_POWER_OF_TWO_CHALLENGE); + *cp++ = clen; + magic_random_bytes(cp, clen); +} + +static int chap_md5_verify_response(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + lwip_md5_context ctx; + unsigned char idbyte = id; + unsigned char hash[MD5_HASH_SIZE]; + int challenge_len, response_len; + LWIP_UNUSED_ARG(name); + LWIP_UNUSED_ARG(pcb); + + challenge_len = *challenge++; + response_len = *response++; + if (response_len == MD5_HASH_SIZE) { + /* Generate hash of ID, secret, challenge */ + lwip_md5_init(&ctx); + lwip_md5_starts(&ctx); + lwip_md5_update(&ctx, &idbyte, 1); + lwip_md5_update(&ctx, secret, secret_len); + lwip_md5_update(&ctx, challenge, challenge_len); + lwip_md5_finish(&ctx, hash); + lwip_md5_free(&ctx); + + /* Test if our hash matches the peer's response */ + if (memcmp(hash, response, MD5_HASH_SIZE) == 0) { + ppp_slprintf(message, message_space, "Access granted"); + return 1; + } + } + ppp_slprintf(message, message_space, "Access denied"); + return 0; +} +#endif /* PPP_SERVER */ + +static void chap_md5_make_response(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *private_) { + lwip_md5_context ctx; + unsigned char idbyte = id; + int challenge_len = *challenge++; + LWIP_UNUSED_ARG(our_name); + LWIP_UNUSED_ARG(private_); + LWIP_UNUSED_ARG(pcb); + + lwip_md5_init(&ctx); + lwip_md5_starts(&ctx); + lwip_md5_update(&ctx, &idbyte, 1); + lwip_md5_update(&ctx, (const u_char *)secret, secret_len); + lwip_md5_update(&ctx, challenge, challenge_len); + lwip_md5_finish(&ctx, &response[1]); + lwip_md5_free(&ctx); + response[0] = MD5_HASH_SIZE; +} + +const struct chap_digest_type md5_digest = { + CHAP_MD5, /* code */ +#if PPP_SERVER + chap_md5_generate_challenge, + chap_md5_verify_response, +#endif /* PPP_SERVER */ + chap_md5_make_response, + NULL, /* check_success */ + NULL, /* handle_failure */ +}; + +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c new file mode 100644 index 00000000..485122d2 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c @@ -0,0 +1,677 @@ +/* + * chap-new.c - New CHAP implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#if 0 /* UNUSED */ +#include "session.h" +#endif /* UNUSED */ + +#include "netif/ppp/chap-new.h" +#include "netif/ppp/chap-md5.h" +#if MSCHAP_SUPPORT +#include "netif/ppp/chap_ms.h" +#endif +#include "netif/ppp/magic.h" + +#if 0 /* UNUSED */ +/* Hook for a plugin to validate CHAP challenge */ +int (*chap_verify_hook)(const char *name, const char *ourname, int id, + const struct chap_digest_type *digest, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) = NULL; +#endif /* UNUSED */ + +#if PPP_OPTIONS +/* + * Command-line options. + */ +static option_t chap_option_list[] = { + { "chap-restart", o_int, &chap_timeout_time, + "Set timeout for CHAP", OPT_PRIO }, + { "chap-max-challenge", o_int, &pcb->settings.chap_max_transmits, + "Set max #xmits for challenge", OPT_PRIO }, + { "chap-interval", o_int, &pcb->settings.chap_rechallenge_time, + "Set interval for rechallenge", OPT_PRIO }, + { NULL } +}; +#endif /* PPP_OPTIONS */ + + +/* Values for flags in chap_client_state and chap_server_state */ +#define LOWERUP 1 +#define AUTH_STARTED 2 +#define AUTH_DONE 4 +#define AUTH_FAILED 8 +#define TIMEOUT_PENDING 0x10 +#define CHALLENGE_VALID 0x20 + +/* + * Prototypes. 
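+ *
+ * (The flag bits above are OR-ed into pcb->chap_client.flags and
+ *  pcb->chap_server.flags; for instance the client only answers a challenge
+ *  once both LOWERUP and AUTH_STARTED are set, which is the check
+ *  chap_respond() performs below.)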
+ */ +static void chap_init(ppp_pcb *pcb); +static void chap_lowerup(ppp_pcb *pcb); +static void chap_lowerdown(ppp_pcb *pcb); +#if PPP_SERVER +static void chap_timeout(void *arg); +static void chap_generate_challenge(ppp_pcb *pcb); +static void chap_handle_response(ppp_pcb *pcb, int code, + unsigned char *pkt, int len); +static int chap_verify_response(ppp_pcb *pcb, const char *name, const char *ourname, int id, + const struct chap_digest_type *digest, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space); +#endif /* PPP_SERVER */ +static void chap_respond(ppp_pcb *pcb, int id, + unsigned char *pkt, int len); +static void chap_handle_status(ppp_pcb *pcb, int code, int id, + unsigned char *pkt, int len); +static void chap_protrej(ppp_pcb *pcb); +static void chap_input(ppp_pcb *pcb, unsigned char *pkt, int pktlen); +#if PRINTPKT_SUPPORT +static int chap_print_pkt(const unsigned char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +/* List of digest types that we know about */ +static const struct chap_digest_type* const chap_digests[] = { + &md5_digest, +#if MSCHAP_SUPPORT + &chapms_digest, + &chapms2_digest, +#endif /* MSCHAP_SUPPORT */ + NULL +}; + +/* + * chap_init - reset to initial state. + */ +static void chap_init(ppp_pcb *pcb) { + LWIP_UNUSED_ARG(pcb); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(&pcb->chap_client, 0, sizeof(chap_client_state)); +#if PPP_SERVER + memset(&pcb->chap_server, 0, sizeof(chap_server_state)); +#endif /* PPP_SERVER */ +#endif /* 0 */ +} + +/* + * chap_lowerup - we can start doing stuff now. + */ +static void chap_lowerup(ppp_pcb *pcb) { + + pcb->chap_client.flags |= LOWERUP; +#if PPP_SERVER + pcb->chap_server.flags |= LOWERUP; + if (pcb->chap_server.flags & AUTH_STARTED) + chap_timeout(pcb); +#endif /* PPP_SERVER */ +} + +static void chap_lowerdown(ppp_pcb *pcb) { + + pcb->chap_client.flags = 0; +#if PPP_SERVER + if (pcb->chap_server.flags & TIMEOUT_PENDING) + UNTIMEOUT(chap_timeout, pcb); + pcb->chap_server.flags = 0; +#endif /* PPP_SERVER */ +} + +#if PPP_SERVER +/* + * chap_auth_peer - Start authenticating the peer. + * If the lower layer is already up, we start sending challenges, + * otherwise we wait for the lower layer to come up. + */ +void chap_auth_peer(ppp_pcb *pcb, const char *our_name, int digest_code) { + const struct chap_digest_type *dp; + int i; + + if (pcb->chap_server.flags & AUTH_STARTED) { + ppp_error("CHAP: peer authentication already started!"); + return; + } + for (i = 0; (dp = chap_digests[i]) != NULL; ++i) + if (dp->code == digest_code) + break; + if (dp == NULL) + ppp_fatal("CHAP digest 0x%x requested but not available", + digest_code); + + pcb->chap_server.digest = dp; + pcb->chap_server.name = our_name; + /* Start with a random ID value */ + pcb->chap_server.id = magic(); + pcb->chap_server.flags |= AUTH_STARTED; + if (pcb->chap_server.flags & LOWERUP) + chap_timeout(pcb); +} +#endif /* PPP_SERVER */ + +/* + * chap_auth_with_peer - Prepare to authenticate ourselves to the peer. + * There isn't much to do until we receive a challenge. 
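+ *
+ * (As on the server side above, the digest is looked up in chap_digests[]
+ *  by its code -- MD5 always, the MS-CHAP variants only when MSCHAP_SUPPORT
+ *  is enabled -- and an unknown code ends in ppp_fatal().)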
+ */ +void chap_auth_with_peer(ppp_pcb *pcb, const char *our_name, int digest_code) { + const struct chap_digest_type *dp; + int i; + + if(NULL == our_name) + return; + + if (pcb->chap_client.flags & AUTH_STARTED) { + ppp_error("CHAP: authentication with peer already started!"); + return; + } + for (i = 0; (dp = chap_digests[i]) != NULL; ++i) + if (dp->code == digest_code) + break; + + if (dp == NULL) + ppp_fatal("CHAP digest 0x%x requested but not available", + digest_code); + + pcb->chap_client.digest = dp; + pcb->chap_client.name = our_name; + pcb->chap_client.flags |= AUTH_STARTED; +} + +#if PPP_SERVER +/* + * chap_timeout - It's time to send another challenge to the peer. + * This could be either a retransmission of a previous challenge, + * or a new challenge to start re-authentication. + */ +static void chap_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + struct pbuf *p; + + pcb->chap_server.flags &= ~TIMEOUT_PENDING; + if ((pcb->chap_server.flags & CHALLENGE_VALID) == 0) { + pcb->chap_server.challenge_xmits = 0; + chap_generate_challenge(pcb); + pcb->chap_server.flags |= CHALLENGE_VALID; + } else if (pcb->chap_server.challenge_xmits >= pcb->settings.chap_max_transmits) { + pcb->chap_server.flags &= ~CHALLENGE_VALID; + pcb->chap_server.flags |= AUTH_DONE | AUTH_FAILED; + auth_peer_fail(pcb, PPP_CHAP); + return; + } + + p = pbuf_alloc(PBUF_RAW, (u16_t)(pcb->chap_server.challenge_pktlen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + MEMCPY(p->payload, pcb->chap_server.challenge, pcb->chap_server.challenge_pktlen); + ppp_write(pcb, p); + ++pcb->chap_server.challenge_xmits; + pcb->chap_server.flags |= TIMEOUT_PENDING; + TIMEOUT(chap_timeout, arg, pcb->settings.chap_timeout_time); +} + +/* + * chap_generate_challenge - generate a challenge string and format + * the challenge packet in pcb->chap_server.challenge_pkt. + */ +static void chap_generate_challenge(ppp_pcb *pcb) { + int clen = 1, nlen, len; + unsigned char *p; + + p = pcb->chap_server.challenge; + MAKEHEADER(p, PPP_CHAP); + p += CHAP_HDRLEN; + pcb->chap_server.digest->generate_challenge(pcb, p); + clen = *p; + nlen = strlen(pcb->chap_server.name); + memcpy(p + 1 + clen, pcb->chap_server.name, nlen); + + len = CHAP_HDRLEN + 1 + clen + nlen; + pcb->chap_server.challenge_pktlen = PPP_HDRLEN + len; + + p = pcb->chap_server.challenge + PPP_HDRLEN; + p[0] = CHAP_CHALLENGE; + p[1] = ++pcb->chap_server.id; + p[2] = len >> 8; + p[3] = len; +} + +/* + * chap_handle_response - check the response to our challenge. 
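+ *
+ * (Layout of the stored challenge packet built by chap_generate_challenge()
+ *  above, which the id/length checks below index into:
+ *    [0 .. PPP_HDRLEN-1]    PPP framing written by MAKEHEADER()
+ *    [+0] CHAP_CHALLENGE    [+1] id    [+2..+3] 16-bit length
+ *    [+4] value-size        [+5 ..]    challenge value, then our name
+ *  where "+n" is the offset past the PPP header; the name is not
+ *  NUL-terminated inside the packet.)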
+ */ +static void chap_handle_response(ppp_pcb *pcb, int id, + unsigned char *pkt, int len) { + int response_len, ok, mlen; + const unsigned char *response; + unsigned char *outp; + struct pbuf *p; + const char *name = NULL; /* initialized to shut gcc up */ +#if 0 /* UNUSED */ + int (*verifier)(const char *, const char *, int, const struct chap_digest_type *, + const unsigned char *, const unsigned char *, char *, int); +#endif /* UNUSED */ + char rname[MAXNAMELEN+1]; + char message[256]; + + if ((pcb->chap_server.flags & LOWERUP) == 0) + return; + if (id != pcb->chap_server.challenge[PPP_HDRLEN+1] || len < 2) + return; + if (pcb->chap_server.flags & CHALLENGE_VALID) { + response = pkt; + GETCHAR(response_len, pkt); + len -= response_len + 1; /* length of name */ + name = (char *)pkt + response_len; + if (len < 0) + return; + + if (pcb->chap_server.flags & TIMEOUT_PENDING) { + pcb->chap_server.flags &= ~TIMEOUT_PENDING; + UNTIMEOUT(chap_timeout, pcb); + } +#if PPP_REMOTENAME + if (pcb->settings.explicit_remote) { + name = pcb->remote_name; + } else +#endif /* PPP_REMOTENAME */ + { + /* Null terminate and clean remote name. */ + ppp_slprintf(rname, sizeof(rname), "%.*v", len, name); + name = rname; + } + +#if 0 /* UNUSED */ + if (chap_verify_hook) + verifier = chap_verify_hook; + else + verifier = chap_verify_response; + ok = (*verifier)(name, pcb->chap_server.name, id, pcb->chap_server.digest, + pcb->chap_server.challenge + PPP_HDRLEN + CHAP_HDRLEN, + response, pcb->chap_server.message, sizeof(pcb->chap_server.message)); +#endif /* UNUSED */ + ok = chap_verify_response(pcb, name, pcb->chap_server.name, id, pcb->chap_server.digest, + pcb->chap_server.challenge + PPP_HDRLEN + CHAP_HDRLEN, + response, message, sizeof(message)); +#if 0 /* UNUSED */ + if (!ok || !auth_number()) { +#endif /* UNUSED */ + if (!ok) { + pcb->chap_server.flags |= AUTH_FAILED; + ppp_warn("Peer %q failed CHAP authentication", name); + } + } else if ((pcb->chap_server.flags & AUTH_DONE) == 0) + return; + + /* send the response */ + mlen = strlen(message); + len = CHAP_HDRLEN + mlen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN +len), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (unsigned char *)p->payload; + MAKEHEADER(outp, PPP_CHAP); + + outp[0] = (pcb->chap_server.flags & AUTH_FAILED)? CHAP_FAILURE: CHAP_SUCCESS; + outp[1] = id; + outp[2] = len >> 8; + outp[3] = len; + if (mlen > 0) + memcpy(outp + CHAP_HDRLEN, message, mlen); + ppp_write(pcb, p); + + if (pcb->chap_server.flags & CHALLENGE_VALID) { + pcb->chap_server.flags &= ~CHALLENGE_VALID; + if (!(pcb->chap_server.flags & AUTH_DONE) && !(pcb->chap_server.flags & AUTH_FAILED)) { + +#if 0 /* UNUSED */ + /* + * Auth is OK, so now we need to check session restrictions + * to ensure everything is OK, but only if we used a + * plugin, and only if we're configured to check. This + * allows us to do PAM checks on PPP servers that + * authenticate against ActiveDirectory, and use AD for + * account info (like when using Winbind integrated with + * PAM). 
+ */ + if (session_mgmt && + session_check(name, NULL, devnam, NULL) == 0) { + pcb->chap_server.flags |= AUTH_FAILED; + ppp_warn("Peer %q failed CHAP Session verification", name); + } +#endif /* UNUSED */ + + } + if (pcb->chap_server.flags & AUTH_FAILED) { + auth_peer_fail(pcb, PPP_CHAP); + } else { + if ((pcb->chap_server.flags & AUTH_DONE) == 0) + auth_peer_success(pcb, PPP_CHAP, + pcb->chap_server.digest->code, + name, strlen(name)); + if (pcb->settings.chap_rechallenge_time) { + pcb->chap_server.flags |= TIMEOUT_PENDING; + TIMEOUT(chap_timeout, pcb, + pcb->settings.chap_rechallenge_time); + } + } + pcb->chap_server.flags |= AUTH_DONE; + } +} + +/* + * chap_verify_response - check whether the peer's response matches + * what we think it should be. Returns 1 if it does (authentication + * succeeded), or 0 if it doesn't. + */ +static int chap_verify_response(ppp_pcb *pcb, const char *name, const char *ourname, int id, + const struct chap_digest_type *digest, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + int ok; + unsigned char secret[MAXSECRETLEN]; + int secret_len; + + /* Get the secret that the peer is supposed to know */ + if (!get_secret(pcb, name, ourname, (char *)secret, &secret_len, 1)) { + ppp_error("No CHAP secret found for authenticating %q", name); + return 0; + } + ok = digest->verify_response(pcb, id, name, secret, secret_len, challenge, + response, message, message_space); + memset(secret, 0, sizeof(secret)); + + return ok; +} +#endif /* PPP_SERVER */ + +/* + * chap_respond - Generate and send a response to a challenge. + */ +static void chap_respond(ppp_pcb *pcb, int id, + unsigned char *pkt, int len) { + int clen, nlen; + int secret_len; + struct pbuf *p; + u_char *outp; + char rname[MAXNAMELEN+1]; + char secret[MAXSECRETLEN+1]; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(RESP_MAX_PKTLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + if ((pcb->chap_client.flags & (LOWERUP | AUTH_STARTED)) != (LOWERUP | AUTH_STARTED)) + return; /* not ready */ + if (len < 2 || len < pkt[0] + 1) + return; /* too short */ + clen = pkt[0]; + nlen = len - (clen + 1); + + /* Null terminate and clean remote name. 
*/ + ppp_slprintf(rname, sizeof(rname), "%.*v", nlen, pkt + clen + 1); + +#if PPP_REMOTENAME + /* Microsoft doesn't send their name back in the PPP packet */ + if (pcb->settings.explicit_remote || (pcb->settings.remote_name[0] != 0 && rname[0] == 0)) + strlcpy(rname, pcb->settings.remote_name, sizeof(rname)); +#endif /* PPP_REMOTENAME */ + + /* get secret for authenticating ourselves with the specified host */ + if (!get_secret(pcb, pcb->chap_client.name, rname, secret, &secret_len, 0)) { + secret_len = 0; /* assume null secret if can't find one */ + ppp_warn("No CHAP secret found for authenticating us to %q", rname); + } + + outp = (u_char*)p->payload; + MAKEHEADER(outp, PPP_CHAP); + outp += CHAP_HDRLEN; + + pcb->chap_client.digest->make_response(pcb, outp, id, pcb->chap_client.name, pkt, + secret, secret_len, pcb->chap_client.priv); + memset(secret, 0, secret_len); + + clen = *outp; + nlen = strlen(pcb->chap_client.name); + memcpy(outp + clen + 1, pcb->chap_client.name, nlen); + + outp = (u_char*)p->payload + PPP_HDRLEN; + len = CHAP_HDRLEN + clen + 1 + nlen; + outp[0] = CHAP_RESPONSE; + outp[1] = id; + outp[2] = len >> 8; + outp[3] = len; + + pbuf_realloc(p, PPP_HDRLEN + len); + ppp_write(pcb, p); +} + +static void chap_handle_status(ppp_pcb *pcb, int code, int id, + unsigned char *pkt, int len) { + const char *msg = NULL; + LWIP_UNUSED_ARG(id); + + if ((pcb->chap_client.flags & (AUTH_DONE|AUTH_STARTED|LOWERUP)) + != (AUTH_STARTED|LOWERUP)) + return; + pcb->chap_client.flags |= AUTH_DONE; + + if (code == CHAP_SUCCESS) { + /* used for MS-CHAP v2 mutual auth, yuck */ + if (pcb->chap_client.digest->check_success != NULL) { + if (!(*pcb->chap_client.digest->check_success)(pcb, pkt, len, pcb->chap_client.priv)) + code = CHAP_FAILURE; + } else + msg = "CHAP authentication succeeded"; + } else { + if (pcb->chap_client.digest->handle_failure != NULL) + (*pcb->chap_client.digest->handle_failure)(pcb, pkt, len); + else + msg = "CHAP authentication failed"; + } + if (msg) { + if (len > 0) + ppp_info("%s: %.*v", msg, len, pkt); + else + ppp_info("%s", msg); + } + if (code == CHAP_SUCCESS) + auth_withpeer_success(pcb, PPP_CHAP, pcb->chap_client.digest->code); + else { + pcb->chap_client.flags |= AUTH_FAILED; + ppp_error("CHAP authentication failed"); + auth_withpeer_fail(pcb, PPP_CHAP); + } +} + +static void chap_input(ppp_pcb *pcb, unsigned char *pkt, int pktlen) { + unsigned char code, id; + int len; + + if (pktlen < CHAP_HDRLEN) + return; + GETCHAR(code, pkt); + GETCHAR(id, pkt); + GETSHORT(len, pkt); + if (len < CHAP_HDRLEN || len > pktlen) + return; + len -= CHAP_HDRLEN; + + switch (code) { + case CHAP_CHALLENGE: + chap_respond(pcb, id, pkt, len); + break; +#if PPP_SERVER + case CHAP_RESPONSE: + chap_handle_response(pcb, id, pkt, len); + break; +#endif /* PPP_SERVER */ + case CHAP_FAILURE: + case CHAP_SUCCESS: + chap_handle_status(pcb, code, id, pkt, len); + break; + default: + break; + } +} + +static void chap_protrej(ppp_pcb *pcb) { + +#if PPP_SERVER + if (pcb->chap_server.flags & TIMEOUT_PENDING) { + pcb->chap_server.flags &= ~TIMEOUT_PENDING; + UNTIMEOUT(chap_timeout, pcb); + } + if (pcb->chap_server.flags & AUTH_STARTED) { + pcb->chap_server.flags = 0; + auth_peer_fail(pcb, PPP_CHAP); + } +#endif /* PPP_SERVER */ + if ((pcb->chap_client.flags & (AUTH_STARTED|AUTH_DONE)) == AUTH_STARTED) { + pcb->chap_client.flags &= ~AUTH_STARTED; + ppp_error("CHAP authentication failed due to protocol-reject"); + auth_withpeer_fail(pcb, PPP_CHAP); + } +} + +#if PRINTPKT_SUPPORT +/* + * chap_print_pkt - 
print the contents of a CHAP packet. + */ +static const char* const chap_code_names[] = { + "Challenge", "Response", "Success", "Failure" +}; + +static int chap_print_pkt(const unsigned char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len; + int clen, nlen; + unsigned char x; + + if (plen < CHAP_HDRLEN) + return 0; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < CHAP_HDRLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(chap_code_names)) + printer(arg, " %s", chap_code_names[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= CHAP_HDRLEN; + switch (code) { + case CHAP_CHALLENGE: + case CHAP_RESPONSE: + if (len < 1) + break; + clen = p[0]; + if (len < clen + 1) + break; + ++p; + nlen = len - clen - 1; + printer(arg, " <"); + for (; clen > 0; --clen) { + GETCHAR(x, p); + printer(arg, "%.2x", x); + } + printer(arg, ">, name = "); + ppp_print_string(p, nlen, printer, arg); + break; + case CHAP_FAILURE: + case CHAP_SUCCESS: + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + break; + default: + for (clen = len; clen > 0; --clen) { + GETCHAR(x, p); + printer(arg, " %.2x", x); + } + /* no break */ + } + + return len + CHAP_HDRLEN; +} +#endif /* PRINTPKT_SUPPORT */ + +const struct protent chap_protent = { + PPP_CHAP, + chap_init, + chap_input, + chap_protrej, + chap_lowerup, + chap_lowerdown, + NULL, /* open */ + NULL, /* close */ +#if PRINTPKT_SUPPORT + chap_print_pkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, /* datainput */ +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "CHAP", /* name */ + NULL, /* data_name */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + chap_option_list, + NULL, /* check_options */ +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c new file mode 100644 index 00000000..5a989c9b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c @@ -0,0 +1,962 @@ +/* + * chap_ms.c - Microsoft MS-CHAP compatible implementation. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* + * Modifications by Lauri Pesonen / lpesonen@clinet.fi, april 1997 + * + * Implemented LANManager type password response to MS-CHAP challenges. + * Now pppd provides both NT style and LANMan style blocks, and the + * prefered is set by option "ms-lanman". Default is to use NT. + * The hash text (StdText) was taken from Win95 RASAPI32.DLL. + * + * You should also use DOMAIN\\USERNAME as described in README.MSCHAP80 + */ + +/* + * Modifications by Frank Cusack, frank@google.com, March 2002. + * + * Implemented MS-CHAPv2 functionality, heavily based on sample + * implementation in RFC 2759. Implemented MPPE functionality, + * heavily based on sample implementation in RFC 3079. + * + * Copyright (c) 2002 Google, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MSCHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/chap-new.h" +#include "netif/ppp/chap_ms.h" +#include "netif/ppp/pppcrypt.h" +#include "netif/ppp/magic.h" +#if MPPE_SUPPORT +#include "netif/ppp/mppe.h" /* For mppe_sha1_pad*, mppe_set_key() */ +#endif /* MPPE_SUPPORT */ + +#define SHA1_SIGNATURE_SIZE 20 +#define MD4_SIGNATURE_SIZE 16 /* 16 bytes in a MD4 message digest */ +#define MAX_NT_PASSWORD 256 /* Max (Unicode) chars in an NT pass */ + +#define MS_CHAP_RESPONSE_LEN 49 /* Response length for MS-CHAP */ +#define MS_CHAP2_RESPONSE_LEN 49 /* Response length for MS-CHAPv2 */ +#define MS_AUTH_RESPONSE_LENGTH 40 /* MS-CHAPv2 authenticator response, */ + /* as ASCII */ + +/* Error codes for MS-CHAP failure messages. 
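+ *
+ * These are the numeric values carried in the "E=" field of MS-CHAP failure
+ * strings (e.g. "E=691 R=1 C=... V=0"); chapms_handle_failure() below maps
+ * a received code back to a readable message.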
*/ +#define MS_CHAP_ERROR_RESTRICTED_LOGON_HOURS 646 +#define MS_CHAP_ERROR_ACCT_DISABLED 647 +#define MS_CHAP_ERROR_PASSWD_EXPIRED 648 +#define MS_CHAP_ERROR_NO_DIALIN_PERMISSION 649 +#define MS_CHAP_ERROR_AUTHENTICATION_FAILURE 691 +#define MS_CHAP_ERROR_CHANGING_PASSWORD 709 + +/* + * Offsets within the response field for MS-CHAP + */ +#define MS_CHAP_LANMANRESP 0 +#define MS_CHAP_LANMANRESP_LEN 24 +#define MS_CHAP_NTRESP 24 +#define MS_CHAP_NTRESP_LEN 24 +#define MS_CHAP_USENT 48 + +/* + * Offsets within the response field for MS-CHAP2 + */ +#define MS_CHAP2_PEER_CHALLENGE 0 +#define MS_CHAP2_PEER_CHAL_LEN 16 +#define MS_CHAP2_RESERVED_LEN 8 +#define MS_CHAP2_NTRESP 24 +#define MS_CHAP2_NTRESP_LEN 24 +#define MS_CHAP2_FLAGS 48 + +#if MPPE_SUPPORT +#if 0 /* UNUSED */ +/* These values are the RADIUS attribute values--see RFC 2548. */ +#define MPPE_ENC_POL_ENC_ALLOWED 1 +#define MPPE_ENC_POL_ENC_REQUIRED 2 +#define MPPE_ENC_TYPES_RC4_40 2 +#define MPPE_ENC_TYPES_RC4_128 4 + +/* used by plugins (using above values) */ +extern void set_mppe_enc_types(int, int); +#endif /* UNUSED */ +#endif /* MPPE_SUPPORT */ + +/* Are we the authenticator or authenticatee? For MS-CHAPv2 key derivation. */ +#define MS_CHAP2_AUTHENTICATEE 0 +#define MS_CHAP2_AUTHENTICATOR 1 + +static void ascii2unicode (const char[], int, u_char[]); +static void NTPasswordHash (u_char *, int, u_char[MD4_SIGNATURE_SIZE]); +static void ChallengeResponse (const u_char *, const u_char *, u_char[24]); +static void ChallengeHash (const u_char[16], const u_char *, const char *, u_char[8]); +static void ChapMS_NT (const u_char *, const char *, int, u_char[24]); +static void ChapMS2_NT (const u_char *, const u_char[16], const char *, const char *, int, + u_char[24]); +static void GenerateAuthenticatorResponsePlain + (const char*, int, u_char[24], const u_char[16], const u_char *, + const char *, u_char[41]); +#ifdef MSLANMAN +static void ChapMS_LANMan (u_char *, char *, int, u_char *); +#endif + +static void GenerateAuthenticatorResponse(const u_char PasswordHashHash[MD4_SIGNATURE_SIZE], + u_char NTResponse[24], const u_char PeerChallenge[16], + const u_char *rchallenge, const char *username, + u_char authResponse[MS_AUTH_RESPONSE_LENGTH+1]); + +#if MPPE_SUPPORT +static void Set_Start_Key (ppp_pcb *pcb, const u_char *, const char *, int); +static void SetMasterKeys (ppp_pcb *pcb, const char *, int, u_char[24], int); +#endif /* MPPE_SUPPORT */ + +static void ChapMS (ppp_pcb *pcb, const u_char *, const char *, int, u_char *); +static void ChapMS2 (ppp_pcb *pcb, const u_char *, const u_char *, const char *, const char *, int, + u_char *, u_char[MS_AUTH_RESPONSE_LENGTH+1], int); + +#ifdef MSLANMAN +bool ms_lanman = 0; /* Use LanMan password instead of NT */ + /* Has meaning only with MS-CHAP challenges */ +#endif + +#if MPPE_SUPPORT +#ifdef DEBUGMPPEKEY +/* For MPPE debug */ +/* Use "[]|}{?/><,`!2&&(" (sans quotes) for RFC 3079 MS-CHAPv2 test value */ +static char *mschap_challenge = NULL; +/* Use "!@\#$%^&*()_+:3|~" (sans quotes, backslash is to escape #) for ... */ +static char *mschap2_peer_challenge = NULL; +#endif + +#include "netif/ppp/fsm.h" /* Need to poke MPPE options */ +#include "netif/ppp/ccp.h" +#endif /* MPPE_SUPPORT */ + +#if PPP_OPTIONS +/* + * Command-line options. 
+ */ +static option_t chapms_option_list[] = { +#ifdef MSLANMAN + { "ms-lanman", o_bool, &ms_lanman, + "Use LanMan passwd when using MS-CHAP", 1 }, +#endif +#ifdef DEBUGMPPEKEY + { "mschap-challenge", o_string, &mschap_challenge, + "specify CHAP challenge" }, + { "mschap2-peer-challenge", o_string, &mschap2_peer_challenge, + "specify CHAP peer challenge" }, +#endif + { NULL } +}; +#endif /* PPP_OPTIONS */ + +#if PPP_SERVER +/* + * chapms_generate_challenge - generate a challenge for MS-CHAP. + * For MS-CHAP the challenge length is fixed at 8 bytes. + * The length goes in challenge[0] and the actual challenge starts + * at challenge[1]. + */ +static void chapms_generate_challenge(ppp_pcb *pcb, unsigned char *challenge) { + LWIP_UNUSED_ARG(pcb); + + *challenge++ = 8; +#ifdef DEBUGMPPEKEY + if (mschap_challenge && strlen(mschap_challenge) == 8) + memcpy(challenge, mschap_challenge, 8); + else +#endif + magic_random_bytes(challenge, 8); +} + +static void chapms2_generate_challenge(ppp_pcb *pcb, unsigned char *challenge) { + LWIP_UNUSED_ARG(pcb); + + *challenge++ = 16; +#ifdef DEBUGMPPEKEY + if (mschap_challenge && strlen(mschap_challenge) == 16) + memcpy(challenge, mschap_challenge, 16); + else +#endif + magic_random_bytes(challenge, 16); +} + +static int chapms_verify_response(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + unsigned char md[MS_CHAP_RESPONSE_LEN]; + int diff; + int challenge_len, response_len; + LWIP_UNUSED_ARG(id); + LWIP_UNUSED_ARG(name); + + challenge_len = *challenge++; /* skip length, is 8 */ + response_len = *response++; + if (response_len != MS_CHAP_RESPONSE_LEN) + goto bad; + +#ifndef MSLANMAN + if (!response[MS_CHAP_USENT]) { + /* Should really propagate this into the error packet. */ + ppp_notice("Peer request for LANMAN auth not supported"); + goto bad; + } +#endif + + /* Generate the expected response. */ + ChapMS(pcb, (const u_char *)challenge, (const char *)secret, secret_len, md); + +#ifdef MSLANMAN + /* Determine which part of response to verify against */ + if (!response[MS_CHAP_USENT]) + diff = memcmp(&response[MS_CHAP_LANMANRESP], + &md[MS_CHAP_LANMANRESP], MS_CHAP_LANMANRESP_LEN); + else +#endif + diff = memcmp(&response[MS_CHAP_NTRESP], &md[MS_CHAP_NTRESP], + MS_CHAP_NTRESP_LEN); + + if (diff == 0) { + ppp_slprintf(message, message_space, "Access granted"); + return 1; + } + + bad: + /* See comments below for MS-CHAP V2 */ + ppp_slprintf(message, message_space, "E=691 R=1 C=%0.*B V=0", + challenge_len, challenge); + return 0; +} + +static int chapms2_verify_response(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + unsigned char md[MS_CHAP2_RESPONSE_LEN]; + char saresponse[MS_AUTH_RESPONSE_LENGTH+1]; + int challenge_len, response_len; + LWIP_UNUSED_ARG(id); + + challenge_len = *challenge++; /* skip length, is 16 */ + response_len = *response++; + if (response_len != MS_CHAP2_RESPONSE_LEN) + goto bad; /* not even the right length */ + + /* Generate the expected response and our mutual auth. 
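+ *
+ * (Per the offsets defined near the top of this file, both response and the
+ *  locally computed md are 49 octets: [0..15] peer challenge, [16..23]
+ *  reserved, [24..47] NT response, [48] flags; saresponse receives the
+ *  40-character authenticator string used for mutual authentication.)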
*/ + ChapMS2(pcb, (const u_char*)challenge, (const u_char*)&response[MS_CHAP2_PEER_CHALLENGE], name, + (const char *)secret, secret_len, md, + (unsigned char *)saresponse, MS_CHAP2_AUTHENTICATOR); + + /* compare MDs and send the appropriate status */ + /* + * Per RFC 2759, success message must be formatted as + * "S= M=" + * where + * is the Authenticator Response (mutual auth) + * is a text message + * + * However, some versions of Windows (win98 tested) do not know + * about the M= part (required per RFC 2759) and flag + * it as an error (reported incorrectly as an encryption error + * to the user). Since the RFC requires it, and it can be + * useful information, we supply it if the peer is a conforming + * system. Luckily (?), win98 sets the Flags field to 0x04 + * (contrary to RFC requirements) so we can use that to + * distinguish between conforming and non-conforming systems. + * + * Special thanks to Alex Swiridov for + * help debugging this. + */ + if (memcmp(&md[MS_CHAP2_NTRESP], &response[MS_CHAP2_NTRESP], + MS_CHAP2_NTRESP_LEN) == 0) { + if (response[MS_CHAP2_FLAGS]) + ppp_slprintf(message, message_space, "S=%s", saresponse); + else + ppp_slprintf(message, message_space, "S=%s M=%s", + saresponse, "Access granted"); + return 1; + } + + bad: + /* + * Failure message must be formatted as + * "E=e R=r C=c V=v M=m" + * where + * e = error code (we use 691, ERROR_AUTHENTICATION_FAILURE) + * r = retry (we use 1, ok to retry) + * c = challenge to use for next response, we reuse previous + * v = Change Password version supported, we use 0 + * m = text message + * + * The M=m part is only for MS-CHAPv2. Neither win2k nor + * win98 (others untested) display the message to the user anyway. + * They also both ignore the E=e code. + * + * Note that it's safe to reuse the same challenge as we don't + * actually accept another response based on the error message + * (and no clients try to resend a response anyway). + * + * Basically, this whole bit is useless code, even the small + * implementation here is only because of overspecification. 
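Spelled out, the two message formats this function relies on (both from RFC 2759) are "S=<auth_string> M=<message>" on success, where <auth_string> is the 40-hex-digit Authenticator Response used for mutual authentication, and "E=<error> R=<retry> C=<challenge> V=<version> M=<message>" on failure. The ppp_slprintf() calls in this function therefore produce strings shaped like:

    S=<40 uppercase hex digits> M=Access granted
    E=691 R=1 C=<hex dump of the original 16-octet challenge> V=0 M=Access denied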
+ */ + ppp_slprintf(message, message_space, "E=691 R=1 C=%0.*B V=0 M=%s", + challenge_len, challenge, "Access denied"); + return 0; +} +#endif /* PPP_SERVER */ + +static void chapms_make_response(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *private_) { + LWIP_UNUSED_ARG(id); + LWIP_UNUSED_ARG(our_name); + LWIP_UNUSED_ARG(private_); + challenge++; /* skip length, should be 8 */ + *response++ = MS_CHAP_RESPONSE_LEN; + ChapMS(pcb, challenge, secret, secret_len, response); +} + +static void chapms2_make_response(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *private_) { + LWIP_UNUSED_ARG(id); + challenge++; /* skip length, should be 16 */ + *response++ = MS_CHAP2_RESPONSE_LEN; + ChapMS2(pcb, challenge, +#ifdef DEBUGMPPEKEY + mschap2_peer_challenge, +#else + NULL, +#endif + our_name, secret, secret_len, response, private_, + MS_CHAP2_AUTHENTICATEE); +} + +static int chapms2_check_success(ppp_pcb *pcb, unsigned char *msg, int len, unsigned char *private_) { + LWIP_UNUSED_ARG(pcb); + + if ((len < MS_AUTH_RESPONSE_LENGTH + 2) || + strncmp((char *)msg, "S=", 2) != 0) { + /* Packet does not start with "S=" */ + ppp_error("MS-CHAPv2 Success packet is badly formed."); + return 0; + } + msg += 2; + len -= 2; + if (len < MS_AUTH_RESPONSE_LENGTH + || memcmp(msg, private_, MS_AUTH_RESPONSE_LENGTH)) { + /* Authenticator Response did not match expected. */ + ppp_error("MS-CHAPv2 mutual authentication failed."); + return 0; + } + /* Authenticator Response matches. */ + msg += MS_AUTH_RESPONSE_LENGTH; /* Eat it */ + len -= MS_AUTH_RESPONSE_LENGTH; + if ((len >= 3) && !strncmp((char *)msg, " M=", 3)) { + msg += 3; /* Eat the delimiter */ + } else if (len) { + /* Packet has extra text which does not begin " M=" */ + ppp_error("MS-CHAPv2 Success packet is badly formed."); + return 0; + } + return 1; +} + +static void chapms_handle_failure(ppp_pcb *pcb, unsigned char *inp, int len) { + int err; + const char *p; + char msg[64]; + LWIP_UNUSED_ARG(pcb); + + /* We want a null-terminated string for strxxx(). */ + len = LWIP_MIN(len, 63); + MEMCPY(msg, inp, len); + msg[len] = 0; + p = msg; + + /* + * Deal with MS-CHAP formatted failure messages; just print the + * M= part (if any). For MS-CHAP we're not really supposed + * to use M=, but it shouldn't hurt. See + * chapms[2]_verify_response. + */ + if (!strncmp(p, "E=", 2)) + err = strtol(p+2, NULL, 10); /* Remember the error code. */ + else + goto print_msg; /* Message is badly formatted. */ + + if (len && ((p = strstr(p, " M=")) != NULL)) { + /* M= field found. */ + p += 3; + } else { + /* No M=; use the error code. */ + switch (err) { + case MS_CHAP_ERROR_RESTRICTED_LOGON_HOURS: + p = "E=646 Restricted logon hours"; + break; + + case MS_CHAP_ERROR_ACCT_DISABLED: + p = "E=647 Account disabled"; + break; + + case MS_CHAP_ERROR_PASSWD_EXPIRED: + p = "E=648 Password expired"; + break; + + case MS_CHAP_ERROR_NO_DIALIN_PERMISSION: + p = "E=649 No dialin permission"; + break; + + case MS_CHAP_ERROR_AUTHENTICATION_FAILURE: + p = "E=691 Authentication failure"; + break; + + case MS_CHAP_ERROR_CHANGING_PASSWORD: + /* Should never see this, we don't support Change Password. 
*/ + p = "E=709 Error changing password"; + break; + + default: + ppp_error("Unknown MS-CHAP authentication failure: %.*v", + len, inp); + return; + } + } +print_msg: + if (p != NULL) + ppp_error("MS-CHAP authentication failed: %v", p); +} + +static void ChallengeResponse(const u_char *challenge, + const u_char PasswordHash[MD4_SIGNATURE_SIZE], + u_char response[24]) { + u_char ZPasswordHash[21]; + lwip_des_context des; + u_char des_key[8]; + + BZERO(ZPasswordHash, sizeof(ZPasswordHash)); + MEMCPY(ZPasswordHash, PasswordHash, MD4_SIGNATURE_SIZE); + +#if 0 + dbglog("ChallengeResponse - ZPasswordHash %.*B", + sizeof(ZPasswordHash), ZPasswordHash); +#endif + + pppcrypt_56_to_64_bit_key(ZPasswordHash + 0, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, challenge, response +0); + lwip_des_free(&des); + + pppcrypt_56_to_64_bit_key(ZPasswordHash + 7, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, challenge, response +8); + lwip_des_free(&des); + + pppcrypt_56_to_64_bit_key(ZPasswordHash + 14, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, challenge, response +16); + lwip_des_free(&des); + +#if 0 + dbglog("ChallengeResponse - response %.24B", response); +#endif +} + +static void ChallengeHash(const u_char PeerChallenge[16], const u_char *rchallenge, + const char *username, u_char Challenge[8]) { + lwip_sha1_context sha1Context; + u_char sha1Hash[SHA1_SIGNATURE_SIZE]; + const char *user; + + /* remove domain from "domain\username" */ + if ((user = strrchr(username, '\\')) != NULL) + ++user; + else + user = username; + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PeerChallenge, 16); + lwip_sha1_update(&sha1Context, rchallenge, 16); + lwip_sha1_update(&sha1Context, (const unsigned char*)user, strlen(user)); + lwip_sha1_finish(&sha1Context, sha1Hash); + lwip_sha1_free(&sha1Context); + + MEMCPY(Challenge, sha1Hash, 8); +} + +/* + * Convert the ASCII version of the password to Unicode. + * This implicitly supports 8-bit ISO8859/1 characters. + * This gives us the little-endian representation, which + * is assumed by all M$ CHAP RFCs. (Unicode byte ordering + * is machine-dependent.) + */ +static void ascii2unicode(const char ascii[], int ascii_len, u_char unicode[]) { + int i; + + BZERO(unicode, ascii_len * 2); + for (i = 0; i < ascii_len; i++) + unicode[i * 2] = (u_char) ascii[i]; +} + +static void NTPasswordHash(u_char *secret, int secret_len, u_char hash[MD4_SIGNATURE_SIZE]) { + lwip_md4_context md4Context; + + lwip_md4_init(&md4Context); + lwip_md4_starts(&md4Context); + lwip_md4_update(&md4Context, secret, secret_len); + lwip_md4_finish(&md4Context, hash); + lwip_md4_free(&md4Context); +} + +static void ChapMS_NT(const u_char *rchallenge, const char *secret, int secret_len, + u_char NTResponse[24]) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + + /* Hash the Unicode version of the secret (== password). 
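A word on ChallengeResponse() above, the DES step shared by MS-CHAP and MS-CHAPv2: it zero-pads the 16-byte MD4 password hash to 21 bytes, splits that into three 7-byte slices, expands each slice into a 64-bit DES key with pppcrypt_56_to_64_bit_key(), and encrypts the same 8-byte challenge under each key, concatenating the three ciphertext blocks into the 24-byte response:

    ZPasswordHash[21]  =  PasswordHash[16] || 00 00 00 00 00
    response[ 0.. 7]   =  DES( key = ZPasswordHash[ 0.. 6], challenge )
    response[ 8..15]   =  DES( key = ZPasswordHash[ 7..13], challenge )
    response[16..23]   =  DES( key = ZPasswordHash[14..20], challenge )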
*/ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + + ChallengeResponse(rchallenge, PasswordHash, NTResponse); +} + +static void ChapMS2_NT(const u_char *rchallenge, const u_char PeerChallenge[16], const char *username, + const char *secret, int secret_len, u_char NTResponse[24]) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char Challenge[8]; + + ChallengeHash(PeerChallenge, rchallenge, username, Challenge); + + /* Hash the Unicode version of the secret (== password). */ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + + ChallengeResponse(Challenge, PasswordHash, NTResponse); +} + +#ifdef MSLANMAN +static u_char *StdText = (u_char *)"KGS!@#$%"; /* key from rasapi32.dll */ + +static void ChapMS_LANMan(u_char *rchallenge, char *secret, int secret_len, + unsigned char *response) { + int i; + u_char UcasePassword[MAX_NT_PASSWORD]; /* max is actually 14 */ + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + lwip_des_context des; + u_char des_key[8]; + + /* LANMan password is case insensitive */ + BZERO(UcasePassword, sizeof(UcasePassword)); + for (i = 0; i < secret_len; i++) + UcasePassword[i] = (u_char)toupper(secret[i]); + + pppcrypt_56_to_64_bit_key(UcasePassword +0, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, StdText, PasswordHash +0); + lwip_des_free(&des); + + pppcrypt_56_to_64_bit_key(UcasePassword +7, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, StdText, PasswordHash +8); + lwip_des_free(&des); + + ChallengeResponse(rchallenge, PasswordHash, &response[MS_CHAP_LANMANRESP]); +} +#endif + + +static void GenerateAuthenticatorResponse(const u_char PasswordHashHash[MD4_SIGNATURE_SIZE], + u_char NTResponse[24], const u_char PeerChallenge[16], + const u_char *rchallenge, const char *username, + u_char authResponse[MS_AUTH_RESPONSE_LENGTH+1]) { + /* + * "Magic" constants used in response generation, from RFC 2759. 
+ */ + static const u_char Magic1[39] = /* "Magic server to client signing constant" */ + { 0x4D, 0x61, 0x67, 0x69, 0x63, 0x20, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x20, 0x74, 0x6F, 0x20, 0x63, 0x6C, 0x69, 0x65, + 0x6E, 0x74, 0x20, 0x73, 0x69, 0x67, 0x6E, 0x69, 0x6E, 0x67, + 0x20, 0x63, 0x6F, 0x6E, 0x73, 0x74, 0x61, 0x6E, 0x74 }; + static const u_char Magic2[41] = /* "Pad to make it do more than one iteration" */ + { 0x50, 0x61, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x6D, 0x61, 0x6B, + 0x65, 0x20, 0x69, 0x74, 0x20, 0x64, 0x6F, 0x20, 0x6D, 0x6F, + 0x72, 0x65, 0x20, 0x74, 0x68, 0x61, 0x6E, 0x20, 0x6F, 0x6E, + 0x65, 0x20, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6F, + 0x6E }; + + int i; + lwip_sha1_context sha1Context; + u_char Digest[SHA1_SIGNATURE_SIZE]; + u_char Challenge[8]; + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, NTResponse, 24); + lwip_sha1_update(&sha1Context, Magic1, sizeof(Magic1)); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + ChallengeHash(PeerChallenge, rchallenge, username, Challenge); + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, Digest, sizeof(Digest)); + lwip_sha1_update(&sha1Context, Challenge, sizeof(Challenge)); + lwip_sha1_update(&sha1Context, Magic2, sizeof(Magic2)); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + /* Convert to ASCII hex string. */ + for (i = 0; i < LWIP_MAX((MS_AUTH_RESPONSE_LENGTH / 2), (int)sizeof(Digest)); i++) + sprintf((char *)&authResponse[i * 2], "%02X", Digest[i]); +} + + +static void GenerateAuthenticatorResponsePlain( + const char *secret, int secret_len, + u_char NTResponse[24], const u_char PeerChallenge[16], + const u_char *rchallenge, const char *username, + u_char authResponse[MS_AUTH_RESPONSE_LENGTH+1]) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char PasswordHashHash[MD4_SIGNATURE_SIZE]; + + /* Hash (x2) the Unicode version of the secret (== password). */ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + NTPasswordHash(PasswordHash, sizeof(PasswordHash), + PasswordHashHash); + + GenerateAuthenticatorResponse(PasswordHashHash, NTResponse, PeerChallenge, + rchallenge, username, authResponse); +} + + +#if MPPE_SUPPORT +/* + * Set mppe_xxxx_key from MS-CHAP credentials. (see RFC 3079) + */ +static void Set_Start_Key(ppp_pcb *pcb, const u_char *rchallenge, const char *secret, int secret_len) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char PasswordHashHash[MD4_SIGNATURE_SIZE]; + lwip_sha1_context sha1Context; + u_char Digest[SHA1_SIGNATURE_SIZE]; /* >= MPPE_MAX_KEY_LEN */ + + /* Hash (x2) the Unicode version of the secret (== password). */ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + NTPasswordHash(PasswordHash, sizeof(PasswordHash), PasswordHashHash); + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, rchallenge, 8); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + /* Same key in both directions. 
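In RFC 2759 terms, GenerateAuthenticatorResponse() above computes the following, where || is concatenation, PasswordHashHash is MD4(MD4(unicode password)), and Magic1/Magic2 are the two constants defined in the function:

    Digest       = SHA1( PasswordHashHash || NT-Response || Magic1 )
    Challenge    = ChallengeHash( PeerChallenge, rchallenge, username )      /* 8 octets */
    AuthResponse = uppercase hex of SHA1( Digest || Challenge || Magic2 )    /* 40 characters */

The "S=" prefix seen on the wire is added (or expected) by the callers, not by this function.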
*/ + mppe_set_key(pcb, &pcb->mppe_comp, Digest); + mppe_set_key(pcb, &pcb->mppe_decomp, Digest); + + pcb->mppe_keys_set = 1; +} + +/* + * Set mppe_xxxx_key from MS-CHAPv2 credentials. (see RFC 3079) + */ +static void SetMasterKeys(ppp_pcb *pcb, const char *secret, int secret_len, u_char NTResponse[24], int IsServer) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char PasswordHashHash[MD4_SIGNATURE_SIZE]; + lwip_sha1_context sha1Context; + u_char MasterKey[SHA1_SIGNATURE_SIZE]; /* >= MPPE_MAX_KEY_LEN */ + u_char Digest[SHA1_SIGNATURE_SIZE]; /* >= MPPE_MAX_KEY_LEN */ + const u_char *s; + + /* "This is the MPPE Master Key" */ + static const u_char Magic1[27] = + { 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x4d, 0x50, 0x50, 0x45, 0x20, 0x4d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x20, 0x4b, 0x65, 0x79 }; + /* "On the client side, this is the send key; " + "on the server side, it is the receive key." */ + static const u_char Magic2[84] = + { 0x4f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x20, 0x73, 0x69, 0x64, 0x65, 0x2c, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x73, 0x65, 0x6e, 0x64, 0x20, 0x6b, 0x65, 0x79, + 0x3b, 0x20, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x73, 0x69, 0x64, 0x65, + 0x2c, 0x20, 0x69, 0x74, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x2e }; + /* "On the client side, this is the receive key; " + "on the server side, it is the send key." */ + static const u_char Magic3[84] = + { 0x4f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x20, 0x73, 0x69, 0x64, 0x65, 0x2c, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x3b, 0x20, 0x6f, 0x6e, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x73, + 0x69, 0x64, 0x65, 0x2c, 0x20, 0x69, 0x74, 0x20, 0x69, 0x73, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x6e, 0x64, 0x20, + 0x6b, 0x65, 0x79, 0x2e }; + + /* Hash (x2) the Unicode version of the secret (== password). 
*/ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + NTPasswordHash(PasswordHash, sizeof(PasswordHash), PasswordHashHash); + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, NTResponse, 24); + lwip_sha1_update(&sha1Context, Magic1, sizeof(Magic1)); + lwip_sha1_finish(&sha1Context, MasterKey); + lwip_sha1_free(&sha1Context); + + /* + * generate send key + */ + if (IsServer) + s = Magic3; + else + s = Magic2; + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, MasterKey, 16); + lwip_sha1_update(&sha1Context, mppe_sha1_pad1, SHA1_PAD_SIZE); + lwip_sha1_update(&sha1Context, s, 84); + lwip_sha1_update(&sha1Context, mppe_sha1_pad2, SHA1_PAD_SIZE); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + mppe_set_key(pcb, &pcb->mppe_comp, Digest); + + /* + * generate recv key + */ + if (IsServer) + s = Magic2; + else + s = Magic3; + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, MasterKey, 16); + lwip_sha1_update(&sha1Context, mppe_sha1_pad1, SHA1_PAD_SIZE); + lwip_sha1_update(&sha1Context, s, 84); + lwip_sha1_update(&sha1Context, mppe_sha1_pad2, SHA1_PAD_SIZE); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + mppe_set_key(pcb, &pcb->mppe_decomp, Digest); + + pcb->mppe_keys_set = 1; +} + +#endif /* MPPE_SUPPORT */ + + +static void ChapMS(ppp_pcb *pcb, const u_char *rchallenge, const char *secret, int secret_len, + unsigned char *response) { +#if !MPPE_SUPPORT + LWIP_UNUSED_ARG(pcb); +#endif /* !MPPE_SUPPORT */ + BZERO(response, MS_CHAP_RESPONSE_LEN); + + ChapMS_NT(rchallenge, secret, secret_len, &response[MS_CHAP_NTRESP]); + +#ifdef MSLANMAN + ChapMS_LANMan(rchallenge, secret, secret_len, + &response[MS_CHAP_LANMANRESP]); + + /* preferred method is set by option */ + response[MS_CHAP_USENT] = !ms_lanman; +#else + response[MS_CHAP_USENT] = 1; +#endif + +#if MPPE_SUPPORT + Set_Start_Key(pcb, rchallenge, secret, secret_len); +#endif /* MPPE_SUPPORT */ +} + + +/* + * If PeerChallenge is NULL, one is generated and the PeerChallenge + * field of response is filled in. Call this way when generating a response. + * If PeerChallenge is supplied, it is copied into the PeerChallenge field. + * Call this way when verifying a response (or debugging). + * Do not call with PeerChallenge = response. + * + * The PeerChallenge field of response is then used for calculation of the + * Authenticator Response. + */ +static void ChapMS2(ppp_pcb *pcb, const u_char *rchallenge, const u_char *PeerChallenge, + const char *user, const char *secret, int secret_len, unsigned char *response, + u_char authResponse[], int authenticator) { + /* ARGSUSED */ + LWIP_UNUSED_ARG(authenticator); +#if !MPPE_SUPPORT + LWIP_UNUSED_ARG(pcb); +#endif /* !MPPE_SUPPORT */ + + BZERO(response, MS_CHAP2_RESPONSE_LEN); + + /* Generate the Peer-Challenge if requested, or copy it if supplied. */ + if (!PeerChallenge) + magic_random_bytes(&response[MS_CHAP2_PEER_CHALLENGE], MS_CHAP2_PEER_CHAL_LEN); + else + MEMCPY(&response[MS_CHAP2_PEER_CHALLENGE], PeerChallenge, + MS_CHAP2_PEER_CHAL_LEN); + + /* Generate the NT-Response */ + ChapMS2_NT(rchallenge, &response[MS_CHAP2_PEER_CHALLENGE], user, + secret, secret_len, &response[MS_CHAP2_NTRESP]); + + /* Generate the Authenticator Response. 
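For orientation, SetMasterKeys() above is the MS-CHAPv2 variant of the RFC 3079 MPPE key derivation. Condensed, with pad1/pad2 the 40-octet all-0x00 and all-0xF2 constants (mppe_sha1_pad1 / mppe_sha1_pad2):

    MasterKey                      = SHA1( PasswordHashHash || NT-Response || Magic1 )   /* first 16 octets used */
    client send / server recv key  = SHA1( MasterKey || pad1 || Magic2 || pad2 )
    client recv / server send key  = SHA1( MasterKey || pad1 || Magic3 || pad2 )

The IsServer argument only decides which magic constant feeds which direction; mppe_set_key() is assumed to take the key length it needs from the 20-octet digest. The MS-CHAP (v1) counterpart, Set_Start_Key(), instead derives a single SHA1( PasswordHashHash || PasswordHashHash || challenge ) digest and installs it for both directions.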
*/ + GenerateAuthenticatorResponsePlain(secret, secret_len, + &response[MS_CHAP2_NTRESP], + &response[MS_CHAP2_PEER_CHALLENGE], + rchallenge, user, authResponse); + +#if MPPE_SUPPORT + SetMasterKeys(pcb, secret, secret_len, + &response[MS_CHAP2_NTRESP], authenticator); +#endif /* MPPE_SUPPORT */ +} + +#if 0 /* UNUSED */ +#if MPPE_SUPPORT +/* + * Set MPPE options from plugins. + */ +void set_mppe_enc_types(int policy, int types) { + /* Early exit for unknown policies. */ + if (policy != MPPE_ENC_POL_ENC_ALLOWED || + policy != MPPE_ENC_POL_ENC_REQUIRED) + return; + + /* Don't modify MPPE if it's optional and wasn't already configured. */ + if (policy == MPPE_ENC_POL_ENC_ALLOWED && !ccp_wantoptions[0].mppe) + return; + + /* + * Disable undesirable encryption types. Note that we don't ENABLE + * any encryption types, to avoid overriding manual configuration. + */ + switch(types) { + case MPPE_ENC_TYPES_RC4_40: + ccp_wantoptions[0].mppe &= ~MPPE_OPT_128; /* disable 128-bit */ + break; + case MPPE_ENC_TYPES_RC4_128: + ccp_wantoptions[0].mppe &= ~MPPE_OPT_40; /* disable 40-bit */ + break; + default: + break; + } +} +#endif /* MPPE_SUPPORT */ +#endif /* UNUSED */ + +const struct chap_digest_type chapms_digest = { + CHAP_MICROSOFT, /* code */ +#if PPP_SERVER + chapms_generate_challenge, + chapms_verify_response, +#endif /* PPP_SERVER */ + chapms_make_response, + NULL, /* check_success */ + chapms_handle_failure, +}; + +const struct chap_digest_type chapms2_digest = { + CHAP_MICROSOFT_V2, /* code */ +#if PPP_SERVER + chapms2_generate_challenge, + chapms2_verify_response, +#endif /* PPP_SERVER */ + chapms2_make_response, + chapms2_check_success, + chapms_handle_failure, +}; + +#endif /* PPP_SUPPORT && MSCHAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c new file mode 100644 index 00000000..26c6c30d --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c @@ -0,0 +1,465 @@ +/* + * demand.c - Support routines for demand-dialling. + * + * Copyright (c) 1996-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
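One nit in the disabled set_mppe_enc_types() block above: the guard

    if (policy != MPPE_ENC_POL_ENC_ALLOWED ||
        policy != MPPE_ENC_POL_ENC_REQUIRED)
        return;

is true for every policy value (nothing can equal both constants at once), so the function would always return before touching ccp_wantoptions. Since the whole block sits under #if 0 /* UNUSED */ it is harmless here, and the wording appears to be inherited from the original pppd sources; if the block were ever revived, the intended test is presumably the && form:

    /* presumed intent: bail out only when the policy is neither known value */
    if (policy != MPPE_ENC_POL_ENC_ALLOWED &&
        policy != MPPE_ENC_POL_ENC_REQUIRED)
        return;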
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && DEMAND_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef PPP_FILTER +#include +#endif + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/lcp.h" + +char *frame; +int framelen; +int framemax; +int escape_flag; +int flush_flag; +int fcs; + +struct packet { + int length; + struct packet *next; + unsigned char data[1]; +}; + +struct packet *pend_q; +struct packet *pend_qtail; + +static int active_packet (unsigned char *, int); + +/* + * demand_conf - configure the interface for doing dial-on-demand. + */ +void +demand_conf() +{ + int i; + const struct protent *protp; + +/* framemax = lcp_allowoptions[0].mru; + if (framemax < PPP_MRU) */ + framemax = PPP_MRU; + framemax += PPP_HDRLEN + PPP_FCSLEN; + frame = malloc(framemax); + if (frame == NULL) + novm("demand frame"); + framelen = 0; + pend_q = NULL; + escape_flag = 0; + flush_flag = 0; + fcs = PPP_INITFCS; + + netif_set_mtu(pcb, LWIP_MIN(lcp_allowoptions[0].mru, PPP_MRU)); + if (ppp_send_config(pcb, PPP_MRU, (u32_t) 0, 0, 0) < 0 + || ppp_recv_config(pcb, PPP_MRU, (u32_t) 0, 0, 0) < 0) + fatal("Couldn't set up demand-dialled PPP interface: %m"); + +#ifdef PPP_FILTER + set_filters(&pass_filter, &active_filter); +#endif + + /* + * Call the demand_conf procedure for each protocol that's got one. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + ((*protp->demand_conf)(pcb)); +/* FIXME: find a way to die() here */ +#if 0 + if (!((*protp->demand_conf)(pcb))) + die(1); +#endif +} + + +/* + * demand_block - set each network protocol to block further packets. + */ +void +demand_block() +{ + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + sifnpmode(pcb, protp->protocol & ~0x8000, NPMODE_QUEUE); + get_loop_output(); +} + +/* + * demand_discard - set each network protocol to discard packets + * with an error. + */ +void +demand_discard() +{ + struct packet *pkt, *nextpkt; + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + sifnpmode(pcb, protp->protocol & ~0x8000, NPMODE_ERROR); + get_loop_output(); + + /* discard all saved packets */ + for (pkt = pend_q; pkt != NULL; pkt = nextpkt) { + nextpkt = pkt->next; + free(pkt); + } + pend_q = NULL; + framelen = 0; + flush_flag = 0; + escape_flag = 0; + fcs = PPP_INITFCS; +} + +/* + * demand_unblock - set each enabled network protocol to pass packets. + */ +void +demand_unblock() +{ + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + sifnpmode(pcb, protp->protocol & ~0x8000, NPMODE_PASS); +} + +/* + * FCS lookup table as calculated by genfcstab. 
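The table that follows is the standard RFC 1662 FCS-16 table for the reflected CRC-16 polynomial 0x8408. loop_chars() below folds it in one octet at a time through PPP_FCS(), starting from PPP_INITFCS (0xffff) and accepting a frame when the running value equals PPP_GOODFCS (0xf0b8). A small stand-alone generator in the spirit of genfcstab, shown only to document where the constants come from:

    #include <stdio.h>

    int main(void)
    {
        unsigned int b, i, v;

        for (b = 0; b < 256; b++) {
            v = b;
            for (i = 0; i < 8; i++)          /* one step per bit, LSB first */
                v = (v & 1) ? (v >> 1) ^ 0x8408 : v >> 1;
            printf("0x%04x,%c", v & 0xffff, (b % 8) == 7 ? '\n' : ' ');
        }
        return 0;
    }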
+ */ +static u_short fcstab[256] = { + 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, + 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, + 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, + 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, + 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, + 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, + 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, + 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, + 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, + 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, + 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, + 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, + 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, + 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, + 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, + 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, + 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, + 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, + 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, + 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, + 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, + 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, + 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, + 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, + 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, + 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, + 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, + 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, + 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, + 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, + 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, + 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 +}; + +/* + * loop_chars - process characters received from the loopback. + * Calls loop_frame when a complete frame has been accumulated. + * Return value is 1 if we need to bring up the link, 0 otherwise. + */ +int +loop_chars(p, n) + unsigned char *p; + int n; +{ + int c, rv; + + rv = 0; + +/* check for synchronous connection... */ + + if ( (p[0] == 0xFF) && (p[1] == 0x03) ) { + rv = loop_frame(p,n); + return rv; + } + + for (; n > 0; --n) { + c = *p++; + if (c == PPP_FLAG) { + if (!escape_flag && !flush_flag + && framelen > 2 && fcs == PPP_GOODFCS) { + framelen -= 2; + if (loop_frame((unsigned char *)frame, framelen)) + rv = 1; + } + framelen = 0; + flush_flag = 0; + escape_flag = 0; + fcs = PPP_INITFCS; + continue; + } + if (flush_flag) + continue; + if (escape_flag) { + c ^= PPP_TRANS; + escape_flag = 0; + } else if (c == PPP_ESCAPE) { + escape_flag = 1; + continue; + } + if (framelen >= framemax) { + flush_flag = 1; + continue; + } + frame[framelen++] = c; + fcs = PPP_FCS(fcs, c); + } + return rv; +} + +/* + * loop_frame - given a frame obtained from the loopback, + * decide whether to bring up the link or not, and, if we want + * to transmit this frame later, put it on the pending queue. + * Return value is 1 if we need to bring up the link, 0 otherwise. + * We assume that the kernel driver has already applied the + * pass_filter, so we won't get packets it rejected. 
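A quick worked example of the async-HDLC un-escaping performed by loop_chars() above (RFC 1662 framing: 0x7e = PPP_FLAG delimits frames, 0x7d = PPP_ESCAPE marks that the next octet was XORed with 0x20 = PPP_TRANS):

    octets from the loopback:   7e ff 7d 23 00 21 45 ... 7d 5e ... <fcs-lo> <fcs-hi> 7e
    reassembled frame:             ff 03 00 21 45 ...    7e ...

    (7d 23 unstuffs to 0x23 ^ 0x20 = 0x03; 7d 5e unstuffs to 0x5e ^ 0x20 = 0x7e)

Once the running FCS matches, the two trailing FCS octets are dropped (framelen -= 2) and only the payload is handed to loop_frame().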
+ * We apply the active_filter to see if we want this packet to + * bring up the link. + */ +int +loop_frame(frame, len) + unsigned char *frame; + int len; +{ + struct packet *pkt; + + /* dbglog("from loop: %P", frame, len); */ + if (len < PPP_HDRLEN) + return 0; + if ((PPP_PROTOCOL(frame) & 0x8000) != 0) + return 0; /* shouldn't get any of these anyway */ + if (!active_packet(frame, len)) + return 0; + + pkt = (struct packet *) malloc(sizeof(struct packet) + len); + if (pkt != NULL) { + pkt->length = len; + pkt->next = NULL; + memcpy(pkt->data, frame, len); + if (pend_q == NULL) + pend_q = pkt; + else + pend_qtail->next = pkt; + pend_qtail = pkt; + } + return 1; +} + +/* + * demand_rexmit - Resend all those frames which we got via the + * loopback, now that the real serial link is up. + */ +void +demand_rexmit(proto, newip) + int proto; + u32_t newip; +{ + struct packet *pkt, *prev, *nextpkt; + unsigned short checksum; + unsigned short pkt_checksum = 0; + unsigned iphdr; + struct timeval tv; + char cv = 0; + char ipstr[16]; + + prev = NULL; + pkt = pend_q; + pend_q = NULL; + tv.tv_sec = 1; + tv.tv_usec = 0; + select(0,NULL,NULL,NULL,&tv); /* Sleep for 1 Seconds */ + for (; pkt != NULL; pkt = nextpkt) { + nextpkt = pkt->next; + if (PPP_PROTOCOL(pkt->data) == proto) { + if ( (proto == PPP_IP) && newip ) { + /* Get old checksum */ + + iphdr = (pkt->data[4] & 15) << 2; + checksum = *((unsigned short *) (pkt->data+14)); + if (checksum == 0xFFFF) { + checksum = 0; + } + + + if (pkt->data[13] == 17) { + pkt_checksum = *((unsigned short *) (pkt->data+10+iphdr)); + if (pkt_checksum) { + cv = 1; + if (pkt_checksum == 0xFFFF) { + pkt_checksum = 0; + } + } + else { + cv = 0; + } + } + + if (pkt->data[13] == 6) { + pkt_checksum = *((unsigned short *) (pkt->data+20+iphdr)); + cv = 1; + if (pkt_checksum == 0xFFFF) { + pkt_checksum = 0; + } + } + + /* Delete old Source-IP-Address */ + checksum -= *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + checksum -= *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + pkt_checksum -= *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + pkt_checksum -= *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + /* Change Source-IP-Address */ + * ((u32_t *) (pkt->data + 16)) = newip; + + /* Add new Source-IP-Address */ + checksum += *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + checksum += *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + pkt_checksum += *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + pkt_checksum += *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + /* Write new checksum */ + if (!checksum) { + checksum = 0xFFFF; + } + *((unsigned short *) (pkt->data+14)) = checksum; + if (pkt->data[13] == 6) { + *((unsigned short *) (pkt->data+20+iphdr)) = pkt_checksum; + } + if (cv && (pkt->data[13] == 17) ) { + *((unsigned short *) (pkt->data+10+iphdr)) = pkt_checksum; + } + + /* Log Packet */ + strcpy(ipstr,inet_ntoa(*( (struct in_addr *) (pkt->data+16)))); + if (pkt->data[13] == 1) { + syslog(LOG_INFO,"Open ICMP %s -> %s\n", + ipstr, + inet_ntoa(*( (struct in_addr *) (pkt->data+20)))); + } else { + syslog(LOG_INFO,"Open %s %s:%d -> %s:%d\n", + pkt->data[13] == 6 ? 
"TCP" : "UDP", + ipstr, + ntohs(*( (short *) (pkt->data+iphdr+4))), + inet_ntoa(*( (struct in_addr *) (pkt->data+20))), + ntohs(*( (short *) (pkt->data+iphdr+6)))); + } + } + output(pcb, pkt->data, pkt->length); + free(pkt); + } else { + if (prev == NULL) + pend_q = pkt; + else + prev->next = pkt; + prev = pkt; + } + } + pend_qtail = prev; + if (prev != NULL) + prev->next = NULL; +} + +/* + * Scan a packet to decide whether it is an "active" packet, + * that is, whether it is worth bringing up the link for. + */ +static int +active_packet(p, len) + unsigned char *p; + int len; +{ + int proto, i; + const struct protent *protp; + + if (len < PPP_HDRLEN) + return 0; + proto = PPP_PROTOCOL(p); +#ifdef PPP_FILTER + p[0] = 1; /* outbound packet indicator */ + if ((pass_filter.bf_len != 0 + && bpf_filter(pass_filter.bf_insns, p, len, len) == 0) + || (active_filter.bf_len != 0 + && bpf_filter(active_filter.bf_insns, p, len, len) == 0)) { + p[0] = 0xff; + return 0; + } + p[0] = 0xff; +#endif + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + if (protp->protocol < 0xC000 && (protp->protocol & ~0x8000) == proto) { + if (protp->active_pkt == NULL) + return 1; + return (*protp->active_pkt)(p, len); + } + } + return 0; /* not a supported protocol !!?? */ +} + +#endif /* PPP_SUPPORT && DEMAND_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c new file mode 100644 index 00000000..8fb56368 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c @@ -0,0 +1,2423 @@ +/* + * eap.c - Extensible Authentication Protocol for PPP (RFC 2284) + * + * Copyright (c) 2001 by Sun Microsystems, Inc. + * All rights reserved. + * + * Non-exclusive rights to redistribute, modify, translate, and use + * this software in source and binary forms, in whole or in part, is + * hereby granted, provided that the above copyright notice is + * duplicated in any source form, and that neither the name of the + * copyright holder nor the author is used to endorse or promote + * products derived from this software. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Original version by James Carlson + * + * This implementation of EAP supports MD5-Challenge and SRP-SHA1 + * authentication styles. Note that support of MD5-Challenge is a + * requirement of RFC 2284, and that it's essentially just a + * reimplementation of regular RFC 1994 CHAP using EAP messages. + * + * As an authenticator ("server"), there are multiple phases for each + * style. In the first phase of each style, the unauthenticated peer + * name is queried using the EAP Identity request type. If the + * "remotename" option is used, then this phase is skipped, because + * the peer's name is presumed to be known. + * + * For MD5-Challenge, there are two phases, and the second phase + * consists of sending the challenge itself and handling the + * associated response. + * + * For SRP-SHA1, there are four phases. The second sends 's', 'N', + * and 'g'. The reply contains 'A'. The third sends 'B', and the + * reply contains 'M1'. The forth sends the 'M2' value. + * + * As an authenticatee ("client"), there's just a single phase -- + * responding to the queries generated by the peer. EAP is an + * authenticator-driven protocol. + * + * Based on draft-ietf-pppext-eap-srp-03.txt. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && EAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/eap.h" +#include "netif/ppp/magic.h" +#include "netif/ppp/pppcrypt.h" + +#ifdef USE_SRP +#include +#include +#include +#endif /* USE_SRP */ + +#ifndef SHA_DIGESTSIZE +#define SHA_DIGESTSIZE 20 +#endif + +#ifdef USE_SRP +static char *pn_secret = NULL; /* Pseudonym generating secret */ +#endif + +#if PPP_OPTIONS +/* + * Command-line options. + */ +static option_t eap_option_list[] = { + { "eap-restart", o_int, &eap_states[0].es_server.ea_timeout, + "Set retransmit timeout for EAP Requests (server)" }, + { "eap-max-sreq", o_int, &eap_states[0].es_server.ea_maxrequests, + "Set max number of EAP Requests sent (server)" }, + { "eap-timeout", o_int, &eap_states[0].es_client.ea_timeout, + "Set time limit for peer EAP authentication" }, + { "eap-max-rreq", o_int, &eap_states[0].es_client.ea_maxrequests, + "Set max number of EAP Requests allows (client)" }, + { "eap-interval", o_int, &eap_states[0].es_rechallenge, + "Set interval for EAP rechallenge" }, +#ifdef USE_SRP + { "srp-interval", o_int, &eap_states[0].es_lwrechallenge, + "Set interval for SRP lightweight rechallenge" }, + { "srp-pn-secret", o_string, &pn_secret, + "Long term pseudonym generation secret" }, + { "srp-use-pseudonym", o_bool, &eap_states[0].es_usepseudo, + "Use pseudonym if offered one by server", 1 }, +#endif + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points. + */ +static void eap_init(ppp_pcb *pcb); +static void eap_input(ppp_pcb *pcb, u_char *inp, int inlen); +static void eap_protrej(ppp_pcb *pcb); +static void eap_lowerup(ppp_pcb *pcb); +static void eap_lowerdown(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int eap_printpkt(const u_char *inp, int inlen, + void (*)(void *arg, const char *fmt, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +const struct protent eap_protent = { + PPP_EAP, /* protocol number */ + eap_init, /* initialization procedure */ + eap_input, /* process a received packet */ + eap_protrej, /* process a received protocol-reject */ + eap_lowerup, /* lower layer has gone up */ + eap_lowerdown, /* lower layer has gone down */ + NULL, /* open the protocol */ + NULL, /* close the protocol */ +#if PRINTPKT_SUPPORT + eap_printpkt, /* print a packet in readable form */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, /* process a received data packet */ +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "EAP", /* text name of protocol */ + NULL, /* text name of corresponding data protocol */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + eap_option_list, /* list of command-line options */ + NULL, /* check requested options; assign defaults */ +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, /* configure interface for demand-dial */ + NULL /* say whether to bring up link for this pkt */ +#endif /* DEMAND_SUPPORT */ +}; + +#ifdef USE_SRP +/* + * A well-known 2048 bit modulus. 
+ */ +static const u_char wkmodulus[] = { + 0xAC, 0x6B, 0xDB, 0x41, 0x32, 0x4A, 0x9A, 0x9B, + 0xF1, 0x66, 0xDE, 0x5E, 0x13, 0x89, 0x58, 0x2F, + 0xAF, 0x72, 0xB6, 0x65, 0x19, 0x87, 0xEE, 0x07, + 0xFC, 0x31, 0x92, 0x94, 0x3D, 0xB5, 0x60, 0x50, + 0xA3, 0x73, 0x29, 0xCB, 0xB4, 0xA0, 0x99, 0xED, + 0x81, 0x93, 0xE0, 0x75, 0x77, 0x67, 0xA1, 0x3D, + 0xD5, 0x23, 0x12, 0xAB, 0x4B, 0x03, 0x31, 0x0D, + 0xCD, 0x7F, 0x48, 0xA9, 0xDA, 0x04, 0xFD, 0x50, + 0xE8, 0x08, 0x39, 0x69, 0xED, 0xB7, 0x67, 0xB0, + 0xCF, 0x60, 0x95, 0x17, 0x9A, 0x16, 0x3A, 0xB3, + 0x66, 0x1A, 0x05, 0xFB, 0xD5, 0xFA, 0xAA, 0xE8, + 0x29, 0x18, 0xA9, 0x96, 0x2F, 0x0B, 0x93, 0xB8, + 0x55, 0xF9, 0x79, 0x93, 0xEC, 0x97, 0x5E, 0xEA, + 0xA8, 0x0D, 0x74, 0x0A, 0xDB, 0xF4, 0xFF, 0x74, + 0x73, 0x59, 0xD0, 0x41, 0xD5, 0xC3, 0x3E, 0xA7, + 0x1D, 0x28, 0x1E, 0x44, 0x6B, 0x14, 0x77, 0x3B, + 0xCA, 0x97, 0xB4, 0x3A, 0x23, 0xFB, 0x80, 0x16, + 0x76, 0xBD, 0x20, 0x7A, 0x43, 0x6C, 0x64, 0x81, + 0xF1, 0xD2, 0xB9, 0x07, 0x87, 0x17, 0x46, 0x1A, + 0x5B, 0x9D, 0x32, 0xE6, 0x88, 0xF8, 0x77, 0x48, + 0x54, 0x45, 0x23, 0xB5, 0x24, 0xB0, 0xD5, 0x7D, + 0x5E, 0xA7, 0x7A, 0x27, 0x75, 0xD2, 0xEC, 0xFA, + 0x03, 0x2C, 0xFB, 0xDB, 0xF5, 0x2F, 0xB3, 0x78, + 0x61, 0x60, 0x27, 0x90, 0x04, 0xE5, 0x7A, 0xE6, + 0xAF, 0x87, 0x4E, 0x73, 0x03, 0xCE, 0x53, 0x29, + 0x9C, 0xCC, 0x04, 0x1C, 0x7B, 0xC3, 0x08, 0xD8, + 0x2A, 0x56, 0x98, 0xF3, 0xA8, 0xD0, 0xC3, 0x82, + 0x71, 0xAE, 0x35, 0xF8, 0xE9, 0xDB, 0xFB, 0xB6, + 0x94, 0xB5, 0xC8, 0x03, 0xD8, 0x9F, 0x7A, 0xE4, + 0x35, 0xDE, 0x23, 0x6D, 0x52, 0x5F, 0x54, 0x75, + 0x9B, 0x65, 0xE3, 0x72, 0xFC, 0xD6, 0x8E, 0xF2, + 0x0F, 0xA7, 0x11, 0x1F, 0x9E, 0x4A, 0xFF, 0x73 +}; +#endif + +#if PPP_SERVER +/* Local forward declarations. */ +static void eap_server_timeout(void *arg); +#endif /* PPP_SERVER */ + +/* + * Convert EAP state code to printable string for debug. + */ +static const char * eap_state_name(enum eap_state_code esc) +{ + static const char *state_names[] = { EAP_STATES }; + + return (state_names[(int)esc]); +} + +/* + * eap_init - Initialize state for an EAP user. This is currently + * called once by main() during start-up. + */ +static void eap_init(ppp_pcb *pcb) { + + BZERO(&pcb->eap, sizeof(eap_state)); +#if PPP_SERVER + pcb->eap.es_server.ea_id = magic(); +#endif /* PPP_SERVER */ +} + +/* + * eap_client_timeout - Give up waiting for the peer to send any + * Request messages. + */ +static void eap_client_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (!eap_client_active(pcb)) + return; + + ppp_error("EAP: timeout waiting for Request from peer"); + auth_withpeer_fail(pcb, PPP_EAP); + pcb->eap.es_client.ea_state = eapBadAuth; +} + +/* + * eap_authwithpeer - Authenticate to our peer (behave as client). + * + * Start client state and wait for requests. This is called only + * after eap_lowerup. + */ +void eap_authwithpeer(ppp_pcb *pcb, const char *localname) { + + if(NULL == localname) + return; + + /* Save the peer name we're given */ + pcb->eap.es_client.ea_name = localname; + pcb->eap.es_client.ea_namelen = strlen(localname); + + pcb->eap.es_client.ea_state = eapListen; + + /* + * Start a timer so that if the other end just goes + * silent, we don't sit here waiting forever. + */ + if (pcb->settings.eap_req_time > 0) + TIMEOUT(eap_client_timeout, pcb, + pcb->settings.eap_req_time); +} + +#if PPP_SERVER +/* + * Format a standard EAP Failure message and send it to the peer. 
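For reference, the Success and Failure messages built by this function and by eap_send_success() below are bare four-octet EAP headers with no data field (RFC 2284):

    /* on-wire layout, after the PPP_EAP protocol field:
     *
     *   0               1               2               3
     *   +---------------+---------------+-------------------------------+
     *   |     Code      |  Identifier   |          Length = 4           |
     *   +---------------+---------------+-------------------------------+
     *
     *   Code: 1 = Request, 2 = Response, 3 = Success, 4 = Failure
     *         (EAP_REQUEST / EAP_RESPONSE / EAP_SUCCESS / EAP_FAILURE)
     */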
+ * (Server operation) + */ +static void eap_send_failure(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + EAP_HEADERLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_FAILURE, outp); + pcb->eap.es_server.ea_id++; + PUTCHAR(pcb->eap.es_server.ea_id, outp); + PUTSHORT(EAP_HEADERLEN, outp); + + ppp_write(pcb, p); + + pcb->eap.es_server.ea_state = eapBadAuth; + auth_peer_fail(pcb, PPP_EAP); +} + +/* + * Format a standard EAP Success message and send it to the peer. + * (Server operation) + */ +static void eap_send_success(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + EAP_HEADERLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_SUCCESS, outp); + pcb->eap.es_server.ea_id++; + PUTCHAR(pcb->eap.es_server.ea_id, outp); + PUTSHORT(EAP_HEADERLEN, outp); + + ppp_write(pcb, p); + + auth_peer_success(pcb, PPP_EAP, 0, + pcb->eap.es_server.ea_peer, pcb->eap.es_server.ea_peerlen); +} +#endif /* PPP_SERVER */ + +#ifdef USE_SRP +/* + * Set DES key according to pseudonym-generating secret and current + * date. + */ +static bool +pncrypt_setkey(int timeoffs) +{ + struct tm *tp; + char tbuf[9]; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; + time_t reftime; + + if (pn_secret == NULL) + return (0); + reftime = time(NULL) + timeoffs; + tp = localtime(&reftime); + SHA1Init(&ctxt); + SHA1Update(&ctxt, pn_secret, strlen(pn_secret)); + strftime(tbuf, sizeof (tbuf), "%Y%m%d", tp); + SHA1Update(&ctxt, tbuf, strlen(tbuf)); + SHA1Final(dig, &ctxt); + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + return (DesSetkey(dig)); +} + +static char base64[] = +"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +struct b64state { + u32_t bs_bits; + int bs_offs; +}; + +static int +b64enc(bs, inp, inlen, outp) +struct b64state *bs; +u_char *inp; +int inlen; +u_char *outp; +{ + int outlen = 0; + + while (inlen > 0) { + bs->bs_bits = (bs->bs_bits << 8) | *inp++; + inlen--; + bs->bs_offs += 8; + if (bs->bs_offs >= 24) { + *outp++ = base64[(bs->bs_bits >> 18) & 0x3F]; + *outp++ = base64[(bs->bs_bits >> 12) & 0x3F]; + *outp++ = base64[(bs->bs_bits >> 6) & 0x3F]; + *outp++ = base64[bs->bs_bits & 0x3F]; + outlen += 4; + bs->bs_offs = 0; + bs->bs_bits = 0; + } + } + return (outlen); +} + +static int +b64flush(bs, outp) +struct b64state *bs; +u_char *outp; +{ + int outlen = 0; + + if (bs->bs_offs == 8) { + *outp++ = base64[(bs->bs_bits >> 2) & 0x3F]; + *outp++ = base64[(bs->bs_bits << 4) & 0x3F]; + outlen = 2; + } else if (bs->bs_offs == 16) { + *outp++ = base64[(bs->bs_bits >> 10) & 0x3F]; + *outp++ = base64[(bs->bs_bits >> 4) & 0x3F]; + *outp++ = base64[(bs->bs_bits << 2) & 0x3F]; + outlen = 3; + } + bs->bs_offs = 0; + bs->bs_bits = 0; + return (outlen); +} + +static int +b64dec(bs, inp, inlen, outp) +struct b64state *bs; +u_char *inp; +int inlen; +u_char *outp; +{ + int outlen = 0; + char *cp; + + while (inlen > 0) { + if ((cp = strchr(base64, *inp++)) == NULL) + break; + bs->bs_bits = (bs->bs_bits << 6) | (cp - base64); + inlen--; + bs->bs_offs += 6; + if (bs->bs_offs >= 8) { + *outp++ = bs->bs_bits >> (bs->bs_offs - 8); + outlen++; + bs->bs_offs -= 8; + } + } + 
return (outlen); +} +#endif /* USE_SRP */ + +#if PPP_SERVER +/* + * Assume that current waiting server state is complete and figure + * next state to use based on available authentication data. 'status' + * indicates if there was an error in handling the last query. It is + * 0 for success and non-zero for failure. + */ +static void eap_figure_next_state(ppp_pcb *pcb, int status) { +#ifdef USE_SRP + unsigned char secbuf[MAXSECRETLEN], clear[8], *sp, *dp; + struct t_pw tpw; + struct t_confent *tce, mytce; + char *cp, *cp2; + struct t_server *ts; + int id, i, plen, toffs; + u_char vals[2]; + struct b64state bs; +#endif /* USE_SRP */ + + pcb->settings.eap_timeout_time = pcb->eap.es_savedtime; + switch (pcb->eap.es_server.ea_state) { + case eapBadAuth: + return; + + case eapIdentify: +#ifdef USE_SRP + /* Discard any previous session. */ + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status != 0) { + pcb->eap.es_server.ea_state = eapBadAuth; + break; + } +#ifdef USE_SRP + /* If we've got a pseudonym, try to decode to real name. */ + if (pcb->eap.es_server.ea_peerlen > SRP_PSEUDO_LEN && + strncmp(pcb->eap.es_server.ea_peer, SRP_PSEUDO_ID, + SRP_PSEUDO_LEN) == 0 && + (pcb->eap.es_server.ea_peerlen - SRP_PSEUDO_LEN) * 3 / 4 < + sizeof (secbuf)) { + BZERO(&bs, sizeof (bs)); + plen = b64dec(&bs, + pcb->eap.es_server.ea_peer + SRP_PSEUDO_LEN, + pcb->eap.es_server.ea_peerlen - SRP_PSEUDO_LEN, + secbuf); + toffs = 0; + for (i = 0; i < 5; i++) { + pncrypt_setkey(toffs); + toffs -= 86400; + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + if (!DesDecrypt(secbuf, clear)) { + ppp_dbglog("no DES here; cannot decode " + "pseudonym"); + return; + } + id = *(unsigned char *)clear; + if (id + 1 <= plen && id + 9 > plen) + break; + } + if (plen % 8 == 0 && i < 5) { + /* + * Note that this is always shorter than the + * original stored string, so there's no need + * to realloc. + */ + if ((i = plen = *(unsigned char *)clear) > 7) + i = 7; + pcb->eap.es_server.ea_peerlen = plen; + dp = (unsigned char *)pcb->eap.es_server.ea_peer; + MEMCPY(dp, clear + 1, i); + plen -= i; + dp += i; + sp = secbuf + 8; + while (plen > 0) { + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + (void) DesDecrypt(sp, dp); + sp += 8; + dp += 8; + plen -= 8; + } + pcb->eap.es_server.ea_peer[ + pcb->eap.es_server.ea_peerlen] = '\0'; + ppp_dbglog("decoded pseudonym to \"%.*q\"", + pcb->eap.es_server.ea_peerlen, + pcb->eap.es_server.ea_peer); + } else { + ppp_dbglog("failed to decode real name"); + /* Stay in eapIdentfy state; requery */ + break; + } + } + /* Look up user in secrets database. 
*/ + if (get_srp_secret(pcb->eap.es_unit, pcb->eap.es_server.ea_peer, + pcb->eap.es_server.ea_name, (char *)secbuf, 1) != 0) { + /* Set up default in case SRP entry is bad */ + pcb->eap.es_server.ea_state = eapMD5Chall; + /* Get t_confent based on index in srp-secrets */ + id = strtol((char *)secbuf, &cp, 10); + if (*cp++ != ':' || id < 0) + break; + if (id == 0) { + mytce.index = 0; + mytce.modulus.data = (u_char *)wkmodulus; + mytce.modulus.len = sizeof (wkmodulus); + mytce.generator.data = (u_char *)"\002"; + mytce.generator.len = 1; + tce = &mytce; + } else if ((tce = gettcid(id)) != NULL) { + /* + * Client will have to verify this modulus/ + * generator combination, and that will take + * a while. Lengthen the timeout here. + */ + if (pcb->settings.eap_timeout_time > 0 && + pcb->settings.eap_timeout_time < 30) + pcb->settings.eap_timeout_time = 30; + } else { + break; + } + if ((cp2 = strchr(cp, ':')) == NULL) + break; + *cp2++ = '\0'; + tpw.pebuf.name = pcb->eap.es_server.ea_peer; + tpw.pebuf.password.len = t_fromb64((char *)tpw.pwbuf, + cp); + tpw.pebuf.password.data = tpw.pwbuf; + tpw.pebuf.salt.len = t_fromb64((char *)tpw.saltbuf, + cp2); + tpw.pebuf.salt.data = tpw.saltbuf; + if ((ts = t_serveropenraw(&tpw.pebuf, tce)) == NULL) + break; + pcb->eap.es_server.ea_session = (void *)ts; + pcb->eap.es_server.ea_state = eapSRP1; + vals[0] = pcb->eap.es_server.ea_id + 1; + vals[1] = EAPT_SRP; + t_serveraddexdata(ts, vals, 2); + /* Generate B; must call before t_servergetkey() */ + t_servergenexp(ts); + break; + } +#endif /* USE_SRP */ + pcb->eap.es_server.ea_state = eapMD5Chall; + break; + + case eapSRP1: +#ifdef USE_SRP + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL && status != 0) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status == 1) { + pcb->eap.es_server.ea_state = eapMD5Chall; + } else if (status != 0 || pcb->eap.es_server.ea_session == NULL) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapSRP2; + } + break; + + case eapSRP2: +#ifdef USE_SRP + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL && status != 0) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status != 0 || pcb->eap.es_server.ea_session == NULL) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapSRP3; + } + break; + + case eapSRP3: + case eapSRP4: +#ifdef USE_SRP + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL && status != 0) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status != 0 || pcb->eap.es_server.ea_session == NULL) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapOpen; + } + break; + + case eapMD5Chall: + if (status != 0) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapOpen; + } + break; + + default: + pcb->eap.es_server.ea_state = eapBadAuth; + break; + } + if (pcb->eap.es_server.ea_state == eapBadAuth) + eap_send_failure(pcb); +} + +/* + * Format an EAP Request message and send it to the peer. Message + * type depends on current state. 
(Server operation) + */ +static void eap_send_request(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + u_char *lenloc; + int outlen; + int len; + const char *str; +#ifdef USE_SRP + struct t_server *ts; + u_char clear[8], cipher[8], dig[SHA_DIGESTSIZE], *optr, *cp; + int i, j; + struct b64state b64; + SHA1_CTX ctxt; +#endif /* USE_SRP */ + + /* Handle both initial auth and restart */ + if (pcb->eap.es_server.ea_state < eapIdentify && + pcb->eap.es_server.ea_state != eapInitial) { + pcb->eap.es_server.ea_state = eapIdentify; +#if PPP_REMOTENAME + if (pcb->settings.explicit_remote && pcb->remote_name) { + /* + * If we already know the peer's + * unauthenticated name, then there's no + * reason to ask. Go to next state instead. + */ + int len = (int)strlen(pcb->remote_name); + if (len > MAXNAMELEN) { + len = MAXNAMELEN; + } + MEMCPY(pcb->eap.es_server.ea_peer, pcb->remote_name, len); + pcb->eap.es_server.ea_peer[len] = '\0'; + pcb->eap.es_server.ea_peerlen = len; + eap_figure_next_state(pcb, 0); + } +#endif /* PPP_REMOTENAME */ + } + + if (pcb->settings.eap_max_transmits > 0 && + pcb->eap.es_server.ea_requests >= pcb->settings.eap_max_transmits) { + if (pcb->eap.es_server.ea_responses > 0) + ppp_error("EAP: too many Requests sent"); + else + ppp_error("EAP: no response to Requests"); + eap_send_failure(pcb); + return; + } + + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_CTRL_PBUF_MAX_SIZE), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_REQUEST, outp); + PUTCHAR(pcb->eap.es_server.ea_id, outp); + lenloc = outp; + INCPTR(2, outp); + + switch (pcb->eap.es_server.ea_state) { + case eapIdentify: + PUTCHAR(EAPT_IDENTITY, outp); + str = "Name"; + len = strlen(str); + MEMCPY(outp, str, len); + INCPTR(len, outp); + break; + + case eapMD5Chall: + PUTCHAR(EAPT_MD5CHAP, outp); + /* + * pick a random challenge length between + * EAP_MIN_CHALLENGE_LENGTH and EAP_MAX_CHALLENGE_LENGTH + */ + pcb->eap.es_challen = EAP_MIN_CHALLENGE_LENGTH + + magic_pow(EAP_MIN_MAX_POWER_OF_TWO_CHALLENGE_LENGTH); + PUTCHAR(pcb->eap.es_challen, outp); + magic_random_bytes(pcb->eap.es_challenge, pcb->eap.es_challen); + MEMCPY(outp, pcb->eap.es_challenge, pcb->eap.es_challen); + INCPTR(pcb->eap.es_challen, outp); + MEMCPY(outp, pcb->eap.es_server.ea_name, pcb->eap.es_server.ea_namelen); + INCPTR(pcb->eap.es_server.ea_namelen, outp); + break; + +#ifdef USE_SRP + case eapSRP1: + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_CHALLENGE, outp); + + PUTCHAR(pcb->eap.es_server.ea_namelen, outp); + MEMCPY(outp, pcb->eap.es_server.ea_name, pcb->eap.es_server.ea_namelen); + INCPTR(pcb->eap.es_server.ea_namelen, outp); + + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + PUTCHAR(ts->s.len, outp); + MEMCPY(outp, ts->s.data, ts->s.len); + INCPTR(ts->s.len, outp); + + if (ts->g.len == 1 && ts->g.data[0] == 2) { + PUTCHAR(0, outp); + } else { + PUTCHAR(ts->g.len, outp); + MEMCPY(outp, ts->g.data, ts->g.len); + INCPTR(ts->g.len, outp); + } + + if (ts->n.len != sizeof (wkmodulus) || + BCMP(ts->n.data, wkmodulus, sizeof (wkmodulus)) != 0) { + MEMCPY(outp, ts->n.data, ts->n.len); + INCPTR(ts->n.len, outp); + } + break; + + case eapSRP2: + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_SKEY, outp); + + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + MEMCPY(outp, ts->B.data, ts->B.len); + INCPTR(ts->B.len, outp); + break; + + case eapSRP3: + PUTCHAR(EAPT_SRP, outp); + 
PUTCHAR(EAPSRP_SVALIDATOR, outp); + PUTLONG(SRPVAL_EBIT, outp); + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + MEMCPY(outp, t_serverresponse(ts), SHA_DIGESTSIZE); + INCPTR(SHA_DIGESTSIZE, outp); + + if (pncrypt_setkey(0)) { + /* Generate pseudonym */ + optr = outp; + cp = (unsigned char *)pcb->eap.es_server.ea_peer; + if ((j = i = pcb->eap.es_server.ea_peerlen) > 7) + j = 7; + clear[0] = i; + MEMCPY(clear + 1, cp, j); + i -= j; + cp += j; + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + if (!DesEncrypt(clear, cipher)) { + ppp_dbglog("no DES here; not generating pseudonym"); + break; + } + BZERO(&b64, sizeof (b64)); + outp++; /* space for pseudonym length */ + outp += b64enc(&b64, cipher, 8, outp); + while (i >= 8) { + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + (void) DesEncrypt(cp, cipher); + outp += b64enc(&b64, cipher, 8, outp); + cp += 8; + i -= 8; + } + if (i > 0) { + MEMCPY(clear, cp, i); + cp += i; + magic_random_bytes(cp, 8-i); + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + (void) DesEncrypt(clear, cipher); + outp += b64enc(&b64, cipher, 8, outp); + } + outp += b64flush(&b64, outp); + + /* Set length and pad out to next 20 octet boundary */ + i = outp - optr - 1; + *optr = i; + i %= SHA_DIGESTSIZE; + if (i != 0) { + magic_random_bytes(outp, SHA_DIGESTSIZE-i); + INCPTR(SHA_DIGESTSIZE-i, outp); + } + + /* Obscure the pseudonym with SHA1 hash */ + SHA1Init(&ctxt); + SHA1Update(&ctxt, &pcb->eap.es_server.ea_id, 1); + SHA1Update(&ctxt, pcb->eap.es_server.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, pcb->eap.es_server.ea_peer, + pcb->eap.es_server.ea_peerlen); + while (optr < outp) { + SHA1Final(dig, &ctxt); + cp = dig; + while (cp < dig + SHA_DIGESTSIZE) + *optr++ ^= *cp++; + SHA1Init(&ctxt); + SHA1Update(&ctxt, &pcb->eap.es_server.ea_id, 1); + SHA1Update(&ctxt, pcb->eap.es_server.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, optr - SHA_DIGESTSIZE, + SHA_DIGESTSIZE); + } + } + break; + + case eapSRP4: + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_LWRECHALLENGE, outp); + pcb->eap.es_challen = EAP_MIN_CHALLENGE_LENGTH + + magic_pow(EAP_MIN_MAX_POWER_OF_TWO_CHALLENGE_LENGTH); + magic_random_bytes(pcb->eap.es_challenge, pcb->eap.es_challen); + MEMCPY(outp, pcb->eap.es_challenge, pcb->eap.es_challen); + INCPTR(pcb->eap.es_challen, outp); + break; +#endif /* USE_SRP */ + + default: + return; + } + + outlen = (outp - (unsigned char*)p->payload) - PPP_HDRLEN; + PUTSHORT(outlen, lenloc); + + pbuf_realloc(p, outlen + PPP_HDRLEN); + ppp_write(pcb, p); + + pcb->eap.es_server.ea_requests++; + + if (pcb->settings.eap_timeout_time > 0) + TIMEOUT(eap_server_timeout, pcb, pcb->settings.eap_timeout_time); +} + +/* + * eap_authpeer - Authenticate our peer (behave as server). + * + * Start server state and send first request. This is called only + * after eap_lowerup. + */ +void eap_authpeer(ppp_pcb *pcb, const char *localname) { + + /* Save the name we're given. */ + pcb->eap.es_server.ea_name = localname; + pcb->eap.es_server.ea_namelen = strlen(localname); + + pcb->eap.es_savedtime = pcb->settings.eap_timeout_time; + + /* Lower layer up yet? 
*/ + if (pcb->eap.es_server.ea_state == eapInitial || + pcb->eap.es_server.ea_state == eapPending) { + pcb->eap.es_server.ea_state = eapPending; + return; + } + + pcb->eap.es_server.ea_state = eapPending; + + /* ID number not updated here intentionally; hashed into M1 */ + eap_send_request(pcb); +} + +/* + * eap_server_timeout - Retransmission timer for sending Requests + * expired. + */ +static void eap_server_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (!eap_server_active(pcb)) + return; + + /* EAP ID number must not change on timeout. */ + eap_send_request(pcb); +} + +/* + * When it's time to send rechallenge the peer, this timeout is + * called. Once the rechallenge is successful, the response handler + * will restart the timer. If it fails, then the link is dropped. + */ +static void eap_rechallenge(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->eap.es_server.ea_state != eapOpen && + pcb->eap.es_server.ea_state != eapSRP4) + return; + + pcb->eap.es_server.ea_requests = 0; + pcb->eap.es_server.ea_state = eapIdentify; + eap_figure_next_state(pcb, 0); + pcb->eap.es_server.ea_id++; + eap_send_request(pcb); +} + +static void srp_lwrechallenge(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->eap.es_server.ea_state != eapOpen || + pcb->eap.es_server.ea_type != EAPT_SRP) + return; + + pcb->eap.es_server.ea_requests = 0; + pcb->eap.es_server.ea_state = eapSRP4; + pcb->eap.es_server.ea_id++; + eap_send_request(pcb); +} +#endif /* PPP_SERVER */ + +/* + * eap_lowerup - The lower layer is now up. + * + * This is called before either eap_authpeer or eap_authwithpeer. See + * link_established() in auth.c. All that's necessary here is to + * return to closed state so that those two routines will do the right + * thing. + */ +static void eap_lowerup(ppp_pcb *pcb) { + pcb->eap.es_client.ea_state = eapClosed; +#if PPP_SERVER + pcb->eap.es_server.ea_state = eapClosed; +#endif /* PPP_SERVER */ +} + +/* + * eap_lowerdown - The lower layer is now down. + * + * Cancel all timeouts and return to initial state. + */ +static void eap_lowerdown(ppp_pcb *pcb) { + + if (eap_client_active(pcb) && pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } +#if PPP_SERVER + if (eap_server_active(pcb)) { + if (pcb->settings.eap_timeout_time > 0) { + UNTIMEOUT(eap_server_timeout, pcb); + } + } else { + if ((pcb->eap.es_server.ea_state == eapOpen || + pcb->eap.es_server.ea_state == eapSRP4) && + pcb->eap.es_rechallenge > 0) { + UNTIMEOUT(eap_rechallenge, (void *)pcb); + } + if (pcb->eap.es_server.ea_state == eapOpen && + pcb->eap.es_lwrechallenge > 0) { + UNTIMEOUT(srp_lwrechallenge, (void *)pcb); + } + } + + pcb->eap.es_client.ea_state = pcb->eap.es_server.ea_state = eapInitial; + pcb->eap.es_client.ea_requests = pcb->eap.es_server.ea_requests = 0; +#endif /* PPP_SERVER */ +} + +/* + * eap_protrej - Peer doesn't speak this protocol. + * + * This shouldn't happen. If it does, it represents authentication + * failure. + */ +static void eap_protrej(ppp_pcb *pcb) { + + if (eap_client_active(pcb)) { + ppp_error("EAP authentication failed due to Protocol-Reject"); + auth_withpeer_fail(pcb, PPP_EAP); + } +#if PPP_SERVER + if (eap_server_active(pcb)) { + ppp_error("EAP authentication of peer failed on Protocol-Reject"); + auth_peer_fail(pcb, PPP_EAP); + } +#endif /* PPP_SERVER */ + eap_lowerdown(pcb); +} + +/* + * Format and send a regular EAP Response message. 
+ */ +static void eap_send_response(ppp_pcb *pcb, u_char id, u_char typenum, const u_char *str, int lenstr) { + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + sizeof (u_char) + lenstr; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(typenum, outp); + if (lenstr > 0) { + MEMCPY(outp, str, lenstr); + } + + ppp_write(pcb, p); +} + +/* + * Format and send an MD5-Challenge EAP Response message. + */ +static void eap_chap_response(ppp_pcb *pcb, u_char id, u_char *hash, const char *name, int namelen) { + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + MD5_SIGNATURE_SIZE + + namelen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_MD5CHAP, outp); + PUTCHAR(MD5_SIGNATURE_SIZE, outp); + MEMCPY(outp, hash, MD5_SIGNATURE_SIZE); + INCPTR(MD5_SIGNATURE_SIZE, outp); + if (namelen > 0) { + MEMCPY(outp, name, namelen); + } + + ppp_write(pcb, p); +} + +#ifdef USE_SRP +/* + * Format and send a SRP EAP Response message. + */ +static void +eap_srp_response(esp, id, subtypenum, str, lenstr) +eap_state *esp; +u_char id; +u_char subtypenum; +u_char *str; +int lenstr; +{ + ppp_pcb *pcb = &ppp_pcb_list[pcb->eap.es_unit]; + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + lenstr; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(subtypenum, outp); + if (lenstr > 0) { + MEMCPY(outp, str, lenstr); + } + + ppp_write(pcb, p); +} + +/* + * Format and send a SRP EAP Client Validator Response message. 
+ */ +static void +eap_srpval_response(esp, id, flags, str) +eap_state *esp; +u_char id; +u32_t flags; +u_char *str; +{ + ppp_pcb *pcb = &ppp_pcb_list[pcb->eap.es_unit]; + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + sizeof (u32_t) + + SHA_DIGESTSIZE; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_CVALIDATOR, outp); + PUTLONG(flags, outp); + MEMCPY(outp, str, SHA_DIGESTSIZE); + + ppp_write(pcb, p); +} +#endif /* USE_SRP */ + +static void eap_send_nak(ppp_pcb *pcb, u_char id, u_char type) { + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char); + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_NAK, outp); + PUTCHAR(type, outp); + + ppp_write(pcb, p); +} + +#ifdef USE_SRP +static char * +name_of_pn_file() +{ + char *user, *path, *file; + struct passwd *pw; + size_t pl; + static bool pnlogged = 0; + + pw = getpwuid(getuid()); + if (pw == NULL || (user = pw->pw_dir) == NULL || user[0] == 0) { + errno = EINVAL; + return (NULL); + } + file = _PATH_PSEUDONYM; + pl = strlen(user) + strlen(file) + 2; + path = malloc(pl); + if (path == NULL) + return (NULL); + (void) slprintf(path, pl, "%s/%s", user, file); + if (!pnlogged) { + ppp_dbglog("pseudonym file: %s", path); + pnlogged = 1; + } + return (path); +} + +static int +open_pn_file(modebits) +mode_t modebits; +{ + char *path; + int fd, err; + + if ((path = name_of_pn_file()) == NULL) + return (-1); + fd = open(path, modebits, S_IRUSR | S_IWUSR); + err = errno; + free(path); + errno = err; + return (fd); +} + +static void +remove_pn_file() +{ + char *path; + + if ((path = name_of_pn_file()) != NULL) { + (void) unlink(path); + (void) free(path); + } +} + +static void +write_pseudonym(esp, inp, len, id) +eap_state *esp; +u_char *inp; +int len, id; +{ + u_char val; + u_char *datp, *digp; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; + int dsize, fd, olen = len; + + /* + * Do the decoding by working backwards. This eliminates the need + * to save the decoded output in a separate buffer. 
+ */ + val = id; + while (len > 0) { + if ((dsize = len % SHA_DIGESTSIZE) == 0) + dsize = SHA_DIGESTSIZE; + len -= dsize; + datp = inp + len; + SHA1Init(&ctxt); + SHA1Update(&ctxt, &val, 1); + SHA1Update(&ctxt, pcb->eap.es_client.ea_skey, SESSION_KEY_LEN); + if (len > 0) { + SHA1Update(&ctxt, datp, SHA_DIGESTSIZE); + } else { + SHA1Update(&ctxt, pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + } + SHA1Final(dig, &ctxt); + for (digp = dig; digp < dig + SHA_DIGESTSIZE; digp++) + *datp++ ^= *digp; + } + + /* Now check that the result is sane */ + if (olen <= 0 || *inp + 1 > olen) { + ppp_dbglog("EAP: decoded pseudonym is unusable <%.*B>", olen, inp); + return; + } + + /* Save it away */ + fd = open_pn_file(O_WRONLY | O_CREAT | O_TRUNC); + if (fd < 0) { + ppp_dbglog("EAP: error saving pseudonym: %m"); + return; + } + len = write(fd, inp + 1, *inp); + if (close(fd) != -1 && len == *inp) { + ppp_dbglog("EAP: saved pseudonym"); + pcb->eap.es_usedpseudo = 0; + } else { + ppp_dbglog("EAP: failed to save pseudonym"); + remove_pn_file(); + } +} +#endif /* USE_SRP */ + +/* + * eap_request - Receive EAP Request message (client mode). + */ +static void eap_request(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char typenum; + u_char vallen; + int secret_len; + char secret[MAXSECRETLEN]; + char rhostname[MAXNAMELEN]; + lwip_md5_context mdContext; + u_char hash[MD5_SIGNATURE_SIZE]; +#ifdef USE_SRP + struct t_client *tc; + struct t_num sval, gval, Nval, *Ap, Bval; + u_char vals[2]; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; + int fd; +#endif /* USE_SRP */ + + /* + * Note: we update es_client.ea_id *only if* a Response + * message is being generated. Otherwise, we leave it the + * same for duplicate detection purposes. + */ + + pcb->eap.es_client.ea_requests++; + if (pcb->settings.eap_allow_req != 0 && + pcb->eap.es_client.ea_requests > pcb->settings.eap_allow_req) { + ppp_info("EAP: received too many Request messages"); + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } + auth_withpeer_fail(pcb, PPP_EAP); + return; + } + + if (len <= 0) { + ppp_error("EAP: empty Request message discarded"); + return; + } + + GETCHAR(typenum, inp); + len--; + + switch (typenum) { + case EAPT_IDENTITY: + if (len > 0) + ppp_info("EAP: Identity prompt \"%.*q\"", len, inp); +#ifdef USE_SRP + if (pcb->eap.es_usepseudo && + (pcb->eap.es_usedpseudo == 0 || + (pcb->eap.es_usedpseudo == 1 && + id == pcb->eap.es_client.ea_id))) { + pcb->eap.es_usedpseudo = 1; + /* Try to get a pseudonym */ + if ((fd = open_pn_file(O_RDONLY)) >= 0) { + strcpy(rhostname, SRP_PSEUDO_ID); + len = read(fd, rhostname + SRP_PSEUDO_LEN, + sizeof (rhostname) - SRP_PSEUDO_LEN); + /* XXX NAI unsupported */ + if (len > 0) { + eap_send_response(pcb, id, typenum, + rhostname, len + SRP_PSEUDO_LEN); + } + (void) close(fd); + if (len > 0) + break; + } + } + /* Stop using pseudonym now. */ + if (pcb->eap.es_usepseudo && pcb->eap.es_usedpseudo != 2) { + remove_pn_file(); + pcb->eap.es_usedpseudo = 2; + } +#endif /* USE_SRP */ + eap_send_response(pcb, id, typenum, (const u_char*)pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + break; + + case EAPT_NOTIFICATION: + if (len > 0) + ppp_info("EAP: Notification \"%.*q\"", len, inp); + eap_send_response(pcb, id, typenum, NULL, 0); + break; + + case EAPT_NAK: + /* + * Avoid the temptation to send Response Nak in reply + * to Request Nak here. It can only lead to trouble. 
+ */ + ppp_warn("EAP: unexpected Nak in Request; ignored"); + /* Return because we're waiting for something real. */ + return; + + case EAPT_MD5CHAP: + if (len < 1) { + ppp_error("EAP: received MD5-Challenge with no data"); + /* Bogus request; wait for something real. */ + return; + } + GETCHAR(vallen, inp); + len--; + if (vallen < 8 || vallen > len) { + ppp_error("EAP: MD5-Challenge with bad length %d (8..%d)", + vallen, len); + /* Try something better. */ + eap_send_nak(pcb, id, EAPT_SRP); + break; + } + + /* Not so likely to happen. */ + if (vallen >= len + sizeof (rhostname)) { + ppp_dbglog("EAP: trimming really long peer name down"); + MEMCPY(rhostname, inp + vallen, sizeof (rhostname) - 1); + rhostname[sizeof (rhostname) - 1] = '\0'; + } else { + MEMCPY(rhostname, inp + vallen, len - vallen); + rhostname[len - vallen] = '\0'; + } + +#if PPP_REMOTENAME + /* In case the remote doesn't give us his name. */ + if (pcb->settings.explicit_remote || + (pcb->settings.remote_name[0] != '\0' && vallen == len)) + strlcpy(rhostname, pcb->settings.remote_name, sizeof (rhostname)); +#endif /* PPP_REMOTENAME */ + + /* + * Get the secret for authenticating ourselves with + * the specified host. + */ + if (!get_secret(pcb, pcb->eap.es_client.ea_name, + rhostname, secret, &secret_len, 0)) { + ppp_dbglog("EAP: no MD5 secret for auth to %q", rhostname); + eap_send_nak(pcb, id, EAPT_SRP); + break; + } + lwip_md5_init(&mdContext); + lwip_md5_starts(&mdContext); + typenum = id; + lwip_md5_update(&mdContext, &typenum, 1); + lwip_md5_update(&mdContext, (u_char *)secret, secret_len); + BZERO(secret, sizeof (secret)); + lwip_md5_update(&mdContext, inp, vallen); + lwip_md5_finish(&mdContext, hash); + lwip_md5_free(&mdContext); + eap_chap_response(pcb, id, hash, pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + break; + +#ifdef USE_SRP + case EAPT_SRP: + if (len < 1) { + ppp_error("EAP: received empty SRP Request"); + /* Bogus request; wait for something real. */ + return; + } + + /* Get subtype */ + GETCHAR(vallen, inp); + len--; + switch (vallen) { + case EAPSRP_CHALLENGE: + tc = NULL; + if (pcb->eap.es_client.ea_session != NULL) { + tc = (struct t_client *)pcb->eap.es_client. + ea_session; + /* + * If this is a new challenge, then start + * over with a new client session context. + * Otherwise, just resend last response. + */ + if (id != pcb->eap.es_client.ea_id) { + t_clientclose(tc); + pcb->eap.es_client.ea_session = NULL; + tc = NULL; + } + } + /* No session key just yet */ + pcb->eap.es_client.ea_skey = NULL; + if (tc == NULL) { + int rhostnamelen; + + GETCHAR(vallen, inp); + len--; + if (vallen >= len) { + ppp_error("EAP: badly-formed SRP Challenge" + " (name)"); + /* Ignore badly-formed messages */ + return; + } + MEMCPY(rhostname, inp, vallen); + rhostname[vallen] = '\0'; + INCPTR(vallen, inp); + len -= vallen; + + /* + * In case the remote doesn't give us his name, + * use configured name. 
+ */ + if (explicit_remote || + (remote_name[0] != '\0' && vallen == 0)) { + strlcpy(rhostname, remote_name, + sizeof (rhostname)); + } + + rhostnamelen = (int)strlen(rhostname); + if (rhostnamelen > MAXNAMELEN) { + rhostnamelen = MAXNAMELEN; + } + MEMCPY(pcb->eap.es_client.ea_peer, rhostname, rhostnamelen); + pcb->eap.es_client.ea_peer[rhostnamelen] = '\0'; + pcb->eap.es_client.ea_peerlen = rhostnamelen; + + GETCHAR(vallen, inp); + len--; + if (vallen >= len) { + ppp_error("EAP: badly-formed SRP Challenge" + " (s)"); + /* Ignore badly-formed messages */ + return; + } + sval.data = inp; + sval.len = vallen; + INCPTR(vallen, inp); + len -= vallen; + + GETCHAR(vallen, inp); + len--; + if (vallen > len) { + ppp_error("EAP: badly-formed SRP Challenge" + " (g)"); + /* Ignore badly-formed messages */ + return; + } + /* If no generator present, then use value 2 */ + if (vallen == 0) { + gval.data = (u_char *)"\002"; + gval.len = 1; + } else { + gval.data = inp; + gval.len = vallen; + } + INCPTR(vallen, inp); + len -= vallen; + + /* + * If no modulus present, then use well-known + * value. + */ + if (len == 0) { + Nval.data = (u_char *)wkmodulus; + Nval.len = sizeof (wkmodulus); + } else { + Nval.data = inp; + Nval.len = len; + } + tc = t_clientopen(pcb->eap.es_client.ea_name, + &Nval, &gval, &sval); + if (tc == NULL) { + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + pcb->eap.es_client.ea_session = (void *)tc; + + /* Add Challenge ID & type to verifier */ + vals[0] = id; + vals[1] = EAPT_SRP; + t_clientaddexdata(tc, vals, 2); + } + Ap = t_clientgenexp(tc); + eap_srp_response(esp, id, EAPSRP_CKEY, Ap->data, + Ap->len); + break; + + case EAPSRP_SKEY: + tc = (struct t_client *)pcb->eap.es_client.ea_session; + if (tc == NULL) { + ppp_warn("EAP: peer sent Subtype 2 without 1"); + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + if (pcb->eap.es_client.ea_skey != NULL) { + /* + * ID number should not change here. Warn + * if it does (but otherwise ignore). + */ + if (id != pcb->eap.es_client.ea_id) { + ppp_warn("EAP: ID changed from %d to %d " + "in SRP Subtype 2 rexmit", + pcb->eap.es_client.ea_id, id); + } + } else { + if (get_srp_secret(pcb->eap.es_unit, + pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_peer, secret, 0) == 0) { + /* + * Can't work with this peer because + * the secret is missing. Just give + * up. + */ + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + Bval.data = inp; + Bval.len = len; + t_clientpasswd(tc, secret); + BZERO(secret, sizeof (secret)); + pcb->eap.es_client.ea_skey = + t_clientgetkey(tc, &Bval); + if (pcb->eap.es_client.ea_skey == NULL) { + /* Server is rogue; stop now */ + ppp_error("EAP: SRP server is rogue"); + goto client_failure; + } + } + eap_srpval_response(esp, id, SRPVAL_EBIT, + t_clientresponse(tc)); + break; + + case EAPSRP_SVALIDATOR: + tc = (struct t_client *)pcb->eap.es_client.ea_session; + if (tc == NULL || pcb->eap.es_client.ea_skey == NULL) { + ppp_warn("EAP: peer sent Subtype 3 without 1/2"); + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + /* + * If we're already open, then this ought to be a + * duplicate. Otherwise, check that the server is + * who we think it is. 
+ */ + if (pcb->eap.es_client.ea_state == eapOpen) { + if (id != pcb->eap.es_client.ea_id) { + ppp_warn("EAP: ID changed from %d to %d " + "in SRP Subtype 3 rexmit", + pcb->eap.es_client.ea_id, id); + } + } else { + len -= sizeof (u32_t) + SHA_DIGESTSIZE; + if (len < 0 || t_clientverify(tc, inp + + sizeof (u32_t)) != 0) { + ppp_error("EAP: SRP server verification " + "failed"); + goto client_failure; + } + GETLONG(pcb->eap.es_client.ea_keyflags, inp); + /* Save pseudonym if user wants it. */ + if (len > 0 && pcb->eap.es_usepseudo) { + INCPTR(SHA_DIGESTSIZE, inp); + write_pseudonym(esp, inp, len, id); + } + } + /* + * We've verified our peer. We're now mostly done, + * except for waiting on the regular EAP Success + * message. + */ + eap_srp_response(esp, id, EAPSRP_ACK, NULL, 0); + break; + + case EAPSRP_LWRECHALLENGE: + if (len < 4) { + ppp_warn("EAP: malformed Lightweight rechallenge"); + return; + } + SHA1Init(&ctxt); + vals[0] = id; + SHA1Update(&ctxt, vals, 1); + SHA1Update(&ctxt, pcb->eap.es_client.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, inp, len); + SHA1Update(&ctxt, pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + SHA1Final(dig, &ctxt); + eap_srp_response(esp, id, EAPSRP_LWRECHALLENGE, dig, + SHA_DIGESTSIZE); + break; + + default: + ppp_error("EAP: unknown SRP Subtype %d", vallen); + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + break; +#endif /* USE_SRP */ + + default: + ppp_info("EAP: unknown authentication type %d; Naking", typenum); + eap_send_nak(pcb, id, EAPT_SRP); + break; + } + + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + TIMEOUT(eap_client_timeout, pcb, + pcb->settings.eap_req_time); + } + return; + +#ifdef USE_SRP +client_failure: + pcb->eap.es_client.ea_state = eapBadAuth; + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, (void *)esp); + } + pcb->eap.es_client.ea_session = NULL; + t_clientclose(tc); + auth_withpeer_fail(pcb, PPP_EAP); +#endif /* USE_SRP */ +} + +#if PPP_SERVER +/* + * eap_response - Receive EAP Response message (server mode). 
+ */ +static void eap_response(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char typenum; + u_char vallen; + int secret_len; + char secret[MAXSECRETLEN]; + char rhostname[MAXNAMELEN]; + lwip_md5_context mdContext; + u_char hash[MD5_SIGNATURE_SIZE]; +#ifdef USE_SRP + struct t_server *ts; + struct t_num A; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; +#endif /* USE_SRP */ + + if (pcb->eap.es_server.ea_id != id) { + ppp_dbglog("EAP: discarding Response %d; expected ID %d", id, + pcb->eap.es_server.ea_id); + return; + } + + pcb->eap.es_server.ea_responses++; + + if (len <= 0) { + ppp_error("EAP: empty Response message discarded"); + return; + } + + GETCHAR(typenum, inp); + len--; + + switch (typenum) { + case EAPT_IDENTITY: + if (pcb->eap.es_server.ea_state != eapIdentify) { + ppp_dbglog("EAP discarding unwanted Identify \"%.q\"", len, + inp); + break; + } + ppp_info("EAP: unauthenticated peer name \"%.*q\"", len, inp); + if (len > MAXNAMELEN) { + len = MAXNAMELEN; + } + MEMCPY(pcb->eap.es_server.ea_peer, inp, len); + pcb->eap.es_server.ea_peer[len] = '\0'; + pcb->eap.es_server.ea_peerlen = len; + eap_figure_next_state(pcb, 0); + break; + + case EAPT_NOTIFICATION: + ppp_dbglog("EAP unexpected Notification; response discarded"); + break; + + case EAPT_NAK: + if (len < 1) { + ppp_info("EAP: Nak Response with no suggested protocol"); + eap_figure_next_state(pcb, 1); + break; + } + + GETCHAR(vallen, inp); + len--; + + if ( +#if PPP_REMOTENAME + !pcb->explicit_remote && +#endif /* PPP_REMOTENAME */ + pcb->eap.es_server.ea_state == eapIdentify){ + /* Peer cannot Nak Identify Request */ + eap_figure_next_state(pcb, 1); + break; + } + + switch (vallen) { + case EAPT_SRP: + /* Run through SRP validator selection again. */ + pcb->eap.es_server.ea_state = eapIdentify; + eap_figure_next_state(pcb, 0); + break; + + case EAPT_MD5CHAP: + pcb->eap.es_server.ea_state = eapMD5Chall; + break; + + default: + ppp_dbglog("EAP: peer requesting unknown Type %d", vallen); + switch (pcb->eap.es_server.ea_state) { + case eapSRP1: + case eapSRP2: + case eapSRP3: + pcb->eap.es_server.ea_state = eapMD5Chall; + break; + case eapMD5Chall: + case eapSRP4: + pcb->eap.es_server.ea_state = eapIdentify; + eap_figure_next_state(pcb, 0); + break; + default: + break; + } + break; + } + break; + + case EAPT_MD5CHAP: + if (pcb->eap.es_server.ea_state != eapMD5Chall) { + ppp_error("EAP: unexpected MD5-Response"); + eap_figure_next_state(pcb, 1); + break; + } + if (len < 1) { + ppp_error("EAP: received MD5-Response with no data"); + eap_figure_next_state(pcb, 1); + break; + } + GETCHAR(vallen, inp); + len--; + if (vallen != 16 || vallen > len) { + ppp_error("EAP: MD5-Response with bad length %d", vallen); + eap_figure_next_state(pcb, 1); + break; + } + + /* Not so likely to happen. */ + if (vallen >= len + sizeof (rhostname)) { + ppp_dbglog("EAP: trimming really long peer name down"); + MEMCPY(rhostname, inp + vallen, sizeof (rhostname) - 1); + rhostname[sizeof (rhostname) - 1] = '\0'; + } else { + MEMCPY(rhostname, inp + vallen, len - vallen); + rhostname[len - vallen] = '\0'; + } + +#if PPP_REMOTENAME + /* In case the remote doesn't give us his name. */ + if (explicit_remote || + (remote_name[0] != '\0' && vallen == len)) + strlcpy(rhostname, remote_name, sizeof (rhostname)); +#endif /* PPP_REMOTENAME */ + + /* + * Get the secret for authenticating the specified + * host. 
+ */ + if (!get_secret(pcb, rhostname, + pcb->eap.es_server.ea_name, secret, &secret_len, 1)) { + ppp_dbglog("EAP: no MD5 secret for auth of %q", rhostname); + eap_send_failure(pcb); + break; + } + lwip_md5_init(&mdContext); + lwip_md5_starts(&mdContext); + lwip_md5_update(&mdContext, &pcb->eap.es_server.ea_id, 1); + lwip_md5_update(&mdContext, (u_char *)secret, secret_len); + BZERO(secret, sizeof (secret)); + lwip_md5_update(&mdContext, pcb->eap.es_challenge, pcb->eap.es_challen); + lwip_md5_finish(&mdContext, hash); + lwip_md5_free(&mdContext); + if (BCMP(hash, inp, MD5_SIGNATURE_SIZE) != 0) { + eap_send_failure(pcb); + break; + } + pcb->eap.es_server.ea_type = EAPT_MD5CHAP; + eap_send_success(pcb); + eap_figure_next_state(pcb, 0); + if (pcb->eap.es_rechallenge != 0) + TIMEOUT(eap_rechallenge, pcb, pcb->eap.es_rechallenge); + break; + +#ifdef USE_SRP + case EAPT_SRP: + if (len < 1) { + ppp_error("EAP: empty SRP Response"); + eap_figure_next_state(pcb, 1); + break; + } + GETCHAR(typenum, inp); + len--; + switch (typenum) { + case EAPSRP_CKEY: + if (pcb->eap.es_server.ea_state != eapSRP1) { + ppp_error("EAP: unexpected SRP Subtype 1 Response"); + eap_figure_next_state(pcb, 1); + break; + } + A.data = inp; + A.len = len; + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + pcb->eap.es_server.ea_skey = t_servergetkey(ts, &A); + if (pcb->eap.es_server.ea_skey == NULL) { + /* Client's A value is bogus; terminate now */ + ppp_error("EAP: bogus A value from client"); + eap_send_failure(pcb); + } else { + eap_figure_next_state(pcb, 0); + } + break; + + case EAPSRP_CVALIDATOR: + if (pcb->eap.es_server.ea_state != eapSRP2) { + ppp_error("EAP: unexpected SRP Subtype 2 Response"); + eap_figure_next_state(pcb, 1); + break; + } + if (len < sizeof (u32_t) + SHA_DIGESTSIZE) { + ppp_error("EAP: M1 length %d < %d", len, + sizeof (u32_t) + SHA_DIGESTSIZE); + eap_figure_next_state(pcb, 1); + break; + } + GETLONG(pcb->eap.es_server.ea_keyflags, inp); + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + if (t_serververify(ts, inp)) { + ppp_info("EAP: unable to validate client identity"); + eap_send_failure(pcb); + break; + } + eap_figure_next_state(pcb, 0); + break; + + case EAPSRP_ACK: + if (pcb->eap.es_server.ea_state != eapSRP3) { + ppp_error("EAP: unexpected SRP Subtype 3 Response"); + eap_send_failure(esp); + break; + } + pcb->eap.es_server.ea_type = EAPT_SRP; + eap_send_success(pcb, esp); + eap_figure_next_state(pcb, 0); + if (pcb->eap.es_rechallenge != 0) + TIMEOUT(eap_rechallenge, pcb, + pcb->eap.es_rechallenge); + if (pcb->eap.es_lwrechallenge != 0) + TIMEOUT(srp_lwrechallenge, pcb, + pcb->eap.es_lwrechallenge); + break; + + case EAPSRP_LWRECHALLENGE: + if (pcb->eap.es_server.ea_state != eapSRP4) { + ppp_info("EAP: unexpected SRP Subtype 4 Response"); + return; + } + if (len != SHA_DIGESTSIZE) { + ppp_error("EAP: bad Lightweight rechallenge " + "response"); + return; + } + SHA1Init(&ctxt); + vallen = id; + SHA1Update(&ctxt, &vallen, 1); + SHA1Update(&ctxt, pcb->eap.es_server.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, pcb->eap.es_challenge, pcb->eap.es_challen); + SHA1Update(&ctxt, pcb->eap.es_server.ea_peer, + pcb->eap.es_server.ea_peerlen); + SHA1Final(dig, &ctxt); + if (BCMP(dig, inp, SHA_DIGESTSIZE) != 0) { + ppp_error("EAP: failed Lightweight rechallenge"); + eap_send_failure(pcb); + break; + } + pcb->eap.es_server.ea_state = eapOpen; + if (pcb->eap.es_lwrechallenge != 0) + TIMEOUT(srp_lwrechallenge, esp, + pcb->eap.es_lwrechallenge); + 
break; + } + break; +#endif /* USE_SRP */ + + default: + /* This can't happen. */ + ppp_error("EAP: unknown Response type %d; ignored", typenum); + return; + } + + if (pcb->settings.eap_timeout_time > 0) { + UNTIMEOUT(eap_server_timeout, pcb); + } + + if (pcb->eap.es_server.ea_state != eapBadAuth && + pcb->eap.es_server.ea_state != eapOpen) { + pcb->eap.es_server.ea_id++; + eap_send_request(pcb); + } +} +#endif /* PPP_SERVER */ + +/* + * eap_success - Receive EAP Success message (client mode). + */ +static void eap_success(ppp_pcb *pcb, u_char *inp, int id, int len) { + LWIP_UNUSED_ARG(id); + + if (pcb->eap.es_client.ea_state != eapOpen && !eap_client_active(pcb)) { + ppp_dbglog("EAP unexpected success message in state %s (%d)", + eap_state_name(pcb->eap.es_client.ea_state), + pcb->eap.es_client.ea_state); + return; + } + + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } + + if (len > 0) { + /* This is odd. The spec doesn't allow for this. */ + PRINTMSG(inp, len); + } + + pcb->eap.es_client.ea_state = eapOpen; + auth_withpeer_success(pcb, PPP_EAP, 0); +} + +/* + * eap_failure - Receive EAP Failure message (client mode). + */ +static void eap_failure(ppp_pcb *pcb, u_char *inp, int id, int len) { + LWIP_UNUSED_ARG(id); + + if (!eap_client_active(pcb)) { + ppp_dbglog("EAP unexpected failure message in state %s (%d)", + eap_state_name(pcb->eap.es_client.ea_state), + pcb->eap.es_client.ea_state); + } + + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } + + if (len > 0) { + /* This is odd. The spec doesn't allow for this. */ + PRINTMSG(inp, len); + } + + pcb->eap.es_client.ea_state = eapBadAuth; + + ppp_error("EAP: peer reports authentication failure"); + auth_withpeer_fail(pcb, PPP_EAP); +} + +/* + * eap_input - Handle received EAP message. + */ +static void eap_input(ppp_pcb *pcb, u_char *inp, int inlen) { + u_char code, id; + int len; + + /* + * Parse header (code, id and length). If packet too short, + * drop it. + */ + if (inlen < EAP_HEADERLEN) { + ppp_error("EAP: packet too short: %d < %d", inlen, EAP_HEADERLEN); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < EAP_HEADERLEN || len > inlen) { + ppp_error("EAP: packet has illegal length field %d (%d..%d)", len, + EAP_HEADERLEN, inlen); + return; + } + len -= EAP_HEADERLEN; + + /* Dispatch based on message code */ + switch (code) { + case EAP_REQUEST: + eap_request(pcb, inp, id, len); + break; + +#if PPP_SERVER + case EAP_RESPONSE: + eap_response(pcb, inp, id, len); + break; +#endif /* PPP_SERVER */ + + case EAP_SUCCESS: + eap_success(pcb, inp, id, len); + break; + + case EAP_FAILURE: + eap_failure(pcb, inp, id, len); + break; + + default: /* XXX Need code reject */ + /* Note: it's not legal to send EAP Nak here. */ + ppp_warn("EAP: unknown code %d received", code); + break; + } +} + +#if PRINTPKT_SUPPORT +/* + * eap_printpkt - print the contents of an EAP packet. 
+ */ +static const char* const eap_codenames[] = { + "Request", "Response", "Success", "Failure" +}; + +static const char* const eap_typenames[] = { + "Identity", "Notification", "Nak", "MD5-Challenge", + "OTP", "Generic-Token", NULL, NULL, + "RSA", "DSS", "KEA", "KEA-Validate", + "TLS", "Defender", "Windows 2000", "Arcot", + "Cisco", "Nokia", "SRP" +}; + +static int eap_printpkt(const u_char *inp, int inlen, void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len, rtype, vallen; + const u_char *pstart; + u32_t uval; + + if (inlen < EAP_HEADERLEN) + return (0); + pstart = inp; + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < EAP_HEADERLEN || len > inlen) + return (0); + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(eap_codenames)) + printer(arg, " %s", eap_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= EAP_HEADERLEN; + switch (code) { + case EAP_REQUEST: + if (len < 1) { + printer(arg, " "); + break; + } + GETCHAR(rtype, inp); + len--; + if (rtype >= 1 && rtype <= (int)LWIP_ARRAYSIZE(eap_typenames)) + printer(arg, " %s", eap_typenames[rtype-1]); + else + printer(arg, " type=0x%x", rtype); + switch (rtype) { + case EAPT_IDENTITY: + case EAPT_NOTIFICATION: + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } else { + printer(arg, " "); + } + break; + + case EAPT_MD5CHAP: + if (len <= 0) + break; + GETCHAR(vallen, inp); + len--; + if (vallen > len) + goto truncated; + printer(arg, " ", vallen, inp); + INCPTR(vallen, inp); + len -= vallen; + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } else { + printer(arg, " "); + } + break; + + case EAPT_SRP: + if (len < 3) + goto truncated; + GETCHAR(vallen, inp); + len--; + printer(arg, "-%d", vallen); + switch (vallen) { + case EAPSRP_CHALLENGE: + GETCHAR(vallen, inp); + len--; + if (vallen >= len) + goto truncated; + if (vallen > 0) { + printer(arg, " "); + } else { + printer(arg, " "); + } + INCPTR(vallen, inp); + len -= vallen; + GETCHAR(vallen, inp); + len--; + if (vallen >= len) + goto truncated; + printer(arg, " ", vallen, inp); + INCPTR(vallen, inp); + len -= vallen; + GETCHAR(vallen, inp); + len--; + if (vallen > len) + goto truncated; + if (vallen == 0) { + printer(arg, " "); + } else { + printer(arg, " ", vallen, inp); + } + INCPTR(vallen, inp); + len -= vallen; + if (len == 0) { + printer(arg, " "); + } else { + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + } + break; + + case EAPSRP_SKEY: + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + break; + + case EAPSRP_SVALIDATOR: + if (len < (int)sizeof (u32_t)) + break; + GETLONG(uval, inp); + len -= sizeof (u32_t); + if (uval & SRPVAL_EBIT) { + printer(arg, " E"); + uval &= ~SRPVAL_EBIT; + } + if (uval != 0) { + printer(arg, " f<%X>", uval); + } + if ((vallen = len) > SHA_DIGESTSIZE) + vallen = SHA_DIGESTSIZE; + printer(arg, " ", len, inp, + len < SHA_DIGESTSIZE ? "?" 
: ""); + INCPTR(vallen, inp); + len -= vallen; + if (len > 0) { + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + } + break; + + case EAPSRP_LWRECHALLENGE: + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + break; + default: + break; + } + break; + default: + break; + } + break; + + case EAP_RESPONSE: + if (len < 1) + break; + GETCHAR(rtype, inp); + len--; + if (rtype >= 1 && rtype <= (int)LWIP_ARRAYSIZE(eap_typenames)) + printer(arg, " %s", eap_typenames[rtype-1]); + else + printer(arg, " type=0x%x", rtype); + switch (rtype) { + case EAPT_IDENTITY: + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } + break; + + case EAPT_NAK: + if (len <= 0) { + printer(arg, " "); + break; + } + GETCHAR(rtype, inp); + len--; + printer(arg, " = 1 && rtype < (int)LWIP_ARRAYSIZE(eap_typenames)) + printer(arg, " (%s)", eap_typenames[rtype-1]); + printer(arg, ">"); + break; + + case EAPT_MD5CHAP: + if (len <= 0) { + printer(arg, " "); + break; + } + GETCHAR(vallen, inp); + len--; + if (vallen > len) + goto truncated; + printer(arg, " ", vallen, inp); + INCPTR(vallen, inp); + len -= vallen; + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } else { + printer(arg, " "); + } + break; + + case EAPT_SRP: + if (len < 1) + goto truncated; + GETCHAR(vallen, inp); + len--; + printer(arg, "-%d", vallen); + switch (vallen) { + case EAPSRP_CKEY: + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + break; + + case EAPSRP_CVALIDATOR: + if (len < (int)sizeof (u32_t)) + break; + GETLONG(uval, inp); + len -= sizeof (u32_t); + if (uval & SRPVAL_EBIT) { + printer(arg, " E"); + uval &= ~SRPVAL_EBIT; + } + if (uval != 0) { + printer(arg, " f<%X>", uval); + } + printer(arg, " ", len, inp, + len == SHA_DIGESTSIZE ? "" : "?"); + INCPTR(len, inp); + len = 0; + break; + + case EAPSRP_ACK: + break; + + case EAPSRP_LWRECHALLENGE: + printer(arg, " ", len, inp, + len == SHA_DIGESTSIZE ? "" : "?"); + if ((vallen = len) > SHA_DIGESTSIZE) + vallen = SHA_DIGESTSIZE; + INCPTR(vallen, inp); + len -= vallen; + break; + default: + break; + } + break; + default: + break; + } + break; + + case EAP_SUCCESS: /* No payload expected for these! */ + case EAP_FAILURE: + default: + break; + + truncated: + printer(arg, " "); + break; + } + + if (len > 8) + printer(arg, "%8B...", inp); + else if (len > 0) + printer(arg, "%.*B", len, inp); + INCPTR(len, inp); + + return (inp - pstart); +} +#endif /* PRINTPKT_SUPPORT */ + +#endif /* PPP_SUPPORT && EAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c new file mode 100644 index 00000000..4d84f609 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c @@ -0,0 +1,191 @@ +/* + * ecp.c - PPP Encryption Control Protocol. + * + * Copyright (c) 2002 Google, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. 
The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Derived from ccp.c, which is: + * + * Copyright (c) 1994-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && ECP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ecp.h" + +#if PPP_OPTIONS +static option_t ecp_option_list[] = { + { "noecp", o_bool, &ecp_protent.enabled_flag, + "Disable ECP negotiation" }, + { "-ecp", o_bool, &ecp_protent.enabled_flag, + "Disable ECP negotiation", OPT_ALIAS }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. 
+ */ +static void ecp_init (int unit); +/* +static void ecp_open (int unit); +static void ecp_close (int unit, char *); +static void ecp_lowerup (int unit); +static void ecp_lowerdown (int); +static void ecp_input (int unit, u_char *pkt, int len); +static void ecp_protrej (int unit); +*/ +#if PRINTPKT_SUPPORT +static int ecp_printpkt (const u_char *pkt, int len, + void (*printer) (void *, char *, ...), + void *arg); +#endif /* PRINTPKT_SUPPORT */ +/* +static void ecp_datainput (int unit, u_char *pkt, int len); +*/ + +const struct protent ecp_protent = { + PPP_ECP, + ecp_init, + NULL, /* ecp_input, */ + NULL, /* ecp_protrej, */ + NULL, /* ecp_lowerup, */ + NULL, /* ecp_lowerdown, */ + NULL, /* ecp_open, */ + NULL, /* ecp_close, */ +#if PRINTPKT_SUPPORT + ecp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, /* ecp_datainput, */ +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "ECP", + "Encrypted", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ecp_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +fsm ecp_fsm[NUM_PPP]; +ecp_options ecp_wantoptions[NUM_PPP]; /* what to request the peer to use */ +ecp_options ecp_gotoptions[NUM_PPP]; /* what the peer agreed to do */ +ecp_options ecp_allowoptions[NUM_PPP]; /* what we'll agree to do */ +ecp_options ecp_hisoptions[NUM_PPP]; /* what we agreed to do */ + +static const fsm_callbacks ecp_callbacks = { + NULL, /* ecp_resetci, */ + NULL, /* ecp_cilen, */ + NULL, /* ecp_addci, */ + NULL, /* ecp_ackci, */ + NULL, /* ecp_nakci, */ + NULL, /* ecp_rejci, */ + NULL, /* ecp_reqci, */ + NULL, /* ecp_up, */ + NULL, /* ecp_down, */ + NULL, + NULL, + NULL, + NULL, + NULL, /* ecp_extcode, */ + "ECP" +}; + +/* + * ecp_init - initialize ECP. + */ +static void +ecp_init(unit) + int unit; +{ + fsm *f = &ecp_fsm[unit]; + + f->unit = unit; + f->protocol = PPP_ECP; + f->callbacks = &ecp_callbacks; + fsm_init(f); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(&ecp_wantoptions[unit], 0, sizeof(ecp_options)); + memset(&ecp_gotoptions[unit], 0, sizeof(ecp_options)); + memset(&ecp_allowoptions[unit], 0, sizeof(ecp_options)); + memset(&ecp_hisoptions[unit], 0, sizeof(ecp_options)); +#endif /* 0 */ + +} + + +#if PRINTPKT_SUPPORT +static int +ecp_printpkt(p, plen, printer, arg) + const u_char *p; + int plen; + void (*printer) (void *, char *, ...); + void *arg; +{ + return 0; +} +#endif /* PRINTPKT_SUPPORT */ + +#endif /* PPP_SUPPORT && ECP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c new file mode 100644 index 00000000..01493bc1 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c @@ -0,0 +1,56 @@ +/* + * eui64.c - EUI64 routines for IPv6CP. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. 
The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: eui64.c,v 1.6 2002/12/04 23:03:32 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/eui64.h" + +/* + * eui64_ntoa - Make an ascii representation of an interface identifier + */ +char *eui64_ntoa(eui64_t e) { + static char buf[20]; + + sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x", + e.e8[0], e.e8[1], e.e8[2], e.e8[3], + e.e8[4], e.e8[5], e.e8[6], e.e8[7]); + return buf; +} + +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c new file mode 100644 index 00000000..b1f08aff --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c @@ -0,0 +1,799 @@ +/* + * fsm.c - {Link, IP} Control Protocol Finite State Machine. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + * Randomize fsm id on link/init. + * Deal with variable outgoing MTU. + */ + +#if 0 /* UNUSED */ +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" + +static void fsm_timeout (void *); +static void fsm_rconfreq(fsm *f, u_char id, u_char *inp, int len); +static void fsm_rconfack(fsm *f, int id, u_char *inp, int len); +static void fsm_rconfnakrej(fsm *f, int code, int id, u_char *inp, int len); +static void fsm_rtermreq(fsm *f, int id, u_char *p, int len); +static void fsm_rtermack(fsm *f); +static void fsm_rcoderej(fsm *f, u_char *inp, int len); +static void fsm_sconfreq(fsm *f, int retransmit); + +#define PROTO_NAME(f) ((f)->callbacks->proto_name) + +/* + * fsm_init - Initialize fsm. + * + * Initialize fsm state. + */ +void fsm_init(fsm *f) { + ppp_pcb *pcb = f->pcb; + f->state = PPP_FSM_INITIAL; + f->flags = 0; + f->id = 0; /* XXX Start with random id? */ + f->maxnakloops = pcb->settings.fsm_max_nak_loops; + f->term_reason_len = 0; +} + + +/* + * fsm_lowerup - The lower layer is up. + */ +void fsm_lowerup(fsm *f) { + switch( f->state ){ + case PPP_FSM_INITIAL: + f->state = PPP_FSM_CLOSED; + break; + + case PPP_FSM_STARTING: + if( f->flags & OPT_SILENT ) + f->state = PPP_FSM_STOPPED; + else { + /* Send an initial configure-request */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + } + break; + + default: + FSMDEBUG(("%s: Up event in state %d!", PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_lowerdown - The lower layer is down. + * + * Cancel all timeouts and inform upper layers. + */ +void fsm_lowerdown(fsm *f) { + switch( f->state ){ + case PPP_FSM_CLOSED: + f->state = PPP_FSM_INITIAL; + break; + + case PPP_FSM_STOPPED: + f->state = PPP_FSM_STARTING; + if( f->callbacks->starting ) + (*f->callbacks->starting)(f); + break; + + case PPP_FSM_CLOSING: + f->state = PPP_FSM_INITIAL; + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + break; + + case PPP_FSM_STOPPING: + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + f->state = PPP_FSM_STARTING; + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + break; + + case PPP_FSM_OPENED: + if( f->callbacks->down ) + (*f->callbacks->down)(f); + f->state = PPP_FSM_STARTING; + break; + + default: + FSMDEBUG(("%s: Down event in state %d!", PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_open - Link is allowed to come up. + */ +void fsm_open(fsm *f) { + switch( f->state ){ + case PPP_FSM_INITIAL: + f->state = PPP_FSM_STARTING; + if( f->callbacks->starting ) + (*f->callbacks->starting)(f); + break; + + case PPP_FSM_CLOSED: + if( f->flags & OPT_SILENT ) + f->state = PPP_FSM_STOPPED; + else { + /* Send an initial configure-request */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + } + break; + + case PPP_FSM_CLOSING: + f->state = PPP_FSM_STOPPING; + /* fall through */ + /* no break */ + case PPP_FSM_STOPPED: + case PPP_FSM_OPENED: + if( f->flags & OPT_RESTART ){ + fsm_lowerdown(f); + fsm_lowerup(f); + } + break; + default: + break; + } +} + +/* + * terminate_layer - Start process of shutting down the FSM + * + * Cancel any timeout running, notify upper layers we're done, and + * send a terminate-request message as configured. 
+ */ +static void terminate_layer(fsm *f, int nextstate) { + ppp_pcb *pcb = f->pcb; + + if( f->state != PPP_FSM_OPENED ) + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + else if( f->callbacks->down ) + (*f->callbacks->down)(f); /* Inform upper layers we're down */ + + /* Init restart counter and send Terminate-Request */ + f->retransmits = pcb->settings.fsm_max_term_transmits; + fsm_sdata(f, TERMREQ, f->reqid = ++f->id, + (const u_char *) f->term_reason, f->term_reason_len); + + if (f->retransmits == 0) { + /* + * User asked for no terminate requests at all; just close it. + * We've already fired off one Terminate-Request just to be nice + * to the peer, but we're not going to wait for a reply. + */ + f->state = nextstate == PPP_FSM_CLOSING ? PPP_FSM_CLOSED : PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + return; + } + + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); + --f->retransmits; + + f->state = nextstate; +} + +/* + * fsm_close - Start closing connection. + * + * Cancel timeouts and either initiate close or possibly go directly to + * the PPP_FSM_CLOSED state. + */ +void fsm_close(fsm *f, const char *reason) { + f->term_reason = reason; + f->term_reason_len = (reason == NULL? 0: (u8_t)LWIP_MIN(strlen(reason), 0xFF) ); + switch( f->state ){ + case PPP_FSM_STARTING: + f->state = PPP_FSM_INITIAL; + break; + case PPP_FSM_STOPPED: + f->state = PPP_FSM_CLOSED; + break; + case PPP_FSM_STOPPING: + f->state = PPP_FSM_CLOSING; + break; + + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + case PPP_FSM_OPENED: + terminate_layer(f, PPP_FSM_CLOSING); + break; + default: + break; + } +} + + +/* + * fsm_timeout - Timeout expired. + */ +static void fsm_timeout(void *arg) { + fsm *f = (fsm *) arg; + ppp_pcb *pcb = f->pcb; + + switch (f->state) { + case PPP_FSM_CLOSING: + case PPP_FSM_STOPPING: + if( f->retransmits <= 0 ){ + /* + * We've waited for an ack long enough. Peer probably heard us. + */ + f->state = (f->state == PPP_FSM_CLOSING)? PPP_FSM_CLOSED: PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + } else { + /* Send Terminate-Request */ + fsm_sdata(f, TERMREQ, f->reqid = ++f->id, + (const u_char *) f->term_reason, f->term_reason_len); + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); + --f->retransmits; + } + break; + + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + if (f->retransmits <= 0) { + ppp_warn("%s: timeout sending Config-Requests", PROTO_NAME(f)); + f->state = PPP_FSM_STOPPED; + if( (f->flags & OPT_PASSIVE) == 0 && f->callbacks->finished ) + (*f->callbacks->finished)(f); + + } else { + /* Retransmit the configure-request */ + if (f->callbacks->retransmit) + (*f->callbacks->retransmit)(f); + fsm_sconfreq(f, 1); /* Re-send Configure-Request */ + if( f->state == PPP_FSM_ACKRCVD ) + f->state = PPP_FSM_REQSENT; + } + break; + + default: + FSMDEBUG(("%s: Timeout event in state %d!", PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_input - Input packet. + */ +void fsm_input(fsm *f, u_char *inpacket, int l) { + u_char *inp; + u_char code, id; + int len; + + /* + * Parse header (code, id and length). + * If packet too short, drop it. 
+ */ + inp = inpacket; + if (l < HEADERLEN) { + FSMDEBUG(("fsm_input(%x): Rcvd short header.", f->protocol)); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < HEADERLEN) { + FSMDEBUG(("fsm_input(%x): Rcvd illegal length.", f->protocol)); + return; + } + if (len > l) { + FSMDEBUG(("fsm_input(%x): Rcvd short packet.", f->protocol)); + return; + } + len -= HEADERLEN; /* subtract header length */ + + if( f->state == PPP_FSM_INITIAL || f->state == PPP_FSM_STARTING ){ + FSMDEBUG(("fsm_input(%x): Rcvd packet in state %d.", + f->protocol, f->state)); + return; + } + + /* + * Action depends on code. + */ + switch (code) { + case CONFREQ: + fsm_rconfreq(f, id, inp, len); + break; + + case CONFACK: + fsm_rconfack(f, id, inp, len); + break; + + case CONFNAK: + case CONFREJ: + fsm_rconfnakrej(f, code, id, inp, len); + break; + + case TERMREQ: + fsm_rtermreq(f, id, inp, len); + break; + + case TERMACK: + fsm_rtermack(f); + break; + + case CODEREJ: + fsm_rcoderej(f, inp, len); + break; + + default: + if( !f->callbacks->extcode + || !(*f->callbacks->extcode)(f, code, id, inp, len) ) + fsm_sdata(f, CODEREJ, ++f->id, inpacket, len + HEADERLEN); + break; + } +} + + +/* + * fsm_rconfreq - Receive Configure-Request. + */ +static void fsm_rconfreq(fsm *f, u_char id, u_char *inp, int len) { + int code, reject_if_disagree; + + switch( f->state ){ + case PPP_FSM_CLOSED: + /* Go away, we're closed */ + fsm_sdata(f, TERMACK, id, NULL, 0); + return; + case PPP_FSM_CLOSING: + case PPP_FSM_STOPPING: + return; + + case PPP_FSM_OPENED: + /* Go down and restart negotiation */ + if( f->callbacks->down ) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_STOPPED: + /* Negotiation started by our peer */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } + + /* + * Pass the requested configuration options + * to protocol-specific code for checking. + */ + if (f->callbacks->reqci){ /* Check CI */ + reject_if_disagree = (f->nakloops >= f->maxnakloops); + code = (*f->callbacks->reqci)(f, inp, &len, reject_if_disagree); + } else if (len) + code = CONFREJ; /* Reject all CI */ + else + code = CONFACK; + + /* send the Ack, Nak or Rej to the peer */ + fsm_sdata(f, code, id, inp, len); + + if (code == CONFACK) { + if (f->state == PPP_FSM_ACKRCVD) { + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + f->state = PPP_FSM_OPENED; + if (f->callbacks->up) + (*f->callbacks->up)(f); /* Inform upper layers */ + } else + f->state = PPP_FSM_ACKSENT; + f->nakloops = 0; + + } else { + /* we sent CONFACK or CONFREJ */ + if (f->state != PPP_FSM_ACKRCVD) + f->state = PPP_FSM_REQSENT; + if( code == CONFNAK ) + ++f->nakloops; + } +} + + +/* + * fsm_rconfack - Receive Configure-Ack. + */ +static void fsm_rconfack(fsm *f, int id, u_char *inp, int len) { + ppp_pcb *pcb = f->pcb; + + if (id != f->reqid || f->seen_ack) /* Expected id? */ + return; /* Nope, toss... */ + if( !(f->callbacks->ackci? 
(*f->callbacks->ackci)(f, inp, len): + (len == 0)) ){ + /* Ack is bad - ignore it */ + ppp_error("Received bad configure-ack: %P", inp, len); + return; + } + f->seen_ack = 1; + f->rnakloops = 0; + + switch (f->state) { + case PPP_FSM_CLOSED: + case PPP_FSM_STOPPED: + fsm_sdata(f, TERMACK, id, NULL, 0); + break; + + case PPP_FSM_REQSENT: + f->state = PPP_FSM_ACKRCVD; + f->retransmits = pcb->settings.fsm_max_conf_req_transmits; + break; + + case PPP_FSM_ACKRCVD: + /* Huh? an extra valid Ack? oh well... */ + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_ACKSENT: + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + f->state = PPP_FSM_OPENED; + f->retransmits = pcb->settings.fsm_max_conf_req_transmits; + if (f->callbacks->up) + (*f->callbacks->up)(f); /* Inform upper layers */ + break; + + case PPP_FSM_OPENED: + /* Go down and restart negotiation */ + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } +} + + +/* + * fsm_rconfnakrej - Receive Configure-Nak or Configure-Reject. + */ +static void fsm_rconfnakrej(fsm *f, int code, int id, u_char *inp, int len) { + int ret; + int treat_as_reject; + + if (id != f->reqid || f->seen_ack) /* Expected id? */ + return; /* Nope, toss... */ + + if (code == CONFNAK) { + ++f->rnakloops; + treat_as_reject = (f->rnakloops >= f->maxnakloops); + if (f->callbacks->nakci == NULL + || !(ret = f->callbacks->nakci(f, inp, len, treat_as_reject))) { + ppp_error("Received bad configure-nak: %P", inp, len); + return; + } + } else { + f->rnakloops = 0; + if (f->callbacks->rejci == NULL + || !(ret = f->callbacks->rejci(f, inp, len))) { + ppp_error("Received bad configure-rej: %P", inp, len); + return; + } + } + + f->seen_ack = 1; + + switch (f->state) { + case PPP_FSM_CLOSED: + case PPP_FSM_STOPPED: + fsm_sdata(f, TERMACK, id, NULL, 0); + break; + + case PPP_FSM_REQSENT: + case PPP_FSM_ACKSENT: + /* They didn't agree to what we wanted - try another request */ + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + if (ret < 0) + f->state = PPP_FSM_STOPPED; /* kludge for stopping CCP */ + else + fsm_sconfreq(f, 0); /* Send Configure-Request */ + break; + + case PPP_FSM_ACKRCVD: + /* Got a Nak/reject when we had already had an Ack?? oh well... */ + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_OPENED: + /* Go down and restart negotiation */ + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } +} + + +/* + * fsm_rtermreq - Receive Terminate-Req. 
+ */ +static void fsm_rtermreq(fsm *f, int id, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + + switch (f->state) { + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + f->state = PPP_FSM_REQSENT; /* Start over but keep trying */ + break; + + case PPP_FSM_OPENED: + if (len > 0) { + ppp_info("%s terminated by peer (%0.*v)", PROTO_NAME(f), len, p); + } else + ppp_info("%s terminated by peer", PROTO_NAME(f)); + f->retransmits = 0; + f->state = PPP_FSM_STOPPING; + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); + break; + default: + break; + } + + fsm_sdata(f, TERMACK, id, NULL, 0); +} + + +/* + * fsm_rtermack - Receive Terminate-Ack. + */ +static void fsm_rtermack(fsm *f) { + switch (f->state) { + case PPP_FSM_CLOSING: + UNTIMEOUT(fsm_timeout, f); + f->state = PPP_FSM_CLOSED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + case PPP_FSM_STOPPING: + UNTIMEOUT(fsm_timeout, f); + f->state = PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + + case PPP_FSM_ACKRCVD: + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_OPENED: + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } +} + + +/* + * fsm_rcoderej - Receive an Code-Reject. + */ +static void fsm_rcoderej(fsm *f, u_char *inp, int len) { + u_char code, id; + + if (len < HEADERLEN) { + FSMDEBUG(("fsm_rcoderej: Rcvd short Code-Reject packet!")); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + ppp_warn("%s: Rcvd Code-Reject for code %d, id %d", PROTO_NAME(f), code, id); + + if( f->state == PPP_FSM_ACKRCVD ) + f->state = PPP_FSM_REQSENT; +} + + +/* + * fsm_protreject - Peer doesn't speak this protocol. + * + * Treat this as a catastrophic error (RXJ-). + */ +void fsm_protreject(fsm *f) { + switch( f->state ){ + case PPP_FSM_CLOSING: + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + /* fall through */ + /* no break */ + case PPP_FSM_CLOSED: + f->state = PPP_FSM_CLOSED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + + case PPP_FSM_STOPPING: + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + /* fall through */ + /* no break */ + case PPP_FSM_STOPPED: + f->state = PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + + case PPP_FSM_OPENED: + terminate_layer(f, PPP_FSM_STOPPING); + break; + + default: + FSMDEBUG(("%s: Protocol-reject event in state %d!", + PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_sconfreq - Send a Configure-Request. 
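+ * The packet is the 4-byte code/id/length header followed by whatever the
+ * protocol's addci() callback writes; for IPCP, for instance, an IP-Address
+ * option is encoded as 03 06 <4 address bytes> (CI_ADDR / CILEN_ADDR).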
+ */ +static void fsm_sconfreq(fsm *f, int retransmit) { + ppp_pcb *pcb = f->pcb; + struct pbuf *p; + u_char *outp; + int cilen; + + if( f->state != PPP_FSM_REQSENT && f->state != PPP_FSM_ACKRCVD && f->state != PPP_FSM_ACKSENT ){ + /* Not currently negotiating - reset options */ + if( f->callbacks->resetci ) + (*f->callbacks->resetci)(f); + f->nakloops = 0; + f->rnakloops = 0; + } + + if( !retransmit ){ + /* New request - reset retransmission counter, use new ID */ + f->retransmits = pcb->settings.fsm_max_conf_req_transmits; + f->reqid = ++f->id; + } + + f->seen_ack = 0; + + /* + * Make up the request packet + */ + if( f->callbacks->cilen && f->callbacks->addci ){ + cilen = (*f->callbacks->cilen)(f); + if( cilen > pcb->peer_mru - HEADERLEN ) + cilen = pcb->peer_mru - HEADERLEN; + } else + cilen = 0; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(cilen + HEADERLEN + PPP_HDRLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + /* send the request to our peer */ + outp = (u_char*)p->payload; + MAKEHEADER(outp, f->protocol); + PUTCHAR(CONFREQ, outp); + PUTCHAR(f->reqid, outp); + PUTSHORT(cilen + HEADERLEN, outp); + if (cilen != 0) { + (*f->callbacks->addci)(f, outp, &cilen); + LWIP_ASSERT("cilen == p->len - HEADERLEN - PPP_HDRLEN", cilen == p->len - HEADERLEN - PPP_HDRLEN); + } + + ppp_write(pcb, p); + + /* start the retransmit timer */ + --f->retransmits; + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); +} + + +/* + * fsm_sdata - Send some data. + * + * Used for all packets sent to our peer by this module. + */ +void fsm_sdata(fsm *f, u_char code, u_char id, const u_char *data, int datalen) { + ppp_pcb *pcb = f->pcb; + struct pbuf *p; + u_char *outp; + int outlen; + + /* Adjust length to be smaller than MTU */ + if (datalen > pcb->peer_mru - HEADERLEN) + datalen = pcb->peer_mru - HEADERLEN; + outlen = datalen + HEADERLEN; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(outlen + PPP_HDRLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + if (datalen) /* && data != outp + PPP_HDRLEN + HEADERLEN) -- was only for fsm_sconfreq() */ + MEMCPY(outp + PPP_HDRLEN + HEADERLEN, data, datalen); + MAKEHEADER(outp, f->protocol); + PUTCHAR(code, outp); + PUTCHAR(id, outp); + PUTSHORT(outlen, outp); + ppp_write(pcb, p); +} + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c new file mode 100644 index 00000000..b7c766eb --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c @@ -0,0 +1,2418 @@ +/* + * ipcp.c - PPP IP Control Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. 
For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV4_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ipcp.h" + +#if 0 /* UNUSED */ +/* global vars */ +u32_t netmask = 0; /* IP netmask to set on interface */ +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +bool disable_defaultip = 0; /* Don't use hostname for default IP adrs */ +#endif /* UNUSED */ + +#if 0 /* moved to ppp_settings */ +bool noremoteip = 0; /* Let him have no IP address */ +#endif /* moved to ppp_setting */ + +#if 0 /* UNUSED */ +/* Hook for a plugin to know when IP protocol has come up */ +void (*ip_up_hook) (void) = NULL; + +/* Hook for a plugin to know when IP protocol has come down */ +void (*ip_down_hook) (void) = NULL; + +/* Hook for a plugin to choose the remote IP address */ +void (*ip_choose_hook) (u32_t *) = NULL; +#endif /* UNUSED */ + +#if PPP_NOTIFY +/* Notifiers for when IPCP goes up and down */ +struct notifier *ip_up_notifier = NULL; +struct notifier *ip_down_notifier = NULL; +#endif /* PPP_NOTIFY */ + +/* local vars */ +#if 0 /* moved to ppp_pcb */ +static int default_route_set[NUM_PPP]; /* Have set up a default route */ +static int proxy_arp_set[NUM_PPP]; /* Have created proxy arp entry */ +static int ipcp_is_up; /* have called np_up() */ +static int ipcp_is_open; /* haven't called np_finished() */ +static bool ask_for_local; /* request our address from peer */ +#endif /* moved to ppp_pcb */ +#if 0 /* UNUSED */ +static char vj_value[8]; /* string form of vj option value */ +static char netmask_str[20]; /* string form of netmask value */ +#endif /* UNUSED */ + +/* + * Callbacks for fsm code. 
(CI = Configuration Information) + */ +static void ipcp_resetci(fsm *f); /* Reset our CI */ +static int ipcp_cilen(fsm *f); /* Return length of our CI */ +static void ipcp_addci(fsm *f, u_char *ucp, int *lenp); /* Add our CI */ +static int ipcp_ackci(fsm *f, u_char *p, int len); /* Peer ack'd our CI */ +static int ipcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject);/* Peer nak'd our CI */ +static int ipcp_rejci(fsm *f, u_char *p, int len); /* Peer rej'd our CI */ +static int ipcp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree); /* Rcv CI */ +static void ipcp_up(fsm *f); /* We're UP */ +static void ipcp_down(fsm *f); /* We're DOWN */ +static void ipcp_finished(fsm *f); /* Don't need lower layer */ + +static const fsm_callbacks ipcp_callbacks = { /* IPCP callback routines */ + ipcp_resetci, /* Reset our Configuration Information */ + ipcp_cilen, /* Length of our Configuration Information */ + ipcp_addci, /* Add our Configuration Information */ + ipcp_ackci, /* ACK our Configuration Information */ + ipcp_nakci, /* NAK our Configuration Information */ + ipcp_rejci, /* Reject our Configuration Information */ + ipcp_reqci, /* Request peer's Configuration Information */ + ipcp_up, /* Called when fsm reaches OPENED state */ + ipcp_down, /* Called when fsm leaves OPENED state */ + NULL, /* Called when we want the lower layer up */ + ipcp_finished, /* Called when we want the lower layer down */ + NULL, /* Called when Protocol-Reject received */ + NULL, /* Retransmission is necessary */ + NULL, /* Called to handle protocol-specific codes */ + "IPCP" /* String name of protocol */ +}; + +/* + * Command-line options. + */ +#if PPP_OPTIONS +static int setvjslots (char **); +static int setdnsaddr (char **); +static int setwinsaddr (char **); +static int setnetmask (char **); +int setipaddr (char *, char **, int); + +static void printipaddr (option_t *, void (*)(void *, char *,...),void *); + +static option_t ipcp_option_list[] = { + { "noip", o_bool, &ipcp_protent.enabled_flag, + "Disable IP and IPCP" }, + { "-ip", o_bool, &ipcp_protent.enabled_flag, + "Disable IP and IPCP", OPT_ALIAS }, + + { "novj", o_bool, &ipcp_wantoptions[0].neg_vj, + "Disable VJ compression", OPT_A2CLR, &ipcp_allowoptions[0].neg_vj }, + { "-vj", o_bool, &ipcp_wantoptions[0].neg_vj, + "Disable VJ compression", OPT_ALIAS | OPT_A2CLR, + &ipcp_allowoptions[0].neg_vj }, + + { "novjccomp", o_bool, &ipcp_wantoptions[0].cflag, + "Disable VJ connection-ID compression", OPT_A2CLR, + &ipcp_allowoptions[0].cflag }, + { "-vjccomp", o_bool, &ipcp_wantoptions[0].cflag, + "Disable VJ connection-ID compression", OPT_ALIAS | OPT_A2CLR, + &ipcp_allowoptions[0].cflag }, + + { "vj-max-slots", o_special, (void *)setvjslots, + "Set maximum VJ header slots", + OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, vj_value }, + + { "ipcp-accept-local", o_bool, &ipcp_wantoptions[0].accept_local, + "Accept peer's address for us", 1 }, + { "ipcp-accept-remote", o_bool, &ipcp_wantoptions[0].accept_remote, + "Accept peer's address for it", 1 }, + + { "ipparam", o_string, &ipparam, + "Set ip script parameter", OPT_PRIO }, + + { "noipdefault", o_bool, &disable_defaultip, + "Don't use name for default IP adrs", 1 }, + + { "ms-dns", 1, (void *)setdnsaddr, + "DNS address for the peer's use" }, + { "ms-wins", 1, (void *)setwinsaddr, + "Nameserver for SMB over TCP/IP for peer" }, + + { "ipcp-restart", o_int, &ipcp_fsm[0].timeouttime, + "Set timeout for IPCP", OPT_PRIO }, + { "ipcp-max-terminate", o_int, &ipcp_fsm[0].maxtermtransmits, + "Set max #xmits for 
term-reqs", OPT_PRIO }, + { "ipcp-max-configure", o_int, &ipcp_fsm[0].maxconfreqtransmits, + "Set max #xmits for conf-reqs", OPT_PRIO }, + { "ipcp-max-failure", o_int, &ipcp_fsm[0].maxnakloops, + "Set max #conf-naks for IPCP", OPT_PRIO }, + + { "defaultroute", o_bool, &ipcp_wantoptions[0].default_route, + "Add default route", OPT_ENABLE|1, &ipcp_allowoptions[0].default_route }, + { "nodefaultroute", o_bool, &ipcp_allowoptions[0].default_route, + "disable defaultroute option", OPT_A2CLR, + &ipcp_wantoptions[0].default_route }, + { "-defaultroute", o_bool, &ipcp_allowoptions[0].default_route, + "disable defaultroute option", OPT_ALIAS | OPT_A2CLR, + &ipcp_wantoptions[0].default_route }, + + { "replacedefaultroute", o_bool, + &ipcp_wantoptions[0].replace_default_route, + "Replace default route", 1 + }, + { "noreplacedefaultroute", o_bool, + &ipcp_allowoptions[0].replace_default_route, + "Never replace default route", OPT_A2COPY, + &ipcp_wantoptions[0].replace_default_route }, + { "proxyarp", o_bool, &ipcp_wantoptions[0].proxy_arp, + "Add proxy ARP entry", OPT_ENABLE|1, &ipcp_allowoptions[0].proxy_arp }, + { "noproxyarp", o_bool, &ipcp_allowoptions[0].proxy_arp, + "disable proxyarp option", OPT_A2CLR, + &ipcp_wantoptions[0].proxy_arp }, + { "-proxyarp", o_bool, &ipcp_allowoptions[0].proxy_arp, + "disable proxyarp option", OPT_ALIAS | OPT_A2CLR, + &ipcp_wantoptions[0].proxy_arp }, + + { "usepeerdns", o_bool, &usepeerdns, + "Ask peer for DNS address(es)", 1 }, + + { "netmask", o_special, (void *)setnetmask, + "set netmask", OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, netmask_str }, + + { "ipcp-no-addresses", o_bool, &ipcp_wantoptions[0].old_addrs, + "Disable old-style IP-Addresses usage", OPT_A2CLR, + &ipcp_allowoptions[0].old_addrs }, + { "ipcp-no-address", o_bool, &ipcp_wantoptions[0].neg_addr, + "Disable IP-Address usage", OPT_A2CLR, + &ipcp_allowoptions[0].neg_addr }, + + { "noremoteip", o_bool, &noremoteip, + "Allow peer to have no IP address", 1 }, + + { "nosendip", o_bool, &ipcp_wantoptions[0].neg_addr, + "Don't send our IP address to peer", OPT_A2CLR, + &ipcp_wantoptions[0].old_addrs}, + + { "IP addresses", o_wild, (void *) &setipaddr, + "set local and remote IP addresses", + OPT_NOARG | OPT_A2PRINTER, (void *) &printipaddr }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. 
+ */ +static void ipcp_init(ppp_pcb *pcb); +static void ipcp_open(ppp_pcb *pcb); +static void ipcp_close(ppp_pcb *pcb, const char *reason); +static void ipcp_lowerup(ppp_pcb *pcb); +static void ipcp_lowerdown(ppp_pcb *pcb); +static void ipcp_input(ppp_pcb *pcb, u_char *p, int len); +static void ipcp_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int ipcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS +static void ip_check_options (void); +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT +static int ip_demand_conf (int); +static int ip_active_pkt (u_char *, int); +#endif /* DEMAND_SUPPORT */ +#if 0 /* UNUSED */ +static void create_resolv (u32_t, u32_t); +#endif /* UNUSED */ + +const struct protent ipcp_protent = { + PPP_IPCP, + ipcp_init, + ipcp_input, + ipcp_protrej, + ipcp_lowerup, + ipcp_lowerdown, + ipcp_open, + ipcp_close, +#if PRINTPKT_SUPPORT + ipcp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "IPCP", + "IP", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ipcp_option_list, + ip_check_options, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + ip_demand_conf, + ip_active_pkt +#endif /* DEMAND_SUPPORT */ +}; + +static void ipcp_clear_addrs(ppp_pcb *pcb, u32_t ouraddr, u32_t hisaddr, u8_t replacedefaultroute); + +/* + * Lengths of configuration options. + */ +#define CILEN_VOID 2 +#define CILEN_COMPRESS 4 /* min length for compression protocol opt. */ +#define CILEN_VJ 6 /* length for RFC1332 Van-Jacobson opt. */ +#define CILEN_ADDR 6 /* new-style single address option */ +#define CILEN_ADDRS 10 /* old-style dual address option */ + + +#define CODENAME(x) ((x) == CONFACK ? "ACK" : \ + (x) == CONFNAK ? "NAK" : "REJ") + +#if 0 /* UNUSED, already defined by lwIP */ +/* + * Make a string representation of a network IP address. + */ +char * +ip_ntoa(ipaddr) +u32_t ipaddr; +{ + static char b[64]; + + slprintf(b, sizeof(b), "%I", ipaddr); + return b; +} +#endif /* UNUSED, already defined by lwIP */ + +/* + * Option parsing. + */ +#if PPP_OPTIONS +/* + * setvjslots - set maximum number of connection slots for VJ compression + */ +static int +setvjslots(argv) + char **argv; +{ + int value; + + if (!int_option(*argv, &value)) + return 0; + + if (value < 2 || value > 16) { + option_error("vj-max-slots value must be between 2 and 16"); + return 0; + } + ipcp_wantoptions [0].maxslotindex = + ipcp_allowoptions[0].maxslotindex = value - 1; + slprintf(vj_value, sizeof(vj_value), "%d", value); + return 1; +} + +/* + * setdnsaddr - set the dns address(es) + */ +static int +setdnsaddr(argv) + char **argv; +{ + u32_t dns; + struct hostent *hp; + + dns = inet_addr(*argv); + if (dns == (u32_t) -1) { + if ((hp = gethostbyname(*argv)) == NULL) { + option_error("invalid address parameter '%s' for ms-dns option", + *argv); + return 0; + } + dns = *(u32_t *)hp->h_addr; + } + + /* We take the last 2 values given, the 2nd-last as the primary + and the last as the secondary. If only one is given it + becomes both primary and secondary. */ + if (ipcp_allowoptions[0].dnsaddr[1] == 0) + ipcp_allowoptions[0].dnsaddr[0] = dns; + else + ipcp_allowoptions[0].dnsaddr[0] = ipcp_allowoptions[0].dnsaddr[1]; + + /* always set the secondary address value. 
*/ + ipcp_allowoptions[0].dnsaddr[1] = dns; + + return (1); +} + +/* + * setwinsaddr - set the wins address(es) + * This is primrarly used with the Samba package under UNIX or for pointing + * the caller to the existing WINS server on a Windows NT platform. + */ +static int +setwinsaddr(argv) + char **argv; +{ + u32_t wins; + struct hostent *hp; + + wins = inet_addr(*argv); + if (wins == (u32_t) -1) { + if ((hp = gethostbyname(*argv)) == NULL) { + option_error("invalid address parameter '%s' for ms-wins option", + *argv); + return 0; + } + wins = *(u32_t *)hp->h_addr; + } + + /* We take the last 2 values given, the 2nd-last as the primary + and the last as the secondary. If only one is given it + becomes both primary and secondary. */ + if (ipcp_allowoptions[0].winsaddr[1] == 0) + ipcp_allowoptions[0].winsaddr[0] = wins; + else + ipcp_allowoptions[0].winsaddr[0] = ipcp_allowoptions[0].winsaddr[1]; + + /* always set the secondary address value. */ + ipcp_allowoptions[0].winsaddr[1] = wins; + + return (1); +} + +/* + * setipaddr - Set the IP address + * If doit is 0, the call is to check whether this option is + * potentially an IP address specification. + * Not static so that plugins can call it to set the addresses + */ +int +setipaddr(arg, argv, doit) + char *arg; + char **argv; + int doit; +{ + struct hostent *hp; + char *colon; + u32_t local, remote; + ipcp_options *wo = &ipcp_wantoptions[0]; + static int prio_local = 0, prio_remote = 0; + + /* + * IP address pair separated by ":". + */ + if ((colon = strchr(arg, ':')) == NULL) + return 0; + if (!doit) + return 1; + + /* + * If colon first character, then no local addr. + */ + if (colon != arg && option_priority >= prio_local) { + *colon = '\0'; + if ((local = inet_addr(arg)) == (u32_t) -1) { + if ((hp = gethostbyname(arg)) == NULL) { + option_error("unknown host: %s", arg); + return 0; + } + local = *(u32_t *)hp->h_addr; + } + if (bad_ip_adrs(local)) { + option_error("bad local IP address %s", ip_ntoa(local)); + return 0; + } + if (local != 0) + wo->ouraddr = local; + *colon = ':'; + prio_local = option_priority; + } + + /* + * If colon last character, then no remote addr. + */ + if (*++colon != '\0' && option_priority >= prio_remote) { + if ((remote = inet_addr(colon)) == (u32_t) -1) { + if ((hp = gethostbyname(colon)) == NULL) { + option_error("unknown host: %s", colon); + return 0; + } + remote = *(u32_t *)hp->h_addr; + if (remote_name[0] == 0) + strlcpy(remote_name, colon, sizeof(remote_name)); + } + if (bad_ip_adrs(remote)) { + option_error("bad remote IP address %s", ip_ntoa(remote)); + return 0; + } + if (remote != 0) + wo->hisaddr = remote; + prio_remote = option_priority; + } + + return 1; +} + +static void +printipaddr(opt, printer, arg) + option_t *opt; + void (*printer) (void *, char *, ...); + void *arg; +{ + ipcp_options *wo = &ipcp_wantoptions[0]; + + if (wo->ouraddr != 0) + printer(arg, "%I", wo->ouraddr); + printer(arg, ":"); + if (wo->hisaddr != 0) + printer(arg, "%I", wo->hisaddr); +} + +/* + * setnetmask - set the netmask to be used on the interface. + */ +static int +setnetmask(argv) + char **argv; +{ + u32_t mask; + int n; + char *p; + + /* + * Unfortunately, if we use inet_addr, we can't tell whether + * a result of all 1s is an error or a valid 255.255.255.255. 
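+ * parse_dotted_ip() below returns the number of characters it consumed, so
+ * a return of 0 (or trailing junk after the consumed part) can be told
+ * apart from a legitimately parsed all-ones mask.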
+ */ + p = *argv; + n = parse_dotted_ip(p, &mask); + + mask = lwip_htonl(mask); + + if (n == 0 || p[n] != 0 || (netmask & ~mask) != 0) { + option_error("invalid netmask value '%s'", *argv); + return 0; + } + + netmask = mask; + slprintf(netmask_str, sizeof(netmask_str), "%I", mask); + + return (1); +} + +int +parse_dotted_ip(p, vp) + char *p; + u32_t *vp; +{ + int n; + u32_t v, b; + char *endp, *p0 = p; + + v = 0; + for (n = 3;; --n) { + b = strtoul(p, &endp, 0); + if (endp == p) + return 0; + if (b > 255) { + if (n < 3) + return 0; + /* accept e.g. 0xffffff00 */ + *vp = b; + return endp - p0; + } + v |= b << (n * 8); + p = endp; + if (n == 0) + break; + if (*p != '.') + return 0; + ++p; + } + *vp = v; + return p - p0; +} +#endif /* PPP_OPTIONS */ + +/* + * ipcp_init - Initialize IPCP. + */ +static void ipcp_init(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + + ipcp_options *wo = &pcb->ipcp_wantoptions; + ipcp_options *ao = &pcb->ipcp_allowoptions; + + f->pcb = pcb; + f->protocol = PPP_IPCP; + f->callbacks = &ipcp_callbacks; + fsm_init(f); + + /* + * Some 3G modems use repeated IPCP NAKs as a way of stalling + * until they can contact a server on the network, so we increase + * the default number of NAKs we accept before we start treating + * them as rejects. + */ + f->maxnakloops = 100; + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(wo, 0, sizeof(*wo)); + memset(ao, 0, sizeof(*ao)); +#endif /* 0 */ + + wo->neg_addr = wo->old_addrs = 1; +#if VJ_SUPPORT + wo->neg_vj = 1; + wo->vj_protocol = IPCP_VJ_COMP; + wo->maxslotindex = MAX_STATES - 1; /* really max index */ + wo->cflag = 1; +#endif /* VJ_SUPPORT */ + +#if 0 /* UNUSED */ + /* wanting default route by default */ + wo->default_route = 1; +#endif /* UNUSED */ + + ao->neg_addr = ao->old_addrs = 1; +#if VJ_SUPPORT + /* max slots and slot-id compression are currently hardwired in */ + /* ppp_if.c to 16 and 1, this needs to be changed (among other */ + /* things) gmc */ + + ao->neg_vj = 1; + ao->maxslotindex = MAX_STATES - 1; + ao->cflag = 1; +#endif /* #if VJ_SUPPORT */ + +#if 0 /* UNUSED */ + /* + * XXX These control whether the user may use the proxyarp + * and defaultroute options. + */ + ao->proxy_arp = 1; + ao->default_route = 1; +#endif /* UNUSED */ +} + + +/* + * ipcp_open - IPCP is allowed to come up. + */ +static void ipcp_open(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_open(f); + pcb->ipcp_is_open = 1; +} + + +/* + * ipcp_close - Take IPCP down. + */ +static void ipcp_close(ppp_pcb *pcb, const char *reason) { + fsm *f = &pcb->ipcp_fsm; + fsm_close(f, reason); +} + + +/* + * ipcp_lowerup - The lower layer is up. + */ +static void ipcp_lowerup(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_lowerup(f); +} + + +/* + * ipcp_lowerdown - The lower layer is down. + */ +static void ipcp_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_lowerdown(f); +} + + +/* + * ipcp_input - Input IPCP packet. + */ +static void ipcp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->ipcp_fsm; + fsm_input(f, p, len); +} + + +/* + * ipcp_protrej - A Protocol-Reject was received for IPCP. + * + * Pretend the lower layer went down, so we shut up. + */ +static void ipcp_protrej(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_lowerdown(f); +} + + +/* + * ipcp_resetci - Reset our CI. + * Called by fsm_sconfreq, Send Configure Request. 
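+ * With no locally configured address we leave go->ouraddr at 0 and set
+ * accept_local, so a peer that Naks our 0.0.0.0 with a usable address gets
+ * its suggestion accepted in ipcp_nakci().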
+ */ +static void ipcp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipcp_options *wo = &pcb->ipcp_wantoptions; + ipcp_options *go = &pcb->ipcp_gotoptions; + ipcp_options *ao = &pcb->ipcp_allowoptions; + + wo->req_addr = (wo->neg_addr || wo->old_addrs) && + (ao->neg_addr || ao->old_addrs); + if (wo->ouraddr == 0) + wo->accept_local = 1; + if (wo->hisaddr == 0) + wo->accept_remote = 1; +#if LWIP_DNS + wo->req_dns1 = wo->req_dns2 = pcb->settings.usepeerdns; /* Request DNS addresses from the peer */ +#endif /* LWIP_DNS */ + *go = *wo; + if (!pcb->ask_for_local) + go->ouraddr = 0; +#if 0 /* UNUSED */ + if (ip_choose_hook) { + ip_choose_hook(&wo->hisaddr); + if (wo->hisaddr) { + wo->accept_remote = 0; + } + } +#endif /* UNUSED */ + BZERO(&pcb->ipcp_hisoptions, sizeof(ipcp_options)); +} + + +/* + * ipcp_cilen - Return length of our CI. + * Called by fsm_sconfreq, Send Configure Request. + */ +static int ipcp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; +#if VJ_SUPPORT + ipcp_options *wo = &pcb->ipcp_wantoptions; +#endif /* VJ_SUPPORT */ + ipcp_options *ho = &pcb->ipcp_hisoptions; + +#define LENCIADDRS(neg) (neg ? CILEN_ADDRS : 0) +#if VJ_SUPPORT +#define LENCIVJ(neg, old) (neg ? (old? CILEN_COMPRESS : CILEN_VJ) : 0) +#endif /* VJ_SUPPORT */ +#define LENCIADDR(neg) (neg ? CILEN_ADDR : 0) +#if LWIP_DNS +#define LENCIDNS(neg) LENCIADDR(neg) +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ +#define LENCIWINS(neg) LENCIADDR(neg) +#endif /* UNUSED - WINS */ + + /* + * First see if we want to change our options to the old + * forms because we have received old forms from the peer. + */ + if (go->neg_addr && go->old_addrs && !ho->neg_addr && ho->old_addrs) + go->neg_addr = 0; + +#if VJ_SUPPORT + if (wo->neg_vj && !go->neg_vj && !go->old_vj) { + /* try an older style of VJ negotiation */ + /* use the old style only if the peer did */ + if (ho->neg_vj && ho->old_vj) { + go->neg_vj = 1; + go->old_vj = 1; + go->vj_protocol = ho->vj_protocol; + } + } +#endif /* VJ_SUPPORT */ + + return (LENCIADDRS(!go->neg_addr && go->old_addrs) + +#if VJ_SUPPORT + LENCIVJ(go->neg_vj, go->old_vj) + +#endif /* VJ_SUPPORT */ + LENCIADDR(go->neg_addr) + +#if LWIP_DNS + LENCIDNS(go->req_dns1) + + LENCIDNS(go->req_dns2) + +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + LENCIWINS(go->winsaddr[0]) + + LENCIWINS(go->winsaddr[1]) + +#endif /* UNUSED - WINS */ + 0); +} + + +/* + * ipcp_addci - Add our desired CIs to a packet. + * Called by fsm_sconfreq, Send Configure Request. + */ +static void ipcp_addci(fsm *f, u_char *ucp, int *lenp) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + int len = *lenp; + +#define ADDCIADDRS(opt, neg, val1, val2) \ + if (neg) { \ + if (len >= CILEN_ADDRS) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDRS, ucp); \ + l = lwip_ntohl(val1); \ + PUTLONG(l, ucp); \ + l = lwip_ntohl(val2); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDRS; \ + } else \ + go->old_addrs = 0; \ + } + +#if VJ_SUPPORT +#define ADDCIVJ(opt, neg, val, old, maxslotindex, cflag) \ + if (neg) { \ + int vjlen = old? 
CILEN_COMPRESS : CILEN_VJ; \ + if (len >= vjlen) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(vjlen, ucp); \ + PUTSHORT(val, ucp); \ + if (!old) { \ + PUTCHAR(maxslotindex, ucp); \ + PUTCHAR(cflag, ucp); \ + } \ + len -= vjlen; \ + } else \ + neg = 0; \ + } +#endif /* VJ_SUPPORT */ + +#define ADDCIADDR(opt, neg, val) \ + if (neg) { \ + if (len >= CILEN_ADDR) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDR, ucp); \ + l = lwip_ntohl(val); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDR; \ + } else \ + neg = 0; \ + } + +#if LWIP_DNS +#define ADDCIDNS(opt, neg, addr) \ + if (neg) { \ + if (len >= CILEN_ADDR) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDR, ucp); \ + l = lwip_ntohl(addr); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDR; \ + } else \ + neg = 0; \ + } +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ +#define ADDCIWINS(opt, addr) \ + if (addr) { \ + if (len >= CILEN_ADDR) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDR, ucp); \ + l = lwip_ntohl(addr); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDR; \ + } else \ + addr = 0; \ + } +#endif /* UNUSED - WINS */ + + ADDCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, go->ouraddr, + go->hisaddr); + +#if VJ_SUPPORT + ADDCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol, go->old_vj, + go->maxslotindex, go->cflag); +#endif /* VJ_SUPPORT */ + + ADDCIADDR(CI_ADDR, go->neg_addr, go->ouraddr); + +#if LWIP_DNS + ADDCIDNS(CI_MS_DNS1, go->req_dns1, go->dnsaddr[0]); + + ADDCIDNS(CI_MS_DNS2, go->req_dns2, go->dnsaddr[1]); +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + ADDCIWINS(CI_MS_WINS1, go->winsaddr[0]); + + ADDCIWINS(CI_MS_WINS2, go->winsaddr[1]); +#endif /* UNUSED - WINS */ + + *lenp -= len; +} + + +/* + * ipcp_ackci - Ack our CIs. + * Called by fsm_rconfack, Receive Configure ACK. + * + * Returns: + * 0 - Ack was bad. + * 1 - Ack was good. + */ +static int ipcp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + u_short cilen, citype; + u32_t cilong; +#if VJ_SUPPORT + u_short cishort; + u_char cimaxslotindex, cicflag; +#endif /* VJ_SUPPORT */ + + /* + * CIs must be in exactly the same order that we sent... + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ + +#define ACKCIADDRS(opt, neg, val1, val2) \ + if (neg) { \ + u32_t l; \ + if ((len -= CILEN_ADDRS) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDRS || \ + citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (val1 != cilong) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (val2 != cilong) \ + goto bad; \ + } + +#if VJ_SUPPORT +#define ACKCIVJ(opt, neg, val, old, maxslotindex, cflag) \ + if (neg) { \ + int vjlen = old? 
CILEN_COMPRESS : CILEN_VJ; \ + if ((len -= vjlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != vjlen || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != val) \ + goto bad; \ + if (!old) { \ + GETCHAR(cimaxslotindex, p); \ + if (cimaxslotindex != maxslotindex) \ + goto bad; \ + GETCHAR(cicflag, p); \ + if (cicflag != cflag) \ + goto bad; \ + } \ + } +#endif /* VJ_SUPPORT */ + +#define ACKCIADDR(opt, neg, val) \ + if (neg) { \ + u32_t l; \ + if ((len -= CILEN_ADDR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDR || \ + citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (val != cilong) \ + goto bad; \ + } + +#if LWIP_DNS +#define ACKCIDNS(opt, neg, addr) \ + if (neg) { \ + u32_t l; \ + if ((len -= CILEN_ADDR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDR || citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (addr != cilong) \ + goto bad; \ + } +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ +#define ACKCIWINS(opt, addr) \ + if (addr) { \ + u32_t l; \ + if ((len -= CILEN_ADDR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDR || citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (addr != cilong) \ + goto bad; \ + } +#endif /* UNUSED - WINS */ + + ACKCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, go->ouraddr, + go->hisaddr); + +#if VJ_SUPPORT + ACKCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol, go->old_vj, + go->maxslotindex, go->cflag); +#endif /* VJ_SUPPORT */ + + ACKCIADDR(CI_ADDR, go->neg_addr, go->ouraddr); + +#if LWIP_DNS + ACKCIDNS(CI_MS_DNS1, go->req_dns1, go->dnsaddr[0]); + + ACKCIDNS(CI_MS_DNS2, go->req_dns2, go->dnsaddr[1]); +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + ACKCIWINS(CI_MS_WINS1, go->winsaddr[0]); + + ACKCIWINS(CI_MS_WINS2, go->winsaddr[1]); +#endif /* UNUSED - WINS */ + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + return (1); + +bad: + IPCPDEBUG(("ipcp_ackci: received bad Ack!")); + return (0); +} + +/* + * ipcp_nakci - Peer has sent a NAK for some of our CIs. + * This should not modify any state if the Nak is bad + * or if IPCP is in the OPENED state. + * Calback from fsm_rconfnakrej - Receive Configure-Nak or Configure-Reject. + * + * Returns: + * 0 - Nak was bad. + * 1 - Nak was good. + */ +static int ipcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + u_char citype, cilen, *next; +#if VJ_SUPPORT + u_char cimaxslotindex, cicflag; + u_short cishort; +#endif /* VJ_SUPPORT */ + u32_t ciaddr1, ciaddr2, l; +#if LWIP_DNS + u32_t cidnsaddr; +#endif /* LWIP_DNS */ + ipcp_options no; /* options we've seen Naks for */ + ipcp_options try_; /* options to request next time */ + + BZERO(&no, sizeof(no)); + try_ = *go; + + /* + * Any Nak'd CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. 
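+ * For example, if our request carried CI_ADDR followed by CI_MS_DNS1, a
+ * well-formed Nak may rewrite either value but must list the options in
+ * that same order.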
+ */ +#define NAKCIADDRS(opt, neg, code) \ + if ((neg) && \ + (cilen = p[1]) == CILEN_ADDRS && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + ciaddr1 = lwip_htonl(l); \ + GETLONG(l, p); \ + ciaddr2 = lwip_htonl(l); \ + no.old_addrs = 1; \ + code \ + } + +#if VJ_SUPPORT +#define NAKCIVJ(opt, neg, code) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_COMPRESS || cilen == CILEN_VJ) && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + no.neg = 1; \ + code \ + } +#endif /* VJ_SUPPORT */ + +#define NAKCIADDR(opt, neg, code) \ + if (go->neg && \ + (cilen = p[1]) == CILEN_ADDR && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + ciaddr1 = lwip_htonl(l); \ + no.neg = 1; \ + code \ + } + +#if LWIP_DNS +#define NAKCIDNS(opt, neg, code) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_ADDR) && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cidnsaddr = lwip_htonl(l); \ + no.neg = 1; \ + code \ + } +#endif /* LWIP_DNS */ + + /* + * Accept the peer's idea of {our,his} address, if different + * from our idea, only if the accept_{local,remote} flag is set. + */ + NAKCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, + if (treat_as_reject) { + try_.old_addrs = 0; + } else { + if (go->accept_local && ciaddr1) { + /* take his idea of our address */ + try_.ouraddr = ciaddr1; + } + if (go->accept_remote && ciaddr2) { + /* take his idea of his address */ + try_.hisaddr = ciaddr2; + } + } + ); + +#if VJ_SUPPORT + /* + * Accept the peer's value of maxslotindex provided that it + * is less than what we asked for. Turn off slot-ID compression + * if the peer wants. Send old-style compress-type option if + * the peer wants. + */ + NAKCIVJ(CI_COMPRESSTYPE, neg_vj, + if (treat_as_reject) { + try_.neg_vj = 0; + } else if (cilen == CILEN_VJ) { + GETCHAR(cimaxslotindex, p); + GETCHAR(cicflag, p); + if (cishort == IPCP_VJ_COMP) { + try_.old_vj = 0; + if (cimaxslotindex < go->maxslotindex) + try_.maxslotindex = cimaxslotindex; + if (!cicflag) + try_.cflag = 0; + } else { + try_.neg_vj = 0; + } + } else { + if (cishort == IPCP_VJ_COMP || cishort == IPCP_VJ_COMP_OLD) { + try_.old_vj = 1; + try_.vj_protocol = cishort; + } else { + try_.neg_vj = 0; + } + } + ); +#endif /* VJ_SUPPORT */ + + NAKCIADDR(CI_ADDR, neg_addr, + if (treat_as_reject) { + try_.neg_addr = 0; + try_.old_addrs = 0; + } else if (go->accept_local && ciaddr1) { + /* take his idea of our address */ + try_.ouraddr = ciaddr1; + } + ); + +#if LWIP_DNS + NAKCIDNS(CI_MS_DNS1, req_dns1, + if (treat_as_reject) { + try_.req_dns1 = 0; + } else { + try_.dnsaddr[0] = cidnsaddr; + } + ); + + NAKCIDNS(CI_MS_DNS2, req_dns2, + if (treat_as_reject) { + try_.req_dns2 = 0; + } else { + try_.dnsaddr[1] = cidnsaddr; + } + ); +#endif /* #if LWIP_DNS */ + + /* + * There may be remaining CIs, if the peer is requesting negotiation + * on an option that we didn't include in our request packet. + * If they want to negotiate about IP addresses, we comply. + * If they want us to ask for compression, we refuse. + * If they want us to ask for ms-dns, we do that, since some + * peers get huffy if we don't. 
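+ * A common case is a peer that Naks with CI_MS_DNS1/CI_MS_DNS2 (options
+ * 129 and 131) although we never requested them: the loop below records
+ * the offered servers and asks for them in our next Configure-Request.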
+ */ + while (len >= CILEN_VOID) { + GETCHAR(citype, p); + GETCHAR(cilen, p); + if ( cilen < CILEN_VOID || (len -= cilen) < 0 ) + goto bad; + next = p + cilen - 2; + + switch (citype) { +#if VJ_SUPPORT + case CI_COMPRESSTYPE: + if (go->neg_vj || no.neg_vj || + (cilen != CILEN_VJ && cilen != CILEN_COMPRESS)) + goto bad; + no.neg_vj = 1; + break; +#endif /* VJ_SUPPORT */ + case CI_ADDRS: + if ((!go->neg_addr && go->old_addrs) || no.old_addrs + || cilen != CILEN_ADDRS) + goto bad; + try_.neg_addr = 0; + GETLONG(l, p); + ciaddr1 = lwip_htonl(l); + if (ciaddr1 && go->accept_local) + try_.ouraddr = ciaddr1; + GETLONG(l, p); + ciaddr2 = lwip_htonl(l); + if (ciaddr2 && go->accept_remote) + try_.hisaddr = ciaddr2; + no.old_addrs = 1; + break; + case CI_ADDR: + if (go->neg_addr || no.neg_addr || cilen != CILEN_ADDR) + goto bad; + try_.old_addrs = 0; + GETLONG(l, p); + ciaddr1 = lwip_htonl(l); + if (ciaddr1 && go->accept_local) + try_.ouraddr = ciaddr1; + if (try_.ouraddr != 0) + try_.neg_addr = 1; + no.neg_addr = 1; + break; +#if LWIP_DNS + case CI_MS_DNS1: + if (go->req_dns1 || no.req_dns1 || cilen != CILEN_ADDR) + goto bad; + GETLONG(l, p); + try_.dnsaddr[0] = lwip_htonl(l); + try_.req_dns1 = 1; + no.req_dns1 = 1; + break; + case CI_MS_DNS2: + if (go->req_dns2 || no.req_dns2 || cilen != CILEN_ADDR) + goto bad; + GETLONG(l, p); + try_.dnsaddr[1] = lwip_htonl(l); + try_.req_dns2 = 1; + no.req_dns2 = 1; + break; +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + case CI_MS_WINS1: + case CI_MS_WINS2: + if (cilen != CILEN_ADDR) + goto bad; + GETLONG(l, p); + ciaddr1 = lwip_htonl(l); + if (ciaddr1) + try_.winsaddr[citype == CI_MS_WINS2] = ciaddr1; + break; +#endif /* UNUSED - WINS */ + default: + break; + } + p = next; + } + + /* + * OK, the Nak is good. Now we can update state. + * If there are any remaining options, we ignore them. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + + return 1; + +bad: + IPCPDEBUG(("ipcp_nakci: received bad Nak!")); + return 0; +} + + +/* + * ipcp_rejci - Reject some of our CIs. + * Callback from fsm_rconfnakrej. + */ +static int ipcp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + u_char cilen; +#if VJ_SUPPORT + u_char cimaxslotindex, ciflag; + u_short cishort; +#endif /* VJ_SUPPORT */ + u32_t cilong; + ipcp_options try_; /* options to request next time */ + + try_ = *go; + /* + * Any Rejected CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define REJCIADDRS(opt, neg, val1, val2) \ + if ((neg) && \ + (cilen = p[1]) == CILEN_ADDRS && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != val1) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != val2) \ + goto bad; \ + try_.old_addrs = 0; \ + } + +#if VJ_SUPPORT +#define REJCIVJ(opt, neg, val, old, maxslot, cflag) \ + if (go->neg && \ + p[1] == (old? CILEN_COMPRESS : CILEN_VJ) && \ + len >= p[1] && \ + p[0] == opt) { \ + len -= p[1]; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + /* Check rejected value. 
*/ \ + if (cishort != val) \ + goto bad; \ + if (!old) { \ + GETCHAR(cimaxslotindex, p); \ + if (cimaxslotindex != maxslot) \ + goto bad; \ + GETCHAR(ciflag, p); \ + if (ciflag != cflag) \ + goto bad; \ + } \ + try_.neg = 0; \ + } +#endif /* VJ_SUPPORT */ + +#define REJCIADDR(opt, neg, val) \ + if (go->neg && \ + (cilen = p[1]) == CILEN_ADDR && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != val) \ + goto bad; \ + try_.neg = 0; \ + } + +#if LWIP_DNS +#define REJCIDNS(opt, neg, dnsaddr) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_ADDR) && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != dnsaddr) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ +#define REJCIWINS(opt, addr) \ + if (addr && \ + ((cilen = p[1]) == CILEN_ADDR) && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != addr) \ + goto bad; \ + try_.winsaddr[opt == CI_MS_WINS2] = 0; \ + } +#endif /* UNUSED - WINS */ + + REJCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, + go->ouraddr, go->hisaddr); + +#if VJ_SUPPORT + REJCIVJ(CI_COMPRESSTYPE, neg_vj, go->vj_protocol, go->old_vj, + go->maxslotindex, go->cflag); +#endif /* VJ_SUPPORT */ + + REJCIADDR(CI_ADDR, neg_addr, go->ouraddr); + +#if LWIP_DNS + REJCIDNS(CI_MS_DNS1, req_dns1, go->dnsaddr[0]); + + REJCIDNS(CI_MS_DNS2, req_dns2, go->dnsaddr[1]); +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + REJCIWINS(CI_MS_WINS1, go->winsaddr[0]); + + REJCIWINS(CI_MS_WINS2, go->winsaddr[1]); +#endif /* UNUSED - WINS */ + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + /* + * Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; + +bad: + IPCPDEBUG(("ipcp_rejci: received bad Reject!")); + return 0; +} + + +/* + * ipcp_reqci - Check the peer's requested CIs and send appropriate response. + * Callback from fsm_rconfreq, Receive Configure Request + * + * Returns: CONFACK, CONFNAK or CONFREJ and input packet modified + * appropriately. If reject_if_disagree is non-zero, doesn't return + * CONFNAK; returns CONFREJ if it can't return CONFACK. + * + * inp = Requested CIs + * len = Length of requested CIs + */ +static int ipcp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree) { + ppp_pcb *pcb = f->pcb; + ipcp_options *wo = &pcb->ipcp_wantoptions; + ipcp_options *ho = &pcb->ipcp_hisoptions; + ipcp_options *ao = &pcb->ipcp_allowoptions; + u_char *cip, *next; /* Pointer to current and next CIs */ + u_short cilen, citype; /* Parsed len, type */ +#if VJ_SUPPORT + u_short cishort; /* Parsed short value */ +#endif /* VJ_SUPPORT */ + u32_t tl, ciaddr1, ciaddr2;/* Parsed address values */ + int rc = CONFACK; /* Final packet return code */ + int orc; /* Individual option return code */ + u_char *p; /* Pointer to next char to parse */ + u_char *ucp = inp; /* Pointer to current output char */ + int l = *len; /* Length left */ +#if VJ_SUPPORT + u_char maxslotindex, cflag; +#endif /* VJ_SUPPORT */ +#if LWIP_DNS + int d; +#endif /* LWIP_DNS */ + + /* + * Reset all his options. + */ + BZERO(ho, sizeof(*ho)); + + /* + * Process all his options. 
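+ * Each CI is a type/length/value triple, e.g. 03 06 c0 a8 01 01 is an
+ * IP-Address option proposing 192.168.1.1, and 02 06 00 2d 0f 01 is a VJ
+ * option (protocol 0x002d, 16 slots, slot-id compression enabled).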
+ */ + next = inp; + while (l) { + orc = CONFACK; /* Assume success */ + cip = p = next; /* Remember begining of CI */ + if (l < 2 || /* Not enough data for CI header or */ + p[1] < 2 || /* CI length too small or */ + p[1] > l) { /* CI length too big? */ + IPCPDEBUG(("ipcp_reqci: bad CI length!")); + orc = CONFREJ; /* Reject bad CI */ + cilen = l; /* Reject till end of packet */ + l = 0; /* Don't loop again */ + goto endswitch; + } + GETCHAR(citype, p); /* Parse CI type */ + GETCHAR(cilen, p); /* Parse CI length */ + l -= cilen; /* Adjust remaining length */ + next += cilen; /* Step to next CI */ + + switch (citype) { /* Check CI type */ + case CI_ADDRS: + if (!ao->old_addrs || ho->neg_addr || + cilen != CILEN_ADDRS) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + + /* + * If he has no address, or if we both have his address but + * disagree about it, then NAK it with our idea. + * In particular, if we don't know his address, but he does, + * then accept it. + */ + GETLONG(tl, p); /* Parse source address (his) */ + ciaddr1 = lwip_htonl(tl); + if (ciaddr1 != wo->hisaddr + && (ciaddr1 == 0 || !wo->accept_remote)) { + orc = CONFNAK; + if (!reject_if_disagree) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(wo->hisaddr); + PUTLONG(tl, p); + } + } else if (ciaddr1 == 0 && wo->hisaddr == 0) { + /* + * If neither we nor he knows his address, reject the option. + */ + orc = CONFREJ; + wo->req_addr = 0; /* don't NAK with 0.0.0.0 later */ + break; + } + + /* + * If he doesn't know our address, or if we both have our address + * but disagree about it, then NAK it with our idea. + */ + GETLONG(tl, p); /* Parse desination address (ours) */ + ciaddr2 = lwip_htonl(tl); + if (ciaddr2 != wo->ouraddr) { + if (ciaddr2 == 0 || !wo->accept_local) { + orc = CONFNAK; + if (!reject_if_disagree) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(wo->ouraddr); + PUTLONG(tl, p); + } + } else { + wo->ouraddr = ciaddr2; /* accept peer's idea */ + } + } + + ho->old_addrs = 1; + ho->hisaddr = ciaddr1; + ho->ouraddr = ciaddr2; + break; + + case CI_ADDR: + if (!ao->neg_addr || ho->old_addrs || + cilen != CILEN_ADDR) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + + /* + * If he has no address, or if we both have his address but + * disagree about it, then NAK it with our idea. + * In particular, if we don't know his address, but he does, + * then accept it. + */ + GETLONG(tl, p); /* Parse source address (his) */ + ciaddr1 = lwip_htonl(tl); + if (ciaddr1 != wo->hisaddr + && (ciaddr1 == 0 || !wo->accept_remote)) { + orc = CONFNAK; + if (!reject_if_disagree) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(wo->hisaddr); + PUTLONG(tl, p); + } + } else if (ciaddr1 == 0 && wo->hisaddr == 0) { + /* + * Don't ACK an address of 0.0.0.0 - reject it instead. 
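+ * When we do know an address for the peer, the Nak assembled above carries
+ * our suggestion instead, e.g. 03 06 0a 00 00 02 to hand out 10.0.0.2.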
+ */ + orc = CONFREJ; + wo->req_addr = 0; /* don't NAK with 0.0.0.0 later */ + break; + } + + ho->neg_addr = 1; + ho->hisaddr = ciaddr1; + break; + +#if LWIP_DNS + case CI_MS_DNS1: + case CI_MS_DNS2: + /* Microsoft primary or secondary DNS request */ + d = citype == CI_MS_DNS2; + + /* If we do not have a DNS address then we cannot send it */ + if (ao->dnsaddr[d] == 0 || + cilen != CILEN_ADDR) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + GETLONG(tl, p); + if (lwip_htonl(tl) != ao->dnsaddr[d]) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(ao->dnsaddr[d]); + PUTLONG(tl, p); + orc = CONFNAK; + } + break; +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + case CI_MS_WINS1: + case CI_MS_WINS2: + /* Microsoft primary or secondary WINS request */ + d = citype == CI_MS_WINS2; + + /* If we do not have a DNS address then we cannot send it */ + if (ao->winsaddr[d] == 0 || + cilen != CILEN_ADDR) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + GETLONG(tl, p); + if (lwip_htonl(tl) != ao->winsaddr[d]) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(ao->winsaddr[d]); + PUTLONG(tl, p); + orc = CONFNAK; + } + break; +#endif /* UNUSED - WINS */ + +#if VJ_SUPPORT + case CI_COMPRESSTYPE: + if (!ao->neg_vj || + (cilen != CILEN_VJ && cilen != CILEN_COMPRESS)) { + orc = CONFREJ; + break; + } + GETSHORT(cishort, p); + + if (!(cishort == IPCP_VJ_COMP || + (cishort == IPCP_VJ_COMP_OLD && cilen == CILEN_COMPRESS))) { + orc = CONFREJ; + break; + } + + ho->neg_vj = 1; + ho->vj_protocol = cishort; + if (cilen == CILEN_VJ) { + GETCHAR(maxslotindex, p); + if (maxslotindex > ao->maxslotindex) { + orc = CONFNAK; + if (!reject_if_disagree){ + DECPTR(1, p); + PUTCHAR(ao->maxslotindex, p); + } + } + GETCHAR(cflag, p); + if (cflag && !ao->cflag) { + orc = CONFNAK; + if (!reject_if_disagree){ + DECPTR(1, p); + PUTCHAR(wo->cflag, p); + } + } + ho->maxslotindex = maxslotindex; + ho->cflag = cflag; + } else { + ho->old_vj = 1; + ho->maxslotindex = MAX_STATES - 1; + ho->cflag = 1; + } + break; +#endif /* VJ_SUPPORT */ + + default: + orc = CONFREJ; + break; + } +endswitch: + if (orc == CONFACK && /* Good CI */ + rc != CONFACK) /* but prior CI wasnt? */ + continue; /* Don't send this one */ + + if (orc == CONFNAK) { /* Nak this CI? */ + if (reject_if_disagree) /* Getting fed up with sending NAKs? */ + orc = CONFREJ; /* Get tough if so */ + else { + if (rc == CONFREJ) /* Rejecting prior CI? */ + continue; /* Don't send this one */ + if (rc == CONFACK) { /* Ack'd all prior CIs? */ + rc = CONFNAK; /* Not anymore... */ + ucp = inp; /* Backup */ + } + } + } + + if (orc == CONFREJ && /* Reject this CI */ + rc != CONFREJ) { /* but no prior ones? */ + rc = CONFREJ; + ucp = inp; /* Backup */ + } + + /* Need to move CI? */ + if (ucp != cip) + MEMCPY(ucp, cip, cilen); /* Move it */ + + /* Update output pointer */ + INCPTR(cilen, ucp); + } + + /* + * If we aren't rejecting this packet, and we want to negotiate + * their address, and they didn't send their address, then we + * send a NAK with a CI_ADDR option appended. We assume the + * input buffer is long enough that we can append the extra + * option safely. 
+ */ + if (rc != CONFREJ && !ho->neg_addr && !ho->old_addrs && + wo->req_addr && !reject_if_disagree && !pcb->settings.noremoteip) { + if (rc == CONFACK) { + rc = CONFNAK; + ucp = inp; /* reset pointer */ + wo->req_addr = 0; /* don't ask again */ + } + PUTCHAR(CI_ADDR, ucp); + PUTCHAR(CILEN_ADDR, ucp); + tl = lwip_ntohl(wo->hisaddr); + PUTLONG(tl, ucp); + } + + *len = ucp - inp; /* Compute output length */ + IPCPDEBUG(("ipcp: returning Configure-%s", CODENAME(rc))); + return (rc); /* Return final code */ +} + + +#if 0 /* UNUSED */ +/* + * ip_check_options - check that any IP-related options are OK, + * and assign appropriate defaults. + */ +static void +ip_check_options() +{ + struct hostent *hp; + u32_t local; + ipcp_options *wo = &ipcp_wantoptions[0]; + + /* + * Default our local IP address based on our hostname. + * If local IP address already given, don't bother. + */ + if (wo->ouraddr == 0 && !disable_defaultip) { + /* + * Look up our hostname (possibly with domain name appended) + * and take the first IP address as our local IP address. + * If there isn't an IP address for our hostname, too bad. + */ + wo->accept_local = 1; /* don't insist on this default value */ + if ((hp = gethostbyname(hostname)) != NULL) { + local = *(u32_t *)hp->h_addr; + if (local != 0 && !bad_ip_adrs(local)) + wo->ouraddr = local; + } + } + ask_for_local = wo->ouraddr != 0 || !disable_defaultip; +} +#endif /* UNUSED */ + +#if DEMAND_SUPPORT +/* + * ip_demand_conf - configure the interface as though + * IPCP were up, for use with dial-on-demand. + */ +static int +ip_demand_conf(u) + int u; +{ + ppp_pcb *pcb = &ppp_pcb_list[u]; + ipcp_options *wo = &ipcp_wantoptions[u]; + + if (wo->hisaddr == 0 && !pcb->settings.noremoteip) { + /* make up an arbitrary address for the peer */ + wo->hisaddr = lwip_htonl(0x0a707070 + ifunit); + wo->accept_remote = 1; + } + if (wo->ouraddr == 0) { + /* make up an arbitrary address for us */ + wo->ouraddr = lwip_htonl(0x0a404040 + ifunit); + wo->accept_local = 1; + ask_for_local = 0; /* don't tell the peer this address */ + } + if (!sifaddr(pcb, wo->ouraddr, wo->hisaddr, get_mask(wo->ouraddr))) + return 0; + if (!sifup(pcb)) + return 0; + if (!sifnpmode(pcb, PPP_IP, NPMODE_QUEUE)) + return 0; +#if 0 /* UNUSED */ + if (wo->default_route) + if (sifdefaultroute(pcb, wo->ouraddr, wo->hisaddr, + wo->replace_default_route)) + default_route_set[u] = 1; +#endif /* UNUSED */ +#if 0 /* UNUSED - PROXY ARP */ + if (wo->proxy_arp) + if (sifproxyarp(pcb, wo->hisaddr)) + proxy_arp_set[u] = 1; +#endif /* UNUSED - PROXY ARP */ + + ppp_notice("local IP address %I", wo->ouraddr); + if (wo->hisaddr) + ppp_notice("remote IP address %I", wo->hisaddr); + + return 1; +} +#endif /* DEMAND_SUPPORT */ + +/* + * ipcp_up - IPCP has come UP. + * + * Configure the IP network interface appropriately and bring it up. + */ +static void ipcp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + u32_t mask; + ipcp_options *ho = &pcb->ipcp_hisoptions; + ipcp_options *go = &pcb->ipcp_gotoptions; + ipcp_options *wo = &pcb->ipcp_wantoptions; + + IPCPDEBUG(("ipcp: up")); + + /* + * We must have a non-zero IP address for both ends of the link. 
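+ * If the peer negotiated no address at all we fall back to wo->hisaddr;
+ * if that is unset too (and a remote address is required), the dummy
+ * 10.64.64.64 assigned below keeps the interface configurable.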
+ */ + if (!ho->neg_addr && !ho->old_addrs) + ho->hisaddr = wo->hisaddr; + + if (!(go->neg_addr || go->old_addrs) && (wo->neg_addr || wo->old_addrs) + && wo->ouraddr != 0) { + ppp_error("Peer refused to agree to our IP address"); + ipcp_close(f->pcb, "Refused our IP address"); + return; + } + if (go->ouraddr == 0) { + ppp_error("Could not determine local IP address"); + ipcp_close(f->pcb, "Could not determine local IP address"); + return; + } + if (ho->hisaddr == 0 && !pcb->settings.noremoteip) { + ho->hisaddr = lwip_htonl(0x0a404040); + ppp_warn("Could not determine remote IP address: defaulting to %I", + ho->hisaddr); + } +#if 0 /* UNUSED */ + script_setenv("IPLOCAL", ip_ntoa(go->ouraddr), 0); + if (ho->hisaddr != 0) + script_setenv("IPREMOTE", ip_ntoa(ho->hisaddr), 1); +#endif /* UNUSED */ + +#if LWIP_DNS + if (!go->req_dns1) + go->dnsaddr[0] = 0; + if (!go->req_dns2) + go->dnsaddr[1] = 0; +#if 0 /* UNUSED */ + if (go->dnsaddr[0]) + script_setenv("DNS1", ip_ntoa(go->dnsaddr[0]), 0); + if (go->dnsaddr[1]) + script_setenv("DNS2", ip_ntoa(go->dnsaddr[1]), 0); +#endif /* UNUSED */ + if (pcb->settings.usepeerdns && (go->dnsaddr[0] || go->dnsaddr[1])) { + sdns(pcb, go->dnsaddr[0], go->dnsaddr[1]); +#if 0 /* UNUSED */ + script_setenv("USEPEERDNS", "1", 0); + create_resolv(go->dnsaddr[0], go->dnsaddr[1]); +#endif /* UNUSED */ + } +#endif /* LWIP_DNS */ + + /* + * Check that the peer is allowed to use the IP address it wants. + */ + if (ho->hisaddr != 0) { + u32_t addr = lwip_ntohl(ho->hisaddr); + if ((addr >> IP_CLASSA_NSHIFT) == IP_LOOPBACKNET + || IP_MULTICAST(addr) || IP_BADCLASS(addr) + /* + * For now, consider that PPP in server mode with peer required + * to authenticate must provide the peer IP address, reject any + * IP address wanted by peer different than the one we wanted. + */ +#if PPP_SERVER && PPP_AUTH_SUPPORT + || (pcb->settings.auth_required && wo->hisaddr != ho->hisaddr) +#endif /* PPP_SERVER && PPP_AUTH_SUPPORT */ + ) { + ppp_error("Peer is not authorized to use remote address %I", ho->hisaddr); + ipcp_close(pcb, "Unauthorized remote IP address"); + return; + } + } +#if 0 /* Unused */ + /* Upstream checking code */ + if (ho->hisaddr != 0 && !auth_ip_addr(f->unit, ho->hisaddr)) { + ppp_error("Peer is not authorized to use remote address %I", ho->hisaddr); + ipcp_close(f->unit, "Unauthorized remote IP address"); + return; + } +#endif /* Unused */ + +#if VJ_SUPPORT + /* set tcp compression */ + sifvjcomp(pcb, ho->neg_vj, ho->cflag, ho->maxslotindex); +#endif /* VJ_SUPPORT */ + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, the interface is already + * configured, so we put out any saved-up packets, then set the + * interface to pass IP packets. 
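+ * (The dial-on-demand path only exists when DEMAND_SUPPORT is enabled;
+ * otherwise the else branch below configures the interface directly.)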
+ */ + if (demand) { + if (go->ouraddr != wo->ouraddr || ho->hisaddr != wo->hisaddr) { + ipcp_clear_addrs(f->unit, wo->ouraddr, wo->hisaddr, + wo->replace_default_route); + if (go->ouraddr != wo->ouraddr) { + ppp_warn("Local IP address changed to %I", go->ouraddr); + script_setenv("OLDIPLOCAL", ip_ntoa(wo->ouraddr), 0); + wo->ouraddr = go->ouraddr; + } else + script_unsetenv("OLDIPLOCAL"); + if (ho->hisaddr != wo->hisaddr && wo->hisaddr != 0) { + ppp_warn("Remote IP address changed to %I", ho->hisaddr); + script_setenv("OLDIPREMOTE", ip_ntoa(wo->hisaddr), 0); + wo->hisaddr = ho->hisaddr; + } else + script_unsetenv("OLDIPREMOTE"); + + /* Set the interface to the new addresses */ + mask = get_mask(go->ouraddr); + if (!sifaddr(pcb, go->ouraddr, ho->hisaddr, mask)) { +#if PPP_DEBUG + ppp_warn("Interface configuration failed"); +#endif /* PPP_DEBUG */ + ipcp_close(f->unit, "Interface configuration failed"); + return; + } + + /* assign a default route through the interface if required */ + if (ipcp_wantoptions[f->unit].default_route) + if (sifdefaultroute(pcb, go->ouraddr, ho->hisaddr, + wo->replace_default_route)) + default_route_set[f->unit] = 1; + +#if 0 /* UNUSED - PROXY ARP */ + /* Make a proxy ARP entry if requested. */ + if (ho->hisaddr != 0 && ipcp_wantoptions[f->unit].proxy_arp) + if (sifproxyarp(pcb, ho->hisaddr)) + proxy_arp_set[f->unit] = 1; +#endif /* UNUSED - PROXY ARP */ + + } + demand_rexmit(PPP_IP,go->ouraddr); + sifnpmode(pcb, PPP_IP, NPMODE_PASS); + + } else +#endif /* DEMAND_SUPPORT */ + { + /* + * Set IP addresses and (if specified) netmask. + */ + mask = get_mask(go->ouraddr); + +#if !(defined(SVR4) && (defined(SNI) || defined(__USLC__))) + if (!sifaddr(pcb, go->ouraddr, ho->hisaddr, mask)) { +#if PPP_DEBUG + ppp_warn("Interface configuration failed"); +#endif /* PPP_DEBUG */ + ipcp_close(f->pcb, "Interface configuration failed"); + return; + } +#endif + + /* bring the interface up for IP */ + if (!sifup(pcb)) { +#if PPP_DEBUG + ppp_warn("Interface failed to come up"); +#endif /* PPP_DEBUG */ + ipcp_close(f->pcb, "Interface configuration failed"); + return; + } + +#if (defined(SVR4) && (defined(SNI) || defined(__USLC__))) + if (!sifaddr(pcb, go->ouraddr, ho->hisaddr, mask)) { +#if PPP_DEBUG + ppp_warn("Interface configuration failed"); +#endif /* PPP_DEBUG */ + ipcp_close(f->unit, "Interface configuration failed"); + return; + } +#endif +#if DEMAND_SUPPORT + sifnpmode(pcb, PPP_IP, NPMODE_PASS); +#endif /* DEMAND_SUPPORT */ + +#if 0 /* UNUSED */ + /* assign a default route through the interface if required */ + if (wo->default_route) + if (sifdefaultroute(pcb, go->ouraddr, ho->hisaddr, + wo->replace_default_route)) + pcb->default_route_set = 1; +#endif /* UNUSED */ + +#if 0 /* UNUSED - PROXY ARP */ + /* Make a proxy ARP entry if requested. 
*/ + if (ho->hisaddr != 0 && wo->proxy_arp) + if (sifproxyarp(pcb, ho->hisaddr)) + pcb->proxy_arp_set = 1; +#endif /* UNUSED - PROXY ARP */ + + wo->ouraddr = go->ouraddr; + + ppp_notice("local IP address %I", go->ouraddr); + if (ho->hisaddr != 0) + ppp_notice("remote IP address %I", ho->hisaddr); +#if LWIP_DNS + if (go->dnsaddr[0]) + ppp_notice("primary DNS address %I", go->dnsaddr[0]); + if (go->dnsaddr[1]) + ppp_notice("secondary DNS address %I", go->dnsaddr[1]); +#endif /* LWIP_DNS */ + } + +#if PPP_STATS_SUPPORT + reset_link_stats(f->unit); +#endif /* PPP_STATS_SUPPORT */ + + np_up(pcb, PPP_IP); + pcb->ipcp_is_up = 1; + +#if PPP_NOTIFY + notify(ip_up_notifier, 0); +#endif /* PPP_NOTIFY */ +#if 0 /* UNUSED */ + if (ip_up_hook) + ip_up_hook(); +#endif /* UNUSED */ +} + + +/* + * ipcp_down - IPCP has gone DOWN. + * + * Take the IP network interface down, clear its addresses + * and delete routes through it. + */ +static void ipcp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipcp_options *ho = &pcb->ipcp_hisoptions; + ipcp_options *go = &pcb->ipcp_gotoptions; + + IPCPDEBUG(("ipcp: down")); +#if PPP_STATS_SUPPORT + /* XXX a bit IPv4-centric here, we only need to get the stats + * before the interface is marked down. */ + /* XXX more correct: we must get the stats before running the notifiers, + * at least for the radius plugin */ + update_link_stats(f->unit); +#endif /* PPP_STATS_SUPPORT */ +#if PPP_NOTIFY + notify(ip_down_notifier, 0); +#endif /* PPP_NOTIFY */ +#if 0 /* UNUSED */ + if (ip_down_hook) + ip_down_hook(); +#endif /* UNUSED */ + if (pcb->ipcp_is_up) { + pcb->ipcp_is_up = 0; + np_down(pcb, PPP_IP); + } +#if VJ_SUPPORT + sifvjcomp(pcb, 0, 0, 0); +#endif /* VJ_SUPPORT */ + +#if PPP_STATS_SUPPORT + print_link_stats(); /* _after_ running the notifiers and ip_down_hook(), + * because print_link_stats() sets link_stats_valid + * to 0 (zero) */ +#endif /* PPP_STATS_SUPPORT */ + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, set the interface + * to queue up outgoing packets (for now). + */ + if (demand) { + sifnpmode(pcb, PPP_IP, NPMODE_QUEUE); + } else +#endif /* DEMAND_SUPPORT */ + { +#if DEMAND_SUPPORT + sifnpmode(pcb, PPP_IP, NPMODE_DROP); +#endif /* DEMAND_SUPPORT */ + sifdown(pcb); + ipcp_clear_addrs(pcb, go->ouraddr, + ho->hisaddr, 0); +#if LWIP_DNS + cdns(pcb, go->dnsaddr[0], go->dnsaddr[1]); +#endif /* LWIP_DNS */ + } +} + + +/* + * ipcp_clear_addrs() - clear the interface addresses, routes, + * proxy arp entries, etc. + */ +static void ipcp_clear_addrs(ppp_pcb *pcb, u32_t ouraddr, u32_t hisaddr, u8_t replacedefaultroute) { + LWIP_UNUSED_ARG(replacedefaultroute); + +#if 0 /* UNUSED - PROXY ARP */ + if (pcb->proxy_arp_set) { + cifproxyarp(pcb, hisaddr); + pcb->proxy_arp_set = 0; + } +#endif /* UNUSED - PROXY ARP */ +#if 0 /* UNUSED */ + /* If replacedefaultroute, sifdefaultroute will be called soon + * with replacedefaultroute set and that will overwrite the current + * default route. This is the case only when doing demand, otherwise + * during demand, this cifdefaultroute would restore the old default + * route which is not what we want in this case. In the non-demand + * case, we'll delete the default route and restore the old if there + * is one saved by an sifdefaultroute with replacedefaultroute. + */ + if (!replacedefaultroute && pcb->default_route_set) { + cifdefaultroute(pcb, ouraddr, hisaddr); + pcb->default_route_set = 0; + } +#endif /* UNUSED */ + cifaddr(pcb, ouraddr, hisaddr); +} + + +/* + * ipcp_finished - possibly shut down the lower layers. 
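[Editor's note] ipcp_up() and ipcp_down() above ultimately call np_up()/np_down(), which is what makes the PPP netif usable (or not) from application code. The usual way to observe this from the application side in lwIP is the link-status callback registered when the session is created, e.g. with pppos_create(). The sketch below shows such a callback reading the negotiated addresses back out of the netif; pppos_netif and my_link_status_cb are assumed application-side names, not part of this patch.

/* Sketch of an application-side link-status callback, assuming the PPP
 * session was created with pppos_create(&pppos_netif, ..., my_link_status_cb, NULL). */
#include <stdio.h>
#include "lwip/netif.h"
#include "lwip/ip4_addr.h"
#include "netif/ppp/ppp.h"

static struct netif pppos_netif;   /* the netif handed to pppos_create() */

static void my_link_status_cb(ppp_pcb *pcb, int err_code, void *ctx)
{
    (void)pcb;
    (void)ctx;

    if (err_code == PPPERR_NONE) {
        /* IPCP reached OPENED: the addresses set in ipcp_up() are now on the netif. */
        printf("local  %s\n", ip4addr_ntoa(netif_ip4_addr(&pppos_netif)));
        printf("remote %s\n", ip4addr_ntoa(netif_ip4_gw(&pppos_netif)));
        printf("mask   %s\n", ip4addr_ntoa(netif_ip4_netmask(&pppos_netif)));
    } else {
        /* The link went down or negotiation failed; err_code says why. */
        printf("PPP link down, err_code=%d\n", err_code);
    }
}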
+ */ +static void ipcp_finished(fsm *f) { + ppp_pcb *pcb = f->pcb; + if (pcb->ipcp_is_open) { + pcb->ipcp_is_open = 0; + np_finished(pcb, PPP_IP); + } +} + + +#if 0 /* UNUSED */ +/* + * create_resolv - create the replacement resolv.conf file + */ +static void +create_resolv(peerdns1, peerdns2) + u32_t peerdns1, peerdns2; +{ + +} +#endif /* UNUSED */ + +#if PRINTPKT_SUPPORT +/* + * ipcp_printpkt - print the contents of an IPCP packet. + */ +static const char* const ipcp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej" +}; + +static int ipcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len, olen; + const u_char *pstart, *optend; +#if VJ_SUPPORT + u_short cishort; +#endif /* VJ_SUPPORT */ + u32_t cilong; + + if (plen < HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(ipcp_codenames)) + printer(arg, " %s", ipcp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print option list */ + while (len >= 2) { + GETCHAR(code, p); + GETCHAR(olen, p); + p -= 2; + if (olen < 2 || olen > len) { + break; + } + printer(arg, " <"); + len -= olen; + optend = p + olen; + switch (code) { + case CI_ADDRS: + if (olen == CILEN_ADDRS) { + p += 2; + GETLONG(cilong, p); + printer(arg, "addrs %I", lwip_htonl(cilong)); + GETLONG(cilong, p); + printer(arg, " %I", lwip_htonl(cilong)); + } + break; +#if VJ_SUPPORT + case CI_COMPRESSTYPE: + if (olen >= CILEN_COMPRESS) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "compress "); + switch (cishort) { + case IPCP_VJ_COMP: + printer(arg, "VJ"); + break; + case IPCP_VJ_COMP_OLD: + printer(arg, "old-VJ"); + break; + default: + printer(arg, "0x%x", cishort); + } + } + break; +#endif /* VJ_SUPPORT */ + case CI_ADDR: + if (olen == CILEN_ADDR) { + p += 2; + GETLONG(cilong, p); + printer(arg, "addr %I", lwip_htonl(cilong)); + } + break; +#if LWIP_DNS + case CI_MS_DNS1: + case CI_MS_DNS2: + p += 2; + GETLONG(cilong, p); + printer(arg, "ms-dns%d %I", (code == CI_MS_DNS1? 1: 2), + htonl(cilong)); + break; +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + case CI_MS_WINS1: + case CI_MS_WINS2: + p += 2; + GETLONG(cilong, p); + printer(arg, "ms-wins %I", lwip_htonl(cilong)); + break; +#endif /* UNUSED - WINS */ + default: + break; + } + while (p < optend) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (; len > 0; --len) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +#if DEMAND_SUPPORT +/* + * ip_active_pkt - see if this IP packet is worth bringing the link up for. + * We don't bring the link up for IP fragments or for TCP FIN packets + * with no data. 
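[Editor's note] ipcp_printpkt() above parses a CP packet as a code/id/length header followed by a list of (type, length, value) options, the same layout every configure parser in these files walks. A small standalone walker over that TLV layout, independent of the GETCHAR/GETSHORT macros, might look like this; option type 3 (IP-Address) is used purely as sample input.

/* Illustrative TLV walk over a Configure-Request body (the bytes after the
 * 4-byte code/id/length header). */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static void example_walk_options(const uint8_t *p, size_t len)
{
    while (len >= 2) {
        uint8_t type = p[0];
        uint8_t olen = p[1];
        if (olen < 2 || olen > len)   /* malformed option: stop, as ipcp_printpkt does */
            break;
        printf("option type=%u len=%u\n", type, olen);
        p   += olen;
        len -= olen;
    }
}

int main(void)
{
    /* IP-Address option (type 3), length 6, address 192.168.0.2 */
    static const uint8_t req[] = { 3, 6, 192, 168, 0, 2 };
    example_walk_options(req, sizeof(req));
    return 0;
}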
+ */ +#define IP_HDRLEN 20 /* bytes */ +#define IP_OFFMASK 0x1fff +#ifndef IPPROTO_TCP +#define IPPROTO_TCP 6 +#endif +#define TCP_HDRLEN 20 +#define TH_FIN 0x01 + +/* + * We use these macros because the IP header may be at an odd address, + * and some compilers might use word loads to get th_off or ip_hl. + */ + +#define net_short(x) (((x)[0] << 8) + (x)[1]) +#define get_iphl(x) (((unsigned char *)(x))[0] & 0xF) +#define get_ipoff(x) net_short((unsigned char *)(x) + 6) +#define get_ipproto(x) (((unsigned char *)(x))[9]) +#define get_tcpoff(x) (((unsigned char *)(x))[12] >> 4) +#define get_tcpflags(x) (((unsigned char *)(x))[13]) + +static int +ip_active_pkt(pkt, len) + u_char *pkt; + int len; +{ + u_char *tcp; + int hlen; + + len -= PPP_HDRLEN; + pkt += PPP_HDRLEN; + if (len < IP_HDRLEN) + return 0; + if ((get_ipoff(pkt) & IP_OFFMASK) != 0) + return 0; + if (get_ipproto(pkt) != IPPROTO_TCP) + return 1; + hlen = get_iphl(pkt) * 4; + if (len < hlen + TCP_HDRLEN) + return 0; + tcp = pkt + hlen; + if ((get_tcpflags(tcp) & TH_FIN) != 0 && len == hlen + get_tcpoff(tcp) * 4) + return 0; + return 1; +} +#endif /* DEMAND_SUPPORT */ + +#endif /* PPP_SUPPORT && PPP_IPV4_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c new file mode 100644 index 00000000..11c18df7 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c @@ -0,0 +1,1533 @@ +/* + * ipv6cp.c - PPP IPV6 Control Protocol. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* Original version, based on RFC2023 : + + Copyright (c) 1995, 1996, 1997 Francis.Dupont@inria.fr, INRIA Rocquencourt, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Copyright (c) 1998, 1999 Francis.Dupont@inria.fr, GIE DYADE, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Ce travail a été fait au sein du GIE DYADE (Groupement d'Intérêt + Économique ayant pour membres BULL S.A. et l'INRIA). 
+ + Ce logiciel informatique est disponible aux conditions + usuelles dans la recherche, c'est-à-dire qu'il peut + être utilisé, copié, modifié, distribué à l'unique + condition que ce texte soit conservé afin que + l'origine de ce logiciel soit reconnue. + + Le nom de l'Institut National de Recherche en Informatique + et en Automatique (INRIA), de l'IMAG, ou d'une personne morale + ou physique ayant participé à l'élaboration de ce logiciel ne peut + être utilisé sans son accord préalable explicite. + + Ce logiciel est fourni tel quel sans aucune garantie, + support ou responsabilité d'aucune sorte. + Ce logiciel est dérivé de sources d'origine + "University of California at Berkeley" et + "Digital Equipment Corporation" couvertes par des copyrights. + + L'Institut d'Informatique et de Mathématiques Appliquées de Grenoble (IMAG) + est une fédération d'unités mixtes de recherche du CNRS, de l'Institut National + Polytechnique de Grenoble et de l'Université Joseph Fourier regroupant + sept laboratoires dont le laboratoire Logiciels, Systèmes, Réseaux (LSR). + + This work has been done in the context of GIE DYADE (joint R & D venture + between BULL S.A. and INRIA). + + This software is available with usual "research" terms + with the aim of retain credits of the software. + Permission to use, copy, modify and distribute this software for any + purpose and without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies, + and the name of INRIA, IMAG, or any contributor not be used in advertising + or publicity pertaining to this material without the prior explicit + permission. The software is provided "as is" without any + warranties, support or liabilities of any kind. + This software is derived from source code from + "University of California at Berkeley" and + "Digital Equipment Corporation" protected by copyrights. + + Grenoble's Institute of Computer Science and Applied Mathematics (IMAG) + is a federation of seven research units funded by the CNRS, National + Polytechnic Institute of Grenoble and University Joseph Fourier. + The research unit in Software, Systems, Networks (LSR) is member of IMAG. +*/ + +/* + * Derived from : + * + * + * ipcp.c - PPP IP Control Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ipv6cp.c,v 1.21 2005/08/25 23:59:34 paulus Exp $ + */ + +/* + * @todo: + * + * Proxy Neighbour Discovery. + * + * Better defines for selecting the ordering of + * interface up / set address. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/fsm.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/ipv6cp.h" +#include "netif/ppp/magic.h" + +/* global vars */ +#if 0 /* UNUSED */ +int no_ifaceid_neg = 0; +#endif /* UNUSED */ + +/* + * Callbacks for fsm code. (CI = Configuration Information) + */ +static void ipv6cp_resetci(fsm *f); /* Reset our CI */ +static int ipv6cp_cilen(fsm *f); /* Return length of our CI */ +static void ipv6cp_addci(fsm *f, u_char *ucp, int *lenp); /* Add our CI */ +static int ipv6cp_ackci(fsm *f, u_char *p, int len); /* Peer ack'd our CI */ +static int ipv6cp_nakci(fsm *f, u_char *p, int len, int treat_as_reject); /* Peer nak'd our CI */ +static int ipv6cp_rejci(fsm *f, u_char *p, int len); /* Peer rej'd our CI */ +static int ipv6cp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree); /* Rcv CI */ +static void ipv6cp_up(fsm *f); /* We're UP */ +static void ipv6cp_down(fsm *f); /* We're DOWN */ +static void ipv6cp_finished(fsm *f); /* Don't need lower layer */ + +static const fsm_callbacks ipv6cp_callbacks = { /* IPV6CP callback routines */ + ipv6cp_resetci, /* Reset our Configuration Information */ + ipv6cp_cilen, /* Length of our Configuration Information */ + ipv6cp_addci, /* Add our Configuration Information */ + ipv6cp_ackci, /* ACK our Configuration Information */ + ipv6cp_nakci, /* NAK our Configuration Information */ + ipv6cp_rejci, /* Reject our Configuration Information */ + ipv6cp_reqci, /* Request peer's Configuration Information */ + ipv6cp_up, /* Called when fsm reaches OPENED state */ + ipv6cp_down, /* Called when fsm leaves OPENED state */ + NULL, /* Called when we want the lower layer up */ + ipv6cp_finished, /* Called when we want the lower layer down */ + NULL, /* Called when Protocol-Reject received */ + NULL, /* Retransmission is necessary */ + NULL, /* Called to handle protocol-specific codes */ + "IPV6CP" /* String name of protocol */ +}; + +#if PPP_OPTIONS +/* + * Command-line options. 
+ */ +static int setifaceid(char **arg)); +static void printifaceid(option_t *, + void (*)(void *, char *, ...), void *)); + +static option_t ipv6cp_option_list[] = { + { "ipv6", o_special, (void *)setifaceid, + "Set interface identifiers for IPV6", + OPT_A2PRINTER, (void *)printifaceid }, + + { "+ipv6", o_bool, &ipv6cp_protent.enabled_flag, + "Enable IPv6 and IPv6CP", OPT_PRIO | 1 }, + { "noipv6", o_bool, &ipv6cp_protent.enabled_flag, + "Disable IPv6 and IPv6CP", OPT_PRIOSUB }, + { "-ipv6", o_bool, &ipv6cp_protent.enabled_flag, + "Disable IPv6 and IPv6CP", OPT_PRIOSUB | OPT_ALIAS }, + + { "ipv6cp-accept-local", o_bool, &ipv6cp_allowoptions[0].accept_local, + "Accept peer's interface identifier for us", 1 }, + + { "ipv6cp-use-ipaddr", o_bool, &ipv6cp_allowoptions[0].use_ip, + "Use (default) IPv4 address as interface identifier", 1 }, + + { "ipv6cp-use-persistent", o_bool, &ipv6cp_wantoptions[0].use_persistent, + "Use uniquely-available persistent value for link local address", 1 }, + + { "ipv6cp-restart", o_int, &ipv6cp_fsm[0].timeouttime, + "Set timeout for IPv6CP", OPT_PRIO }, + { "ipv6cp-max-terminate", o_int, &ipv6cp_fsm[0].maxtermtransmits, + "Set max #xmits for term-reqs", OPT_PRIO }, + { "ipv6cp-max-configure", o_int, &ipv6cp_fsm[0].maxconfreqtransmits, + "Set max #xmits for conf-reqs", OPT_PRIO }, + { "ipv6cp-max-failure", o_int, &ipv6cp_fsm[0].maxnakloops, + "Set max #conf-naks for IPv6CP", OPT_PRIO }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. + */ +static void ipv6cp_init(ppp_pcb *pcb); +static void ipv6cp_open(ppp_pcb *pcb); +static void ipv6cp_close(ppp_pcb *pcb, const char *reason); +static void ipv6cp_lowerup(ppp_pcb *pcb); +static void ipv6cp_lowerdown(ppp_pcb *pcb); +static void ipv6cp_input(ppp_pcb *pcb, u_char *p, int len); +static void ipv6cp_protrej(ppp_pcb *pcb); +#if PPP_OPTIONS +static void ipv6_check_options(void); +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT +static int ipv6_demand_conf(int u); +#endif /* DEMAND_SUPPORT */ +#if PRINTPKT_SUPPORT +static int ipv6cp_printpkt(const u_char *p, int plen, + void (*printer)(void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if DEMAND_SUPPORT +static int ipv6_active_pkt(u_char *pkt, int len); +#endif /* DEMAND_SUPPORT */ + +const struct protent ipv6cp_protent = { + PPP_IPV6CP, + ipv6cp_init, + ipv6cp_input, + ipv6cp_protrej, + ipv6cp_lowerup, + ipv6cp_lowerdown, + ipv6cp_open, + ipv6cp_close, +#if PRINTPKT_SUPPORT + ipv6cp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "IPV6CP", + "IPV6", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ipv6cp_option_list, + ipv6_check_options, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + ipv6_demand_conf, + ipv6_active_pkt +#endif /* DEMAND_SUPPORT */ +}; + +static void ipv6cp_clear_addrs(ppp_pcb *pcb, eui64_t ourid, eui64_t hisid); +#if 0 /* UNUSED */ +static void ipv6cp_script(char *)); +static void ipv6cp_script_done(void *)); +#endif /* UNUSED */ + +/* + * Lengths of configuration options. + */ +#define CILEN_VOID 2 +#define CILEN_COMPRESS 4 /* length for RFC2023 compress opt. */ +#define CILEN_IFACEID 10 /* RFC2472, interface identifier */ + +#define CODENAME(x) ((x) == CONFACK ? "ACK" : \ + (x) == CONFNAK ? "NAK" : "REJ") + +#if 0 /* UNUSED */ +/* + * This state variable is used to ensure that we don't + * run an ipcp-up/down script while one is already running. 
+ */ +static enum script_state { + s_down, + s_up, +} ipv6cp_script_state; +static pid_t ipv6cp_script_pid; +#endif /* UNUSED */ + +static char *llv6_ntoa(eui64_t ifaceid); + +#if PPP_OPTIONS +/* + * setifaceid - set the interface identifiers manually + */ +static int +setifaceid(argv) + char **argv; +{ + char *comma, *arg, c; + ipv6cp_options *wo = &ipv6cp_wantoptions[0]; + struct in6_addr addr; + static int prio_local, prio_remote; + +#define VALIDID(a) ( (((a).s6_addr32[0] == 0) && ((a).s6_addr32[1] == 0)) && \ + (((a).s6_addr32[2] != 0) || ((a).s6_addr32[3] != 0)) ) + + arg = *argv; + if ((comma = strchr(arg, ',')) == NULL) + comma = arg + strlen(arg); + + /* + * If comma first character, then no local identifier + */ + if (comma != arg) { + c = *comma; + *comma = '\0'; + + if (inet_pton(AF_INET6, arg, &addr) == 0 || !VALIDID(addr)) { + option_error("Illegal interface identifier (local): %s", arg); + return 0; + } + + if (option_priority >= prio_local) { + eui64_copy(addr.s6_addr32[2], wo->ourid); + wo->opt_local = 1; + prio_local = option_priority; + } + *comma = c; + } + + /* + * If comma last character, the no remote identifier + */ + if (*comma != 0 && *++comma != '\0') { + if (inet_pton(AF_INET6, comma, &addr) == 0 || !VALIDID(addr)) { + option_error("Illegal interface identifier (remote): %s", comma); + return 0; + } + if (option_priority >= prio_remote) { + eui64_copy(addr.s6_addr32[2], wo->hisid); + wo->opt_remote = 1; + prio_remote = option_priority; + } + } + + if (override_value("+ipv6", option_priority, option_source)) + ipv6cp_protent.enabled_flag = 1; + return 1; +} + +static void +printifaceid(opt, printer, arg) + option_t *opt; + void (*printer)(void *, char *, ...)); + void *arg; +{ + ipv6cp_options *wo = &ipv6cp_wantoptions[0]; + + if (wo->opt_local) + printer(arg, "%s", llv6_ntoa(wo->ourid)); + printer(arg, ","); + if (wo->opt_remote) + printer(arg, "%s", llv6_ntoa(wo->hisid)); +} +#endif /* PPP_OPTIONS */ + +/* + * Make a string representation of a network address. + */ +static char * +llv6_ntoa(eui64_t ifaceid) +{ + static char b[26]; + + sprintf(b, "fe80::%02x%02x:%02x%02x:%02x%02x:%02x%02x", + ifaceid.e8[0], ifaceid.e8[1], ifaceid.e8[2], ifaceid.e8[3], + ifaceid.e8[4], ifaceid.e8[5], ifaceid.e8[6], ifaceid.e8[7]); + + return b; +} + + +/* + * ipv6cp_init - Initialize IPV6CP. + */ +static void ipv6cp_init(ppp_pcb *pcb) { + fsm *f = &pcb->ipv6cp_fsm; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *ao = &pcb->ipv6cp_allowoptions; + + f->pcb = pcb; + f->protocol = PPP_IPV6CP; + f->callbacks = &ipv6cp_callbacks; + fsm_init(f); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(wo, 0, sizeof(*wo)); + memset(ao, 0, sizeof(*ao)); +#endif /* 0 */ + + wo->accept_local = 1; + wo->neg_ifaceid = 1; + ao->neg_ifaceid = 1; + +#ifdef IPV6CP_COMP + wo->neg_vj = 1; + ao->neg_vj = 1; + wo->vj_protocol = IPV6CP_COMP; +#endif + +} + + +/* + * ipv6cp_open - IPV6CP is allowed to come up. + */ +static void ipv6cp_open(ppp_pcb *pcb) { + fsm_open(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_close - Take IPV6CP down. + */ +static void ipv6cp_close(ppp_pcb *pcb, const char *reason) { + fsm_close(&pcb->ipv6cp_fsm, reason); +} + + +/* + * ipv6cp_lowerup - The lower layer is up. + */ +static void ipv6cp_lowerup(ppp_pcb *pcb) { + fsm_lowerup(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_lowerdown - The lower layer is down. + */ +static void ipv6cp_lowerdown(ppp_pcb *pcb) { + fsm_lowerdown(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_input - Input IPV6CP packet. 
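[Editor's note] setifaceid() above accepts interface identifiers written as IPv6 addresses and, through the VALIDID() macro, keeps only those whose upper 64 bits are zero and whose lower 64 bits are not all zero. A standalone restatement of that test over raw address bytes, avoiding inet_pton() which an embedded target may not provide, could look like the following; the function name is illustrative.

/* Illustrative re-statement of the VALIDID() rule on a 16-byte IPv6 address:
 * the first 8 bytes must be zero, the last 8 bytes must not all be zero. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int example_valid_ifaceid(const uint8_t addr[16])
{
    static const uint8_t zero8[8] = { 0 };
    return memcmp(addr, zero8, 8) == 0 &&
           memcmp(addr + 8, zero8, 8) != 0;
}

int main(void)
{
    uint8_t id[16] = { 0 };
    id[15] = 1;                                /* identifier ::1 */
    printf("%d\n", example_valid_ifaceid(id)); /* 1 */
    memset(id, 0, sizeof(id));
    printf("%d\n", example_valid_ifaceid(id)); /* 0: all-zero identifier */
    return 0;
}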
+ */ +static void ipv6cp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm_input(&pcb->ipv6cp_fsm, p, len); +} + + +/* + * ipv6cp_protrej - A Protocol-Reject was received for IPV6CP. + * + * Pretend the lower layer went down, so we shut up. + */ +static void ipv6cp_protrej(ppp_pcb *pcb) { + fsm_lowerdown(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_resetci - Reset our CI. + */ +static void ipv6cp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + ipv6cp_options *ao = &pcb->ipv6cp_allowoptions; + + wo->req_ifaceid = wo->neg_ifaceid && ao->neg_ifaceid; + + if (!wo->opt_local) { + eui64_magic_nz(wo->ourid); + } + + *go = *wo; + eui64_zero(go->hisid); /* last proposed interface identifier */ +} + + +/* + * ipv6cp_cilen - Return length of our CI. + */ +static int ipv6cp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + +#ifdef IPV6CP_COMP +#define LENCIVJ(neg) (neg ? CILEN_COMPRESS : 0) +#endif /* IPV6CP_COMP */ +#define LENCIIFACEID(neg) (neg ? CILEN_IFACEID : 0) + + return (LENCIIFACEID(go->neg_ifaceid) + +#ifdef IPV6CP_COMP + LENCIVJ(go->neg_vj) + +#endif /* IPV6CP_COMP */ + 0); +} + + +/* + * ipv6cp_addci - Add our desired CIs to a packet. + */ +static void ipv6cp_addci(fsm *f, u_char *ucp, int *lenp) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + int len = *lenp; + +#ifdef IPV6CP_COMP +#define ADDCIVJ(opt, neg, val) \ + if (neg) { \ + int vjlen = CILEN_COMPRESS; \ + if (len >= vjlen) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(vjlen, ucp); \ + PUTSHORT(val, ucp); \ + len -= vjlen; \ + } else \ + neg = 0; \ + } +#endif /* IPV6CP_COMP */ + +#define ADDCIIFACEID(opt, neg, val1) \ + if (neg) { \ + int idlen = CILEN_IFACEID; \ + if (len >= idlen) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(idlen, ucp); \ + eui64_put(val1, ucp); \ + len -= idlen; \ + } else \ + neg = 0; \ + } + + ADDCIIFACEID(CI_IFACEID, go->neg_ifaceid, go->ourid); + +#ifdef IPV6CP_COMP + ADDCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol); +#endif /* IPV6CP_COMP */ + + *lenp -= len; +} + + +/* + * ipv6cp_ackci - Ack our CIs. + * + * Returns: + * 0 - Ack was bad. + * 1 - Ack was good. + */ +static int ipv6cp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_short cilen, citype; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + + /* + * CIs must be in exactly the same order that we sent... + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ + +#ifdef IPV6CP_COMP +#define ACKCIVJ(opt, neg, val) \ + if (neg) { \ + int vjlen = CILEN_COMPRESS; \ + if ((len -= vjlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != vjlen || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != val) \ + goto bad; \ + } +#endif /* IPV6CP_COMP */ + +#define ACKCIIFACEID(opt, neg, val1) \ + if (neg) { \ + int idlen = CILEN_IFACEID; \ + if ((len -= idlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != idlen || \ + citype != opt) \ + goto bad; \ + eui64_get(ifaceid, p); \ + if (! eui64_equals(val1, ifaceid)) \ + goto bad; \ + } + + ACKCIIFACEID(CI_IFACEID, go->neg_ifaceid, go->ourid); + +#ifdef IPV6CP_COMP + ACKCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol); +#endif /* IPV6CP_COMP */ + + /* + * If there are any remaining CIs, then this packet is bad. 
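[Editor's note] The ADDCIIFACEID()/ACKCIIFACEID() macros above emit and verify a single option: one type byte, one length byte (CILEN_IFACEID = 10) and the 8-byte identifier. Written out without the macros, building that option into a buffer reduces to the sketch below; the buffer handling and names are illustrative, only the on-wire layout follows the code above.

/* Illustrative, macro-free construction of an Interface-Identifier option
 * (RFC 2472): type, length = 10, then the 8 identifier bytes. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define EX_CI_IFACEID      1u    /* option type used by IPV6CP */
#define EX_CILEN_IFACEID  10u    /* 2 header bytes + 8 identifier bytes */

/* Returns the number of bytes written, or 0 if the buffer is too small
 * (mirroring the "neg = 0" fallback in ADDCIIFACEID). */
static size_t example_put_ifaceid(uint8_t *buf, size_t buflen,
                                  const uint8_t id[8])
{
    if (buflen < EX_CILEN_IFACEID)
        return 0;
    buf[0] = EX_CI_IFACEID;
    buf[1] = EX_CILEN_IFACEID;
    memcpy(buf + 2, id, 8);
    return EX_CILEN_IFACEID;
}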
+ */ + if (len != 0) + goto bad; + return (1); + +bad: + IPV6CPDEBUG(("ipv6cp_ackci: received bad Ack!")); + return (0); +} + +/* + * ipv6cp_nakci - Peer has sent a NAK for some of our CIs. + * This should not modify any state if the Nak is bad + * or if IPV6CP is in the OPENED state. + * + * Returns: + * 0 - Nak was bad. + * 1 - Nak was good. + */ +static int ipv6cp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_char citype, cilen, *next; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + ipv6cp_options no; /* options we've seen Naks for */ + ipv6cp_options try_; /* options to request next time */ + + BZERO(&no, sizeof(no)); + try_ = *go; + + /* + * Any Nak'd CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define NAKCIIFACEID(opt, neg, code) \ + if (go->neg && \ + len >= (cilen = CILEN_IFACEID) && \ + p[1] == cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + eui64_get(ifaceid, p); \ + no.neg = 1; \ + code \ + } + +#ifdef IPV6CP_COMP +#define NAKCIVJ(opt, neg, code) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_COMPRESS) && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + no.neg = 1; \ + code \ + } +#endif /* IPV6CP_COMP */ + + /* + * Accept the peer's idea of {our,his} interface identifier, if different + * from our idea, only if the accept_{local,remote} flag is set. + */ + NAKCIIFACEID(CI_IFACEID, neg_ifaceid, + if (treat_as_reject) { + try_.neg_ifaceid = 0; + } else if (go->accept_local) { + while (eui64_iszero(ifaceid) || + eui64_equals(ifaceid, go->hisid)) /* bad luck */ + eui64_magic(ifaceid); + try_.ourid = ifaceid; + IPV6CPDEBUG(("local LL address %s", llv6_ntoa(ifaceid))); + } + ); + +#ifdef IPV6CP_COMP + NAKCIVJ(CI_COMPRESSTYPE, neg_vj, + { + if (cishort == IPV6CP_COMP && !treat_as_reject) { + try_.vj_protocol = cishort; + } else { + try_.neg_vj = 0; + } + } + ); +#endif /* IPV6CP_COMP */ + + /* + * There may be remaining CIs, if the peer is requesting negotiation + * on an option that we didn't include in our request packet. + * If they want to negotiate about interface identifier, we comply. + * If they want us to ask for compression, we refuse. + */ + while (len >= CILEN_VOID) { + GETCHAR(citype, p); + GETCHAR(cilen, p); + if ( cilen < CILEN_VOID || (len -= cilen) < 0 ) + goto bad; + next = p + cilen - 2; + + switch (citype) { +#ifdef IPV6CP_COMP + case CI_COMPRESSTYPE: + if (go->neg_vj || no.neg_vj || + (cilen != CILEN_COMPRESS)) + goto bad; + no.neg_vj = 1; + break; +#endif /* IPV6CP_COMP */ + case CI_IFACEID: + if (go->neg_ifaceid || no.neg_ifaceid || cilen != CILEN_IFACEID) + goto bad; + try_.neg_ifaceid = 1; + eui64_get(ifaceid, p); + if (go->accept_local) { + while (eui64_iszero(ifaceid) || + eui64_equals(ifaceid, go->hisid)) /* bad luck */ + eui64_magic(ifaceid); + try_.ourid = ifaceid; + } + no.neg_ifaceid = 1; + break; + default: + break; + } + p = next; + } + + /* If there is still anything left, this packet is bad. */ + if (len != 0) + goto bad; + + /* + * OK, the Nak is good. Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + + return 1; + +bad: + IPV6CPDEBUG(("ipv6cp_nakci: received bad Nak!")); + return 0; +} + + +/* + * ipv6cp_rejci - Reject some of our CIs. 
+ */ +static int ipv6cp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_char cilen; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + ipv6cp_options try_; /* options to request next time */ + + try_ = *go; + /* + * Any Rejected CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define REJCIIFACEID(opt, neg, val1) \ + if (go->neg && \ + len >= (cilen = CILEN_IFACEID) && \ + p[1] == cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + eui64_get(ifaceid, p); \ + /* Check rejected value. */ \ + if (! eui64_equals(ifaceid, val1)) \ + goto bad; \ + try_.neg = 0; \ + } + +#ifdef IPV6CP_COMP +#define REJCIVJ(opt, neg, val) \ + if (go->neg && \ + p[1] == CILEN_COMPRESS && \ + len >= p[1] && \ + p[0] == opt) { \ + len -= p[1]; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + /* Check rejected value. */ \ + if (cishort != val) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* IPV6CP_COMP */ + + REJCIIFACEID(CI_IFACEID, neg_ifaceid, go->ourid); + +#ifdef IPV6CP_COMP + REJCIVJ(CI_COMPRESSTYPE, neg_vj, go->vj_protocol); +#endif /* IPV6CP_COMP */ + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + /* + * Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; + +bad: + IPV6CPDEBUG(("ipv6cp_rejci: received bad Reject!")); + return 0; +} + + +/* + * ipv6cp_reqci - Check the peer's requested CIs and send appropriate response. + * + * Returns: CONFACK, CONFNAK or CONFREJ and input packet modified + * appropriately. If reject_if_disagree is non-zero, doesn't return + * CONFNAK; returns CONFREJ if it can't return CONFACK. + * + * inp = Requested CIs + * len = Length of requested CIs + * + */ +static int ipv6cp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *ho = &pcb->ipv6cp_hisoptions; + ipv6cp_options *ao = &pcb->ipv6cp_allowoptions; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_char *cip, *next; /* Pointer to current and next CIs */ + u_short cilen, citype; /* Parsed len, type */ +#ifdef IPV6CP_COMP + u_short cishort; /* Parsed short value */ +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; /* Parsed interface identifier */ + int rc = CONFACK; /* Final packet return code */ + int orc; /* Individual option return code */ + u_char *p; /* Pointer to next char to parse */ + u_char *ucp = inp; /* Pointer to current output char */ + int l = *len; /* Length left */ + + /* + * Reset all his options. + */ + BZERO(ho, sizeof(*ho)); + + /* + * Process all his options. + */ + next = inp; + while (l) { + orc = CONFACK; /* Assume success */ + cip = p = next; /* Remember begining of CI */ + if (l < 2 || /* Not enough data for CI header or */ + p[1] < 2 || /* CI length too small or */ + p[1] > l) { /* CI length too big? 
*/ + IPV6CPDEBUG(("ipv6cp_reqci: bad CI length!")); + orc = CONFREJ; /* Reject bad CI */ + cilen = l; /* Reject till end of packet */ + l = 0; /* Don't loop again */ + goto endswitch; + } + GETCHAR(citype, p); /* Parse CI type */ + GETCHAR(cilen, p); /* Parse CI length */ + l -= cilen; /* Adjust remaining length */ + next += cilen; /* Step to next CI */ + + switch (citype) { /* Check CI type */ + case CI_IFACEID: + IPV6CPDEBUG(("ipv6cp: received interface identifier ")); + + if (!ao->neg_ifaceid || + cilen != CILEN_IFACEID) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + + /* + * If he has no interface identifier, or if we both have same + * identifier then NAK it with new idea. + * In particular, if we don't know his identifier, but he does, + * then accept it. + */ + eui64_get(ifaceid, p); + IPV6CPDEBUG(("(%s)", llv6_ntoa(ifaceid))); + if (eui64_iszero(ifaceid) && eui64_iszero(go->ourid)) { + orc = CONFREJ; /* Reject CI */ + break; + } + if (!eui64_iszero(wo->hisid) && + !eui64_equals(ifaceid, wo->hisid) && + eui64_iszero(go->hisid)) { + + orc = CONFNAK; + ifaceid = wo->hisid; + go->hisid = ifaceid; + DECPTR(sizeof(ifaceid), p); + eui64_put(ifaceid, p); + } else + if (eui64_iszero(ifaceid) || eui64_equals(ifaceid, go->ourid)) { + orc = CONFNAK; + if (eui64_iszero(go->hisid)) /* first time, try option */ + ifaceid = wo->hisid; + while (eui64_iszero(ifaceid) || + eui64_equals(ifaceid, go->ourid)) /* bad luck */ + eui64_magic(ifaceid); + go->hisid = ifaceid; + DECPTR(sizeof(ifaceid), p); + eui64_put(ifaceid, p); + } + + ho->neg_ifaceid = 1; + ho->hisid = ifaceid; + break; + +#ifdef IPV6CP_COMP + case CI_COMPRESSTYPE: + IPV6CPDEBUG(("ipv6cp: received COMPRESSTYPE ")); + if (!ao->neg_vj || + (cilen != CILEN_COMPRESS)) { + orc = CONFREJ; + break; + } + GETSHORT(cishort, p); + IPV6CPDEBUG(("(%d)", cishort)); + + if (!(cishort == IPV6CP_COMP)) { + orc = CONFREJ; + break; + } + + ho->neg_vj = 1; + ho->vj_protocol = cishort; + break; +#endif /* IPV6CP_COMP */ + + default: + orc = CONFREJ; + break; + } + +endswitch: + IPV6CPDEBUG((" (%s)\n", CODENAME(orc))); + + if (orc == CONFACK && /* Good CI */ + rc != CONFACK) /* but prior CI wasnt? */ + continue; /* Don't send this one */ + + if (orc == CONFNAK) { /* Nak this CI? */ + if (reject_if_disagree) /* Getting fed up with sending NAKs? */ + orc = CONFREJ; /* Get tough if so */ + else { + if (rc == CONFREJ) /* Rejecting prior CI? */ + continue; /* Don't send this one */ + if (rc == CONFACK) { /* Ack'd all prior CIs? */ + rc = CONFNAK; /* Not anymore... */ + ucp = inp; /* Backup */ + } + } + } + + if (orc == CONFREJ && /* Reject this CI */ + rc != CONFREJ) { /* but no prior ones? */ + rc = CONFREJ; + ucp = inp; /* Backup */ + } + + /* Need to move CI? */ + if (ucp != cip) + MEMCPY(ucp, cip, cilen); /* Move it */ + + /* Update output pointer */ + INCPTR(cilen, ucp); + } + + /* + * If we aren't rejecting this packet, and we want to negotiate + * their identifier and they didn't send their identifier, then we + * send a NAK with a CI_IFACEID option appended. We assume the + * input buffer is long enough that we can append the extra + * option safely. 
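[Editor's note] The endswitch bookkeeping above collapses per-option results into one reply code with the precedence ConfRej over ConfNak over ConfAck, rewinding the output pointer whenever the overall code gets "worse". Reduced to just that precedence rule, the aggregation looks like the sketch below; names and the standalone main() are illustrative, only the numeric codes match the CP definitions used here.

/* Illustrative precedence rule for a Configure-Request reply: any rejected
 * option forces ConfRej, otherwise any nak'd option forces ConfNak,
 * otherwise the whole request is ack'd. */
#include <stdio.h>

enum ex_code { EX_CONFACK = 2, EX_CONFNAK = 3, EX_CONFREJ = 4 };

static enum ex_code example_merge(enum ex_code overall, enum ex_code option)
{
    if (option == EX_CONFREJ || overall == EX_CONFREJ)
        return EX_CONFREJ;
    if (option == EX_CONFNAK || overall == EX_CONFNAK)
        return EX_CONFNAK;
    return EX_CONFACK;
}

int main(void)
{
    enum ex_code rc = EX_CONFACK;
    rc = example_merge(rc, EX_CONFACK);
    rc = example_merge(rc, EX_CONFNAK);   /* one nak'd option */
    printf("%d\n", (int)rc);              /* 3 == ConfNak */
    return 0;
}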
+ */ + if (rc != CONFREJ && !ho->neg_ifaceid && + wo->req_ifaceid && !reject_if_disagree) { + if (rc == CONFACK) { + rc = CONFNAK; + ucp = inp; /* reset pointer */ + wo->req_ifaceid = 0; /* don't ask again */ + } + PUTCHAR(CI_IFACEID, ucp); + PUTCHAR(CILEN_IFACEID, ucp); + eui64_put(wo->hisid, ucp); + } + + *len = ucp - inp; /* Compute output length */ + IPV6CPDEBUG(("ipv6cp: returning Configure-%s", CODENAME(rc))); + return (rc); /* Return final code */ +} + +#if PPP_OPTIONS +/* + * ipv6_check_options - check that any IP-related options are OK, + * and assign appropriate defaults. + */ +static void ipv6_check_options() { + ipv6cp_options *wo = &ipv6cp_wantoptions[0]; + + if (!ipv6cp_protent.enabled_flag) + return; + + /* + * Persistent link-local id is only used when user has not explicitly + * configure/hard-code the id + */ + if ((wo->use_persistent) && (!wo->opt_local) && (!wo->opt_remote)) { + + /* + * On systems where there are no Ethernet interfaces used, there + * may be other ways to obtain a persistent id. Right now, it + * will fall back to using magic [see eui64_magic] below when + * an EUI-48 from MAC address can't be obtained. Other possibilities + * include obtaining EEPROM serial numbers, or some other unique + * yet persistent number. On Sparc platforms, this is possible, + * but too bad there's no standards yet for x86 machines. + */ + if (ether_to_eui64(&wo->ourid)) { + wo->opt_local = 1; + } + } + + if (!wo->opt_local) { /* init interface identifier */ + if (wo->use_ip && eui64_iszero(wo->ourid)) { + eui64_setlo32(wo->ourid, lwip_ntohl(ipcp_wantoptions[0].ouraddr)); + if (!eui64_iszero(wo->ourid)) + wo->opt_local = 1; + } + + while (eui64_iszero(wo->ourid)) + eui64_magic(wo->ourid); + } + + if (!wo->opt_remote) { + if (wo->use_ip && eui64_iszero(wo->hisid)) { + eui64_setlo32(wo->hisid, lwip_ntohl(ipcp_wantoptions[0].hisaddr)); + if (!eui64_iszero(wo->hisid)) + wo->opt_remote = 1; + } + } + + if (demand && (eui64_iszero(wo->ourid) || eui64_iszero(wo->hisid))) { + option_error("local/remote LL address required for demand-dialling\n"); + exit(1); + } +} +#endif /* PPP_OPTIONS */ + +#if DEMAND_SUPPORT +/* + * ipv6_demand_conf - configure the interface as though + * IPV6CP were up, for use with dial-on-demand. + */ +static int ipv6_demand_conf(int u) { + ipv6cp_options *wo = &ipv6cp_wantoptions[u]; + + if (!sif6up(u)) + return 0; + + if (!sif6addr(u, wo->ourid, wo->hisid)) + return 0; + + if (!sifnpmode(u, PPP_IPV6, NPMODE_QUEUE)) + return 0; + + ppp_notice("ipv6_demand_conf"); + ppp_notice("local LL address %s", llv6_ntoa(wo->ourid)); + ppp_notice("remote LL address %s", llv6_ntoa(wo->hisid)); + + return 1; +} +#endif /* DEMAND_SUPPORT */ + + +/* + * ipv6cp_up - IPV6CP has come UP. + * + * Configure the IPv6 network interface appropriately and bring it up. + */ +static void ipv6cp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *ho = &pcb->ipv6cp_hisoptions; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + + IPV6CPDEBUG(("ipv6cp: up")); + + /* + * We must have a non-zero LL address for both ends of the link. 
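[Editor's note] ipv6_check_options() above prefers a persistent identifier obtained with ether_to_eui64() before falling back to eui64_magic(). The usual EUI-48 to modified EUI-64 construction such a helper performs (insert ff:fe in the middle and flip the universal/local bit) is sketched below; the function name is illustrative and this is not the lwIP implementation.

/* Illustrative modified EUI-64 construction from a 48-bit MAC address
 * (RFC 4291, appendix A): aa:bb:cc:dd:ee:ff -> aa^02:bb:cc:ff:fe:dd:ee:ff */
#include <stdint.h>
#include <stdio.h>

static void example_mac_to_eui64(const uint8_t mac[6], uint8_t eui[8])
{
    eui[0] = mac[0] ^ 0x02;   /* flip the universal/local bit */
    eui[1] = mac[1];
    eui[2] = mac[2];
    eui[3] = 0xff;
    eui[4] = 0xfe;
    eui[5] = mac[3];
    eui[6] = mac[4];
    eui[7] = mac[5];
}

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0x80, 0xe1, 0x12, 0x34, 0x56 };
    uint8_t eui[8];
    int i;
    example_mac_to_eui64(mac, eui);
    for (i = 0; i < 8; i++)
        printf("%02x%s", eui[i], i == 7 ? "\n" : ":");
    return 0;
}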
+ */ + if (!ho->neg_ifaceid) + ho->hisid = wo->hisid; + +#if 0 /* UNUSED */ + if(!no_ifaceid_neg) { +#endif /* UNUSED */ + if (eui64_iszero(ho->hisid)) { + ppp_error("Could not determine remote LL address"); + ipv6cp_close(f->pcb, "Could not determine remote LL address"); + return; + } + if (eui64_iszero(go->ourid)) { + ppp_error("Could not determine local LL address"); + ipv6cp_close(f->pcb, "Could not determine local LL address"); + return; + } + if (eui64_equals(go->ourid, ho->hisid)) { + ppp_error("local and remote LL addresses are equal"); + ipv6cp_close(f->pcb, "local and remote LL addresses are equal"); + return; + } +#if 0 /* UNUSED */ + } +#endif /* UNUSED */ +#if 0 /* UNUSED */ + script_setenv("LLLOCAL", llv6_ntoa(go->ourid), 0); + script_setenv("LLREMOTE", llv6_ntoa(ho->hisid), 0); +#endif /* UNUSED */ + +#ifdef IPV6CP_COMP + /* set tcp compression */ + sif6comp(f->unit, ho->neg_vj); +#endif + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, the interface is already + * configured, so we put out any saved-up packets, then set the + * interface to pass IPv6 packets. + */ + if (demand) { + if (! eui64_equals(go->ourid, wo->ourid) || + ! eui64_equals(ho->hisid, wo->hisid)) { + if (! eui64_equals(go->ourid, wo->ourid)) + warn("Local LL address changed to %s", + llv6_ntoa(go->ourid)); + if (! eui64_equals(ho->hisid, wo->hisid)) + warn("Remote LL address changed to %s", + llv6_ntoa(ho->hisid)); + ipv6cp_clear_addrs(f->pcb, go->ourid, ho->hisid); + + /* Set the interface to the new addresses */ + if (!sif6addr(f->pcb, go->ourid, ho->hisid)) { + if (debug) + warn("sif6addr failed"); + ipv6cp_close(f->unit, "Interface configuration failed"); + return; + } + + } + demand_rexmit(PPP_IPV6); + sifnpmode(f->unit, PPP_IPV6, NPMODE_PASS); + + } else +#endif /* DEMAND_SUPPORT */ + { + /* + * Set LL addresses + */ + if (!sif6addr(f->pcb, go->ourid, ho->hisid)) { + PPPDEBUG(LOG_DEBUG, ("sif6addr failed")); + ipv6cp_close(f->pcb, "Interface configuration failed"); + return; + } + + /* bring the interface up for IPv6 */ + if (!sif6up(f->pcb)) { + PPPDEBUG(LOG_DEBUG, ("sif6up failed (IPV6)")); + ipv6cp_close(f->pcb, "Interface configuration failed"); + return; + } +#if DEMAND_SUPPORT + sifnpmode(f->pcb, PPP_IPV6, NPMODE_PASS); +#endif /* DEMAND_SUPPORT */ + + ppp_notice("local LL address %s", llv6_ntoa(go->ourid)); + ppp_notice("remote LL address %s", llv6_ntoa(ho->hisid)); + } + + np_up(f->pcb, PPP_IPV6); + pcb->ipv6cp_is_up = 1; + +#if 0 /* UNUSED */ + /* + * Execute the ipv6-up script, like this: + * /etc/ppp/ipv6-up interface tty speed local-LL remote-LL + */ + if (ipv6cp_script_state == s_down && ipv6cp_script_pid == 0) { + ipv6cp_script_state = s_up; + ipv6cp_script(_PATH_IPV6UP); + } +#endif /* UNUSED */ +} + + +/* + * ipv6cp_down - IPV6CP has gone DOWN. + * + * Take the IPv6 network interface down, clear its addresses + * and delete routes through it. + */ +static void ipv6cp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + ipv6cp_options *ho = &pcb->ipv6cp_hisoptions; + + IPV6CPDEBUG(("ipv6cp: down")); +#if PPP_STATS_SUPPORT + update_link_stats(f->unit); +#endif /* PPP_STATS_SUPPORT */ + if (pcb->ipv6cp_is_up) { + pcb->ipv6cp_is_up = 0; + np_down(f->pcb, PPP_IPV6); + } +#ifdef IPV6CP_COMP + sif6comp(f->unit, 0); +#endif + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, set the interface + * to queue up outgoing packets (for now). 
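[Editor's note] Once sif6addr()/sif6up() above have run, the PPP netif carries an IPv6 link-local address built from the negotiated identifiers. From application code it can be read back through the ordinary lwIP netif accessors, as in this sketch; pppos_netif is again an assumed application-owned netif, and slot 0 is used here on the assumption that the link-local address occupies the first per-netif address slot, as is usual in lwIP.

/* Sketch: print the PPP netif's IPv6 link-local address after IPV6CP is up. */
#include <stdio.h>
#include "lwip/netif.h"
#include "lwip/ip6_addr.h"

extern struct netif pppos_netif;   /* netif given to pppos_create() by the app */

static void example_print_ll_addr(void)
{
    if (ip6_addr_isvalid(netif_ip6_addr_state(&pppos_netif, 0))) {
        printf("link-local %s\n",
               ip6addr_ntoa(netif_ip6_addr(&pppos_netif, 0)));
    }
}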
+ */ + if (demand) { + sifnpmode(f->pcb, PPP_IPV6, NPMODE_QUEUE); + } else +#endif /* DEMAND_SUPPORT */ + { +#if DEMAND_SUPPORT + sifnpmode(f->pcb, PPP_IPV6, NPMODE_DROP); +#endif /* DEMAND_SUPPORT */ + ipv6cp_clear_addrs(f->pcb, + go->ourid, + ho->hisid); + sif6down(f->pcb); + } + +#if 0 /* UNUSED */ + /* Execute the ipv6-down script */ + if (ipv6cp_script_state == s_up && ipv6cp_script_pid == 0) { + ipv6cp_script_state = s_down; + ipv6cp_script(_PATH_IPV6DOWN); + } +#endif /* UNUSED */ +} + + +/* + * ipv6cp_clear_addrs() - clear the interface addresses, routes, + * proxy neighbour discovery entries, etc. + */ +static void ipv6cp_clear_addrs(ppp_pcb *pcb, eui64_t ourid, eui64_t hisid) { + cif6addr(pcb, ourid, hisid); +} + + +/* + * ipv6cp_finished - possibly shut down the lower layers. + */ +static void ipv6cp_finished(fsm *f) { + np_finished(f->pcb, PPP_IPV6); +} + + +#if 0 /* UNUSED */ +/* + * ipv6cp_script_done - called when the ipv6-up or ipv6-down script + * has finished. + */ +static void +ipv6cp_script_done(arg) + void *arg; +{ + ipv6cp_script_pid = 0; + switch (ipv6cp_script_state) { + case s_up: + if (ipv6cp_fsm[0].state != PPP_FSM_OPENED) { + ipv6cp_script_state = s_down; + ipv6cp_script(_PATH_IPV6DOWN); + } + break; + case s_down: + if (ipv6cp_fsm[0].state == PPP_FSM_OPENED) { + ipv6cp_script_state = s_up; + ipv6cp_script(_PATH_IPV6UP); + } + break; + } +} + + +/* + * ipv6cp_script - Execute a script with arguments + * interface-name tty-name speed local-LL remote-LL. + */ +static void +ipv6cp_script(script) + char *script; +{ + char strspeed[32], strlocal[32], strremote[32]; + char *argv[8]; + + sprintf(strspeed, "%d", baud_rate); + strcpy(strlocal, llv6_ntoa(ipv6cp_gotoptions[0].ourid)); + strcpy(strremote, llv6_ntoa(ipv6cp_hisoptions[0].hisid)); + + argv[0] = script; + argv[1] = ifname; + argv[2] = devnam; + argv[3] = strspeed; + argv[4] = strlocal; + argv[5] = strremote; + argv[6] = ipparam; + argv[7] = NULL; + + ipv6cp_script_pid = run_program(script, argv, 0, ipv6cp_script_done, + NULL, 0); +} +#endif /* UNUSED */ + +#if PRINTPKT_SUPPORT +/* + * ipv6cp_printpkt - print the contents of an IPV6CP packet. 
+ */ +static const char* const ipv6cp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej" +}; + +static int ipv6cp_printpkt(const u_char *p, int plen, + void (*printer)(void *, const char *, ...), void *arg) { + int code, id, len, olen; + const u_char *pstart, *optend; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + + if (plen < HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(ipv6cp_codenames)) + printer(arg, " %s", ipv6cp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print option list */ + while (len >= 2) { + GETCHAR(code, p); + GETCHAR(olen, p); + p -= 2; + if (olen < 2 || olen > len) { + break; + } + printer(arg, " <"); + len -= olen; + optend = p + olen; + switch (code) { +#ifdef IPV6CP_COMP + case CI_COMPRESSTYPE: + if (olen >= CILEN_COMPRESS) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "compress "); + printer(arg, "0x%x", cishort); + } + break; +#endif /* IPV6CP_COMP */ + case CI_IFACEID: + if (olen == CILEN_IFACEID) { + p += 2; + eui64_get(ifaceid, p); + printer(arg, "addr %s", llv6_ntoa(ifaceid)); + } + break; + default: + break; + } + while (p < optend) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (; len > 0; --len) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +#if DEMAND_SUPPORT +/* + * ipv6_active_pkt - see if this IP packet is worth bringing the link up for. + * We don't bring the link up for IP fragments or for TCP FIN packets + * with no data. + */ +#define IP6_HDRLEN 40 /* bytes */ +#define IP6_NHDR_FRAG 44 /* fragment IPv6 header */ +#define TCP_HDRLEN 20 +#define TH_FIN 0x01 + +/* + * We use these macros because the IP header may be at an odd address, + * and some compilers might use word loads to get th_off or ip_hl. + */ + +#define get_ip6nh(x) (((unsigned char *)(x))[6]) +#define get_tcpoff(x) (((unsigned char *)(x))[12] >> 4) +#define get_tcpflags(x) (((unsigned char *)(x))[13]) + +static int ipv6_active_pkt(u_char *pkt, int len) { + u_char *tcp; + + len -= PPP_HDRLEN; + pkt += PPP_HDRLEN; + if (len < IP6_HDRLEN) + return 0; + if (get_ip6nh(pkt) == IP6_NHDR_FRAG) + return 0; + if (get_ip6nh(pkt) != IPPROTO_TCP) + return 1; + if (len < IP6_HDRLEN + TCP_HDRLEN) + return 0; + tcp = pkt + IP6_HDRLEN; + if ((get_tcpflags(tcp) & TH_FIN) != 0 && len == IP6_HDRLEN + get_tcpoff(tcp) * 4) + return 0; + return 1; +} +#endif /* DEMAND_SUPPORT */ + +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c new file mode 100644 index 00000000..90ed183b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c @@ -0,0 +1,2790 @@ +/* + * lcp.c - PPP Link Control Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + */ + +#if 0 /* UNUSED */ +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#if CHAP_SUPPORT +#include "netif/ppp/chap-new.h" +#endif /* CHAP_SUPPORT */ +#include "netif/ppp/magic.h" + +/* + * When the link comes up we want to be able to wait for a short while, + * or until seeing some input from the peer, before starting to send + * configure-requests. We do this by delaying the fsm_lowerup call. + */ +/* steal a bit in fsm flags word */ +#define DELAYED_UP 0x80 + +static void lcp_delayed_up(void *arg); + +/* + * LCP-related command-line options. 
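[Editor's note] The DELAYED_UP flag described above postpones fsm_lowerup() until either a short timer fires or input arrives from the peer, whichever happens first. The general shape of that "arm a one-shot timeout, cancel it if the event comes earlier" pattern, expressed directly with lwIP's sys_timeout()/sys_untimeout(), is sketched below with illustrative names and an illustrative 100 ms delay; the LCP code itself goes through the PPP timeout wrappers rather than calling these directly.

/* Sketch of the delayed-up pattern used by LCP, with illustrative names. */
#include "lwip/timeouts.h"

struct example_link {
    int up_pending;   /* 1 while the delayed "up" has not happened yet */
};

static void example_do_up(void *arg)
{
    struct example_link *l = (struct example_link *)arg;
    l->up_pending = 0;
    /* ... equivalent of fsm_lowerup(): start sending Configure-Requests ... */
}

static void example_link_came_up(struct example_link *l)
{
    l->up_pending = 1;
    sys_timeout(100, example_do_up, l);   /* wait up to 100 ms before "up" */
}

static void example_input_from_peer(struct example_link *l)
{
    if (l->up_pending) {
        sys_untimeout(example_do_up, l);  /* peer spoke first: stop waiting */
        example_do_up(l);
    }
    /* ... normal input processing ... */
}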
+ */ +#if 0 /* UNUSED */ +int lcp_echo_interval = 0; /* Interval between LCP echo-requests */ +int lcp_echo_fails = 0; /* Tolerance to unanswered echo-requests */ +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* options */ +static u_int lcp_echo_interval = LCP_ECHOINTERVAL; /* Interval between LCP echo-requests */ +static u_int lcp_echo_fails = LCP_MAXECHOFAILS; /* Tolerance to unanswered echo-requests */ +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +#if PPP_LCP_ADAPTIVE +bool lcp_echo_adaptive = 0; /* request echo only if the link was idle */ +#endif +bool lax_recv = 0; /* accept control chars in asyncmap */ +bool noendpoint = 0; /* don't send/accept endpoint discriminator */ +#endif /* UNUSED */ + +#if PPP_OPTIONS +static int noopt (char **); +#endif /* PPP_OPTIONS */ + +#ifdef HAVE_MULTILINK +static int setendpoint (char **); +static void printendpoint (option_t *, void (*)(void *, char *, ...), + void *); +#endif /* HAVE_MULTILINK */ + +#if PPP_OPTIONS +static option_t lcp_option_list[] = { + /* LCP options */ + { "-all", o_special_noarg, (void *)noopt, + "Don't request/allow any LCP options" }, + + { "noaccomp", o_bool, &lcp_wantoptions[0].neg_accompression, + "Disable address/control compression", + OPT_A2CLR, &lcp_allowoptions[0].neg_accompression }, + { "-ac", o_bool, &lcp_wantoptions[0].neg_accompression, + "Disable address/control compression", + OPT_ALIAS | OPT_A2CLR, &lcp_allowoptions[0].neg_accompression }, + + { "asyncmap", o_uint32, &lcp_wantoptions[0].asyncmap, + "Set asyncmap (for received packets)", + OPT_OR, &lcp_wantoptions[0].neg_asyncmap }, + { "-as", o_uint32, &lcp_wantoptions[0].asyncmap, + "Set asyncmap (for received packets)", + OPT_ALIAS | OPT_OR, &lcp_wantoptions[0].neg_asyncmap }, + { "default-asyncmap", o_uint32, &lcp_wantoptions[0].asyncmap, + "Disable asyncmap negotiation", + OPT_OR | OPT_NOARG | OPT_VAL(~0U) | OPT_A2CLR, + &lcp_allowoptions[0].neg_asyncmap }, + { "-am", o_uint32, &lcp_wantoptions[0].asyncmap, + "Disable asyncmap negotiation", + OPT_ALIAS | OPT_OR | OPT_NOARG | OPT_VAL(~0U) | OPT_A2CLR, + &lcp_allowoptions[0].neg_asyncmap }, + + { "nomagic", o_bool, &lcp_wantoptions[0].neg_magicnumber, + "Disable magic number negotiation (looped-back line detection)", + OPT_A2CLR, &lcp_allowoptions[0].neg_magicnumber }, + { "-mn", o_bool, &lcp_wantoptions[0].neg_magicnumber, + "Disable magic number negotiation (looped-back line detection)", + OPT_ALIAS | OPT_A2CLR, &lcp_allowoptions[0].neg_magicnumber }, + + { "mru", o_int, &lcp_wantoptions[0].mru, + "Set MRU (maximum received packet size) for negotiation", + OPT_PRIO, &lcp_wantoptions[0].neg_mru }, + { "default-mru", o_bool, &lcp_wantoptions[0].neg_mru, + "Disable MRU negotiation (use default 1500)", + OPT_PRIOSUB | OPT_A2CLR, &lcp_allowoptions[0].neg_mru }, + { "-mru", o_bool, &lcp_wantoptions[0].neg_mru, + "Disable MRU negotiation (use default 1500)", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, &lcp_allowoptions[0].neg_mru }, + + { "mtu", o_int, &lcp_allowoptions[0].mru, + "Set our MTU", OPT_LIMITS, NULL, MAXMRU, MINMRU }, + + { "nopcomp", o_bool, &lcp_wantoptions[0].neg_pcompression, + "Disable protocol field compression", + OPT_A2CLR, &lcp_allowoptions[0].neg_pcompression }, + { "-pc", o_bool, &lcp_wantoptions[0].neg_pcompression, + "Disable protocol field compression", + OPT_ALIAS | OPT_A2CLR, &lcp_allowoptions[0].neg_pcompression }, + + { "passive", o_bool, &lcp_wantoptions[0].passive, + "Set passive mode", 1 }, + { "-p", o_bool, &lcp_wantoptions[0].passive, + "Set passive mode", OPT_ALIAS | 1 }, + + { 
"silent", o_bool, &lcp_wantoptions[0].silent, + "Set silent mode", 1 }, + + { "lcp-echo-failure", o_int, &lcp_echo_fails, + "Set number of consecutive echo failures to indicate link failure", + OPT_PRIO }, + { "lcp-echo-interval", o_int, &lcp_echo_interval, + "Set time in seconds between LCP echo requests", OPT_PRIO }, +#if PPP_LCP_ADAPTIVE + { "lcp-echo-adaptive", o_bool, &lcp_echo_adaptive, + "Suppress LCP echo requests if traffic was received", 1 }, +#endif + { "lcp-restart", o_int, &lcp_fsm[0].timeouttime, + "Set time in seconds between LCP retransmissions", OPT_PRIO }, + { "lcp-max-terminate", o_int, &lcp_fsm[0].maxtermtransmits, + "Set maximum number of LCP terminate-request transmissions", OPT_PRIO }, + { "lcp-max-configure", o_int, &lcp_fsm[0].maxconfreqtransmits, + "Set maximum number of LCP configure-request transmissions", OPT_PRIO }, + { "lcp-max-failure", o_int, &lcp_fsm[0].maxnakloops, + "Set limit on number of LCP configure-naks", OPT_PRIO }, + + { "receive-all", o_bool, &lax_recv, + "Accept all received control characters", 1 }, + +#ifdef HAVE_MULTILINK + { "mrru", o_int, &lcp_wantoptions[0].mrru, + "Maximum received packet size for multilink bundle", + OPT_PRIO, &lcp_wantoptions[0].neg_mrru }, + + { "mpshortseq", o_bool, &lcp_wantoptions[0].neg_ssnhf, + "Use short sequence numbers in multilink headers", + OPT_PRIO | 1, &lcp_allowoptions[0].neg_ssnhf }, + { "nompshortseq", o_bool, &lcp_wantoptions[0].neg_ssnhf, + "Don't use short sequence numbers in multilink headers", + OPT_PRIOSUB | OPT_A2CLR, &lcp_allowoptions[0].neg_ssnhf }, + + { "endpoint", o_special, (void *) setendpoint, + "Endpoint discriminator for multilink", + OPT_PRIO | OPT_A2PRINTER, (void *) printendpoint }, +#endif /* HAVE_MULTILINK */ + + { "noendpoint", o_bool, &noendpoint, + "Don't send or accept multilink endpoint discriminator", 1 }, + + {NULL} +}; +#endif /* PPP_OPTIONS */ + +/* + * Callbacks for fsm code. 
(CI = Configuration Information) + */ +static void lcp_resetci(fsm *f); /* Reset our CI */ +static int lcp_cilen(fsm *f); /* Return length of our CI */ +static void lcp_addci(fsm *f, u_char *ucp, int *lenp); /* Add our CI to pkt */ +static int lcp_ackci(fsm *f, u_char *p, int len); /* Peer ack'd our CI */ +static int lcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject); /* Peer nak'd our CI */ +static int lcp_rejci(fsm *f, u_char *p, int len); /* Peer rej'd our CI */ +static int lcp_reqci(fsm *f, u_char *inp, int *lenp, int reject_if_disagree); /* Rcv peer CI */ +static void lcp_up(fsm *f); /* We're UP */ +static void lcp_down(fsm *f); /* We're DOWN */ +static void lcp_starting (fsm *); /* We need lower layer up */ +static void lcp_finished (fsm *); /* We need lower layer down */ +static int lcp_extcode(fsm *f, int code, int id, u_char *inp, int len); +static void lcp_rprotrej(fsm *f, u_char *inp, int len); + +/* + * routines to send LCP echos to peer + */ + +static void lcp_echo_lowerup(ppp_pcb *pcb); +static void lcp_echo_lowerdown(ppp_pcb *pcb); +static void LcpEchoTimeout(void *arg); +static void lcp_received_echo_reply(fsm *f, int id, u_char *inp, int len); +static void LcpSendEchoRequest(fsm *f); +static void LcpLinkFailure(fsm *f); +static void LcpEchoCheck(fsm *f); + +static const fsm_callbacks lcp_callbacks = { /* LCP callback routines */ + lcp_resetci, /* Reset our Configuration Information */ + lcp_cilen, /* Length of our Configuration Information */ + lcp_addci, /* Add our Configuration Information */ + lcp_ackci, /* ACK our Configuration Information */ + lcp_nakci, /* NAK our Configuration Information */ + lcp_rejci, /* Reject our Configuration Information */ + lcp_reqci, /* Request peer's Configuration Information */ + lcp_up, /* Called when fsm reaches OPENED state */ + lcp_down, /* Called when fsm leaves OPENED state */ + lcp_starting, /* Called when we want the lower layer up */ + lcp_finished, /* Called when we want the lower layer down */ + NULL, /* Called when Protocol-Reject received */ + NULL, /* Retransmission is necessary */ + lcp_extcode, /* Called to handle LCP-specific codes */ + "LCP" /* String name of protocol */ +}; + +/* + * Protocol entry points. + * Some of these are called directly. + */ + +static void lcp_init(ppp_pcb *pcb); +static void lcp_input(ppp_pcb *pcb, u_char *p, int len); +static void lcp_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int lcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +const struct protent lcp_protent = { + PPP_LCP, + lcp_init, + lcp_input, + lcp_protrej, + lcp_lowerup, + lcp_lowerdown, + lcp_open, + lcp_close, +#if PRINTPKT_SUPPORT + lcp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "LCP", + NULL, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + lcp_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +/* + * Length of each type of configuration option (in octets) + */ +#define CILEN_VOID 2 +#define CILEN_CHAR 3 +#define CILEN_SHORT 4 /* CILEN_VOID + 2 */ +#if CHAP_SUPPORT +#define CILEN_CHAP 5 /* CILEN_VOID + 2 + 1 */ +#endif /* CHAP_SUPPORT */ +#define CILEN_LONG 6 /* CILEN_VOID + 4 */ +#if LQR_SUPPORT +#define CILEN_LQR 8 /* CILEN_VOID + 2 + 4 */ +#endif /* LQR_SUPPORT */ +#define CILEN_CBCP 3 + +#define CODENAME(x) ((x) == CONFACK ? "ACK" : \ + (x) == CONFNAK ? 
"NAK" : "REJ") + +#if PPP_OPTIONS +/* + * noopt - Disable all options (why?). + */ +static int +noopt(argv) + char **argv; +{ + BZERO((char *) &lcp_wantoptions[0], sizeof (struct lcp_options)); + BZERO((char *) &lcp_allowoptions[0], sizeof (struct lcp_options)); + + return (1); +} +#endif /* PPP_OPTIONS */ + +#ifdef HAVE_MULTILINK +static int +setendpoint(argv) + char **argv; +{ + if (str_to_epdisc(&lcp_wantoptions[0].endpoint, *argv)) { + lcp_wantoptions[0].neg_endpoint = 1; + return 1; + } + option_error("Can't parse '%s' as an endpoint discriminator", *argv); + return 0; +} + +static void +printendpoint(opt, printer, arg) + option_t *opt; + void (*printer) (void *, char *, ...); + void *arg; +{ + printer(arg, "%s", epdisc_to_str(&lcp_wantoptions[0].endpoint)); +} +#endif /* HAVE_MULTILINK */ + +/* + * lcp_init - Initialize LCP. + */ +static void lcp_init(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + lcp_options *wo = &pcb->lcp_wantoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + + f->pcb = pcb; + f->protocol = PPP_LCP; + f->callbacks = &lcp_callbacks; + + fsm_init(f); + + BZERO(wo, sizeof(*wo)); + wo->neg_mru = 1; + wo->mru = PPP_DEFMRU; + wo->neg_asyncmap = 1; + wo->neg_magicnumber = 1; + wo->neg_pcompression = 1; + wo->neg_accompression = 1; + + BZERO(ao, sizeof(*ao)); + ao->neg_mru = 1; + ao->mru = PPP_MAXMRU; + ao->neg_asyncmap = 1; +#if CHAP_SUPPORT + ao->neg_chap = 1; + ao->chap_mdtype = CHAP_MDTYPE_SUPPORTED; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + ao->neg_upap = 1; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + ao->neg_eap = 1; +#endif /* EAP_SUPPORT */ + ao->neg_magicnumber = 1; + ao->neg_pcompression = 1; + ao->neg_accompression = 1; + ao->neg_endpoint = 1; +} + + +/* + * lcp_open - LCP is allowed to come up. + */ +void lcp_open(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + lcp_options *wo = &pcb->lcp_wantoptions; + + f->flags &= ~(OPT_PASSIVE | OPT_SILENT); + if (wo->passive) + f->flags |= OPT_PASSIVE; + if (wo->silent) + f->flags |= OPT_SILENT; + fsm_open(f); +} + + +/* + * lcp_close - Take LCP down. + */ +void lcp_close(ppp_pcb *pcb, const char *reason) { + fsm *f = &pcb->lcp_fsm; + int oldstate; + + if (pcb->phase != PPP_PHASE_DEAD +#ifdef HAVE_MULTILINK + && pcb->phase != PPP_PHASE_MASTER +#endif /* HAVE_MULTILINK */ + ) + new_phase(pcb, PPP_PHASE_TERMINATE); + + if (f->flags & DELAYED_UP) { + UNTIMEOUT(lcp_delayed_up, f); + f->state = PPP_FSM_STOPPED; + } + oldstate = f->state; + + fsm_close(f, reason); + if (oldstate == PPP_FSM_STOPPED && (f->flags & (OPT_PASSIVE|OPT_SILENT|DELAYED_UP))) { + /* + * This action is not strictly according to the FSM in RFC1548, + * but it does mean that the program terminates if you do a + * lcp_close() when a connection hasn't been established + * because we are in passive/silent mode or because we have + * delayed the fsm_lowerup() call and it hasn't happened yet. + */ + f->flags &= ~DELAYED_UP; + lcp_finished(f); + } +} + + +/* + * lcp_lowerup - The lower layer is up. + */ +void lcp_lowerup(ppp_pcb *pcb) { + lcp_options *wo = &pcb->lcp_wantoptions; + fsm *f = &pcb->lcp_fsm; + /* + * Don't use A/C or protocol compression on transmission, + * but accept A/C and protocol compressed packets + * if we are going to ask for A/C and protocol compression. + */ + if (ppp_send_config(pcb, PPP_MRU, 0xffffffff, 0, 0) < 0 + || ppp_recv_config(pcb, PPP_MRU, (pcb->settings.lax_recv? 
0: 0xffffffff), + wo->neg_pcompression, wo->neg_accompression) < 0) + return; + pcb->peer_mru = PPP_MRU; + + if (pcb->settings.listen_time != 0) { + f->flags |= DELAYED_UP; + TIMEOUTMS(lcp_delayed_up, f, pcb->settings.listen_time); + } else + fsm_lowerup(f); +} + + +/* + * lcp_lowerdown - The lower layer is down. + */ +void lcp_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + + if (f->flags & DELAYED_UP) { + f->flags &= ~DELAYED_UP; + UNTIMEOUT(lcp_delayed_up, f); + } else + fsm_lowerdown(f); +} + + +/* + * lcp_delayed_up - Bring the lower layer up now. + */ +static void lcp_delayed_up(void *arg) { + fsm *f = (fsm*)arg; + + if (f->flags & DELAYED_UP) { + f->flags &= ~DELAYED_UP; + fsm_lowerup(f); + } +} + + +/* + * lcp_input - Input LCP packet. + */ +static void lcp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->lcp_fsm; + + if (f->flags & DELAYED_UP) { + f->flags &= ~DELAYED_UP; + UNTIMEOUT(lcp_delayed_up, f); + fsm_lowerup(f); + } + fsm_input(f, p, len); +} + +/* + * lcp_extcode - Handle a LCP-specific code. + */ +static int lcp_extcode(fsm *f, int code, int id, u_char *inp, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char *magp; + + switch( code ){ + case PROTREJ: + lcp_rprotrej(f, inp, len); + break; + + case ECHOREQ: + if (f->state != PPP_FSM_OPENED) + break; + magp = inp; + PUTLONG(go->magicnumber, magp); + fsm_sdata(f, ECHOREP, id, inp, len); + break; + + case ECHOREP: + lcp_received_echo_reply(f, id, inp, len); + break; + + case DISCREQ: + case IDENTIF: + case TIMEREM: + break; + + default: + return 0; + } + return 1; +} + + +/* + * lcp_rprotrej - Receive an Protocol-Reject. + * + * Figure out which protocol is rejected and inform it. + */ +static void lcp_rprotrej(fsm *f, u_char *inp, int len) { + int i; + const struct protent *protp; + u_short prot; +#if PPP_PROTOCOLNAME + const char *pname; +#endif /* PPP_PROTOCOLNAME */ + + if (len < 2) { + LCPDEBUG(("lcp_rprotrej: Rcvd short Protocol-Reject packet!")); + return; + } + + GETSHORT(prot, inp); + + /* + * Protocol-Reject packets received in any state other than the LCP + * OPENED state SHOULD be silently discarded. + */ + if( f->state != PPP_FSM_OPENED ){ + LCPDEBUG(("Protocol-Reject discarded: LCP in state %d", f->state)); + return; + } + +#if PPP_PROTOCOLNAME + pname = protocol_name(prot); +#endif /* PPP_PROTOCOLNAME */ + + /* + * Upcall the proper Protocol-Reject routine. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->protocol == prot) { +#if PPP_PROTOCOLNAME + if (pname != NULL) + ppp_dbglog("Protocol-Reject for '%s' (0x%x) received", pname, + prot); + else +#endif /* PPP_PROTOCOLNAME */ + ppp_dbglog("Protocol-Reject for 0x%x received", prot); + (*protp->protrej)(f->pcb); + return; + } + +#if PPP_PROTOCOLNAME + if (pname != NULL) + ppp_warn("Protocol-Reject for unsupported protocol '%s' (0x%x)", pname, + prot); + else +#endif /* #if PPP_PROTOCOLNAME */ + ppp_warn("Protocol-Reject for unsupported protocol 0x%x", prot); +} + + +/* + * lcp_protrej - A Protocol-Reject was received. + */ +/*ARGSUSED*/ +static void lcp_protrej(ppp_pcb *pcb) { + /* + * Can't reject LCP! + */ + ppp_error("Received Protocol-Reject for LCP!"); + fsm_protreject(&pcb->lcp_fsm); +} + + +/* + * lcp_sprotrej - Send a Protocol-Reject for some protocol. + */ +void lcp_sprotrej(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->lcp_fsm; + /* + * Send back the protocol and the information field of the + * rejected packet. We only get here if LCP is in the OPENED state. 
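+     * (Per RFC 1661, the Protocol-Reject data is the rejected protocol
+     * number followed by a copy of the rejected packet's Information
+     * field, which is what p and len are expected to contain here.)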
+ */ +#if 0 + p += 2; + len -= 2; +#endif + + fsm_sdata(f, PROTREJ, ++f->id, + p, len); +} + + +/* + * lcp_resetci - Reset our CI. + */ +static void lcp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *wo = &pcb->lcp_wantoptions; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + +#if PPP_AUTH_SUPPORT + + /* note: default value is true for allow options */ + if (pcb->settings.user && pcb->settings.passwd) { +#if PAP_SUPPORT + if (pcb->settings.refuse_pap) { + ao->neg_upap = 0; + } +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + if (pcb->settings.refuse_chap) { + ao->chap_mdtype &= ~MDTYPE_MD5; + } +#if MSCHAP_SUPPORT + if (pcb->settings.refuse_mschap) { + ao->chap_mdtype &= ~MDTYPE_MICROSOFT; + } + if (pcb->settings.refuse_mschap_v2) { + ao->chap_mdtype &= ~MDTYPE_MICROSOFT_V2; + } +#endif /* MSCHAP_SUPPORT */ + ao->neg_chap = (ao->chap_mdtype != MDTYPE_NONE); +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + if (pcb->settings.refuse_eap) { + ao->neg_eap = 0; + } +#endif /* EAP_SUPPORT */ + +#if PPP_SERVER + /* note: default value is false for wanted options */ + if (pcb->settings.auth_required) { +#if PAP_SUPPORT + if (!pcb->settings.refuse_pap) { + wo->neg_upap = 1; + } +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + if (!pcb->settings.refuse_chap) { + wo->chap_mdtype |= MDTYPE_MD5; + } +#if MSCHAP_SUPPORT + if (!pcb->settings.refuse_mschap) { + wo->chap_mdtype |= MDTYPE_MICROSOFT; + } + if (!pcb->settings.refuse_mschap_v2) { + wo->chap_mdtype |= MDTYPE_MICROSOFT_V2; + } +#endif /* MSCHAP_SUPPORT */ + wo->neg_chap = (wo->chap_mdtype != MDTYPE_NONE); +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + if (!pcb->settings.refuse_eap) { + wo->neg_eap = 1; + } +#endif /* EAP_SUPPORT */ + } +#endif /* PPP_SERVER */ + + } else { +#if PAP_SUPPORT + ao->neg_upap = 0; +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + ao->neg_chap = 0; + ao->chap_mdtype = MDTYPE_NONE; +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + ao->neg_eap = 0; +#endif /* EAP_SUPPORT */ + } + + PPPDEBUG(LOG_DEBUG, ("ppp: auth protocols:")); +#if PAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" PAP=%d", ao->neg_upap)); +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" CHAP=%d CHAP_MD5=%d", ao->neg_chap, !!(ao->chap_mdtype&MDTYPE_MD5))); +#if MSCHAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" CHAP_MS=%d CHAP_MS2=%d", !!(ao->chap_mdtype&MDTYPE_MICROSOFT), !!(ao->chap_mdtype&MDTYPE_MICROSOFT_V2))); +#endif /* MSCHAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" EAP=%d", ao->neg_eap)); +#endif /* EAP_SUPPORT */ + PPPDEBUG(LOG_DEBUG, ("\n")); + +#endif /* PPP_AUTH_SUPPORT */ + + wo->magicnumber = magic(); + wo->numloops = 0; + *go = *wo; +#ifdef HAVE_MULTILINK + if (!multilink) { + go->neg_mrru = 0; +#endif /* HAVE_MULTILINK */ + go->neg_ssnhf = 0; + go->neg_endpoint = 0; +#ifdef HAVE_MULTILINK + } +#endif /* HAVE_MULTILINK */ + if (pcb->settings.noendpoint) + ao->neg_endpoint = 0; + pcb->peer_mru = PPP_MRU; +#if 0 /* UNUSED */ + auth_reset(pcb); +#endif /* UNUSED */ +} + + +/* + * lcp_cilen - Return length of our CI. + */ +static int lcp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + +#define LENCIVOID(neg) ((neg) ? CILEN_VOID : 0) +#if CHAP_SUPPORT +#define LENCICHAP(neg) ((neg) ? CILEN_CHAP : 0) +#endif /* CHAP_SUPPORT */ +#define LENCISHORT(neg) ((neg) ? CILEN_SHORT : 0) +#define LENCILONG(neg) ((neg) ? CILEN_LONG : 0) +#if LQR_SUPPORT +#define LENCILQR(neg) ((neg) ? 
CILEN_LQR: 0) +#endif /* LQR_SUPPORT */ +#define LENCICBCP(neg) ((neg) ? CILEN_CBCP: 0) + /* + * NB: we only ask for one of CHAP, UPAP, or EAP, even if we will + * accept more than one. We prefer EAP first, then CHAP, then + * PAP. + */ + return (LENCISHORT(go->neg_mru && go->mru != PPP_DEFMRU) + + LENCILONG(go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF) + +#if EAP_SUPPORT + LENCISHORT(go->neg_eap) + +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT + LENCICHAP(!go->neg_eap && go->neg_chap) + +#endif /* EAP_SUPPORT */ +#if !EAP_SUPPORT + LENCICHAP(go->neg_chap) + +#endif /* !EAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT && CHAP_SUPPORT + LENCISHORT(!go->neg_eap && !go->neg_chap && go->neg_upap) + +#endif /* EAP_SUPPORT && CHAP_SUPPORT */ +#if EAP_SUPPORT && !CHAP_SUPPORT + LENCISHORT(!go->neg_eap && go->neg_upap) + +#endif /* EAP_SUPPORT && !CHAP_SUPPORT */ +#if !EAP_SUPPORT && CHAP_SUPPORT + LENCISHORT(!go->neg_chap && go->neg_upap) + +#endif /* !EAP_SUPPORT && CHAP_SUPPORT */ +#if !EAP_SUPPORT && !CHAP_SUPPORT + LENCISHORT(go->neg_upap) + +#endif /* !EAP_SUPPORT && !CHAP_SUPPORT */ +#endif /* PAP_SUPPORT */ +#if LQR_SUPPORT + LENCILQR(go->neg_lqr) + +#endif /* LQR_SUPPORT */ + LENCICBCP(go->neg_cbcp) + + LENCILONG(go->neg_magicnumber) + + LENCIVOID(go->neg_pcompression) + + LENCIVOID(go->neg_accompression) + +#ifdef HAVE_MULTILINK + LENCISHORT(go->neg_mrru) + +#endif /* HAVE_MULTILINK */ + LENCIVOID(go->neg_ssnhf) + + (go->neg_endpoint? CILEN_CHAR + go->endpoint.length: 0)); +} + + +/* + * lcp_addci - Add our desired CIs to a packet. + */ +static void lcp_addci(fsm *f, u_char *ucp, int *lenp) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char *start_ucp = ucp; + +#define ADDCIVOID(opt, neg) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_VOID, ucp); \ + } +#define ADDCISHORT(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_SHORT, ucp); \ + PUTSHORT(val, ucp); \ + } +#if CHAP_SUPPORT +#define ADDCICHAP(opt, neg, val) \ + if (neg) { \ + PUTCHAR((opt), ucp); \ + PUTCHAR(CILEN_CHAP, ucp); \ + PUTSHORT(PPP_CHAP, ucp); \ + PUTCHAR((CHAP_DIGEST(val)), ucp); \ + } +#endif /* CHAP_SUPPORT */ +#define ADDCILONG(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_LONG, ucp); \ + PUTLONG(val, ucp); \ + } +#if LQR_SUPPORT +#define ADDCILQR(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_LQR, ucp); \ + PUTSHORT(PPP_LQR, ucp); \ + PUTLONG(val, ucp); \ + } +#endif /* LQR_SUPPORT */ +#define ADDCICHAR(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_CHAR, ucp); \ + PUTCHAR(val, ucp); \ + } +#define ADDCIENDP(opt, neg, class, val, len) \ + if (neg) { \ + int i; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_CHAR + len, ucp); \ + PUTCHAR(class, ucp); \ + for (i = 0; i < len; ++i) \ + PUTCHAR(val[i], ucp); \ + } + + ADDCISHORT(CI_MRU, go->neg_mru && go->mru != PPP_DEFMRU, go->mru); + ADDCILONG(CI_ASYNCMAP, go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF, + go->asyncmap); +#if EAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, go->neg_eap, PPP_EAP); +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT + ADDCICHAP(CI_AUTHTYPE, !go->neg_eap && go->neg_chap, go->chap_mdtype); +#endif /* EAP_SUPPORT */ +#if 
!EAP_SUPPORT + ADDCICHAP(CI_AUTHTYPE, go->neg_chap, go->chap_mdtype); +#endif /* !EAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT && CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, !go->neg_eap && !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && CHAP_SUPPORT */ +#if EAP_SUPPORT && !CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, !go->neg_eap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && !CHAP_SUPPORT */ +#if !EAP_SUPPORT && CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && CHAP_SUPPORT */ +#if !EAP_SUPPORT && !CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && !CHAP_SUPPORT */ +#endif /* PAP_SUPPORT */ +#if LQR_SUPPORT + ADDCILQR(CI_QUALITY, go->neg_lqr, go->lqr_period); +#endif /* LQR_SUPPORT */ + ADDCICHAR(CI_CALLBACK, go->neg_cbcp, CBCP_OPT); + ADDCILONG(CI_MAGICNUMBER, go->neg_magicnumber, go->magicnumber); + ADDCIVOID(CI_PCOMPRESSION, go->neg_pcompression); + ADDCIVOID(CI_ACCOMPRESSION, go->neg_accompression); +#ifdef HAVE_MULTILINK + ADDCISHORT(CI_MRRU, go->neg_mrru, go->mrru); +#endif + ADDCIVOID(CI_SSNHF, go->neg_ssnhf); + ADDCIENDP(CI_EPDISC, go->neg_endpoint, go->endpoint.class_, + go->endpoint.value, go->endpoint.length); + + if (ucp - start_ucp != *lenp) { + /* this should never happen, because peer_mtu should be 1500 */ + ppp_error("Bug in lcp_addci: wrong length"); + } +} + + +/* + * lcp_ackci - Ack our CIs. + * This should not modify any state if the Ack is bad. + * + * Returns: + * 0 - Ack was bad. + * 1 - Ack was good. + */ +static int lcp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char cilen, citype, cichar; + u_short cishort; + u32_t cilong; + + /* + * CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. 
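+     * For example, if we sent an MRU option as { 0x01, 0x04, 0x05, 0xDC }
+     * (type CI_MRU, length 4, MRU 1500), the Ack must carry those same
+     * four octets at the same position, or we discard the packet.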
+ */ +#define ACKCIVOID(opt, neg) \ + if (neg) { \ + if ((len -= CILEN_VOID) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_VOID || \ + citype != opt) \ + goto bad; \ + } +#define ACKCISHORT(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_SHORT) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_SHORT || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != val) \ + goto bad; \ + } +#define ACKCICHAR(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_CHAR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_CHAR || \ + citype != opt) \ + goto bad; \ + GETCHAR(cichar, p); \ + if (cichar != val) \ + goto bad; \ + } +#if CHAP_SUPPORT +#define ACKCICHAP(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_CHAP) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_CHAP || \ + citype != (opt)) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != PPP_CHAP) \ + goto bad; \ + GETCHAR(cichar, p); \ + if (cichar != (CHAP_DIGEST(val))) \ + goto bad; \ + } +#endif /* CHAP_SUPPORT */ +#define ACKCILONG(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_LONG) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_LONG || \ + citype != opt) \ + goto bad; \ + GETLONG(cilong, p); \ + if (cilong != val) \ + goto bad; \ + } +#if LQR_SUPPORT +#define ACKCILQR(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_LQR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_LQR || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != PPP_LQR) \ + goto bad; \ + GETLONG(cilong, p); \ + if (cilong != val) \ + goto bad; \ + } +#endif /* LQR_SUPPORT */ +#define ACKCIENDP(opt, neg, class, val, vlen) \ + if (neg) { \ + int i; \ + if ((len -= CILEN_CHAR + vlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_CHAR + vlen || \ + citype != opt) \ + goto bad; \ + GETCHAR(cichar, p); \ + if (cichar != class) \ + goto bad; \ + for (i = 0; i < vlen; ++i) { \ + GETCHAR(cichar, p); \ + if (cichar != val[i]) \ + goto bad; \ + } \ + } + + ACKCISHORT(CI_MRU, go->neg_mru && go->mru != PPP_DEFMRU, go->mru); + ACKCILONG(CI_ASYNCMAP, go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF, + go->asyncmap); +#if EAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, go->neg_eap, PPP_EAP); +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT + ACKCICHAP(CI_AUTHTYPE, !go->neg_eap && go->neg_chap, go->chap_mdtype); +#endif /* EAP_SUPPORT */ +#if !EAP_SUPPORT + ACKCICHAP(CI_AUTHTYPE, go->neg_chap, go->chap_mdtype); +#endif /* !EAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT && CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, !go->neg_eap && !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && CHAP_SUPPORT */ +#if EAP_SUPPORT && !CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, !go->neg_eap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && !CHAP_SUPPORT */ +#if !EAP_SUPPORT && CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && CHAP_SUPPORT */ +#if !EAP_SUPPORT && !CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && !CHAP_SUPPORT */ +#endif /* PAP_SUPPORT */ 
+#if LQR_SUPPORT + ACKCILQR(CI_QUALITY, go->neg_lqr, go->lqr_period); +#endif /* LQR_SUPPORT */ + ACKCICHAR(CI_CALLBACK, go->neg_cbcp, CBCP_OPT); + ACKCILONG(CI_MAGICNUMBER, go->neg_magicnumber, go->magicnumber); + ACKCIVOID(CI_PCOMPRESSION, go->neg_pcompression); + ACKCIVOID(CI_ACCOMPRESSION, go->neg_accompression); +#ifdef HAVE_MULTILINK + ACKCISHORT(CI_MRRU, go->neg_mrru, go->mrru); +#endif /* HAVE_MULTILINK */ + ACKCIVOID(CI_SSNHF, go->neg_ssnhf); + ACKCIENDP(CI_EPDISC, go->neg_endpoint, go->endpoint.class_, + go->endpoint.value, go->endpoint.length); + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + return (1); +bad: + LCPDEBUG(("lcp_acki: received bad Ack!")); + return (0); +} + + +/* + * lcp_nakci - Peer has sent a NAK for some of our CIs. + * This should not modify any state if the Nak is bad + * or if LCP is in the OPENED state. + * + * Returns: + * 0 - Nak was bad. + * 1 - Nak was good. + */ +static int lcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *wo = &pcb->lcp_wantoptions; + u_char citype, cichar, *next; + u_short cishort; + u32_t cilong; + lcp_options no; /* options we've seen Naks for */ + lcp_options try_; /* options to request next time */ + int looped_back = 0; + int cilen; + + BZERO(&no, sizeof(no)); + try_ = *go; + + /* + * Any Nak'd CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define NAKCIVOID(opt, neg) \ + if (go->neg && \ + len >= CILEN_VOID && \ + p[1] == CILEN_VOID && \ + p[0] == opt) { \ + len -= CILEN_VOID; \ + INCPTR(CILEN_VOID, p); \ + no.neg = 1; \ + try_.neg = 0; \ + } +#if CHAP_SUPPORT +#define NAKCICHAP(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + no.neg = 1; \ + code \ + } +#endif /* CHAP_SUPPORT */ +#define NAKCICHAR(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_CHAR && \ + p[1] == CILEN_CHAR && \ + p[0] == opt) { \ + len -= CILEN_CHAR; \ + INCPTR(2, p); \ + GETCHAR(cichar, p); \ + no.neg = 1; \ + code \ + } +#define NAKCISHORT(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_SHORT && \ + p[1] == CILEN_SHORT && \ + p[0] == opt) { \ + len -= CILEN_SHORT; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + no.neg = 1; \ + code \ + } +#define NAKCILONG(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_LONG && \ + p[1] == CILEN_LONG && \ + p[0] == opt) { \ + len -= CILEN_LONG; \ + INCPTR(2, p); \ + GETLONG(cilong, p); \ + no.neg = 1; \ + code \ + } +#if LQR_SUPPORT +#define NAKCILQR(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_LQR && \ + p[1] == CILEN_LQR && \ + p[0] == opt) { \ + len -= CILEN_LQR; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETLONG(cilong, p); \ + no.neg = 1; \ + code \ + } +#endif /* LQR_SUPPORT */ +#define NAKCIENDP(opt, neg) \ + if (go->neg && \ + len >= CILEN_CHAR && \ + p[0] == opt && \ + p[1] >= CILEN_CHAR && \ + p[1] <= len) { \ + len -= p[1]; \ + INCPTR(p[1], p); \ + no.neg = 1; \ + try_.neg = 0; \ + } + + /* + * NOTE! There must be no assignments to individual fields of *go in + * the code below. Any such assignment is a BUG! + */ + /* + * We don't care if they want to send us smaller packets than + * we want. 
Therefore, accept any MRU less than what we asked for, + * but then ignore the new value when setting the MRU in the kernel. + * If they send us a bigger MRU than what we asked, accept it, up to + * the limit of the default MRU we'd get if we didn't negotiate. + */ + if (go->neg_mru && go->mru != PPP_DEFMRU) { + NAKCISHORT(CI_MRU, neg_mru, + if (cishort <= wo->mru || cishort <= PPP_DEFMRU) + try_.mru = cishort; + ); + } + + /* + * Add any characters they want to our (receive-side) asyncmap. + */ + if (go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF) { + NAKCILONG(CI_ASYNCMAP, neg_asyncmap, + try_.asyncmap = go->asyncmap | cilong; + ); + } + + /* + * If they've nak'd our authentication-protocol, check whether + * they are proposing a different protocol, or a different + * hash algorithm for CHAP. + */ + if ((0 +#if CHAP_SUPPORT + || go->neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + || go->neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap +#endif /* EAP_SUPPORT */ + ) + && len >= CILEN_SHORT + && p[0] == CI_AUTHTYPE && p[1] >= CILEN_SHORT && p[1] <= len) { + cilen = p[1]; + len -= cilen; +#if CHAP_SUPPORT + no.neg_chap = go->neg_chap; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + no.neg_upap = go->neg_upap; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + no.neg_eap = go->neg_eap; +#endif /* EAP_SUPPORT */ + INCPTR(2, p); + GETSHORT(cishort, p); + +#if PAP_SUPPORT + if (cishort == PPP_PAP && cilen == CILEN_SHORT) { +#if EAP_SUPPORT + /* If we were asking for EAP, then we need to stop that. */ + if (go->neg_eap) + try_.neg_eap = 0; + else +#endif /* EAP_SUPPORT */ + +#if CHAP_SUPPORT + /* If we were asking for CHAP, then we need to stop that. */ + if (go->neg_chap) + try_.neg_chap = 0; + else +#endif /* CHAP_SUPPORT */ + + /* + * If we weren't asking for CHAP or EAP, then we were asking for + * PAP, in which case this Nak is bad. + */ + goto bad; + } else +#endif /* PAP_SUPPORT */ + +#if CHAP_SUPPORT + if (cishort == PPP_CHAP && cilen == CILEN_CHAP) { + GETCHAR(cichar, p); +#if EAP_SUPPORT + /* Stop asking for EAP, if we were. */ + if (go->neg_eap) { + try_.neg_eap = 0; + /* Try to set up to use their suggestion, if possible */ + if (CHAP_CANDIGEST(go->chap_mdtype, cichar)) + try_.chap_mdtype = CHAP_MDTYPE_D(cichar); + } else +#endif /* EAP_SUPPORT */ + if (go->neg_chap) { + /* + * We were asking for our preferred algorithm, they must + * want something different. + */ + if (cichar != CHAP_DIGEST(go->chap_mdtype)) { + if (CHAP_CANDIGEST(go->chap_mdtype, cichar)) { + /* Use their suggestion if we support it ... */ + try_.chap_mdtype = CHAP_MDTYPE_D(cichar); + } else { + /* ... otherwise, try our next-preferred algorithm. */ + try_.chap_mdtype &= ~(CHAP_MDTYPE(try_.chap_mdtype)); + if (try_.chap_mdtype == MDTYPE_NONE) /* out of algos */ + try_.neg_chap = 0; + } + } else { + /* + * Whoops, they Nak'd our algorithm of choice + * but then suggested it back to us. + */ + goto bad; + } + } else { + /* + * Stop asking for PAP if we were asking for it. + */ +#if PAP_SUPPORT + try_.neg_upap = 0; +#endif /* PAP_SUPPORT */ + } + + } else +#endif /* CHAP_SUPPORT */ + { + +#if EAP_SUPPORT + /* + * If we were asking for EAP, and they're Conf-Naking EAP, + * well, that's just strange. Nobody should do that. + */ + if (cishort == PPP_EAP && cilen == CILEN_SHORT && go->neg_eap) + ppp_dbglog("Unexpected Conf-Nak for EAP"); + + /* + * We don't recognize what they're suggesting. + * Stop asking for what we were asking for. 
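+	 * (The fallback order below mirrors our preference order:
+	 * drop EAP first, then CHAP, then finally PAP.)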
+ */ + if (go->neg_eap) + try_.neg_eap = 0; + else +#endif /* EAP_SUPPORT */ + +#if CHAP_SUPPORT + if (go->neg_chap) + try_.neg_chap = 0; + else +#endif /* CHAP_SUPPORT */ + +#if PAP_SUPPORT + if(1) + try_.neg_upap = 0; + else +#endif /* PAP_SUPPORT */ + {} + + p += cilen - CILEN_SHORT; + } + } + +#if LQR_SUPPORT + /* + * If they can't cope with our link quality protocol, we'll have + * to stop asking for LQR. We haven't got any other protocol. + * If they Nak the reporting period, take their value XXX ? + */ + NAKCILQR(CI_QUALITY, neg_lqr, + if (cishort != PPP_LQR) + try_.neg_lqr = 0; + else + try_.lqr_period = cilong; + ); +#endif /* LQR_SUPPORT */ + + /* + * Only implementing CBCP...not the rest of the callback options + */ + NAKCICHAR(CI_CALLBACK, neg_cbcp, + try_.neg_cbcp = 0; + (void)cichar; /* if CHAP support is not compiled, cichar is set but not used, which makes some compilers complaining */ + ); + + /* + * Check for a looped-back line. + */ + NAKCILONG(CI_MAGICNUMBER, neg_magicnumber, + try_.magicnumber = magic(); + looped_back = 1; + ); + + /* + * Peer shouldn't send Nak for protocol compression or + * address/control compression requests; they should send + * a Reject instead. If they send a Nak, treat it as a Reject. + */ + NAKCIVOID(CI_PCOMPRESSION, neg_pcompression); + NAKCIVOID(CI_ACCOMPRESSION, neg_accompression); + +#ifdef HAVE_MULTILINK + /* + * Nak for MRRU option - accept their value if it is smaller + * than the one we want. + */ + if (go->neg_mrru) { + NAKCISHORT(CI_MRRU, neg_mrru, + if (treat_as_reject) + try_.neg_mrru = 0; + else if (cishort <= wo->mrru) + try_.mrru = cishort; + ); + } +#else /* HAVE_MULTILINK */ + LWIP_UNUSED_ARG(treat_as_reject); +#endif /* HAVE_MULTILINK */ + + /* + * Nak for short sequence numbers shouldn't be sent, treat it + * like a reject. + */ + NAKCIVOID(CI_SSNHF, neg_ssnhf); + + /* + * Nak of the endpoint discriminator option is not permitted, + * treat it like a reject. + */ + NAKCIENDP(CI_EPDISC, neg_endpoint); + + /* + * There may be remaining CIs, if the peer is requesting negotiation + * on an option that we didn't include in our request packet. + * If we see an option that we requested, or one we've already seen + * in this packet, then this packet is bad. + * If we wanted to respond by starting to negotiate on the requested + * option(s), we could, but we don't, because except for the + * authentication type and quality protocol, if we are not negotiating + * an option, it is because we were told not to. + * For the authentication type, the Nak from the peer means + * `let me authenticate myself with you' which is a bit pointless. + * For the quality protocol, the Nak means `ask me to send you quality + * reports', but if we didn't ask for them, we don't want them. + * An option we don't recognize represents the peer asking to + * negotiate some option we don't support, so ignore it. 
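+     * The loop below applies these rules: it takes up a proposed MRU below
+     * the default and a request for short sequence numbers, and otherwise
+     * either ignores the option or treats the packet as bad.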
+ */ + while (len >= CILEN_VOID) { + GETCHAR(citype, p); + GETCHAR(cilen, p); + if (cilen < CILEN_VOID || (len -= cilen) < 0) + goto bad; + next = p + cilen - 2; + + switch (citype) { + case CI_MRU: + if ((go->neg_mru && go->mru != PPP_DEFMRU) + || no.neg_mru || cilen != CILEN_SHORT) + goto bad; + GETSHORT(cishort, p); + if (cishort < PPP_DEFMRU) { + try_.neg_mru = 1; + try_.mru = cishort; + } + break; + case CI_ASYNCMAP: + if ((go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF) + || no.neg_asyncmap || cilen != CILEN_LONG) + goto bad; + break; + case CI_AUTHTYPE: + if (0 +#if CHAP_SUPPORT + || go->neg_chap || no.neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + || go->neg_upap || no.neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap || no.neg_eap +#endif /* EAP_SUPPORT */ + ) + goto bad; + break; + case CI_MAGICNUMBER: + if (go->neg_magicnumber || no.neg_magicnumber || + cilen != CILEN_LONG) + goto bad; + break; + case CI_PCOMPRESSION: + if (go->neg_pcompression || no.neg_pcompression + || cilen != CILEN_VOID) + goto bad; + break; + case CI_ACCOMPRESSION: + if (go->neg_accompression || no.neg_accompression + || cilen != CILEN_VOID) + goto bad; + break; +#if LQR_SUPPORT + case CI_QUALITY: + if (go->neg_lqr || no.neg_lqr || cilen != CILEN_LQR) + goto bad; + break; +#endif /* LQR_SUPPORT */ +#ifdef HAVE_MULTILINK + case CI_MRRU: + if (go->neg_mrru || no.neg_mrru || cilen != CILEN_SHORT) + goto bad; + break; +#endif /* HAVE_MULTILINK */ + case CI_SSNHF: + if (go->neg_ssnhf || no.neg_ssnhf || cilen != CILEN_VOID) + goto bad; + try_.neg_ssnhf = 1; + break; + case CI_EPDISC: + if (go->neg_endpoint || no.neg_endpoint || cilen < CILEN_CHAR) + goto bad; + break; + default: + break; + } + p = next; + } + + /* + * OK, the Nak is good. Now we can update state. + * If there are any options left we ignore them. + */ + if (f->state != PPP_FSM_OPENED) { + if (looped_back) { + if (++try_.numloops >= pcb->settings.lcp_loopbackfail) { + ppp_notice("Serial line is looped back."); + pcb->err_code = PPPERR_LOOPBACK; + lcp_close(f->pcb, "Loopback detected"); + } + } else + try_.numloops = 0; + *go = try_; + } + + return 1; + +bad: + LCPDEBUG(("lcp_nakci: received bad Nak!")); + return 0; +} + + +/* + * lcp_rejci - Peer has Rejected some of our CIs. + * This should not modify any state if the Reject is bad + * or if LCP is in the OPENED state. + * + * Returns: + * 0 - Reject was bad. + * 1 - Reject was good. + */ +static int lcp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char cichar; + u_short cishort; + u32_t cilong; + lcp_options try_; /* options to request next time */ + + try_ = *go; + + /* + * Any Rejected CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define REJCIVOID(opt, neg) \ + if (go->neg && \ + len >= CILEN_VOID && \ + p[1] == CILEN_VOID && \ + p[0] == opt) { \ + len -= CILEN_VOID; \ + INCPTR(CILEN_VOID, p); \ + try_.neg = 0; \ + } +#define REJCISHORT(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_SHORT && \ + p[1] == CILEN_SHORT && \ + p[0] == opt) { \ + len -= CILEN_SHORT; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + /* Check rejected value. 
*/ \ + if (cishort != val) \ + goto bad; \ + try_.neg = 0; \ + } + +#if CHAP_SUPPORT && EAP_SUPPORT && PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + try_.neg_eap = try_.neg_upap = 0; \ + } +#endif /* CHAP_SUPPORT && EAP_SUPPORT && PAP_SUPPORT */ + +#if CHAP_SUPPORT && !EAP_SUPPORT && PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + try_.neg_upap = 0; \ + } +#endif /* CHAP_SUPPORT && !EAP_SUPPORT && PAP_SUPPORT */ + +#if CHAP_SUPPORT && EAP_SUPPORT && !PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + try_.neg_eap = 0; \ + } +#endif /* CHAP_SUPPORT && EAP_SUPPORT && !PAP_SUPPORT */ + +#if CHAP_SUPPORT && !EAP_SUPPORT && !PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* CHAP_SUPPORT && !EAP_SUPPORT && !PAP_SUPPORT */ + +#define REJCILONG(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_LONG && \ + p[1] == CILEN_LONG && \ + p[0] == opt) { \ + len -= CILEN_LONG; \ + INCPTR(2, p); \ + GETLONG(cilong, p); \ + /* Check rejected value. */ \ + if (cilong != val) \ + goto bad; \ + try_.neg = 0; \ + } +#if LQR_SUPPORT +#define REJCILQR(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_LQR && \ + p[1] == CILEN_LQR && \ + p[0] == opt) { \ + len -= CILEN_LQR; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETLONG(cilong, p); \ + /* Check rejected value. */ \ + if (cishort != PPP_LQR || cilong != val) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* LQR_SUPPORT */ +#define REJCICBCP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CBCP && \ + p[1] == CILEN_CBCP && \ + p[0] == opt) { \ + len -= CILEN_CBCP; \ + INCPTR(2, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. 
*/ \ + if (cichar != val) \ + goto bad; \ + try_.neg = 0; \ + } +#define REJCIENDP(opt, neg, class, val, vlen) \ + if (go->neg && \ + len >= CILEN_CHAR + vlen && \ + p[0] == opt && \ + p[1] == CILEN_CHAR + vlen) { \ + int i; \ + len -= CILEN_CHAR + vlen; \ + INCPTR(2, p); \ + GETCHAR(cichar, p); \ + if (cichar != class) \ + goto bad; \ + for (i = 0; i < vlen; ++i) { \ + GETCHAR(cichar, p); \ + if (cichar != val[i]) \ + goto bad; \ + } \ + try_.neg = 0; \ + } + + REJCISHORT(CI_MRU, neg_mru, go->mru); + REJCILONG(CI_ASYNCMAP, neg_asyncmap, go->asyncmap); +#if EAP_SUPPORT + REJCISHORT(CI_AUTHTYPE, neg_eap, PPP_EAP); + if (!go->neg_eap) { +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + REJCICHAP(CI_AUTHTYPE, neg_chap, go->chap_mdtype); + if (!go->neg_chap) { +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + REJCISHORT(CI_AUTHTYPE, neg_upap, PPP_PAP); +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + } +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + } +#endif /* EAP_SUPPORT */ +#if LQR_SUPPORT + REJCILQR(CI_QUALITY, neg_lqr, go->lqr_period); +#endif /* LQR_SUPPORT */ + REJCICBCP(CI_CALLBACK, neg_cbcp, CBCP_OPT); + REJCILONG(CI_MAGICNUMBER, neg_magicnumber, go->magicnumber); + REJCIVOID(CI_PCOMPRESSION, neg_pcompression); + REJCIVOID(CI_ACCOMPRESSION, neg_accompression); +#ifdef HAVE_MULTILINK + REJCISHORT(CI_MRRU, neg_mrru, go->mrru); +#endif /* HAVE_MULTILINK */ + REJCIVOID(CI_SSNHF, neg_ssnhf); + REJCIENDP(CI_EPDISC, neg_endpoint, go->endpoint.class_, + go->endpoint.value, go->endpoint.length); + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + /* + * Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; + +bad: + LCPDEBUG(("lcp_rejci: received bad Reject!")); + return 0; +} + + +/* + * lcp_reqci - Check the peer's requested CIs and send appropriate response. + * + * Returns: CONFACK, CONFNAK or CONFREJ and input packet modified + * appropriately. If reject_if_disagree is non-zero, doesn't return + * CONFNAK; returns CONFREJ if it can't return CONFACK. + * + * inp = Requested CIs + * lenp = Length of requested CIs + */ +static int lcp_reqci(fsm *f, u_char *inp, int *lenp, int reject_if_disagree) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *ho = &pcb->lcp_hisoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + u_char *cip, *next; /* Pointer to current and next CIs */ + int cilen, citype, cichar; /* Parsed len, type, char value */ + u_short cishort; /* Parsed short value */ + u32_t cilong; /* Parse long value */ + int rc = CONFACK; /* Final packet return code */ + int orc; /* Individual option return code */ + u_char *p; /* Pointer to next char to parse */ + u_char *rejp; /* Pointer to next char in reject frame */ + struct pbuf *nakp; /* Nak buffer */ + u_char *nakoutp; /* Pointer to next char in Nak frame */ + int l = *lenp; /* Length left */ + + /* + * Reset all his options. + */ + BZERO(ho, sizeof(*ho)); + + /* + * Process all his options. + */ + next = inp; + nakp = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_CTRL_PBUF_MAX_SIZE), PPP_CTRL_PBUF_TYPE); + if(NULL == nakp) + return 0; + if(nakp->tot_len != nakp->len) { + pbuf_free(nakp); + return 0; + } + + nakoutp = (u_char*)nakp->payload; + rejp = inp; + while (l) { + orc = CONFACK; /* Assume success */ + cip = p = next; /* Remember begining of CI */ + if (l < 2 || /* Not enough data for CI header or */ + p[1] < 2 || /* CI length too small or */ + p[1] > l) { /* CI length too big? 
*/ + LCPDEBUG(("lcp_reqci: bad CI length!")); + orc = CONFREJ; /* Reject bad CI */ + cilen = l; /* Reject till end of packet */ + l = 0; /* Don't loop again */ + citype = 0; + goto endswitch; + } + GETCHAR(citype, p); /* Parse CI type */ + GETCHAR(cilen, p); /* Parse CI length */ + l -= cilen; /* Adjust remaining length */ + next += cilen; /* Step to next CI */ + + switch (citype) { /* Check CI type */ + case CI_MRU: + if (!ao->neg_mru || /* Allow option? */ + cilen != CILEN_SHORT) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + GETSHORT(cishort, p); /* Parse MRU */ + + /* + * He must be able to receive at least our minimum. + * No need to check a maximum. If he sends a large number, + * we'll just ignore it. + */ + if (cishort < PPP_MINMRU) { + orc = CONFNAK; /* Nak CI */ + PUTCHAR(CI_MRU, nakoutp); + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_MINMRU, nakoutp); /* Give him a hint */ + break; + } + ho->neg_mru = 1; /* Remember he sent MRU */ + ho->mru = cishort; /* And remember value */ + break; + + case CI_ASYNCMAP: + if (!ao->neg_asyncmap || + cilen != CILEN_LONG) { + orc = CONFREJ; + break; + } + GETLONG(cilong, p); + + /* + * Asyncmap must have set at least the bits + * which are set in lcp_allowoptions[unit].asyncmap. + */ + if ((ao->asyncmap & ~cilong) != 0) { + orc = CONFNAK; + PUTCHAR(CI_ASYNCMAP, nakoutp); + PUTCHAR(CILEN_LONG, nakoutp); + PUTLONG(ao->asyncmap | cilong, nakoutp); + break; + } + ho->neg_asyncmap = 1; + ho->asyncmap = cilong; + break; + + case CI_AUTHTYPE: + if (cilen < CILEN_SHORT || + !(0 +#if PAP_SUPPORT + || ao->neg_upap +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + || ao->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || ao->neg_eap +#endif /* EAP_SUPPORT */ + )) { + /* + * Reject the option if we're not willing to authenticate. + */ + ppp_dbglog("No auth is possible"); + orc = CONFREJ; + break; + } + GETSHORT(cishort, p); + + /* + * Authtype must be PAP, CHAP, or EAP. + * + * Note: if more than one of ao->neg_upap, ao->neg_chap, and + * ao->neg_eap are set, and the peer sends a Configure-Request + * with two or more authenticate-protocol requests, then we will + * reject the second request. + * Whether we end up doing CHAP, UPAP, or EAP depends then on + * the ordering of the CIs in the peer's Configure-Request. 
+ */ + +#if PAP_SUPPORT + if (cishort == PPP_PAP) { + /* we've already accepted CHAP or EAP */ + if (0 +#if CHAP_SUPPORT + || ho->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || ho->neg_eap +#endif /* EAP_SUPPORT */ + || cilen != CILEN_SHORT) { + LCPDEBUG(("lcp_reqci: rcvd AUTHTYPE PAP, rejecting...")); + orc = CONFREJ; + break; + } + if (!ao->neg_upap) { /* we don't want to do PAP */ + orc = CONFNAK; /* NAK it and suggest CHAP or EAP */ + PUTCHAR(CI_AUTHTYPE, nakoutp); +#if EAP_SUPPORT + if (ao->neg_eap) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_EAP, nakoutp); + } else { +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + } +#endif /* EAP_SUPPORT */ + break; + } + ho->neg_upap = 1; + break; + } +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + if (cishort == PPP_CHAP) { + /* we've already accepted PAP or EAP */ + if ( +#if PAP_SUPPORT + ho->neg_upap || +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + ho->neg_eap || +#endif /* EAP_SUPPORT */ + cilen != CILEN_CHAP) { + LCPDEBUG(("lcp_reqci: rcvd AUTHTYPE CHAP, rejecting...")); + orc = CONFREJ; + break; + } + if (!ao->neg_chap) { /* we don't want to do CHAP */ + orc = CONFNAK; /* NAK it and suggest EAP or PAP */ + PUTCHAR(CI_AUTHTYPE, nakoutp); + PUTCHAR(CILEN_SHORT, nakoutp); +#if EAP_SUPPORT + if (ao->neg_eap) { + PUTSHORT(PPP_EAP, nakoutp); + } else +#endif /* EAP_SUPPORT */ +#if PAP_SUPPORT + if(1) { + PUTSHORT(PPP_PAP, nakoutp); + } + else +#endif /* PAP_SUPPORT */ + {} + break; + } + GETCHAR(cichar, p); /* get digest type */ + if (!(CHAP_CANDIGEST(ao->chap_mdtype, cichar))) { + /* + * We can't/won't do the requested type, + * suggest something else. + */ + orc = CONFNAK; + PUTCHAR(CI_AUTHTYPE, nakoutp); + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); + break; + } + ho->chap_mdtype = CHAP_MDTYPE_D(cichar); /* save md type */ + ho->neg_chap = 1; + break; + } +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + if (cishort == PPP_EAP) { + /* we've already accepted CHAP or PAP */ + if ( +#if CHAP_SUPPORT + ho->neg_chap || +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + ho->neg_upap || +#endif /* PAP_SUPPORT */ + cilen != CILEN_SHORT) { + LCPDEBUG(("lcp_reqci: rcvd AUTHTYPE EAP, rejecting...")); + orc = CONFREJ; + break; + } + if (!ao->neg_eap) { /* we don't want to do EAP */ + orc = CONFNAK; /* NAK it and suggest CHAP or PAP */ + PUTCHAR(CI_AUTHTYPE, nakoutp); +#if CHAP_SUPPORT + if (ao->neg_chap) { + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if(1) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_PAP, nakoutp); + } else +#endif /* PAP_SUPPORT */ + {} + break; + } + ho->neg_eap = 1; + break; + } +#endif /* EAP_SUPPORT */ + + /* + * We don't recognize the protocol they're asking for. + * Nak it with something we're willing to do. + * (At this point we know ao->neg_upap || ao->neg_chap || + * ao->neg_eap.) 
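+	 * The Nak built below suggests our most-preferred protocol: EAP if
+	 * we allow it, otherwise CHAP, otherwise PAP.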
+ */ + orc = CONFNAK; + PUTCHAR(CI_AUTHTYPE, nakoutp); + +#if EAP_SUPPORT + if (ao->neg_eap) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_EAP, nakoutp); + } else +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + if (ao->neg_chap) { + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if(1) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_PAP, nakoutp); + } else +#endif /* PAP_SUPPORT */ + {} + break; + +#if LQR_SUPPORT + case CI_QUALITY: + if (!ao->neg_lqr || + cilen != CILEN_LQR) { + orc = CONFREJ; + break; + } + + GETSHORT(cishort, p); + GETLONG(cilong, p); + + /* + * Check the protocol and the reporting period. + * XXX When should we Nak this, and what with? + */ + if (cishort != PPP_LQR) { + orc = CONFNAK; + PUTCHAR(CI_QUALITY, nakoutp); + PUTCHAR(CILEN_LQR, nakoutp); + PUTSHORT(PPP_LQR, nakoutp); + PUTLONG(ao->lqr_period, nakoutp); + break; + } + break; +#endif /* LQR_SUPPORT */ + + case CI_MAGICNUMBER: + if (!(ao->neg_magicnumber || go->neg_magicnumber) || + cilen != CILEN_LONG) { + orc = CONFREJ; + break; + } + GETLONG(cilong, p); + + /* + * He must have a different magic number. + */ + if (go->neg_magicnumber && + cilong == go->magicnumber) { + cilong = magic(); /* Don't put magic() inside macro! */ + orc = CONFNAK; + PUTCHAR(CI_MAGICNUMBER, nakoutp); + PUTCHAR(CILEN_LONG, nakoutp); + PUTLONG(cilong, nakoutp); + break; + } + ho->neg_magicnumber = 1; + ho->magicnumber = cilong; + break; + + + case CI_PCOMPRESSION: + if (!ao->neg_pcompression || + cilen != CILEN_VOID) { + orc = CONFREJ; + break; + } + ho->neg_pcompression = 1; + break; + + case CI_ACCOMPRESSION: + if (!ao->neg_accompression || + cilen != CILEN_VOID) { + orc = CONFREJ; + break; + } + ho->neg_accompression = 1; + break; + +#ifdef HAVE_MULTILINK + case CI_MRRU: + if (!ao->neg_mrru + || !multilink + || cilen != CILEN_SHORT) { + orc = CONFREJ; + break; + } + + GETSHORT(cishort, p); + /* possibly should insist on a minimum/maximum MRRU here */ + ho->neg_mrru = 1; + ho->mrru = cishort; + break; +#endif /* HAVE_MULTILINK */ + + case CI_SSNHF: + if (!ao->neg_ssnhf +#ifdef HAVE_MULTILINK + || !multilink +#endif /* HAVE_MULTILINK */ + || cilen != CILEN_VOID) { + orc = CONFREJ; + break; + } + ho->neg_ssnhf = 1; + break; + + case CI_EPDISC: + if (!ao->neg_endpoint || + cilen < CILEN_CHAR || + cilen > CILEN_CHAR + MAX_ENDP_LEN) { + orc = CONFREJ; + break; + } + GETCHAR(cichar, p); + cilen -= CILEN_CHAR; + ho->neg_endpoint = 1; + ho->endpoint.class_ = cichar; + ho->endpoint.length = cilen; + MEMCPY(ho->endpoint.value, p, cilen); + INCPTR(cilen, p); + break; + + default: + LCPDEBUG(("lcp_reqci: rcvd unknown option %d", citype)); + orc = CONFREJ; + break; + } + +endswitch: + if (orc == CONFACK && /* Good CI */ + rc != CONFACK) /* but prior CI wasnt? */ + continue; /* Don't send this one */ + + if (orc == CONFNAK) { /* Nak this CI? */ + if (reject_if_disagree /* Getting fed up with sending NAKs? */ + && citype != CI_MAGICNUMBER) { + orc = CONFREJ; /* Get tough if so */ + } else { + if (rc == CONFREJ) /* Rejecting prior CI? */ + continue; /* Don't send this one */ + rc = CONFNAK; + } + } + if (orc == CONFREJ) { /* Reject this CI */ + rc = CONFREJ; + if (cip != rejp) /* Need to move rejected CI? */ + MEMCPY(rejp, cip, cilen); /* Move it */ + INCPTR(cilen, rejp); /* Update output pointer */ + } + } + + /* + * If we wanted to send additional NAKs (for unsent CIs), the + * code would go here. 
The extra NAKs would go at *nakoutp. + * At present there are no cases where we want to ask the + * peer to negotiate an option. + */ + + switch (rc) { + case CONFACK: + *lenp = next - inp; + break; + case CONFNAK: + /* + * Copy the Nak'd options from the nak buffer to the caller's buffer. + */ + *lenp = nakoutp - (u_char*)nakp->payload; + MEMCPY(inp, nakp->payload, *lenp); + break; + case CONFREJ: + *lenp = rejp - inp; + break; + default: + break; + } + + pbuf_free(nakp); + LCPDEBUG(("lcp_reqci: returning CONF%s.", CODENAME(rc))); + return (rc); /* Return final code */ +} + + +/* + * lcp_up - LCP has come UP. + */ +static void lcp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *wo = &pcb->lcp_wantoptions; + lcp_options *ho = &pcb->lcp_hisoptions; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + int mtu, mru; + + if (!go->neg_magicnumber) + go->magicnumber = 0; + if (!ho->neg_magicnumber) + ho->magicnumber = 0; + + /* + * Set our MTU to the smaller of the MTU we wanted and + * the MRU our peer wanted. If we negotiated an MRU, + * set our MRU to the larger of value we wanted and + * the value we got in the negotiation. + * Note on the MTU: the link MTU can be the MRU the peer wanted, + * the interface MTU is set to the lowest of that, the + * MTU we want to use, and our link MRU. + */ + mtu = ho->neg_mru? ho->mru: PPP_MRU; + mru = go->neg_mru? LWIP_MAX(wo->mru, go->mru): PPP_MRU; +#ifdef HAVE_MULTILINK + if (!(multilink && go->neg_mrru && ho->neg_mrru)) +#endif /* HAVE_MULTILINK */ + netif_set_mtu(pcb, LWIP_MIN(LWIP_MIN(mtu, mru), ao->mru)); + ppp_send_config(pcb, mtu, + (ho->neg_asyncmap? ho->asyncmap: 0xffffffff), + ho->neg_pcompression, ho->neg_accompression); + ppp_recv_config(pcb, mru, + (pcb->settings.lax_recv? 0: go->neg_asyncmap? go->asyncmap: 0xffffffff), + go->neg_pcompression, go->neg_accompression); + + if (ho->neg_mru) + pcb->peer_mru = ho->mru; + + lcp_echo_lowerup(f->pcb); /* Enable echo messages */ + + link_established(pcb); +} + + +/* + * lcp_down - LCP has gone DOWN. + * + * Alert other protocols. + */ +static void lcp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + + lcp_echo_lowerdown(f->pcb); + + link_down(pcb); + + ppp_send_config(pcb, PPP_MRU, 0xffffffff, 0, 0); + ppp_recv_config(pcb, PPP_MRU, + (go->neg_asyncmap? go->asyncmap: 0xffffffff), + go->neg_pcompression, go->neg_accompression); + pcb->peer_mru = PPP_MRU; +} + + +/* + * lcp_starting - LCP needs the lower layer up. + */ +static void lcp_starting(fsm *f) { + ppp_pcb *pcb = f->pcb; + link_required(pcb); +} + + +/* + * lcp_finished - LCP has finished with the lower layer. + */ +static void lcp_finished(fsm *f) { + ppp_pcb *pcb = f->pcb; + link_terminated(pcb); +} + + +#if PRINTPKT_SUPPORT +/* + * lcp_printpkt - print the contents of an LCP packet. 
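+ * Only compiled in when PRINTPKT_SUPPORT is enabled; it decodes the code,
+ * id and option list of a packet for the debug printer.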
+ */ +static const char* const lcp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej", "ProtRej", + "EchoReq", "EchoRep", "DiscReq", "Ident", + "TimeRem" +}; + +static int lcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len, olen, i; + const u_char *pstart, *optend; + u_short cishort; + u32_t cilong; + + if (plen < HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(lcp_codenames)) + printer(arg, " %s", lcp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print option list */ + while (len >= 2) { + GETCHAR(code, p); + GETCHAR(olen, p); + p -= 2; + if (olen < 2 || olen > len) { + break; + } + printer(arg, " <"); + len -= olen; + optend = p + olen; + switch (code) { + case CI_MRU: + if (olen == CILEN_SHORT) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "mru %d", cishort); + } + break; + case CI_ASYNCMAP: + if (olen == CILEN_LONG) { + p += 2; + GETLONG(cilong, p); + printer(arg, "asyncmap 0x%x", cilong); + } + break; + case CI_AUTHTYPE: + if (olen >= CILEN_SHORT) { + p += 2; + printer(arg, "auth "); + GETSHORT(cishort, p); + switch (cishort) { +#if PAP_SUPPORT + case PPP_PAP: + printer(arg, "pap"); + break; +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + case PPP_CHAP: + printer(arg, "chap"); + if (p < optend) { + switch (*p) { + case CHAP_MD5: + printer(arg, " MD5"); + ++p; + break; +#if MSCHAP_SUPPORT + case CHAP_MICROSOFT: + printer(arg, " MS"); + ++p; + break; + + case CHAP_MICROSOFT_V2: + printer(arg, " MS-v2"); + ++p; + break; +#endif /* MSCHAP_SUPPORT */ + default: + break; + } + } + break; +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + case PPP_EAP: + printer(arg, "eap"); + break; +#endif /* EAP_SUPPORT */ + default: + printer(arg, "0x%x", cishort); + } + } + break; +#if LQR_SUPPORT + case CI_QUALITY: + if (olen >= CILEN_SHORT) { + p += 2; + printer(arg, "quality "); + GETSHORT(cishort, p); + switch (cishort) { + case PPP_LQR: + printer(arg, "lqr"); + break; + default: + printer(arg, "0x%x", cishort); + } + } + break; +#endif /* LQR_SUPPORT */ + case CI_CALLBACK: + if (olen >= CILEN_CHAR) { + p += 2; + printer(arg, "callback "); + GETCHAR(cishort, p); + switch (cishort) { + case CBCP_OPT: + printer(arg, "CBCP"); + break; + default: + printer(arg, "0x%x", cishort); + } + } + break; + case CI_MAGICNUMBER: + if (olen == CILEN_LONG) { + p += 2; + GETLONG(cilong, p); + printer(arg, "magic 0x%x", cilong); + } + break; + case CI_PCOMPRESSION: + if (olen == CILEN_VOID) { + p += 2; + printer(arg, "pcomp"); + } + break; + case CI_ACCOMPRESSION: + if (olen == CILEN_VOID) { + p += 2; + printer(arg, "accomp"); + } + break; + case CI_MRRU: + if (olen == CILEN_SHORT) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "mrru %d", cishort); + } + break; + case CI_SSNHF: + if (olen == CILEN_VOID) { + p += 2; + printer(arg, "ssnhf"); + } + break; + case CI_EPDISC: +#ifdef HAVE_MULTILINK + if (olen >= CILEN_CHAR) { + struct epdisc epd; + p += 2; + GETCHAR(epd.class, p); + epd.length = olen - CILEN_CHAR; + if (epd.length > MAX_ENDP_LEN) + epd.length = MAX_ENDP_LEN; + if (epd.length > 0) { + MEMCPY(epd.value, p, epd.length); + p += epd.length; + } + printer(arg, "endpoint [%s]", epdisc_to_str(&epd)); + } 
+#else + printer(arg, "endpoint"); +#endif + break; + default: + break; + } + while (p < optend) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + + case ECHOREQ: + case ECHOREP: + case DISCREQ: + if (len >= 4) { + GETLONG(cilong, p); + printer(arg, " magic=0x%x", cilong); + len -= 4; + } + break; + + case IDENTIF: + case TIMEREM: + if (len >= 4) { + GETLONG(cilong, p); + printer(arg, " magic=0x%x", cilong); + len -= 4; + } + if (code == TIMEREM) { + if (len < 4) + break; + GETLONG(cilong, p); + printer(arg, " seconds=%u", cilong); + len -= 4; + } + if (len > 0) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (i = 0; i < len && i < 32; ++i) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + if (i < len) { + printer(arg, " ..."); + p += len - i; + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +/* + * Time to shut down the link because there is nothing out there. + */ + +static void LcpLinkFailure(fsm *f) { + ppp_pcb *pcb = f->pcb; + if (f->state == PPP_FSM_OPENED) { + ppp_info("No response to %d echo-requests", pcb->lcp_echos_pending); + ppp_notice("Serial link appears to be disconnected."); + pcb->err_code = PPPERR_PEERDEAD; + lcp_close(pcb, "Peer not responding"); + } +} + +/* + * Timer expired for the LCP echo requests from this process. + */ + +static void LcpEchoCheck(fsm *f) { + ppp_pcb *pcb = f->pcb; + + LcpSendEchoRequest (f); + if (f->state != PPP_FSM_OPENED) + return; + + /* + * Start the timer for the next interval. + */ + if (pcb->lcp_echo_timer_running) + ppp_warn("assertion lcp_echo_timer_running==0 failed"); + TIMEOUT (LcpEchoTimeout, f, pcb->settings.lcp_echo_interval); + pcb->lcp_echo_timer_running = 1; +} + +/* + * LcpEchoTimeout - Timer expired on the LCP echo + */ + +static void LcpEchoTimeout(void *arg) { + fsm *f = (fsm*)arg; + ppp_pcb *pcb = f->pcb; + if (pcb->lcp_echo_timer_running != 0) { + pcb->lcp_echo_timer_running = 0; + LcpEchoCheck ((fsm *) arg); + } +} + +/* + * LcpEchoReply - LCP has received a reply to the echo + */ + +static void lcp_received_echo_reply(fsm *f, int id, u_char *inp, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u32_t magic_val; + LWIP_UNUSED_ARG(id); + + /* Check the magic number - don't count replies from ourselves. */ + if (len < 4) { + ppp_dbglog("lcp: received short Echo-Reply, length %d", len); + return; + } + GETLONG(magic_val, inp); + if (go->neg_magicnumber + && magic_val == go->magicnumber) { + ppp_warn("appear to have received our own echo-reply!"); + return; + } + + /* Reset the number of outstanding echo frames */ + pcb->lcp_echos_pending = 0; +} + +/* + * LcpSendEchoRequest - Send an echo request frame to the peer + */ + +static void LcpSendEchoRequest(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u32_t lcp_magic; + u_char pkt[4], *pktp; + + /* + * Detect the failure of the peer at this point. 
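+ * An Echo-Request goes out every lcp_echo_interval seconds
+ * (LCP_ECHOINTERVAL by default, installed by ppp_new()), and
+ * lcp_echos_pending is cleared when lcp_received_echo_reply() above sees a
+ * valid reply, so lcp_echo_fails unanswered requests mark the peer dead.
+ * For example, an interval of 10 s and a fail count of 3 reports
+ * PPPERR_PEERDEAD and closes LCP after roughly 30 s of silence; a fail
+ * count of 0 disables the check entirely.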
+ */ + if (pcb->settings.lcp_echo_fails != 0) { + if (pcb->lcp_echos_pending >= pcb->settings.lcp_echo_fails) { + LcpLinkFailure(f); + pcb->lcp_echos_pending = 0; + } + } + +#if PPP_LCP_ADAPTIVE + /* + * If adaptive echos have been enabled, only send the echo request if + * no traffic was received since the last one. + */ + if (pcb->settings.lcp_echo_adaptive) { + static unsigned int last_pkts_in = 0; + +#if PPP_STATS_SUPPORT + update_link_stats(f->unit); + link_stats_valid = 0; +#endif /* PPP_STATS_SUPPORT */ + + if (link_stats.pkts_in != last_pkts_in) { + last_pkts_in = link_stats.pkts_in; + return; + } + } +#endif + + /* + * Make and send the echo request frame. + */ + if (f->state == PPP_FSM_OPENED) { + lcp_magic = go->magicnumber; + pktp = pkt; + PUTLONG(lcp_magic, pktp); + fsm_sdata(f, ECHOREQ, pcb->lcp_echo_number++, pkt, pktp - pkt); + ++pcb->lcp_echos_pending; + } +} + +/* + * lcp_echo_lowerup - Start the timer for the LCP frame + */ + +static void lcp_echo_lowerup(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + + /* Clear the parameters for generating echo frames */ + pcb->lcp_echos_pending = 0; + pcb->lcp_echo_number = 0; + pcb->lcp_echo_timer_running = 0; + + /* If a timeout interval is specified then start the timer */ + if (pcb->settings.lcp_echo_interval != 0) + LcpEchoCheck (f); +} + +/* + * lcp_echo_lowerdown - Stop the timer for the LCP frame + */ + +static void lcp_echo_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + + if (pcb->lcp_echo_timer_running != 0) { + UNTIMEOUT (LcpEchoTimeout, f); + pcb->lcp_echo_timer_running = 0; + } +} + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c new file mode 100644 index 00000000..d0d87c5e --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c @@ -0,0 +1,294 @@ +/* + * magic.c - PPP Magic Number routines. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/***************************************************************************** +* randm.c - Random number generator program file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* Copyright (c) 1998 by Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 98-06-03 Guy Lancaster , Global Election Systems Inc. +* Extracted from avos. +*****************************************************************************/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/magic.h" + +#if PPP_MD5_RANDM /* Using MD5 for better randomness if enabled */ + +#include "netif/ppp/pppcrypt.h" + +#define MD5_HASH_SIZE 16 +static char magic_randpool[MD5_HASH_SIZE]; /* Pool of randomness. */ +static long magic_randcount; /* Pseudo-random incrementer */ +static u32_t magic_randomseed; /* Seed used for random number generation. */ + +/* + * Churn the randomness pool on a random event. Call this early and often + * on random and semi-random system events to build randomness in time for + * usage. For randomly timed events, pass a null pointer and a zero length + * and this will use the system timer and other sources to add randomness. + * If new random data is available, pass a pointer to that and it will be + * included. + * + * Ref: Applied Cryptography 2nd Ed. by Bruce Schneier p. 
427 + */ +static void magic_churnrand(char *rand_data, u32_t rand_len) { + lwip_md5_context md5_ctx; + + /* LWIP_DEBUGF(LOG_INFO, ("magic_churnrand: %u@%P\n", rand_len, rand_data)); */ + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + lwip_md5_update(&md5_ctx, (u_char *)magic_randpool, sizeof(magic_randpool)); + if (rand_data) { + lwip_md5_update(&md5_ctx, (u_char *)rand_data, rand_len); + } else { + struct { + /* INCLUDE fields for any system sources of randomness */ + u32_t jiffies; +#ifdef LWIP_RAND + u32_t rand; +#endif /* LWIP_RAND */ + } sys_data; + magic_randomseed += sys_jiffies(); + sys_data.jiffies = magic_randomseed; +#ifdef LWIP_RAND + sys_data.rand = LWIP_RAND(); +#endif /* LWIP_RAND */ + /* Load sys_data fields here. */ + lwip_md5_update(&md5_ctx, (u_char *)&sys_data, sizeof(sys_data)); + } + lwip_md5_finish(&md5_ctx, (u_char *)magic_randpool); + lwip_md5_free(&md5_ctx); +/* LWIP_DEBUGF(LOG_INFO, ("magic_churnrand: -> 0\n")); */ +} + +/* + * Initialize the random number generator. + */ +void magic_init(void) { + magic_churnrand(NULL, 0); +} + +/* + * Randomize our random seed value. + */ +void magic_randomize(void) { + magic_churnrand(NULL, 0); +} + +/* + * magic_random_bytes - Fill a buffer with random bytes. + * + * Use the random pool to generate random data. This degrades to pseudo + * random when used faster than randomness is supplied using magic_churnrand(). + * Note: It's important that there be sufficient randomness in magic_randpool + * before this is called for otherwise the range of the result may be + * narrow enough to make a search feasible. + * + * Ref: Applied Cryptography 2nd Ed. by Bruce Schneier p. 427 + * + * XXX Why does he not just call magic_churnrand() for each block? Probably + * so that you don't ever publish the seed which could possibly help + * predict future values. + * XXX Why don't we preserve md5 between blocks and just update it with + * magic_randcount each time? Probably there is a weakness but I wish that + * it was documented. + */ +void magic_random_bytes(unsigned char *buf, u32_t buf_len) { + lwip_md5_context md5_ctx; + u_char tmp[MD5_HASH_SIZE]; + u32_t n; + + while (buf_len > 0) { + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + lwip_md5_update(&md5_ctx, (u_char *)magic_randpool, sizeof(magic_randpool)); + lwip_md5_update(&md5_ctx, (u_char *)&magic_randcount, sizeof(magic_randcount)); + lwip_md5_finish(&md5_ctx, tmp); + lwip_md5_free(&md5_ctx); + magic_randcount++; + n = LWIP_MIN(buf_len, MD5_HASH_SIZE); + MEMCPY(buf, tmp, n); + buf += n; + buf_len -= n; + } +} + +/* + * Return a new random number. + */ +u32_t magic(void) { + u32_t new_rand; + + magic_random_bytes((unsigned char *)&new_rand, sizeof(new_rand)); + + return new_rand; +} + +#else /* PPP_MD5_RANDM */ + +/*****************************/ +/*** LOCAL DATA STRUCTURES ***/ +/*****************************/ +#ifndef LWIP_RAND +static int magic_randomized; /* Set when truely randomized. */ +#endif /* LWIP_RAND */ +static u32_t magic_randomseed; /* Seed used for random number generation. */ + + +/***********************************/ +/*** PUBLIC FUNCTION DEFINITIONS ***/ +/***********************************/ + +/* + * Initialize the random number generator. + * + * Here we attempt to compute a random number seed but even if + * it isn't random, we'll randomize it later. + * + * The current method uses the fields from the real time clock, + * the idle process counter, the millisecond counter, and the + * hardware timer tick counter. 
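+ * (Usage sketch, for illustration only; both the MD5 pool above and this
+ * fallback provide the same interface to the rest of the PPP code:
+ *
+ *    magic_init();                                     once, from ppp_init()
+ *    u32_t xid = magic();                              e.g. an LCP magic number
+ *    u8_t challenge[16];
+ *    magic_random_bytes(challenge, sizeof(challenge));
+ *
+ * Either implementation degrades to a pseudo-random sequence when called
+ * faster than new entropy is mixed in via magic_randomize().)
+ *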
When this is invoked + * in startup(), then the idle counter and timer values may + * repeat after each boot and the real time clock may not be + * operational. Thus we call it again on the first random + * event. + */ +void magic_init(void) { + magic_randomseed += sys_jiffies(); +#ifndef LWIP_RAND + /* Initialize the Borland random number generator. */ + srand((unsigned)magic_randomseed); +#endif /* LWIP_RAND */ +} + +/* + * magic_init - Initialize the magic number generator. + * + * Randomize our random seed value. Here we use the fact that + * this function is called at *truely random* times by the polling + * and network functions. Here we only get 16 bits of new random + * value but we use the previous value to randomize the other 16 + * bits. + */ +void magic_randomize(void) { +#ifndef LWIP_RAND + if (!magic_randomized) { + magic_randomized = !0; + magic_init(); + /* The initialization function also updates the seed. */ + } else { +#endif /* LWIP_RAND */ + magic_randomseed += sys_jiffies(); +#ifndef LWIP_RAND + } +#endif /* LWIP_RAND */ +} + +/* + * Return a new random number. + * + * Here we use the Borland rand() function to supply a pseudo random + * number which we make truely random by combining it with our own + * seed which is randomized by truely random events. + * Thus the numbers will be truely random unless there have been no + * operator or network events in which case it will be pseudo random + * seeded by the real time clock. + */ +u32_t magic(void) { +#ifdef LWIP_RAND + return LWIP_RAND() + magic_randomseed; +#else /* LWIP_RAND */ + return ((u32_t)rand() << 16) + (u32_t)rand() + magic_randomseed; +#endif /* LWIP_RAND */ +} + +/* + * magic_random_bytes - Fill a buffer with random bytes. + */ +void magic_random_bytes(unsigned char *buf, u32_t buf_len) { + u32_t new_rand, n; + + while (buf_len > 0) { + new_rand = magic(); + n = LWIP_MIN(buf_len, sizeof(new_rand)); + MEMCPY(buf, &new_rand, n); + buf += n; + buf_len -= n; + } +} +#endif /* PPP_MD5_RANDM */ + +/* + * Return a new random number between 0 and (2^pow)-1 included. + */ +u32_t magic_pow(u8_t pow) { + return magic() & ~(~0UL<. + * Copyright (c) 2002,2003,2004 Google, Inc. + * All rights reserved. + * + * License: + * Permission to use, copy, modify, and distribute this software and its + * documentation is hereby granted, provided that the above copyright + * notice appears in all copies. This software is provided without any + * warranty, express or implied. + * + * Changelog: + * 08/12/05 - Matt Domsch + * Only need extra skb padding on transmit, not receive. + * 06/18/04 - Matt Domsch , Oleg Makarenko + * Use Linux kernel 2.6 arc4 and sha1 routines rather than + * providing our own. 
+ * 2/15/04 - TS: added #include and testing for Kernel + * version before using + * MOD_DEC_USAGE_COUNT/MOD_INC_USAGE_COUNT which are + * deprecated in 2.6 + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MPPE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include + +#include "lwip/err.h" + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/ccp.h" +#include "netif/ppp/mppe.h" +#include "netif/ppp/pppdebug.h" +#include "netif/ppp/pppcrypt.h" + +#define SHA1_SIGNATURE_SIZE 20 + +/* ppp_mppe_state.bits definitions */ +#define MPPE_BIT_A 0x80 /* Encryption table were (re)inititalized */ +#define MPPE_BIT_B 0x40 /* MPPC only (not implemented) */ +#define MPPE_BIT_C 0x20 /* MPPC only (not implemented) */ +#define MPPE_BIT_D 0x10 /* This is an encrypted frame */ + +#define MPPE_BIT_FLUSHED MPPE_BIT_A +#define MPPE_BIT_ENCRYPTED MPPE_BIT_D + +#define MPPE_BITS(p) ((p)[0] & 0xf0) +#define MPPE_CCOUNT(p) ((((p)[0] & 0x0f) << 8) + (p)[1]) +#define MPPE_CCOUNT_SPACE 0x1000 /* The size of the ccount space */ + +#define MPPE_OVHD 2 /* MPPE overhead/packet */ +#define SANITY_MAX 1600 /* Max bogon factor we will tolerate */ + +/* + * Perform the MPPE rekey algorithm, from RFC 3078, sec. 7.3. + * Well, not what's written there, but rather what they meant. + */ +static void mppe_rekey(ppp_mppe_state * state, int initial_key) +{ + lwip_sha1_context sha1_ctx; + u8_t sha1_digest[SHA1_SIGNATURE_SIZE]; + + /* + * Key Derivation, from RFC 3078, RFC 3079. + * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079. + */ + lwip_sha1_init(&sha1_ctx); + lwip_sha1_starts(&sha1_ctx); + lwip_sha1_update(&sha1_ctx, state->master_key, state->keylen); + lwip_sha1_update(&sha1_ctx, mppe_sha1_pad1, SHA1_PAD_SIZE); + lwip_sha1_update(&sha1_ctx, state->session_key, state->keylen); + lwip_sha1_update(&sha1_ctx, mppe_sha1_pad2, SHA1_PAD_SIZE); + lwip_sha1_finish(&sha1_ctx, sha1_digest); + lwip_sha1_free(&sha1_ctx); + MEMCPY(state->session_key, sha1_digest, state->keylen); + + if (!initial_key) { + lwip_arc4_init(&state->arc4); + lwip_arc4_setup(&state->arc4, sha1_digest, state->keylen); + lwip_arc4_crypt(&state->arc4, state->session_key, state->keylen); + lwip_arc4_free(&state->arc4); + } + if (state->keylen == 8) { + /* See RFC 3078 */ + state->session_key[0] = 0xd1; + state->session_key[1] = 0x26; + state->session_key[2] = 0x9e; + } + lwip_arc4_init(&state->arc4); + lwip_arc4_setup(&state->arc4, state->session_key, state->keylen); +} + +/* + * Set key, used by MSCHAP before mppe_init() is actually called by CCP so we + * don't have to keep multiple copies of keys. + */ +void mppe_set_key(ppp_pcb *pcb, ppp_mppe_state *state, u8_t *key) { + LWIP_UNUSED_ARG(pcb); + MEMCPY(state->master_key, key, MPPE_MAX_KEY_LEN); +} + +/* + * Initialize (de)compressor state. + */ +void +mppe_init(ppp_pcb *pcb, ppp_mppe_state *state, u8_t options) +{ +#if PPP_DEBUG + const u8_t *debugstr = (const u8_t*)"mppe_comp_init"; + if (&pcb->mppe_decomp == state) { + debugstr = (const u8_t*)"mppe_decomp_init"; + } +#endif /* PPP_DEBUG */ + + /* Save keys. */ + MEMCPY(state->session_key, state->master_key, sizeof(state->master_key)); + + if (options & MPPE_OPT_128) + state->keylen = 16; + else if (options & MPPE_OPT_40) + state->keylen = 8; + else { + PPPDEBUG(LOG_DEBUG, ("%s[%d]: unknown key length\n", debugstr, + pcb->netif->num)); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + return; + } + if (options & MPPE_OPT_STATEFUL) + state->stateful = 1; + + /* Generate the initial session key. 
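+ * For clarity, mppe_rekey() above derives the key as follows:
+ *
+ *    digest      = SHA1(master_key | SHA1_PAD1 | session_key | SHA1_PAD2)
+ *    session_key = first keylen bytes of digest
+ *
+ * For a re-key (not the initial key) session_key is additionally
+ * RC4-encrypted under digest, the "what they meant" reading of RFC 3078
+ * sec. 7.3 noted at the top of that function. A 40-bit key then gets its
+ * first three bytes forced to d1 26 9e, and the RC4 stream state is
+ * re-keyed with the resulting session_key.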
*/ + mppe_rekey(state, 1); + +#if PPP_DEBUG + { + int i; + char mkey[sizeof(state->master_key) * 2 + 1]; + char skey[sizeof(state->session_key) * 2 + 1]; + + PPPDEBUG(LOG_DEBUG, ("%s[%d]: initialized with %d-bit %s mode\n", + debugstr, pcb->netif->num, (state->keylen == 16) ? 128 : 40, + (state->stateful) ? "stateful" : "stateless")); + + for (i = 0; i < (int)sizeof(state->master_key); i++) + sprintf(mkey + i * 2, "%02x", state->master_key[i]); + for (i = 0; i < (int)sizeof(state->session_key); i++) + sprintf(skey + i * 2, "%02x", state->session_key[i]); + PPPDEBUG(LOG_DEBUG, + ("%s[%d]: keys: master: %s initial session: %s\n", + debugstr, pcb->netif->num, mkey, skey)); + } +#endif /* PPP_DEBUG */ + + /* + * Initialize the coherency count. The initial value is not specified + * in RFC 3078, but we can make a reasonable assumption that it will + * start at 0. Setting it to the max here makes the comp/decomp code + * do the right thing (determined through experiment). + */ + state->ccount = MPPE_CCOUNT_SPACE - 1; + + /* + * Note that even though we have initialized the key table, we don't + * set the FLUSHED bit. This is contrary to RFC 3078, sec. 3.1. + */ + state->bits = MPPE_BIT_ENCRYPTED; +} + +/* + * We received a CCP Reset-Request (actually, we are sending a Reset-Ack), + * tell the compressor to rekey. Note that we MUST NOT rekey for + * every CCP Reset-Request; we only rekey on the next xmit packet. + * We might get multiple CCP Reset-Requests if our CCP Reset-Ack is lost. + * So, rekeying for every CCP Reset-Request is broken as the peer will not + * know how many times we've rekeyed. (If we rekey and THEN get another + * CCP Reset-Request, we must rekey again.) + */ +void mppe_comp_reset(ppp_pcb *pcb, ppp_mppe_state *state) +{ + LWIP_UNUSED_ARG(pcb); + state->bits |= MPPE_BIT_FLUSHED; +} + +/* + * Compress (encrypt) a packet. + * It's strange to call this a compressor, since the output is always + * MPPE_OVHD + 2 bytes larger than the input. + */ +err_t +mppe_compress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb, u16_t protocol) +{ + struct pbuf *n, *np; + u8_t *pl; + err_t err; + + LWIP_UNUSED_ARG(pcb); + + /* TCP stack requires that we don't change the packet payload, therefore we copy + * the whole packet before encryption. 
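+ * The copy also leaves room in front, so the outgoing pbuf ends up as
+ *
+ *    [ coherency count + bits (2) ][ protocol (2) ][ copied payload ... ]
+ *
+ * and only the protocol field and the payload are RC4-encrypted below; the
+ * two leading MPPE bytes stay in clear text so the receiver can read the
+ * coherency count and the FLUSHED/ENCRYPTED flags.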
+ */ + np = pbuf_alloc(PBUF_RAW, MPPE_OVHD + sizeof(protocol) + (*pb)->tot_len, PBUF_RAM); + if (!np) { + return ERR_MEM; + } + + /* Hide MPPE header + protocol */ + pbuf_remove_header(np, MPPE_OVHD + sizeof(protocol)); + + if ((err = pbuf_copy(np, *pb)) != ERR_OK) { + pbuf_free(np); + return err; + } + + /* Reveal MPPE header + protocol */ + pbuf_add_header(np, MPPE_OVHD + sizeof(protocol)); + + *pb = np; + pl = (u8_t*)np->payload; + + state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; + PPPDEBUG(LOG_DEBUG, ("mppe_compress[%d]: ccount %d\n", pcb->netif->num, state->ccount)); + /* FIXME: use PUT* macros */ + pl[0] = state->ccount>>8; + pl[1] = state->ccount; + + if (!state->stateful || /* stateless mode */ + ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ + (state->bits & MPPE_BIT_FLUSHED)) { /* CCP Reset-Request */ + /* We must rekey */ + if (state->stateful) { + PPPDEBUG(LOG_DEBUG, ("mppe_compress[%d]: rekeying\n", pcb->netif->num)); + } + mppe_rekey(state, 0); + state->bits |= MPPE_BIT_FLUSHED; + } + pl[0] |= state->bits; + state->bits &= ~MPPE_BIT_FLUSHED; /* reset for next xmit */ + pl += MPPE_OVHD; + + /* Add protocol */ + /* FIXME: add PFC support */ + pl[0] = protocol >> 8; + pl[1] = protocol; + + /* Hide MPPE header */ + pbuf_remove_header(np, MPPE_OVHD); + + /* Encrypt packet */ + for (n = np; n != NULL; n = n->next) { + lwip_arc4_crypt(&state->arc4, (u8_t*)n->payload, n->len); + if (n->tot_len == n->len) { + break; + } + } + + /* Reveal MPPE header */ + pbuf_add_header(np, MPPE_OVHD); + + return ERR_OK; +} + +/* + * We received a CCP Reset-Ack. Just ignore it. + */ +void mppe_decomp_reset(ppp_pcb *pcb, ppp_mppe_state *state) +{ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(state); + return; +} + +/* + * Decompress (decrypt) an MPPE packet. + */ +err_t +mppe_decompress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb) +{ + struct pbuf *n0 = *pb, *n; + u8_t *pl; + u16_t ccount; + u8_t flushed; + + /* MPPE Header */ + if (n0->len < MPPE_OVHD) { + PPPDEBUG(LOG_DEBUG, + ("mppe_decompress[%d]: short pkt (%d)\n", + pcb->netif->num, n0->len)); + state->sanity_errors += 100; + goto sanity_error; + } + + pl = (u8_t*)n0->payload; + flushed = MPPE_BITS(pl) & MPPE_BIT_FLUSHED; + ccount = MPPE_CCOUNT(pl); + PPPDEBUG(LOG_DEBUG, ("mppe_decompress[%d]: ccount %d\n", + pcb->netif->num, ccount)); + + /* sanity checks -- terminate with extreme prejudice */ + if (!(MPPE_BITS(pl) & MPPE_BIT_ENCRYPTED)) { + PPPDEBUG(LOG_DEBUG, + ("mppe_decompress[%d]: ENCRYPTED bit not set!\n", + pcb->netif->num)); + state->sanity_errors += 100; + goto sanity_error; + } + if (!state->stateful && !flushed) { + PPPDEBUG(LOG_DEBUG, ("mppe_decompress[%d]: FLUSHED bit not set in " + "stateless mode!\n", pcb->netif->num)); + state->sanity_errors += 100; + goto sanity_error; + } + if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) { + PPPDEBUG(LOG_DEBUG, ("mppe_decompress[%d]: FLUSHED bit not set on " + "flag packet!\n", pcb->netif->num)); + state->sanity_errors += 100; + goto sanity_error; + } + + /* + * Check the coherency count. + */ + + if (!state->stateful) { + /* Discard late packet */ + if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE > MPPE_CCOUNT_SPACE / 2) { + state->sanity_errors++; + goto sanity_error; + } + + /* RFC 3078, sec 8.1. Rekey for every packet. */ + while (state->ccount != ccount) { + mppe_rekey(state, 0); + state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; + } + } else { + /* RFC 3078, sec 8.2. 
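+ * In other words: in stateful mode a count mismatch puts us in the discard
+ * state and a CCP Reset-Request asks the peer to rekey; everything is then
+ * dropped until a packet with the FLUSHED bit arrives, at which point we
+ * rekey once per skipped 256-packet "flag" boundary and resynchronise.
+ * The stateless branch above instead drops only packets that are more than
+ * MPPE_CCOUNT_SPACE/2 positions behind: e.g. with state->ccount == 10, a
+ * stale packet carrying ccount == 4090 gives (4090 - 10) % 4096 == 4080,
+ * which is > 2048, so it is discarded, while anything ahead of us simply
+ * drives the per-packet rekey loop forward.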
*/ + if (!state->discard) { + /* normal state */ + state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; + if (ccount != state->ccount) { + /* + * (ccount > state->ccount) + * Packet loss detected, enter the discard state. + * Signal the peer to rekey (by sending a CCP Reset-Request). + */ + state->discard = 1; + ccp_resetrequest(pcb); + return ERR_BUF; + } + } else { + /* discard state */ + if (!flushed) { + /* ccp.c will be silent (no additional CCP Reset-Requests). */ + return ERR_BUF; + } else { + /* Rekey for every missed "flag" packet. */ + while ((ccount & ~0xff) != + (state->ccount & ~0xff)) { + mppe_rekey(state, 0); + state->ccount = + (state->ccount + + 256) % MPPE_CCOUNT_SPACE; + } + + /* reset */ + state->discard = 0; + state->ccount = ccount; + /* + * Another problem with RFC 3078 here. It implies that the + * peer need not send a Reset-Ack packet. But RFC 1962 + * requires it. Hopefully, M$ does send a Reset-Ack; even + * though it isn't required for MPPE synchronization, it is + * required to reset CCP state. + */ + } + } + if (flushed) + mppe_rekey(state, 0); + } + + /* Hide MPPE header */ + pbuf_remove_header(n0, MPPE_OVHD); + + /* Decrypt the packet. */ + for (n = n0; n != NULL; n = n->next) { + lwip_arc4_crypt(&state->arc4, (u8_t*)n->payload, n->len); + if (n->tot_len == n->len) { + break; + } + } + + /* good packet credit */ + state->sanity_errors >>= 1; + + return ERR_OK; + +sanity_error: + if (state->sanity_errors >= SANITY_MAX) { + /* + * Take LCP down if the peer is sending too many bogons. + * We don't want to do this for a single or just a few + * instances since it could just be due to packet corruption. + */ + lcp_close(pcb, "Too many MPPE errors"); + } + return ERR_BUF; +} + +#endif /* PPP_SUPPORT && MPPE_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c new file mode 100644 index 00000000..62014e8c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c @@ -0,0 +1,609 @@ +/* + * multilink.c - support routines for multilink. + * + * Copyright (c) 2000-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && defined(HAVE_MULTILINK) /* don't build if not configured for use in lwipopts.h */ + +/* Multilink support + * + * Multilink uses Samba TDB (Trivial Database Library), which + * we cannot port, because it needs a filesystem. + * + * We have to choose between doing a memory-shared TDB-clone, + * or dropping multilink support at all. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/tdb.h" + +bool endpoint_specified; /* user gave explicit endpoint discriminator */ +char *bundle_id; /* identifier for our bundle */ +char *blinks_id; /* key for the list of links */ +bool doing_multilink; /* multilink was enabled and agreed to */ +bool multilink_master; /* we own the multilink bundle */ + +extern TDB_CONTEXT *pppdb; +extern char db_key[]; + +static void make_bundle_links (int append); +static void remove_bundle_link (void); +static void iterate_bundle_links (void (*func) (char *)); + +static int get_default_epdisc (struct epdisc *); +static int parse_num (char *str, const char *key, int *valp); +static int owns_unit (TDB_DATA pid, int unit); + +#define set_ip_epdisc(ep, addr) do { \ + ep->length = 4; \ + ep->value[0] = addr >> 24; \ + ep->value[1] = addr >> 16; \ + ep->value[2] = addr >> 8; \ + ep->value[3] = addr; \ +} while (0) + +#define LOCAL_IP_ADDR(addr) \ + (((addr) & 0xff000000) == 0x0a000000 /* 10.x.x.x */ \ + || ((addr) & 0xfff00000) == 0xac100000 /* 172.16.x.x */ \ + || ((addr) & 0xffff0000) == 0xc0a80000) /* 192.168.x.x */ + +#define process_exists(n) (kill((n), 0) == 0 || errno != ESRCH) + +void +mp_check_options() +{ + lcp_options *wo = &lcp_wantoptions[0]; + lcp_options *ao = &lcp_allowoptions[0]; + + doing_multilink = 0; + if (!multilink) + return; + /* if we're doing multilink, we have to negotiate MRRU */ + if (!wo->neg_mrru) { + /* mrru not specified, default to mru */ + wo->mrru = wo->mru; + wo->neg_mrru = 1; + } + ao->mrru = ao->mru; + ao->neg_mrru = 1; + + if (!wo->neg_endpoint && !noendpoint) { + /* get a default endpoint value */ + wo->neg_endpoint = get_default_epdisc(&wo->endpoint); + } +} + +/* + * Make a new bundle or join us to an existing bundle + * if we are doing multilink. + */ +int +mp_join_bundle() +{ + lcp_options *go = &lcp_gotoptions[0]; + lcp_options *ho = &lcp_hisoptions[0]; + lcp_options *ao = &lcp_allowoptions[0]; + int unit, pppd_pid; + int l, mtu; + char *p; + TDB_DATA key, pid, rec; + + if (doing_multilink) { + /* have previously joined a bundle */ + if (!go->neg_mrru || !ho->neg_mrru) { + notice("oops, didn't get multilink on renegotiation"); + lcp_close(pcb, "multilink required"); + return 0; + } + /* XXX should check the peer_authname and ho->endpoint + are the same as previously */ + return 0; + } + + if (!go->neg_mrru || !ho->neg_mrru) { + /* not doing multilink */ + if (go->neg_mrru) + notice("oops, multilink negotiated only for receive"); + mtu = ho->neg_mru? ho->mru: PPP_MRU; + if (mtu > ao->mru) + mtu = ao->mru; + if (demand) { + /* already have a bundle */ + cfg_bundle(0, 0, 0, 0); + netif_set_mtu(pcb, mtu); + return 0; + } + make_new_bundle(0, 0, 0, 0); + set_ifunit(1); + netif_set_mtu(pcb, mtu); + return 0; + } + + doing_multilink = 1; + + /* + * Find the appropriate bundle or join a new one. + * First we make up a name for the bundle. + * The length estimate is worst-case assuming every + * character has to be quoted. 
+ */ + l = 4 * strlen(peer_authname) + 10; + if (ho->neg_endpoint) + l += 3 * ho->endpoint.length + 8; + if (bundle_name) + l += 3 * strlen(bundle_name) + 2; + bundle_id = malloc(l); + if (bundle_id == 0) + novm("bundle identifier"); + + p = bundle_id; + p += slprintf(p, l-1, "BUNDLE=\"%q\"", peer_authname); + if (ho->neg_endpoint || bundle_name) + *p++ = '/'; + if (ho->neg_endpoint) + p += slprintf(p, bundle_id+l-p, "%s", + epdisc_to_str(&ho->endpoint)); + if (bundle_name) + p += slprintf(p, bundle_id+l-p, "/%v", bundle_name); + + /* Make the key for the list of links belonging to the bundle */ + l = p - bundle_id; + blinks_id = malloc(l + 7); + if (blinks_id == NULL) + novm("bundle links key"); + slprintf(blinks_id, l + 7, "BUNDLE_LINKS=%s", bundle_id + 7); + + /* + * For demand mode, we only need to configure the bundle + * and attach the link. + */ + mtu = LWIP_MIN(ho->mrru, ao->mru); + if (demand) { + cfg_bundle(go->mrru, ho->mrru, go->neg_ssnhf, ho->neg_ssnhf); + netif_set_mtu(pcb, mtu); + script_setenv("BUNDLE", bundle_id + 7, 1); + return 0; + } + + /* + * Check if the bundle ID is already in the database. + */ + unit = -1; + lock_db(); + key.dptr = bundle_id; + key.dsize = p - bundle_id; + pid = tdb_fetch(pppdb, key); + if (pid.dptr != NULL) { + /* bundle ID exists, see if the pppd record exists */ + rec = tdb_fetch(pppdb, pid); + if (rec.dptr != NULL && rec.dsize > 0) { + /* make sure the string is null-terminated */ + rec.dptr[rec.dsize-1] = 0; + /* parse the interface number */ + parse_num(rec.dptr, "IFNAME=ppp", &unit); + /* check the pid value */ + if (!parse_num(rec.dptr, "PPPD_PID=", &pppd_pid) + || !process_exists(pppd_pid) + || !owns_unit(pid, unit)) + unit = -1; + free(rec.dptr); + } + free(pid.dptr); + } + + if (unit >= 0) { + /* attach to existing unit */ + if (bundle_attach(unit)) { + set_ifunit(0); + script_setenv("BUNDLE", bundle_id + 7, 0); + make_bundle_links(1); + unlock_db(); + info("Link attached to %s", ifname); + return 1; + } + /* attach failed because bundle doesn't exist */ + } + + /* we have to make a new bundle */ + make_new_bundle(go->mrru, ho->mrru, go->neg_ssnhf, ho->neg_ssnhf); + set_ifunit(1); + netif_set_mtu(pcb, mtu); + script_setenv("BUNDLE", bundle_id + 7, 1); + make_bundle_links(pcb); + unlock_db(); + info("New bundle %s created", ifname); + multilink_master = 1; + return 0; +} + +void mp_exit_bundle() +{ + lock_db(); + remove_bundle_link(); + unlock_db(); +} + +static void sendhup(char *str) +{ + int pid; + + if (parse_num(str, "PPPD_PID=", &pid) && pid != getpid()) { + if (debug) + dbglog("sending SIGHUP to process %d", pid); + kill(pid, SIGHUP); + } +} + +void mp_bundle_terminated() +{ + TDB_DATA key; + + bundle_terminating = 1; + upper_layers_down(pcb); + notice("Connection terminated."); +#if PPP_STATS_SUPPORT + print_link_stats(); +#endif /* PPP_STATS_SUPPORT */ + if (!demand) { + remove_pidfiles(); + script_unsetenv("IFNAME"); + } + + lock_db(); + destroy_bundle(); + iterate_bundle_links(sendhup); + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + tdb_delete(pppdb, key); + unlock_db(); + + new_phase(PPP_PHASE_DEAD); + + doing_multilink = 0; + multilink_master = 0; +} + +static void make_bundle_links(int append) +{ + TDB_DATA key, rec; + char *p; + char entry[32]; + int l; + + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + slprintf(entry, sizeof(entry), "%s;", db_key); + p = entry; + if (append) { + rec = tdb_fetch(pppdb, key); + if (rec.dptr != NULL && rec.dsize > 0) { + rec.dptr[rec.dsize-1] = 0; + if (strstr(rec.dptr, 
db_key) != NULL) { + /* already in there? strange */ + warn("link entry already exists in tdb"); + return; + } + l = rec.dsize + strlen(entry); + p = malloc(l); + if (p == NULL) + novm("bundle link list"); + slprintf(p, l, "%s%s", rec.dptr, entry); + } else { + warn("bundle link list not found"); + } + if (rec.dptr != NULL) + free(rec.dptr); + } + rec.dptr = p; + rec.dsize = strlen(p) + 1; + if (tdb_store(pppdb, key, rec, TDB_REPLACE)) + error("couldn't %s bundle link list", + append? "update": "create"); + if (p != entry) + free(p); +} + +static void remove_bundle_link() +{ + TDB_DATA key, rec; + char entry[32]; + char *p, *q; + int l; + + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + slprintf(entry, sizeof(entry), "%s;", db_key); + + rec = tdb_fetch(pppdb, key); + if (rec.dptr == NULL || rec.dsize <= 0) { + if (rec.dptr != NULL) + free(rec.dptr); + return; + } + rec.dptr[rec.dsize-1] = 0; + p = strstr(rec.dptr, entry); + if (p != NULL) { + q = p + strlen(entry); + l = strlen(q) + 1; + memmove(p, q, l); + rec.dsize = p - rec.dptr + l; + if (tdb_store(pppdb, key, rec, TDB_REPLACE)) + error("couldn't update bundle link list (removal)"); + } + free(rec.dptr); +} + +static void iterate_bundle_links(void (*func)(char *)) +{ + TDB_DATA key, rec, pp; + char *p, *q; + + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + rec = tdb_fetch(pppdb, key); + if (rec.dptr == NULL || rec.dsize <= 0) { + error("bundle link list not found (iterating list)"); + if (rec.dptr != NULL) + free(rec.dptr); + return; + } + p = rec.dptr; + p[rec.dsize-1] = 0; + while ((q = strchr(p, ';')) != NULL) { + *q = 0; + key.dptr = p; + key.dsize = q - p; + pp = tdb_fetch(pppdb, key); + if (pp.dptr != NULL && pp.dsize > 0) { + pp.dptr[pp.dsize-1] = 0; + func(pp.dptr); + } + if (pp.dptr != NULL) + free(pp.dptr); + p = q + 1; + } + free(rec.dptr); +} + +static int +parse_num(str, key, valp) + char *str; + const char *key; + int *valp; +{ + char *p, *endp; + int i; + + p = strstr(str, key); + if (p != 0) { + p += strlen(key); + i = strtol(p, &endp, 10); + if (endp != p && (*endp == 0 || *endp == ';')) { + *valp = i; + return 1; + } + } + return 0; +} + +/* + * Check whether the pppd identified by `key' still owns ppp unit `unit'. + */ +static int +owns_unit(key, unit) + TDB_DATA key; + int unit; +{ + char ifkey[32]; + TDB_DATA kd, vd; + int ret = 0; + + slprintf(ifkey, sizeof(ifkey), "IFNAME=ppp%d", unit); + kd.dptr = ifkey; + kd.dsize = strlen(ifkey); + vd = tdb_fetch(pppdb, kd); + if (vd.dptr != NULL) { + ret = vd.dsize == key.dsize + && memcmp(vd.dptr, key.dptr, vd.dsize) == 0; + free(vd.dptr); + } + return ret; +} + +static int +get_default_epdisc(ep) + struct epdisc *ep; +{ + char *p; + struct hostent *hp; + u32_t addr; + + /* First try for an ethernet MAC address */ + p = get_first_ethernet(); + if (p != 0 && get_if_hwaddr(ep->value, p) >= 0) { + ep->class = EPD_MAC; + ep->length = 6; + return 1; + } + + /* see if our hostname corresponds to a reasonable IP address */ + hp = gethostbyname(hostname); + if (hp != NULL) { + addr = *(u32_t *)hp->h_addr; + if (!bad_ip_adrs(addr)) { + addr = lwip_ntohl(addr); + if (!LOCAL_IP_ADDR(addr)) { + ep->class = EPD_IP; + set_ip_epdisc(ep, addr); + return 1; + } + } + } + + return 0; +} + +/* + * epdisc_to_str - make a printable string from an endpoint discriminator. 
+ */ + +static char *endp_class_names[] = { + "null", "local", "IP", "MAC", "magic", "phone" +}; + +char * +epdisc_to_str(ep) + struct epdisc *ep; +{ + static char str[MAX_ENDP_LEN*3+8]; + u_char *p = ep->value; + int i, mask = 0; + char *q, c, c2; + + if (ep->class == EPD_NULL && ep->length == 0) + return "null"; + if (ep->class == EPD_IP && ep->length == 4) { + u32_t addr; + + GETLONG(addr, p); + slprintf(str, sizeof(str), "IP:%I", lwip_htonl(addr)); + return str; + } + + c = ':'; + c2 = '.'; + if (ep->class == EPD_MAC && ep->length == 6) + c2 = ':'; + else if (ep->class == EPD_MAGIC && (ep->length % 4) == 0) + mask = 3; + q = str; + if (ep->class <= EPD_PHONENUM) + q += slprintf(q, sizeof(str)-1, "%s", + endp_class_names[ep->class]); + else + q += slprintf(q, sizeof(str)-1, "%d", ep->class); + c = ':'; + for (i = 0; i < ep->length && i < MAX_ENDP_LEN; ++i) { + if ((i & mask) == 0) { + *q++ = c; + c = c2; + } + q += slprintf(q, str + sizeof(str) - q, "%.2x", ep->value[i]); + } + return str; +} + +static int hexc_val(int c) +{ + if (c >= 'a') + return c - 'a' + 10; + if (c >= 'A') + return c - 'A' + 10; + return c - '0'; +} + +int +str_to_epdisc(ep, str) + struct epdisc *ep; + char *str; +{ + int i, l; + char *p, *endp; + + for (i = EPD_NULL; i <= EPD_PHONENUM; ++i) { + int sl = strlen(endp_class_names[i]); + if (strncasecmp(str, endp_class_names[i], sl) == 0) { + str += sl; + break; + } + } + if (i > EPD_PHONENUM) { + /* not a class name, try a decimal class number */ + i = strtol(str, &endp, 10); + if (endp == str) + return 0; /* can't parse class number */ + str = endp; + } + ep->class = i; + if (*str == 0) { + ep->length = 0; + return 1; + } + if (*str != ':' && *str != '.') + return 0; + ++str; + + if (i == EPD_IP) { + u32_t addr; + i = parse_dotted_ip(str, &addr); + if (i == 0 || str[i] != 0) + return 0; + set_ip_epdisc(ep, addr); + return 1; + } + if (i == EPD_MAC && get_if_hwaddr(ep->value, str) >= 0) { + ep->length = 6; + return 1; + } + + p = str; + for (l = 0; l < MAX_ENDP_LEN; ++l) { + if (*str == 0) + break; + if (p <= str) + for (p = str; isxdigit(*p); ++p) + ; + i = p - str; + if (i == 0) + return 0; + ep->value[l] = hexc_val(*str++); + if ((i & 1) == 0) + ep->value[l] = (ep->value[l] << 4) + hexc_val(*str++); + if (*str == ':' || *str == '.') + ++str; + } + if (*str != 0 || (ep->class == EPD_MAC && l != 6)) + return 0; + ep->length = l; + return 1; +} + +#endif /* PPP_SUPPORT && HAVE_MULTILINK */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c new file mode 100644 index 00000000..a9c18e30 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c @@ -0,0 +1,1628 @@ +/***************************************************************************** +* ppp.c - Network Point to Point Protocol program file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1997 by Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. 
+* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 97-11-05 Guy Lancaster , Global Election Systems Inc. +* Original. +*****************************************************************************/ + +/* + * ppp_defs.h - PPP definitions. + * + * if_pppvar.h - private structures and declarations for PPP. + * + * Copyright (c) 1994 The Australian National University. + * All rights reserved. + * + * Permission to use, copy, modify, and distribute this software and its + * documentation is hereby granted, provided that the above copyright + * notice appears in all copies. This software is provided without any + * warranty, express or implied. The Australian National University + * makes no representations about the suitability of this software for + * any purpose. + * + * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY + * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF + * THE AUSTRALIAN NATIONAL UNIVERSITY HAVE BEEN ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO + * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, + * OR MODIFICATIONS. + */ + +/* + * if_ppp.h - Point-to-Point Protocol definitions. + * + * Copyright (c) 1989 Carnegie Mellon University. + * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by Carnegie Mellon University. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+ */ + +/** + * @defgroup ppp PPP + * @ingroup netifs + * @verbinclude "ppp.txt" + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/tcpip.h" +#include "lwip/api.h" +#include "lwip/snmp.h" +#include "lwip/ip4.h" /* for ip4_input() */ +#if PPP_IPV6_SUPPORT +#include "lwip/ip6.h" /* for ip6_input() */ +#endif /* PPP_IPV6_SUPPORT */ +#include "lwip/dns.h" + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/pppos.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/magic.h" + +#if PAP_SUPPORT +#include "netif/ppp/upap.h" +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#include "netif/ppp/chap-new.h" +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#include "netif/ppp/eap.h" +#endif /* EAP_SUPPORT */ +#if CCP_SUPPORT +#include "netif/ppp/ccp.h" +#endif /* CCP_SUPPORT */ +#if MPPE_SUPPORT +#include "netif/ppp/mppe.h" +#endif /* MPPE_SUPPORT */ +#if ECP_SUPPORT +#include "netif/ppp/ecp.h" +#endif /* EAP_SUPPORT */ +#if VJ_SUPPORT +#include "netif/ppp/vj.h" +#endif /* VJ_SUPPORT */ +#if PPP_IPV4_SUPPORT +#include "netif/ppp/ipcp.h" +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT +#include "netif/ppp/ipv6cp.h" +#endif /* PPP_IPV6_SUPPORT */ + +/*************************/ +/*** LOCAL DEFINITIONS ***/ +/*************************/ + +/* Memory pools */ +#if PPPOS_SUPPORT +LWIP_MEMPOOL_PROTOTYPE(PPPOS_PCB); +#endif +#if PPPOE_SUPPORT +LWIP_MEMPOOL_PROTOTYPE(PPPOE_IF); +#endif +#if PPPOL2TP_SUPPORT +LWIP_MEMPOOL_PROTOTYPE(PPPOL2TP_PCB); +#endif +#if LWIP_PPP_API && LWIP_MPU_COMPATIBLE +LWIP_MEMPOOL_PROTOTYPE(PPPAPI_MSG); +#endif +LWIP_MEMPOOL_DECLARE(PPP_PCB, MEMP_NUM_PPP_PCB, sizeof(ppp_pcb), "PPP_PCB") + +/* FIXME: add stats per PPP session */ +#if PPP_STATS_SUPPORT +static struct timeval start_time; /* Time when link was started. */ +static struct pppd_stats old_link_stats; +struct pppd_stats link_stats; +unsigned link_connect_time; +int link_stats_valid; +#endif /* PPP_STATS_SUPPORT */ + +/* + * PPP Data Link Layer "protocol" table. + * One entry per supported protocol. + * The last entry must be NULL. + */ +const struct protent* const protocols[] = { + &lcp_protent, +#if PAP_SUPPORT + &pap_protent, +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + &chap_protent, +#endif /* CHAP_SUPPORT */ +#if CBCP_SUPPORT + &cbcp_protent, +#endif /* CBCP_SUPPORT */ +#if PPP_IPV4_SUPPORT + &ipcp_protent, +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + &ipv6cp_protent, +#endif /* PPP_IPV6_SUPPORT */ +#if CCP_SUPPORT + &ccp_protent, +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT + &ecp_protent, +#endif /* ECP_SUPPORT */ +#ifdef AT_CHANGE + &atcp_protent, +#endif /* AT_CHANGE */ +#if EAP_SUPPORT + &eap_protent, +#endif /* EAP_SUPPORT */ + NULL +}; + +/* Prototypes for procedures local to this file. 
*/ +static void ppp_do_connect(void *arg); +static err_t ppp_netif_init_cb(struct netif *netif); +#if PPP_IPV4_SUPPORT +static err_t ppp_netif_output_ip4(struct netif *netif, struct pbuf *pb, const ip4_addr_t *ipaddr); +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT +static err_t ppp_netif_output_ip6(struct netif *netif, struct pbuf *pb, const ip6_addr_t *ipaddr); +#endif /* PPP_IPV6_SUPPORT */ +static err_t ppp_netif_output(struct netif *netif, struct pbuf *pb, u16_t protocol); + +/***********************************/ +/*** PUBLIC FUNCTION DEFINITIONS ***/ +/***********************************/ +#if PPP_AUTH_SUPPORT +void ppp_set_auth(ppp_pcb *pcb, u8_t authtype, const char *user, const char *passwd) { + LWIP_ASSERT_CORE_LOCKED(); +#if PAP_SUPPORT + pcb->settings.refuse_pap = !(authtype & PPPAUTHTYPE_PAP); +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + pcb->settings.refuse_chap = !(authtype & PPPAUTHTYPE_CHAP); +#if MSCHAP_SUPPORT + pcb->settings.refuse_mschap = !(authtype & PPPAUTHTYPE_MSCHAP); + pcb->settings.refuse_mschap_v2 = !(authtype & PPPAUTHTYPE_MSCHAP_V2); +#endif /* MSCHAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + pcb->settings.refuse_eap = !(authtype & PPPAUTHTYPE_EAP); +#endif /* EAP_SUPPORT */ + pcb->settings.user = user; + pcb->settings.passwd = passwd; +} +#endif /* PPP_AUTH_SUPPORT */ + +#if MPPE_SUPPORT +/* Set MPPE configuration */ +void ppp_set_mppe(ppp_pcb *pcb, u8_t flags) { + if (flags == PPP_MPPE_DISABLE) { + pcb->settings.require_mppe = 0; + return; + } + + pcb->settings.require_mppe = 1; + pcb->settings.refuse_mppe_stateful = !(flags & PPP_MPPE_ALLOW_STATEFUL); + pcb->settings.refuse_mppe_40 = !!(flags & PPP_MPPE_REFUSE_40); + pcb->settings.refuse_mppe_128 = !!(flags & PPP_MPPE_REFUSE_128); +} +#endif /* MPPE_SUPPORT */ + +#if PPP_NOTIFY_PHASE +void ppp_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb) { + pcb->notify_phase_cb = notify_phase_cb; + notify_phase_cb(pcb, pcb->phase, pcb->ctx_cb); +} +#endif /* PPP_NOTIFY_PHASE */ + +/* + * Initiate a PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * Holdoff is the time to wait (in seconds) before initiating + * the connection. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. + */ +err_t ppp_connect(ppp_pcb *pcb, u16_t holdoff) { + LWIP_ASSERT_CORE_LOCKED(); + if (pcb->phase != PPP_PHASE_DEAD) { + return ERR_ALREADY; + } + + PPPDEBUG(LOG_DEBUG, ("ppp_connect[%d]: holdoff=%d\n", pcb->netif->num, holdoff)); + + magic_randomize(); + + if (holdoff == 0) { + ppp_do_connect(pcb); + return ERR_OK; + } + + new_phase(pcb, PPP_PHASE_HOLDOFF); + sys_timeout((u32_t)(holdoff*1000), ppp_do_connect, pcb); + return ERR_OK; +} + +#if PPP_SERVER +/* + * Listen for an incoming PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. + */ +err_t ppp_listen(ppp_pcb *pcb) { + LWIP_ASSERT_CORE_LOCKED(); + if (pcb->phase != PPP_PHASE_DEAD) { + return ERR_ALREADY; + } + + PPPDEBUG(LOG_DEBUG, ("ppp_listen[%d]\n", pcb->netif->num)); + + magic_randomize(); + + if (pcb->link_cb->listen) { + new_phase(pcb, PPP_PHASE_INITIALIZE); + pcb->link_cb->listen(pcb, pcb->link_ctx_cb); + return ERR_OK; + } + return ERR_IF; +} +#endif /* PPP_SERVER */ + +/* + * Initiate the end of a PPP connection. + * Any outstanding packets in the queues are dropped. 
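+ * A typical orderly shutdown from application code (an illustrative
+ * sketch, not a fragment of this file) is
+ *
+ *    ppp_close(ppp, 0);
+ *
+ * followed, once the status callback reports PPPERR_USER, by ppp_free(ppp);
+ * ppp_free() refuses to run (ERR_CONN) until the session is back in the
+ * dead phase.
+ *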
+ * + * Setting nocarrier to 1 close the PPP connection without initiating the + * shutdown procedure. Always using nocarrier = 0 is still recommended, + * this is going to take a little longer time if your link is down, but + * is a safer choice for the PPP state machine. + * + * Return 0 on success, an error code on failure. + */ +err_t +ppp_close(ppp_pcb *pcb, u8_t nocarrier) +{ + LWIP_ASSERT_CORE_LOCKED(); + + pcb->err_code = PPPERR_USER; + + /* holdoff phase, cancel the reconnection */ + if (pcb->phase == PPP_PHASE_HOLDOFF) { + sys_untimeout(ppp_do_connect, pcb); + new_phase(pcb, PPP_PHASE_DEAD); + } + + /* dead phase, nothing to do, call the status callback to be consistent */ + if (pcb->phase == PPP_PHASE_DEAD) { + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); + return ERR_OK; + } + + /* Already terminating, nothing to do */ + if (pcb->phase >= PPP_PHASE_TERMINATE) { + return ERR_INPROGRESS; + } + + /* LCP not open, close link protocol */ + if (pcb->phase < PPP_PHASE_ESTABLISH) { + new_phase(pcb, PPP_PHASE_DISCONNECT); + ppp_link_terminated(pcb); + return ERR_OK; + } + + /* + * Only accept carrier lost signal on the stable running phase in order + * to prevent changing the PPP phase FSM in transition phases. + * + * Always using nocarrier = 0 is still recommended, this is going to + * take a little longer time, but is a safer choice from FSM point of view. + */ + if (nocarrier && pcb->phase == PPP_PHASE_RUNNING) { + PPPDEBUG(LOG_DEBUG, ("ppp_close[%d]: carrier lost -> lcp_lowerdown\n", pcb->netif->num)); + lcp_lowerdown(pcb); + /* forced link termination, this will force link protocol to disconnect. */ + link_terminated(pcb); + return ERR_OK; + } + + /* Disconnect */ + PPPDEBUG(LOG_DEBUG, ("ppp_close[%d]: kill_link -> lcp_close\n", pcb->netif->num)); + /* LCP soft close request. */ + lcp_close(pcb, "User request"); + return ERR_OK; +} + +/* + * Release the control block. + * + * This can only be called if PPP is in the dead phase. + * + * You must use ppp_close() before if you wish to terminate + * an established PPP session. + * + * Return 0 on success, an error code on failure. + */ +err_t ppp_free(ppp_pcb *pcb) { + err_t err; + LWIP_ASSERT_CORE_LOCKED(); + if (pcb->phase != PPP_PHASE_DEAD) { + return ERR_CONN; + } + + PPPDEBUG(LOG_DEBUG, ("ppp_free[%d]\n", pcb->netif->num)); + + netif_remove(pcb->netif); + + err = pcb->link_cb->free(pcb, pcb->link_ctx_cb); + + LWIP_MEMPOOL_FREE(PPP_PCB, pcb); + return err; +} + +/* Get and set parameters for the given connection. + * Return 0 on success, an error code on failure. */ +err_t +ppp_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb == NULL) { + return ERR_VAL; + } + + switch(cmd) { + case PPPCTLG_UPSTATUS: /* Get the PPP up status. */ + if (!arg) { + goto fail; + } + *(int *)arg = (int)(0 +#if PPP_IPV4_SUPPORT + || pcb->if4_up +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + || pcb->if6_up +#endif /* PPP_IPV6_SUPPORT */ + ); + return ERR_OK; + + case PPPCTLG_ERRCODE: /* Get the PPP error code. 
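+ * For illustration (not part of the original source), a caller polls these
+ * with
+ *
+ *    int up = 0, last_err = 0;
+ *    ppp_ioctl(ppp, PPPCTLG_UPSTATUS, &up);
+ *    ppp_ioctl(ppp, PPPCTLG_ERRCODE, &last_err);
+ *
+ * where ppp is the ppp_pcb in question, both commands expect a pointer to
+ * int, and ERR_VAL is returned for a null argument or an unknown command.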
*/ + if (!arg) { + goto fail; + } + *(int *)arg = (int)(pcb->err_code); + return ERR_OK; + + default: + goto fail; + } + +fail: + return ERR_VAL; +} + + +/**********************************/ +/*** LOCAL FUNCTION DEFINITIONS ***/ +/**********************************/ + +static void ppp_do_connect(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + LWIP_ASSERT("pcb->phase == PPP_PHASE_DEAD || pcb->phase == PPP_PHASE_HOLDOFF", pcb->phase == PPP_PHASE_DEAD || pcb->phase == PPP_PHASE_HOLDOFF); + + new_phase(pcb, PPP_PHASE_INITIALIZE); + pcb->link_cb->connect(pcb, pcb->link_ctx_cb); +} + +/* + * ppp_netif_init_cb - netif init callback + */ +static err_t ppp_netif_init_cb(struct netif *netif) { + netif->name[0] = 'p'; + netif->name[1] = 'p'; +#if PPP_IPV4_SUPPORT + netif->output = ppp_netif_output_ip4; +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + netif->output_ip6 = ppp_netif_output_ip6; +#endif /* PPP_IPV6_SUPPORT */ + netif->flags = NETIF_FLAG_UP; +#if LWIP_NETIF_HOSTNAME + /* @todo: Initialize interface hostname */ + /* netif_set_hostname(netif, "lwip"); */ +#endif /* LWIP_NETIF_HOSTNAME */ + return ERR_OK; +} + +#if PPP_IPV4_SUPPORT +/* + * Send an IPv4 packet on the given connection. + */ +static err_t ppp_netif_output_ip4(struct netif *netif, struct pbuf *pb, const ip4_addr_t *ipaddr) { + LWIP_UNUSED_ARG(ipaddr); + return ppp_netif_output(netif, pb, PPP_IP); +} +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT +/* + * Send an IPv6 packet on the given connection. + */ +static err_t ppp_netif_output_ip6(struct netif *netif, struct pbuf *pb, const ip6_addr_t *ipaddr) { + LWIP_UNUSED_ARG(ipaddr); + return ppp_netif_output(netif, pb, PPP_IPV6); +} +#endif /* PPP_IPV6_SUPPORT */ + +static err_t ppp_netif_output(struct netif *netif, struct pbuf *pb, u16_t protocol) { + ppp_pcb *pcb = (ppp_pcb*)netif->state; + err_t err; + struct pbuf *fpb = NULL; + + /* Check that the link is up. */ + if (0 +#if PPP_IPV4_SUPPORT + || (protocol == PPP_IP && !pcb->if4_up) +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + || (protocol == PPP_IPV6 && !pcb->if6_up) +#endif /* PPP_IPV6_SUPPORT */ + ) { + PPPDEBUG(LOG_ERR, ("ppp_netif_output[%d]: link not up\n", pcb->netif->num)); + goto err_rte_drop; + } + +#if MPPE_SUPPORT + /* If MPPE is required, refuse any IP packet until we are able to crypt them. */ + if (pcb->settings.require_mppe && pcb->ccp_transmit_method != CI_MPPE) { + PPPDEBUG(LOG_ERR, ("ppp_netif_output[%d]: MPPE required, not up\n", pcb->netif->num)); + goto err_rte_drop; + } +#endif /* MPPE_SUPPORT */ + +#if VJ_SUPPORT + /* + * Attempt Van Jacobson header compression if VJ is configured and + * this is an IP packet. + */ + if (protocol == PPP_IP && pcb->vj_enabled) { + switch (vj_compress_tcp(&pcb->vj_comp, &pb)) { + case TYPE_IP: + /* No change... 
+ protocol = PPP_IP; */ + break; + case TYPE_COMPRESSED_TCP: + /* vj_compress_tcp() returns a new allocated pbuf, indicate we should free + * our duplicated pbuf later */ + fpb = pb; + protocol = PPP_VJC_COMP; + break; + case TYPE_UNCOMPRESSED_TCP: + /* vj_compress_tcp() returns a new allocated pbuf, indicate we should free + * our duplicated pbuf later */ + fpb = pb; + protocol = PPP_VJC_UNCOMP; + break; + default: + PPPDEBUG(LOG_WARNING, ("ppp_netif_output[%d]: bad IP packet\n", pcb->netif->num)); + LINK_STATS_INC(link.proterr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(pcb->netif, ifoutdiscards); + return ERR_VAL; + } + } +#endif /* VJ_SUPPORT */ + +#if CCP_SUPPORT + switch (pcb->ccp_transmit_method) { + case 0: + break; /* Don't compress */ +#if MPPE_SUPPORT + case CI_MPPE: + if ((err = mppe_compress(pcb, &pcb->mppe_comp, &pb, protocol)) != ERR_OK) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + goto err; + } + /* if VJ compressor returned a new allocated pbuf, free it */ + if (fpb) { + pbuf_free(fpb); + } + /* mppe_compress() returns a new allocated pbuf, indicate we should free + * our duplicated pbuf later */ + fpb = pb; + protocol = PPP_COMP; + break; +#endif /* MPPE_SUPPORT */ + default: + PPPDEBUG(LOG_ERR, ("ppp_netif_output[%d]: bad CCP transmit method\n", pcb->netif->num)); + goto err_rte_drop; /* Cannot really happen, we only negotiate what we are able to do */ + } +#endif /* CCP_SUPPORT */ + + err = pcb->link_cb->netif_output(pcb, pcb->link_ctx_cb, pb, protocol); + goto err; + +err_rte_drop: + err = ERR_RTE; + LINK_STATS_INC(link.rterr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); +err: + if (fpb) { + pbuf_free(fpb); + } + return err; +} + +/************************************/ +/*** PRIVATE FUNCTION DEFINITIONS ***/ +/************************************/ + +/* Initialize the PPP subsystem. */ +int ppp_init(void) +{ +#if PPPOS_SUPPORT + LWIP_MEMPOOL_INIT(PPPOS_PCB); +#endif +#if PPPOE_SUPPORT + LWIP_MEMPOOL_INIT(PPPOE_IF); +#endif +#if PPPOL2TP_SUPPORT + LWIP_MEMPOOL_INIT(PPPOL2TP_PCB); +#endif +#if LWIP_PPP_API && LWIP_MPU_COMPATIBLE + LWIP_MEMPOOL_INIT(PPPAPI_MSG); +#endif + + LWIP_MEMPOOL_INIT(PPP_PCB); + + /* + * Initialize magic number generator now so that protocols may + * use magic numbers in initialization. + */ + magic_init(); + + return 0; +} + +/* + * Create a new PPP control block. + * + * This initializes the PPP control block but does not + * attempt to negotiate the LCP session. + * + * Return a new PPP connection control block pointer + * on success or a null pointer on failure. + */ +ppp_pcb *ppp_new(struct netif *pppif, const struct link_callbacks *callbacks, void *link_ctx_cb, ppp_link_status_cb_fn link_status_cb, void *ctx_cb) { + ppp_pcb *pcb; + const struct protent *protp; + int i; + + /* PPP is single-threaded: without a callback, + * there is no way to know when the link is up. 
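+ * A minimal status-callback sketch (illustrative only, not part of the
+ * upstream sources; "status_cb" and the ctx handling are placeholders):
+ *
+ *   static void status_cb(ppp_pcb *pcb, int err_code, void *ctx) {
+ *     if (err_code == PPPERR_NONE) {
+ *       ...                        session is up, addresses are usable
+ *     } else if (err_code == PPPERR_USER) {
+ *       ppp_free(pcb);             closed on request, pcb is in dead phase
+ *     } else {
+ *       ...                        failed or lost, optionally reconnect
+ *     }
+ *   }
+ *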
*/ + if (link_status_cb == NULL) { + return NULL; + } + + pcb = (ppp_pcb*)LWIP_MEMPOOL_ALLOC(PPP_PCB); + if (pcb == NULL) { + return NULL; + } + + memset(pcb, 0, sizeof(ppp_pcb)); + + /* default configuration */ +#if PAP_SUPPORT + pcb->settings.pap_timeout_time = UPAP_DEFTIMEOUT; + pcb->settings.pap_max_transmits = UPAP_DEFTRANSMITS; +#if PPP_SERVER + pcb->settings.pap_req_timeout = UPAP_DEFREQTIME; +#endif /* PPP_SERVER */ +#endif /* PAP_SUPPORT */ + +#if CHAP_SUPPORT + pcb->settings.chap_timeout_time = CHAP_DEFTIMEOUT; + pcb->settings.chap_max_transmits = CHAP_DEFTRANSMITS; +#if PPP_SERVER + pcb->settings.chap_rechallenge_time = CHAP_DEFRECHALLENGETIME; +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPPORT */ + +#if EAP_SUPPORT + pcb->settings.eap_req_time = EAP_DEFREQTIME; + pcb->settings.eap_allow_req = EAP_DEFALLOWREQ; +#if PPP_SERVER + pcb->settings.eap_timeout_time = EAP_DEFTIMEOUT; + pcb->settings.eap_max_transmits = EAP_DEFTRANSMITS; +#endif /* PPP_SERVER */ +#endif /* EAP_SUPPORT */ + + pcb->settings.lcp_loopbackfail = LCP_DEFLOOPBACKFAIL; + pcb->settings.lcp_echo_interval = LCP_ECHOINTERVAL; + pcb->settings.lcp_echo_fails = LCP_MAXECHOFAILS; + + pcb->settings.fsm_timeout_time = FSM_DEFTIMEOUT; + pcb->settings.fsm_max_conf_req_transmits = FSM_DEFMAXCONFREQS; + pcb->settings.fsm_max_term_transmits = FSM_DEFMAXTERMREQS; + pcb->settings.fsm_max_nak_loops = FSM_DEFMAXNAKLOOPS; + + pcb->netif = pppif; + MIB2_INIT_NETIF(pppif, snmp_ifType_ppp, 0); + if (!netif_add(pcb->netif, +#if LWIP_IPV4 + IP4_ADDR_ANY4, IP4_ADDR_BROADCAST, IP4_ADDR_ANY4, +#endif /* LWIP_IPV4 */ + (void *)pcb, ppp_netif_init_cb, NULL)) { + LWIP_MEMPOOL_FREE(PPP_PCB, pcb); + PPPDEBUG(LOG_ERR, ("ppp_new: netif_add failed\n")); + return NULL; + } + + pcb->link_cb = callbacks; + pcb->link_ctx_cb = link_ctx_cb; + pcb->link_status_cb = link_status_cb; + pcb->ctx_cb = ctx_cb; + + /* + * Initialize each protocol. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + (*protp->init)(pcb); + } + + new_phase(pcb, PPP_PHASE_DEAD); + return pcb; +} + +/** Initiate LCP open request */ +void ppp_start(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_start[%d]\n", pcb->netif->num)); + + /* Clean data not taken care by anything else, mostly shared data. */ +#if PPP_STATS_SUPPORT + link_stats_valid = 0; +#endif /* PPP_STATS_SUPPORT */ +#if MPPE_SUPPORT + pcb->mppe_keys_set = 0; + memset(&pcb->mppe_comp, 0, sizeof(pcb->mppe_comp)); + memset(&pcb->mppe_decomp, 0, sizeof(pcb->mppe_decomp)); +#endif /* MPPE_SUPPORT */ +#if VJ_SUPPORT + vj_compress_init(&pcb->vj_comp); +#endif /* VJ_SUPPORT */ + + /* Start protocol */ + new_phase(pcb, PPP_PHASE_ESTABLISH); + lcp_open(pcb); + lcp_lowerup(pcb); + PPPDEBUG(LOG_DEBUG, ("ppp_start[%d]: finished\n", pcb->netif->num)); +} + +/** Called when link failed to setup */ +void ppp_link_failed(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_link_failed[%d]\n", pcb->netif->num)); + new_phase(pcb, PPP_PHASE_DEAD); + pcb->err_code = PPPERR_OPEN; + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); +} + +/** Called when link is normally down (i.e. it was asked to end) */ +void ppp_link_end(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_link_end[%d]\n", pcb->netif->num)); + new_phase(pcb, PPP_PHASE_DEAD); + if (pcb->err_code == PPPERR_NONE) { + pcb->err_code = PPPERR_CONNECT; + } + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); +} + +/* + * Pass the processed input packet to the appropriate handler. 
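+ * The first two payload bytes are the PPP protocol field, read
+ * big-endian below: for example a frame beginning with the bytes
+ * 0xC0 0x21 is dispatched to LCP, 0x00 0x21 goes to ip4_input() and
+ * 0x00 0x57 to ip6_input(), matching the protocol_list table further
+ * down in this file.
+ *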
+ * This function and all handlers run in the context of the tcpip_thread + */ +void ppp_input(ppp_pcb *pcb, struct pbuf *pb) { + u16_t protocol; +#if PPP_DEBUG && PPP_PROTOCOLNAME + const char *pname; +#endif /* PPP_DEBUG && PPP_PROTOCOLNAME */ + + magic_randomize(); + + if (pb->len < 2) { + PPPDEBUG(LOG_ERR, ("ppp_input[%d]: packet too short\n", pcb->netif->num)); + goto drop; + } + protocol = (((u8_t *)pb->payload)[0] << 8) | ((u8_t*)pb->payload)[1]; + +#if PRINTPKT_SUPPORT + ppp_dump_packet(pcb, "rcvd", (unsigned char *)pb->payload, pb->len); +#endif /* PRINTPKT_SUPPORT */ + + pbuf_remove_header(pb, sizeof(protocol)); + + LINK_STATS_INC(link.recv); + MIB2_STATS_NETIF_INC(pcb->netif, ifinucastpkts); + MIB2_STATS_NETIF_ADD(pcb->netif, ifinoctets, pb->tot_len); + + /* + * Toss all non-LCP packets unless LCP is OPEN. + */ + if (protocol != PPP_LCP && pcb->lcp_fsm.state != PPP_FSM_OPENED) { + ppp_dbglog("Discarded non-LCP packet when LCP not open"); + goto drop; + } + + /* + * Until we get past the authentication phase, toss all packets + * except LCP, LQR and authentication packets. + */ + if (pcb->phase <= PPP_PHASE_AUTHENTICATE + && !(protocol == PPP_LCP +#if LQR_SUPPORT + || protocol == PPP_LQR +#endif /* LQR_SUPPORT */ +#if PAP_SUPPORT + || protocol == PPP_PAP +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + || protocol == PPP_CHAP +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || protocol == PPP_EAP +#endif /* EAP_SUPPORT */ + )) { + ppp_dbglog("discarding proto 0x%x in phase %d", protocol, pcb->phase); + goto drop; + } + +#if CCP_SUPPORT +#if MPPE_SUPPORT + /* + * MPPE is required and unencrypted data has arrived (this + * should never happen!). We should probably drop the link if + * the protocol is in the range of what should be encrypted. + * At the least, we drop this packet. + */ + if (pcb->settings.require_mppe && protocol != PPP_COMP && protocol < 0x8000) { + PPPDEBUG(LOG_ERR, ("ppp_input[%d]: MPPE required, received unencrypted data!\n", pcb->netif->num)); + goto drop; + } +#endif /* MPPE_SUPPORT */ + + if (protocol == PPP_COMP) { + u8_t *pl; + + switch (pcb->ccp_receive_method) { +#if MPPE_SUPPORT + case CI_MPPE: + if (mppe_decompress(pcb, &pcb->mppe_decomp, &pb) != ERR_OK) { + goto drop; + } + break; +#endif /* MPPE_SUPPORT */ + default: + PPPDEBUG(LOG_ERR, ("ppp_input[%d]: bad CCP receive method\n", pcb->netif->num)); + goto drop; /* Cannot really happen, we only negotiate what we are able to do */ + } + + /* Assume no PFC */ + if (pb->len < 2) { + goto drop; + } + + /* Extract and hide protocol (do PFC decompression if necessary) */ + pl = (u8_t*)pb->payload; + if (pl[0] & 0x01) { + protocol = pl[0]; + pbuf_remove_header(pb, 1); + } else { + protocol = (pl[0] << 8) | pl[1]; + pbuf_remove_header(pb, 2); + } + } +#endif /* CCP_SUPPORT */ + + switch(protocol) { + +#if PPP_IPV4_SUPPORT + case PPP_IP: /* Internet Protocol */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: ip in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + ip4_input(pb, pcb->netif); + return; +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT + case PPP_IPV6: /* Internet Protocol Version 6 */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: ip6 in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + ip6_input(pb, pcb->netif); + return; +#endif /* PPP_IPV6_SUPPORT */ + +#if VJ_SUPPORT + case PPP_VJC_COMP: /* VJ compressed TCP */ + /* + * Clip off the VJ header and prepend the rebuilt TCP/IP header and + * pass the result to IP. 
+ */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: vj_comp in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + if (pcb->vj_enabled && vj_uncompress_tcp(&pb, &pcb->vj_comp) >= 0) { + ip4_input(pb, pcb->netif); + return; + } + /* Something's wrong so drop it. */ + PPPDEBUG(LOG_WARNING, ("ppp_input[%d]: Dropping VJ compressed\n", pcb->netif->num)); + break; + + case PPP_VJC_UNCOMP: /* VJ uncompressed TCP */ + /* + * Process the TCP/IP header for VJ header compression and then pass + * the packet to IP. + */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: vj_un in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + if (pcb->vj_enabled && vj_uncompress_uncomp(pb, &pcb->vj_comp) >= 0) { + ip4_input(pb, pcb->netif); + return; + } + /* Something's wrong so drop it. */ + PPPDEBUG(LOG_WARNING, ("ppp_input[%d]: Dropping VJ uncompressed\n", pcb->netif->num)); + break; +#endif /* VJ_SUPPORT */ + + default: { + int i; + const struct protent *protp; + + /* + * Upcall the proper protocol input routine. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + if (protp->protocol == protocol) { + pb = pbuf_coalesce(pb, PBUF_RAW); + (*protp->input)(pcb, (u8_t*)pb->payload, pb->len); + goto out; + } +#if 0 /* UNUSED + * + * This is actually a (hacked?) way for the Linux kernel to pass a data + * packet to pppd. pppd in normal condition only do signaling + * (LCP, PAP, CHAP, IPCP, ...) and does not handle any data packet at all. + * + * We don't even need this interface, which is only there because of PPP + * interface limitation between Linux kernel and pppd. For MPPE, which uses + * CCP to negotiate although it is not really a (de)compressor, we added + * ccp_resetrequest() in CCP and MPPE input data flow is calling either + * ccp_resetrequest() or lcp_close() if the issue is, respectively, non-fatal + * or fatal, this is what ccp_datainput() really do. + */ + if (protocol == (protp->protocol & ~0x8000) + && protp->datainput != NULL) { + (*protp->datainput)(pcb, pb->payload, pb->len); + goto out; + } +#endif /* UNUSED */ + } + +#if PPP_DEBUG +#if PPP_PROTOCOLNAME + pname = protocol_name(protocol); + if (pname != NULL) { + ppp_warn("Unsupported protocol '%s' (0x%x) received", pname, protocol); + } else +#endif /* PPP_PROTOCOLNAME */ + ppp_warn("Unsupported protocol 0x%x received", protocol); +#endif /* PPP_DEBUG */ + if (pbuf_add_header(pb, sizeof(protocol))) { + PPPDEBUG(LOG_WARNING, ("ppp_input[%d]: Dropping (pbuf_add_header failed)\n", pcb->netif->num)); + goto drop; + } + lcp_sprotrej(pcb, (u8_t*)pb->payload, pb->len); + } + break; + } + +drop: + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(pcb->netif, ifindiscards); + +out: + pbuf_free(pb); +} + +/* + * Write a pbuf to a ppp link, only used from PPP functions + * to send PPP packets. + * + * IPv4 and IPv6 packets from lwIP are sent, respectively, + * with ppp_netif_output_ip4() and ppp_netif_output_ip6() + * functions (which are callbacks of the netif PPP interface). 
+ */ +err_t ppp_write(ppp_pcb *pcb, struct pbuf *p) { +#if PRINTPKT_SUPPORT + ppp_dump_packet(pcb, "sent", (unsigned char *)p->payload+2, p->len-2); +#endif /* PRINTPKT_SUPPORT */ + return pcb->link_cb->write(pcb, pcb->link_ctx_cb, p); +} + +void ppp_link_terminated(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_link_terminated[%d]\n", pcb->netif->num)); + pcb->link_cb->disconnect(pcb, pcb->link_ctx_cb); + PPPDEBUG(LOG_DEBUG, ("ppp_link_terminated[%d]: finished.\n", pcb->netif->num)); +} + + +/************************************************************************ + * Functions called by various PPP subsystems to configure + * the PPP interface or change the PPP phase. + */ + +/* + * new_phase - signal the start of a new phase of pppd's operation. + */ +void new_phase(ppp_pcb *pcb, int p) { + pcb->phase = p; + PPPDEBUG(LOG_DEBUG, ("ppp phase changed[%d]: phase=%d\n", pcb->netif->num, pcb->phase)); +#if PPP_NOTIFY_PHASE + if (pcb->notify_phase_cb != NULL) { + pcb->notify_phase_cb(pcb, p, pcb->ctx_cb); + } +#endif /* PPP_NOTIFY_PHASE */ +} + +/* + * ppp_send_config - configure the transmit-side characteristics of + * the ppp interface. + */ +int ppp_send_config(ppp_pcb *pcb, int mtu, u32_t accm, int pcomp, int accomp) { + LWIP_UNUSED_ARG(mtu); + /* pcb->mtu = mtu; -- set correctly with netif_set_mtu */ + + if (pcb->link_cb->send_config) { + pcb->link_cb->send_config(pcb, pcb->link_ctx_cb, accm, pcomp, accomp); + } + + PPPDEBUG(LOG_INFO, ("ppp_send_config[%d]\n", pcb->netif->num) ); + return 0; +} + +/* + * ppp_recv_config - configure the receive-side characteristics of + * the ppp interface. + */ +int ppp_recv_config(ppp_pcb *pcb, int mru, u32_t accm, int pcomp, int accomp) { + LWIP_UNUSED_ARG(mru); + + if (pcb->link_cb->recv_config) { + pcb->link_cb->recv_config(pcb, pcb->link_ctx_cb, accm, pcomp, accomp); + } + + PPPDEBUG(LOG_INFO, ("ppp_recv_config[%d]\n", pcb->netif->num)); + return 0; +} + +#if PPP_IPV4_SUPPORT +/* + * sifaddr - Config the interface IP addresses and netmask. + */ +int sifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr, u32_t netmask) { + ip4_addr_t ip, nm, gw; + + ip4_addr_set_u32(&ip, our_adr); + ip4_addr_set_u32(&nm, netmask); + ip4_addr_set_u32(&gw, his_adr); + netif_set_addr(pcb->netif, &ip, &nm, &gw); + return 1; +} + +/******************************************************************** + * + * cifaddr - Clear the interface IP addresses, and delete routes + * through the interface if possible. + */ +int cifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr) { + LWIP_UNUSED_ARG(our_adr); + LWIP_UNUSED_ARG(his_adr); + + netif_set_addr(pcb->netif, IP4_ADDR_ANY4, IP4_ADDR_BROADCAST, IP4_ADDR_ANY4); + return 1; +} + +#if 0 /* UNUSED - PROXY ARP */ +/******************************************************************** + * + * sifproxyarp - Make a proxy ARP entry for the peer. + */ + +int sifproxyarp(ppp_pcb *pcb, u32_t his_adr) { + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(his_adr); + return 0; +} + +/******************************************************************** + * + * cifproxyarp - Delete the proxy ARP entry for the peer. 
+ */ + +int cifproxyarp(ppp_pcb *pcb, u32_t his_adr) { + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(his_adr); + return 0; +} +#endif /* UNUSED - PROXY ARP */ + +#if LWIP_DNS +/* + * sdns - Config the DNS servers + */ +int sdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2) { + ip_addr_t ns; + LWIP_UNUSED_ARG(pcb); + + ip_addr_set_ip4_u32_val(ns, ns1); + dns_setserver(0, &ns); + ip_addr_set_ip4_u32_val(ns, ns2); + dns_setserver(1, &ns); + return 1; +} + +/******************************************************************** + * + * cdns - Clear the DNS servers + */ +int cdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2) { + const ip_addr_t *nsa; + ip_addr_t nsb; + LWIP_UNUSED_ARG(pcb); + + nsa = dns_getserver(0); + ip_addr_set_ip4_u32_val(nsb, ns1); + if (ip_addr_cmp(nsa, &nsb)) { + dns_setserver(0, IP_ADDR_ANY); + } + nsa = dns_getserver(1); + ip_addr_set_ip4_u32_val(nsb, ns2); + if (ip_addr_cmp(nsa, &nsb)) { + dns_setserver(1, IP_ADDR_ANY); + } + return 1; +} +#endif /* LWIP_DNS */ + +#if VJ_SUPPORT +/******************************************************************** + * + * sifvjcomp - config tcp header compression + */ +int sifvjcomp(ppp_pcb *pcb, int vjcomp, int cidcomp, int maxcid) { + pcb->vj_enabled = vjcomp; + pcb->vj_comp.compressSlot = cidcomp; + pcb->vj_comp.maxSlotIndex = maxcid; + PPPDEBUG(LOG_INFO, ("sifvjcomp[%d]: VJ compress enable=%d slot=%d max slot=%d\n", + pcb->netif->num, vjcomp, cidcomp, maxcid)); + return 0; +} +#endif /* VJ_SUPPORT */ + +/* + * sifup - Config the interface up and enable IP packets to pass. + */ +int sifup(ppp_pcb *pcb) { + pcb->if4_up = 1; + pcb->err_code = PPPERR_NONE; + netif_set_link_up(pcb->netif); + + PPPDEBUG(LOG_DEBUG, ("sifup[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); + return 1; +} + +/******************************************************************** + * + * sifdown - Disable the indicated protocol and config the interface + * down if there are no remaining protocols. + */ +int sifdown(ppp_pcb *pcb) { + + pcb->if4_up = 0; + + if (1 +#if PPP_IPV6_SUPPORT + /* set the interface down if IPv6 is down as well */ + && !pcb->if6_up +#endif /* PPP_IPV6_SUPPORT */ + ) { + /* make sure the netif link callback is called */ + netif_set_link_down(pcb->netif); + } + PPPDEBUG(LOG_DEBUG, ("sifdown[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + return 1; +} + +/******************************************************************** + * + * Return user specified netmask, modified by any mask we might determine + * for address `addr' (in network byte order). + * Here we scan through the system's list of interfaces, looking for + * any non-point-to-point interfaces which might appear to be on the same + * network as `addr'. If we find any, we OR in their netmask to the + * user-specified netmask. + */ +u32_t get_mask(u32_t addr) { +#if 0 + u32_t mask, nmask; + + addr = lwip_htonl(addr); + if (IP_CLASSA(addr)) { /* determine network mask for address class */ + nmask = IP_CLASSA_NET; + } else if (IP_CLASSB(addr)) { + nmask = IP_CLASSB_NET; + } else { + nmask = IP_CLASSC_NET; + } + + /* class D nets are disallowed by bad_ip_adrs */ + mask = PP_HTONL(0xffffff00UL) | lwip_htonl(nmask); + + /* XXX + * Scan through the system's network interfaces. + * Get each netmask and OR them into our mask. 
+ */ + /* return mask; */ + return mask; +#endif /* 0 */ + LWIP_UNUSED_ARG(addr); + return IPADDR_BROADCAST; +} +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT +#define IN6_LLADDR_FROM_EUI64(ip6, eui64) do { \ + ip6.addr[0] = PP_HTONL(0xfe800000); \ + ip6.addr[1] = 0; \ + eui64_copy(eui64, ip6.addr[2]); \ + } while (0) + +/******************************************************************** + * + * sif6addr - Config the interface with an IPv6 link-local address + */ +int sif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64) { + ip6_addr_t ip6; + LWIP_UNUSED_ARG(his_eui64); + + IN6_LLADDR_FROM_EUI64(ip6, our_eui64); + netif_ip6_addr_set(pcb->netif, 0, &ip6); + netif_ip6_addr_set_state(pcb->netif, 0, IP6_ADDR_PREFERRED); + /* FIXME: should we add an IPv6 static neighbor using his_eui64 ? */ + return 1; +} + +/******************************************************************** + * + * cif6addr - Remove IPv6 address from interface + */ +int cif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64) { + LWIP_UNUSED_ARG(our_eui64); + LWIP_UNUSED_ARG(his_eui64); + + netif_ip6_addr_set_state(pcb->netif, 0, IP6_ADDR_INVALID); + netif_ip6_addr_set(pcb->netif, 0, IP6_ADDR_ANY6); + return 1; +} + +/* + * sif6up - Config the interface up and enable IPv6 packets to pass. + */ +int sif6up(ppp_pcb *pcb) { + + pcb->if6_up = 1; + pcb->err_code = PPPERR_NONE; + netif_set_link_up(pcb->netif); + + PPPDEBUG(LOG_DEBUG, ("sif6up[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); + return 1; +} + +/******************************************************************** + * + * sif6down - Disable the indicated protocol and config the interface + * down if there are no remaining protocols. + */ +int sif6down(ppp_pcb *pcb) { + + pcb->if6_up = 0; + + if (1 +#if PPP_IPV4_SUPPORT + /* set the interface down if IPv4 is down as well */ + && !pcb->if4_up +#endif /* PPP_IPV4_SUPPORT */ + ) { + /* make sure the netif link callback is called */ + netif_set_link_down(pcb->netif); + } + PPPDEBUG(LOG_DEBUG, ("sif6down[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + return 1; +} +#endif /* PPP_IPV6_SUPPORT */ + +#if DEMAND_SUPPORT +/* + * sifnpmode - Set the mode for handling packets for a given NP. + */ +int sifnpmode(ppp_pcb *pcb, int proto, enum NPmode mode) { + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(proto); + LWIP_UNUSED_ARG(mode); + return 0; +} +#endif /* DEMAND_SUPPORT */ + +/* + * netif_set_mtu - set the MTU on the PPP network interface. + */ +void netif_set_mtu(ppp_pcb *pcb, int mtu) { + + pcb->netif->mtu = mtu; + PPPDEBUG(LOG_INFO, ("netif_set_mtu[%d]: mtu=%d\n", pcb->netif->num, mtu)); +} + +/* + * netif_get_mtu - get PPP interface MTU + */ +int netif_get_mtu(ppp_pcb *pcb) { + + return pcb->netif->mtu; +} + +#if CCP_SUPPORT +#if 0 /* unused */ +/* + * ccp_test - whether a given compression method is acceptable for use. + */ +int +ccp_test(ppp_pcb *pcb, u_char *opt_ptr, int opt_len, int for_transmit) +{ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(opt_ptr); + LWIP_UNUSED_ARG(opt_len); + LWIP_UNUSED_ARG(for_transmit); + return -1; +} +#endif /* unused */ + +/* + * ccp_set - inform about the current state of CCP. 
+ */ +void +ccp_set(ppp_pcb *pcb, u8_t isopen, u8_t isup, u8_t receive_method, u8_t transmit_method) +{ + LWIP_UNUSED_ARG(isopen); + LWIP_UNUSED_ARG(isup); + pcb->ccp_receive_method = receive_method; + pcb->ccp_transmit_method = transmit_method; + PPPDEBUG(LOG_DEBUG, ("ccp_set[%d]: is_open=%d, is_up=%d, receive_method=%u, transmit_method=%u\n", + pcb->netif->num, isopen, isup, receive_method, transmit_method)); +} + +void +ccp_reset_comp(ppp_pcb *pcb) +{ + switch (pcb->ccp_transmit_method) { +#if MPPE_SUPPORT + case CI_MPPE: + mppe_comp_reset(pcb, &pcb->mppe_comp); + break; +#endif /* MPPE_SUPPORT */ + default: + break; + } +} + +void +ccp_reset_decomp(ppp_pcb *pcb) +{ + switch (pcb->ccp_receive_method) { +#if MPPE_SUPPORT + case CI_MPPE: + mppe_decomp_reset(pcb, &pcb->mppe_decomp); + break; +#endif /* MPPE_SUPPORT */ + default: + break; + } +} + +#if 0 /* unused */ +/* + * ccp_fatal_error - returns 1 if decompression was disabled as a + * result of an error detected after decompression of a packet, + * 0 otherwise. This is necessary because of patent nonsense. + */ +int +ccp_fatal_error(ppp_pcb *pcb) +{ + LWIP_UNUSED_ARG(pcb); + return 1; +} +#endif /* unused */ +#endif /* CCP_SUPPORT */ + +#if PPP_IDLETIMELIMIT +/******************************************************************** + * + * get_idle_time - return how long the link has been idle. + */ +int get_idle_time(ppp_pcb *pcb, struct ppp_idle *ip) { + /* FIXME: add idle time support and make it optional */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(ip); + return 1; +} +#endif /* PPP_IDLETIMELIMIT */ + +#if DEMAND_SUPPORT +/******************************************************************** + * + * get_loop_output - get outgoing packets from the ppp device, + * and detect when we want to bring the real link up. + * Return value is 1 if we need to bring up the link, 0 otherwise. + */ +int get_loop_output(void) { + return 0; +} +#endif /* DEMAND_SUPPORT */ + +#if PPP_PROTOCOLNAME +/* List of protocol names, to make our messages a little more informative. 
*/ +struct protocol_list { + u_short proto; + const char *name; +} const protocol_list[] = { + { 0x21, "IP" }, + { 0x23, "OSI Network Layer" }, + { 0x25, "Xerox NS IDP" }, + { 0x27, "DECnet Phase IV" }, + { 0x29, "Appletalk" }, + { 0x2b, "Novell IPX" }, + { 0x2d, "VJ compressed TCP/IP" }, + { 0x2f, "VJ uncompressed TCP/IP" }, + { 0x31, "Bridging PDU" }, + { 0x33, "Stream Protocol ST-II" }, + { 0x35, "Banyan Vines" }, + { 0x39, "AppleTalk EDDP" }, + { 0x3b, "AppleTalk SmartBuffered" }, + { 0x3d, "Multi-Link" }, + { 0x3f, "NETBIOS Framing" }, + { 0x41, "Cisco Systems" }, + { 0x43, "Ascom Timeplex" }, + { 0x45, "Fujitsu Link Backup and Load Balancing (LBLB)" }, + { 0x47, "DCA Remote Lan" }, + { 0x49, "Serial Data Transport Protocol (PPP-SDTP)" }, + { 0x4b, "SNA over 802.2" }, + { 0x4d, "SNA" }, + { 0x4f, "IP6 Header Compression" }, + { 0x51, "KNX Bridging Data" }, + { 0x53, "Encryption" }, + { 0x55, "Individual Link Encryption" }, + { 0x57, "IPv6" }, + { 0x59, "PPP Muxing" }, + { 0x5b, "Vendor-Specific Network Protocol" }, + { 0x61, "RTP IPHC Full Header" }, + { 0x63, "RTP IPHC Compressed TCP" }, + { 0x65, "RTP IPHC Compressed non-TCP" }, + { 0x67, "RTP IPHC Compressed UDP 8" }, + { 0x69, "RTP IPHC Compressed RTP 8" }, + { 0x6f, "Stampede Bridging" }, + { 0x73, "MP+" }, + { 0xc1, "NTCITS IPI" }, + { 0xfb, "single-link compression" }, + { 0xfd, "Compressed Datagram" }, + { 0x0201, "802.1d Hello Packets" }, + { 0x0203, "IBM Source Routing BPDU" }, + { 0x0205, "DEC LANBridge100 Spanning Tree" }, + { 0x0207, "Cisco Discovery Protocol" }, + { 0x0209, "Netcs Twin Routing" }, + { 0x020b, "STP - Scheduled Transfer Protocol" }, + { 0x020d, "EDP - Extreme Discovery Protocol" }, + { 0x0211, "Optical Supervisory Channel Protocol" }, + { 0x0213, "Optical Supervisory Channel Protocol" }, + { 0x0231, "Luxcom" }, + { 0x0233, "Sigma Network Systems" }, + { 0x0235, "Apple Client Server Protocol" }, + { 0x0281, "MPLS Unicast" }, + { 0x0283, "MPLS Multicast" }, + { 0x0285, "IEEE p1284.4 standard - data packets" }, + { 0x0287, "ETSI TETRA Network Protocol Type 1" }, + { 0x0289, "Multichannel Flow Treatment Protocol" }, + { 0x2063, "RTP IPHC Compressed TCP No Delta" }, + { 0x2065, "RTP IPHC Context State" }, + { 0x2067, "RTP IPHC Compressed UDP 16" }, + { 0x2069, "RTP IPHC Compressed RTP 16" }, + { 0x4001, "Cray Communications Control Protocol" }, + { 0x4003, "CDPD Mobile Network Registration Protocol" }, + { 0x4005, "Expand accelerator protocol" }, + { 0x4007, "ODSICP NCP" }, + { 0x4009, "DOCSIS DLL" }, + { 0x400B, "Cetacean Network Detection Protocol" }, + { 0x4021, "Stacker LZS" }, + { 0x4023, "RefTek Protocol" }, + { 0x4025, "Fibre Channel" }, + { 0x4027, "EMIT Protocols" }, + { 0x405b, "Vendor-Specific Protocol (VSP)" }, + { 0x8021, "Internet Protocol Control Protocol" }, + { 0x8023, "OSI Network Layer Control Protocol" }, + { 0x8025, "Xerox NS IDP Control Protocol" }, + { 0x8027, "DECnet Phase IV Control Protocol" }, + { 0x8029, "Appletalk Control Protocol" }, + { 0x802b, "Novell IPX Control Protocol" }, + { 0x8031, "Bridging NCP" }, + { 0x8033, "Stream Protocol Control Protocol" }, + { 0x8035, "Banyan Vines Control Protocol" }, + { 0x803d, "Multi-Link Control Protocol" }, + { 0x803f, "NETBIOS Framing Control Protocol" }, + { 0x8041, "Cisco Systems Control Protocol" }, + { 0x8043, "Ascom Timeplex" }, + { 0x8045, "Fujitsu LBLB Control Protocol" }, + { 0x8047, "DCA Remote Lan Network Control Protocol (RLNCP)" }, + { 0x8049, "Serial Data Control Protocol (PPP-SDCP)" }, + { 0x804b, "SNA over 802.2 Control 
Protocol" }, + { 0x804d, "SNA Control Protocol" }, + { 0x804f, "IP6 Header Compression Control Protocol" }, + { 0x8051, "KNX Bridging Control Protocol" }, + { 0x8053, "Encryption Control Protocol" }, + { 0x8055, "Individual Link Encryption Control Protocol" }, + { 0x8057, "IPv6 Control Protocol" }, + { 0x8059, "PPP Muxing Control Protocol" }, + { 0x805b, "Vendor-Specific Network Control Protocol (VSNCP)" }, + { 0x806f, "Stampede Bridging Control Protocol" }, + { 0x8073, "MP+ Control Protocol" }, + { 0x80c1, "NTCITS IPI Control Protocol" }, + { 0x80fb, "Single Link Compression Control Protocol" }, + { 0x80fd, "Compression Control Protocol" }, + { 0x8207, "Cisco Discovery Protocol Control" }, + { 0x8209, "Netcs Twin Routing" }, + { 0x820b, "STP - Control Protocol" }, + { 0x820d, "EDPCP - Extreme Discovery Protocol Ctrl Prtcl" }, + { 0x8235, "Apple Client Server Protocol Control" }, + { 0x8281, "MPLSCP" }, + { 0x8285, "IEEE p1284.4 standard - Protocol Control" }, + { 0x8287, "ETSI TETRA TNP1 Control Protocol" }, + { 0x8289, "Multichannel Flow Treatment Protocol" }, + { 0xc021, "Link Control Protocol" }, + { 0xc023, "Password Authentication Protocol" }, + { 0xc025, "Link Quality Report" }, + { 0xc027, "Shiva Password Authentication Protocol" }, + { 0xc029, "CallBack Control Protocol (CBCP)" }, + { 0xc02b, "BACP Bandwidth Allocation Control Protocol" }, + { 0xc02d, "BAP" }, + { 0xc05b, "Vendor-Specific Authentication Protocol (VSAP)" }, + { 0xc081, "Container Control Protocol" }, + { 0xc223, "Challenge Handshake Authentication Protocol" }, + { 0xc225, "RSA Authentication Protocol" }, + { 0xc227, "Extensible Authentication Protocol" }, + { 0xc229, "Mitsubishi Security Info Exch Ptcl (SIEP)" }, + { 0xc26f, "Stampede Bridging Authorization Protocol" }, + { 0xc281, "Proprietary Authentication Protocol" }, + { 0xc283, "Proprietary Authentication Protocol" }, + { 0xc481, "Proprietary Node ID Authentication Protocol" }, + { 0, NULL }, +}; + +/* + * protocol_name - find a name for a PPP protocol. + */ +const char * protocol_name(int proto) { + const struct protocol_list *lp; + + for (lp = protocol_list; lp->proto != 0; ++lp) { + if (proto == lp->proto) { + return lp->name; + } + } + return NULL; +} +#endif /* PPP_PROTOCOLNAME */ + +#if PPP_STATS_SUPPORT + +/* ---- Note on PPP Stats support ---- + * + * The one willing link stats support should add the get_ppp_stats() + * to fetch statistics from lwIP. + */ + +/* + * reset_link_stats - "reset" stats when link goes up. + */ +void reset_link_stats(int u) { + if (!get_ppp_stats(u, &old_link_stats)) { + return; + } + gettimeofday(&start_time, NULL); +} + +/* + * update_link_stats - get stats at link termination. + */ +void update_link_stats(int u) { + struct timeval now; + char numbuf[32]; + + if (!get_ppp_stats(u, &link_stats) || gettimeofday(&now, NULL) < 0) { + return; + } + link_connect_time = now.tv_sec - start_time.tv_sec; + link_stats_valid = 1; + + link_stats.bytes_in -= old_link_stats.bytes_in; + link_stats.bytes_out -= old_link_stats.bytes_out; + link_stats.pkts_in -= old_link_stats.pkts_in; + link_stats.pkts_out -= old_link_stats.pkts_out; +} + +void print_link_stats() { + /* + * Print connect time and statistics. 
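+ * link_connect_time is in seconds; since 6 seconds is one tenth of a
+ * minute, (link_connect_time + 5) / 6 converts it to tenths of a minute
+ * rounded up, e.g. 605 seconds becomes 101 and is printed as
+ * "10.1 minutes".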
+ */ + if (link_stats_valid) { + int t = (link_connect_time + 5) / 6; /* 1/10ths of minutes */ + info("Connect time %d.%d minutes.", t/10, t%10); + info("Sent %u bytes, received %u bytes.", link_stats.bytes_out, link_stats.bytes_in); + link_stats_valid = 0; + } +} +#endif /* PPP_STATS_SUPPORT */ + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c new file mode 100644 index 00000000..947f7ba8 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c @@ -0,0 +1,427 @@ +/** + * @file + * Point To Point Protocol Sequential API module + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "netif/ppp/ppp_opts.h" + +#if LWIP_PPP_API /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/pppapi.h" +#include "lwip/priv/tcpip_priv.h" +#include "netif/ppp/pppoe.h" +#include "netif/ppp/pppol2tp.h" +#include "netif/ppp/pppos.h" + +#if LWIP_MPU_COMPATIBLE +LWIP_MEMPOOL_DECLARE(PPPAPI_MSG, MEMP_NUM_PPP_API_MSG, sizeof(struct pppapi_msg), "PPPAPI_MSG") +#endif + +#define PPPAPI_VAR_REF(name) API_VAR_REF(name) +#define PPPAPI_VAR_DECLARE(name) API_VAR_DECLARE(struct pppapi_msg, name) +#define PPPAPI_VAR_ALLOC(name) API_VAR_ALLOC_POOL(struct pppapi_msg, PPPAPI_MSG, name, ERR_MEM) +#define PPPAPI_VAR_ALLOC_RETURN_NULL(name) API_VAR_ALLOC_POOL(struct pppapi_msg, PPPAPI_MSG, name, NULL) +#define PPPAPI_VAR_FREE(name) API_VAR_FREE_POOL(PPPAPI_MSG, name) + +/** + * Call ppp_set_default() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_set_default(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + ppp_set_default(msg->msg.ppp); + return ERR_OK; +} + +/** + * Call ppp_set_default() in a thread-safe way by running that function inside the + * tcpip_thread context. 
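+ * All pppapi_* wrappers in this file follow the same pattern: the
+ * arguments are marshalled into a struct pppapi_msg and handed to
+ * tcpip_api_call(), so the plain ppp_* function executes in the
+ * tcpip_thread. Illustrative call from an application thread ("ppp" is
+ * a placeholder for an existing ppp_pcb):
+ *
+ *   pppapi_set_default(ppp);      rather than ppp_set_default(ppp)
+ *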
+ */ +err_t +pppapi_set_default(ppp_pcb *pcb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + err = tcpip_api_call(pppapi_do_ppp_set_default, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +#if PPP_NOTIFY_PHASE +/** + * Call ppp_set_notify_phase_callback() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_set_notify_phase_callback(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + ppp_set_notify_phase_callback(msg->msg.ppp, msg->msg.msg.setnotifyphasecb.notify_phase_cb); + return ERR_OK; +} + +/** + * Call ppp_set_notify_phase_callback() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.setnotifyphasecb.notify_phase_cb = notify_phase_cb; + err = tcpip_api_call(pppapi_do_ppp_set_notify_phase_callback, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} +#endif /* PPP_NOTIFY_PHASE */ + + +#if PPPOS_SUPPORT +/** + * Call pppos_create() inside the tcpip_thread context. + */ +static err_t +pppapi_do_pppos_create(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + msg->msg.ppp = pppos_create(msg->msg.msg.serialcreate.pppif, msg->msg.msg.serialcreate.output_cb, + msg->msg.msg.serialcreate.link_status_cb, msg->msg.msg.serialcreate.ctx_cb); + return ERR_OK; +} + +/** + * Call pppos_create() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +ppp_pcb* +pppapi_pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + ppp_pcb* result; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC_RETURN_NULL(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = NULL; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.pppif = pppif; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.output_cb = output_cb; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.link_status_cb = link_status_cb; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.ctx_cb = ctx_cb; + tcpip_api_call(pppapi_do_pppos_create, &PPPAPI_VAR_REF(msg).call); + result = PPPAPI_VAR_REF(msg).msg.ppp; + PPPAPI_VAR_FREE(msg); + return result; +} +#endif /* PPPOS_SUPPORT */ + + +#if PPPOE_SUPPORT +/** + * Call pppoe_create() inside the tcpip_thread context. + */ +static err_t +pppapi_do_pppoe_create(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + msg->msg.ppp = pppoe_create(msg->msg.msg.ethernetcreate.pppif, msg->msg.msg.ethernetcreate.ethif, + msg->msg.msg.ethernetcreate.service_name, msg->msg.msg.ethernetcreate.concentrator_name, + msg->msg.msg.ethernetcreate.link_status_cb, msg->msg.msg.ethernetcreate.ctx_cb); + return ERR_OK; +} + +/** + * Call pppoe_create() in a thread-safe way by running that function inside the + * tcpip_thread context. 
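+ * Minimal PPPoE bring-up sketch (illustrative only; "ppp_netif",
+ * "eth_netif" and "status_cb" are placeholder names, and eth_netif is
+ * assumed to be an already added Ethernet netif):
+ *
+ *   static struct netif ppp_netif;
+ *   ppp_pcb *ppp = pppapi_pppoe_create(&ppp_netif, &eth_netif,
+ *                                      NULL, NULL, status_cb, NULL);
+ *   if (ppp != NULL) {
+ *     pppapi_connect(ppp, 0);     0 = start immediately, no holdoff delay
+ *   }
+ *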
+ */ +ppp_pcb* +pppapi_pppoe_create(struct netif *pppif, struct netif *ethif, const char *service_name, + const char *concentrator_name, ppp_link_status_cb_fn link_status_cb, + void *ctx_cb) +{ + ppp_pcb* result; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC_RETURN_NULL(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = NULL; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.pppif = pppif; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.ethif = ethif; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.service_name = service_name; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.concentrator_name = concentrator_name; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.link_status_cb = link_status_cb; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.ctx_cb = ctx_cb; + tcpip_api_call(pppapi_do_pppoe_create, &PPPAPI_VAR_REF(msg).call); + result = PPPAPI_VAR_REF(msg).msg.ppp; + PPPAPI_VAR_FREE(msg); + return result; +} +#endif /* PPPOE_SUPPORT */ + + +#if PPPOL2TP_SUPPORT +/** + * Call pppol2tp_create() inside the tcpip_thread context. + */ +static err_t +pppapi_do_pppol2tp_create(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + msg->msg.ppp = pppol2tp_create(msg->msg.msg.l2tpcreate.pppif, + msg->msg.msg.l2tpcreate.netif, API_EXPR_REF(msg->msg.msg.l2tpcreate.ipaddr), msg->msg.msg.l2tpcreate.port, +#if PPPOL2TP_AUTH_SUPPORT + msg->msg.msg.l2tpcreate.secret, + msg->msg.msg.l2tpcreate.secret_len, +#else /* PPPOL2TP_AUTH_SUPPORT */ + NULL, + 0, +#endif /* PPPOL2TP_AUTH_SUPPORT */ + msg->msg.msg.l2tpcreate.link_status_cb, msg->msg.msg.l2tpcreate.ctx_cb); + return ERR_OK; +} + +/** + * Call pppol2tp_create() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +ppp_pcb* +pppapi_pppol2tp_create(struct netif *pppif, struct netif *netif, ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + ppp_pcb* result; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC_RETURN_NULL(msg); +#if !PPPOL2TP_AUTH_SUPPORT + LWIP_UNUSED_ARG(secret); + LWIP_UNUSED_ARG(secret_len); +#endif /* !PPPOL2TP_AUTH_SUPPORT */ + + PPPAPI_VAR_REF(msg).msg.ppp = NULL; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.pppif = pppif; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.netif = netif; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.ipaddr = PPPAPI_VAR_REF(ipaddr); + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.port = port; +#if PPPOL2TP_AUTH_SUPPORT + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.secret = secret; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.secret_len = secret_len; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.link_status_cb = link_status_cb; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.ctx_cb = ctx_cb; + tcpip_api_call(pppapi_do_pppol2tp_create, &PPPAPI_VAR_REF(msg).call); + result = PPPAPI_VAR_REF(msg).msg.ppp; + PPPAPI_VAR_FREE(msg); + return result; +} +#endif /* PPPOL2TP_SUPPORT */ + + +/** + * Call ppp_connect() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_connect(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. 
+ * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_connect(msg->msg.ppp, msg->msg.msg.connect.holdoff); +} + +/** + * Call ppp_connect() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_connect(ppp_pcb *pcb, u16_t holdoff) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.connect.holdoff = holdoff; + err = tcpip_api_call(pppapi_do_ppp_connect, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +#if PPP_SERVER +/** + * Call ppp_listen() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_listen(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_listen(msg->msg.ppp); +} + +/** + * Call ppp_listen() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_listen(ppp_pcb *pcb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + err = tcpip_api_call(pppapi_do_ppp_listen, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} +#endif /* PPP_SERVER */ + + +/** + * Call ppp_close() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_close(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_close(msg->msg.ppp, msg->msg.msg.close.nocarrier); +} + +/** + * Call ppp_close() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_close(ppp_pcb *pcb, u8_t nocarrier) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.close.nocarrier = nocarrier; + err = tcpip_api_call(pppapi_do_ppp_close, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +/** + * Call ppp_free() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_free(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_free(msg->msg.ppp); +} + +/** + * Call ppp_free() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_free(ppp_pcb *pcb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + err = tcpip_api_call(pppapi_do_ppp_free, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +/** + * Call ppp_ioctl() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_ioctl(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. 
+ * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_ioctl(msg->msg.ppp, msg->msg.msg.ioctl.cmd, msg->msg.msg.ioctl.arg); +} + +/** + * Call ppp_ioctl() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.ioctl.cmd = cmd; + PPPAPI_VAR_REF(msg).msg.msg.ioctl.arg = arg; + err = tcpip_api_call(pppapi_do_ppp_ioctl, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + +#endif /* LWIP_PPP_API */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c new file mode 100644 index 00000000..82d78c13 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c @@ -0,0 +1,66 @@ +/* + * pppcrypt.c - PPP/DES linkage for MS-CHAP and EAP SRP-SHA1 + * + * Extracted from chap_ms.c by James Carlson. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MSCHAP_SUPPORT /* don't build if not necessary */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/pppcrypt.h" + + +static u_char pppcrypt_get_7bits(u_char *input, int startBit) { + unsigned int word; + + word = (unsigned)input[startBit / 8] << 8; + word |= (unsigned)input[startBit / 8 + 1]; + + word >>= 15 - (startBit % 8 + 7); + + return word & 0xFE; +} + +/* IN 56 bit DES key missing parity bits + * OUT 64 bit DES key with parity bits added + */ +void pppcrypt_56_to_64_bit_key(u_char *key, u_char * des_key) { + des_key[0] = pppcrypt_get_7bits(key, 0); + des_key[1] = pppcrypt_get_7bits(key, 7); + des_key[2] = pppcrypt_get_7bits(key, 14); + des_key[3] = pppcrypt_get_7bits(key, 21); + des_key[4] = pppcrypt_get_7bits(key, 28); + des_key[5] = pppcrypt_get_7bits(key, 35); + des_key[6] = pppcrypt_get_7bits(key, 42); + des_key[7] = pppcrypt_get_7bits(key, 49); +} + +#endif /* PPP_SUPPORT && MSCHAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c new file mode 100644 index 00000000..8ed2d638 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c @@ -0,0 +1,1201 @@ +/***************************************************************************** +* pppoe.c - PPP Over Ethernet implementation for lwIP. +* +* Copyright (c) 2006 by Marc Boucher, Services Informatiques (MBSI) inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 06-01-01 Marc Boucher +* Ported to lwIP. +*****************************************************************************/ + + + +/* based on NetBSD: if_pppoe.c,v 1.64 2006/01/31 23:50:15 martin Exp */ + +/*- + * Copyright (c) 2002 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Martin Husemann . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "lwip/timeouts.h" +#include "lwip/memp.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" + +#include "netif/ethernet.h" +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/pppoe.h" + +/* Memory pool */ +LWIP_MEMPOOL_DECLARE(PPPOE_IF, MEMP_NUM_PPPOE_INTERFACES, sizeof(struct pppoe_softc), "PPPOE_IF") + +/* Add a 16 bit unsigned value to a buffer pointed to by PTR */ +#define PPPOE_ADD_16(PTR, VAL) \ + *(PTR)++ = (u8_t)((VAL) / 256); \ + *(PTR)++ = (u8_t)((VAL) % 256) + +/* Add a complete PPPoE header to the buffer pointed to by PTR */ +#define PPPOE_ADD_HEADER(PTR, CODE, SESS, LEN) \ + *(PTR)++ = PPPOE_VERTYPE; \ + *(PTR)++ = (CODE); \ + PPPOE_ADD_16(PTR, SESS); \ + PPPOE_ADD_16(PTR, LEN) + +#define PPPOE_DISC_TIMEOUT (5*1000) /* base for quick timeout calculation */ +#define PPPOE_SLOW_RETRY (60*1000) /* persistent retry interval */ +#define PPPOE_DISC_MAXPADI 4 /* retry PADI four times (quickly) */ +#define PPPOE_DISC_MAXPADR 2 /* retry PADR twice */ + +#ifdef PPPOE_SERVER +#error "PPPOE_SERVER is not yet supported under lwIP!" 
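+/* Worked example for the PPPOE_ADD_16/PPPOE_ADD_HEADER macros defined
+ * above (illustrative; PPPOE_CODE_PADI comes from pppoe.h and the 0x11
+ * version/type byte from RFC 2516, "p" is a placeholder u8_t pointer
+ * into a frame buffer):
+ *
+ *   PPPOE_ADD_HEADER(p, PPPOE_CODE_PADI, 0, 12);
+ *
+ * writes the six discovery-header bytes 0x11, code, 0x00 0x00 (session
+ * id), 0x00 0x0C (payload length); PPPOE_ADD_16 stores each 16-bit
+ * value most significant byte first.
+ */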
+/* from if_spppsubr.c */ +#define IFF_PASSIVE IFF_LINK0 /* wait passively for connection */ +#endif + +#define PPPOE_ERRORSTRING_LEN 64 + + +/* callbacks called from PPP core */ +static err_t pppoe_write(ppp_pcb *ppp, void *ctx, struct pbuf *p); +static err_t pppoe_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol); +static void pppoe_connect(ppp_pcb *ppp, void *ctx); +static void pppoe_disconnect(ppp_pcb *ppp, void *ctx); +static err_t pppoe_destroy(ppp_pcb *ppp, void *ctx); + +/* management routines */ +static void pppoe_abort_connect(struct pppoe_softc *); +#if 0 /* UNUSED */ +static void pppoe_clear_softc(struct pppoe_softc *, const char *); +#endif /* UNUSED */ + +/* internal timeout handling */ +static void pppoe_timeout(void *); + +/* sending actual protocol controll packets */ +static err_t pppoe_send_padi(struct pppoe_softc *); +static err_t pppoe_send_padr(struct pppoe_softc *); +#ifdef PPPOE_SERVER +static err_t pppoe_send_pado(struct pppoe_softc *); +static err_t pppoe_send_pads(struct pppoe_softc *); +#endif +static err_t pppoe_send_padt(struct netif *, u_int, const u8_t *); + +/* internal helper functions */ +static err_t pppoe_xmit(struct pppoe_softc *sc, struct pbuf *pb); +static struct pppoe_softc* pppoe_find_softc_by_session(u_int session, struct netif *rcvif); +static struct pppoe_softc* pppoe_find_softc_by_hunique(u8_t *token, size_t len, struct netif *rcvif); + +/** linked list of created pppoe interfaces */ +static struct pppoe_softc *pppoe_softc_list; + +/* Callbacks structure for PPP core */ +static const struct link_callbacks pppoe_callbacks = { + pppoe_connect, +#if PPP_SERVER + NULL, +#endif /* PPP_SERVER */ + pppoe_disconnect, + pppoe_destroy, + pppoe_write, + pppoe_netif_output, + NULL, + NULL +}; + +/* + * Create a new PPP Over Ethernet (PPPoE) connection. + * + * Return 0 on success, an error code on failure. 
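+ * (As implemented below this actually returns the new ppp_pcb on
+ * success and NULL on allocation failure rather than a numeric error
+ * code.)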
+ */ +ppp_pcb *pppoe_create(struct netif *pppif, + struct netif *ethif, + const char *service_name, const char *concentrator_name, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + ppp_pcb *ppp; + struct pppoe_softc *sc; + LWIP_UNUSED_ARG(service_name); + LWIP_UNUSED_ARG(concentrator_name); + LWIP_ASSERT_CORE_LOCKED(); + + sc = (struct pppoe_softc *)LWIP_MEMPOOL_ALLOC(PPPOE_IF); + if (sc == NULL) { + return NULL; + } + + ppp = ppp_new(pppif, &pppoe_callbacks, sc, link_status_cb, ctx_cb); + if (ppp == NULL) { + LWIP_MEMPOOL_FREE(PPPOE_IF, sc); + return NULL; + } + + memset(sc, 0, sizeof(struct pppoe_softc)); + sc->pcb = ppp; + sc->sc_ethif = ethif; + /* put the new interface at the head of the list */ + sc->next = pppoe_softc_list; + pppoe_softc_list = sc; + return ppp; +} + +/* Called by PPP core */ +static err_t pppoe_write(ppp_pcb *ppp, void *ctx, struct pbuf *p) { + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + struct pbuf *ph; /* Ethernet + PPPoE header */ + err_t ret; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + /* skip address & flags */ + pbuf_remove_header(p, 2); + + ph = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN), PBUF_RAM); + if(!ph) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(p); + return ERR_MEM; + } + + pbuf_remove_header(ph, PPPOE_HEADERLEN); /* hide PPPoE header */ + pbuf_cat(ph, p); +#if MIB2_STATS + tot_len = ph->tot_len; +#endif /* MIB2_STATS */ + + ret = pppoe_xmit(sc, ph); + if (ret != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ret; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, (u16_t)tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +/* Called by PPP core */ +static err_t pppoe_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol) { + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + struct pbuf *pb; + u8_t *pl; + err_t err; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + /* @todo: try to use pbuf_header() here! 
*/ + pb = pbuf_alloc(PBUF_LINK, PPPOE_HEADERLEN + sizeof(protocol), PBUF_RAM); + if(!pb) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ERR_MEM; + } + + pbuf_remove_header(pb, PPPOE_HEADERLEN); + + pl = (u8_t*)pb->payload; + PUTSHORT(protocol, pl); + + pbuf_chain(pb, p); +#if MIB2_STATS + tot_len = pb->tot_len; +#endif /* MIB2_STATS */ + + if( (err = pppoe_xmit(sc, pb)) != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return err; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +static err_t +pppoe_destroy(ppp_pcb *ppp, void *ctx) +{ + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + struct pppoe_softc **copp, *freep; + LWIP_UNUSED_ARG(ppp); + + sys_untimeout(pppoe_timeout, sc); + + /* remove interface from list */ + for (copp = &pppoe_softc_list; (freep = *copp); copp = &freep->next) { + if (freep == sc) { + *copp = freep->next; + break; + } + } + +#ifdef PPPOE_TODO + if (sc->sc_concentrator_name) { + mem_free(sc->sc_concentrator_name); + } + if (sc->sc_service_name) { + mem_free(sc->sc_service_name); + } +#endif /* PPPOE_TODO */ + LWIP_MEMPOOL_FREE(PPPOE_IF, sc); + + return ERR_OK; +} + +/* + * Find the interface handling the specified session. + * Note: O(number of sessions open), this is a client-side only, mean + * and lean implementation, so number of open sessions typically should + * be 1. + */ +static struct pppoe_softc* pppoe_find_softc_by_session(u_int session, struct netif *rcvif) { + struct pppoe_softc *sc; + + for (sc = pppoe_softc_list; sc != NULL; sc = sc->next) { + if (sc->sc_state == PPPOE_STATE_SESSION + && sc->sc_session == session + && sc->sc_ethif == rcvif) { + return sc; + } + } + return NULL; +} + +/* Check host unique token passed and return appropriate softc pointer, + * or NULL if token is bogus. 
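+ *
+ * Note: the discovery code below places the pppoe_softc pointer value itself
+ * into the Host-Uniq tag (see pppoe_send_padi/pppoe_send_padr), so a valid
+ * token must be exactly sizeof(sc) bytes long and must decode to an entry of
+ * pppoe_softc_list that is currently in a discovery state.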
*/ +static struct pppoe_softc* pppoe_find_softc_by_hunique(u8_t *token, size_t len, struct netif *rcvif) { + struct pppoe_softc *sc, *t; + + if (len != sizeof sc) { + return NULL; + } + MEMCPY(&t, token, len); + + for (sc = pppoe_softc_list; sc != NULL; sc = sc->next) { + if (sc == t) { + break; + } + } + + if (sc == NULL) { + PPPDEBUG(LOG_DEBUG, ("pppoe: alien host unique tag, no session found\n")); + return NULL; + } + + /* should be safe to access *sc now */ + if (sc->sc_state < PPPOE_STATE_PADI_SENT || sc->sc_state >= PPPOE_STATE_SESSION) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": host unique tag found, but it belongs to a connection in state %d\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, sc->sc_state)); + return NULL; + } + if (sc->sc_ethif != rcvif) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": wrong interface, not accepting host unique\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + return NULL; + } + return sc; +} + +/* analyze and handle a single received packet while not in session state */ +void +pppoe_disc_input(struct netif *netif, struct pbuf *pb) +{ + u16_t tag, len, off; + u16_t session, plen; + struct pppoe_softc *sc; +#if PPP_DEBUG + const char *err_msg = NULL; +#endif /* PPP_DEBUG */ + u8_t *ac_cookie; + u16_t ac_cookie_len; +#ifdef PPPOE_SERVER + u8_t *hunique; + size_t hunique_len; +#endif + struct pppoehdr *ph; + struct pppoetag pt; + int err; + struct eth_hdr *ethhdr; + + /* don't do anything if there is not a single PPPoE instance */ + if (pppoe_softc_list == NULL) { + pbuf_free(pb); + return; + } + + pb = pbuf_coalesce(pb, PBUF_RAW); + + ethhdr = (struct eth_hdr *)pb->payload; + + ac_cookie = NULL; + ac_cookie_len = 0; +#ifdef PPPOE_SERVER + hunique = NULL; + hunique_len = 0; +#endif + session = 0; + off = sizeof(struct eth_hdr) + sizeof(struct pppoehdr); + if (pb->len < off) { + PPPDEBUG(LOG_DEBUG, ("pppoe: packet too short: %d\n", pb->len)); + goto done; + } + + ph = (struct pppoehdr *) (ethhdr + 1); + if (ph->vertype != PPPOE_VERTYPE) { + PPPDEBUG(LOG_DEBUG, ("pppoe: unknown version/type packet: 0x%x\n", ph->vertype)); + goto done; + } + session = lwip_ntohs(ph->session); + plen = lwip_ntohs(ph->plen); + + if (plen > (pb->len - off)) { + PPPDEBUG(LOG_DEBUG, ("pppoe: packet content does not fit: data available = %d, packet size = %u\n", + pb->len - off, plen)); + goto done; + } + if(pb->tot_len == pb->len) { + u16_t framelen = off + plen; + if (framelen < pb->len) { + /* ignore trailing garbage */ + pb->tot_len = pb->len = framelen; + } + } + tag = 0; + len = 0; + sc = NULL; + while (off + sizeof(pt) <= pb->len) { + MEMCPY(&pt, (u8_t*)pb->payload + off, sizeof(pt)); + tag = lwip_ntohs(pt.tag); + len = lwip_ntohs(pt.len); + if (off + sizeof(pt) + len > pb->len) { + PPPDEBUG(LOG_DEBUG, ("pppoe: tag 0x%x len 0x%x is too long\n", tag, len)); + goto done; + } + switch (tag) { + case PPPOE_TAG_EOL: + goto breakbreak; + case PPPOE_TAG_SNAME: + break; /* ignored */ + case PPPOE_TAG_ACNAME: + break; /* ignored */ + case PPPOE_TAG_HUNIQUE: + if (sc != NULL) { + break; + } +#ifdef PPPOE_SERVER + hunique = (u8_t*)pb->payload + off + sizeof(pt); + hunique_len = len; +#endif + sc = pppoe_find_softc_by_hunique((u8_t*)pb->payload + off + sizeof(pt), len, netif); + break; + case PPPOE_TAG_ACCOOKIE: + if (ac_cookie == NULL) { + if (len > PPPOE_MAX_AC_COOKIE_LEN) { + PPPDEBUG(LOG_DEBUG, ("pppoe: AC cookie is too long: len = %d, max = %d\n", len, PPPOE_MAX_AC_COOKIE_LEN)); + goto done; + } + ac_cookie = (u8_t*)pb->payload + off + 
sizeof(pt); + ac_cookie_len = len; + } + break; +#if PPP_DEBUG + case PPPOE_TAG_SNAME_ERR: + err_msg = "SERVICE NAME ERROR"; + break; + case PPPOE_TAG_ACSYS_ERR: + err_msg = "AC SYSTEM ERROR"; + break; + case PPPOE_TAG_GENERIC_ERR: + err_msg = "GENERIC ERROR"; + break; +#endif /* PPP_DEBUG */ + default: + break; + } +#if PPP_DEBUG + if (err_msg != NULL) { + char error_tmp[PPPOE_ERRORSTRING_LEN]; + u16_t error_len = LWIP_MIN(len, sizeof(error_tmp)-1); + strncpy(error_tmp, (char*)pb->payload + off + sizeof(pt), error_len); + error_tmp[error_len] = '\0'; + if (sc) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": %s: %s\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err_msg, error_tmp)); + } else { + PPPDEBUG(LOG_DEBUG, ("pppoe: %s: %s\n", err_msg, error_tmp)); + } + } +#endif /* PPP_DEBUG */ + off += sizeof(pt) + len; + } + +breakbreak:; + switch (ph->code) { + case PPPOE_CODE_PADI: +#ifdef PPPOE_SERVER + /* + * got service name, concentrator name, and/or host unique. + * ignore if we have no interfaces with IFF_PASSIVE|IFF_UP. + */ + if (LIST_EMPTY(&pppoe_softc_list)) { + goto done; + } + LIST_FOREACH(sc, &pppoe_softc_list, sc_list) { + if (!(sc->sc_sppp.pp_if.if_flags & IFF_UP)) { + continue; + } + if (!(sc->sc_sppp.pp_if.if_flags & IFF_PASSIVE)) { + continue; + } + if (sc->sc_state == PPPOE_STATE_INITIAL) { + break; + } + } + if (sc == NULL) { + /* PPPDEBUG(LOG_DEBUG, ("pppoe: free passive interface is not found\n")); */ + goto done; + } + if (hunique) { + if (sc->sc_hunique) { + mem_free(sc->sc_hunique); + } + sc->sc_hunique = mem_malloc(hunique_len); + if (sc->sc_hunique == NULL) { + goto done; + } + sc->sc_hunique_len = hunique_len; + MEMCPY(sc->sc_hunique, hunique, hunique_len); + } + MEMCPY(&sc->sc_dest, eh->ether_shost, sizeof sc->sc_dest); + sc->sc_state = PPPOE_STATE_PADO_SENT; + pppoe_send_pado(sc); + break; +#endif /* PPPOE_SERVER */ + case PPPOE_CODE_PADR: +#ifdef PPPOE_SERVER + /* + * get sc from ac_cookie if IFF_PASSIVE + */ + if (ac_cookie == NULL) { + /* be quiet if there is not a single pppoe instance */ + PPPDEBUG(LOG_DEBUG, ("pppoe: received PADR but not includes ac_cookie\n")); + goto done; + } + sc = pppoe_find_softc_by_hunique(ac_cookie, ac_cookie_len, netif); + if (sc == NULL) { + /* be quiet if there is not a single pppoe instance */ + if (!LIST_EMPTY(&pppoe_softc_list)) { + PPPDEBUG(LOG_DEBUG, ("pppoe: received PADR but could not find request for it\n")); + } + goto done; + } + if (sc->sc_state != PPPOE_STATE_PADO_SENT) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": received unexpected PADR\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + goto done; + } + if (hunique) { + if (sc->sc_hunique) { + mem_free(sc->sc_hunique); + } + sc->sc_hunique = mem_malloc(hunique_len); + if (sc->sc_hunique == NULL) { + goto done; + } + sc->sc_hunique_len = hunique_len; + MEMCPY(sc->sc_hunique, hunique, hunique_len); + } + pppoe_send_pads(sc); + sc->sc_state = PPPOE_STATE_SESSION; + ppp_start(sc->pcb); /* notify upper layers */ + break; +#else + /* ignore, we are no access concentrator */ + goto done; +#endif /* PPPOE_SERVER */ + case PPPOE_CODE_PADO: + if (sc == NULL) { + /* be quiet if there is not a single pppoe instance */ + if (pppoe_softc_list != NULL) { + PPPDEBUG(LOG_DEBUG, ("pppoe: received PADO but could not find request for it\n")); + } + goto done; + } + if (sc->sc_state != PPPOE_STATE_PADI_SENT) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": received unexpected PADO\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + goto 
done; + } + if (ac_cookie) { + sc->sc_ac_cookie_len = ac_cookie_len; + MEMCPY(sc->sc_ac_cookie, ac_cookie, ac_cookie_len); + } + MEMCPY(&sc->sc_dest, ethhdr->src.addr, sizeof(sc->sc_dest.addr)); + sys_untimeout(pppoe_timeout, sc); + sc->sc_padr_retried = 0; + sc->sc_state = PPPOE_STATE_PADR_SENT; + if ((err = pppoe_send_padr(sc)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADR, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOE_DISC_TIMEOUT * (1 + sc->sc_padr_retried), pppoe_timeout, sc); + break; + case PPPOE_CODE_PADS: + if (sc == NULL) { + goto done; + } + sc->sc_session = session; + sys_untimeout(pppoe_timeout, sc); + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": session 0x%x connected\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, session)); + sc->sc_state = PPPOE_STATE_SESSION; + ppp_start(sc->pcb); /* notify upper layers */ + break; + case PPPOE_CODE_PADT: + /* Don't disconnect here, we let the LCP Echo/Reply find the fact + * that PPP session is down. Asking the PPP stack to end the session + * require strict checking about the PPP phase to prevent endless + * disconnection loops. + */ +#if 0 /* UNUSED */ + if (sc == NULL) { /* PADT frames are rarely sent with a hunique tag, this is actually almost always true */ + goto done; + } + pppoe_clear_softc(sc, "received PADT"); +#endif /* UNUSED */ + break; + default: + if(sc) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": unknown code (0x%"X16_F") session = 0x%"X16_F"\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, + (u16_t)ph->code, session)); + } else { + PPPDEBUG(LOG_DEBUG, ("pppoe: unknown code (0x%"X16_F") session = 0x%"X16_F"\n", (u16_t)ph->code, session)); + } + break; + } + +done: + pbuf_free(pb); + return; +} + +void +pppoe_data_input(struct netif *netif, struct pbuf *pb) +{ + u16_t session, plen; + struct pppoe_softc *sc; + struct pppoehdr *ph; +#ifdef PPPOE_TERM_UNKNOWN_SESSIONS + u8_t shost[ETHER_ADDR_LEN]; +#endif + +#ifdef PPPOE_TERM_UNKNOWN_SESSIONS + MEMCPY(shost, ((struct eth_hdr *)pb->payload)->src.addr, sizeof(shost)); +#endif + if (pbuf_remove_header(pb, sizeof(struct eth_hdr)) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe_data_input: pbuf_remove_header failed\n")); + LINK_STATS_INC(link.lenerr); + goto drop; + } + + if (pb->len < sizeof(*ph)) { + PPPDEBUG(LOG_DEBUG, ("pppoe_data_input: could not get PPPoE header\n")); + goto drop; + } + ph = (struct pppoehdr *)pb->payload; + + if (ph->vertype != PPPOE_VERTYPE) { + PPPDEBUG(LOG_DEBUG, ("pppoe (data): unknown version/type packet: 0x%x\n", ph->vertype)); + goto drop; + } + if (ph->code != 0) { + goto drop; + } + + session = lwip_ntohs(ph->session); + sc = pppoe_find_softc_by_session(session, netif); + if (sc == NULL) { +#ifdef PPPOE_TERM_UNKNOWN_SESSIONS + PPPDEBUG(LOG_DEBUG, ("pppoe: input for unknown session 0x%x, sending PADT\n", session)); + pppoe_send_padt(netif, session, shost); +#endif + goto drop; + } + + plen = lwip_ntohs(ph->plen); + + if (pbuf_remove_header(pb, PPPOE_HEADERLEN) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe_data_input: pbuf_remove_header PPPOE_HEADERLEN failed\n")); + LINK_STATS_INC(link.lenerr); + goto drop; + } + + PPPDEBUG(LOG_DEBUG, ("pppoe_data_input: %c%c%"U16_F": pkthdr.len=%d, pppoe.len=%d\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, + pb->len, plen)); + + if (pb->tot_len < plen) { + goto drop; + } + + /* Dispatch the packet 
thereby consuming it. */ + ppp_input(sc->pcb, pb); + return; + +drop: + pbuf_free(pb); +} + +static err_t +pppoe_output(struct pppoe_softc *sc, struct pbuf *pb) +{ + struct eth_hdr *ethhdr; + u16_t etype; + err_t res; + + /* make room for Ethernet header - should not fail */ + if (pbuf_add_header(pb, sizeof(struct eth_hdr)) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe: %c%c%"U16_F": pppoe_output: could not allocate room for Ethernet header\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + ethhdr = (struct eth_hdr *)pb->payload; + etype = sc->sc_state == PPPOE_STATE_SESSION ? ETHTYPE_PPPOE : ETHTYPE_PPPOEDISC; + ethhdr->type = lwip_htons(etype); + MEMCPY(ðhdr->dest.addr, &sc->sc_dest.addr, sizeof(ethhdr->dest.addr)); + MEMCPY(ðhdr->src.addr, &sc->sc_ethif->hwaddr, sizeof(ethhdr->src.addr)); + + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F" (%x) state=%d, session=0x%x output -> %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F", len=%d\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, etype, + sc->sc_state, sc->sc_session, + sc->sc_dest.addr[0], sc->sc_dest.addr[1], sc->sc_dest.addr[2], sc->sc_dest.addr[3], sc->sc_dest.addr[4], sc->sc_dest.addr[5], + pb->tot_len)); + + res = sc->sc_ethif->linkoutput(sc->sc_ethif, pb); + + pbuf_free(pb); + + return res; +} + +static err_t +pppoe_send_padi(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + int len; +#ifdef PPPOE_TODO + int l1 = 0, l2 = 0; /* XXX: gcc */ +#endif /* PPPOE_TODO */ + + /* calculate length of frame (excluding ethernet header + pppoe header) */ + len = 2 + 2 + 2 + 2 + sizeof sc; /* service name tag is required, host unique is send too */ +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { + l1 = (int)strlen(sc->sc_service_name); + len += l1; + } + if (sc->sc_concentrator_name != NULL) { + l2 = (int)strlen(sc->sc_concentrator_name); + len += 2 + 2 + l2; + } +#endif /* PPPOE_TODO */ + LWIP_ASSERT("sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff", + sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff); + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADI, 0, (u16_t)len); + PPPOE_ADD_16(p, PPPOE_TAG_SNAME); +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { + PPPOE_ADD_16(p, l1); + MEMCPY(p, sc->sc_service_name, l1); + p += l1; + } else +#endif /* PPPOE_TODO */ + { + PPPOE_ADD_16(p, 0); + } +#ifdef PPPOE_TODO + if (sc->sc_concentrator_name != NULL) { + PPPOE_ADD_16(p, PPPOE_TAG_ACNAME); + PPPOE_ADD_16(p, l2); + MEMCPY(p, sc->sc_concentrator_name, l2); + p += l2; + } +#endif /* PPPOE_TODO */ + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sizeof(sc)); + MEMCPY(p, &sc, sizeof sc); + + /* send pkt */ + return pppoe_output(sc, pb); +} + +static void +pppoe_timeout(void *arg) +{ + u32_t retry_wait; + int err; + struct pppoe_softc *sc = (struct pppoe_softc*)arg; + + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": timeout\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + + switch (sc->sc_state) { + case PPPOE_STATE_PADI_SENT: + /* + * We have two basic ways of retrying: + * - Quick retry mode: try a few times in short sequence + * - Slow retry mode: we already had a connection successfully + * established and will try infinitely 
(without user + * intervention) + * We only enter slow retry mode if IFF_LINK1 (aka autodial) + * is not set. + */ + if (sc->sc_padi_retried < 0xff) { + sc->sc_padi_retried++; + } + if (!sc->pcb->settings.persist && sc->sc_padi_retried >= PPPOE_DISC_MAXPADI) { +#if 0 + if ((sc->sc_sppp.pp_if.if_flags & IFF_LINK1) == 0) { + /* slow retry mode */ + retry_wait = PPPOE_SLOW_RETRY; + } else +#endif + { + pppoe_abort_connect(sc); + return; + } + } + /* initialize for quick retry mode */ + retry_wait = LWIP_MIN(PPPOE_DISC_TIMEOUT * sc->sc_padi_retried, PPPOE_SLOW_RETRY); + if ((err = pppoe_send_padi(sc)) != 0) { + sc->sc_padi_retried--; + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to transmit PADI, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(retry_wait, pppoe_timeout, sc); + break; + + case PPPOE_STATE_PADR_SENT: + sc->sc_padr_retried++; + if (sc->sc_padr_retried >= PPPOE_DISC_MAXPADR) { + MEMCPY(&sc->sc_dest, ethbroadcast.addr, sizeof(sc->sc_dest)); + sc->sc_state = PPPOE_STATE_PADI_SENT; + sc->sc_padr_retried = 0; + if ((err = pppoe_send_padi(sc)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADI, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOE_DISC_TIMEOUT * (1 + sc->sc_padi_retried), pppoe_timeout, sc); + return; + } + if ((err = pppoe_send_padr(sc)) != 0) { + sc->sc_padr_retried--; + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADR, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOE_DISC_TIMEOUT * (1 + sc->sc_padr_retried), pppoe_timeout, sc); + break; + default: + return; /* all done, work in peace */ + } +} + +/* Start a connection (i.e. 
initiate discovery phase) */ +static void +pppoe_connect(ppp_pcb *ppp, void *ctx) +{ + err_t err; + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + lcp_options *lcp_wo; + lcp_options *lcp_ao; +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_options *ipcp_wo; + ipcp_options *ipcp_ao; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + sc->sc_session = 0; + sc->sc_ac_cookie_len = 0; + sc->sc_padi_retried = 0; + sc->sc_padr_retried = 0; + /* changed to real address later */ + MEMCPY(&sc->sc_dest, ethbroadcast.addr, sizeof(sc->sc_dest)); +#ifdef PPPOE_SERVER + /* wait PADI if IFF_PASSIVE */ + if ((sc->sc_sppp.pp_if.if_flags & IFF_PASSIVE)) { + return 0; + } +#endif + + lcp_wo = &ppp->lcp_wantoptions; + lcp_wo->mru = sc->sc_ethif->mtu-PPPOE_HEADERLEN-2; /* two byte PPP protocol discriminator, then IP data */ + lcp_wo->neg_asyncmap = 0; + lcp_wo->neg_pcompression = 0; + lcp_wo->neg_accompression = 0; + lcp_wo->passive = 0; + lcp_wo->silent = 0; + + lcp_ao = &ppp->lcp_allowoptions; + lcp_ao->mru = sc->sc_ethif->mtu-PPPOE_HEADERLEN-2; /* two byte PPP protocol discriminator, then IP data */ + lcp_ao->neg_asyncmap = 0; + lcp_ao->neg_pcompression = 0; + lcp_ao->neg_accompression = 0; + +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_wo = &ppp->ipcp_wantoptions; + ipcp_wo->neg_vj = 0; + ipcp_wo->old_vj = 0; + + ipcp_ao = &ppp->ipcp_allowoptions; + ipcp_ao->neg_vj = 0; + ipcp_ao->old_vj = 0; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + /* save state, in case we fail to send PADI */ + sc->sc_state = PPPOE_STATE_PADI_SENT; + if ((err = pppoe_send_padi(sc)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADI, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + } + sys_timeout(PPPOE_DISC_TIMEOUT, pppoe_timeout, sc); +} + +/* disconnect */ +static void +pppoe_disconnect(ppp_pcb *ppp, void *ctx) +{ + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": disconnecting\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + if (sc->sc_state == PPPOE_STATE_SESSION) { + pppoe_send_padt(sc->sc_ethif, sc->sc_session, (const u8_t *)&sc->sc_dest); + } + + /* stop any timer, disconnect can be called while initiating is in progress */ + sys_untimeout(pppoe_timeout, sc); + sc->sc_state = PPPOE_STATE_INITIAL; +#ifdef PPPOE_SERVER + if (sc->sc_hunique) { + mem_free(sc->sc_hunique); + sc->sc_hunique = NULL; /* probably not necessary, if state is initial we shouldn't have to access hunique anyway */ + } + sc->sc_hunique_len = 0; /* probably not necessary, if state is initial we shouldn't have to access hunique anyway */ +#endif + ppp_link_end(ppp); /* notify upper layers */ + return; +} + +/* Connection attempt aborted */ +static void +pppoe_abort_connect(struct pppoe_softc *sc) +{ + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": could not establish connection\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + sc->sc_state = PPPOE_STATE_INITIAL; + ppp_link_failed(sc->pcb); /* notify upper layers */ +} + +/* Send a PADR packet */ +static err_t +pppoe_send_padr(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + size_t len; +#ifdef PPPOE_TODO + size_t l1 = 0; /* XXX: gcc */ +#endif /* PPPOE_TODO */ + + len = 2 + 2 + 2 + 2 + sizeof(sc); /* service name, host unique */ +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { /* service name tag maybe empty */ + l1 = strlen(sc->sc_service_name); + len += l1; + } +#endif /* PPPOE_TODO */ + if (sc->sc_ac_cookie_len > 0) { + len += 2 + 2 + 
sc->sc_ac_cookie_len; /* AC cookie */ + } + LWIP_ASSERT("sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff", + sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff); + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADR, 0, len); + PPPOE_ADD_16(p, PPPOE_TAG_SNAME); +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { + PPPOE_ADD_16(p, l1); + MEMCPY(p, sc->sc_service_name, l1); + p += l1; + } else +#endif /* PPPOE_TODO */ + { + PPPOE_ADD_16(p, 0); + } + if (sc->sc_ac_cookie_len > 0) { + PPPOE_ADD_16(p, PPPOE_TAG_ACCOOKIE); + PPPOE_ADD_16(p, sc->sc_ac_cookie_len); + MEMCPY(p, sc->sc_ac_cookie, sc->sc_ac_cookie_len); + p += sc->sc_ac_cookie_len; + } + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sizeof(sc)); + MEMCPY(p, &sc, sizeof sc); + + return pppoe_output(sc, pb); +} + +/* send a PADT packet */ +static err_t +pppoe_send_padt(struct netif *outgoing_if, u_int session, const u8_t *dest) +{ + struct pbuf *pb; + struct eth_hdr *ethhdr; + err_t res; + u8_t *p; + + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + if (pbuf_add_header(pb, sizeof(struct eth_hdr))) { + PPPDEBUG(LOG_ERR, ("pppoe: pppoe_send_padt: could not allocate room for PPPoE header\n")); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + ethhdr = (struct eth_hdr *)pb->payload; + ethhdr->type = PP_HTONS(ETHTYPE_PPPOEDISC); + MEMCPY(ðhdr->dest.addr, dest, sizeof(ethhdr->dest.addr)); + MEMCPY(ðhdr->src.addr, &outgoing_if->hwaddr, sizeof(ethhdr->src.addr)); + + p = (u8_t*)(ethhdr + 1); + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADT, session, 0); + + res = outgoing_if->linkoutput(outgoing_if, pb); + + pbuf_free(pb); + + return res; +} + +#ifdef PPPOE_SERVER +static err_t +pppoe_send_pado(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + size_t len; + + /* calc length */ + len = 0; + /* include ac_cookie */ + len += 2 + 2 + sizeof(sc); + /* include hunique */ + len += 2 + 2 + sc->sc_hunique_len; + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADO, 0, len); + PPPOE_ADD_16(p, PPPOE_TAG_ACCOOKIE); + PPPOE_ADD_16(p, sizeof(sc)); + MEMCPY(p, &sc, sizeof(sc)); + p += sizeof(sc); + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sc->sc_hunique_len); + MEMCPY(p, sc->sc_hunique, sc->sc_hunique_len); + return pppoe_output(sc, pb); +} + +static err_t +pppoe_send_pads(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + size_t len, l1 = 0; /* XXX: gcc */ + + sc->sc_session = mono_time.tv_sec % 0xff + 1; + /* calc length */ + len = 0; + /* include hunique */ + len += 2 + 2 + 2 + 2 + sc->sc_hunique_len; /* service name, host unique*/ + if (sc->sc_service_name != NULL) { /* service name tag maybe empty */ + l1 = strlen(sc->sc_service_name); + len += l1; + } + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADS, sc->sc_session, len); + PPPOE_ADD_16(p, PPPOE_TAG_SNAME); + if (sc->sc_service_name != NULL) { + PPPOE_ADD_16(p, l1); + MEMCPY(p, 
sc->sc_service_name, l1); + p += l1; + } else { + PPPOE_ADD_16(p, 0); + } + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sc->sc_hunique_len); + MEMCPY(p, sc->sc_hunique, sc->sc_hunique_len); + return pppoe_output(sc, pb); +} +#endif + +static err_t +pppoe_xmit(struct pppoe_softc *sc, struct pbuf *pb) +{ + u8_t *p; + size_t len; + + len = pb->tot_len; + + /* make room for PPPoE header - should not fail */ + if (pbuf_add_header(pb, PPPOE_HEADERLEN) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe: %c%c%"U16_F": pppoe_xmit: could not allocate room for PPPoE header\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, 0, sc->sc_session, len); + + return pppoe_output(sc, pb); +} + +#if 0 /*def PFIL_HOOKS*/ +static int +pppoe_ifattach_hook(void *arg, struct pbuf **mp, struct netif *ifp, int dir) +{ + struct pppoe_softc *sc; + int s; + + if (mp != (struct pbuf **)PFIL_IFNET_DETACH) { + return 0; + } + + LIST_FOREACH(sc, &pppoe_softc_list, sc_list) { + if (sc->sc_ethif != ifp) { + continue; + } + if (sc->sc_sppp.pp_if.if_flags & IFF_UP) { + sc->sc_sppp.pp_if.if_flags &= ~(IFF_UP|IFF_RUNNING); + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": ethernet interface detached, going down\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + } + sc->sc_ethif = NULL; + pppoe_clear_softc(sc, "ethernet interface detached"); + } + + return 0; +} +#endif + +#if 0 /* UNUSED */ +static void +pppoe_clear_softc(struct pppoe_softc *sc, const char *message) +{ + LWIP_UNUSED_ARG(message); + + /* stop timer */ + sys_untimeout(pppoe_timeout, sc); + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": session 0x%x terminated, %s\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, sc->sc_session, message)); + sc->sc_state = PPPOE_STATE_INITIAL; + ppp_link_end(sc->pcb); /* notify upper layers - /!\ dangerous /!\ - see pppoe_disc_input() */ +} +#endif /* UNUSED */ +#endif /* PPP_SUPPORT && PPPOE_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c new file mode 100644 index 00000000..4c4557fc --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c @@ -0,0 +1,1159 @@ +/** + * @file + * Network Point to Point Protocol over Layer 2 Tunneling Protocol program file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +/* + * L2TP Support status: + * + * Supported: + * - L2TPv2 (PPP over L2TP, a.k.a. UDP tunnels) + * - LAC + * + * Not supported: + * - LNS (require PPP server support) + * - L2TPv3 ethernet pseudowires + * - L2TPv3 VLAN pseudowire + * - L2TPv3 PPP pseudowires + * - L2TPv3 IP encapsulation + * - L2TPv3 IP pseudowire + * - L2TP tunnel switching - http://tools.ietf.org/html/draft-ietf-l2tpext-tunnel-switching-08 + * - Multiple tunnels per UDP socket, as well as multiple sessions per tunnel + * - Hidden AVPs + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOL2TP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/memp.h" +#include "lwip/netif.h" +#include "lwip/udp.h" +#include "lwip/snmp.h" + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/pppol2tp.h" +#include "netif/ppp/pppcrypt.h" +#include "netif/ppp/magic.h" + +/* Memory pool */ +LWIP_MEMPOOL_DECLARE(PPPOL2TP_PCB, MEMP_NUM_PPPOL2TP_INTERFACES, sizeof(pppol2tp_pcb), "PPPOL2TP_PCB") + +/* callbacks called from PPP core */ +static err_t pppol2tp_write(ppp_pcb *ppp, void *ctx, struct pbuf *p); +static err_t pppol2tp_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol); +static err_t pppol2tp_destroy(ppp_pcb *ppp, void *ctx); /* Destroy a L2TP control block */ +static void pppol2tp_connect(ppp_pcb *ppp, void *ctx); /* Be a LAC, connect to a LNS. */ +static void pppol2tp_disconnect(ppp_pcb *ppp, void *ctx); /* Disconnect */ + + /* Prototypes for procedures local to this file. */ +static void pppol2tp_input(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); +static void pppol2tp_dispatch_control_packet(pppol2tp_pcb *l2tp, u16_t port, struct pbuf *p, u16_t ns, u16_t nr); +static void pppol2tp_timeout(void *arg); +static void pppol2tp_abort_connect(pppol2tp_pcb *l2tp); +static err_t pppol2tp_send_sccrq(pppol2tp_pcb *l2tp); +static err_t pppol2tp_send_scccn(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_send_icrq(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_send_iccn(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_send_zlb(pppol2tp_pcb *l2tp, u16_t ns, u16_t nr); +static err_t pppol2tp_send_stopccn(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_xmit(pppol2tp_pcb *l2tp, struct pbuf *pb); +static err_t pppol2tp_udp_send(pppol2tp_pcb *l2tp, struct pbuf *pb); + +/* Callbacks structure for PPP core */ +static const struct link_callbacks pppol2tp_callbacks = { + pppol2tp_connect, +#if PPP_SERVER + NULL, +#endif /* PPP_SERVER */ + pppol2tp_disconnect, + pppol2tp_destroy, + pppol2tp_write, + pppol2tp_netif_output, + NULL, + NULL +}; + + +/* Create a new L2TP session. 
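+ *
+ * A minimal usage sketch (not part of the original file; the LNS address,
+ * the standard L2TP port 1701 and the ppp_netif/status_cb names are
+ * assumptions made for the example):
+ *
+ *   ip_addr_t lns;
+ *   IP_ADDR4(&lns, 10, 0, 0, 1);
+ *   ppp_pcb *ppp = pppol2tp_create(&ppp_netif, NULL, &lns, 1701,
+ *                                  NULL, 0, status_cb, NULL);
+ *   if (ppp != NULL) {
+ *     ppp_connect(ppp, 0);
+ *   }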
*/ +ppp_pcb *pppol2tp_create(struct netif *pppif, + struct netif *netif, const ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) { + ppp_pcb *ppp; + pppol2tp_pcb *l2tp; + struct udp_pcb *udp; +#if !PPPOL2TP_AUTH_SUPPORT + LWIP_UNUSED_ARG(secret); + LWIP_UNUSED_ARG(secret_len); +#endif /* !PPPOL2TP_AUTH_SUPPORT */ + + if (ipaddr == NULL) { + goto ipaddr_check_failed; + } + + l2tp = (pppol2tp_pcb *)LWIP_MEMPOOL_ALLOC(PPPOL2TP_PCB); + if (l2tp == NULL) { + goto memp_malloc_l2tp_failed; + } + + udp = udp_new_ip_type(IP_GET_TYPE(ipaddr)); + if (udp == NULL) { + goto udp_new_failed; + } + udp_recv(udp, pppol2tp_input, l2tp); + + ppp = ppp_new(pppif, &pppol2tp_callbacks, l2tp, link_status_cb, ctx_cb); + if (ppp == NULL) { + goto ppp_new_failed; + } + + memset(l2tp, 0, sizeof(pppol2tp_pcb)); + l2tp->phase = PPPOL2TP_STATE_INITIAL; + l2tp->ppp = ppp; + l2tp->udp = udp; + l2tp->netif = netif; + ip_addr_copy(l2tp->remote_ip, *ipaddr); + l2tp->remote_port = port; +#if PPPOL2TP_AUTH_SUPPORT + l2tp->secret = secret; + l2tp->secret_len = secret_len; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + return ppp; + +ppp_new_failed: + udp_remove(udp); +udp_new_failed: + LWIP_MEMPOOL_FREE(PPPOL2TP_PCB, l2tp); +memp_malloc_l2tp_failed: +ipaddr_check_failed: + return NULL; +} + +/* Called by PPP core */ +static err_t pppol2tp_write(ppp_pcb *ppp, void *ctx, struct pbuf *p) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + struct pbuf *ph; /* UDP + L2TP header */ + err_t ret; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + ph = pbuf_alloc(PBUF_TRANSPORT, (u16_t)(PPPOL2TP_OUTPUT_DATA_HEADER_LEN), PBUF_RAM); + if(!ph) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(p); + return ERR_MEM; + } + + pbuf_remove_header(ph, PPPOL2TP_OUTPUT_DATA_HEADER_LEN); /* hide L2TP header */ + pbuf_cat(ph, p); +#if MIB2_STATS + tot_len = ph->tot_len; +#endif /* MIB2_STATS */ + + ret = pppol2tp_xmit(l2tp, ph); + if (ret != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ret; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, (u16_t)tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +/* Called by PPP core */ +static err_t pppol2tp_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + struct pbuf *pb; + u8_t *pl; + err_t err; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + /* @todo: try to use pbuf_header() here! 
*/ + pb = pbuf_alloc(PBUF_TRANSPORT, PPPOL2TP_OUTPUT_DATA_HEADER_LEN + sizeof(protocol), PBUF_RAM); + if(!pb) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ERR_MEM; + } + + pbuf_remove_header(pb, PPPOL2TP_OUTPUT_DATA_HEADER_LEN); + + pl = (u8_t*)pb->payload; + PUTSHORT(protocol, pl); + + pbuf_chain(pb, p); +#if MIB2_STATS + tot_len = pb->tot_len; +#endif /* MIB2_STATS */ + + if( (err = pppol2tp_xmit(l2tp, pb)) != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return err; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +/* Destroy a L2TP control block */ +static err_t pppol2tp_destroy(ppp_pcb *ppp, void *ctx) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + LWIP_UNUSED_ARG(ppp); + + sys_untimeout(pppol2tp_timeout, l2tp); + udp_remove(l2tp->udp); + LWIP_MEMPOOL_FREE(PPPOL2TP_PCB, l2tp); + return ERR_OK; +} + +/* Be a LAC, connect to a LNS. */ +static void pppol2tp_connect(ppp_pcb *ppp, void *ctx) { + err_t err; + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + lcp_options *lcp_wo; + lcp_options *lcp_ao; +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_options *ipcp_wo; + ipcp_options *ipcp_ao; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + l2tp->tunnel_port = l2tp->remote_port; + l2tp->our_ns = 0; + l2tp->peer_nr = 0; + l2tp->peer_ns = 0; + l2tp->source_tunnel_id = 0; + l2tp->remote_tunnel_id = 0; + l2tp->source_session_id = 0; + l2tp->remote_session_id = 0; + /* l2tp->*_retried are cleared when used */ + + lcp_wo = &ppp->lcp_wantoptions; + lcp_wo->mru = PPPOL2TP_DEFMRU; + lcp_wo->neg_asyncmap = 0; + lcp_wo->neg_pcompression = 0; + lcp_wo->neg_accompression = 0; + lcp_wo->passive = 0; + lcp_wo->silent = 0; + + lcp_ao = &ppp->lcp_allowoptions; + lcp_ao->mru = PPPOL2TP_DEFMRU; + lcp_ao->neg_asyncmap = 0; + lcp_ao->neg_pcompression = 0; + lcp_ao->neg_accompression = 0; + +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_wo = &ppp->ipcp_wantoptions; + ipcp_wo->neg_vj = 0; + ipcp_wo->old_vj = 0; + + ipcp_ao = &ppp->ipcp_allowoptions; + ipcp_ao->neg_vj = 0; + ipcp_ao->old_vj = 0; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + /* Listen to a random source port, we need to do that instead of using udp_connect() + * because the L2TP LNS might answer with its own random source port (!= 1701) + */ +#if LWIP_IPV6 + if (IP_IS_V6_VAL(l2tp->udp->local_ip)) { + udp_bind(l2tp->udp, IP6_ADDR_ANY, 0); + } else +#endif /* LWIP_IPV6 */ + udp_bind(l2tp->udp, IP_ADDR_ANY, 0); + +#if PPPOL2TP_AUTH_SUPPORT + /* Generate random vector */ + if (l2tp->secret != NULL) { + magic_random_bytes(l2tp->secret_rv, sizeof(l2tp->secret_rv)); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + do { + l2tp->remote_tunnel_id = magic(); + } while(l2tp->remote_tunnel_id == 0); + /* save state, in case we fail to send SCCRQ */ + l2tp->sccrq_retried = 0; + l2tp->phase = PPPOL2TP_STATE_SCCRQ_SENT; + if ((err = pppol2tp_send_sccrq(l2tp)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCRQ, error=%d\n", err)); + } + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); +} + +/* Disconnect */ +static void pppol2tp_disconnect(ppp_pcb *ppp, void *ctx) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + + l2tp->our_ns++; + pppol2tp_send_stopccn(l2tp, l2tp->our_ns); + + /* stop any timer, disconnect can be called while initiating is in progress */ + sys_untimeout(pppol2tp_timeout, l2tp); + l2tp->phase = 
PPPOL2TP_STATE_INITIAL; + ppp_link_end(ppp); /* notify upper layers */ +} + +/* UDP Callback for incoming IPv4 L2TP frames */ +static void pppol2tp_input(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb*)arg; + u16_t hflags, hlen, len=0, tunnel_id=0, session_id=0, ns=0, nr=0, offset=0; + u8_t *inp; + LWIP_UNUSED_ARG(pcb); + + /* we can't unbound a UDP pcb, thus we can still receive UDP frames after the link is closed */ + if (l2tp->phase < PPPOL2TP_STATE_SCCRQ_SENT) { + goto free_and_return; + } + + if (!ip_addr_cmp(&l2tp->remote_ip, addr)) { + goto free_and_return; + } + + /* discard packet if port mismatch, but only if we received a SCCRP */ + if (l2tp->phase > PPPOL2TP_STATE_SCCRQ_SENT && l2tp->tunnel_port != port) { + goto free_and_return; + } + + /* printf("-----------\nL2TP INPUT, %d\n", p->len); */ + + /* L2TP header */ + if (p->len < sizeof(hflags) + sizeof(tunnel_id) + sizeof(session_id) ) { + goto packet_too_short; + } + + inp = (u8_t*)p->payload; + GETSHORT(hflags, inp); + + if (hflags & PPPOL2TP_HEADERFLAG_CONTROL) { + /* check mandatory flags for a control packet */ + if ( (hflags & PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY) != PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: mandatory header flags for control packet not set\n")); + goto free_and_return; + } + /* check forbidden flags for a control packet */ + if (hflags & PPPOL2TP_HEADERFLAG_CONTROL_FORBIDDEN) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: forbidden header flags for control packet found\n")); + goto free_and_return; + } + } else { + /* check mandatory flags for a data packet */ + if ( (hflags & PPPOL2TP_HEADERFLAG_DATA_MANDATORY) != PPPOL2TP_HEADERFLAG_DATA_MANDATORY) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: mandatory header flags for data packet not set\n")); + goto free_and_return; + } + } + + /* Expected header size */ + hlen = sizeof(hflags) + sizeof(tunnel_id) + sizeof(session_id); + if (hflags & PPPOL2TP_HEADERFLAG_LENGTH) { + hlen += sizeof(len); + } + if (hflags & PPPOL2TP_HEADERFLAG_SEQUENCE) { + hlen += sizeof(ns) + sizeof(nr); + } + if (hflags & PPPOL2TP_HEADERFLAG_OFFSET) { + hlen += sizeof(offset); + } + if (p->len < hlen) { + goto packet_too_short; + } + + if (hflags & PPPOL2TP_HEADERFLAG_LENGTH) { + GETSHORT(len, inp); + if (p->len < len || len < hlen) { + goto packet_too_short; + } + } + GETSHORT(tunnel_id, inp); + GETSHORT(session_id, inp); + if (hflags & PPPOL2TP_HEADERFLAG_SEQUENCE) { + GETSHORT(ns, inp); + GETSHORT(nr, inp); + } + if (hflags & PPPOL2TP_HEADERFLAG_OFFSET) { + GETSHORT(offset, inp) + if (offset > 4096) { /* don't be fooled with large offset which might overflow hlen */ + PPPDEBUG(LOG_DEBUG, ("pppol2tp: strange packet received, offset=%d\n", offset)); + goto free_and_return; + } + hlen += offset; + if (p->len < hlen) { + goto packet_too_short; + } + INCPTR(offset, inp); + } + + /* printf("HLEN = %d\n", hlen); */ + + /* skip L2TP header */ + if (pbuf_remove_header(p, hlen) != 0) { + goto free_and_return; + } + + /* printf("LEN=%d, TUNNEL_ID=%d, SESSION_ID=%d, NS=%d, NR=%d, OFFSET=%d\n", len, tunnel_id, session_id, ns, nr, offset); */ + PPPDEBUG(LOG_DEBUG, ("pppol2tp: input packet, len=%"U16_F", tunnel=%"U16_F", session=%"U16_F", ns=%"U16_F", nr=%"U16_F"\n", + len, tunnel_id, session_id, ns, nr)); + + /* Control packet */ + if (hflags & PPPOL2TP_HEADERFLAG_CONTROL) { + pppol2tp_dispatch_control_packet(l2tp, port, p, ns, nr); + goto free_and_return; + } + + /* Data packet */ + if(l2tp->phase != 
PPPOL2TP_STATE_DATA) { + goto free_and_return; + } + if(tunnel_id != l2tp->remote_tunnel_id) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: tunnel ID mismatch, assigned=%d, received=%d\n", l2tp->remote_tunnel_id, tunnel_id)); + goto free_and_return; + } + if(session_id != l2tp->remote_session_id) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: session ID mismatch, assigned=%d, received=%d\n", l2tp->remote_session_id, session_id)); + goto free_and_return; + } + /* + * skip address & flags if necessary + * + * RFC 2661 does not specify whether the PPP frame in the L2TP payload should + * have a HDLC header or not. We handle both cases for compatibility. + */ + if (p->len >= 2) { + GETSHORT(hflags, inp); + if (hflags == 0xff03) { + pbuf_remove_header(p, 2); + } + } + /* Dispatch the packet thereby consuming it. */ + ppp_input(l2tp->ppp, p); + return; + +packet_too_short: + PPPDEBUG(LOG_DEBUG, ("pppol2tp: packet too short: %d\n", p->len)); +free_and_return: + pbuf_free(p); +} + +/* L2TP Control packet entry point */ +static void pppol2tp_dispatch_control_packet(pppol2tp_pcb *l2tp, u16_t port, struct pbuf *p, u16_t ns, u16_t nr) { + u8_t *inp; + u16_t avplen, avpflags, vendorid, attributetype, messagetype=0; + err_t err; +#if PPPOL2TP_AUTH_SUPPORT + lwip_md5_context md5_ctx; + u8_t md5_hash[16]; + u8_t challenge_id = 0; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + /* printf("L2TP CTRL INPUT, ns=%d, nr=%d, len=%d\n", ns, nr, p->len); */ + + /* Drop unexpected packet */ + if (ns != l2tp->peer_ns) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: drop unexpected packet: received NS=%d, expected NS=%d\n", ns, l2tp->peer_ns)); + /* + * In order to ensure that all messages are acknowledged properly + * (particularly in the case of a lost ZLB ACK message), receipt + * of duplicate messages MUST be acknowledged. + * + * In this very special case we Ack a packet we previously received. + * Therefore our NS is the NR we just received. And our NR is the + * NS we just received plus one. 
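+     *
+     * The signed 16-bit cast below keeps the comparison valid across
+     * sequence-number wrap-around: only messages older than the expected
+     * NS are re-acknowledged; unexpected packets "from the future" are
+     * dropped without an acknowledgment.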
+ */ + if ((s16_t)(ns - l2tp->peer_ns) < 0) { + pppol2tp_send_zlb(l2tp, nr, ns+1); + } + return; + } + + l2tp->peer_nr = nr; + + /* Handle the special case of the ICCN acknowledge */ + if (l2tp->phase == PPPOL2TP_STATE_ICCN_SENT && (s16_t)(l2tp->peer_nr - l2tp->our_ns) > 0) { + l2tp->phase = PPPOL2TP_STATE_DATA; + sys_untimeout(pppol2tp_timeout, l2tp); + ppp_start(l2tp->ppp); /* notify upper layers */ + } + + /* ZLB packets */ + if (p->tot_len == 0) { + return; + } + /* A ZLB packet does not consume a NS slot thus we don't record the NS value for ZLB packets */ + l2tp->peer_ns = ns+1; + + p = pbuf_coalesce(p, PBUF_RAW); + inp = (u8_t*)p->payload; + /* Decode AVPs */ + while (p->len > 0) { + if (p->len < sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype) ) { + goto packet_too_short; + } + GETSHORT(avpflags, inp); + avplen = avpflags & PPPOL2TP_AVPHEADERFLAG_LENGTHMASK; + /* printf("AVPLEN = %d\n", avplen); */ + if (p->len < avplen || avplen < sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype)) { + goto packet_too_short; + } + GETSHORT(vendorid, inp); + GETSHORT(attributetype, inp); + avplen -= sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype); + + /* Message type must be the first AVP */ + if (messagetype == 0) { + if (attributetype != 0 || vendorid != 0 || avplen != sizeof(messagetype) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: message type must be the first AVP\n")); + return; + } + GETSHORT(messagetype, inp); + /* printf("Message type = %d\n", messagetype); */ + switch(messagetype) { + /* Start Control Connection Reply */ + case PPPOL2TP_MESSAGETYPE_SCCRP: + /* Only accept SCCRP packet if we sent a SCCRQ */ + if (l2tp->phase != PPPOL2TP_STATE_SCCRQ_SENT) { + goto send_zlb; + } + break; + /* Incoming Call Reply */ + case PPPOL2TP_MESSAGETYPE_ICRP: + /* Only accept ICRP packet if we sent a IRCQ */ + if (l2tp->phase != PPPOL2TP_STATE_ICRQ_SENT) { + goto send_zlb; + } + break; + /* Stop Control Connection Notification */ + case PPPOL2TP_MESSAGETYPE_STOPCCN: + pppol2tp_send_zlb(l2tp, l2tp->our_ns+1, l2tp->peer_ns); /* Ack the StopCCN before we switch to down state */ + if (l2tp->phase < PPPOL2TP_STATE_DATA) { + pppol2tp_abort_connect(l2tp); + } else if (l2tp->phase == PPPOL2TP_STATE_DATA) { + /* Don't disconnect here, we let the LCP Echo/Reply find the fact + * that PPP session is down. Asking the PPP stack to end the session + * require strict checking about the PPP phase to prevent endless + * disconnection loops. 
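+           *
+           * This mirrors the PADT handling in pppoe.c: the dead tunnel is
+           * eventually detected through LCP keepalive (Echo-Request)
+           * failures, provided LCP keepalive is enabled in the PPP settings.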
+ */ + } + return; + default: + break; + } + goto nextavp; + } + + /* Skip proprietary L2TP extensions */ + if (vendorid != 0) { + goto skipavp; + } + + switch (messagetype) { + /* Start Control Connection Reply */ + case PPPOL2TP_MESSAGETYPE_SCCRP: + switch (attributetype) { + case PPPOL2TP_AVPTYPE_TUNNELID: + if (avplen != sizeof(l2tp->source_tunnel_id) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: AVP Assign tunnel ID length check failed\n")); + return; + } + GETSHORT(l2tp->source_tunnel_id, inp); + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Assigned tunnel ID %"U16_F"\n", l2tp->source_tunnel_id)); + goto nextavp; +#if PPPOL2TP_AUTH_SUPPORT + case PPPOL2TP_AVPTYPE_CHALLENGE: + if (avplen == 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Challenge length check failed\n")); + return; + } + if (l2tp->secret == NULL) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Received challenge from peer and no secret key available\n")); + pppol2tp_abort_connect(l2tp); + return; + } + /* Generate hash of ID, secret, challenge */ + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + challenge_id = PPPOL2TP_MESSAGETYPE_SCCCN; + lwip_md5_update(&md5_ctx, &challenge_id, 1); + lwip_md5_update(&md5_ctx, l2tp->secret, l2tp->secret_len); + lwip_md5_update(&md5_ctx, inp, avplen); + lwip_md5_finish(&md5_ctx, l2tp->challenge_hash); + lwip_md5_free(&md5_ctx); + l2tp->send_challenge = 1; + goto skipavp; + case PPPOL2TP_AVPTYPE_CHALLENGERESPONSE: + if (avplen != PPPOL2TP_AVPTYPE_CHALLENGERESPONSE_SIZE) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: AVP Challenge Response length check failed\n")); + return; + } + /* Generate hash of ID, secret, challenge */ + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + challenge_id = PPPOL2TP_MESSAGETYPE_SCCRP; + lwip_md5_update(&md5_ctx, &challenge_id, 1); + lwip_md5_update(&md5_ctx, l2tp->secret, l2tp->secret_len); + lwip_md5_update(&md5_ctx, l2tp->secret_rv, sizeof(l2tp->secret_rv)); + lwip_md5_finish(&md5_ctx, md5_hash); + lwip_md5_free(&md5_ctx); + if ( memcmp(inp, md5_hash, sizeof(md5_hash)) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Received challenge response from peer and secret key do not match\n")); + pppol2tp_abort_connect(l2tp); + return; + } + goto skipavp; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + default: + break; + } + break; + /* Incoming Call Reply */ + case PPPOL2TP_MESSAGETYPE_ICRP: + switch (attributetype) { + case PPPOL2TP_AVPTYPE_SESSIONID: + if (avplen != sizeof(l2tp->source_session_id) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: AVP Assign session ID length check failed\n")); + return; + } + GETSHORT(l2tp->source_session_id, inp); + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Assigned session ID %"U16_F"\n", l2tp->source_session_id)); + goto nextavp; + default: + break; + } + break; + default: + break; + } + +skipavp: + INCPTR(avplen, inp); +nextavp: + /* printf("AVP Found, vendor=%d, attribute=%d, len=%d\n", vendorid, attributetype, avplen); */ + /* next AVP */ + if (pbuf_remove_header(p, avplen + sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype)) != 0) { + return; + } + } + + switch(messagetype) { + /* Start Control Connection Reply */ + case PPPOL2TP_MESSAGETYPE_SCCRP: + do { + l2tp->remote_session_id = magic(); + } while(l2tp->remote_session_id == 0); + l2tp->tunnel_port = port; /* LNS server might have chosen its own local port */ + l2tp->icrq_retried = 0; + l2tp->phase = PPPOL2TP_STATE_ICRQ_SENT; + l2tp->our_ns++; + if ((err = pppol2tp_send_scccn(l2tp, l2tp->our_ns)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ 
+ } + l2tp->our_ns++; + if ((err = pppol2tp_send_icrq(l2tp, l2tp->our_ns)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICRQ, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_untimeout(pppol2tp_timeout, l2tp); + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + /* Incoming Call Reply */ + case PPPOL2TP_MESSAGETYPE_ICRP: + l2tp->iccn_retried = 0; + l2tp->phase = PPPOL2TP_STATE_ICCN_SENT; + l2tp->our_ns++; + if ((err = pppol2tp_send_iccn(l2tp, l2tp->our_ns)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_untimeout(pppol2tp_timeout, l2tp); + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + /* Unhandled packet, send ZLB ACK */ + default: + goto send_zlb; + } + return; + +send_zlb: + pppol2tp_send_zlb(l2tp, l2tp->our_ns+1, l2tp->peer_ns); + return; +packet_too_short: + PPPDEBUG(LOG_DEBUG, ("pppol2tp: packet too short: %d\n", p->len)); +} + +/* L2TP Timeout handler */ +static void pppol2tp_timeout(void *arg) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb*)arg; + err_t err; + u32_t retry_wait; + + PPPDEBUG(LOG_DEBUG, ("pppol2tp: timeout\n")); + + switch (l2tp->phase) { + case PPPOL2TP_STATE_SCCRQ_SENT: + /* backoff wait */ + if (l2tp->sccrq_retried < 0xff) { + l2tp->sccrq_retried++; + } + if (!l2tp->ppp->settings.persist && l2tp->sccrq_retried >= PPPOL2TP_MAXSCCRQ) { + pppol2tp_abort_connect(l2tp); + return; + } + retry_wait = LWIP_MIN(PPPOL2TP_CONTROL_TIMEOUT * l2tp->sccrq_retried, PPPOL2TP_SLOW_RETRY); + PPPDEBUG(LOG_DEBUG, ("pppol2tp: sccrq_retried=%d\n", l2tp->sccrq_retried)); + if ((err = pppol2tp_send_sccrq(l2tp)) != 0) { + l2tp->sccrq_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCRQ, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(retry_wait, pppol2tp_timeout, l2tp); + break; + + case PPPOL2TP_STATE_ICRQ_SENT: + l2tp->icrq_retried++; + if (l2tp->icrq_retried >= PPPOL2TP_MAXICRQ) { + pppol2tp_abort_connect(l2tp); + return; + } + PPPDEBUG(LOG_DEBUG, ("pppol2tp: icrq_retried=%d\n", l2tp->icrq_retried)); + if ((s16_t)(l2tp->peer_nr - l2tp->our_ns) < 0) { /* the SCCCN was not acknowledged */ + if ((err = pppol2tp_send_scccn(l2tp, l2tp->our_ns -1)) != 0) { + l2tp->icrq_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + } + } + if ((err = pppol2tp_send_icrq(l2tp, l2tp->our_ns)) != 0) { + l2tp->icrq_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICRQ, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + + case PPPOL2TP_STATE_ICCN_SENT: + l2tp->iccn_retried++; + if (l2tp->iccn_retried >= PPPOL2TP_MAXICCN) { + pppol2tp_abort_connect(l2tp); + return; + } + PPPDEBUG(LOG_DEBUG, ("pppol2tp: iccn_retried=%d\n", l2tp->iccn_retried)); + if ((err = pppol2tp_send_iccn(l2tp, l2tp->our_ns)) != 0) { + l2tp->iccn_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + + default: + return; /* all done, work in peace */ + } +} + +/* Connection attempt aborted */ +static void pppol2tp_abort_connect(pppol2tp_pcb *l2tp) { + 
PPPDEBUG(LOG_DEBUG, ("pppol2tp: could not establish connection\n")); + l2tp->phase = PPPOL2TP_STATE_INITIAL; + ppp_link_failed(l2tp->ppp); /* notify upper layers */ +} + +/* Initiate a new tunnel */ +static err_t pppol2tp_send_sccrq(pppol2tp_pcb *l2tp) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8 +8 +10 +10 +6+sizeof(PPPOL2TP_HOSTNAME)-1 +6+sizeof(PPPOL2TP_VENDORNAME)-1 +8 +8; +#if PPPOL2TP_AUTH_SUPPORT + if (l2tp->secret != NULL) { + len += 6 + sizeof(l2tp->secret_rv); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(0, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(0, p); /* NS Sequence number - to peer */ + PUTSHORT(0, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_SCCRQ, p); /* Attribute value: Message type: SCCRQ */ + + /* AVP - L2TP Version */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_VERSION, p); /* Attribute type: Version */ + PUTSHORT(PPPOL2TP_VERSION, p); /* Attribute value: L2TP Version */ + + /* AVP - Framing capabilities */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_FRAMINGCAPABILITIES, p); /* Attribute type: Framing capabilities */ + PUTLONG(PPPOL2TP_FRAMINGCAPABILITIES, p); /* Attribute value: Framing capabilities */ + + /* AVP - Bearer capabilities */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_BEARERCAPABILITIES, p); /* Attribute type: Bearer capabilities */ + PUTLONG(PPPOL2TP_BEARERCAPABILITIES, p); /* Attribute value: Bearer capabilities */ + + /* AVP - Host name */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 6+sizeof(PPPOL2TP_HOSTNAME)-1, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_HOSTNAME, p); /* Attribute type: Hostname */ + MEMCPY(p, PPPOL2TP_HOSTNAME, sizeof(PPPOL2TP_HOSTNAME)-1); /* Attribute value: Hostname */ + INCPTR(sizeof(PPPOL2TP_HOSTNAME)-1, p); + + /* AVP - Vendor name */ + PUTSHORT(6+sizeof(PPPOL2TP_VENDORNAME)-1, p); /* len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_VENDORNAME, p); /* Attribute type: Vendor name */ + MEMCPY(p, PPPOL2TP_VENDORNAME, sizeof(PPPOL2TP_VENDORNAME)-1); /* Attribute value: Vendor name */ + INCPTR(sizeof(PPPOL2TP_VENDORNAME)-1, p); + + /* AVP - Assign tunnel ID */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_TUNNELID, p); /* Attribute type: Tunnel ID */ + PUTSHORT(l2tp->remote_tunnel_id, p); /* Attribute value: Tunnel ID */ + + /* AVP - Receive window size */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* 
Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_RECEIVEWINDOWSIZE, p); /* Attribute type: Receive window size */ + PUTSHORT(PPPOL2TP_RECEIVEWINDOWSIZE, p); /* Attribute value: Receive window size */ + +#if PPPOL2TP_AUTH_SUPPORT + /* AVP - Challenge */ + if (l2tp->secret != NULL) { + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 6 + sizeof(l2tp->secret_rv), p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_CHALLENGE, p); /* Attribute type: Challenge */ + MEMCPY(p, l2tp->secret_rv, sizeof(l2tp->secret_rv)); /* Attribute value: Random vector */ + INCPTR(sizeof(l2tp->secret_rv), p); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Complete tunnel establishment */ +static err_t pppol2tp_send_scccn(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8; +#if PPPOL2TP_AUTH_SUPPORT + if (l2tp->send_challenge) { + len += 6 + sizeof(l2tp->challenge_hash); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_SCCCN, p); /* Attribute value: Message type: SCCCN */ + +#if PPPOL2TP_AUTH_SUPPORT + /* AVP - Challenge response */ + if (l2tp->send_challenge) { + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 6 + sizeof(l2tp->challenge_hash), p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_CHALLENGERESPONSE, p); /* Attribute type: Challenge response */ + MEMCPY(p, l2tp->challenge_hash, sizeof(l2tp->challenge_hash)); /* Attribute value: Computed challenge */ + INCPTR(sizeof(l2tp->challenge_hash), p); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Initiate a new session */ +static err_t pppol2tp_send_icrq(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + u32_t serialnumber; + + /* calculate UDP packet length */ + len = 12 +8 +8 +10; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_ICRQ, p); /* 
Attribute value: Message type: ICRQ */ + + /* AVP - Assign session ID */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_SESSIONID, p); /* Attribute type: Session ID */ + PUTSHORT(l2tp->remote_session_id, p); /* Attribute value: Session ID */ + + /* AVP - Call Serial Number */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_CALLSERIALNUMBER, p); /* Attribute type: Serial number */ + serialnumber = magic(); + PUTLONG(serialnumber, p); /* Attribute value: Serial number */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Complete tunnel establishment */ +static err_t pppol2tp_send_iccn(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8 +10 +10; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(l2tp->source_session_id, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_ICCN, p); /* Attribute value: Message type: ICCN */ + + /* AVP - Framing type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_FRAMINGTYPE, p); /* Attribute type: Framing type */ + PUTLONG(PPPOL2TP_FRAMINGTYPE, p); /* Attribute value: Framing type */ + + /* AVP - TX Connect speed */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_TXCONNECTSPEED, p); /* Attribute type: TX Connect speed */ + PUTLONG(PPPOL2TP_TXCONNECTSPEED, p); /* Attribute value: TX Connect speed */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Send a ZLB ACK packet */ +static err_t pppol2tp_send_zlb(pppol2tp_pcb *l2tp, u16_t ns, u16_t nr) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(nr, p); /* NR Sequence number - expected for peer */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Send a StopCCN packet */ +static err_t pppol2tp_send_stopccn(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8 +8 +8; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, 
PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_STOPCCN, p); /* Attribute value: Message type: StopCCN */ + + /* AVP - Assign tunnel ID */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_TUNNELID, p); /* Attribute type: Tunnel ID */ + PUTSHORT(l2tp->remote_tunnel_id, p); /* Attribute value: Tunnel ID */ + + /* AVP - Result code */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_RESULTCODE, p); /* Attribute type: Result code */ + PUTSHORT(PPPOL2TP_RESULTCODE, p); /* Attribute value: Result code */ + + return pppol2tp_udp_send(l2tp, pb); +} + +static err_t pppol2tp_xmit(pppol2tp_pcb *l2tp, struct pbuf *pb) { + u8_t *p; + + /* make room for L2TP header - should not fail */ + if (pbuf_add_header(pb, PPPOL2TP_OUTPUT_DATA_HEADER_LEN) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppol2tp: pppol2tp_pcb: could not allocate room for L2TP header\n")); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + + p = (u8_t*)pb->payload; + PUTSHORT(PPPOL2TP_HEADERFLAG_DATA_MANDATORY, p); + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(l2tp->source_session_id, p); /* Session Id */ + + return pppol2tp_udp_send(l2tp, pb); +} + +static err_t pppol2tp_udp_send(pppol2tp_pcb *l2tp, struct pbuf *pb) { + err_t err; + if (l2tp->netif) { + err = udp_sendto_if(l2tp->udp, pb, &l2tp->remote_ip, l2tp->tunnel_port, l2tp->netif); + } else { + err = udp_sendto(l2tp->udp, pb, &l2tp->remote_ip, l2tp->tunnel_port); + } + pbuf_free(pb); + return err; +} + +#endif /* PPP_SUPPORT && PPPOL2TP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c new file mode 100644 index 00000000..dff02554 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c @@ -0,0 +1,895 @@ +/** + * @file + * Network Point to Point Protocol over Serial file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOS_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include + +#include "lwip/arch.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/sys.h" +#include "lwip/memp.h" +#include "lwip/netif.h" +#include "lwip/snmp.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/api.h" +#include "lwip/ip4.h" /* for ip4_input() */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/pppos.h" +#include "netif/ppp/vj.h" + +/* Memory pool */ +LWIP_MEMPOOL_DECLARE(PPPOS_PCB, MEMP_NUM_PPPOS_INTERFACES, sizeof(pppos_pcb), "PPPOS_PCB") + +/* callbacks called from PPP core */ +static err_t pppos_write(ppp_pcb *ppp, void *ctx, struct pbuf *p); +static err_t pppos_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *pb, u16_t protocol); +static void pppos_connect(ppp_pcb *ppp, void *ctx); +#if PPP_SERVER +static void pppos_listen(ppp_pcb *ppp, void *ctx); +#endif /* PPP_SERVER */ +static void pppos_disconnect(ppp_pcb *ppp, void *ctx); +static err_t pppos_destroy(ppp_pcb *ppp, void *ctx); +static void pppos_send_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp); +static void pppos_recv_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp); + +/* Prototypes for procedures local to this file. */ +#if PPP_INPROC_IRQ_SAFE +static void pppos_input_callback(void *arg); +#endif /* PPP_INPROC_IRQ_SAFE */ +static void pppos_input_free_current_packet(pppos_pcb *pppos); +static void pppos_input_drop(pppos_pcb *pppos); +static err_t pppos_output_append(pppos_pcb *pppos, err_t err, struct pbuf *nb, u8_t c, u8_t accm, u16_t *fcs); +static err_t pppos_output_last(pppos_pcb *pppos, err_t err, struct pbuf *nb, u16_t *fcs); + +/* Callbacks structure for PPP core */ +static const struct link_callbacks pppos_callbacks = { + pppos_connect, +#if PPP_SERVER + pppos_listen, +#endif /* PPP_SERVER */ + pppos_disconnect, + pppos_destroy, + pppos_write, + pppos_netif_output, + pppos_send_config, + pppos_recv_config +}; + +/* PPP's Asynchronous-Control-Character-Map. The mask array is used + * to select the specific bit for a character. */ +#define ESCAPE_P(accm, c) ((accm)[(c) >> 3] & 1 << (c & 0x07)) + +#if PPP_FCS_TABLE +/* + * FCS lookup table as calculated by genfcstab. 
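+ * (This is the standard RFC 1662 FCS-16 table for the reflected
+ * polynomial 0x8408; the PPP_FCS macro below folds one byte into the
+ * running checksum per table lookup.)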
+ */ +static const u16_t fcstab[256] = { + 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, + 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, + 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, + 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, + 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, + 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, + 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, + 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, + 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, + 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, + 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, + 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, + 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, + 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, + 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, + 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, + 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, + 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, + 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, + 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, + 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, + 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, + 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, + 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, + 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, + 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, + 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, + 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, + 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, + 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, + 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, + 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 +}; +#define PPP_FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff]) +#else /* PPP_FCS_TABLE */ +/* The HDLC polynomial: X**0 + X**5 + X**12 + X**16 (0x8408) */ +#define PPP_FCS_POLYNOMIAL 0x8408 +static u16_t +ppp_get_fcs(u8_t byte) +{ + unsigned int octet; + int bit; + octet = byte; + for (bit = 8; bit-- > 0; ) { + octet = (octet & 0x01) ? ((octet >> 1) ^ PPP_FCS_POLYNOMIAL) : (octet >> 1); + } + return octet & 0xffff; +} +#define PPP_FCS(fcs, c) (((fcs) >> 8) ^ ppp_get_fcs(((fcs) ^ (c)) & 0xff)) +#endif /* PPP_FCS_TABLE */ + +/* + * Values for FCS calculations. + */ +#define PPP_INITFCS 0xffff /* Initial FCS value */ +#define PPP_GOODFCS 0xf0b8 /* Good final FCS value */ + +#if PPP_INPROC_IRQ_SAFE +#define PPPOS_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev) +#define PPPOS_PROTECT(lev) SYS_ARCH_PROTECT(lev) +#define PPPOS_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev) +#else +#define PPPOS_DECL_PROTECT(lev) +#define PPPOS_PROTECT(lev) +#define PPPOS_UNPROTECT(lev) +#endif /* PPP_INPROC_IRQ_SAFE */ + + +/* + * Create a new PPP connection using the given serial I/O device. + * + * Return 0 on success, an error code on failure. 
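+ *
+ * Typical application-side usage (sketch only; uart_output() and
+ * link_status_cb() stand for application-provided callbacks and are
+ * not defined in this file):
+ *
+ *   ppp = pppos_create(&ppp_netif, uart_output, link_status_cb, NULL);
+ *   ppp_connect(ppp, 0);          (or ppp_listen(ppp) when acting as server)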
+ */ +ppp_pcb *pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + pppos_pcb *pppos; + ppp_pcb *ppp; + LWIP_ASSERT_CORE_LOCKED(); + + pppos = (pppos_pcb *)LWIP_MEMPOOL_ALLOC(PPPOS_PCB); + if (pppos == NULL) { + return NULL; + } + + ppp = ppp_new(pppif, &pppos_callbacks, pppos, link_status_cb, ctx_cb); + if (ppp == NULL) { + LWIP_MEMPOOL_FREE(PPPOS_PCB, pppos); + return NULL; + } + + memset(pppos, 0, sizeof(pppos_pcb)); + pppos->ppp = ppp; + pppos->output_cb = output_cb; + return ppp; +} + +/* Called by PPP core */ +static err_t +pppos_write(ppp_pcb *ppp, void *ctx, struct pbuf *p) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + u8_t *s; + struct pbuf *nb; + u16_t n; + u16_t fcs_out; + err_t err; + LWIP_UNUSED_ARG(ppp); + + /* Grab an output buffer. Using PBUF_POOL here for tx is ok since the pbuf + gets freed by 'pppos_output_last' before this function returns and thus + cannot starve rx. */ + nb = pbuf_alloc(PBUF_RAW, 0, PBUF_POOL); + if (nb == NULL) { + PPPDEBUG(LOG_WARNING, ("pppos_write[%d]: alloc fail\n", ppp->netif->num)); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(p); + return ERR_MEM; + } + + /* Set nb->tot_len to actual payload length */ + nb->tot_len = p->len; + + /* If the link has been idle, we'll send a fresh flag character to + * flush any noise. */ + err = ERR_OK; + if ((sys_now() - pppos->last_xmit) >= PPP_MAXIDLEFLAG) { + err = pppos_output_append(pppos, err, nb, PPP_FLAG, 0, NULL); + } + + /* Load output buffer. */ + fcs_out = PPP_INITFCS; + s = (u8_t*)p->payload; + n = p->len; + while (n-- > 0) { + err = pppos_output_append(pppos, err, nb, *s++, 1, &fcs_out); + } + + err = pppos_output_last(pppos, err, nb, &fcs_out); + if (err == ERR_OK) { + PPPDEBUG(LOG_INFO, ("pppos_write[%d]: len=%d\n", ppp->netif->num, p->len)); + } else { + PPPDEBUG(LOG_WARNING, ("pppos_write[%d]: output failed len=%d\n", ppp->netif->num, p->len)); + } + pbuf_free(p); + return err; +} + +/* Called by PPP core */ +static err_t +pppos_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *pb, u16_t protocol) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + struct pbuf *nb, *p; + u16_t fcs_out; + err_t err; + LWIP_UNUSED_ARG(ppp); + + /* Grab an output buffer. Using PBUF_POOL here for tx is ok since the pbuf + gets freed by 'pppos_output_last' before this function returns and thus + cannot starve rx. */ + nb = pbuf_alloc(PBUF_RAW, 0, PBUF_POOL); + if (nb == NULL) { + PPPDEBUG(LOG_WARNING, ("pppos_netif_output[%d]: alloc fail\n", ppp->netif->num)); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ERR_MEM; + } + + /* Set nb->tot_len to actual payload length */ + nb->tot_len = pb->tot_len; + + /* If the link has been idle, we'll send a fresh flag character to + * flush any noise. */ + err = ERR_OK; + if ((sys_now() - pppos->last_xmit) >= PPP_MAXIDLEFLAG) { + err = pppos_output_append(pppos, err, nb, PPP_FLAG, 0, NULL); + } + + fcs_out = PPP_INITFCS; + if (!pppos->accomp) { + err = pppos_output_append(pppos, err, nb, PPP_ALLSTATIONS, 1, &fcs_out); + err = pppos_output_append(pppos, err, nb, PPP_UI, 1, &fcs_out); + } + if (!pppos->pcomp || protocol > 0xFF) { + err = pppos_output_append(pppos, err, nb, (protocol >> 8) & 0xFF, 1, &fcs_out); + } + err = pppos_output_append(pppos, err, nb, protocol & 0xFF, 1, &fcs_out); + + /* Load packet. 
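+ * Walk the pbuf chain and append every payload byte, escaping according
+ * to out_accm while the FCS is accumulated.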
*/ + for(p = pb; p; p = p->next) { + u16_t n = p->len; + u8_t *s = (u8_t*)p->payload; + + while (n-- > 0) { + err = pppos_output_append(pppos, err, nb, *s++, 1, &fcs_out); + } + } + + err = pppos_output_last(pppos, err, nb, &fcs_out); + if (err == ERR_OK) { + PPPDEBUG(LOG_INFO, ("pppos_netif_output[%d]: proto=0x%"X16_F", len = %d\n", ppp->netif->num, protocol, pb->tot_len)); + } else { + PPPDEBUG(LOG_WARNING, ("pppos_netif_output[%d]: output failed proto=0x%"X16_F", len = %d\n", ppp->netif->num, protocol, pb->tot_len)); + } + return err; +} + +static void +pppos_connect(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + +#if PPP_INPROC_IRQ_SAFE + /* input pbuf left over from last session? */ + pppos_input_free_current_packet(pppos); +#endif /* PPP_INPROC_IRQ_SAFE */ + + /* reset PPPoS control block to its initial state */ + memset(&pppos->last_xmit, 0, sizeof(pppos_pcb) - offsetof(pppos_pcb, last_xmit)); + + /* + * Default the in and out accm so that escape and flag characters + * are always escaped. + */ + pppos->in_accm[15] = 0x60; /* no need to protect since RX is not running */ + pppos->out_accm[15] = 0x60; + PPPOS_PROTECT(lev); + pppos->open = 1; + PPPOS_UNPROTECT(lev); + + /* + * Start the connection and handle incoming events (packet or timeout). + */ + PPPDEBUG(LOG_INFO, ("pppos_connect: unit %d: connecting\n", ppp->netif->num)); + ppp_start(ppp); /* notify upper layers */ +} + +#if PPP_SERVER +static void +pppos_listen(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + +#if PPP_INPROC_IRQ_SAFE + /* input pbuf left over from last session? */ + pppos_input_free_current_packet(pppos); +#endif /* PPP_INPROC_IRQ_SAFE */ + + /* reset PPPoS control block to its initial state */ + memset(&pppos->last_xmit, 0, sizeof(pppos_pcb) - offsetof(pppos_pcb, last_xmit)); + + /* + * Default the in and out accm so that escape and flag characters + * are always escaped. + */ + pppos->in_accm[15] = 0x60; /* no need to protect since RX is not running */ + pppos->out_accm[15] = 0x60; + PPPOS_PROTECT(lev); + pppos->open = 1; + PPPOS_UNPROTECT(lev); + + /* + * Wait for something to happen. + */ + PPPDEBUG(LOG_INFO, ("pppos_listen: unit %d: listening\n", ppp->netif->num)); + ppp_start(ppp); /* notify upper layers */ +} +#endif /* PPP_SERVER */ + +static void +pppos_disconnect(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + + PPPOS_PROTECT(lev); + pppos->open = 0; + PPPOS_UNPROTECT(lev); + + /* If PPP_INPROC_IRQ_SAFE is used we cannot call + * pppos_input_free_current_packet() here because + * rx IRQ might still call pppos_input(). + */ +#if !PPP_INPROC_IRQ_SAFE + /* input pbuf left ? */ + pppos_input_free_current_packet(pppos); +#endif /* !PPP_INPROC_IRQ_SAFE */ + + ppp_link_end(ppp); /* notify upper layers */ +} + +static err_t +pppos_destroy(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + LWIP_UNUSED_ARG(ppp); + +#if PPP_INPROC_IRQ_SAFE + /* input pbuf left ? */ + pppos_input_free_current_packet(pppos); +#endif /* PPP_INPROC_IRQ_SAFE */ + + LWIP_MEMPOOL_FREE(PPPOS_PCB, pppos); + return ERR_OK; +} + +#if !NO_SYS && !PPP_INPROC_IRQ_SAFE +/** Pass received raw characters to PPPoS to be decoded through lwIP TCPIP thread. + * + * This is one of the only functions that may be called outside of the TCPIP thread! 
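+ * A serial RX thread can, for instance, simply call
+ * pppos_input_tcpip(ppp, buf, len) for every chunk of bytes it reads.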
+ * + * @param ppp PPP descriptor index, returned by pppos_create() + * @param s received data + * @param l length of received data + */ +err_t +pppos_input_tcpip(ppp_pcb *ppp, u8_t *s, int l) +{ + struct pbuf *p; + err_t err; + + p = pbuf_alloc(PBUF_RAW, l, PBUF_POOL); + if (!p) { + return ERR_MEM; + } + pbuf_take(p, s, l); + + err = tcpip_inpkt(p, ppp_netif(ppp), pppos_input_sys); + if (err != ERR_OK) { + pbuf_free(p); + } + return err; +} + +/* called from TCPIP thread */ +err_t pppos_input_sys(struct pbuf *p, struct netif *inp) { + ppp_pcb *ppp = (ppp_pcb*)inp->state; + struct pbuf *n; + LWIP_ASSERT_CORE_LOCKED(); + + for (n = p; n; n = n->next) { + pppos_input(ppp, (u8_t*)n->payload, n->len); + } + pbuf_free(p); + return ERR_OK; +} +#endif /* !NO_SYS && !PPP_INPROC_IRQ_SAFE */ + +/** PPPoS input helper struct, must be packed since it is stored + * to pbuf->payload, which might be unaligned. */ +#if PPP_INPROC_IRQ_SAFE +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct pppos_input_header { + PACK_STRUCT_FIELD(ppp_pcb *ppp); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#endif /* PPP_INPROC_IRQ_SAFE */ + +/** Pass received raw characters to PPPoS to be decoded. + * + * @param ppp PPP descriptor index, returned by pppos_create() + * @param s received data + * @param l length of received data + */ +void +pppos_input(ppp_pcb *ppp, u8_t *s, int l) +{ + pppos_pcb *pppos = (pppos_pcb *)ppp->link_ctx_cb; + struct pbuf *next_pbuf; + u8_t cur_char; + u8_t escaped; + PPPOS_DECL_PROTECT(lev); +#if !PPP_INPROC_IRQ_SAFE + LWIP_ASSERT_CORE_LOCKED(); +#endif + + PPPDEBUG(LOG_DEBUG, ("pppos_input[%d]: got %d bytes\n", ppp->netif->num, l)); + while (l-- > 0) { + cur_char = *s++; + + PPPOS_PROTECT(lev); + /* ppp_input can disconnect the interface, we need to abort to prevent a memory + * leak if there are remaining bytes because pppos_connect and pppos_listen + * functions expect input buffer to be free. Furthermore there are no real + * reason to continue reading bytes if we are disconnected. + */ + if (!pppos->open) { + PPPOS_UNPROTECT(lev); + return; + } + escaped = ESCAPE_P(pppos->in_accm, cur_char); + PPPOS_UNPROTECT(lev); + /* Handle special characters. */ + if (escaped) { + /* Check for escape sequences. */ + /* XXX Note that this does not handle an escaped 0x5d character which + * would appear as an escape character. Since this is an ASCII ']' + * and there is no reason that I know of to escape it, I won't complicate + * the code to handle this case. GLL */ + if (cur_char == PPP_ESCAPE) { + pppos->in_escaped = 1; + /* Check for the flag character. */ + } else if (cur_char == PPP_FLAG) { + /* If this is just an extra flag character, ignore it. */ + if (pppos->in_state <= PDADDRESS) { + /* ignore it */; + /* If we haven't received the packet header, drop what has come in. */ + } else if (pppos->in_state < PDDATA) { + PPPDEBUG(LOG_WARNING, + ("pppos_input[%d]: Dropping incomplete packet %d\n", + ppp->netif->num, pppos->in_state)); + LINK_STATS_INC(link.lenerr); + pppos_input_drop(pppos); + /* If the fcs is invalid, drop the packet. 
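+ * A correctly received frame, with the two FCS octets included in the
+ * calculation, always leaves the residual PPP_GOODFCS (0xf0b8).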
*/ + } else if (pppos->in_fcs != PPP_GOODFCS) { + PPPDEBUG(LOG_INFO, + ("pppos_input[%d]: Dropping bad fcs 0x%"X16_F" proto=0x%"X16_F"\n", + ppp->netif->num, pppos->in_fcs, pppos->in_protocol)); + /* Note: If you get lots of these, check for UART frame errors or try different baud rate */ + LINK_STATS_INC(link.chkerr); + pppos_input_drop(pppos); + /* Otherwise it's a good packet so pass it on. */ + } else { + struct pbuf *inp; + /* Trim off the checksum. */ + if(pppos->in_tail->len > 2) { + pppos->in_tail->len -= 2; + + pppos->in_tail->tot_len = pppos->in_tail->len; + if (pppos->in_tail != pppos->in_head) { + pbuf_cat(pppos->in_head, pppos->in_tail); + } + } else { + pppos->in_tail->tot_len = pppos->in_tail->len; + if (pppos->in_tail != pppos->in_head) { + pbuf_cat(pppos->in_head, pppos->in_tail); + } + + pbuf_realloc(pppos->in_head, pppos->in_head->tot_len - 2); + } + + /* Dispatch the packet thereby consuming it. */ + inp = pppos->in_head; + /* Packet consumed, release our references. */ + pppos->in_head = NULL; + pppos->in_tail = NULL; +#if IP_FORWARD || LWIP_IPV6_FORWARD + /* hide the room for Ethernet forwarding header */ + pbuf_remove_header(inp, PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN); +#endif /* IP_FORWARD || LWIP_IPV6_FORWARD */ +#if PPP_INPROC_IRQ_SAFE + if(tcpip_try_callback(pppos_input_callback, inp) != ERR_OK) { + PPPDEBUG(LOG_ERR, ("pppos_input[%d]: tcpip_callback() failed, dropping packet\n", ppp->netif->num)); + pbuf_free(inp); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifindiscards); + } +#else /* PPP_INPROC_IRQ_SAFE */ + ppp_input(ppp, inp); +#endif /* PPP_INPROC_IRQ_SAFE */ + } + + /* Prepare for a new packet. */ + pppos->in_fcs = PPP_INITFCS; + pppos->in_state = PDADDRESS; + pppos->in_escaped = 0; + /* Other characters are usually control characters that may have + * been inserted by the physical layer so here we just drop them. */ + } else { + PPPDEBUG(LOG_WARNING, + ("pppos_input[%d]: Dropping ACCM char <%d>\n", ppp->netif->num, cur_char)); + } + /* Process other characters. */ + } else { + /* Unencode escaped characters. */ + if (pppos->in_escaped) { + pppos->in_escaped = 0; + cur_char ^= PPP_TRANS; + } + + /* Process character relative to current state. */ + switch(pppos->in_state) { + case PDIDLE: /* Idle state - waiting. */ + /* Drop the character if it's not 0xff + * we would have processed a flag character above. */ + if (cur_char != PPP_ALLSTATIONS) { + break; + } + /* no break */ + /* Fall through */ + + case PDSTART: /* Process start flag. */ + /* Prepare for a new packet. */ + pppos->in_fcs = PPP_INITFCS; + /* no break */ + /* Fall through */ + + case PDADDRESS: /* Process address field. */ + if (cur_char == PPP_ALLSTATIONS) { + pppos->in_state = PDCONTROL; + break; + } + /* no break */ + + /* Else assume compressed address and control fields so + * fall through to get the protocol... */ + /* Fall through */ + case PDCONTROL: /* Process control field. */ + /* If we don't get a valid control code, restart. */ + if (cur_char == PPP_UI) { + pppos->in_state = PDPROTOCOL1; + break; + } + /* no break */ + +#if 0 + else { + PPPDEBUG(LOG_WARNING, + ("pppos_input[%d]: Invalid control <%d>\n", ppp->netif->num, cur_char)); + pppos->in_state = PDSTART; + } +#endif + /* Fall through */ + + case PDPROTOCOL1: /* Process protocol field 1. */ + /* If the lower bit is set, this is the end of the protocol + * field. 
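+ * (RFC 1661 assigns protocol numbers with an odd least significant
+ * octet, which is what makes this one-byte form unambiguous.)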
*/ + if (cur_char & 1) { + pppos->in_protocol = cur_char; + pppos->in_state = PDDATA; + } else { + pppos->in_protocol = (u16_t)cur_char << 8; + pppos->in_state = PDPROTOCOL2; + } + break; + case PDPROTOCOL2: /* Process protocol field 2. */ + pppos->in_protocol |= cur_char; + pppos->in_state = PDDATA; + break; + case PDDATA: /* Process data byte. */ + /* Make space to receive processed data. */ + if (pppos->in_tail == NULL || pppos->in_tail->len == PBUF_POOL_BUFSIZE) { + u16_t pbuf_alloc_len; + if (pppos->in_tail != NULL) { + pppos->in_tail->tot_len = pppos->in_tail->len; + if (pppos->in_tail != pppos->in_head) { + pbuf_cat(pppos->in_head, pppos->in_tail); + /* give up the in_tail reference now */ + pppos->in_tail = NULL; + } + } + /* If we haven't started a packet, we need a packet header. */ + pbuf_alloc_len = 0; +#if IP_FORWARD || LWIP_IPV6_FORWARD + /* If IP forwarding is enabled we are reserving PBUF_LINK_ENCAPSULATION_HLEN + * + PBUF_LINK_HLEN bytes so the packet is being allocated with enough header + * space to be forwarded (to Ethernet for example). + */ + if (pppos->in_head == NULL) { + pbuf_alloc_len = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN; + } +#endif /* IP_FORWARD || LWIP_IPV6_FORWARD */ + next_pbuf = pbuf_alloc(PBUF_RAW, pbuf_alloc_len, PBUF_POOL); + if (next_pbuf == NULL) { + /* No free buffers. Drop the input packet and let the + * higher layers deal with it. Continue processing + * the received pbuf chain in case a new packet starts. */ + PPPDEBUG(LOG_ERR, ("pppos_input[%d]: NO FREE PBUFS!\n", ppp->netif->num)); + LINK_STATS_INC(link.memerr); + pppos_input_drop(pppos); + pppos->in_state = PDSTART; /* Wait for flag sequence. */ + break; + } + if (pppos->in_head == NULL) { + u8_t *payload = ((u8_t*)next_pbuf->payload) + pbuf_alloc_len; +#if PPP_INPROC_IRQ_SAFE + ((struct pppos_input_header*)payload)->ppp = ppp; + payload += sizeof(struct pppos_input_header); + next_pbuf->len += sizeof(struct pppos_input_header); +#endif /* PPP_INPROC_IRQ_SAFE */ + next_pbuf->len += sizeof(pppos->in_protocol); + *(payload++) = pppos->in_protocol >> 8; + *(payload) = pppos->in_protocol & 0xFF; + pppos->in_head = next_pbuf; + } + pppos->in_tail = next_pbuf; + } + /* Load character into buffer. */ + ((u8_t*)pppos->in_tail->payload)[pppos->in_tail->len++] = cur_char; + break; + default: + break; + } + + /* update the frame check sequence number. */ + pppos->in_fcs = PPP_FCS(pppos->in_fcs, cur_char); + } + } /* while (l-- > 0), all bytes processed */ +} + +#if PPP_INPROC_IRQ_SAFE +/* PPPoS input callback using one input pointer + */ +static void pppos_input_callback(void *arg) { + struct pbuf *pb = (struct pbuf*)arg; + ppp_pcb *ppp; + + ppp = ((struct pppos_input_header*)pb->payload)->ppp; + if(pbuf_remove_header(pb, sizeof(struct pppos_input_header))) { + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + goto drop; + } + + /* Dispatch the packet thereby consuming it. */ + ppp_input(ppp, pb); + return; + +drop: + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifindiscards); + pbuf_free(pb); +} +#endif /* PPP_INPROC_IRQ_SAFE */ + +static void +pppos_send_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp) +{ + int i; + pppos_pcb *pppos = (pppos_pcb *)ctx; + LWIP_UNUSED_ARG(ppp); + + pppos->pcomp = pcomp; + pppos->accomp = accomp; + + /* Load the ACCM bits for the 32 control codes. 
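+ * Each set bit marks one of the control characters 0x00..0x1f that must
+ * be escaped on transmit (see ESCAPE_P above).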
*/ + for (i = 0; i < 32/8; i++) { + pppos->out_accm[i] = (u8_t)((accm >> (8 * i)) & 0xFF); + } + + PPPDEBUG(LOG_INFO, ("pppos_send_config[%d]: out_accm=%X %X %X %X\n", + pppos->ppp->netif->num, + pppos->out_accm[0], pppos->out_accm[1], pppos->out_accm[2], pppos->out_accm[3])); +} + +static void +pppos_recv_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp) +{ + int i; + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + LWIP_UNUSED_ARG(ppp); + LWIP_UNUSED_ARG(pcomp); + LWIP_UNUSED_ARG(accomp); + + /* Load the ACCM bits for the 32 control codes. */ + PPPOS_PROTECT(lev); + for (i = 0; i < 32 / 8; i++) { + pppos->in_accm[i] = (u8_t)(accm >> (i * 8)); + } + PPPOS_UNPROTECT(lev); + + PPPDEBUG(LOG_INFO, ("pppos_recv_config[%d]: in_accm=%X %X %X %X\n", + pppos->ppp->netif->num, + pppos->in_accm[0], pppos->in_accm[1], pppos->in_accm[2], pppos->in_accm[3])); +} + +/* + * Drop the input packet. + */ +static void +pppos_input_free_current_packet(pppos_pcb *pppos) +{ + if (pppos->in_head != NULL) { + if (pppos->in_tail && (pppos->in_tail != pppos->in_head)) { + pbuf_free(pppos->in_tail); + } + pbuf_free(pppos->in_head); + pppos->in_head = NULL; + } + pppos->in_tail = NULL; +} + +/* + * Drop the input packet and increase error counters. + */ +static void +pppos_input_drop(pppos_pcb *pppos) +{ + if (pppos->in_head != NULL) { +#if 0 + PPPDEBUG(LOG_INFO, ("pppos_input_drop: %d:%.*H\n", pppos->in_head->len, min(60, pppos->in_head->len * 2), pppos->in_head->payload)); +#endif + PPPDEBUG(LOG_INFO, ("pppos_input_drop: pbuf len=%d, addr %p\n", pppos->in_head->len, (void*)pppos->in_head)); + } + pppos_input_free_current_packet(pppos); +#if VJ_SUPPORT + vj_uncompress_err(&pppos->ppp->vj_comp); +#endif /* VJ_SUPPORT */ + + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(pppos->ppp->netif, ifindiscards); +} + +/* + * pppos_output_append - append given character to end of given pbuf. + * If out_accm is not 0 and the character needs to be escaped, do so. + * If pbuf is full, send the pbuf and reuse it. + * Return the current pbuf. + */ +static err_t +pppos_output_append(pppos_pcb *pppos, err_t err, struct pbuf *nb, u8_t c, u8_t accm, u16_t *fcs) +{ + if (err != ERR_OK) { + return err; + } + + /* Make sure there is room for the character and an escape code. + * Sure we don't quite fill the buffer if the character doesn't + * get escaped but is one character worth complicating this? */ + if ((PBUF_POOL_BUFSIZE - nb->len) < 2) { + u32_t l = pppos->output_cb(pppos->ppp, (u8_t*)nb->payload, nb->len, pppos->ppp->ctx_cb); + if (l != nb->len) { + return ERR_IF; + } + nb->len = 0; + } + + /* Update FCS before checking for special characters. */ + if (fcs) { + *fcs = PPP_FCS(*fcs, c); + } + + /* Copy to output buffer escaping special characters. */ + if (accm && ESCAPE_P(pppos->out_accm, c)) { + *((u8_t*)nb->payload + nb->len++) = PPP_ESCAPE; + *((u8_t*)nb->payload + nb->len++) = c ^ PPP_TRANS; + } else { + *((u8_t*)nb->payload + nb->len++) = c; + } + + return ERR_OK; +} + +static err_t +pppos_output_last(pppos_pcb *pppos, err_t err, struct pbuf *nb, u16_t *fcs) +{ + ppp_pcb *ppp = pppos->ppp; + + /* Add FCS and trailing flag. 
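+ * Per RFC 1662 the FCS is transmitted complemented, least significant
+ * octet first, and is itself subject to ACCM escaping.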
*/ + err = pppos_output_append(pppos, err, nb, ~(*fcs) & 0xFF, 1, NULL); + err = pppos_output_append(pppos, err, nb, (~(*fcs) >> 8) & 0xFF, 1, NULL); + err = pppos_output_append(pppos, err, nb, PPP_FLAG, 0, NULL); + + if (err != ERR_OK) { + goto failed; + } + + /* Send remaining buffer if not empty */ + if (nb->len > 0) { + u32_t l = pppos->output_cb(ppp, (u8_t*)nb->payload, nb->len, ppp->ctx_cb); + if (l != nb->len) { + err = ERR_IF; + goto failed; + } + } + + pppos->last_xmit = sys_now(); + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, nb->tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + pbuf_free(nb); + return ERR_OK; + +failed: + pppos->last_xmit = 0; /* prepend PPP_FLAG to next packet */ + LINK_STATS_INC(link.err); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(nb); + return err; +} + +#endif /* PPP_SUPPORT && PPPOS_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c new file mode 100644 index 00000000..3b2399dc --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c @@ -0,0 +1,677 @@ +/* + * upap.c - User/Password Authentication Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/upap.h" + +#if PPP_OPTIONS +/* + * Command-line options. 
+ */ +static option_t pap_option_list[] = { + { "hide-password", o_bool, &hide_password, + "Don't output passwords to log", OPT_PRIO | 1 }, + { "show-password", o_bool, &hide_password, + "Show password string in debug log messages", OPT_PRIOSUB | 0 }, + + { "pap-restart", o_int, &upap[0].us_timeouttime, + "Set retransmit timeout for PAP", OPT_PRIO }, + { "pap-max-authreq", o_int, &upap[0].us_maxtransmits, + "Set max number of transmissions for auth-reqs", OPT_PRIO }, + { "pap-timeout", o_int, &upap[0].us_reqtimeout, + "Set time limit for peer PAP authentication", OPT_PRIO }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points. + */ +static void upap_init(ppp_pcb *pcb); +static void upap_lowerup(ppp_pcb *pcb); +static void upap_lowerdown(ppp_pcb *pcb); +static void upap_input(ppp_pcb *pcb, u_char *inpacket, int l); +static void upap_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int upap_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +const struct protent pap_protent = { + PPP_PAP, + upap_init, + upap_input, + upap_protrej, + upap_lowerup, + upap_lowerdown, + NULL, + NULL, +#if PRINTPKT_SUPPORT + upap_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "PAP", + NULL, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + pap_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +static void upap_timeout(void *arg); +#if PPP_SERVER +static void upap_reqtimeout(void *arg); +static void upap_rauthreq(ppp_pcb *pcb, u_char *inp, int id, int len); +#endif /* PPP_SERVER */ +static void upap_rauthack(ppp_pcb *pcb, u_char *inp, int id, int len); +static void upap_rauthnak(ppp_pcb *pcb, u_char *inp, int id, int len); +static void upap_sauthreq(ppp_pcb *pcb); +#if PPP_SERVER +static void upap_sresp(ppp_pcb *pcb, u_char code, u_char id, const char *msg, int msglen); +#endif /* PPP_SERVER */ + + +/* + * upap_init - Initialize a UPAP unit. + */ +static void upap_init(ppp_pcb *pcb) { + pcb->upap.us_user = NULL; + pcb->upap.us_userlen = 0; + pcb->upap.us_passwd = NULL; + pcb->upap.us_passwdlen = 0; + pcb->upap.us_clientstate = UPAPCS_INITIAL; +#if PPP_SERVER + pcb->upap.us_serverstate = UPAPSS_INITIAL; +#endif /* PPP_SERVER */ + pcb->upap.us_id = 0; +} + + +/* + * upap_authwithpeer - Authenticate us with our peer (start client). + * + * Set new state and send authenticate's. + */ +void upap_authwithpeer(ppp_pcb *pcb, const char *user, const char *password) { + + if(!user || !password) + return; + + /* Save the username and password we're given */ + pcb->upap.us_user = user; + pcb->upap.us_userlen = (u8_t)LWIP_MIN(strlen(user), 0xff); + pcb->upap.us_passwd = password; + pcb->upap.us_passwdlen = (u8_t)LWIP_MIN(strlen(password), 0xff); + pcb->upap.us_transmits = 0; + + /* Lower layer up yet? */ + if (pcb->upap.us_clientstate == UPAPCS_INITIAL || + pcb->upap.us_clientstate == UPAPCS_PENDING) { + pcb->upap.us_clientstate = UPAPCS_PENDING; + return; + } + + upap_sauthreq(pcb); /* Start protocol */ +} + +#if PPP_SERVER +/* + * upap_authpeer - Authenticate our peer (start server). + * + * Set new state. + */ +void upap_authpeer(ppp_pcb *pcb) { + + /* Lower layer up yet? 
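+ * If not, just mark server-side authentication as pending; it is
+ * resumed from upap_lowerup().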
*/ + if (pcb->upap.us_serverstate == UPAPSS_INITIAL || + pcb->upap.us_serverstate == UPAPSS_PENDING) { + pcb->upap.us_serverstate = UPAPSS_PENDING; + return; + } + + pcb->upap.us_serverstate = UPAPSS_LISTEN; + if (pcb->settings.pap_req_timeout > 0) + TIMEOUT(upap_reqtimeout, pcb, pcb->settings.pap_req_timeout); +} +#endif /* PPP_SERVER */ + +/* + * upap_timeout - Retransmission timer for sending auth-reqs expired. + */ +static void upap_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->upap.us_clientstate != UPAPCS_AUTHREQ) + return; + + if (pcb->upap.us_transmits >= pcb->settings.pap_max_transmits) { + /* give up in disgust */ + ppp_error("No response to PAP authenticate-requests"); + pcb->upap.us_clientstate = UPAPCS_BADAUTH; + auth_withpeer_fail(pcb, PPP_PAP); + return; + } + + upap_sauthreq(pcb); /* Send Authenticate-Request */ +} + + +#if PPP_SERVER +/* + * upap_reqtimeout - Give up waiting for the peer to send an auth-req. + */ +static void upap_reqtimeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->upap.us_serverstate != UPAPSS_LISTEN) + return; /* huh?? */ + + auth_peer_fail(pcb, PPP_PAP); + pcb->upap.us_serverstate = UPAPSS_BADAUTH; +} +#endif /* PPP_SERVER */ + + +/* + * upap_lowerup - The lower layer is up. + * + * Start authenticating if pending. + */ +static void upap_lowerup(ppp_pcb *pcb) { + + if (pcb->upap.us_clientstate == UPAPCS_INITIAL) + pcb->upap.us_clientstate = UPAPCS_CLOSED; + else if (pcb->upap.us_clientstate == UPAPCS_PENDING) { + upap_sauthreq(pcb); /* send an auth-request */ + } + +#if PPP_SERVER + if (pcb->upap.us_serverstate == UPAPSS_INITIAL) + pcb->upap.us_serverstate = UPAPSS_CLOSED; + else if (pcb->upap.us_serverstate == UPAPSS_PENDING) { + pcb->upap.us_serverstate = UPAPSS_LISTEN; + if (pcb->settings.pap_req_timeout > 0) + TIMEOUT(upap_reqtimeout, pcb, pcb->settings.pap_req_timeout); + } +#endif /* PPP_SERVER */ +} + + +/* + * upap_lowerdown - The lower layer is down. + * + * Cancel all timeouts. + */ +static void upap_lowerdown(ppp_pcb *pcb) { + + if (pcb->upap.us_clientstate == UPAPCS_AUTHREQ) /* Timeout pending? */ + UNTIMEOUT(upap_timeout, pcb); /* Cancel timeout */ +#if PPP_SERVER + if (pcb->upap.us_serverstate == UPAPSS_LISTEN && pcb->settings.pap_req_timeout > 0) + UNTIMEOUT(upap_reqtimeout, pcb); +#endif /* PPP_SERVER */ + + pcb->upap.us_clientstate = UPAPCS_INITIAL; +#if PPP_SERVER + pcb->upap.us_serverstate = UPAPSS_INITIAL; +#endif /* PPP_SERVER */ +} + + +/* + * upap_protrej - Peer doesn't speak this protocol. + * + * This shouldn't happen. In any case, pretend lower layer went down. + */ +static void upap_protrej(ppp_pcb *pcb) { + + if (pcb->upap.us_clientstate == UPAPCS_AUTHREQ) { + ppp_error("PAP authentication failed due to protocol-reject"); + auth_withpeer_fail(pcb, PPP_PAP); + } +#if PPP_SERVER + if (pcb->upap.us_serverstate == UPAPSS_LISTEN) { + ppp_error("PAP authentication of peer failed (protocol-reject)"); + auth_peer_fail(pcb, PPP_PAP); + } +#endif /* PPP_SERVER */ + upap_lowerdown(pcb); +} + + +/* + * upap_input - Input UPAP packet. + */ +static void upap_input(ppp_pcb *pcb, u_char *inpacket, int l) { + u_char *inp; + u_char code, id; + int len; + + /* + * Parse header (code, id and length). + * If packet too short, drop it. 
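+ * (PAP header per RFC 1334: 1-octet code, 1-octet identifier and a
+ * 2-octet length covering the whole packet.)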
+ */ + inp = inpacket; + if (l < UPAP_HEADERLEN) { + UPAPDEBUG(("pap_input: rcvd short header.")); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < UPAP_HEADERLEN) { + UPAPDEBUG(("pap_input: rcvd illegal length.")); + return; + } + if (len > l) { + UPAPDEBUG(("pap_input: rcvd short packet.")); + return; + } + len -= UPAP_HEADERLEN; + + /* + * Action depends on code. + */ + switch (code) { + case UPAP_AUTHREQ: +#if PPP_SERVER + upap_rauthreq(pcb, inp, id, len); +#endif /* PPP_SERVER */ + break; + + case UPAP_AUTHACK: + upap_rauthack(pcb, inp, id, len); + break; + + case UPAP_AUTHNAK: + upap_rauthnak(pcb, inp, id, len); + break; + + default: /* XXX Need code reject */ + break; + } +} + +#if PPP_SERVER +/* + * upap_rauth - Receive Authenticate. + */ +static void upap_rauthreq(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char ruserlen, rpasswdlen; + char *ruser; + char *rpasswd; + char rhostname[256]; + int retcode; + const char *msg; + int msglen; + + if (pcb->upap.us_serverstate < UPAPSS_LISTEN) + return; + + /* + * If we receive a duplicate authenticate-request, we are + * supposed to return the same status as for the first request. + */ + if (pcb->upap.us_serverstate == UPAPSS_OPEN) { + upap_sresp(pcb, UPAP_AUTHACK, id, "", 0); /* return auth-ack */ + return; + } + if (pcb->upap.us_serverstate == UPAPSS_BADAUTH) { + upap_sresp(pcb, UPAP_AUTHNAK, id, "", 0); /* return auth-nak */ + return; + } + + /* + * Parse user/passwd. + */ + if (len < 1) { + UPAPDEBUG(("pap_rauth: rcvd short packet.")); + return; + } + GETCHAR(ruserlen, inp); + len -= sizeof (u_char) + ruserlen + sizeof (u_char); + if (len < 0) { + UPAPDEBUG(("pap_rauth: rcvd short packet.")); + return; + } + ruser = (char *) inp; + INCPTR(ruserlen, inp); + GETCHAR(rpasswdlen, inp); + if (len < rpasswdlen) { + UPAPDEBUG(("pap_rauth: rcvd short packet.")); + return; + } + + rpasswd = (char *) inp; + + /* + * Check the username and password given. + */ + retcode = UPAP_AUTHNAK; + if (auth_check_passwd(pcb, ruser, ruserlen, rpasswd, rpasswdlen, &msg, &msglen)) { + retcode = UPAP_AUTHACK; + } + BZERO(rpasswd, rpasswdlen); + +#if 0 /* UNUSED */ + /* + * Check remote number authorization. A plugin may have filled in + * the remote number or added an allowed number, and rather than + * return an authenticate failure, is leaving it for us to verify. + */ + if (retcode == UPAP_AUTHACK) { + if (!auth_number()) { + /* We do not want to leak info about the pap result. */ + retcode = UPAP_AUTHNAK; /* XXX exit value will be "wrong" */ + warn("calling number %q is not authorized", remote_number); + } + } + + msglen = strlen(msg); + if (msglen > 255) + msglen = 255; +#endif /* UNUSED */ + + upap_sresp(pcb, retcode, id, msg, msglen); + + /* Null terminate and clean remote name. */ + ppp_slprintf(rhostname, sizeof(rhostname), "%.*v", ruserlen, ruser); + + if (retcode == UPAP_AUTHACK) { + pcb->upap.us_serverstate = UPAPSS_OPEN; + ppp_notice("PAP peer authentication succeeded for %q", rhostname); + auth_peer_success(pcb, PPP_PAP, 0, ruser, ruserlen); + } else { + pcb->upap.us_serverstate = UPAPSS_BADAUTH; + ppp_warn("PAP peer authentication failed for %q", rhostname); + auth_peer_fail(pcb, PPP_PAP); + } + + if (pcb->settings.pap_req_timeout > 0) + UNTIMEOUT(upap_reqtimeout, pcb); +} +#endif /* PPP_SERVER */ + +/* + * upap_rauthack - Receive Authenticate-Ack. 
+ */ +static void upap_rauthack(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char msglen; + char *msg; + LWIP_UNUSED_ARG(id); + + if (pcb->upap.us_clientstate != UPAPCS_AUTHREQ) /* XXX */ + return; + + /* + * Parse message. + */ + if (len < 1) { + UPAPDEBUG(("pap_rauthack: ignoring missing msg-length.")); + } else { + GETCHAR(msglen, inp); + if (msglen > 0) { + len -= sizeof (u_char); + if (len < msglen) { + UPAPDEBUG(("pap_rauthack: rcvd short packet.")); + return; + } + msg = (char *) inp; + PRINTMSG(msg, msglen); + } + } + + pcb->upap.us_clientstate = UPAPCS_OPEN; + + auth_withpeer_success(pcb, PPP_PAP, 0); +} + + +/* + * upap_rauthnak - Receive Authenticate-Nak. + */ +static void upap_rauthnak(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char msglen; + char *msg; + LWIP_UNUSED_ARG(id); + + if (pcb->upap.us_clientstate != UPAPCS_AUTHREQ) /* XXX */ + return; + + /* + * Parse message. + */ + if (len < 1) { + UPAPDEBUG(("pap_rauthnak: ignoring missing msg-length.")); + } else { + GETCHAR(msglen, inp); + if (msglen > 0) { + len -= sizeof (u_char); + if (len < msglen) { + UPAPDEBUG(("pap_rauthnak: rcvd short packet.")); + return; + } + msg = (char *) inp; + PRINTMSG(msg, msglen); + } + } + + pcb->upap.us_clientstate = UPAPCS_BADAUTH; + + ppp_error("PAP authentication failed"); + auth_withpeer_fail(pcb, PPP_PAP); +} + + +/* + * upap_sauthreq - Send an Authenticate-Request. + */ +static void upap_sauthreq(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + int outlen; + + outlen = UPAP_HEADERLEN + 2 * sizeof (u_char) + + pcb->upap.us_userlen + pcb->upap.us_passwdlen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN +outlen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + MAKEHEADER(outp, PPP_PAP); + + PUTCHAR(UPAP_AUTHREQ, outp); + PUTCHAR(++pcb->upap.us_id, outp); + PUTSHORT(outlen, outp); + PUTCHAR(pcb->upap.us_userlen, outp); + MEMCPY(outp, pcb->upap.us_user, pcb->upap.us_userlen); + INCPTR(pcb->upap.us_userlen, outp); + PUTCHAR(pcb->upap.us_passwdlen, outp); + MEMCPY(outp, pcb->upap.us_passwd, pcb->upap.us_passwdlen); + + ppp_write(pcb, p); + + TIMEOUT(upap_timeout, pcb, pcb->settings.pap_timeout_time); + ++pcb->upap.us_transmits; + pcb->upap.us_clientstate = UPAPCS_AUTHREQ; +} + +#if PPP_SERVER +/* + * upap_sresp - Send a response (ack or nak). + */ +static void upap_sresp(ppp_pcb *pcb, u_char code, u_char id, const char *msg, int msglen) { + struct pbuf *p; + u_char *outp; + int outlen; + + outlen = UPAP_HEADERLEN + sizeof (u_char) + msglen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN +outlen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + MAKEHEADER(outp, PPP_PAP); + + PUTCHAR(code, outp); + PUTCHAR(id, outp); + PUTSHORT(outlen, outp); + PUTCHAR(msglen, outp); + MEMCPY(outp, msg, msglen); + + ppp_write(pcb, p); +} +#endif /* PPP_SERVER */ + +#if PRINTPKT_SUPPORT +/* + * upap_printpkt - print the contents of a PAP packet. 
+ */ +static const char* const upap_codenames[] = { + "AuthReq", "AuthAck", "AuthNak" +}; + +static int upap_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len; + int mlen, ulen, wlen; + const u_char *user, *pwd, *msg; + const u_char *pstart; + + if (plen < UPAP_HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < UPAP_HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(upap_codenames)) + printer(arg, " %s", upap_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= UPAP_HEADERLEN; + switch (code) { + case UPAP_AUTHREQ: + if (len < 1) + break; + ulen = p[0]; + if (len < ulen + 2) + break; + wlen = p[ulen + 1]; + if (len < ulen + wlen + 2) + break; + user = (const u_char *) (p + 1); + pwd = (const u_char *) (p + ulen + 2); + p += ulen + wlen + 2; + len -= ulen + wlen + 2; + printer(arg, " user="); + ppp_print_string(user, ulen, printer, arg); + printer(arg, " password="); +/* FIXME: require ppp_pcb struct as printpkt() argument */ +#if 0 + if (!pcb->settings.hide_password) +#endif + ppp_print_string(pwd, wlen, printer, arg); +#if 0 + else + printer(arg, ""); +#endif + break; + case UPAP_AUTHACK: + case UPAP_AUTHNAK: + if (len < 1) + break; + mlen = p[0]; + if (len < mlen + 1) + break; + msg = (const u_char *) (p + 1); + p += mlen + 1; + len -= mlen + 1; + printer(arg, " "); + ppp_print_string(msg, mlen, printer, arg); + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (; len > 0; --len) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +#endif /* PPP_SUPPORT && PAP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c new file mode 100644 index 00000000..f1366dae --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c @@ -0,0 +1,957 @@ +/* + * utils.c - various utility functions used in pppd. + * + * Copyright (c) 1999-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef SVR4 +#include +#endif +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" + +#if defined(SUNOS4) +extern char *strerror(); +#endif + +static void ppp_logit(int level, const char *fmt, va_list args); +static void ppp_log_write(int level, char *buf); +#if PRINTPKT_SUPPORT +static void ppp_vslp_printer(void *arg, const char *fmt, ...); +static void ppp_format_packet(const u_char *p, int len, + void (*printer) (void *, const char *, ...), void *arg); + +struct buffer_info { + char *ptr; + int len; +}; +#endif /* PRINTPKT_SUPPORT */ + +/* + * ppp_strlcpy - like strcpy/strncpy, doesn't overflow destination buffer, + * always leaves destination null-terminated (for len > 0). + */ +size_t ppp_strlcpy(char *dest, const char *src, size_t len) { + size_t ret = strlen(src); + + if (len != 0) { + if (ret < len) + strcpy(dest, src); + else { + strncpy(dest, src, len - 1); + dest[len-1] = 0; + } + } + return ret; +} + +/* + * ppp_strlcat - like strcat/strncat, doesn't overflow destination buffer, + * always leaves destination null-terminated (for len > 0). + */ +size_t ppp_strlcat(char *dest, const char *src, size_t len) { + size_t dlen = strlen(dest); + + return dlen + ppp_strlcpy(dest + dlen, src, (len > dlen? len - dlen: 0)); +} + + +/* + * ppp_slprintf - format a message into a buffer. Like sprintf except we + * also specify the length of the output buffer, and we handle + * %m (error message), %v (visible string), + * %q (quoted string), %t (current time) and %I (IP address) formats. + * Doesn't do floating-point formats. + * Returns the number of chars put into buf. + */ +int ppp_slprintf(char *buf, int buflen, const char *fmt, ...) { + va_list args; + int n; + + va_start(args, fmt); + n = ppp_vslprintf(buf, buflen, fmt, args); + va_end(args); + return n; +} + +/* + * ppp_vslprintf - like ppp_slprintf, takes a va_list instead of a list of args. + */ +#define OUTCHAR(c) (buflen > 0? 
(--buflen, *buf++ = (c)): 0) + +int ppp_vslprintf(char *buf, int buflen, const char *fmt, va_list args) { + int c, i, n; + int width, prec, fillch; + int base, len, neg, quoted; + unsigned long val = 0; + const char *f; + char *str, *buf0; + const unsigned char *p; + char num[32]; +#if 0 /* need port */ + time_t t; +#endif /* need port */ + u32_t ip; + static char hexchars[] = "0123456789abcdef"; +#if PRINTPKT_SUPPORT + struct buffer_info bufinfo; +#endif /* PRINTPKT_SUPPORT */ + + buf0 = buf; + --buflen; + while (buflen > 0) { + for (f = fmt; *f != '%' && *f != 0; ++f) + ; + if (f > fmt) { + len = f - fmt; + if (len > buflen) + len = buflen; + memcpy(buf, fmt, len); + buf += len; + buflen -= len; + fmt = f; + } + if (*fmt == 0) + break; + c = *++fmt; + width = 0; + prec = -1; + fillch = ' '; + if (c == '0') { + fillch = '0'; + c = *++fmt; + } + if (c == '*') { + width = va_arg(args, int); + c = *++fmt; + } else { + while (lwip_isdigit(c)) { + width = width * 10 + c - '0'; + c = *++fmt; + } + } + if (c == '.') { + c = *++fmt; + if (c == '*') { + prec = va_arg(args, int); + c = *++fmt; + } else { + prec = 0; + while (lwip_isdigit(c)) { + prec = prec * 10 + c - '0'; + c = *++fmt; + } + } + } + str = 0; + base = 0; + neg = 0; + ++fmt; + switch (c) { + case 'l': + c = *fmt++; + switch (c) { + case 'd': + val = va_arg(args, long); + if ((long)val < 0) { + neg = 1; + val = (unsigned long)-(long)val; + } + base = 10; + break; + case 'u': + val = va_arg(args, unsigned long); + base = 10; + break; + default: + OUTCHAR('%'); + OUTCHAR('l'); + --fmt; /* so %lz outputs %lz etc. */ + continue; + } + break; + case 'd': + i = va_arg(args, int); + if (i < 0) { + neg = 1; + val = -i; + } else + val = i; + base = 10; + break; + case 'u': + val = va_arg(args, unsigned int); + base = 10; + break; + case 'o': + val = va_arg(args, unsigned int); + base = 8; + break; + case 'x': + case 'X': + val = va_arg(args, unsigned int); + base = 16; + break; +#if 0 /* unused (and wrong on LLP64 systems) */ + case 'p': + val = (unsigned long) va_arg(args, void *); + base = 16; + neg = 2; + break; +#endif /* unused (and wrong on LLP64 systems) */ + case 's': + str = va_arg(args, char *); + break; + case 'c': + num[0] = va_arg(args, int); + num[1] = 0; + str = num; + break; +#if 0 /* do we always have strerror() in embedded ? */ + case 'm': + str = strerror(errno); + break; +#endif /* do we always have strerror() in embedded ? 
*/ + case 'I': + ip = va_arg(args, u32_t); + ip = lwip_ntohl(ip); + ppp_slprintf(num, sizeof(num), "%d.%d.%d.%d", (ip >> 24) & 0xff, + (ip >> 16) & 0xff, (ip >> 8) & 0xff, ip & 0xff); + str = num; + break; +#if 0 /* need port */ + case 't': + time(&t); + str = ctime(&t); + str += 4; /* chop off the day name */ + str[15] = 0; /* chop off year and newline */ + break; +#endif /* need port */ + case 'v': /* "visible" string */ + case 'q': /* quoted string */ + quoted = c == 'q'; + p = va_arg(args, unsigned char *); + if (p == NULL) + p = (const unsigned char *)""; + if (fillch == '0' && prec >= 0) { + n = prec; + } else { + n = strlen((const char *)p); + if (prec >= 0 && n > prec) + n = prec; + } + while (n > 0 && buflen > 0) { + c = *p++; + --n; + if (!quoted && c >= 0x80) { + OUTCHAR('M'); + OUTCHAR('-'); + c -= 0x80; + } + if (quoted && (c == '"' || c == '\\')) + OUTCHAR('\\'); + if (c < 0x20 || (0x7f <= c && c < 0xa0)) { + if (quoted) { + OUTCHAR('\\'); + switch (c) { + case '\t': OUTCHAR('t'); break; + case '\n': OUTCHAR('n'); break; + case '\b': OUTCHAR('b'); break; + case '\f': OUTCHAR('f'); break; + default: + OUTCHAR('x'); + OUTCHAR(hexchars[c >> 4]); + OUTCHAR(hexchars[c & 0xf]); + } + } else { + if (c == '\t') + OUTCHAR(c); + else { + OUTCHAR('^'); + OUTCHAR(c ^ 0x40); + } + } + } else + OUTCHAR(c); + } + continue; +#if PRINTPKT_SUPPORT + case 'P': /* print PPP packet */ + bufinfo.ptr = buf; + bufinfo.len = buflen + 1; + p = va_arg(args, unsigned char *); + n = va_arg(args, int); + ppp_format_packet(p, n, ppp_vslp_printer, &bufinfo); + buf = bufinfo.ptr; + buflen = bufinfo.len - 1; + continue; +#endif /* PRINTPKT_SUPPORT */ + case 'B': + p = va_arg(args, unsigned char *); + for (n = prec; n > 0; --n) { + c = *p++; + if (fillch == ' ') + OUTCHAR(' '); + OUTCHAR(hexchars[(c >> 4) & 0xf]); + OUTCHAR(hexchars[c & 0xf]); + } + continue; + default: + *buf++ = '%'; + if (c != '%') + --fmt; /* so %z outputs %z etc. */ + --buflen; + continue; + } + if (base != 0) { + str = num + sizeof(num); + *--str = 0; + while (str > num + neg) { + *--str = hexchars[val % base]; + val = val / base; + if (--prec <= 0 && val == 0) + break; + } + switch (neg) { + case 1: + *--str = '-'; + break; + case 2: + *--str = 'x'; + *--str = '0'; + break; + default: + break; + } + len = num + sizeof(num) - 1 - str; + } else { + len = strlen(str); + if (prec >= 0 && len > prec) + len = prec; + } + if (width > 0) { + if (width > buflen) + width = buflen; + if ((n = width - len) > 0) { + buflen -= n; + for (; n > 0; --n) + *buf++ = fillch; + } + } + if (len > buflen) + len = buflen; + memcpy(buf, str, len); + buf += len; + buflen -= len; + } + *buf = 0; + return buf - buf0; +} + +#if PRINTPKT_SUPPORT +/* + * vslp_printer - used in processing a %P format + */ +static void ppp_vslp_printer(void *arg, const char *fmt, ...) { + int n; + va_list pvar; + struct buffer_info *bi; + + va_start(pvar, fmt); + bi = (struct buffer_info *) arg; + n = ppp_vslprintf(bi->ptr, bi->len, fmt, pvar); + va_end(pvar); + + bi->ptr += n; + bi->len -= n; +} +#endif /* PRINTPKT_SUPPORT */ + +#if 0 /* UNUSED */ +/* + * log_packet - format a packet and log it. + */ + +void +log_packet(p, len, prefix, level) + u_char *p; + int len; + char *prefix; + int level; +{ + init_pr_log(prefix, level); + ppp_format_packet(p, len, pr_log, &level); + end_pr_log(); +} +#endif /* UNUSED */ + +#if PRINTPKT_SUPPORT +/* + * ppp_format_packet - make a readable representation of a packet, + * calling `printer(arg, format, ...)' to output it. 
+ */ +static void ppp_format_packet(const u_char *p, int len, + void (*printer) (void *, const char *, ...), void *arg) { + int i, n; + u_short proto; + const struct protent *protp; + + if (len >= 2) { + GETSHORT(proto, p); + len -= 2; + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (proto == protp->protocol) + break; + if (protp != NULL) { + printer(arg, "[%s", protp->name); + n = (*protp->printpkt)(p, len, printer, arg); + printer(arg, "]"); + p += n; + len -= n; + } else { + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (proto == (protp->protocol & ~0x8000)) + break; + if (protp != 0 && protp->data_name != 0) { + printer(arg, "[%s data]", protp->data_name); + if (len > 8) + printer(arg, "%.8B ...", p); + else + printer(arg, "%.*B", len, p); + len = 0; + } else + printer(arg, "[proto=0x%x]", proto); + } + } + + if (len > 32) + printer(arg, "%.32B ...", p); + else + printer(arg, "%.*B", len, p); +} +#endif /* PRINTPKT_SUPPORT */ + +#if 0 /* UNUSED */ +/* + * init_pr_log, end_pr_log - initialize and finish use of pr_log. + */ + +static char line[256]; /* line to be logged accumulated here */ +static char *linep; /* current pointer within line */ +static int llevel; /* level for logging */ + +void +init_pr_log(prefix, level) + const char *prefix; + int level; +{ + linep = line; + if (prefix != NULL) { + ppp_strlcpy(line, prefix, sizeof(line)); + linep = line + strlen(line); + } + llevel = level; +} + +void +end_pr_log() +{ + if (linep != line) { + *linep = 0; + ppp_log_write(llevel, line); + } +} + +/* + * pr_log - printer routine for outputting to log + */ +void +pr_log (void *arg, const char *fmt, ...) +{ + int l, n; + va_list pvar; + char *p, *eol; + char buf[256]; + + va_start(pvar, fmt); + n = ppp_vslprintf(buf, sizeof(buf), fmt, pvar); + va_end(pvar); + + p = buf; + eol = strchr(buf, '\n'); + if (linep != line) { + l = (eol == NULL)? n: eol - buf; + if (linep + l < line + sizeof(line)) { + if (l > 0) { + memcpy(linep, buf, l); + linep += l; + } + if (eol == NULL) + return; + p = eol + 1; + eol = strchr(p, '\n'); + } + *linep = 0; + ppp_log_write(llevel, line); + linep = line; + } + + while (eol != NULL) { + *eol = 0; + ppp_log_write(llevel, p); + p = eol + 1; + eol = strchr(p, '\n'); + } + + /* assumes sizeof(buf) <= sizeof(line) */ + l = buf + n - p; + if (l > 0) { + memcpy(line, p, n); + linep = line + l; + } +} +#endif /* UNUSED */ + +/* + * ppp_print_string - print a readable representation of a string using + * printer. + */ +void ppp_print_string(const u_char *p, int len, void (*printer) (void *, const char *, ...), void *arg) { + int c; + + printer(arg, "\""); + for (; len > 0; --len) { + c = *p++; + if (' ' <= c && c <= '~') { + if (c == '\\' || c == '"') + printer(arg, "\\"); + printer(arg, "%c", c); + } else { + switch (c) { + case '\n': + printer(arg, "\\n"); + break; + case '\r': + printer(arg, "\\r"); + break; + case '\t': + printer(arg, "\\t"); + break; + default: + printer(arg, "\\%.3o", (u8_t)c); + /* no break */ + } + } + } + printer(arg, "\""); +} + +/* + * ppp_logit - does the hard work for fatal et al. 
+ */ +static void ppp_logit(int level, const char *fmt, va_list args) { + char buf[1024]; + + ppp_vslprintf(buf, sizeof(buf), fmt, args); + ppp_log_write(level, buf); +} + +static void ppp_log_write(int level, char *buf) { + LWIP_UNUSED_ARG(level); /* necessary if PPPDEBUG is defined to an empty function */ + LWIP_UNUSED_ARG(buf); + PPPDEBUG(level, ("%s\n", buf) ); +#if 0 + if (log_to_fd >= 0 && (level != LOG_DEBUG || debug)) { + int n = strlen(buf); + + if (n > 0 && buf[n-1] == '\n') + --n; + if (write(log_to_fd, buf, n) != n + || write(log_to_fd, "\n", 1) != 1) + log_to_fd = -1; + } +#endif +} + +/* + * ppp_fatal - log an error message and die horribly. + */ +void ppp_fatal(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_ERR, fmt, pvar); + va_end(pvar); + + LWIP_ASSERT("ppp_fatal", 0); /* as promised */ +} + +/* + * ppp_error - log an error message. + */ +void ppp_error(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_ERR, fmt, pvar); + va_end(pvar); +#if 0 /* UNUSED */ + ++error_count; +#endif /* UNUSED */ +} + +/* + * ppp_warn - log a warning message. + */ +void ppp_warn(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_WARNING, fmt, pvar); + va_end(pvar); +} + +/* + * ppp_notice - log a notice-level message. + */ +void ppp_notice(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_NOTICE, fmt, pvar); + va_end(pvar); +} + +/* + * ppp_info - log an informational message. + */ +void ppp_info(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_INFO, fmt, pvar); + va_end(pvar); +} + +/* + * ppp_dbglog - log a debug message. + */ +void ppp_dbglog(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_DEBUG, fmt, pvar); + va_end(pvar); +} + +#if PRINTPKT_SUPPORT +/* + * ppp_dump_packet - print out a packet in readable form if it is interesting. + * Assumes len >= PPP_HDRLEN. + */ +void ppp_dump_packet(ppp_pcb *pcb, const char *tag, unsigned char *p, int len) { + int proto; + + /* + * don't print data packets, i.e. IPv4, IPv6, VJ, and compressed packets. + */ + proto = (p[0] << 8) + p[1]; + if (proto < 0xC000 && (proto & ~0x8000) == proto) + return; + + /* + * don't print valid LCP echo request/reply packets if the link is up. + */ + if (proto == PPP_LCP && pcb->phase == PPP_PHASE_RUNNING && len >= 2 + HEADERLEN) { + unsigned char *lcp = p + 2; + int l = (lcp[2] << 8) + lcp[3]; + + if ((lcp[0] == ECHOREQ || lcp[0] == ECHOREP) + && l >= HEADERLEN && l <= len - 2) + return; + } + + ppp_dbglog("%s %P", tag, p, len); +} +#endif /* PRINTPKT_SUPPORT */ + +#if 0 /* Unused */ + +/* + * complete_read - read a full `count' bytes from fd, + * unless end-of-file or an error other than EINTR is encountered. + */ +ssize_t +complete_read(int fd, void *buf, size_t count) +{ + size_t done; + ssize_t nb; + char *ptr = buf; + + for (done = 0; done < count; ) { + nb = read(fd, ptr, count - done); + if (nb < 0) { + if (errno == EINTR) + continue; + return -1; + } + if (nb == 0) + break; + done += nb; + ptr += nb; + } + return done; +} + +/* Procedures for locking the serial device using a lock file. 
*/ +#ifndef LOCK_DIR +#ifdef __linux__ +#define LOCK_DIR "/var/lock" +#else +#ifdef SVR4 +#define LOCK_DIR "/var/spool/locks" +#else +#define LOCK_DIR "/var/spool/lock" +#endif +#endif +#endif /* LOCK_DIR */ + +static char lock_file[MAXPATHLEN]; + +/* + * lock - create a lock file for the named device + */ +int +lock(dev) + char *dev; +{ +#ifdef LOCKLIB + int result; + + result = mklock (dev, (void *) 0); + if (result == 0) { + ppp_strlcpy(lock_file, dev, sizeof(lock_file)); + return 0; + } + + if (result > 0) + ppp_notice("Device %s is locked by pid %d", dev, result); + else + ppp_error("Can't create lock file %s", lock_file); + return -1; + +#else /* LOCKLIB */ + + char lock_buffer[12]; + int fd, pid, n; + +#ifdef SVR4 + struct stat sbuf; + + if (stat(dev, &sbuf) < 0) { + ppp_error("Can't get device number for %s: %m", dev); + return -1; + } + if ((sbuf.st_mode & S_IFMT) != S_IFCHR) { + ppp_error("Can't lock %s: not a character device", dev); + return -1; + } + ppp_slprintf(lock_file, sizeof(lock_file), "%s/LK.%03d.%03d.%03d", + LOCK_DIR, major(sbuf.st_dev), + major(sbuf.st_rdev), minor(sbuf.st_rdev)); +#else + char *p; + char lockdev[MAXPATHLEN]; + + if ((p = strstr(dev, "dev/")) != NULL) { + dev = p + 4; + strncpy(lockdev, dev, MAXPATHLEN-1); + lockdev[MAXPATHLEN-1] = 0; + while ((p = strrchr(lockdev, '/')) != NULL) { + *p = '_'; + } + dev = lockdev; + } else + if ((p = strrchr(dev, '/')) != NULL) + dev = p + 1; + + ppp_slprintf(lock_file, sizeof(lock_file), "%s/LCK..%s", LOCK_DIR, dev); +#endif + + while ((fd = open(lock_file, O_EXCL | O_CREAT | O_RDWR, 0644)) < 0) { + if (errno != EEXIST) { + ppp_error("Can't create lock file %s: %m", lock_file); + break; + } + + /* Read the lock file to find out who has the device locked. */ + fd = open(lock_file, O_RDONLY, 0); + if (fd < 0) { + if (errno == ENOENT) /* This is just a timing problem. */ + continue; + ppp_error("Can't open existing lock file %s: %m", lock_file); + break; + } +#ifndef LOCK_BINARY + n = read(fd, lock_buffer, 11); +#else + n = read(fd, &pid, sizeof(pid)); +#endif /* LOCK_BINARY */ + close(fd); + fd = -1; + if (n <= 0) { + ppp_error("Can't read pid from lock file %s", lock_file); + break; + } + + /* See if the process still exists. */ +#ifndef LOCK_BINARY + lock_buffer[n] = 0; + pid = atoi(lock_buffer); +#endif /* LOCK_BINARY */ + if (pid == getpid()) + return 1; /* somebody else locked it for us */ + if (pid == 0 + || (kill(pid, 0) == -1 && errno == ESRCH)) { + if (unlink (lock_file) == 0) { + ppp_notice("Removed stale lock on %s (pid %d)", dev, pid); + continue; + } + ppp_warn("Couldn't remove stale lock on %s", dev); + } else + ppp_notice("Device %s is locked by pid %d", dev, pid); + break; + } + + if (fd < 0) { + lock_file[0] = 0; + return -1; + } + + pid = getpid(); +#ifndef LOCK_BINARY + ppp_slprintf(lock_buffer, sizeof(lock_buffer), "%10d\n", pid); + write (fd, lock_buffer, 11); +#else + write(fd, &pid, sizeof (pid)); +#endif + close(fd); + return 0; + +#endif +} + +/* + * relock - called to update our lockfile when we are about to detach, + * thus changing our pid (we fork, the child carries on, and the parent dies). + * Note that this is called by the parent, with pid equal to the pid + * of the child. This avoids a potential race which would exist if + * we had the child rewrite the lockfile (the parent might die first, + * and another process could think the lock was stale if it checked + * between when the parent died and the child rewrote the lockfile). 
+ */ +int +relock(pid) + int pid; +{ +#ifdef LOCKLIB + /* XXX is there a way to do this? */ + return -1; +#else /* LOCKLIB */ + + int fd; + char lock_buffer[12]; + + if (lock_file[0] == 0) + return -1; + fd = open(lock_file, O_WRONLY, 0); + if (fd < 0) { + ppp_error("Couldn't reopen lock file %s: %m", lock_file); + lock_file[0] = 0; + return -1; + } + +#ifndef LOCK_BINARY + ppp_slprintf(lock_buffer, sizeof(lock_buffer), "%10d\n", pid); + write (fd, lock_buffer, 11); +#else + write(fd, &pid, sizeof(pid)); +#endif /* LOCK_BINARY */ + close(fd); + return 0; + +#endif /* LOCKLIB */ +} + +/* + * unlock - remove our lockfile + */ +void +unlock() +{ + if (lock_file[0]) { +#ifdef LOCKLIB + (void) rmlock(lock_file, (void *) 0); +#else + unlink(lock_file); +#endif + lock_file[0] = 0; + } +} + +#endif /* Unused */ + +#endif /* PPP_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c b/Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c new file mode 100644 index 00000000..3fecba69 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c @@ -0,0 +1,685 @@ +/* + * Routines to compress and uncompess tcp packets (for transmission + * over low speed serial lines. + * + * Copyright (c) 1989 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by the University of California, Berkeley. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989: + * Initial distribution. + * + * Modified June 1993 by Paul Mackerras, paulus@cs.anu.edu.au, + * so that the entire packet being decompressed doesn't have + * to be in contiguous memory (just the compressed header). + * + * Modified March 1998 by Guy Lancaster, glanca@gesn.com, + * for a 16 bit processor. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && VJ_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/pppdebug.h" + +#include "netif/ppp/vj.h" + +#include + +#if LINK_STATS +#define INCR(counter) ++comp->stats.counter +#else +#define INCR(counter) +#endif + +void +vj_compress_init(struct vjcompress *comp) +{ + u8_t i; + struct cstate *tstate = comp->tstate; + +#if MAX_SLOTS == 0 + memset((char *)comp, 0, sizeof(*comp)); +#endif + comp->maxSlotIndex = MAX_SLOTS - 1; + comp->compressSlot = 0; /* Disable slot ID compression by default. */ + for (i = MAX_SLOTS - 1; i > 0; --i) { + tstate[i].cs_id = i; + tstate[i].cs_next = &tstate[i - 1]; + } + tstate[0].cs_next = &tstate[MAX_SLOTS - 1]; + tstate[0].cs_id = 0; + comp->last_cs = &tstate[0]; + comp->last_recv = 255; + comp->last_xmit = 255; + comp->flags = VJF_TOSS; +} + + +/* ENCODE encodes a number that is known to be non-zero. ENCODEZ + * checks for zero (since zero has to be encoded in the long, 3 byte + * form). 
+ */ +#define ENCODE(n) { \ + if ((u16_t)(n) >= 256) { \ + *cp++ = 0; \ + cp[1] = (u8_t)(n); \ + cp[0] = (u8_t)((n) >> 8); \ + cp += 2; \ + } else { \ + *cp++ = (u8_t)(n); \ + } \ +} +#define ENCODEZ(n) { \ + if ((u16_t)(n) >= 256 || (u16_t)(n) == 0) { \ + *cp++ = 0; \ + cp[1] = (u8_t)(n); \ + cp[0] = (u8_t)((n) >> 8); \ + cp += 2; \ + } else { \ + *cp++ = (u8_t)(n); \ + } \ +} + +#define DECODEL(f) { \ + if (*cp == 0) {\ + u32_t tmp_ = lwip_ntohl(f) + ((cp[1] << 8) | cp[2]); \ + (f) = lwip_htonl(tmp_); \ + cp += 3; \ + } else { \ + u32_t tmp_ = lwip_ntohl(f) + (u32_t)*cp++; \ + (f) = lwip_htonl(tmp_); \ + } \ +} + +#define DECODES(f) { \ + if (*cp == 0) {\ + u16_t tmp_ = lwip_ntohs(f) + (((u16_t)cp[1] << 8) | cp[2]); \ + (f) = lwip_htons(tmp_); \ + cp += 3; \ + } else { \ + u16_t tmp_ = lwip_ntohs(f) + (u16_t)*cp++; \ + (f) = lwip_htons(tmp_); \ + } \ +} + +#define DECODEU(f) { \ + if (*cp == 0) {\ + (f) = lwip_htons(((u16_t)cp[1] << 8) | cp[2]); \ + cp += 3; \ + } else { \ + (f) = lwip_htons((u16_t)*cp++); \ + } \ +} + +/* Helper structures for unaligned *u32_t and *u16_t accesses */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct vj_u32_t { + PACK_STRUCT_FIELD(u32_t v); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct vj_u16_t { + PACK_STRUCT_FIELD(u16_t v); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* + * vj_compress_tcp - Attempt to do Van Jacobson header compression on a + * packet. This assumes that nb and comp are not null and that the first + * buffer of the chain contains a valid IP header. + * Return the VJ type code indicating whether or not the packet was + * compressed. + */ +u8_t +vj_compress_tcp(struct vjcompress *comp, struct pbuf **pb) +{ + struct pbuf *np = *pb; + struct ip_hdr *ip = (struct ip_hdr *)np->payload; + struct cstate *cs = comp->last_cs->cs_next; + u16_t ilen = IPH_HL(ip); + u16_t hlen; + struct tcp_hdr *oth; + struct tcp_hdr *th; + u16_t deltaS, deltaA = 0; + u32_t deltaL; + u32_t changes = 0; + u8_t new_seq[16]; + u8_t *cp = new_seq; + + /* + * Check that the packet is IP proto TCP. + */ + if (IPH_PROTO(ip) != IP_PROTO_TCP) { + return (TYPE_IP); + } + + /* + * Bail if this is an IP fragment or if the TCP packet isn't + * `compressible' (i.e., ACK isn't set or some other control bit is + * set). + */ + if ((IPH_OFFSET(ip) & PP_HTONS(0x3fff)) || np->tot_len < 40) { + return (TYPE_IP); + } + th = (struct tcp_hdr *)&((struct vj_u32_t*)ip)[ilen]; + if ((TCPH_FLAGS(th) & (TCP_SYN|TCP_FIN|TCP_RST|TCP_ACK)) != TCP_ACK) { + return (TYPE_IP); + } + + /* Check that the TCP/IP headers are contained in the first buffer. */ + hlen = ilen + TCPH_HDRLEN(th); + hlen <<= 2; + if (np->len < hlen) { + PPPDEBUG(LOG_INFO, ("vj_compress_tcp: header len %d spans buffers\n", hlen)); + return (TYPE_IP); + } + + /* TCP stack requires that we don't change the packet payload, therefore we copy + * the whole packet before compression. */ + np = pbuf_clone(PBUF_RAW, PBUF_RAM, *pb); + if (!np) { + return (TYPE_IP); + } + + *pb = np; + ip = (struct ip_hdr *)np->payload; + + /* + * Packet is compressible -- we're going to send either a + * COMPRESSED_TCP or UNCOMPRESSED_TCP packet. Either way we need + * to locate (or create) the connection state. 
Special case the + * most recently used connection since it's most likely to be used + * again & we don't have to do any reordering if it's used. + */ + INCR(vjs_packets); + if (!ip4_addr_cmp(&ip->src, &cs->cs_ip.src) + || !ip4_addr_cmp(&ip->dest, &cs->cs_ip.dest) + || (*(struct vj_u32_t*)th).v != (((struct vj_u32_t*)&cs->cs_ip)[IPH_HL(&cs->cs_ip)]).v) { + /* + * Wasn't the first -- search for it. + * + * States are kept in a circularly linked list with + * last_cs pointing to the end of the list. The + * list is kept in lru order by moving a state to the + * head of the list whenever it is referenced. Since + * the list is short and, empirically, the connection + * we want is almost always near the front, we locate + * states via linear search. If we don't find a state + * for the datagram, the oldest state is (re-)used. + */ + struct cstate *lcs; + struct cstate *lastcs = comp->last_cs; + + do { + lcs = cs; cs = cs->cs_next; + INCR(vjs_searches); + if (ip4_addr_cmp(&ip->src, &cs->cs_ip.src) + && ip4_addr_cmp(&ip->dest, &cs->cs_ip.dest) + && (*(struct vj_u32_t*)th).v == (((struct vj_u32_t*)&cs->cs_ip)[IPH_HL(&cs->cs_ip)]).v) { + goto found; + } + } while (cs != lastcs); + + /* + * Didn't find it -- re-use oldest cstate. Send an + * uncompressed packet that tells the other side what + * connection number we're using for this conversation. + * Note that since the state list is circular, the oldest + * state points to the newest and we only need to set + * last_cs to update the lru linkage. + */ + INCR(vjs_misses); + comp->last_cs = lcs; + goto uncompressed; + + found: + /* + * Found it -- move to the front on the connection list. + */ + if (cs == lastcs) { + comp->last_cs = lcs; + } else { + lcs->cs_next = cs->cs_next; + cs->cs_next = lastcs->cs_next; + lastcs->cs_next = cs; + } + } + + oth = (struct tcp_hdr *)&((struct vj_u32_t*)&cs->cs_ip)[ilen]; + deltaS = ilen; + + /* + * Make sure that only what we expect to change changed. The first + * line of the `if' checks the IP protocol version, header length & + * type of service. The 2nd line checks the "Don't fragment" bit. + * The 3rd line checks the time-to-live and protocol (the protocol + * check is unnecessary but costless). The 4th line checks the TCP + * header length. The 5th line checks IP options, if any. The 6th + * line checks TCP options, if any. If any of these things are + * different between the previous & current datagram, we send the + * current datagram `uncompressed'. + */ + if ((((struct vj_u16_t*)ip)[0]).v != (((struct vj_u16_t*)&cs->cs_ip)[0]).v + || (((struct vj_u16_t*)ip)[3]).v != (((struct vj_u16_t*)&cs->cs_ip)[3]).v + || (((struct vj_u16_t*)ip)[4]).v != (((struct vj_u16_t*)&cs->cs_ip)[4]).v + || TCPH_HDRLEN(th) != TCPH_HDRLEN(oth) + || (deltaS > 5 && BCMP(ip + 1, &cs->cs_ip + 1, (deltaS - 5) << 2)) + || (TCPH_HDRLEN(th) > 5 && BCMP(th + 1, oth + 1, (TCPH_HDRLEN(th) - 5) << 2))) { + goto uncompressed; + } + + /* + * Figure out which of the changing fields changed. The + * receiver expects changes in the order: urgent, window, + * ack, seq (the order minimizes the number of temporaries + * needed in this section of code). + */ + if (TCPH_FLAGS(th) & TCP_URG) { + deltaS = lwip_ntohs(th->urgp); + ENCODEZ(deltaS); + changes |= NEW_U; + } else if (th->urgp != oth->urgp) { + /* argh! URG not set but urp changed -- a sensible + * implementation should never do this but RFC793 + * doesn't prohibit the change so we have to deal + * with it. 
*/ + goto uncompressed; + } + + if ((deltaS = (u16_t)(lwip_ntohs(th->wnd) - lwip_ntohs(oth->wnd))) != 0) { + ENCODE(deltaS); + changes |= NEW_W; + } + + if ((deltaL = lwip_ntohl(th->ackno) - lwip_ntohl(oth->ackno)) != 0) { + if (deltaL > 0xffff) { + goto uncompressed; + } + deltaA = (u16_t)deltaL; + ENCODE(deltaA); + changes |= NEW_A; + } + + if ((deltaL = lwip_ntohl(th->seqno) - lwip_ntohl(oth->seqno)) != 0) { + if (deltaL > 0xffff) { + goto uncompressed; + } + deltaS = (u16_t)deltaL; + ENCODE(deltaS); + changes |= NEW_S; + } + + switch(changes) { + case 0: + /* + * Nothing changed. If this packet contains data and the + * last one didn't, this is probably a data packet following + * an ack (normal on an interactive connection) and we send + * it compressed. Otherwise it's probably a retransmit, + * retransmitted ack or window probe. Send it uncompressed + * in case the other side missed the compressed version. + */ + if (IPH_LEN(ip) != IPH_LEN(&cs->cs_ip) && + lwip_ntohs(IPH_LEN(&cs->cs_ip)) == hlen) { + break; + } + /* no break */ + /* fall through */ + + case SPECIAL_I: + case SPECIAL_D: + /* + * actual changes match one of our special case encodings -- + * send packet uncompressed. + */ + goto uncompressed; + + case NEW_S|NEW_A: + if (deltaS == deltaA && deltaS == lwip_ntohs(IPH_LEN(&cs->cs_ip)) - hlen) { + /* special case for echoed terminal traffic */ + changes = SPECIAL_I; + cp = new_seq; + } + break; + + case NEW_S: + if (deltaS == lwip_ntohs(IPH_LEN(&cs->cs_ip)) - hlen) { + /* special case for data xfer */ + changes = SPECIAL_D; + cp = new_seq; + } + break; + default: + break; + } + + deltaS = (u16_t)(lwip_ntohs(IPH_ID(ip)) - lwip_ntohs(IPH_ID(&cs->cs_ip))); + if (deltaS != 1) { + ENCODEZ(deltaS); + changes |= NEW_I; + } + if (TCPH_FLAGS(th) & TCP_PSH) { + changes |= TCP_PUSH_BIT; + } + /* + * Grab the cksum before we overwrite it below. Then update our + * state with this packet's header. + */ + deltaA = lwip_ntohs(th->chksum); + MEMCPY(&cs->cs_ip, ip, hlen); + + /* + * We want to use the original packet as our compressed packet. + * (cp - new_seq) is the number of bytes we need for compressed + * sequence numbers. In addition we need one byte for the change + * mask, one for the connection id and two for the tcp checksum. + * So, (cp - new_seq) + 4 bytes of header are needed. hlen is how + * many bytes of the original packet to toss so subtract the two to + * get the new packet size. + */ + deltaS = (u16_t)(cp - new_seq); + if (!comp->compressSlot || comp->last_xmit != cs->cs_id) { + comp->last_xmit = cs->cs_id; + hlen -= deltaS + 4; + if (pbuf_remove_header(np, hlen)){ + /* Can we cope with this failing? Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + } + cp = (u8_t*)np->payload; + *cp++ = (u8_t)(changes | NEW_C); + *cp++ = cs->cs_id; + } else { + hlen -= deltaS + 3; + if (pbuf_remove_header(np, hlen)) { + /* Can we cope with this failing? Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + } + cp = (u8_t*)np->payload; + *cp++ = (u8_t)changes; + } + *cp++ = (u8_t)(deltaA >> 8); + *cp++ = (u8_t)deltaA; + MEMCPY(cp, new_seq, deltaS); + INCR(vjs_compressed); + return (TYPE_COMPRESSED_TCP); + + /* + * Update connection state cs & send uncompressed packet (that is, + * a regular ip/tcp packet but with the 'conversation id' we hope + * to use on future compressed packets in the protocol field). 
+ */ +uncompressed: + MEMCPY(&cs->cs_ip, ip, hlen); + IPH_PROTO_SET(ip, cs->cs_id); + comp->last_xmit = cs->cs_id; + return (TYPE_UNCOMPRESSED_TCP); +} + +/* + * Called when we may have missed a packet. + */ +void +vj_uncompress_err(struct vjcompress *comp) +{ + comp->flags |= VJF_TOSS; + INCR(vjs_errorin); +} + +/* + * "Uncompress" a packet of type TYPE_UNCOMPRESSED_TCP. + * Return 0 on success, -1 on failure. + */ +int +vj_uncompress_uncomp(struct pbuf *nb, struct vjcompress *comp) +{ + u32_t hlen; + struct cstate *cs; + struct ip_hdr *ip; + + ip = (struct ip_hdr *)nb->payload; + hlen = IPH_HL(ip) << 2; + if (IPH_PROTO(ip) >= MAX_SLOTS + || hlen + sizeof(struct tcp_hdr) > nb->len + || (hlen += TCPH_HDRLEN_BYTES((struct tcp_hdr *)&((char *)ip)[hlen])) + > nb->len + || hlen > MAX_HDR) { + PPPDEBUG(LOG_INFO, ("vj_uncompress_uncomp: bad cid=%d, hlen=%d buflen=%d\n", + IPH_PROTO(ip), hlen, nb->len)); + vj_uncompress_err(comp); + return -1; + } + cs = &comp->rstate[comp->last_recv = IPH_PROTO(ip)]; + comp->flags &=~ VJF_TOSS; + IPH_PROTO_SET(ip, IP_PROTO_TCP); + /* copy from/to bigger buffers checked above instead of cs->cs_ip and ip + just to help static code analysis to see this is correct ;-) */ + MEMCPY(&cs->cs_hdr, nb->payload, hlen); + cs->cs_hlen = (u16_t)hlen; + INCR(vjs_uncompressedin); + return 0; +} + +/* + * Uncompress a packet of type TYPE_COMPRESSED_TCP. + * The packet is composed of a buffer chain and the first buffer + * must contain an accurate chain length. + * The first buffer must include the entire compressed TCP/IP header. + * This procedure replaces the compressed header with the uncompressed + * header and returns the length of the VJ header. + */ +int +vj_uncompress_tcp(struct pbuf **nb, struct vjcompress *comp) +{ + u8_t *cp; + struct tcp_hdr *th; + struct cstate *cs; + struct vj_u16_t *bp; + struct pbuf *n0 = *nb; + u32_t tmp; + u32_t vjlen, hlen, changes; + + INCR(vjs_compressedin); + cp = (u8_t*)n0->payload; + changes = *cp++; + if (changes & NEW_C) { + /* + * Make sure the state index is in range, then grab the state. + * If we have a good state index, clear the 'discard' flag. + */ + if (*cp >= MAX_SLOTS) { + PPPDEBUG(LOG_INFO, ("vj_uncompress_tcp: bad cid=%d\n", *cp)); + goto bad; + } + + comp->flags &=~ VJF_TOSS; + comp->last_recv = *cp++; + } else { + /* + * this packet has an implicit state index. If we've + * had a line error since the last time we got an + * explicit state index, we have to toss the packet. + */ + if (comp->flags & VJF_TOSS) { + PPPDEBUG(LOG_INFO, ("vj_uncompress_tcp: tossing\n")); + INCR(vjs_tossed); + return (-1); + } + } + cs = &comp->rstate[comp->last_recv]; + hlen = IPH_HL(&cs->cs_ip) << 2; + th = (struct tcp_hdr *)&((u8_t*)&cs->cs_ip)[hlen]; + th->chksum = lwip_htons((*cp << 8) | cp[1]); + cp += 2; + if (changes & TCP_PUSH_BIT) { + TCPH_SET_FLAG(th, TCP_PSH); + } else { + TCPH_UNSET_FLAG(th, TCP_PSH); + } + + switch (changes & SPECIALS_MASK) { + case SPECIAL_I: + { + u32_t i = lwip_ntohs(IPH_LEN(&cs->cs_ip)) - cs->cs_hlen; + /* some compilers can't nest inline assembler.. */ + tmp = lwip_ntohl(th->ackno) + i; + th->ackno = lwip_htonl(tmp); + tmp = lwip_ntohl(th->seqno) + i; + th->seqno = lwip_htonl(tmp); + } + break; + + case SPECIAL_D: + /* some compilers can't nest inline assembler.. 
*/ + tmp = lwip_ntohl(th->seqno) + lwip_ntohs(IPH_LEN(&cs->cs_ip)) - cs->cs_hlen; + th->seqno = lwip_htonl(tmp); + break; + + default: + if (changes & NEW_U) { + TCPH_SET_FLAG(th, TCP_URG); + DECODEU(th->urgp); + } else { + TCPH_UNSET_FLAG(th, TCP_URG); + } + if (changes & NEW_W) { + DECODES(th->wnd); + } + if (changes & NEW_A) { + DECODEL(th->ackno); + } + if (changes & NEW_S) { + DECODEL(th->seqno); + } + break; + } + if (changes & NEW_I) { + DECODES(cs->cs_ip._id); + } else { + IPH_ID_SET(&cs->cs_ip, lwip_ntohs(IPH_ID(&cs->cs_ip)) + 1); + IPH_ID_SET(&cs->cs_ip, lwip_htons(IPH_ID(&cs->cs_ip))); + } + + /* + * At this point, cp points to the first byte of data in the + * packet. Fill in the IP total length and update the IP + * header checksum. + */ + vjlen = (u16_t)(cp - (u8_t*)n0->payload); + if (n0->len < vjlen) { + /* + * We must have dropped some characters (crc should detect + * this but the old slip framing won't) + */ + PPPDEBUG(LOG_INFO, ("vj_uncompress_tcp: head buffer %d too short %d\n", + n0->len, vjlen)); + goto bad; + } + +#if BYTE_ORDER == LITTLE_ENDIAN + tmp = n0->tot_len - vjlen + cs->cs_hlen; + IPH_LEN_SET(&cs->cs_ip, lwip_htons((u16_t)tmp)); +#else + IPH_LEN_SET(&cs->cs_ip, lwip_htons(n0->tot_len - vjlen + cs->cs_hlen)); +#endif + + /* recompute the ip header checksum */ + bp = (struct vj_u16_t*) &cs->cs_ip; + IPH_CHKSUM_SET(&cs->cs_ip, 0); + for (tmp = 0; hlen > 0; hlen -= 2) { + tmp += (*bp++).v; + } + tmp = (tmp & 0xffff) + (tmp >> 16); + tmp = (tmp & 0xffff) + (tmp >> 16); + IPH_CHKSUM_SET(&cs->cs_ip, (u16_t)(~tmp)); + + /* Remove the compressed header and prepend the uncompressed header. */ + if (pbuf_remove_header(n0, vjlen)) { + /* Can we cope with this failing? Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + goto bad; + } + + if(LWIP_MEM_ALIGN(n0->payload) != n0->payload) { + struct pbuf *np; + +#if IP_FORWARD + /* If IP forwarding is enabled we are using a PBUF_LINK packet type so + * the packet is being allocated with enough header space to be + * forwarded (to Ethernet for example). + */ + np = pbuf_alloc(PBUF_LINK, n0->len + cs->cs_hlen, PBUF_POOL); +#else /* IP_FORWARD */ + np = pbuf_alloc(PBUF_RAW, n0->len + cs->cs_hlen, PBUF_POOL); +#endif /* IP_FORWARD */ + if(!np) { + PPPDEBUG(LOG_WARNING, ("vj_uncompress_tcp: realign failed\n")); + goto bad; + } + + if (pbuf_remove_header(np, cs->cs_hlen)) { + /* Can we cope with this failing? 
Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + goto bad; + } + + pbuf_take(np, n0->payload, n0->len); + + if(n0->next) { + pbuf_chain(np, n0->next); + pbuf_dechain(n0); + } + pbuf_free(n0); + n0 = np; + } + + if (pbuf_add_header(n0, cs->cs_hlen)) { + struct pbuf *np; + + LWIP_ASSERT("vj_uncompress_tcp: cs->cs_hlen <= PBUF_POOL_BUFSIZE", cs->cs_hlen <= PBUF_POOL_BUFSIZE); + np = pbuf_alloc(PBUF_RAW, cs->cs_hlen, PBUF_POOL); + if(!np) { + PPPDEBUG(LOG_WARNING, ("vj_uncompress_tcp: prepend failed\n")); + goto bad; + } + pbuf_cat(np, n0); + n0 = np; + } + LWIP_ASSERT("n0->len >= cs->cs_hlen", n0->len >= cs->cs_hlen); + MEMCPY(n0->payload, &cs->cs_ip, cs->cs_hlen); + + *nb = n0; + + return vjlen; + +bad: + vj_uncompress_err(comp); + return (-1); +} + +#endif /* PPP_SUPPORT && VJ_SUPPORT */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/slipif.c b/Middlewares/Third_Party/LwIP/src/netif/slipif.c new file mode 100644 index 00000000..9b175dc3 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/slipif.c @@ -0,0 +1,558 @@ +/** + * @file + * SLIP Interface + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Institute nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is built upon the file: src/arch/rtxc/netif/sioslip.c + * + * Author: Magnus Ivarsson + * Simon Goldschmidt + */ + + +/** + * @defgroup slipif SLIP + * @ingroup netifs + * + * This is an arch independent SLIP netif. The specific serial hooks must be + * provided by another file. They are sio_open, sio_read/sio_tryread and sio_send + * + * Usage: This netif can be used in three ways:\n + * 1) For NO_SYS==0, an RX thread can be used which blocks on sio_read() + * until data is received.\n + * 2) In your main loop, call slipif_poll() to check for new RX bytes, + * completed packets are fed into netif->input().\n + * 3) Call slipif_received_byte[s]() from your serial RX ISR and + * slipif_process_rxqueue() from your main loop. 
ISR level decodes + * packets and puts completed packets on a queue which is fed into + * the stack from the main loop (needs SYS_LIGHTWEIGHT_PROT for + * pbuf_alloc to work on ISR level!). + * + */ + +#include "netif/slipif.h" +#include "lwip/opt.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" +#include "lwip/sys.h" +#include "lwip/sio.h" + +#define SLIP_END 0xC0 /* 0300: start and end of every packet */ +#define SLIP_ESC 0xDB /* 0333: escape start (one byte escaped data follows) */ +#define SLIP_ESC_END 0xDC /* 0334: following escape: original byte is 0xC0 (END) */ +#define SLIP_ESC_ESC 0xDD /* 0335: following escape: original byte is 0xDB (ESC) */ + +/** Maximum packet size that is received by this netif */ +#ifndef SLIP_MAX_SIZE +#define SLIP_MAX_SIZE 1500 +#endif + +/** Define this to the interface speed for SNMP + * (sio_fd is the sio_fd_t returned by sio_open). + * The default value of zero means 'unknown'. + */ +#ifndef SLIP_SIO_SPEED +#define SLIP_SIO_SPEED(sio_fd) 0 +#endif + +enum slipif_recv_state { + SLIP_RECV_NORMAL, + SLIP_RECV_ESCAPE +}; + +struct slipif_priv { + sio_fd_t sd; + /* q is the whole pbuf chain for a packet, p is the current pbuf in the chain */ + struct pbuf *p, *q; + u8_t state; + u16_t i, recved; +#if SLIP_RX_FROM_ISR + struct pbuf *rxpackets; +#endif +}; + +/** + * Send a pbuf doing the necessary SLIP encapsulation + * + * Uses the serial layer's sio_send() + * + * @param netif the lwip network interface structure for this slipif + * @param p the pbuf chain packet to send + * @return always returns ERR_OK since the serial layer does not provide return values + */ +static err_t +slipif_output(struct netif *netif, struct pbuf *p) +{ + struct slipif_priv *priv; + struct pbuf *q; + u16_t i; + u8_t c; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + LWIP_ASSERT("p != NULL", (p != NULL)); + + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_output: sending %"U16_F" bytes\n", p->tot_len)); + priv = (struct slipif_priv *)netif->state; + + /* Send pbuf out on the serial I/O device. */ + /* Start with packet delimiter. */ + sio_send(SLIP_END, priv->sd); + + for (q = p; q != NULL; q = q->next) { + for (i = 0; i < q->len; i++) { + c = ((u8_t *)q->payload)[i]; + switch (c) { + case SLIP_END: + /* need to escape this byte (0xC0 -> 0xDB, 0xDC) */ + sio_send(SLIP_ESC, priv->sd); + sio_send(SLIP_ESC_END, priv->sd); + break; + case SLIP_ESC: + /* need to escape this byte (0xDB -> 0xDB, 0xDD) */ + sio_send(SLIP_ESC, priv->sd); + sio_send(SLIP_ESC_ESC, priv->sd); + break; + default: + /* normal byte - no need for escaping */ + sio_send(c, priv->sd); + break; + } + } + } + /* End with packet delimiter. 
*/ + sio_send(SLIP_END, priv->sd); + return ERR_OK; +} + +#if LWIP_IPV4 +/** + * Send a pbuf doing the necessary SLIP encapsulation + * + * Uses the serial layer's sio_send() + * + * @param netif the lwip network interface structure for this slipif + * @param p the pbuf chain packet to send + * @param ipaddr the ip address to send the packet to (not used for slipif) + * @return always returns ERR_OK since the serial layer does not provide return values + */ +static err_t +slipif_output_v4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(ipaddr); + return slipif_output(netif, p); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Send a pbuf doing the necessary SLIP encapsulation + * + * Uses the serial layer's sio_send() + * + * @param netif the lwip network interface structure for this slipif + * @param p the pbuf chain packet to send + * @param ipaddr the ip address to send the packet to (not used for slipif) + * @return always returns ERR_OK since the serial layer does not provide return values + */ +static err_t +slipif_output_v6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(ipaddr); + return slipif_output(netif, p); +} +#endif /* LWIP_IPV6 */ + +/** + * Handle the incoming SLIP stream character by character + * + * @param netif the lwip network interface structure for this slipif + * @param c received character (multiple calls to this function will + * return a complete packet, NULL is returned before - used for polling) + * @return The IP packet when SLIP_END is received + */ +static struct pbuf * +slipif_rxbyte(struct netif *netif, u8_t c) +{ + struct slipif_priv *priv; + struct pbuf *t; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + priv = (struct slipif_priv *)netif->state; + + switch (priv->state) { + case SLIP_RECV_NORMAL: + switch (c) { + case SLIP_END: + if (priv->recved > 0) { + /* Received whole packet. */ + /* Trim the pbuf to the size of the received packet. */ + pbuf_realloc(priv->q, priv->recved); + + LINK_STATS_INC(link.recv); + + LWIP_DEBUGF(SLIP_DEBUG, ("slipif: Got packet (%"U16_F" bytes)\n", priv->recved)); + t = priv->q; + priv->p = priv->q = NULL; + priv->i = priv->recved = 0; + return t; + } + return NULL; + case SLIP_ESC: + priv->state = SLIP_RECV_ESCAPE; + return NULL; + default: + break; + } /* end switch (c) */ + break; + case SLIP_RECV_ESCAPE: + /* un-escape END or ESC bytes, leave other bytes + (although that would be a protocol error) */ + switch (c) { + case SLIP_ESC_END: + c = SLIP_END; + break; + case SLIP_ESC_ESC: + c = SLIP_ESC; + break; + default: + break; + } + priv->state = SLIP_RECV_NORMAL; + break; + default: + break; + } /* end switch (priv->state) */ + + /* byte received, packet not yet completely received */ + if (priv->p == NULL) { + /* allocate a new pbuf */ + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_input: alloc\n")); + priv->p = pbuf_alloc(PBUF_LINK, (PBUF_POOL_BUFSIZE - PBUF_LINK_HLEN - PBUF_LINK_ENCAPSULATION_HLEN), PBUF_POOL); + + if (priv->p == NULL) { + LINK_STATS_INC(link.drop); + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_input: no new pbuf! 
(DROP)\n")); + /* don't process any further since we got no pbuf to receive to */ + return NULL; + } + + if (priv->q != NULL) { + /* 'chain' the pbuf to the existing chain */ + pbuf_cat(priv->q, priv->p); + } else { + /* p is the first pbuf in the chain */ + priv->q = priv->p; + } + } + + /* this automatically drops bytes if > SLIP_MAX_SIZE */ + if ((priv->p != NULL) && (priv->recved <= SLIP_MAX_SIZE)) { + ((u8_t *)priv->p->payload)[priv->i] = c; + priv->recved++; + priv->i++; + if (priv->i >= priv->p->len) { + /* on to the next pbuf */ + priv->i = 0; + if (priv->p->next != NULL && priv->p->next->len > 0) { + /* p is a chain, on to the next in the chain */ + priv->p = priv->p->next; + } else { + /* p is a single pbuf, set it to NULL so next time a new + * pbuf is allocated */ + priv->p = NULL; + } + } + } + return NULL; +} + +/** Like slipif_rxbyte, but passes completed packets to netif->input + * + * @param netif The lwip network interface structure for this slipif + * @param c received character + */ +static void +slipif_rxbyte_input(struct netif *netif, u8_t c) +{ + struct pbuf *p; + p = slipif_rxbyte(netif, c); + if (p != NULL) { + if (netif->input(p, netif) != ERR_OK) { + pbuf_free(p); + } + } +} + +#if SLIP_USE_RX_THREAD +/** + * The SLIP input thread. + * + * Feed the IP layer with incoming packets + * + * @param nf the lwip network interface structure for this slipif + */ +static void +slipif_loop_thread(void *nf) +{ + u8_t c; + struct netif *netif = (struct netif *)nf; + struct slipif_priv *priv = (struct slipif_priv *)netif->state; + + while (1) { + if (sio_read(priv->sd, &c, 1) > 0) { + slipif_rxbyte_input(netif, c); + } + } +} +#endif /* SLIP_USE_RX_THREAD */ + +/** + * @ingroup slipif + * SLIP netif initialization + * + * Call the arch specific sio_open and remember + * the opened device in the state field of the netif. + * + * @param netif the lwip network interface structure for this slipif + * @return ERR_OK if serial line could be opened, + * ERR_MEM if no memory could be allocated, + * ERR_IF is serial line couldn't be opened + * + * @note If netif->state is interpreted as an u8_t serial port number. + * + */ +err_t +slipif_init(struct netif *netif) +{ + struct slipif_priv *priv; + u8_t sio_num; + + LWIP_ASSERT("slipif needs an input callback", netif->input != NULL); + + /* netif->state contains serial port number */ + sio_num = LWIP_PTR_NUMERIC_CAST(u8_t, netif->state); + + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_init: netif->num=%"U16_F"\n", (u16_t)sio_num)); + + /* Allocate private data */ + priv = (struct slipif_priv *)mem_malloc(sizeof(struct slipif_priv)); + if (!priv) { + return ERR_MEM; + } + + netif->name[0] = 's'; + netif->name[1] = 'l'; +#if LWIP_IPV4 + netif->output = slipif_output_v4; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + netif->output_ip6 = slipif_output_v6; +#endif /* LWIP_IPV6 */ + netif->mtu = SLIP_MAX_SIZE; + + /* Try to open the serial port. */ + priv->sd = sio_open(sio_num); + if (!priv->sd) { + /* Opening the serial port failed. */ + mem_free(priv); + return ERR_IF; + } + + /* Initialize private data */ + priv->p = NULL; + priv->q = NULL; + priv->state = SLIP_RECV_NORMAL; + priv->i = 0; + priv->recved = 0; +#if SLIP_RX_FROM_ISR + priv->rxpackets = NULL; +#endif + + netif->state = priv; + + /* initialize the snmp variables and counters inside the struct netif */ + MIB2_INIT_NETIF(netif, snmp_ifType_slip, SLIP_SIO_SPEED(priv->sd)); + +#if SLIP_USE_RX_THREAD + /* Create a thread to poll the serial line. 
*/ + sys_thread_new(SLIPIF_THREAD_NAME, slipif_loop_thread, netif, + SLIPIF_THREAD_STACKSIZE, SLIPIF_THREAD_PRIO); +#endif /* SLIP_USE_RX_THREAD */ + return ERR_OK; +} + +/** + * @ingroup slipif + * Polls the serial device and feeds the IP layer with incoming packets. + * + * @param netif The lwip network interface structure for this slipif + */ +void +slipif_poll(struct netif *netif) +{ + u8_t c; + struct slipif_priv *priv; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + priv = (struct slipif_priv *)netif->state; + + while (sio_tryread(priv->sd, &c, 1) > 0) { + slipif_rxbyte_input(netif, c); + } +} + +#if SLIP_RX_FROM_ISR +/** + * @ingroup slipif + * Feeds the IP layer with incoming packets that were receive + * + * @param netif The lwip network interface structure for this slipif + */ +void +slipif_process_rxqueue(struct netif *netif) +{ + struct slipif_priv *priv; + SYS_ARCH_DECL_PROTECT(old_level); + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + priv = (struct slipif_priv *)netif->state; + + SYS_ARCH_PROTECT(old_level); + while (priv->rxpackets != NULL) { + struct pbuf *p = priv->rxpackets; +#if SLIP_RX_QUEUE + /* dequeue packet */ + struct pbuf *q = p; + while ((q->len != q->tot_len) && (q->next != NULL)) { + q = q->next; + } + priv->rxpackets = q->next; + q->next = NULL; +#else /* SLIP_RX_QUEUE */ + priv->rxpackets = NULL; +#endif /* SLIP_RX_QUEUE */ + SYS_ARCH_UNPROTECT(old_level); + if (netif->input(p, netif) != ERR_OK) { + pbuf_free(p); + } + SYS_ARCH_PROTECT(old_level); + } + SYS_ARCH_UNPROTECT(old_level); +} + +/** Like slipif_rxbyte, but queues completed packets. + * + * @param netif The lwip network interface structure for this slipif + * @param data Received serial byte + */ +static void +slipif_rxbyte_enqueue(struct netif *netif, u8_t data) +{ + struct pbuf *p; + struct slipif_priv *priv = (struct slipif_priv *)netif->state; + SYS_ARCH_DECL_PROTECT(old_level); + + p = slipif_rxbyte(netif, data); + if (p != NULL) { + SYS_ARCH_PROTECT(old_level); + if (priv->rxpackets != NULL) { +#if SLIP_RX_QUEUE + /* queue multiple pbufs */ + struct pbuf *q = p; + while (q->next != NULL) { + q = q->next; + } + q->next = p; + } else { +#else /* SLIP_RX_QUEUE */ + pbuf_free(priv->rxpackets); + } + { +#endif /* SLIP_RX_QUEUE */ + priv->rxpackets = p; + } + SYS_ARCH_UNPROTECT(old_level); + } +} + +/** + * @ingroup slipif + * Process a received byte, completed packets are put on a queue that is + * fed into IP through slipif_process_rxqueue(). + * + * This function can be called from ISR if SYS_LIGHTWEIGHT_PROT is enabled. + * + * @param netif The lwip network interface structure for this slipif + * @param data received character + */ +void +slipif_received_byte(struct netif *netif, u8_t data) +{ + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + slipif_rxbyte_enqueue(netif, data); +} + +/** + * @ingroup slipif + * Process multiple received byte, completed packets are put on a queue that is + * fed into IP through slipif_process_rxqueue(). + * + * This function can be called from ISR if SYS_LIGHTWEIGHT_PROT is enabled. 
+ * + * @param netif The lwip network interface structure for this slipif + * @param data received character + * @param len Number of received characters + */ +void +slipif_received_bytes(struct netif *netif, u8_t *data, u8_t len) +{ + u8_t i; + u8_t *rxdata = data; + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + for (i = 0; i < len; i++, rxdata++) { + slipif_rxbyte_enqueue(netif, *rxdata); + } +} +#endif /* SLIP_RX_FROM_ISR */ diff --git a/Middlewares/Third_Party/LwIP/src/netif/zepif.c b/Middlewares/Third_Party/LwIP/src/netif/zepif.c new file mode 100644 index 00000000..b4033030 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/src/netif/zepif.c @@ -0,0 +1,300 @@ +/** + * @file + * + * @defgroup zepif ZEP - ZigBee Encapsulation Protocol + * @ingroup netifs + * A netif implementing the ZigBee Encapsulation Protocol (ZEP). + * This is used to tunnel 6LowPAN over UDP. + * + * Usage (there must be a default netif before!): + * @code{.c} + * netif_add(&zep_netif, NULL, NULL, NULL, NULL, zepif_init, tcpip_6lowpan_input); + * netif_create_ip6_linklocal_address(&zep_netif, 1); + * netif_set_up(&zep_netif); + * netif_set_link_up(&zep_netif); + * @endcode + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ + +#include "netif/zepif.h" + +#if LWIP_IPV6 && LWIP_UDP + +#include "netif/lowpan6.h" +#include "lwip/udp.h" +#include "lwip/timeouts.h" +#include + +/** Define this to 1 to loop back TX packets for testing */ +#ifndef ZEPIF_LOOPBACK +#define ZEPIF_LOOPBACK 0 +#endif + +#define ZEP_MAX_DATA_LEN 127 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct zep_hdr { + PACK_STRUCT_FLD_8(u8_t prot_id[2]); + PACK_STRUCT_FLD_8(u8_t prot_version); + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t channel_id); + PACK_STRUCT_FIELD(u16_t device_id); + PACK_STRUCT_FLD_8(u8_t crc_mode); + PACK_STRUCT_FLD_8(u8_t unknown_1); + PACK_STRUCT_FIELD(u32_t timestamp[2]); + PACK_STRUCT_FIELD(u32_t seq_num); + PACK_STRUCT_FLD_8(u8_t unknown_2[10]); + PACK_STRUCT_FLD_8(u8_t len); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +struct zepif_state { + struct zepif_init init; + struct udp_pcb *pcb; + u32_t seqno; +}; + +static u8_t zep_lowpan_timer_running; + +/* Helper function that calls the 6LoWPAN timer and reschedules itself */ +static void +zep_lowpan_timer(void *arg) +{ + lowpan6_tmr(); + if (zep_lowpan_timer_running) { + sys_timeout(LOWPAN6_TMR_INTERVAL, zep_lowpan_timer, arg); + } +} + +/* Pass received pbufs into 6LowPAN netif */ +static void +zepif_udp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port) +{ + err_t err; + struct netif *netif_lowpan6 = (struct netif *)arg; + struct zep_hdr *zep; + + LWIP_ASSERT("arg != NULL", arg != NULL); + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_UNUSED_ARG(pcb); /* for LWIP_NOASSERT */ + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + if (p == NULL) { + return; + } + + /* Parse and hide the ZEP header */ + if (p->len < sizeof(struct zep_hdr)) { + /* need the zep_hdr in one piece */ + goto err_return; + } + zep = (struct zep_hdr *)p->payload; + if (zep->prot_id[0] != 'E') { + goto err_return; + } + if (zep->prot_id[1] != 'X') { + goto err_return; + } + if (zep->prot_version != 2) { + /* we only support this version for now */ + goto err_return; + } + if (zep->type != 1) { + goto err_return; + } + if (zep->crc_mode != 1) { + goto err_return; + } + if (zep->len != p->tot_len - sizeof(struct zep_hdr)) { + goto err_return; + } + /* everything seems to be OK, hide the ZEP header */ + if (pbuf_remove_header(p, sizeof(struct zep_hdr))) { + goto err_return; + } + /* TODO Check CRC? */ + /* remove CRC trailer */ + pbuf_realloc(p, p->tot_len - 2); + + /* Call into 6LoWPAN code. 
*/ + err = netif_lowpan6->input(p, netif_lowpan6); + if (err == ERR_OK) { + return; + } +err_return: + pbuf_free(p); +} + +/* Send 6LoWPAN TX packets as UDP broadcast */ +static err_t +zepif_linkoutput(struct netif *netif, struct pbuf *p) +{ + err_t err; + struct pbuf *q; + struct zep_hdr *zep; + struct zepif_state *state; + + LWIP_ASSERT("invalid netif", netif != NULL); + LWIP_ASSERT("invalid pbuf", p != NULL); + + if (p->tot_len > ZEP_MAX_DATA_LEN) { + return ERR_VAL; + } + LWIP_ASSERT("TODO: support chained pbufs", p->next == NULL); + + state = (struct zepif_state *)netif->state; + LWIP_ASSERT("state->pcb != NULL", state->pcb != NULL); + + q = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct zep_hdr) + p->tot_len, PBUF_RAM); + if (q == NULL) { + return ERR_MEM; + } + zep = (struct zep_hdr *)q->payload; + memset(zep, 0, sizeof(struct zep_hdr)); + zep->prot_id[0] = 'E'; + zep->prot_id[1] = 'X'; + zep->prot_version = 2; + zep->type = 1; /* Data */ + zep->channel_id = 0; /* whatever */ + zep->device_id = lwip_htons(1); /* whatever */ + zep->crc_mode = 1; + zep->unknown_1 = 0xff; + zep->seq_num = lwip_htonl(state->seqno); + state->seqno++; + zep->len = (u8_t)p->tot_len; + + err = pbuf_take_at(q, p->payload, p->tot_len, sizeof(struct zep_hdr)); + if (err == ERR_OK) { +#if ZEPIF_LOOPBACK + zepif_udp_recv(netif, state->pcb, pbuf_clone(PBUF_RAW, PBUF_RAM, q), NULL, 0); +#endif + err = udp_sendto(state->pcb, q, state->init.zep_dst_ip_addr, state->init.zep_dst_udp_port); + } + pbuf_free(q); + + return err; +} + +/** + * @ingroup zepif + * Set up a raw 6LowPAN netif and surround it with input- and output + * functions for ZEP + */ +err_t +zepif_init(struct netif *netif) +{ + err_t err; + struct zepif_init *init_state = (struct zepif_init *)netif->state; + struct zepif_state *state = (struct zepif_state *)mem_malloc(sizeof(struct zepif_state)); + + LWIP_ASSERT("zepif needs an input callback", netif->input != NULL); + + if (state == NULL) { + return ERR_MEM; + } + memset(state, 0, sizeof(struct zepif_state)); + if (init_state != NULL) { + memcpy(&state->init, init_state, sizeof(struct zepif_init)); + } + if (state->init.zep_src_udp_port == 0) { + state->init.zep_src_udp_port = ZEPIF_DEFAULT_UDP_PORT; + } + if (state->init.zep_dst_udp_port == 0) { + state->init.zep_dst_udp_port = ZEPIF_DEFAULT_UDP_PORT; + } +#if LWIP_IPV4 + if (state->init.zep_dst_ip_addr == NULL) { + /* With IPv4 enabled, default to broadcasting packets if no address is set */ + state->init.zep_dst_ip_addr = IP_ADDR_BROADCAST; + } +#endif /* LWIP_IPV4 */ + + netif->state = NULL; + + state->pcb = udp_new_ip_type(IPADDR_TYPE_ANY); + if (state->pcb == NULL) { + err = ERR_MEM; + goto err_ret; + } + err = udp_bind(state->pcb, state->init.zep_src_ip_addr, state->init.zep_src_udp_port); + if (err != ERR_OK) { + goto err_ret; + } + if (state->init.zep_netif != NULL) { + udp_bind_netif(state->pcb, state->init.zep_netif); + } + LWIP_ASSERT("udp_bind(lowpan6_broadcast_pcb) failed", err == ERR_OK); + ip_set_option(state->pcb, SOF_BROADCAST); + udp_recv(state->pcb, zepif_udp_recv, netif); + + err = lowpan6_if_init(netif); + LWIP_ASSERT("lowpan6_if_init set a state", netif->state == NULL); + if (err == ERR_OK) { + netif->state = state; + netif->hwaddr_len = 6; + if (init_state != NULL) { + memcpy(netif->hwaddr, init_state->addr, 6); + } else { + u8_t i; + for (i = 0; i < 6; i++) { + netif->hwaddr[i] = i; + } + netif->hwaddr[0] &= 0xfc; + } + netif->linkoutput = zepif_linkoutput; + + if (!zep_lowpan_timer_running) { + sys_timeout(LOWPAN6_TMR_INTERVAL, 
zep_lowpan_timer, NULL); + zep_lowpan_timer_running = 1; + } + + return ERR_OK; + } + +err_ret: + if (state->pcb != NULL) { + udp_remove(state->pcb); + } + mem_free(state); + return err; +} + +#endif /* LWIP_IPV6 && LWIP_UDP */ diff --git a/Middlewares/Third_Party/LwIP/system/arch/bpstruct.h b/Middlewares/Third_Party/LwIP/system/arch/bpstruct.h new file mode 100644 index 00000000..177758c5 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/bpstruct.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#if defined(__IAR_SYSTEMS_ICC__) +#pragma pack(1) +#endif + diff --git a/Middlewares/Third_Party/LwIP/system/arch/cc.h b/Middlewares/Third_Party/LwIP/system/arch/cc.h new file mode 100644 index 00000000..3065318b --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/cc.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
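/* Usage sketch (illustrative, not part of this patch): zepif_init() from
   netif/zepif.c above is intended to be passed to netif_add(). With the
   sequential API, bringing up a ZEP netif could look roughly like this
   (a default netif must already exist, since ZEP frames travel over UDP
   through it):

       struct netif zep_netif;

       netif_add(&zep_netif, NULL, NULL, NULL, NULL, zepif_init, tcpip_input);
       netif_set_up(&zep_netif);
       netif_create_ip6_linklocal_address(&zep_netif, 1);
*/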
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __CC_H__ +#define __CC_H__ + +#include "cpu.h" +#include +#include + +typedef int sys_prot_t; + +#define LWIP_PROVIDE_ERRNO + +#if defined (__GNUC__) & !defined (__CC_ARM) + +#define LWIP_TIMEVAL_PRIVATE 0 +#include + +#endif + +/* define compiler specific symbols */ +#if defined (__ICCARM__) + +#define PACK_STRUCT_BEGIN +#define PACK_STRUCT_STRUCT +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x +#define PACK_STRUCT_USE_INCLUDES + +#elif defined (__GNUC__) + +#define PACK_STRUCT_BEGIN +#define PACK_STRUCT_STRUCT __attribute__ ((__packed__)) +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x + +#elif defined (__CC_ARM) + +#define PACK_STRUCT_BEGIN __packed +#define PACK_STRUCT_STRUCT +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x + +#elif defined (__TASKING__) + +#define PACK_STRUCT_BEGIN +#define PACK_STRUCT_STRUCT +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x + +#endif + +#define LWIP_PLATFORM_ASSERT(x) do {printf("Assertion \"%s\" failed at line %d in %s\n", \ + x, __LINE__, __FILE__); } while(0) + +/* Define random number generator function */ +#define LWIP_RAND() ((u32_t)rand()) + +#endif /* __CC_H__ */ diff --git a/Middlewares/Third_Party/LwIP/system/arch/cpu.h b/Middlewares/Third_Party/LwIP/system/arch/cpu.h new file mode 100644 index 00000000..bfdf8811 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/cpu.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
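/* Illustrative sketch (not part of this patch): the PACK_STRUCT_* macros defined
   in arch/cc.h above are how lwIP declares byte-packed on-wire structures portably
   across compilers (compare struct zep_hdr in netif/zepif.c). The struct below is
   a made-up example, not a real lwIP header:

       #ifdef PACK_STRUCT_USE_INCLUDES
       #  include "arch/bpstruct.h"    // IAR: #pragma pack(1)
       #endif
       PACK_STRUCT_BEGIN
       struct example_hdr {
         PACK_STRUCT_FLD_8(u8_t  type);
         PACK_STRUCT_FIELD(u16_t length);  // multi-byte fields use PACK_STRUCT_FIELD
       } PACK_STRUCT_STRUCT;
       PACK_STRUCT_END
       #ifdef PACK_STRUCT_USE_INCLUDES
       #  include "arch/epstruct.h"    // IAR: #pragma pack()
       #endif
*/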
+ * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __CPU_H__ +#define __CPU_H__ + +#ifndef BYTE_ORDER +#define BYTE_ORDER LITTLE_ENDIAN +#endif + +#endif /* __CPU_H__ */ diff --git a/Middlewares/Third_Party/LwIP/system/arch/epstruct.h b/Middlewares/Third_Party/LwIP/system/arch/epstruct.h new file mode 100644 index 00000000..1e1a049c --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/epstruct.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#if defined(__IAR_SYSTEMS_ICC__) +#pragma pack() +#endif + diff --git a/Middlewares/Third_Party/LwIP/system/arch/init.h b/Middlewares/Third_Party/LwIP/system/arch/init.h new file mode 100644 index 00000000..e622c73a --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/init.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __ARCH_INIT_H__ +#define __ARCH_INIT_H__ + +#define TCPIP_INIT_DONE(arg) tcpip_init_done(arg) + +void tcpip_init_done(void *); +int wait_for_tcpip_init(void); + +#endif /* __ARCH_INIT_H__ */ + + + + diff --git a/Middlewares/Third_Party/LwIP/system/arch/lib.h b/Middlewares/Third_Party/LwIP/system/arch/lib.h new file mode 100644 index 00000000..378f25b8 --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/lib.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __LIB_H__ +#define __LIB_H__ + +#include + + +#endif /* __LIB_H__ */ diff --git a/Middlewares/Third_Party/LwIP/system/arch/perf.h b/Middlewares/Third_Party/LwIP/system/arch/perf.h new file mode 100644 index 00000000..334d42af --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/perf.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __PERF_H__ +#define __PERF_H__ + +#define PERF_START /* null definition */ +#define PERF_STOP(x) /* null definition */ + +#endif /* __PERF_H__ */ diff --git a/Middlewares/Third_Party/LwIP/system/arch/sys_arch.h b/Middlewares/Third_Party/LwIP/system/arch/sys_arch.h new file mode 100644 index 00000000..49bfd4eb --- /dev/null +++ b/Middlewares/Third_Party/LwIP/system/arch/sys_arch.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
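/* Illustrative sketch (not part of this patch): lwIP wraps its protocol handlers in
   PERF_START / PERF_STOP("name"), so the null definitions in arch/perf.h above simply
   compile profiling out. A port wanting real measurements could, for example, redefine
   them on top of the Cortex-M DWT cycle counter (assuming CYCCNT has been enabled
   elsewhere):

       #define PERF_START    uint32_t perf_t0 = DWT->CYCCNT
       #define PERF_STOP(x)  printf("%s: %lu cycles\n", (x), \
                                    (unsigned long)(DWT->CYCCNT - perf_t0))
*/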
+ * + * Author: Adam Dunkels + * + */ +#ifndef __SYS_ARCH_H__ +#define __SYS_ARCH_H__ + +#include "lwip/opt.h" + +#if (NO_SYS != 0) +#error "NO_SYS need to be set to 0 to use threaded API" +#endif + +#include "cmsis_os.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if (osCMSIS < 0x20000U) + +#define SYS_MBOX_NULL (osMessageQId)0 +#define SYS_SEM_NULL (osSemaphoreId)0 + +typedef osSemaphoreId sys_sem_t; +typedef osSemaphoreId sys_mutex_t; +typedef osMessageQId sys_mbox_t; +typedef osThreadId sys_thread_t; +#else + +#define SYS_MBOX_NULL (osMessageQueueId_t)0 +#define SYS_SEM_NULL (osSemaphoreId_t)0 + +typedef osSemaphoreId_t sys_sem_t; +typedef osSemaphoreId_t sys_mutex_t; +typedef osMessageQueueId_t sys_mbox_t; +typedef osThreadId_t sys_thread_t; +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __SYS_ARCH_H__ */ + diff --git a/Src/driver.c b/Src/driver.c index ef26aee7..e135221d 100644 --- a/Src/driver.c +++ b/Src/driver.c @@ -238,6 +238,9 @@ static input_signal_t inputpin[] = { #ifdef SPI_IRQ_PORT { .id = Input_SPIIRQ, .port = SPI_IRQ_PORT, .pin = SPI_IRQ_PIN, .group = PinGroup_SPI }, #endif +#if SDCARD_ENABLE && defined(SD_DETECT_PIN) + { .id = Input_SdCardDetect, .port = SD_DETECT_PORT, .pin = SD_DETECT_PIN, .group = PinGroup_SdCard }, +#endif // Aux input pins must be consecutive in this array #ifdef AUXINPUT0_PIN { .id = Input_Aux0, .port = AUXINPUT0_PORT, .pin = AUXINPUT0_PIN, .group = PinGroup_AuxInput }, @@ -2265,7 +2268,7 @@ void settings_changed (settings_t *settings, settings_changed_flags_t changed) case Input_HomeC: input->mode.pull_mode = PullMode_Up; // settings->limits.disable_pullup.c ? PullMode_None : PullMode_Up; break; -#endif +#endif // HOME_MASK case Input_SPIIRQ: input->mode.pull_mode = true; input->mode.irq_mode = IRQ_Mode_Falling; @@ -2293,7 +2296,16 @@ void settings_changed (settings_t *settings, settings_changed_flags_t changed) break; #endif +#endif // QEI_ENABLE + +#if SDCARD_ENABLE && defined(SD_DETECT_PIN) + case Input_SdCardDetect: + input->mode.pull_mode = PullMode_Up; + input->mode.irq_mode = IRQ_Mode_Change; + input->mode.debounce = On; + break; #endif + default: break; } @@ -2543,14 +2555,12 @@ static bool bus_ok = false; static bool sdcard_unmount (FATFS **fs) { - /* - if(card && esp_vfs_fat_sdcard_unmount("/sdcard", card) == ESP_OK) { - card = NULL; - bus_ok = false; - spi_bus_free(SDSPI_DEFAULT_HOST); - } -*/ - return false; // card == NULL; + bool ok; + + if((ok = f_unmount(SDPath) == FR_OK)) + FATFS_UnLinkDriver(SDPath); + + return ok; } static char *sdcard_mount (FATFS **fs) @@ -2567,47 +2577,13 @@ static char *sdcard_mount (FATFS **fs) if(*fs == NULL) *fs = malloc(sizeof(FATFS)); - if(*fs && f_mount(*fs, "0:/", 1) != FR_OK) { + if(*fs && f_mount(*fs, SDPath, 1) != FR_OK) { free(*fs ); *fs = NULL; } } -/* - * - if(card == NULL) { - - esp_err_t ret = ESP_FAIL; - esp_vfs_fat_sdmmc_mount_config_t mount_config = { - .format_if_mount_failed = false, - .max_files = 5, - .allocation_unit_size = 16 * 1024 - }; - - sdmmc_host_t host = SDSPI_HOST_DEFAULT(); -// host.max_freq_khz = SDMMC_FREQ_HIGHSPEED; - - sdspi_device_config_t slot_config = SDSPI_DEVICE_CONFIG_DEFAULT(); - slot_config.gpio_cs = PIN_NUM_CS; - slot_config.host_id = host.slot; - - gpio_set_drive_capability(PIN_NUM_CS, GPIO_DRIVE_CAP_3); - - if ((ret = esp_vfs_fat_sdspi_mount("/sdcard", &host, &slot_config, &mount_config, &card)) != ESP_OK) - report_message(ret == ESP_FAIL ? 
"Failed to mount filesystem" : "Failed to initialize SD card", Message_Warning); - } - - if(card && fs) { - if(*fs == NULL) - *fs = malloc(sizeof(FATFS)); - - if(*fs && f_mount(*fs, "", 1) != FR_OK) { - free(*fs ); - *fs = NULL; - } - } -*/ - return ""; + return SDPath; } #endif // SDCARD_SDIO @@ -2823,6 +2799,11 @@ static bool driver_setup (settings_t *settings) encoder_start(&qei.encoder); #endif +#if SDCARD_ENABLE && defined(SD_DETECT_PIN) + if(!DIGITAL_IN(SD_DETECT_PORT, SD_DETECT_PIN)) + sdcard_detect(true); +#endif + return IOInitDone; } @@ -2989,7 +2970,7 @@ bool driver_init (void) #else hal.info = "STM32F401"; #endif - hal.driver_version = "241221"; + hal.driver_version = "241230"; hal.driver_url = GRBL_URL "/STM32F4xx"; #ifdef BOARD_NAME hal.board = BOARD_NAME; @@ -3057,11 +3038,11 @@ bool driver_init (void) HAL_RCC_GetOscConfig(&OscInitStruct); - if(OscInitStruct.OscillatorType & (RCC_OSCILLATORTYPE_LSI|RCC_OSCILLATORTYPE_LSE)) { + if(OscInitStruct.LSIState || OscInitStruct.LSEState) { RCC_PeriphCLKInitTypeDef PeriphClkInitStruct = { .PeriphClockSelection = RCC_PERIPHCLK_RTC, - .RTCClockSelection = (OscInitStruct.OscillatorType & RCC_OSCILLATORTYPE_LSI) ? RCC_RTCCLKSOURCE_LSI : RCC_RTCCLKSOURCE_LSE + .RTCClockSelection = OscInitStruct.LSEState ? RCC_RTCCLKSOURCE_LSE : RCC_RTCCLKSOURCE_LSI }; if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInitStruct) == HAL_OK) { @@ -3069,9 +3050,15 @@ bool driver_init (void) __HAL_RCC_RTC_ENABLE(); if((hal.driver_cap.rtc = HAL_RTC_Init(&hrtc) == HAL_OK)) { + + struct tm time; + + hal.driver_cap.rtc_set = On; hal.rtc.get_datetime = get_rtc_time; hal.rtc.set_datetime = set_rtc_time; - // TODO: read the time and set hal.driver_cap.rtc_set if >= core build date? + + if((hal.driver_cap.rtc_set = get_rtc_time(&time))) + hal.driver_cap.rtc_set = ((time.tm_year + 1900) * 10000 + (time.tm_mon + 1) * 100 + time.tm_mday) >= GRBL_BUILD; } } } @@ -3190,6 +3177,9 @@ bool driver_init (void) z_limit_pin = input; #endif limit_inputs.n_pins++; + } else if(input->group & PinGroup_SdCard) { + if(input->bit & DEVICES_IRQ_MASK) + pin_irq[__builtin_ffs(input->bit) - 1] = input; } } @@ -3391,14 +3381,18 @@ void core_pin_debounce (void *pin) if(input->mode.irq_mode == IRQ_Mode_Change || DIGITAL_IN(input->port, input->pin) == (input->mode.irq_mode == IRQ_Mode_Falling ? 0 : 1)) { - if(input->group & (PinGroup_Control)) { + if(input->group & PinGroup_Control) { hal.control.interrupt_callback(systemGetState()); } if(input->group & (PinGroup_Limit|PinGroup_LimitMax)) { limit_signals_t state = limitsGetState(); - if(limit_signals_merge(state).value) //TODO: add check for limit switches having same state as when limit_isr were invoked? + if(limit_signals_merge(state).value) // TODO: add check for limit switches having same state as when limit_isr were invoked? hal.limits.interrupt_callback(state); } +#if SDCARD_ENABLE && defined(SD_DETECT_PIN) + if(input->group & PinGroup_SdCard) + sdcard_detect(!DIGITAL_IN(SD_DETECT_PORT, SD_DETECT_PIN)); // TODO: add check for having same state as when isr were invoked? 
+#endif } #ifdef Z_LIMIT_POLL @@ -3461,7 +3455,7 @@ void EXTI0_IRQHandler(void) __HAL_GPIO_EXTI_CLEAR_IT(ifg); #if CONTROL_MASK & (1<<0) hal.control.interrupt_callback(systemGetState()); -#elif LIMIT_MASK & (1<<0) +#elif (LIMIT_MASK|SD_DETECT_BIT) & (1<<0) core_pin_irq(ifg); #elif SPI_IRQ_BIT & (1<<0) if(spi_irq.callback) @@ -3493,7 +3487,7 @@ void EXTI1_IRQHandler(void) __HAL_GPIO_EXTI_CLEAR_IT(ifg); #if CONTROL_MASK & (1<<1) hal.control.interrupt_callback(systemGetState()); -#elif LIMIT_MASK & (1<<1) +#elif (LIMIT_MASK|SD_DETECT_BIT) & (1<<1) core_pin_irq(ifg); #elif SPI_IRQ_BIT & (1<<1) if(spi_irq.callback) @@ -3525,7 +3519,7 @@ void EXTI2_IRQHandler(void) __HAL_GPIO_EXTI_CLEAR_IT(ifg); #if CONTROL_MASK & (1<<2) hal.control.interrupt_callback(systemGetState()); -#elif LIMIT_MASK & (1<<2) +#elif (LIMIT_MASK|SD_DETECT_BIT) & (1<<2) core_pin_irq(ifg); #elif SPI_IRQ_BIT & (1<<2) if(spi_irq.callback) @@ -3557,7 +3551,7 @@ void EXTI3_IRQHandler(void) __HAL_GPIO_EXTI_CLEAR_IT(ifg); #if CONTROL_MASK & (1<<3) hal.control.interrupt_callback(systemGetState()); -#elif LIMIT_MASK & (1<<3) +#elif (LIMIT_MASK|SD_DETECT_BIT) & (1<<3) core_pin_irq(ifg); #elif SPI_IRQ_BIT & (1<<3) if(spi_irq.callback) @@ -3589,7 +3583,7 @@ void EXTI4_IRQHandler(void) __HAL_GPIO_EXTI_CLEAR_IT(ifg); #if CONTROL_MASK & (1<<4) hal.control.interrupt_callback(systemGetState()); -#elif LIMIT_MASK & (1<<4) +#elif (LIMIT_MASK|SD_DETECT_BIT) & (1<<4) core_pin_irq(ifg); #elif SPI_IRQ_BIT & (1<<4) if(spi_irq.callback) @@ -3646,8 +3640,8 @@ void EXTI9_5_IRQHandler(void) if(ifg & CONTROL_MASK) hal.control.interrupt_callback(systemGetState()); #endif -#if LIMIT_MASK & 0x03E0 - if(ifg & LIMIT_MASK) +#if (LIMIT_MASK|SD_DETECT_BIT) & 0x03E0 + if(ifg & (LIMIT_MASK|SD_DETECT_BIT)) core_pin_irq(ifg); #endif #if AUXINPUT_MASK & 0x03E0 @@ -3692,8 +3686,8 @@ void EXTI15_10_IRQHandler(void) if(ifg & CONTROL_MASK) hal.control.interrupt_callback(systemGetState()); #endif -#if LIMIT_MASK & 0xFC00 - if(ifg & LIMIT_MASK) +#if (LIMIT_MASK|SD_DETECT_BIT) & 0xFC00 + if(ifg & (LIMIT_MASK|SD_DETECT_BIT)) core_pin_irq(ifg); #endif #if AUXINPUT_MASK & 0xFC00 diff --git a/Src/enet.c b/Src/enet.c new file mode 100644 index 00000000..d9d851c9 --- /dev/null +++ b/Src/enet.c @@ -0,0 +1,649 @@ +/* + enet.c - lwIP driver glue code for STM32F4xx processors + + Part of grblHAL + + Copyright (c) 2021-2024 Terje Io + + grblHAL is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + grblHAL is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with grblHAL. If not, see . 
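/* Illustrative sketch only (not part of this patch): one way to resolve the TODO in
   core_pin_debounce() above ("add check for having same state as when isr were invoked")
   is to latch the pin level when the EXTI fires and only report if it is still the same
   after the debounce delay. The debounce_level field is hypothetical - it does not exist
   in input_signal_t today:

       // in the EXTI handler, before queuing core_pin_debounce():
       input->debounce_level = DIGITAL_IN(input->port, input->pin);

       // in core_pin_debounce(), after the debounce delay has expired:
       if(DIGITAL_IN(input->port, input->pin) == input->debounce_level)
           sdcard_detect(!input->debounce_level);
*/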
+*/ + +#include "driver.h" + +#if ETHERNET_ENABLE && !defined(_WIZCHIP_) + +#include +#include +#include + +#include +#include +#include "lwip/dhcp.h" +#include "lwip.h" +#include "lwip/init.h" +#include "ethernetif.h" + +#include "grbl/report.h" +#include "grbl/nvs_buffer.h" + +#include "networking/networking.h" + +#define MDNS_TTL 32 + +static volatile bool linkUp = false; +static char IPAddress[IP4ADDR_STRLEN_MAX]; +static stream_type_t active_stream = StreamType_Null; +static network_services_t services = {0}, allowed_services; +static nvs_address_t nvs_address; +static network_settings_t ethernet, network; +static on_report_options_ptr on_report_options; +static on_execute_realtime_ptr on_execute_realtime; +static on_stream_changed_ptr on_stream_changed; +static char netservices[NETWORK_SERVICES_LEN] = ""; +#if MQTT_ENABLE + +static bool mqtt_connected = false; +static on_mqtt_client_connected_ptr on_client_connected; + +static void mqtt_connection_changed (bool connected) +{ + mqtt_connected = connected; + + if(on_client_connected) + on_client_connected(connected); +} + +#endif + +static void report_options (bool newopt) +{ + on_report_options(newopt); + + if(newopt) { + hal.stream.write(",ETH"); +#if FTP_ENABLE + if(services.ftp) + hal.stream.write(",FTP"); +#endif +#if WEBDAV_ENABLE + if(services.webdav) + hal.stream.write(",WebDAV"); +#endif +#if MDNS_ENABLE + if(services.mdns) + hal.stream.write(",mDNS"); +#endif +#if SSDP_ENABLE + if(services.ssdp) + hal.stream.write(",SSDP"); +#endif + } else { + + network_info_t *network = networking_get_info(); + + hal.stream.write("[MAC:"); + hal.stream.write(network->mac); + hal.stream.write("]" ASCII_EOL); + + hal.stream.write("[IP:"); + hal.stream.write(network->status.ip); + hal.stream.write("]" ASCII_EOL); + + if(active_stream == StreamType_Telnet || active_stream == StreamType_WebSocket) { + hal.stream.write("[NETCON:"); + hal.stream.write(active_stream == StreamType_Telnet ? "Telnet" : "Websocket"); + hal.stream.write("]" ASCII_EOL); + } + +#if MQTT_ENABLE + char *client_id; + if(*(client_id = networking_get_info()->mqtt_client_id)) { + hal.stream.write("[MQTT CLIENTID:"); + hal.stream.write(client_id); + hal.stream.write(mqtt_connected ? 
"]" ASCII_EOL : " (offline)]" ASCII_EOL); + } +#endif + } +} + +network_info_t *networking_get_info (void) +{ + static network_info_t info; + + memcpy(&info.status, &network, sizeof(network_settings_t)); + + strcpy(info.status.ip, IPAddress); + + if(info.status.ip_mode == IpMode_DHCP) { + *info.status.gateway = '\0'; + *info.status.mask = '\0'; + } + + info.is_ethernet = true; + info.link_up = linkUp; + info.mbps = 100; + info.status.services = services; + + struct netif *netif = netif_default; // netif_get_by_index(0); + + if(netif) { + + if(linkUp) { + ip4addr_ntoa_r(netif_ip_gw4(netif), info.status.gateway, IP4ADDR_STRLEN_MAX); + ip4addr_ntoa_r(netif_ip_netmask4(netif), info.status.mask, IP4ADDR_STRLEN_MAX); + } + + strcpy(info.mac, networking_mac_to_string(netif->hwaddr)); + } + +#if MQTT_ENABLE + networking_make_mqtt_clientid(info.mac, info.mqtt_client_id); +#endif + + return &info; +} + +static void link_status_callback (struct netif *netif) +{ + bool isLinkUp = netif_is_link_up(netif); + + if(isLinkUp != linkUp) { + linkUp = isLinkUp; +#if TELNET_ENABLE + telnetd_notify_link_status(linkUp); +#endif + } +} + +#if MDNS_ENABLE + +static void mdns_device_info (struct mdns_service *service, void *txt_userdata) +{ + char build[20] = "build="; + + strcat(build, uitoa(GRBL_BUILD)); + mdns_resp_add_service_txtitem(service, "model=grblHAL", 13); + mdns_resp_add_service_txtitem(service, (char *)txt_userdata, strlen((char *)txt_userdata)); + mdns_resp_add_service_txtitem(service, build, strlen(build)); +} + +static void mdns_service_info (struct mdns_service *service, void *txt_userdata) +{ + if(txt_userdata) + mdns_resp_add_service_txtitem(service, (char *)txt_userdata, strlen((char *)txt_userdata)); +} + +#endif + +static void netif_status_callback (struct netif *netif) +{ +#if IP_V6 + if(netif->ip_addr.u_addr.ip4.addr == 0) + return; +#else + if(netif->ip_addr.addr == 0) + return; +#endif + + ip4addr_ntoa_r(netif_ip_addr4(netif), IPAddress, IP4ADDR_STRLEN_MAX); + +#if TELNET_ENABLE + if(network.services.telnet && !services.telnet) + services.telnet = telnetd_init(network.telnet_port); +#endif + +#if FTP_ENABLE + if(network.services.ftp && !services.ftp) + services.ftp = ftpd_init(network.ftp_port); +#endif + +#if HTTP_ENABLE + if(network.services.http && !services.http) { + services.http = httpd_init(network.http_port); + #if WEBDAV_ENABLE + if(network.services.webdav && !services.webdav) + services.webdav = webdav_init(); + #endif + #if SSDP_ENABLE + if(network.services.ssdp && !services.ssdp) + services.ssdp = ssdp_init(network.http_port); + #endif + } +#endif + +#if WEBSOCKET_ENABLE + if(network.services.websocket && !services.websocket) + services.websocket = websocketd_init(network.websocket_port); +#endif + +#if MDNS_ENABLE + if(*network.hostname && network.services.mdns && !services.mdns) { + + mdns_resp_init(); + + if((services.mdns = mdns_resp_add_netif(netif_default, network.hostname, MDNS_TTL) == ERR_OK)) { + + mdns_resp_add_service(netif_default, network.hostname, "_device-info", DNSSD_PROTO_TCP, 0, MDNS_TTL, mdns_device_info, "version=" GRBL_VERSION); + + if(services.http) + mdns_resp_add_service(netif_default, network.hostname, "_http", DNSSD_PROTO_TCP, network.http_port, MDNS_TTL, mdns_service_info, "path=/"); + if(services.webdav) + mdns_resp_add_service(netif_default, network.hostname, "_webdav", DNSSD_PROTO_TCP, network.http_port, MDNS_TTL, mdns_service_info, "path=/"); + if(services.websocket) + mdns_resp_add_service(netif_default, network.hostname, "_websocket", 
DNSSD_PROTO_TCP, network.websocket_port, MDNS_TTL, mdns_service_info, NULL); + if(services.telnet) + mdns_resp_add_service(netif_default, network.hostname, "_telnet", DNSSD_PROTO_TCP, network.telnet_port, MDNS_TTL, mdns_service_info, NULL); + if(services.ftp) + mdns_resp_add_service(netif_default, network.hostname, "_ftp", DNSSD_PROTO_TCP, network.ftp_port, MDNS_TTL, mdns_service_info, "path=/"); + +// mdns_resp_announce(netif_default); + } + } +#endif + +#if MQTT_ENABLE + if(!mqtt_connected) + mqtt_connect(&network.mqtt, networking_get_info()->mqtt_client_id); +#endif + +#if MODBUS_ENABLE & MODBUS_TCP_ENABLED + modbus_tcp_client_start(); +#endif +} + +static void enet_poll (sys_state_t state) +{ + static uint32_t last_ms0, last_link_check; + + on_execute_realtime(state); + + uint32_t ms = hal.get_elapsed_ticks(); + + if(ms - last_link_check >= 100) { + last_link_check = ms; + ethernet_link_check_state(netif_default); + } + + sys_check_timeouts(); + ethernetif_input(netif_default); + + if(linkUp && ms - last_ms0 > 3) { + last_ms0 = ms; +#if TELNET_ENABLE + if(services.telnet) + telnetd_poll(); +#endif +#if FTP_ENABLE + if(services.ftp) + ftpd_poll(); +#endif +#if WEBSOCKET_ENABLE + if(services.websocket) + websocketd_poll(); +#endif +#if MODBUS_ENABLE & MODBUS_TCP_ENABLED + modbus_tcp_client_poll(); +#endif + } +} + +bool enet_start (void) +{ + static struct netif ethif; + + if(nvs_address != 0) { + + *IPAddress = '\0'; + on_execute_realtime = grbl.on_execute_realtime; + grbl.on_execute_realtime = enet_poll; + + memcpy(&network, ðernet, sizeof(network_settings_t)); +#ifdef HAS_MAC_SETTING + memcpy(ðif.hwaddr, &network.mac, 6); +#endif + + if(network.telnet_port == 0) + network.telnet_port = NETWORK_TELNET_PORT; + if(network.websocket_port == 0) + network.websocket_port = NETWORK_WEBSOCKET_PORT; + if(network.http_port == 0) + network.http_port = NETWORK_HTTP_PORT; + if(network.ftp_port == 0) + network.ftp_port = NETWORK_FTP_PORT; +#if MQTT_ENABLE + if(network.mqtt.port == 0) + network.mqtt.port = NETWORK_MQTT_PORT; +#endif + + lwip_init(); + + if(network.ip_mode == IpMode_Static) + netif_add(ðif, (ip_addr_t *)&network.ip, (ip_addr_t *)&network.mask, (ip_addr_t *)&network.gateway, NULL, ðernetif_init, ðernet_input); + else + netif_add(ðif, NULL, NULL, NULL, NULL, ðernetif_init, ðernet_input); + + netif_set_default(ðif); + netif_set_link_callback(netif_default, link_status_callback); + netif_set_status_callback(netif_default, netif_status_callback); + + link_status_callback(netif_default); + netif_status_callback(netif_default); + + #if LWIP_NETIF_HOSTNAME + netif_set_hostname(netif_default, network.hostname); + #endif + if(network.ip_mode == IpMode_DHCP) + dhcp_start(netif_default); + } + +#if MDNS_ENABLE || SSDP_ENABLE || LWIP_IGMP + + if(network.services.mdns || network.services.ssdp) { + + ETH_MACFilterConfigTypeDef filters; + + HAL_ETH_GetMACFilterConfig(&heth, &filters); + + // TODO: add filters for SSDP and mDNS + filters.PassAllMulticast = On; + // filters.PromiscuousMode = On; + + HAL_ETH_SetMACFilterConfig(&heth, &filters); + + netif_default->flags |= NETIF_FLAG_IGMP; + } + +#endif + + return nvs_address != 0; +} + +static inline void set_addr (char *ip, ip4_addr_t *addr) +{ + memcpy(ip, addr, sizeof(ip4_addr_t)); +} + +static status_code_t ethernet_set_ip (setting_id_t setting, char *value) +{ + ip_addr_t addr; + + if(ip4addr_aton(value, &addr) != 1) + return Status_InvalidStatement; + + status_code_t status = Status_OK; + + switch(setting) { + + case Setting_IpAddress: + 
set_addr(ethernet.ip, &addr); + break; + + case Setting_Gateway: + set_addr(ethernet.gateway, &addr); + break; + + case Setting_NetMask: + set_addr(ethernet.mask, &addr); + break; + +#if MQTT_ENABLE + case Setting_MQTTBrokerIpAddress: + set_addr(ethernet.mqtt.ip, &addr); + break; +#endif + + default: + status = Status_Unhandled; + break; + } + + return status; +} + +static char *ethernet_get_ip (setting_id_t setting) +{ + static char ip[IPADDR_STRLEN_MAX]; + + switch(setting) { + + case Setting_IpAddress: + ip4addr_ntoa_r((const ip_addr_t *)ðernet.ip, ip, IPADDR_STRLEN_MAX); + break; + + case Setting_Gateway: + ip4addr_ntoa_r((const ip_addr_t *)ðernet.gateway, ip, IPADDR_STRLEN_MAX); + break; + + case Setting_NetMask: + ip4addr_ntoa_r((const ip_addr_t *)ðernet.mask, ip, IPADDR_STRLEN_MAX); + break; + +#if MQTT_ENABLE + case Setting_MQTTBrokerIpAddress: + ip4addr_ntoa_r((const ip_addr_t *)ðernet.mqtt.ip, ip, IPADDR_STRLEN_MAX); + break; +#endif + + default: + *ip = '\0'; + break; + } + + return ip; +} + +static status_code_t ethernet_set_services (setting_id_t setting, uint_fast16_t int_value) +{ + ethernet.services.mask = int_value & allowed_services.mask; + + return Status_OK; +} + +static uint32_t ethernet_get_services (setting_id_t id) +{ + return (uint32_t)ethernet.services.mask; +} + +#ifdef HAS_MAC_SETTING + +static status_code_t ethernet_set_mac (setting_id_t setting, char *value) +{ + return networking_string_to_mac(value, ethernet.mac) ? Status_OK : Status_InvalidStatement; +} + +static char *ethernet_get_mac (setting_id_t setting) +{ + return networking_mac_to_string(ethernet.mac); +} + +#endif + +static const setting_group_detail_t ethernet_groups [] = { + { Group_Root, Group_Networking, "Networking" } +}; + +static const setting_detail_t ethernet_settings[] = { + { Setting_NetworkServices, Group_Networking, "Network Services", NULL, Format_Bitfield, netservices, NULL, NULL, Setting_NonCoreFn, ethernet_set_services, ethernet_get_services, NULL, { .reboot_required = On } }, + { Setting_Hostname, Group_Networking, "Hostname", NULL, Format_String, "x(64)", NULL, "64", Setting_NonCore, ethernet.hostname, NULL, NULL, { .reboot_required = On } }, + { Setting_IpMode, Group_Networking, "IP Mode", NULL, Format_RadioButtons, "Static,DHCP,AutoIP", NULL, NULL, Setting_NonCore, ðernet.ip_mode, NULL, NULL, { .reboot_required = On } }, + { Setting_IpAddress, Group_Networking, "IP Address", NULL, Format_IPv4, NULL, NULL, NULL, Setting_NonCoreFn, ethernet_set_ip, ethernet_get_ip, NULL, { .reboot_required = On } }, + { Setting_Gateway, Group_Networking, "Gateway", NULL, Format_IPv4, NULL, NULL, NULL, Setting_NonCoreFn, ethernet_set_ip, ethernet_get_ip, NULL, { .reboot_required = On } }, + { Setting_NetMask, Group_Networking, "Netmask", NULL, Format_IPv4, NULL, NULL, NULL, Setting_NonCoreFn, ethernet_set_ip, ethernet_get_ip, NULL, { .reboot_required = On } }, +#ifdef HAS_MAC_SETTING + { Setting_NetworkMAC, Group_Networking, "MAC Address", NULL, Format_String, "x(17)", "17", "17", Setting_NonCoreFn, ethernet_set_mac, ethernet_get_mac, NULL, { .allow_null = On, .reboot_required = On } }, +#endif + { Setting_TelnetPort, Group_Networking, "Telnet port", NULL, Format_Int16, "####0", "1", "65535", Setting_NonCore, ðernet.telnet_port, NULL, NULL, { .reboot_required = On } }, +#if FTP_ENABLE + { Setting_FtpPort, Group_Networking, "FTP port", NULL, Format_Int16, "####0", "1", "65535", Setting_NonCore, ðernet.ftp_port, NULL, NULL, { .reboot_required = On } }, +#endif +#if HTTP_ENABLE + { Setting_HttpPort, 
Group_Networking, "HTTP port", NULL, Format_Int16, "####0", "1", "65535", Setting_NonCore, ðernet.http_port, NULL, NULL, { .reboot_required = On } }, +#endif + { Setting_WebSocketPort, Group_Networking, "Websocket port", NULL, Format_Int16, "####0", "1", "65535", Setting_NonCore, ðernet.websocket_port, NULL, NULL, { .reboot_required = On } }, +#if MQTT_ENABLE + { Setting_MQTTBrokerIpAddress, Group_Networking, "MQTT broker IP Address", NULL, Format_IPv4, NULL, NULL, NULL, Setting_NonCoreFn, ethernet_set_ip, ethernet_get_ip, NULL, { .reboot_required = On } }, + { Setting_MQTTBrokerPort, Group_Networking, "MQTT broker port", NULL, Format_Int16, "####0", "1", "65535", Setting_NonCore, ðernet.mqtt.port, NULL, NULL, { .reboot_required = On } }, + { Setting_MQTTBrokerUserName, Group_Networking, "MQTT broker username", NULL, Format_String, "x(32)", NULL, "32", Setting_NonCore, ðernet.mqtt.user, NULL, NULL, { .allow_null = On } }, + { Setting_MQTTBrokerPassword, Group_Networking, "MQTT broker password", NULL, Format_Password, "x(32)", NULL, "32", Setting_NonCore, ðernet.mqtt.password, NULL, NULL, { .allow_null = On } }, +#endif +}; + +#ifndef NO_SETTINGS_DESCRIPTIONS + +static const setting_descr_t ethernet_settings_descr[] = { + { Setting_NetworkServices, "Network services/protocols to enable." }, + { Setting_Hostname, "Network hostname." }, + { Setting_IpMode, "IP Mode." }, + { Setting_IpAddress, "Static IP address." }, + { Setting_Gateway, "Static gateway address." }, + { Setting_NetMask, "Static netmask." }, + { Setting_TelnetPort, "(Raw) Telnet port number listening for incoming connections." }, +#if FTP_ENABLE + { Setting_FtpPort, "FTP port number listening for incoming connections." }, +#endif +#if HTTP_ENABLE + { Setting_HttpPort, "HTTP port number listening for incoming connections." }, +#endif + { Setting_WebSocketPort, "Websocket port number listening for incoming connections." + "\\nNOTE: WebUI requires this to be HTTP port number + 1." + }, +#if MQTT_ENABLE + { Setting_MQTTBrokerIpAddress, "IP address for remote MQTT broker. Set to 0.0.0.0 to disable connection." }, + { Setting_MQTTBrokerPort, "Remote MQTT broker portnumber." }, + { Setting_MQTTBrokerUserName, "Remote MQTT broker username." }, + { Setting_MQTTBrokerPassword, "Remote MQTT broker password." 
}, +#endif +}; + +#endif + +static void ethernet_settings_save (void) +{ + hal.nvs.memcpy_to_nvs(nvs_address, (uint8_t *)ðernet, sizeof(network_settings_t), true); +} + +void ethernet_settings_restore (void) +{ + memset(ðernet, 0, sizeof(network_settings_t)); + + strcpy(ethernet.hostname, NETWORK_HOSTNAME); + + ip4_addr_t addr; + + ethernet.ip_mode = (ip_mode_t)NETWORK_IPMODE; + + if(ip4addr_aton(NETWORK_IP, &addr) == 1) + set_addr(ethernet.ip, &addr); + + if(ip4addr_aton(NETWORK_GATEWAY, &addr) == 1) + set_addr(ethernet.gateway, &addr); + +#if NETWORK_IPMODE == 0 + if(ip4addr_aton(NETWORK_MASK, &addr) == 1) + set_addr(ethernet.mask, &addr); +#else + if(ip4addr_aton("255.255.255.0", &addr) == 1) + set_addr(ethernet.mask, &addr); +#endif + +#ifdef HAS_MAC_SETTING + bmac_eth_get(ethernet.mac); +#endif + + ethernet.ftp_port = NETWORK_FTP_PORT; + ethernet.telnet_port = NETWORK_TELNET_PORT; + ethernet.http_port = NETWORK_HTTP_PORT; + ethernet.websocket_port = NETWORK_WEBSOCKET_PORT; + ethernet.services.mask = allowed_services.mask; + +#if MQTT_ENABLE + + ethernet.mqtt.port = NETWORK_MQTT_PORT; + + #ifdef MQTT_IP_ADDRESS + if(ip4addr_aton(MQTT_IP_ADDRESS, &addr) == 1) + set_addr(ethernet.mqtt.ip, &addr); + #endif + + #ifdef MQTT_USERNAME + strcpy(ethernet.mqtt.user, MQTT_USERNAME); + #endif + #ifdef MQTT_PASSWORD + strcpy(ethernet.mqtt.password, MQTT_PASSWORD); + #endif + +#endif + + hal.nvs.memcpy_to_nvs(nvs_address, (uint8_t *)ðernet, sizeof(network_settings_t), true); +} + +static void ethernet_settings_load (void) +{ + if(hal.nvs.memcpy_from_nvs((uint8_t *)ðernet, nvs_address, sizeof(network_settings_t), true) != NVS_TransferResult_OK) + ethernet_settings_restore(); + + ethernet.services.mask &= allowed_services.mask; +} + +static setting_details_t setting_details = { + .groups = ethernet_groups, + .n_groups = sizeof(ethernet_groups) / sizeof(setting_group_detail_t), + .settings = ethernet_settings, + .n_settings = sizeof(ethernet_settings) / sizeof(setting_detail_t), +#ifndef NO_SETTINGS_DESCRIPTIONS + .descriptions = ethernet_settings_descr, + .n_descriptions = sizeof(ethernet_settings_descr) / sizeof(setting_descr_t), +#endif + .save = ethernet_settings_save, + .load = ethernet_settings_load, + .restore = ethernet_settings_restore +}; + +static void stream_changed (stream_type_t type) +{ + if(type != StreamType_SDCard) + active_stream = type; + + if(on_stream_changed) + on_stream_changed(type); +} + +bool enet_init (network_settings_t *settings) +{ + if((nvs_address = nvs_alloc(sizeof(network_settings_t)))) { + + on_report_options = grbl.on_report_options; + grbl.on_report_options = report_options; + + on_stream_changed = grbl.on_stream_changed; + grbl.on_stream_changed = stream_changed; + +#if MQTT_ENABLE + on_client_connected = mqtt_events.on_client_connected; + mqtt_events.on_client_connected = mqtt_connection_changed; +#endif + + settings_register(&setting_details); + +#if MODBUS_ENABLE & MODBUS_TCP_ENABLED + modbus_tcp_client_init (); +#endif + + allowed_services.mask = networking_get_services_list((char *)netservices).mask; + } + + return nvs_address != 0; +} + +#endif // ETHERNET_ENABLE diff --git a/Src/main.c b/Src/main.c index 3c8db5f6..25316c76 100644 --- a/Src/main.c +++ b/Src/main.c @@ -276,7 +276,12 @@ static void SystemClock_Config (void) __HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE1); RCC_OscInitTypeDef RCC_OscInitStruct = { +#if RTC_ENABLE + .OscillatorType = RCC_OSCILLATORTYPE_HSE|RCC_OSCILLATORTYPE_LSE, + .LSEState = RCC_LSE_ON, +#else .OscillatorType = 
RCC_OSCILLATORTYPE_HSE, +#endif .HSEState = RCC_HSE_ON, .PLL.PLLState = RCC_PLL_ON, .PLL.PLLSource = RCC_PLLSOURCE_HSE, diff --git a/Src/w5x00_ll_driver.c b/Src/w5x00_ll_driver.c index ac736c14..ed93848a 100644 --- a/Src/w5x00_ll_driver.c +++ b/Src/w5x00_ll_driver.c @@ -6,7 +6,7 @@ #include "w5x00_ll_driver.h" -#if ETHERNET_ENABLE && (_WIZCHIP_ == W5100S || _WIZCHIP_ == W5500) +#if ETHERNET_ENABLE && defined(_WIZCHIP_) && (_WIZCHIP_ == W5100S || _WIZCHIP_ == W5500) #include diff --git a/USB_DEVICE/App/usb_device.c b/USB_DEVICE/App/usb_device.c index 6173ea25..3fd58f57 100644 --- a/USB_DEVICE/App/usb_device.c +++ b/USB_DEVICE/App/usb_device.c @@ -7,13 +7,12 @@ ****************************************************************************** * @attention * - *

<h2><center>&copy; Copyright (c) 2021 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -99,4 +98,3 @@ void MX_USB_DEVICE_Init(void) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/USB_DEVICE/App/usb_device.h b/USB_DEVICE/App/usb_device.h index ad9b9d8f..e7b43796 100644 --- a/USB_DEVICE/App/usb_device.h +++ b/USB_DEVICE/App/usb_device.h @@ -7,13 +7,12 @@ ****************************************************************************** * @attention * - *

<h2><center>&copy; Copyright (c) 2021 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -101,5 +100,3 @@ void MX_USB_DEVICE_Init(void); #endif #endif /* __USB_DEVICE__H__ */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/USB_DEVICE/App/usbd_cdc_if.c b/USB_DEVICE/App/usbd_cdc_if.c index ddefa511..7c46469b 100644 --- a/USB_DEVICE/App/usbd_cdc_if.c +++ b/USB_DEVICE/App/usbd_cdc_if.c @@ -7,13 +7,12 @@ ****************************************************************************** * @attention * - *

<h2><center>&copy; Copyright (c) 2021 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -23,7 +22,6 @@ #include "usbd_cdc_if.h" /* USER CODE BEGIN INCLUDE */ -#include "driver.h" #include "usb_serial.h" /* USER CODE END INCLUDE */ @@ -302,7 +300,7 @@ uint8_t CDC_Transmit_FS(uint8_t* Buf, uint16_t Len) /** * @brief CDC_TransmitCplt_FS - * Data transmited callback + * Data transmitted callback * * @note * This function is IN transfer complete callback used to inform user that @@ -334,5 +332,3 @@ static int8_t CDC_TransmitCplt_FS(uint8_t *Buf, uint32_t *Len, uint8_t epnum) /** * @} */ - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/USB_DEVICE/App/usbd_cdc_if.h b/USB_DEVICE/App/usbd_cdc_if.h index b3c27a45..3d674f11 100644 --- a/USB_DEVICE/App/usbd_cdc_if.h +++ b/USB_DEVICE/App/usbd_cdc_if.h @@ -7,13 +7,12 @@ ****************************************************************************** * @attention * - *

<h2><center>&copy; Copyright (c) 2021 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -48,11 +47,10 @@ * @brief Defines. * @{ */ -/* USER CODE BEGIN EXPORTED_DEFINES */ /* Define size for the receive and transmit buffer over CDC */ -/* It's up to user to redefine and/or remove those define */ #define APP_RX_DATA_SIZE 2048 #define APP_TX_DATA_SIZE 2048 +/* USER CODE BEGIN EXPORTED_DEFINES */ /* USER CODE END EXPORTED_DEFINES */ @@ -131,4 +129,3 @@ uint8_t CDC_Transmit_FS(uint8_t* Buf, uint16_t Len); #endif /* __USBD_CDC_IF_H__ */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/USB_DEVICE/App/usbd_desc.c b/USB_DEVICE/App/usbd_desc.c index 7c96754b..64d13bfb 100644 --- a/USB_DEVICE/App/usbd_desc.c +++ b/USB_DEVICE/App/usbd_desc.c @@ -7,13 +7,12 @@ ****************************************************************************** * @attention * - *

<h2><center>&copy; Copyright (c) 2021 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -218,7 +217,7 @@ __ALIGN_BEGIN uint8_t USBD_FS_BOSDesc[USB_SIZ_BOS_DESC] __ALIGN_END = #pragma data_alignment=4 #endif /* defined ( __ICCARM__ ) */ -/** USB lang indentifier descriptor. */ +/** USB lang identifier descriptor. */ __ALIGN_BEGIN uint8_t USBD_LangIDDesc[USB_LEN_LANGID_STR_DESC] __ALIGN_END = { USB_LEN_LANGID_STR_DESC, @@ -388,7 +387,9 @@ uint8_t * USBD_FS_USR_BOSDescriptor(USBD_SpeedTypeDef speed, uint16_t *length) */ static void Get_SerialNum(void) { - uint32_t deviceserial0, deviceserial1, deviceserial2; + uint32_t deviceserial0; + uint32_t deviceserial1; + uint32_t deviceserial2; deviceserial0 = *(uint32_t *) DEVICE_ID1; deviceserial1 = *(uint32_t *) DEVICE_ID2; @@ -442,4 +443,3 @@ static void IntToUnicode(uint32_t value, uint8_t * pbuf, uint8_t len) * @} */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/USB_DEVICE/App/usbd_desc.h b/USB_DEVICE/App/usbd_desc.h index e22e45a7..0146169c 100644 --- a/USB_DEVICE/App/usbd_desc.h +++ b/USB_DEVICE/App/usbd_desc.h @@ -7,13 +7,12 @@ ****************************************************************************** * @attention * - *

<h2><center>&copy; Copyright (c) 2021 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -142,4 +141,3 @@ extern USBD_DescriptorsTypeDef FS_Desc; #endif /* __USBD_DESC__C__ */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/USB_DEVICE/Target/usbd_conf.c b/USB_DEVICE/Target/usbd_conf.c index f3d13405..7834d5b7 100644 --- a/USB_DEVICE/Target/usbd_conf.c +++ b/USB_DEVICE/Target/usbd_conf.c @@ -7,13 +7,12 @@ ****************************************************************************** * @attention * - *

<h2><center>&copy; Copyright (c) 2021 STMicroelectronics.
- * All rights reserved.</center></h2>
+ * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -25,6 +24,8 @@ #include "usbd_def.h" #include "usbd_core.h" +#include "usbd_cdc.h" + /* USER CODE BEGIN Includes */ /* USER CODE END Includes */ @@ -192,9 +193,17 @@ static void PCD_ResetCallback(PCD_HandleTypeDef *hpcd) void HAL_PCD_ResetCallback(PCD_HandleTypeDef *hpcd) #endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ { - USBD_SpeedTypeDef speed = USBD_SPEED_FULL; + USBD_SpeedTypeDef speed = USBD_SPEED_FULL; - if ( hpcd->Init.speed != PCD_SPEED_FULL) + if ( hpcd->Init.speed == PCD_SPEED_HIGH) + { + speed = USBD_SPEED_HIGH; + } + else if ( hpcd->Init.speed == PCD_SPEED_FULL) + { + speed = USBD_SPEED_FULL; + } + else { Error_Handler(); } @@ -582,16 +591,53 @@ USBD_StatusTypeDef USBD_LL_PrepareReceive(USBD_HandleTypeDef *pdev, uint8_t ep_a } /** - * @brief Returns the last transfered packet size. + * @brief Returns the last transferred packet size. * @param pdev: Device handle * @param ep_addr: Endpoint number - * @retval Recived Data Size + * @retval Received Data Size */ uint32_t USBD_LL_GetRxDataSize(USBD_HandleTypeDef *pdev, uint8_t ep_addr) { return HAL_PCD_EP_GetRxCount((PCD_HandleTypeDef*) pdev->pData, ep_addr); } +#ifdef USBD_HS_TESTMODE_ENABLE +/** + * @brief Set High speed Test mode. + * @param pdev: Device handle + * @param testmode: test mode + * @retval USBD Status + */ +USBD_StatusTypeDef USBD_LL_SetTestMode(USBD_HandleTypeDef *pdev, uint8_t testmode) +{ + UNUSED(pdev); + UNUSED(testmode); + + return USBD_OK; +} +#endif /* USBD_HS_TESTMODE_ENABLE */ + +/** + * @brief Static single allocation. + * @param size: Size of allocated memory + * @retval None + */ +void *USBD_static_malloc(uint32_t size) +{ + static uint32_t mem[(sizeof(USBD_CDC_HandleTypeDef)/4)+1];/* On 32-bit boundary */ + return mem; +} + +/** + * @brief Dummy memory free + * @param p: Pointer to allocated memory address + * @retval None + */ +void USBD_static_free(void *p) +{ + +} + /** * @brief Delays routine for the USB Device Library. * @param Delay: Delay in ms @@ -603,7 +649,7 @@ void USBD_LL_Delay(uint32_t Delay) } /** - * @brief Retuns the USB status depending on the HAL status: + * @brief Returns the USB status depending on the HAL status: * @param hal_status: HAL status * @retval USB status */ @@ -631,5 +677,3 @@ USBD_StatusTypeDef USBD_Get_USB_Status(HAL_StatusTypeDef hal_status) } return usb_status; } - -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/USB_DEVICE/Target/usbd_conf.h b/USB_DEVICE/Target/usbd_conf.h index 09719637..dd1d5b18 100644 --- a/USB_DEVICE/Target/usbd_conf.h +++ b/USB_DEVICE/Target/usbd_conf.h @@ -7,13 +7,12 @@ ****************************************************************************** * @attention * - *
© Copyright (c) 2021 STMicroelectronics. - * All rights reserved.
+ * Copyright (c) 2024 STMicroelectronics. + * All rights reserved. * - * This software component is licensed by ST under Ultimate Liberty license - * SLA0044, the "License"; You may not use this file except in compliance with - * the License. You may obtain a copy of the License at: - * www.st.com/SLA0044 + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ @@ -89,14 +88,13 @@ * @brief Aliases. * @{ */ - -/* Memory management macros */ - +/* Memory management macros make sure to use static memory allocation */ /** Alias for memory allocation. */ -#define USBD_malloc malloc + +#define USBD_malloc (void *)USBD_static_malloc /** Alias for memory release. */ -#define USBD_free free +#define USBD_free USBD_static_free /** Alias for memory set. */ #define USBD_memset memset @@ -114,24 +112,24 @@ printf("\n"); #else #define USBD_UsrLog(...) -#endif +#endif /* (USBD_DEBUG_LEVEL > 0U) */ #if (USBD_DEBUG_LEVEL > 1) -#define USBD_ErrLog(...) printf("ERROR: ") ;\ +#define USBD_ErrLog(...) printf("ERROR: ");\ printf(__VA_ARGS__);\ printf("\n"); #else #define USBD_ErrLog(...) -#endif +#endif /* (USBD_DEBUG_LEVEL > 1U) */ #if (USBD_DEBUG_LEVEL > 2) -#define USBD_DbgLog(...) printf("DEBUG : ") ;\ +#define USBD_DbgLog(...) printf("DEBUG : ");\ printf(__VA_ARGS__);\ printf("\n"); #else #define USBD_DbgLog(...) -#endif +#endif /* (USBD_DEBUG_LEVEL > 2U) */ /** * @} @@ -152,6 +150,8 @@ */ /* Exported functions -------------------------------------------------------*/ +void *USBD_static_malloc(uint32_t size); +void USBD_static_free(void *p); /** * @} @@ -171,4 +171,3 @@ #endif /* __USBD_CONF__H__ */ -/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/boards/longboard32.c b/boards/longboard32.c index f59a1745..bd247cbb 100644 --- a/boards/longboard32.c +++ b/boards/longboard32.c @@ -129,15 +129,23 @@ static void clear_ringleds (void *data) void board_init (void) { + xbar_t *port; uint8_t tool_probe_port = SLB_TLS_AUX_INPUT; - hal.signals_pullup_disable_cap.probe_triggered = Off; // SLB has isolated inputs +#if defined(MODBUS_DIR_AUX) && !(MODBUS_ENABLE & MODBUS_RTU_DIR_ENABLED) + + if((port = hal.port.get_pin_info(Port_Digital, Port_Output, MODBUS_DIR_AUX)) && !port->mode.claimed) { + uint8_t modbus_dir = MODBUS_DIR_AUX; + ioport_claim(Port_Digital, Port_Output, &modbus_dir, "N/A"); + } - xbar_t *tls = hal.port.get_pin_info(Port_Digital, Port_Input, tool_probe_port); +#endif + + hal.signals_pullup_disable_cap.probe_triggered = Off; // SLB has isolated inputs - if(tls && !tls->mode.claimed) { + if((port = hal.port.get_pin_info(Port_Digital, Port_Input, tool_probe_port)) && !port->mode.claimed) { - memcpy(&toolsetter, tls, sizeof(xbar_t)); + memcpy(&toolsetter, port, sizeof(xbar_t)); ioport_claim(Port_Digital, Port_Input, &tool_probe_port, "Toolsetter"); diff --git a/boards/longboard32_map.h b/boards/longboard32_map.h index 569c35d0..1f90d4d5 100644 --- a/boards/longboard32_map.h +++ b/boards/longboard32_map.h @@ -29,6 +29,10 @@ #error "This board has STM32F412 processor with a 25MHz crystal, select a corresponding build!" #endif +#if ETHERNET_ENABLE && _WIZCHIP_ != 5500 +#error "Board has a WZ5500 ethernet chip, please uncomment _WIZCHIP_ in my_machine.h!" 
+#endif + #ifndef BOARD_NAME #define BOARD_NAME "SuperLongBoard" #endif diff --git a/boards/st_morpho.c b/boards/st_morpho.c deleted file mode 100644 index 04f5b346..00000000 --- a/boards/st_morpho.c +++ /dev/null @@ -1,196 +0,0 @@ -/* - st_morpho.c - driver code for STM32F4xx ARM processors - - Part of grblHAL - - Copyright (c) 2020-2024 Terje Io - - Grbl is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - Grbl is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Grbl. If not, see . -*/ - -#include "driver.h" - -#if defined(BOARD_MORPHO_CNC) - -#include -#include - -#include "main.h" -#include "spi.h" -#include "grbl/protocol.h" -#include "grbl/settings.h" - -#ifdef NEOPIXEL_GPO -extern void neopixel_init (void); -#endif - -#if TRINAMIC_SPI_ENABLE - -static struct { - GPIO_TypeDef *port; - uint32_t pin; -} cs; - -static uint_fast8_t n_motors; -static TMC_spi_datagram_t datagram[TMC_N_MOTORS_MAX]; - -TMC_spi_status_t tmc_spi_read (trinamic_motor_t driver, TMC_spi_datagram_t *reg) -{ - static TMC_spi_status_t status = 0; - - uint8_t res; - uint_fast8_t idx = n_motors; - uint32_t f_spi = spi_set_speed(SPI_BAUDRATEPRESCALER_32); - volatile uint32_t dly = 100; - - datagram[driver.seq].addr.value = reg->addr.value; - datagram[driver.seq].addr.write = 0; - - DIGITAL_OUT(cs.port, cs.pin, 0); - - do { - spi_put_byte(datagram[--idx].addr.value); - spi_put_byte(0); - spi_put_byte(0); - spi_put_byte(0); - spi_put_byte(0); - } while(idx); - - while(--dly) {}; - - DIGITAL_OUT(cs.port, cs.pin, 1); - - dly = 50; - while(--dly) {}; - - DIGITAL_OUT(cs.port, cs.pin, 0); - - idx = n_motors; - do { - res = spi_put_byte(datagram[--idx].addr.value); - - if(idx == driver.seq) { - status = res; - reg->payload.data[3] = spi_get_byte(); - reg->payload.data[2] = spi_get_byte(); - reg->payload.data[1] = spi_get_byte(); - reg->payload.data[0] = spi_get_byte(); - } else { - spi_get_byte(); - spi_get_byte(); - spi_get_byte(); - spi_get_byte(); - } - } while(idx); - - dly = 100; - while(--dly) {}; - - DIGITAL_OUT(cs.port, cs.pin, 1); - - dly = 50; - while(--dly) {}; - - spi_set_speed(f_spi); - - return status; -} - -TMC_spi_status_t tmc_spi_write (trinamic_motor_t driver, TMC_spi_datagram_t *reg) -{ - TMC_spi_status_t status = 0; - - uint8_t res; - uint_fast8_t idx = n_motors; - uint32_t f_spi = spi_set_speed(SPI_BAUDRATEPRESCALER_32); - volatile uint32_t dly = 100; - - memcpy(&datagram[driver.seq], reg, sizeof(TMC_spi_datagram_t)); - datagram[driver.seq].addr.write = 1; - - DIGITAL_OUT(cs.port, cs.pin, 0); - - do { - res = spi_put_byte(datagram[--idx].addr.value); - spi_put_byte(datagram[idx].payload.data[3]); - spi_put_byte(datagram[idx].payload.data[2]); - spi_put_byte(datagram[idx].payload.data[1]); - spi_put_byte(datagram[idx].payload.data[0]); - - if(idx == driver.seq) { - status = res; - datagram[idx].addr.idx = 0; // TMC_SPI_STATUS_REG; - datagram[idx].addr.write = 0; - } - } while(idx); - - while(--dly) {}; - - DIGITAL_OUT(cs.port, cs.pin, 1); - - dly = 50; - while(--dly) {}; - - spi_set_speed(f_spi); - - return status; -} - -static void add_cs_pin (xbar_t *gpio, void *data) -{ - 
if(gpio->function == Output_MotorChipSelect) { - cs.pin = gpio->pin; - cs.port = (GPIO_TypeDef *)gpio->port; - } -} - -static void if_init (uint8_t motors, axes_signals_t axisflags) -{ - n_motors = motors; - hal.enumerate_pins(true, add_cs_pin, NULL); -} - -#endif - -void board_init (void) -{ -#if TRINAMIC_SPI_ENABLE - - trinamic_driver_if_t driver = { - .on_drivers_init = if_init - }; - - spi_init(); - - uint_fast8_t idx = TMC_N_MOTORS_MAX; - do { - datagram[--idx].addr.idx = 0; //TMC_SPI_STATUS_REG; - } while(idx); - - trinamic_if_init(&driver); - -#elif TRINAMIC_UART_ENABLE - - extern void tmc_uart_init (void); - - tmc_uart_init(); - -#endif - -#ifdef NEOPIXEL_GPO - neopixel_init(); -#endif -} - -#endif diff --git a/boards/stm32f407vet6_dev_board.h b/boards/stm32f407vet6_dev_board.h index a465b673..dba42cd5 100644 --- a/boards/stm32f407vet6_dev_board.h +++ b/boards/stm32f407vet6_dev_board.h @@ -19,15 +19,34 @@ along with grblHAL. If not, see . */ +// https://github.com/art103/JZ-F407VET6 + #if N_ABC_MOTORS > 2 #error "Axis configuration is not supported!" #endif -#define BOARD_NAME "STM32F407VET6 Dev Board" -#define BOARD_URL "https://github.com/terjeio/CNC_Breakout_Nucleo64" +#if !defined(STM32F407xx) || HSE_VALUE != 25000000 +#error "This board has STM32F407 processor with a 25MHz crystal, select a corresponding build!" +#endif + +#if ETHERNET_ENABLE && defined(_WIZCHIP_) +#error "Board has RMII ethernet chip, please comment out _WIZCHIP_ in my_machine.h!" +#endif + +#define BOARD_NAME "JZ-F407VET6 Dev Board" +#define BOARD_URL "https://github.com/yym36100/stm32f407_industrial_control_board" + +/* !! PRELIMINARY !! */ /* +P3: +------------------- +GND - 1 2 - 3V3 +PE7 - 3 4 - PE8 +PB10 - 5 6 - PC3 +PC2 - 7 8 - PE9 + P4: ------------------- 3V3 - 1 2 - GND @@ -50,12 +69,20 @@ PD9 - 11 12 - PD10 PB1 - 13 14 - PD8 PB0 - 15 16 - NC +P7: +-------- +GND - 1 +PE2 - 2 +3V3 - 3 + +-- + PA0 - SS -PA3 - LX -PA4 - LY -PA5 LZ -PA6 LM3 -PA8 LM4 +PA3 - +PA4 - LX +PA5 - LY +PA6 - LZ +PA8 - LM3 PB0 - SPWM PB1 - Start PB7 - Hold @@ -65,13 +92,15 @@ PC7 - SPDIR PC13 - DOOR PE0 DM4 PE1 DM3 +PE2 -> P7 hdr PE4 DX PE5 DY PE6 DZ +PD3 SD detect PD4 SEna PD8 FLOOD PD9 MIST -PD10 +PD10 - LM4 PD11 SX PD12 SY PD13 SZ @@ -90,16 +119,21 @@ PD15 SM4 //#define EEPROM_IS_FRAM 1 #endif */ -#define SERIAL_PORT 1 // GPIOA: TX = 9, RX = 10 +#define SERIAL_PORT 1 // GPIOA: TX = 9, RX = 10 + #if I2C_ENABLE -#define I2C_PORT 1 // GPIOB: SCL = 8, SDA = 9 +#define I2C_PORT 1 // GPIOB: SCL = 8, SDA = 9 #endif #if SDCARD_ENABLE #define SDCARD_SDIO 1 +#define SD_DETECT_PORT GPIOD +#define SD_DETECT_PIN 3 #endif -#define SPI_PORT 21 // GOPIB: SCK = 10, GPIOC: MISO - 2, MOSI - 3 +#define RTC_ENABLE 1 + +//#define SPI_PORT 21 // GOPIB: SCK = 10, GPIOC: MISO - 2, MOSI - 3 // Define step pulse output pins. @@ -138,9 +172,9 @@ PD15 SM4 // Define homing/hard limit switch input pins. #define LIMIT_PORT GPIOA -#define X_LIMIT_PIN 3 -#define Y_LIMIT_PIN 4 -#define Z_LIMIT_PIN 5 +#define X_LIMIT_PIN 4 +#define Y_LIMIT_PIN 5 +#define Z_LIMIT_PIN 6 #define LIMIT_INMODE GPIO_BITBAND // Define ganged axis or A axis step pulse and step direction output pins. @@ -151,7 +185,7 @@ PD15 SM4 #define M3_DIRECTION_PORT GPIOE #define M3_DIRECTION_PIN 0 #define M3_LIMIT_PORT GPIOA -#define M3_LIMIT_PIN 6 +#define M3_LIMIT_PIN 8 #endif // Define ganged axis or B axis step pulse and step direction output pins. 
@@ -161,57 +195,47 @@ PD15 SM4 #define M4_STEP_PIN 15 #define M4_DIRECTION_PORT GPIOE #define M4_DIRECTION_PIN 1 -#define M4_LIMIT_PORT GPIOA -#define M4_LIMIT_PIN 8 +#define M4_LIMIT_PORT GPIOD +#define M4_LIMIT_PIN 10 #endif -/* -#define AUXOUTPUT0_PORT GPIOB // Aux 0 -#define AUXOUTPUT0_PIN 15 -#if !ETHERNET_ENABLE -#define AUXOUTPUT1_PORT GPIOB // Aux 1 -#define AUXOUTPUT1_PIN 2 -#endif -#ifndef SPI_PORT -#define AUXOUTPUT2_PORT GPIOA // SDO -#define AUXOUTPUT2_PIN 6 -#define AUXOUTPUT3_PORT GPIOA // SCK -#define AUXOUTPUT3_PIN 5 +#define AUXOUTPUT0_PORT GPIOB // Spindle PWM +#define AUXOUTPUT0_PIN 0 +#define AUXOUTPUT1_PORT GPIOC // Spindle direction +#define AUXOUTPUT1_PIN 6 +#define AUXOUTPUT2_PORT GPIOC // Spindle enable +#define AUXOUTPUT2_PIN 7 +#define AUXOUTPUT3_PORT GPIOD // Coolant flood +#define AUXOUTPUT3_PIN 8 +#define AUXOUTPUT4_PORT GPIOD // Coolant mist +#define AUXOUTPUT4_PIN 9 +#if MODBUS_ENABLE & MODBUS_RTU_ENABLED +#define AUXOUTPUT5_PORT GPIOD // Modbus direction +#define AUXOUTPUT5_PIN 7 #endif -*/ -#define AUXOUTPUT4_PORT GPIOB // Spindle PWM -#define AUXOUTPUT4_PIN 0 -#define AUXOUTPUT5_PORT GPIOC // Spindle direction -#define AUXOUTPUT5_PIN 6 -#define AUXOUTPUT6_PORT GPIOC // Spindle enable -#define AUXOUTPUT6_PIN 7 -#define AUXOUTPUT7_PORT GPIOD // Coolant flood -#define AUXOUTPUT7_PIN 8 -#define AUXOUTPUT8_PORT GPIOD // Coolant mist -#define AUXOUTPUT8_PIN 9 // Define driver spindle pins #if DRIVER_SPINDLE_ENABLE & SPINDLE_ENA -#define SPINDLE_ENABLE_PORT AUXOUTPUT6_PORT -#define SPINDLE_ENABLE_PIN AUXOUTPUT6_PIN +#define SPINDLE_ENABLE_PORT AUXOUTPUT2_PORT +#define SPINDLE_ENABLE_PIN AUXOUTPUT2_PIN #endif #if DRIVER_SPINDLE_ENABLE & SPINDLE_PWM -#define SPINDLE_PWM_PORT AUXOUTPUT4_PORT -#define SPINDLE_PWM_PIN AUXOUTPUT4_PIN +#define SPINDLE_PWM_PORT AUXOUTPUT0_PORT +#define SPINDLE_PWM_PIN AUXOUTPUT0_PIN #endif #if DRIVER_SPINDLE_ENABLE & SPINDLE_DIR -#define SPINDLE_DIRECTION_PORT AUXOUTPUT5_PORT -#define SPINDLE_DIRECTION_PIN AUXOUTPUT5_PIN +#define SPINDLE_DIRECTION_PORT AUXOUTPUT1_PORT +#define SPINDLE_DIRECTION_PIN AUXOUTPUT1_PIN #endif // Define flood and mist coolant enable output pins. #if COOLANT_ENABLE & COOLANT_FLOOD -#define COOLANT_FLOOD_PORT AUXOUTPUT7_PORT -#define COOLANT_FLOOD_PIN AUXOUTPUT7_PIN +#define COOLANT_FLOOD_PORT AUXOUTPUT3_PORT +#define COOLANT_FLOOD_PIN AUXOUTPUT3_PIN #endif #if COOLANT_ENABLE & COOLANT_MIST -#define COOLANT_MIST_PORT AUXOUTPUT8_PORT -#define COOLANT_MIST_PIN AUXOUTPUT8_PIN +#define COOLANT_MIST_PORT AUXOUTPUT4_PORT +#define COOLANT_MIST_PIN AUXOUTPUT4_PIN #endif // Define user-control controls (cycle start, reset, feed hold) input pins. 
@@ -343,11 +367,11 @@ PD15 SM4 #endif #if MODBUS_ENABLE & MODBUS_RTU_ENABLED -#define SERIAL1_PORT 21 // GPIOD: TX = 5, RX = 6 -#if MODBUS_ENABLE & MODBUS_RTU_DIR_ENABLED +#undef MODBUS_ENABLE +#define MODBUS_ENABLE (MODBUS_RTU_ENABLED|MODBUS_RTU_DIR_ENABLED) +#define SERIAL1_PORT 21 // GPIOD: TX = 5, RX = 6 #define MODBUS_RTU_STREAM 1 -#define MODBUS_DIR_AUX 0 // GPIOD 7 -#endif +#define MODBUS_DIR_AUX 5 // GPIOD 7 #endif #define FLASH_CS_PORT GPIOE diff --git a/platformio.tpl b/platformio.tpl index 73d0200a..e28fc96d 100644 --- a/platformio.tpl +++ b/platformio.tpl @@ -52,14 +52,35 @@ lib_extra_dirs = Middlewares/ST/STM32_USB_Device_Library USB_DEVICE +[eth_networking] +build_flags = + -I /${ProjName}/LWIP/App + -I /${ProjName}/LWIP/Target + -I /${ProjName}/Middlewares/Third_Party/LwIP/src/include + -I /${ProjName}/Middlewares/Third_Party/LwIP/system + -I /${ProjName}/Middlewares/Third_Party/LwIP/src/include/netif + -I /${ProjName}/Middlewares/Third_Party/LwIP/src/include/lwip + -I /${ProjName}/Drivers/BSP/Components/dp83848 +lib_deps = + networking + webui + LWIP/App + LWIP/Target + Middlewares/Third_Party/LwIP + Drivers/BSP/Components/dp83848 +lib_extra_dirs = + [wiznet_networking] build_flags = - -I lwip/src/include -I networking/wiznet + -I Middlewares/Third_Party/LwIP/src/include + -I Middlewares/Third_Party/LwIP/system + -I Middlewares/Third_Party/LwIP/src/include/netif + -I Middlewares/Third_Party/LwIP/src/include/lwip lib_deps = - lwip networking webui + Middlewares/Third_Party/LwIP lib_extra_dirs = [env]