diff --git a/Makefile b/Makefile
index d204e2b5..c3b31a54 100644
--- a/Makefile
+++ b/Makefile
@@ -34,7 +34,7 @@ APPVERSION_P = 0
APPVERSION = "$(APPVERSION_M).$(APPVERSION_N).$(APPVERSION_P)"
# Application source files
-APP_SOURCE_PATH += src proto
+APP_SOURCE_PATH += src
SDK_SOURCE_PATH += lib_u2f
# Application icons
@@ -103,18 +103,16 @@ APP_SOURCE_FILES += ${BOLOS_SDK}/lib_standard_app/crypto_helpers.c
# Additional include paths
INCLUDES_PATH += ${BOLOS_SDK}/lib_standard_app $(NANOPB_DIR) .
-
+INCLUDES_PATH += proto
include vendor/nanopb/extra/nanopb.mk
DEFINES += PB_NO_ERRMSG=1
-SOURCE_FILES += $(NANOPB_CORE)
PB_FILES = $(wildcard proto/*.proto)
C_PB_FILES = $(patsubst %.proto,%.pb.c,$(PB_FILES))
PYTHON_PB_FILES = $(patsubst %.proto,%_pb2.py,$(PB_FILES))
# Build rule for C proto files
-SOURCE_FILES += $(C_PB_FILES)
.PHONY: c_pb python_pb clean_python_pb
@@ -143,3 +141,28 @@ check:
$(addprefix -I, $(INCLUDES_PATH))
include $(BOLOS_SDK)/Makefile.standard_app
+
+# --- nanopb + proto built with -fPIC, linked with the rest ---
+NANOPB_PIC_OBJDIR := $(TARGET_BUILD_DIR)/nanopb_pic
+NANOPB_PIC_CFLAGS := $(filter-out -fropi -frwpi,$(CFLAGS)) -fPIC
+NANOPB_PIC_CORE_O := $(NANOPB_PIC_OBJDIR)/pb_encode.o $(NANOPB_PIC_OBJDIR)/pb_decode.o $(NANOPB_PIC_OBJDIR)/pb_common.o
+NANOPB_PIC_PROTO_O := $(addprefix $(NANOPB_PIC_OBJDIR)/,$(notdir $(C_PB_FILES:.c=.o)))
+NANOPB_PIC_OBJECTS := $(NANOPB_PIC_CORE_O) $(NANOPB_PIC_PROTO_O)
+OBJECT_FILES += $(NANOPB_PIC_OBJECTS)
+
+$(NANOPB_PIC_OBJDIR):
+ @mkdir -p $@
+
+NANOPB_PIC_CC = $(CC) -c $(NANOPB_PIC_CFLAGS) -MMD -MT $@ -MF $(@:.o=.d) $(addprefix -D,$(DEFINES)) $(addprefix -I,$(INCLUDES_PATH)) -o $@ $<
+
+# nanopb core
+$(NANOPB_PIC_OBJDIR)/%.o: $(NANOPB_DIR)/%.c | $(NANOPB_PIC_OBJDIR) prepare
+ @echo "[CC-PIC] $@"
+ @$(NANOPB_PIC_CC)
+
+# proto *.pb.c
+$(NANOPB_PIC_OBJDIR)/%.pb.o: proto/%.pb.c | $(NANOPB_PIC_OBJDIR) prepare
+ @echo "[CC-PIC] $@"
+ @$(NANOPB_PIC_CC)
+
+$(BIN_DIR)/app.elf: $(NANOPB_PIC_OBJECTS)
diff --git a/proto/basic_types.pb.c b/proto/basic_types.pb.c
index 36c64644..852bdbe1 100644
--- a/proto/basic_types.pb.c
+++ b/proto/basic_types.pb.c
@@ -1,5 +1,5 @@
/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#include "proto/basic_types.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
diff --git a/proto/basic_types.pb.h b/proto/basic_types.pb.h
index 0e019d6e..8a52e219 100644
--- a/proto/basic_types.pb.h
+++ b/proto/basic_types.pb.h
@@ -1,5 +1,5 @@
/* Automatically generated nanopb header */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#ifndef PB_HEDERA_PROTO_BASIC_TYPES_PB_H_INCLUDED
#define PB_HEDERA_PROTO_BASIC_TYPES_PB_H_INCLUDED
@@ -12,137 +12,284 @@
#endif
/* Struct definitions */
+/* *
+ Each shard has a nonnegative shard number. Each realm within a given shard
+ has a nonnegative realm number (that number might be reused in other
+ shards). And each account, file, and smart contract instance within a given
+ realm has a nonnegative number (which might be reused in other realms).
+ Every account, file, and smart contract instance is within exactly one
+ realm. So a FileID is a triplet of numbers, like 0.1.2 for entity number 2
+ within realm 1 within shard 0. Each realm maintains a single counter for
+ assigning numbers, so if there is a file with ID 0.1.2, then there won't be
+ an account or smart contract instance with ID 0.1.2.
+
+ Everything is partitioned into realms so that each Solidity smart contract
+ can access everything in just a single realm, locking all those entities
+ while it's running, but other smart contracts could potentially run in
+ other realms in parallel. So realms allow Solidity to be parallelized
+ somewhat, even though the language itself assumes everything is serial. */
+typedef struct _Hedera_ShardID {
+ /* *
+ the shard number (nonnegative) */
+ int64_t shardNum;
+} Hedera_ShardID;
+
+/* *
+ The ID for a realm. Within a given shard, every realm has a unique ID. Each
+ account, file, and contract instance belongs to exactly one realm. */
+typedef struct _Hedera_RealmID {
+ /* *
+ The shard number (nonnegative) */
+ int64_t shardNum;
+ /* *
+ The realm number (nonnegative) */
+ int64_t realmNum;
+} Hedera_RealmID;
+
typedef PB_BYTES_ARRAY_T(32) Hedera_AccountID_alias_t;
/* *
The ID for an a cryptocurrency account */
-typedef struct _Hedera_AccountID {
+typedef struct _Hedera_AccountID {
/* *
The shard number (nonnegative) */
- int64_t shardNum;
+ int64_t shardNum;
/* *
The realm number (nonnegative) */
- int64_t realmNum;
- /* *
- A non-negative account number unique within its realm */
+ int64_t realmNum;
pb_size_t which_account;
union {
+ /* *
+ A non-negative account number unique within its realm */
int64_t accountNum;
+ /* *
+ The public key bytes to be used as the account's alias. The public key
+ bytes are the result of serializing a protobuf Key message for any
+ primitive key type. Currently only primitive key bytes are supported as
+ an alias (ThresholdKey, KeyList, ContractID, and delegatable_contract_id
+ are not supported)
+
+ At most one account can ever have a given alias and it is used for
+ account creation if it was automatically created using a crypto
+ transfer. It will be null if an account is created normally. It is
+ immutable once it is set for an account.
+
+ If a transaction auto-creates the account, any further transfers to that
+ alias will simply be deposited in that account, without creating
+ anything, and with no creation fee being charged. */
Hedera_AccountID_alias_t alias;
- } account;
+ } account;
} Hedera_AccountID;
+/* *
+ The ID for a file */
+typedef struct _Hedera_FileID {
+ /* *
+ The shard number (nonnegative) */
+ int64_t shardNum;
+ /* *
+ The realm number (nonnegative) */
+ int64_t realmNum;
+ /* *
+ A nonnegative File number unique within its realm */
+ int64_t fileNum;
+} Hedera_FileID;
+
typedef PB_BYTES_ARRAY_T(20) Hedera_ContractID_evm_address_t;
/* *
The ID for a smart contract instance */
-typedef struct _Hedera_ContractID {
+typedef struct _Hedera_ContractID {
/* *
The shard number (nonnegative) */
- int64_t shardNum;
+ int64_t shardNum;
/* *
The realm number (nonnegative) */
- int64_t realmNum;
- /* *
- A nonnegative number unique within a given shard and realm */
+ int64_t realmNum;
pb_size_t which_contract;
union {
+ /* *
+ A nonnegative number unique within a given shard and realm */
int64_t contractNum;
+ /* *
+ The 20-byte EVM address of the contract to call.
+
+ Every contract has an EVM address determined by its
+ shard.realm.num id. This address is as follows:
+ - The
+ first 4 bytes are the big-endian representation of the shard.
+ - The next 8 bytes are the big-endian representation of the
+ realm.
+ - The final 8 bytes are the big-endian representation of
+ the number.
+
+
+ Contracts created via CREATE2 have an additional, primary address
+ that is derived from the EIP-1014
+ specification, and does not have a simple relation to a
+ shard.realm.num id.
+
+ (Please do note that CREATE2 contracts can also be referenced by the
+ three-part EVM address described above.) */
Hedera_ContractID_evm_address_t evm_address;
- } contract;
+ } contract;
} Hedera_ContractID;
/* *
- The ID for a file */
-typedef struct _Hedera_FileID {
+ The ID for a transaction. This is used for retrieving receipts and records
+ for a transaction, for appending to a file right after creating it, for
+ instantiating a smart contract with bytecode in a file just created, and
+ internally by the network for detecting when duplicate transactions are
+ submitted. A user might get a transaction processed faster by submitting it
+ to N nodes, each with a different node account, but all with the same
+ TransactionID. Then, the transaction will take effect when the first of all
+ those nodes submits the transaction and it reaches consensus. The other
+ transactions will not take effect. So this could make the transaction take
+ effect faster, if any given node might be slow. However, the full
+ transaction fee is charged for each transaction, so the total fee is N times
+ as much if the transaction is sent to N nodes.
+
+ Applicable to Scheduled Transactions:
+ - The ID of a Scheduled Transaction has transactionValidStart and
+ accountIDs inherited from the ScheduleCreate transaction that created it.
+ That is to say that they are equal
+ - The scheduled property is true for Scheduled Transactions
+ - transactionValidStart, accountID and scheduled properties should be
+ omitted */
+typedef struct _Hedera_TransactionID {
/* *
- The shard number (nonnegative) */
- int64_t shardNum;
+ The transaction is invalid if consensusTimestamp <
+ transactionID.transactionStartValid */
+ bool has_transactionValidStart;
+ Hedera_Timestamp transactionValidStart;
/* *
- The realm number (nonnegative) */
- int64_t realmNum;
+ The Account ID that paid for this transaction */
+ bool has_accountID;
+ Hedera_AccountID accountID;
/* *
- A nonnegative File number unique within its realm */
- int64_t fileNum;
-} Hedera_FileID;
+ Whether the Transaction is of type Scheduled or no */
+ bool scheduled;
+ /* *
+ The identifier for an internal transaction that was spawned as part
+ of handling a user transaction. (These internal transactions share the
+ transactionValidStart and accountID of the user transaction, so a
+ nonce is necessary to give them a unique TransactionID.)
+
+ An example is when a "parent" ContractCreate or ContractCall transaction
+ calls one or more HTS precompiled contracts; each of the "child"
+ transactions spawned for a precompile has a id with a different nonce. */
+ int32_t nonce;
+} Hedera_TransactionID;
/* *
- A rational number, used to set the amount of a value transfer to collect as
- a custom fee */
-typedef struct _Hedera_Fraction {
+ An account, and the amount that it sends or receives during a cryptocurrency
+ or token transfer. */
+typedef struct _Hedera_AccountAmount {
/* *
- The rational's numerator */
- int64_t numerator;
+ The Account ID that sends/receives cryptocurrency or tokens */
+ bool has_accountID;
+ Hedera_AccountID accountID;
/* *
- The rational's denominator; a zero value will result in
- FRACTION_DIVIDES_BY_ZERO */
- int64_t denominator;
-} Hedera_Fraction;
+ The amount of tinybars (for Crypto transfers) or in the lowest
+ denomination (for Token transfers) that the account sends(negative) or
+ receives(positive) */
+ int64_t amount;
+ /* *
+ If true then the transfer is expected to be an approved allowance and the
+ accountID is expected to be the owner. The default is false (omitted). */
+ bool is_approval;
+} Hedera_AccountAmount;
/* *
- The ID for a realm. Within a given shard, every realm has a unique ID. Each
- account, file, and contract instance belongs to exactly one realm. */
-typedef struct _Hedera_RealmID {
- /* *
- The shard number (nonnegative) */
- int64_t shardNum;
+ A list of accounts and amounts to transfer out of each account (negative) or
+ into it (positive). */
+typedef struct _Hedera_TransferList {
/* *
- The realm number (nonnegative) */
- int64_t realmNum;
-} Hedera_RealmID;
+ Multiple list of AccountAmount pairs, each of which has an account and
+ an amount to transfer into it (positive) or out of it (negative)
+ Limited to 2 for a transfer between two accounts */
+ pb_size_t accountAmounts_count;
+ Hedera_AccountAmount accountAmounts[2];
+} Hedera_TransferList;
/* *
- Each shard has a nonnegative shard number. Each realm within a given shard
- has a nonnegative realm number (that number might be reused in other
- shards). And each account, file, and smart contract instance within a given
- realm has a nonnegative number (which might be reused in other realms).
- Every account, file, and smart contract instance is within exactly one
- realm. So a FileID is a triplet of numbers, like 0.1.2 for entity number 2
- within realm 1 within shard 0. Each realm maintains a single counter for
- assigning numbers, so if there is a file with ID 0.1.2, then there won't be
- an account or smart contract instance with ID 0.1.2.
+ A sender account, a receiver account, and the serial number of an NFT of a
+ Token with NON_FUNGIBLE_UNIQUE type. When minting NFTs the sender will be
+ the default AccountID instance (0.0.0) and when burning NFTs, the receiver
+ will be the default AccountID instance. */
+typedef struct _Hedera_NftTransfer {
+ /* *
+ The accountID of the sender */
+ bool has_senderAccountID;
+ Hedera_AccountID senderAccountID;
+ /* *
+ The accountID of the receiver */
+ bool has_receiverAccountID;
+ Hedera_AccountID receiverAccountID;
+ /* *
+ The serial number of the NFT */
+ int64_t serialNumber;
+ /* *
+ If true then the transfer is expected to be an approved allowance and the
+ senderAccountID is expected to be the owner. The default is false
+ (omitted). */
+ bool is_approval;
+} Hedera_NftTransfer;
- Everything is partitioned into realms so that each Solidity smart contract
- can access everything in just a single realm, locking all those entities
- while it's running, but other smart contracts could potentially run in
- other realms in parallel. So realms allow Solidity to be parallelized
- somewhat, even though the language itself assumes everything is serial. */
-typedef struct _Hedera_ShardID {
+/* *
+ A rational number, used to set the amount of a value transfer to collect as
+ a custom fee */
+typedef struct _Hedera_Fraction {
/* *
- the shard number (nonnegative) */
- int64_t shardNum;
-} Hedera_ShardID;
+ The rational's numerator */
+ int64_t numerator;
+ /* *
+ The rational's denominator; a zero value will result in
+ FRACTION_DIVIDES_BY_ZERO */
+ int64_t denominator;
+} Hedera_Fraction;
/* *
Unique identifier for a token */
-typedef struct _Hedera_TokenID {
+typedef struct _Hedera_TokenID {
/* *
A nonnegative shard number */
- int64_t shardNum;
+ int64_t shardNum;
/* *
A nonnegative realm number */
- int64_t realmNum;
+ int64_t realmNum;
/* *
A nonnegative token number */
- int64_t tokenNum;
+ int64_t tokenNum;
} Hedera_TokenID;
/* *
- An account, and the amount that it sends or receives during a cryptocurrency
- or token transfer. */
-typedef struct _Hedera_AccountAmount {
+ A list of token IDs and amounts representing the transferred out (negative)
+ or into (positive) amounts, represented in the lowest denomination of the
+ token */
+typedef struct _Hedera_TokenTransferList {
/* *
- The Account ID that sends/receives cryptocurrency or tokens */
- bool has_accountID;
- Hedera_AccountID accountID;
+ The ID of the token */
+ bool has_token;
+ Hedera_TokenID token;
/* *
- The amount of tinybars (for Crypto transfers) or in the lowest
- denomination (for Token transfers) that the account sends(negative) or
- receives(positive) */
- int64_t amount;
+ Applicable to tokens of type FUNGIBLE_COMMON. Multiple list of
+ AccountAmounts, each of which has an account and amount
+ Limited to 2 for 1 allowed transfer (reciprocal subtraction of balance +
+ actual transfer) */
+ pb_size_t transfers_count;
+ Hedera_AccountAmount transfers[2];
/* *
- If true then the transfer is expected to be an approved allowance and the
- accountID is expected to be the owner. The default is false (omitted). */
- bool is_approval;
-} Hedera_AccountAmount;
+ Applicable to tokens of type NON_FUNGIBLE_UNIQUE. Multiple list of
+ NftTransfers, each of which has a sender and receiver account, including
+ the serial number of the NFT
+ Limited to 1 here */
+ pb_size_t nftTransfers_count;
+ Hedera_NftTransfer nftTransfers[1];
+ /* *
+ If present, the number of decimals this fungible token type is expected to
+ have. The transfer will fail with UNEXPECTED_TOKEN_DECIMALS if the actual
+ decimals differ. */
+ bool has_expected_decimals;
+ Hedera_UInt32Value expected_decimals;
+} Hedera_TokenTransferList;
typedef PB_BYTES_ARRAY_T(32) Hedera_Key_ed25519_t;
typedef PB_BYTES_ARRAY_T(32) Hedera_Key_RSA_3072_t;
@@ -196,82 +343,66 @@ typedef PB_BYTES_ARRAY_T(32) Hedera_Key_ECDSA_secp256k1_t;
Each Key should not have more than 46 levels, which implies 15 levels of
nested ThresholdKeys. */
-typedef struct _Hedera_Key {
- /* *
- smart contract instance that is authorized as if it had signed with a
- key */
+typedef struct _Hedera_Key {
pb_size_t which_key;
union {
+ /* *
+ smart contract instance that is authorized as if it had signed with a
+ key */
Hedera_ContractID contractID;
+ /* *
+ Ed25519 public key bytes */
Hedera_Key_ed25519_t ed25519;
+ /* *
+ (NOT SUPPORTED) RSA-3072 public key bytes */
Hedera_Key_RSA_3072_t RSA_3072;
+ /* *
+ (NOT SUPPORTED) ECDSA with the p-384 curve public key bytes */
Hedera_Key_ECDSA_384_t ECDSA_384;
+ /* *
+ Compressed ECDSA(secp256k1) public key bytes */
Hedera_Key_ECDSA_secp256k1_t ECDSA_secp256k1;
+ /* *
+ A smart contract that, if the recipient of the active message frame,
+ should be treated as having signed. (Note this does not mean the code
+ being executed in the frame will belong to the given contract, since
+ it could be running another contract's code via delegatecall.
+ So setting this key is a more permissive version of setting the
+ contractID key, which also requires the code in the active message frame
+ belong to the the contract with the given id.) */
Hedera_ContractID delegatable_contract_id;
- } key;
+ } key;
} Hedera_Key;
/* *
- A sender account, a receiver account, and the serial number of an NFT of a
- Token with NON_FUNGIBLE_UNIQUE type. When minting NFTs the sender will be
- the default AccountID instance (0.0.0) and when burning NFTs, the receiver
- will be the default AccountID instance. */
-typedef struct _Hedera_NftTransfer {
- /* *
- The accountID of the sender */
- bool has_senderAccountID;
- Hedera_AccountID senderAccountID;
- /* *
- The accountID of the receiver */
- bool has_receiverAccountID;
- Hedera_AccountID receiverAccountID;
- /* *
- The serial number of the NFT */
- int64_t serialNumber;
+ A list of keys that requires all keys (M-of-M) to sign unless otherwise
+ specified in documentation. A KeyList may contain repeated keys, but all
+ repeated keys are only required to sign once. */
+typedef struct _Hedera_KeyList {
/* *
- If true then the transfer is expected to be an approved allowance and the
- senderAccountID is expected to be the owner. The default is false
- (omitted). */
- bool is_approval;
-} Hedera_NftTransfer;
+ list of keys
+ Limited to 1 here (because we don't have malloc!) */
+ pb_size_t keys_count;
+ Hedera_Key keys[1];
+} Hedera_KeyList;
/* *
- Staking metadata for an account or a contract returned in CryptoGetInfo or
- ContractGetInfo queries */
-typedef struct _Hedera_StakingInfo {
- /* *
- If true, this account or contract declined to receive a staking reward. */
- bool decline_reward;
- /* *
- The staking period during which either the staking settings for this
- account or contract changed (such as starting staking or changing
- staked_node_id) or the most recent reward was earned, whichever is later.
- If this account or contract is not currently staked to a node, then this
- field is not set. */
- bool has_stake_period_start;
- Hedera_Timestamp stake_period_start;
- /* *
- The amount in tinybars that will be received in the next reward situation. */
- int64_t pending_reward;
+ A set of public keys that are used together to form a threshold signature.
+ If the threshold is N and there are M keys, then this is an N of M threshold
+ signature. If an account is associated with ThresholdKeys, then a
+ transaction to move cryptocurrency out of it must be signed by a list of M
+ signatures, where at most M-N of them are blank, and the other at least N of
+ them are valid signatures corresponding to at least N of the public keys
+ listed here. */
+typedef struct _Hedera_ThresholdKey {
/* *
- The total of balance of all accounts staked to this account or contract. */
- int64_t staked_to_me;
+ A valid signature set must have at least this many signatures */
+ uint32_t threshold;
/* *
- The account to which this account or contract is staking. */
- pb_size_t which_staked_id;
- union {
- Hedera_AccountID staked_account_id;
- int64_t staked_node_id;
- } staked_id;
-} Hedera_StakingInfo;
-
-/* A token - account association */
-typedef struct _Hedera_TokenAssociation {
- bool has_token_id;
- Hedera_TokenID token_id; /* The token involved in the association */
- bool has_account_id;
- Hedera_AccountID account_id; /* The account involved in the association */
-} Hedera_TokenAssociation;
+ List of all the keys that can sign */
+ bool has_keys;
+ Hedera_KeyList keys;
+} Hedera_ThresholdKey;
/* *
A number of transferable units of a certain token.
@@ -284,147 +415,68 @@ typedef struct _Hedera_TokenAssociation {
(decimals=8).
Transferable units are not directly comparable across different tokens. */
-typedef struct _Hedera_TokenBalance {
+typedef struct _Hedera_TokenBalance {
/* *
A unique token id */
bool has_tokenId;
- Hedera_TokenID tokenId;
+ Hedera_TokenID tokenId;
/* *
Number of transferable units of the identified token. For token of type
FUNGIBLE_COMMON - balance in the smallest denomination. For token of type
NON_FUNGIBLE_UNIQUE - the number of NFTs held by the account */
- uint64_t balance;
+ uint64_t balance;
/* *
Tokens divide into 10decimals pieces */
- uint32_t decimals;
+ uint32_t decimals;
} Hedera_TokenBalance;
-/* *
- The ID for a transaction. This is used for retrieving receipts and records
- for a transaction, for appending to a file right after creating it, for
- instantiating a smart contract with bytecode in a file just created, and
- internally by the network for detecting when duplicate transactions are
- submitted. A user might get a transaction processed faster by submitting it
- to N nodes, each with a different node account, but all with the same
- TransactionID. Then, the transaction will take effect when the first of all
- those nodes submits the transaction and it reaches consensus. The other
- transactions will not take effect. So this could make the transaction take
- effect faster, if any given node might be slow. However, the full
- transaction fee is charged for each transaction, so the total fee is N times
- as much if the transaction is sent to N nodes.
-
- Applicable to Scheduled Transactions:
- - The ID of a Scheduled Transaction has transactionValidStart and
- accountIDs inherited from the ScheduleCreate transaction that created it.
- That is to say that they are equal
- - The scheduled property is true for Scheduled Transactions
- - transactionValidStart, accountID and scheduled properties should be
- omitted */
-typedef struct _Hedera_TransactionID {
- /* *
- The transaction is invalid if consensusTimestamp <
- transactionID.transactionStartValid */
- bool has_transactionValidStart;
- Hedera_Timestamp transactionValidStart;
- /* *
- The Account ID that paid for this transaction */
- bool has_accountID;
- Hedera_AccountID accountID;
- /* *
- Whether the Transaction is of type Scheduled or no */
- bool scheduled;
- /* *
- The identifier for an internal transaction that was spawned as part
- of handling a user transaction. (These internal transactions share the
- transactionValidStart and accountID of the user transaction, so a
- nonce is necessary to give them a unique TransactionID.)
-
- An example is when a "parent" ContractCreate or ContractCall transaction
- calls one or more HTS precompiled contracts; each of the "child"
- transactions spawned for a precompile has a id with a different nonce. */
- int32_t nonce;
-} Hedera_TransactionID;
-
-/* *
- A list of keys that requires all keys (M-of-M) to sign unless otherwise
- specified in documentation. A KeyList may contain repeated keys, but all
- repeated keys are only required to sign once. */
-typedef struct _Hedera_KeyList {
- /* *
- list of keys
- Limited to 1 here (because we don't have malloc!) */
- pb_size_t keys_count;
- Hedera_Key keys[1];
-} Hedera_KeyList;
-
/* *
A sequence of token balances
Limited to 1 here */
-typedef struct _Hedera_TokenBalances {
+typedef struct _Hedera_TokenBalances {
pb_size_t tokenBalances_count;
- Hedera_TokenBalance tokenBalances[1];
+ Hedera_TokenBalance tokenBalances[1];
} Hedera_TokenBalances;
-/* *
- A list of token IDs and amounts representing the transferred out (negative)
- or into (positive) amounts, represented in the lowest denomination of the
- token */
-typedef struct _Hedera_TokenTransferList {
- /* *
- The ID of the token */
- bool has_token;
- Hedera_TokenID token;
- /* *
- Applicable to tokens of type FUNGIBLE_COMMON. Multiple list of
- AccountAmounts, each of which has an account and amount
- Limited to 2 for 1 allowed transfer (reciprocal subtraction of balance +
- actual transfer) */
- pb_size_t transfers_count;
- Hedera_AccountAmount transfers[2];
- /* *
- Applicable to tokens of type NON_FUNGIBLE_UNIQUE. Multiple list of
- NftTransfers, each of which has a sender and receiver account, including
- the serial number of the NFT
- Limited to 1 here */
- pb_size_t nftTransfers_count;
- Hedera_NftTransfer nftTransfers[1];
- /* *
- If present, the number of decimals this fungible token type is expected to
- have. The transfer will fail with UNEXPECTED_TOKEN_DECIMALS if the actual
- decimals differ. */
- bool has_expected_decimals;
- Hedera_UInt32Value expected_decimals;
-} Hedera_TokenTransferList;
+/* A token - account association */
+typedef struct _Hedera_TokenAssociation {
+ bool has_token_id;
+ Hedera_TokenID token_id; /* The token involved in the association */
+ bool has_account_id;
+ Hedera_AccountID account_id; /* The account involved in the association */
+} Hedera_TokenAssociation;
/* *
- A list of accounts and amounts to transfer out of each account (negative) or
- into it (positive). */
-typedef struct _Hedera_TransferList {
+ Staking metadata for an account or a contract returned in CryptoGetInfo or
+ ContractGetInfo queries */
+typedef struct _Hedera_StakingInfo {
/* *
- Multiple list of AccountAmount pairs, each of which has an account and
- an amount to transfer into it (positive) or out of it (negative)
- Limited to 2 for a transfer between two accounts */
- pb_size_t accountAmounts_count;
- Hedera_AccountAmount accountAmounts[2];
-} Hedera_TransferList;
-
-/* *
- A set of public keys that are used together to form a threshold signature.
- If the threshold is N and there are M keys, then this is an N of M threshold
- signature. If an account is associated with ThresholdKeys, then a
- transaction to move cryptocurrency out of it must be signed by a list of M
- signatures, where at most M-N of them are blank, and the other at least N of
- them are valid signatures corresponding to at least N of the public keys
- listed here. */
-typedef struct _Hedera_ThresholdKey {
+ If true, this account or contract declined to receive a staking reward. */
+ bool decline_reward;
/* *
- A valid signature set must have at least this many signatures */
- uint32_t threshold;
+ The staking period during which either the staking settings for this
+ account or contract changed (such as starting staking or changing
+ staked_node_id) or the most recent reward was earned, whichever is later.
+ If this account or contract is not currently staked to a node, then this
+ field is not set. */
+ bool has_stake_period_start;
+ Hedera_Timestamp stake_period_start;
/* *
- List of all the keys that can sign */
- bool has_keys;
- Hedera_KeyList keys;
-} Hedera_ThresholdKey;
+ The amount in tinybars that will be received in the next reward situation. */
+ int64_t pending_reward;
+ /* *
+ The total of balance of all accounts staked to this account or contract. */
+ int64_t staked_to_me;
+ pb_size_t which_staked_id;
+ union {
+ /* *
+ The account to which this account or contract is staking. */
+ Hedera_AccountID staked_account_id;
+ /* *
+ The ID of the node this account or contract is staked to. */
+ int64_t staked_node_id;
+ } staked_id;
+} Hedera_StakingInfo;
#ifdef __cplusplus
@@ -472,62 +524,62 @@ extern "C" {
#define Hedera_StakingInfo_init_zero {0, false, Hedera_Timestamp_init_zero, 0, 0, 0, {Hedera_AccountID_init_zero}}
/* Field tags (for use in manual encoding/decoding) */
+#define Hedera_ShardID_shardNum_tag 1
+#define Hedera_RealmID_shardNum_tag 1
+#define Hedera_RealmID_realmNum_tag 2
#define Hedera_AccountID_shardNum_tag 1
#define Hedera_AccountID_realmNum_tag 2
#define Hedera_AccountID_accountNum_tag 3
#define Hedera_AccountID_alias_tag 4
+#define Hedera_FileID_shardNum_tag 1
+#define Hedera_FileID_realmNum_tag 2
+#define Hedera_FileID_fileNum_tag 3
#define Hedera_ContractID_shardNum_tag 1
#define Hedera_ContractID_realmNum_tag 2
#define Hedera_ContractID_contractNum_tag 3
#define Hedera_ContractID_evm_address_tag 4
-#define Hedera_FileID_shardNum_tag 1
-#define Hedera_FileID_realmNum_tag 2
-#define Hedera_FileID_fileNum_tag 3
+#define Hedera_TransactionID_transactionValidStart_tag 1
+#define Hedera_TransactionID_accountID_tag 2
+#define Hedera_TransactionID_scheduled_tag 3
+#define Hedera_TransactionID_nonce_tag 4
+#define Hedera_AccountAmount_accountID_tag 1
+#define Hedera_AccountAmount_amount_tag 2
+#define Hedera_AccountAmount_is_approval_tag 3
+#define Hedera_TransferList_accountAmounts_tag 1
+#define Hedera_NftTransfer_senderAccountID_tag 1
+#define Hedera_NftTransfer_receiverAccountID_tag 2
+#define Hedera_NftTransfer_serialNumber_tag 3
+#define Hedera_NftTransfer_is_approval_tag 4
#define Hedera_Fraction_numerator_tag 1
#define Hedera_Fraction_denominator_tag 2
-#define Hedera_RealmID_shardNum_tag 1
-#define Hedera_RealmID_realmNum_tag 2
-#define Hedera_ShardID_shardNum_tag 1
#define Hedera_TokenID_shardNum_tag 1
#define Hedera_TokenID_realmNum_tag 2
#define Hedera_TokenID_tokenNum_tag 3
-#define Hedera_AccountAmount_accountID_tag 1
-#define Hedera_AccountAmount_amount_tag 2
-#define Hedera_AccountAmount_is_approval_tag 3
+#define Hedera_TokenTransferList_token_tag 1
+#define Hedera_TokenTransferList_transfers_tag 2
+#define Hedera_TokenTransferList_nftTransfers_tag 3
+#define Hedera_TokenTransferList_expected_decimals_tag 4
#define Hedera_Key_contractID_tag 1
#define Hedera_Key_ed25519_tag 2
#define Hedera_Key_RSA_3072_tag 3
#define Hedera_Key_ECDSA_384_tag 4
#define Hedera_Key_ECDSA_secp256k1_tag 7
#define Hedera_Key_delegatable_contract_id_tag 8
-#define Hedera_NftTransfer_senderAccountID_tag 1
-#define Hedera_NftTransfer_receiverAccountID_tag 2
-#define Hedera_NftTransfer_serialNumber_tag 3
-#define Hedera_NftTransfer_is_approval_tag 4
+#define Hedera_KeyList_keys_tag 1
+#define Hedera_ThresholdKey_threshold_tag 1
+#define Hedera_ThresholdKey_keys_tag 2
+#define Hedera_TokenBalance_tokenId_tag 1
+#define Hedera_TokenBalance_balance_tag 2
+#define Hedera_TokenBalance_decimals_tag 3
+#define Hedera_TokenBalances_tokenBalances_tag 1
+#define Hedera_TokenAssociation_token_id_tag 1
+#define Hedera_TokenAssociation_account_id_tag 2
#define Hedera_StakingInfo_decline_reward_tag 1
#define Hedera_StakingInfo_stake_period_start_tag 2
#define Hedera_StakingInfo_pending_reward_tag 3
#define Hedera_StakingInfo_staked_to_me_tag 4
#define Hedera_StakingInfo_staked_account_id_tag 5
#define Hedera_StakingInfo_staked_node_id_tag 6
-#define Hedera_TokenAssociation_token_id_tag 1
-#define Hedera_TokenAssociation_account_id_tag 2
-#define Hedera_TokenBalance_tokenId_tag 1
-#define Hedera_TokenBalance_balance_tag 2
-#define Hedera_TokenBalance_decimals_tag 3
-#define Hedera_TransactionID_transactionValidStart_tag 1
-#define Hedera_TransactionID_accountID_tag 2
-#define Hedera_TransactionID_scheduled_tag 3
-#define Hedera_TransactionID_nonce_tag 4
-#define Hedera_KeyList_keys_tag 1
-#define Hedera_TokenBalances_tokenBalances_tag 1
-#define Hedera_TokenTransferList_token_tag 1
-#define Hedera_TokenTransferList_transfers_tag 2
-#define Hedera_TokenTransferList_nftTransfers_tag 3
-#define Hedera_TokenTransferList_expected_decimals_tag 4
-#define Hedera_TransferList_accountAmounts_tag 1
-#define Hedera_ThresholdKey_threshold_tag 1
-#define Hedera_ThresholdKey_keys_tag 2
/* Struct field encoding specification for nanopb */
#define Hedera_ShardID_FIELDLIST(X, a) \
@@ -724,6 +776,7 @@ extern const pb_msgdesc_t Hedera_StakingInfo_msg;
#define Hedera_StakingInfo_fields &Hedera_StakingInfo_msg
/* Maximum encoded size of messages (where known) */
+#define HEDERA_PROTO_BASIC_TYPES_PB_H_MAX_SIZE Hedera_TokenTransferList_size
#define Hedera_AccountAmount_size 71
#define Hedera_AccountID_size 56
#define Hedera_ContractID_size 44
diff --git a/proto/basic_types_pb2.py b/proto/basic_types_pb2.py
deleted file mode 100644
index 1861ee2f..00000000
--- a/proto/basic_types_pb2.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/basic_types.proto
-"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-import nanopb_pb2 as nanopb__pb2
-from proto import timestamp_pb2 as proto_dot_timestamp__pb2
-from proto import wrappers_pb2 as proto_dot_wrappers__pb2
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17proto/basic_types.proto\x12\x06Hedera\x1a\x0cnanopb.proto\x1a\x15proto/timestamp.proto\x1a\x14proto/wrappers.proto\"\x1b\n\x07ShardID\x12\x10\n\x08shardNum\x18\x01 \x01(\x03\"-\n\x07RealmID\x12\x10\n\x08shardNum\x18\x01 \x01(\x03\x12\x10\n\x08realmNum\x18\x02 \x01(\x03\"h\n\tAccountID\x12\x10\n\x08shardNum\x18\x01 \x01(\x03\x12\x10\n\x08realmNum\x18\x02 \x01(\x03\x12\x14\n\naccountNum\x18\x03 \x01(\x03H\x00\x12\x16\n\x05\x61lias\x18\x04 \x01(\x0c\x42\x05\x92?\x02\x08 H\x00\x42\t\n\x07\x61\x63\x63ount\"=\n\x06\x46ileID\x12\x10\n\x08shardNum\x18\x01 \x01(\x03\x12\x10\n\x08realmNum\x18\x02 \x01(\x03\x12\x0f\n\x07\x66ileNum\x18\x03 \x01(\x03\"q\n\nContractID\x12\x10\n\x08shardNum\x18\x01 \x01(\x03\x12\x10\n\x08realmNum\x18\x02 \x01(\x03\x12\x15\n\x0b\x63ontractNum\x18\x03 \x01(\x03H\x00\x12\x1c\n\x0b\x65vm_address\x18\x04 \x01(\x0c\x42\x05\x92?\x02\x08\x14H\x00\x42\n\n\x08\x63ontract\"\x89\x01\n\rTransactionID\x12\x30\n\x15transactionValidStart\x18\x01 \x01(\x0b\x32\x11.Hedera.Timestamp\x12$\n\taccountID\x18\x02 \x01(\x0b\x32\x11.Hedera.AccountID\x12\x11\n\tscheduled\x18\x03 \x01(\x08\x12\r\n\x05nonce\x18\x04 \x01(\x05\"Z\n\rAccountAmount\x12$\n\taccountID\x18\x01 \x01(\x0b\x32\x11.Hedera.AccountID\x12\x0e\n\x06\x61mount\x18\x02 \x01(\x12\x12\x13\n\x0bis_approval\x18\x03 \x01(\x08\"D\n\x0cTransferList\x12\x34\n\x0e\x61\x63\x63ountAmounts\x18\x01 \x03(\x0b\x32\x15.Hedera.AccountAmountB\x05\x92?\x02\x10\x02\"\x92\x01\n\x0bNftTransfer\x12*\n\x0fsenderAccountID\x18\x01 \x01(\x0b\x32\x11.Hedera.AccountID\x12,\n\x11receiverAccountID\x18\x02 \x01(\x0b\x32\x11.Hedera.AccountID\x12\x14\n\x0cserialNumber\x18\x03 \x01(\x03\x12\x13\n\x0bis_approval\x18\x04 \x01(\x08\"\xc6\x01\n\x11TokenTransferList\x12\x1e\n\x05token\x18\x01 \x01(\x0b\x32\x0f.Hedera.TokenID\x12/\n\ttransfers\x18\x02 \x03(\x0b\x32\x15.Hedera.AccountAmountB\x05\x92?\x02\x10\x02\x12\x30\n\x0cnftTransfers\x18\x03 
\x03(\x0b\x32\x13.Hedera.NftTransferB\x05\x92?\x02\x10\x01\x12.\n\x11\x65xpected_decimals\x18\x04 \x01(\x0b\x32\x13.Hedera.UInt32Value\"2\n\x08\x46raction\x12\x11\n\tnumerator\x18\x01 \x01(\x03\x12\x13\n\x0b\x64\x65nominator\x18\x02 \x01(\x03\"?\n\x07TokenID\x12\x10\n\x08shardNum\x18\x01 \x01(\x03\x12\x10\n\x08realmNum\x18\x02 \x01(\x03\x12\x10\n\x08tokenNum\x18\x03 \x01(\x03\"\xe0\x01\n\x03Key\x12(\n\ncontractID\x18\x01 \x01(\x0b\x32\x12.Hedera.ContractIDH\x00\x12\x18\n\x07\x65\x64\x32\x35\x35\x31\x39\x18\x02 \x01(\x0c\x42\x05\x92?\x02\x08 H\x00\x12\x19\n\x08RSA_3072\x18\x03 \x01(\x0c\x42\x05\x92?\x02\x08 H\x00\x12\x1a\n\tECDSA_384\x18\x04 \x01(\x0c\x42\x05\x92?\x02\x08 H\x00\x12 \n\x0f\x45\x43\x44SA_secp256k1\x18\x07 \x01(\x0c\x42\x05\x92?\x02\x08 H\x00\x12\x35\n\x17\x64\x65legatable_contract_id\x18\x08 \x01(\x0b\x32\x12.Hedera.ContractIDH\x00\x42\x05\n\x03key\"@\n\x0cThresholdKey\x12\x11\n\tthreshold\x18\x01 \x01(\r\x12\x1d\n\x04keys\x18\x02 \x01(\x0b\x32\x0f.Hedera.KeyList\"+\n\x07KeyList\x12 \n\x04keys\x18\x01 \x03(\x0b\x32\x0b.Hedera.KeyB\x05\x92?\x02\x10\x01\"S\n\x0cTokenBalance\x12 \n\x07tokenId\x18\x01 \x01(\x0b\x32\x0f.Hedera.TokenID\x12\x0f\n\x07\x62\x61lance\x18\x02 \x01(\x04\x12\x10\n\x08\x64\x65\x63imals\x18\x03 \x01(\r\"C\n\rTokenBalances\x12\x32\n\rtokenBalances\x18\x01 \x03(\x0b\x32\x14.Hedera.TokenBalanceB\x05\x92?\x02\x10\x01\"\\\n\x10TokenAssociation\x12!\n\x08token_id\x18\x01 \x01(\x0b\x32\x0f.Hedera.TokenID\x12%\n\naccount_id\x18\x02 \x01(\x0b\x32\x11.Hedera.AccountID\"\xd9\x01\n\x0bStakingInfo\x12\x16\n\x0e\x64\x65\x63line_reward\x18\x01 \x01(\x08\x12-\n\x12stake_period_start\x18\x02 \x01(\x0b\x32\x11.Hedera.Timestamp\x12\x16\n\x0epending_reward\x18\x03 \x01(\x03\x12\x14\n\x0cstaked_to_me\x18\x04 \x01(\x03\x12.\n\x11staked_account_id\x18\x05 \x01(\x0b\x32\x11.Hedera.AccountIDH\x00\x12\x18\n\x0estaked_node_id\x18\x06 \x01(\x03H\x00\x42\x0b\n\tstaked_idb\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.basic_types_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
-
- DESCRIPTOR._options = None
- _ACCOUNTID.fields_by_name['alias']._options = None
- _ACCOUNTID.fields_by_name['alias']._serialized_options = b'\222?\002\010 '
- _CONTRACTID.fields_by_name['evm_address']._options = None
- _CONTRACTID.fields_by_name['evm_address']._serialized_options = b'\222?\002\010\024'
- _TRANSFERLIST.fields_by_name['accountAmounts']._options = None
- _TRANSFERLIST.fields_by_name['accountAmounts']._serialized_options = b'\222?\002\020\002'
- _TOKENTRANSFERLIST.fields_by_name['transfers']._options = None
- _TOKENTRANSFERLIST.fields_by_name['transfers']._serialized_options = b'\222?\002\020\002'
- _TOKENTRANSFERLIST.fields_by_name['nftTransfers']._options = None
- _TOKENTRANSFERLIST.fields_by_name['nftTransfers']._serialized_options = b'\222?\002\020\001'
- _KEY.fields_by_name['ed25519']._options = None
- _KEY.fields_by_name['ed25519']._serialized_options = b'\222?\002\010 '
- _KEY.fields_by_name['RSA_3072']._options = None
- _KEY.fields_by_name['RSA_3072']._serialized_options = b'\222?\002\010 '
- _KEY.fields_by_name['ECDSA_384']._options = None
- _KEY.fields_by_name['ECDSA_384']._serialized_options = b'\222?\002\010 '
- _KEY.fields_by_name['ECDSA_secp256k1']._options = None
- _KEY.fields_by_name['ECDSA_secp256k1']._serialized_options = b'\222?\002\010 '
- _KEYLIST.fields_by_name['keys']._options = None
- _KEYLIST.fields_by_name['keys']._serialized_options = b'\222?\002\020\001'
- _TOKENBALANCES.fields_by_name['tokenBalances']._options = None
- _TOKENBALANCES.fields_by_name['tokenBalances']._serialized_options = b'\222?\002\020\001'
- _SHARDID._serialized_start=94
- _SHARDID._serialized_end=121
- _REALMID._serialized_start=123
- _REALMID._serialized_end=168
- _ACCOUNTID._serialized_start=170
- _ACCOUNTID._serialized_end=274
- _FILEID._serialized_start=276
- _FILEID._serialized_end=337
- _CONTRACTID._serialized_start=339
- _CONTRACTID._serialized_end=452
- _TRANSACTIONID._serialized_start=455
- _TRANSACTIONID._serialized_end=592
- _ACCOUNTAMOUNT._serialized_start=594
- _ACCOUNTAMOUNT._serialized_end=684
- _TRANSFERLIST._serialized_start=686
- _TRANSFERLIST._serialized_end=754
- _NFTTRANSFER._serialized_start=757
- _NFTTRANSFER._serialized_end=903
- _TOKENTRANSFERLIST._serialized_start=906
- _TOKENTRANSFERLIST._serialized_end=1104
- _FRACTION._serialized_start=1106
- _FRACTION._serialized_end=1156
- _TOKENID._serialized_start=1158
- _TOKENID._serialized_end=1221
- _KEY._serialized_start=1224
- _KEY._serialized_end=1448
- _THRESHOLDKEY._serialized_start=1450
- _THRESHOLDKEY._serialized_end=1514
- _KEYLIST._serialized_start=1516
- _KEYLIST._serialized_end=1559
- _TOKENBALANCE._serialized_start=1561
- _TOKENBALANCE._serialized_end=1644
- _TOKENBALANCES._serialized_start=1646
- _TOKENBALANCES._serialized_end=1713
- _TOKENASSOCIATION._serialized_start=1715
- _TOKENASSOCIATION._serialized_end=1807
- _STAKINGINFO._serialized_start=1810
- _STAKINGINFO._serialized_end=2027
-# @@protoc_insertion_point(module_scope)
diff --git a/proto/contract_call.pb.c b/proto/contract_call.pb.c
index 07168a52..773718cf 100644
--- a/proto/contract_call.pb.c
+++ b/proto/contract_call.pb.c
@@ -1,5 +1,5 @@
/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#include "proto/contract_call.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
diff --git a/proto/contract_call.pb.h b/proto/contract_call.pb.h
index 0b5e8e97..5e2d64ef 100644
--- a/proto/contract_call.pb.h
+++ b/proto/contract_call.pb.h
@@ -1,5 +1,5 @@
/* Automatically generated nanopb header */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#ifndef PB_HEDERA_PROTO_CONTRACT_CALL_PB_H_INCLUDED
#define PB_HEDERA_PROTO_CONTRACT_CALL_PB_H_INCLUDED
@@ -25,20 +25,20 @@ typedef PB_BYTES_ARRAY_T(512) Hedera_ContractCallTransactionBody_functionParamet
### Block Stream Effects
A `CallContractOutput` message SHALL be emitted for each transaction. */
-typedef struct _Hedera_ContractCallTransactionBody {
+typedef struct _Hedera_ContractCallTransactionBody {
/* *
The ID of a smart contract to call. */
bool has_contractID;
- Hedera_ContractID contractID;
+ Hedera_ContractID contractID;
/* Gas is defined as int64 in upstream proto (we could have negative gas); the app rejects negative values
Reference: https://github.com/hashgraph/hedera-protobufs/blob/8c27786cec93abab974309074feaef9b48a695b7/services/contract_call.proto#L70 */
- int64_t gas;
+ int64_t gas;
/* *
An amount of tinybar sent via this contract call.
If this is non-zero, the function MUST be `payable`. */
- int64_t amount;
- Hedera_ContractCallTransactionBody_functionParameters_t functionParameters;
+ int64_t amount;
+ Hedera_ContractCallTransactionBody_functionParameters_t functionParameters;
} Hedera_ContractCallTransactionBody;
@@ -72,6 +72,7 @@ extern const pb_msgdesc_t Hedera_ContractCallTransactionBody_msg;
#define Hedera_ContractCallTransactionBody_fields &Hedera_ContractCallTransactionBody_msg
/* Maximum encoded size of messages (where known) */
+#define HEDERA_PROTO_CONTRACT_CALL_PB_H_MAX_SIZE Hedera_ContractCallTransactionBody_size
#define Hedera_ContractCallTransactionBody_size 583
#ifdef __cplusplus
diff --git a/proto/contract_call_pb2.py b/proto/contract_call_pb2.py
deleted file mode 100644
index c432100a..00000000
--- a/proto/contract_call_pb2.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/contract_call.proto
-"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-import nanopb_pb2 as nanopb__pb2
-from proto import basic_types_pb2 as proto_dot_basic__types__pb2
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19proto/contract_call.proto\x12\x06Hedera\x1a\x0cnanopb.proto\x1a\x17proto/basic_types.proto\"\x86\x01\n\x1b\x43ontractCallTransactionBody\x12&\n\ncontractID\x18\x01 \x01(\x0b\x32\x12.Hedera.ContractID\x12\x0b\n\x03gas\x18\x02 \x01(\x03\x12\x0e\n\x06\x61mount\x18\x03 \x01(\x03\x12\"\n\x12\x66unctionParameters\x18\x04 \x01(\x0c\x42\x06\x92?\x03\x08\x80\x04\x42&\n\"com.hederahashgraph.api.proto.javaP\x01\x62\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.contract_call_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
-
- DESCRIPTOR._options = None
- DESCRIPTOR._serialized_options = b'\n\"com.hederahashgraph.api.proto.javaP\001'
- _CONTRACTCALLTRANSACTIONBODY.fields_by_name['functionParameters']._options = None
- _CONTRACTCALLTRANSACTIONBODY.fields_by_name['functionParameters']._serialized_options = b'\222?\003\010\200\004'
- _CONTRACTCALLTRANSACTIONBODY._serialized_start=77
- _CONTRACTCALLTRANSACTIONBODY._serialized_end=211
-# @@protoc_insertion_point(module_scope)
diff --git a/proto/crypto_create.pb.c b/proto/crypto_create.pb.c
index 83ac15ef..222adadf 100644
--- a/proto/crypto_create.pb.c
+++ b/proto/crypto_create.pb.c
@@ -1,5 +1,5 @@
/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#include "proto/crypto_create.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
diff --git a/proto/crypto_create.pb.h b/proto/crypto_create.pb.h
index a5798bbe..9e275062 100644
--- a/proto/crypto_create.pb.h
+++ b/proto/crypto_create.pb.h
@@ -1,5 +1,5 @@
/* Automatically generated nanopb header */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#ifndef PB_HEDERA_PROTO_CRYPTO_CREATE_PB_H_INCLUDED
#define PB_HEDERA_PROTO_CRYPTO_CREATE_PB_H_INCLUDED
@@ -46,16 +46,16 @@
The current API ignores shardID, realmID, and newRealmAdminKey, and creates
everything in shard 0 and realm 0, with a null key. Future versions of the
API will support multiple realms and multiple shards. */
-typedef struct _Hedera_CryptoCreateTransactionBody {
+typedef struct _Hedera_CryptoCreateTransactionBody {
/* *
The key that must sign each transfer out of the account. If
receiverSigRequired is true, then it must also sign any transfer into the
account. */
bool has_key;
- Hedera_Key key;
+ Hedera_Key key;
/* *
The initial number of tinybars to put into the account */
- uint64_t initialBalance;
+ uint64_t initialBalance;
/* *
[Deprecated] ID of the account to which this account is proxy staked. If
proxyAccountID is null, or is an invalid account, or is an account that
@@ -64,56 +64,59 @@ typedef struct _Hedera_CryptoCreateTransactionBody {
account refuses to accept proxy staking , or if it is not currently
running a node, then it will behave as if proxyAccountID was null. */
bool has_proxyAccountID;
- Hedera_AccountID proxyAccountID;
+ Hedera_AccountID proxyAccountID;
/* *
[Deprecated]. The threshold amount (in tinybars) for which an account
record is created for any send/withdraw transaction */
- uint64_t sendRecordThreshold;
+ uint64_t sendRecordThreshold;
/* *
[Deprecated]. The threshold amount (in tinybars) for which an account
record is created for any receive/deposit transaction */
- uint64_t receiveRecordThreshold;
+ uint64_t receiveRecordThreshold;
/* *
If true, this account's key must sign any transaction depositing into this
account (in addition to all withdrawals) */
- bool receiverSigRequired;
+ bool receiverSigRequired;
/* *
The account is charged to extend its expiration date every this many
seconds. If it doesn't have enough balance, it extends as long as
possible. If it is empty when it expires, then it is deleted. */
bool has_autoRenewPeriod;
- Hedera_Duration autoRenewPeriod;
+ Hedera_Duration autoRenewPeriod;
/* *
The shard in which this account is created */
bool has_shardID;
- Hedera_ShardID shardID;
+ Hedera_ShardID shardID;
/* *
The realm in which this account is created (leave this null to create a
new realm) */
bool has_realmID;
- Hedera_RealmID realmID;
+ Hedera_RealmID realmID;
/* *
If realmID is null, then this the admin key for the new realm that will be
created */
bool has_newRealmAdminKey;
- Hedera_Key newRealmAdminKey;
+ Hedera_Key newRealmAdminKey;
/* *
The memo associated with the account (UTF-8 encoding max 100 bytes) */
- char memo[100];
+ char memo[100];
/* *
The maximum number of tokens that an Account can be implicitly associated
with. Defaults to 0 and up to a maximum value of 1000. */
- int32_t max_automatic_token_associations;
- /* *
- ID of the account to which this account is staking. */
+ int32_t max_automatic_token_associations;
pb_size_t which_staked_id;
union {
+ /* *
+ ID of the account to which this account is staking. */
Hedera_AccountID staked_account_id;
+ /* *
+ ID of the node this account is staked to. */
int64_t staked_node_id;
- } staked_id;
+ } staked_id;
/* *
- ID of the node this account is staked to. */
- bool decline_reward;
+ If true, the account declines receiving a staking reward. The default
+ value is false. */
+ bool decline_reward;
} Hedera_CryptoCreateTransactionBody;
@@ -175,6 +178,7 @@ extern const pb_msgdesc_t Hedera_CryptoCreateTransactionBody_msg;
#define Hedera_CryptoCreateTransactionBody_fields &Hedera_CryptoCreateTransactionBody_msg
/* Maximum encoded size of messages (where known) */
+#define HEDERA_PROTO_CRYPTO_CREATE_PB_H_MAX_SIZE Hedera_CryptoCreateTransactionBody_size
#define Hedera_CryptoCreateTransactionBody_size 412
#ifdef __cplusplus
diff --git a/proto/crypto_create_pb2.py b/proto/crypto_create_pb2.py
deleted file mode 100644
index 6f2009d0..00000000
--- a/proto/crypto_create_pb2.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/crypto_create.proto
-"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-import nanopb_pb2 as nanopb__pb2
-from proto import basic_types_pb2 as proto_dot_basic__types__pb2
-from proto import duration_pb2 as proto_dot_duration__pb2
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19proto/crypto_create.proto\x12\x06Hedera\x1a\x0cnanopb.proto\x1a\x17proto/basic_types.proto\x1a\x14proto/duration.proto\"\xa4\x04\n\x1b\x43ryptoCreateTransactionBody\x12\x18\n\x03key\x18\x01 \x01(\x0b\x32\x0b.Hedera.Key\x12\x16\n\x0einitialBalance\x18\x02 \x01(\x04\x12-\n\x0eproxyAccountID\x18\x03 \x01(\x0b\x32\x11.Hedera.AccountIDB\x02\x18\x01\x12\x1f\n\x13sendRecordThreshold\x18\x06 \x01(\x04\x42\x02\x18\x01\x12\"\n\x16receiveRecordThreshold\x18\x07 \x01(\x04\x42\x02\x18\x01\x12\x1b\n\x13receiverSigRequired\x18\x08 \x01(\x08\x12)\n\x0f\x61utoRenewPeriod\x18\t \x01(\x0b\x32\x10.Hedera.Duration\x12 \n\x07shardID\x18\n \x01(\x0b\x32\x0f.Hedera.ShardID\x12 \n\x07realmID\x18\x0b \x01(\x0b\x32\x0f.Hedera.RealmID\x12%\n\x10newRealmAdminKey\x18\x0c \x01(\x0b\x32\x0b.Hedera.Key\x12\x13\n\x04memo\x18\r \x01(\tB\x05\x92?\x02\x08\x64\x12(\n max_automatic_token_associations\x18\x0e \x01(\x05\x12.\n\x11staked_account_id\x18\x0f \x01(\x0b\x32\x11.Hedera.AccountIDH\x00\x12\x18\n\x0estaked_node_id\x18\x10 \x01(\x03H\x00\x12\x16\n\x0e\x64\x65\x63line_reward\x18\x11 \x01(\x08\x42\x0b\n\tstaked_idb\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.crypto_create_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
-
- DESCRIPTOR._options = None
- _CRYPTOCREATETRANSACTIONBODY.fields_by_name['proxyAccountID']._options = None
- _CRYPTOCREATETRANSACTIONBODY.fields_by_name['proxyAccountID']._serialized_options = b'\030\001'
- _CRYPTOCREATETRANSACTIONBODY.fields_by_name['sendRecordThreshold']._options = None
- _CRYPTOCREATETRANSACTIONBODY.fields_by_name['sendRecordThreshold']._serialized_options = b'\030\001'
- _CRYPTOCREATETRANSACTIONBODY.fields_by_name['receiveRecordThreshold']._options = None
- _CRYPTOCREATETRANSACTIONBODY.fields_by_name['receiveRecordThreshold']._serialized_options = b'\030\001'
- _CRYPTOCREATETRANSACTIONBODY.fields_by_name['memo']._options = None
- _CRYPTOCREATETRANSACTIONBODY.fields_by_name['memo']._serialized_options = b'\222?\002\010d'
- _CRYPTOCREATETRANSACTIONBODY._serialized_start=99
- _CRYPTOCREATETRANSACTIONBODY._serialized_end=647
-# @@protoc_insertion_point(module_scope)
diff --git a/proto/crypto_transfer.pb.c b/proto/crypto_transfer.pb.c
index 4dc1ac7c..b4db1e92 100644
--- a/proto/crypto_transfer.pb.c
+++ b/proto/crypto_transfer.pb.c
@@ -1,5 +1,5 @@
/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#include "proto/crypto_transfer.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
diff --git a/proto/crypto_transfer.pb.h b/proto/crypto_transfer.pb.h
index 20469e7c..4003af5b 100644
--- a/proto/crypto_transfer.pb.h
+++ b/proto/crypto_transfer.pb.h
@@ -1,5 +1,5 @@
/* Automatically generated nanopb header */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#ifndef PB_HEDERA_PROTO_CRYPTO_TRANSFER_PB_H_INCLUDED
#define PB_HEDERA_PROTO_CRYPTO_TRANSFER_PB_H_INCLUDED
@@ -24,11 +24,11 @@
sending accounts, and for any receiving accounts that have
receiverSigRequired == true. The signatures are in the same order as the
accounts, skipping those accounts that don't need a signature. */
-typedef struct _Hedera_CryptoTransferTransactionBody {
+typedef struct _Hedera_CryptoTransferTransactionBody {
/* *
The desired hbar balance adjustments */
bool has_transfers;
- Hedera_TransferList transfers;
+ Hedera_TransferList transfers;
/* *
The desired token unit balance adjustments; if any custom fees are
assessed, the ledger will try to deduct them from the payer of this
@@ -36,7 +36,7 @@ typedef struct _Hedera_CryptoTransferTransactionBody {
INSUFFICIENT_PAYER_BALANCE_FOR_CUSTOM_FEE if this is not possible
Limited to 1 here */
pb_size_t tokenTransfers_count;
- Hedera_TokenTransferList tokenTransfers[1];
+ Hedera_TokenTransferList tokenTransfers[1];
} Hedera_CryptoTransferTransactionBody;
@@ -67,6 +67,7 @@ extern const pb_msgdesc_t Hedera_CryptoTransferTransactionBody_msg;
#define Hedera_CryptoTransferTransactionBody_fields &Hedera_CryptoTransferTransactionBody_msg
/* Maximum encoded size of messages (where known) */
+#define HEDERA_PROTO_CRYPTO_TRANSFER_PB_H_MAX_SIZE Hedera_CryptoTransferTransactionBody_size
#define Hedera_CryptoTransferTransactionBody_size 475
#ifdef __cplusplus
diff --git a/proto/crypto_transfer_pb2.py b/proto/crypto_transfer_pb2.py
deleted file mode 100644
index 9c40c036..00000000
--- a/proto/crypto_transfer_pb2.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/crypto_transfer.proto
-"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-import nanopb_pb2 as nanopb__pb2
-from proto import basic_types_pb2 as proto_dot_basic__types__pb2
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1bproto/crypto_transfer.proto\x12\x06Hedera\x1a\x0cnanopb.proto\x1a\x17proto/basic_types.proto\"\x82\x01\n\x1d\x43ryptoTransferTransactionBody\x12\'\n\ttransfers\x18\x01 \x01(\x0b\x32\x14.Hedera.TransferList\x12\x38\n\x0etokenTransfers\x18\x02 \x03(\x0b\x32\x19.Hedera.TokenTransferListB\x05\x92?\x02\x10\x01\x62\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.crypto_transfer_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
-
- DESCRIPTOR._options = None
- _CRYPTOTRANSFERTRANSACTIONBODY.fields_by_name['tokenTransfers']._options = None
- _CRYPTOTRANSFERTRANSACTIONBODY.fields_by_name['tokenTransfers']._serialized_options = b'\222?\002\020\001'
- _CRYPTOTRANSFERTRANSACTIONBODY._serialized_start=79
- _CRYPTOTRANSFERTRANSACTIONBODY._serialized_end=209
-# @@protoc_insertion_point(module_scope)
diff --git a/proto/crypto_update.pb.c b/proto/crypto_update.pb.c
index 22fed33a..6ed979c5 100644
--- a/proto/crypto_update.pb.c
+++ b/proto/crypto_update.pb.c
@@ -1,5 +1,5 @@
/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#include "proto/crypto_update.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
diff --git a/proto/crypto_update.pb.h b/proto/crypto_update.pb.h
index 3b8719d6..0a2aa4d5 100644
--- a/proto/crypto_update.pb.h
+++ b/proto/crypto_update.pb.h
@@ -1,5 +1,5 @@
/* Automatically generated nanopb header */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#ifndef PB_HEDERA_PROTO_CRYPTO_UPDATE_PB_H_INCLUDED
#define PB_HEDERA_PROTO_CRYPTO_UPDATE_PB_H_INCLUDED
@@ -22,15 +22,15 @@
The old key must sign for security. The new key must sign as a safeguard to
avoid accidentally changing to an invalid key, and then having no way to
recover. */
-typedef struct _Hedera_CryptoUpdateTransactionBody {
+typedef struct _Hedera_CryptoUpdateTransactionBody {
/* *
The account ID which is being updated in this transaction */
bool has_accountIDToUpdate;
- Hedera_AccountID accountIDToUpdate;
+ Hedera_AccountID accountIDToUpdate;
/* *
The new key */
bool has_key;
- Hedera_Key key;
+ Hedera_Key key;
/* *
[Deprecated] ID of the account to which this account is proxy staked. If
proxyAccountID is null, or is an invalid account, or is an account that
@@ -39,70 +39,83 @@ typedef struct _Hedera_CryptoUpdateTransactionBody {
account refuses to accept proxy staking , or if it is not currently running
a node, then it will behave as if proxyAccountID was null. */
bool has_proxyAccountID;
- Hedera_AccountID proxyAccountID;
+ Hedera_AccountID proxyAccountID;
/* *
[Deprecated]. Payments earned from proxy staking are shared between the
node and this account, with proxyFraction / 10000 going to this account */
- int32_t proxyFraction;
- /* *
- [Deprecated]. The new threshold amount (in tinybars) for which an account
- record is created for any send/withdraw transaction */
+ int32_t proxyFraction;
pb_size_t which_sendRecordThresholdField;
union {
+ /* *
+ [Deprecated]. The new threshold amount (in tinybars) for which an account
+ record is created for any send/withdraw transaction */
uint64_t sendRecordThreshold;
+ /* *
+ [Deprecated]. The new threshold amount (in tinybars) for which an account
+ record is created for any send/withdraw transaction */
Hedera_UInt64Value sendRecordThresholdWrapper;
- } sendRecordThresholdField;
- /* *
- [Deprecated]. The new threshold amount (in tinybars) for which an account
- record is created for any send/withdraw transaction */
+ } sendRecordThresholdField;
pb_size_t which_receiveRecordThresholdField;
union {
+ /* *
+ [Deprecated]. The new threshold amount (in tinybars) for which an account
+ record is created for any receive/deposit transaction. */
uint64_t receiveRecordThreshold;
+ /* *
+ [Deprecated]. The new threshold amount (in tinybars) for which an account
+ record is created for any receive/deposit transaction. */
Hedera_UInt64Value receiveRecordThresholdWrapper;
- } receiveRecordThresholdField;
- /* *
- [Deprecated]. The new threshold amount (in tinybars) for which an account
- record is created for any receive/deposit transaction. */
- bool has_autoRenewPeriod;
- Hedera_Duration autoRenewPeriod;
- /* *
- [Deprecated]. The new threshold amount (in tinybars) for which an account
- record is created for any receive/deposit transaction. */
- bool has_expirationTime;
- Hedera_Timestamp expirationTime;
+ } receiveRecordThresholdField;
/* *
The duration in which it will automatically extend the expiration period.
If it doesn't have enough balance, it extends as long as possible. If it is
empty when it expires, then it is deleted. */
+ bool has_autoRenewPeriod;
+ Hedera_Duration autoRenewPeriod;
+ /* *
+ The new expiration time to extend to (ignored if equal to or before the
+ current one) */
+ bool has_expirationTime;
+ Hedera_Timestamp expirationTime;
pb_size_t which_receiverSigRequiredField;
union {
+ /* *
+ [Deprecated] Do NOT use this field to set a false value because the
+ server cannot distinguish from the default value. Use
+ receiverSigRequiredWrapper field for this purpose. */
bool receiverSigRequired;
+ /* *
+ If true, this account's key must sign any transaction depositing into
+ this account (in addition to all withdrawals) */
Hedera_BoolValue receiverSigRequiredWrapper;
- } receiverSigRequiredField;
+ } receiverSigRequiredField;
/* *
- The new expiration time to extend to (ignored if equal to or before the
- current one) */
+ If set, the new memo to be associated with the account (UTF-8 encoding max
+ 100 bytes) */
bool has_memo;
- Hedera_StringValue memo;
+ Hedera_StringValue memo;
/* *
- [Deprecated] Do NOT use this field to set a false value because the
- server cannot distinguish from the default value. Use
- receiverSigRequiredWrapper field for this purpose. */
+ The maximum number of tokens that an Account can be implicitly associated
+ with. Up to a 1000 including implicit and explicit associations. */
bool has_max_automatic_token_associations;
- Hedera_Int32Value max_automatic_token_associations;
- /* *
- If true, this account's key must sign any transaction depositing into
- this account (in addition to all withdrawals) */
+ Hedera_Int32Value max_automatic_token_associations;
pb_size_t which_staked_id;
union {
+ /* *
+ ID of the new account to which this account is staking. If set to the
+ sentinel 0.0.0 AccountID, this field removes this account's
+ staked account ID. */
Hedera_AccountID staked_account_id;
+ /* *
+ ID of the new node this account is staked to. If set to the sentinel
+ -1, this field removes this account's staked node ID. */
int64_t staked_node_id;
- } staked_id;
+ } staked_id;
/* *
- If set, the new memo to be associated with the account (UTF-8 encoding max
- 100 bytes) */
+ If true, the account declines receiving a staking reward. The default value
+ is false. */
bool has_decline_reward;
- Hedera_BoolValue decline_reward;
+ Hedera_BoolValue decline_reward;
} Hedera_CryptoUpdateTransactionBody;
@@ -174,6 +187,7 @@ extern const pb_msgdesc_t Hedera_CryptoUpdateTransactionBody_msg;
/* Maximum encoded size of messages (where known) */
#if defined(Hedera_StringValue_size)
+#define HEDERA_PROTO_CRYPTO_UPDATE_PB_H_MAX_SIZE Hedera_CryptoUpdateTransactionBody_size
#define Hedera_CryptoUpdateTransactionBody_size (325 + Hedera_StringValue_size)
#endif
diff --git a/proto/crypto_update_pb2.py b/proto/crypto_update_pb2.py
deleted file mode 100644
index 1936fb2f..00000000
--- a/proto/crypto_update_pb2.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/crypto_update.proto
-"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-import nanopb_pb2 as nanopb__pb2
-from proto import basic_types_pb2 as proto_dot_basic__types__pb2
-from proto import duration_pb2 as proto_dot_duration__pb2
-from proto import timestamp_pb2 as proto_dot_timestamp__pb2
-from proto import wrappers_pb2 as proto_dot_wrappers__pb2
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19proto/crypto_update.proto\x12\x06Hedera\x1a\x0cnanopb.proto\x1a\x17proto/basic_types.proto\x1a\x14proto/duration.proto\x1a\x15proto/timestamp.proto\x1a\x14proto/wrappers.proto\"\xe5\x06\n\x1b\x43ryptoUpdateTransactionBody\x12,\n\x11\x61\x63\x63ountIDToUpdate\x18\x02 \x01(\x0b\x32\x11.Hedera.AccountID\x12\x18\n\x03key\x18\x03 \x01(\x0b\x32\x0b.Hedera.Key\x12-\n\x0eproxyAccountID\x18\x04 \x01(\x0b\x32\x11.Hedera.AccountIDB\x02\x18\x01\x12\x19\n\rproxyFraction\x18\x05 \x01(\x05\x42\x02\x18\x01\x12!\n\x13sendRecordThreshold\x18\x06 \x01(\x04\x42\x02\x18\x01H\x00\x12=\n\x1asendRecordThresholdWrapper\x18\x0b \x01(\x0b\x32\x13.Hedera.UInt64ValueB\x02\x18\x01H\x00\x12$\n\x16receiveRecordThreshold\x18\x07 \x01(\x04\x42\x02\x18\x01H\x01\x12@\n\x1dreceiveRecordThresholdWrapper\x18\x0c \x01(\x0b\x32\x13.Hedera.UInt64ValueB\x02\x18\x01H\x01\x12)\n\x0f\x61utoRenewPeriod\x18\x08 \x01(\x0b\x32\x10.Hedera.Duration\x12)\n\x0e\x65xpirationTime\x18\t \x01(\x0b\x32\x11.Hedera.Timestamp\x12!\n\x13receiverSigRequired\x18\n \x01(\x08\x42\x02\x18\x01H\x02\x12\x37\n\x1areceiverSigRequiredWrapper\x18\r \x01(\x0b\x32\x11.Hedera.BoolValueH\x02\x12!\n\x04memo\x18\x0e \x01(\x0b\x32\x13.Hedera.StringValue\x12<\n max_automatic_token_associations\x18\x0f \x01(\x0b\x32\x12.Hedera.Int32Value\x12.\n\x11staked_account_id\x18\x10 \x01(\x0b\x32\x11.Hedera.AccountIDH\x03\x12\x18\n\x0estaked_node_id\x18\x11 \x01(\x03H\x03\x12)\n\x0e\x64\x65\x63line_reward\x18\x12 \x01(\x0b\x32\x11.Hedera.BoolValueB\x1a\n\x18sendRecordThresholdFieldB\x1d\n\x1breceiveRecordThresholdFieldB\x1a\n\x18receiverSigRequiredFieldB\x0b\n\tstaked_idb\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.crypto_update_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
-
- DESCRIPTOR._options = None
- _CRYPTOUPDATETRANSACTIONBODY.fields_by_name['proxyAccountID']._options = None
- _CRYPTOUPDATETRANSACTIONBODY.fields_by_name['proxyAccountID']._serialized_options = b'\030\001'
- _CRYPTOUPDATETRANSACTIONBODY.fields_by_name['proxyFraction']._options = None
- _CRYPTOUPDATETRANSACTIONBODY.fields_by_name['proxyFraction']._serialized_options = b'\030\001'
- _CRYPTOUPDATETRANSACTIONBODY.fields_by_name['sendRecordThreshold']._options = None
- _CRYPTOUPDATETRANSACTIONBODY.fields_by_name['sendRecordThreshold']._serialized_options = b'\030\001'
- _CRYPTOUPDATETRANSACTIONBODY.fields_by_name['sendRecordThresholdWrapper']._options = None
- _CRYPTOUPDATETRANSACTIONBODY.fields_by_name['sendRecordThresholdWrapper']._serialized_options = b'\030\001'
- _CRYPTOUPDATETRANSACTIONBODY.fields_by_name['receiveRecordThreshold']._options = None
- _CRYPTOUPDATETRANSACTIONBODY.fields_by_name['receiveRecordThreshold']._serialized_options = b'\030\001'
- _CRYPTOUPDATETRANSACTIONBODY.fields_by_name['receiveRecordThresholdWrapper']._options = None
- _CRYPTOUPDATETRANSACTIONBODY.fields_by_name['receiveRecordThresholdWrapper']._serialized_options = b'\030\001'
- _CRYPTOUPDATETRANSACTIONBODY.fields_by_name['receiverSigRequired']._options = None
- _CRYPTOUPDATETRANSACTIONBODY.fields_by_name['receiverSigRequired']._serialized_options = b'\030\001'
- _CRYPTOUPDATETRANSACTIONBODY._serialized_start=144
- _CRYPTOUPDATETRANSACTIONBODY._serialized_end=1013
-# @@protoc_insertion_point(module_scope)
diff --git a/proto/duration.pb.c b/proto/duration.pb.c
index 40cc71e0..b3f77ca9 100644
--- a/proto/duration.pb.c
+++ b/proto/duration.pb.c
@@ -1,5 +1,5 @@
/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#include "proto/duration.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
diff --git a/proto/duration.pb.h b/proto/duration.pb.h
index e0a1a459..75dfe013 100644
--- a/proto/duration.pb.h
+++ b/proto/duration.pb.h
@@ -1,5 +1,5 @@
/* Automatically generated nanopb header */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#ifndef PB_HEDERA_PROTO_DURATION_PB_H_INCLUDED
#define PB_HEDERA_PROTO_DURATION_PB_H_INCLUDED
@@ -12,10 +12,10 @@
/* Struct definitions */
/* *
A length of time in seconds. */
-typedef struct _Hedera_Duration {
+typedef struct _Hedera_Duration {
/* *
The number of seconds */
- int64_t seconds;
+ int64_t seconds;
} Hedera_Duration;
@@ -42,6 +42,7 @@ extern const pb_msgdesc_t Hedera_Duration_msg;
#define Hedera_Duration_fields &Hedera_Duration_msg
/* Maximum encoded size of messages (where known) */
+#define HEDERA_PROTO_DURATION_PB_H_MAX_SIZE Hedera_Duration_size
#define Hedera_Duration_size 11
#ifdef __cplusplus
diff --git a/proto/duration_pb2.py b/proto/duration_pb2.py
deleted file mode 100644
index 7dae9331..00000000
--- a/proto/duration_pb2.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/duration.proto
-"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-import nanopb_pb2 as nanopb__pb2
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14proto/duration.proto\x12\x06Hedera\x1a\x0cnanopb.proto\"\x1b\n\x08\x44uration\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x62\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.duration_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
-
- DESCRIPTOR._options = None
- _DURATION._serialized_start=46
- _DURATION._serialized_end=73
-# @@protoc_insertion_point(module_scope)
diff --git a/proto/timestamp.pb.c b/proto/timestamp.pb.c
index 764dbc10..0be6893a 100644
--- a/proto/timestamp.pb.c
+++ b/proto/timestamp.pb.c
@@ -1,5 +1,5 @@
/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#include "proto/timestamp.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
diff --git a/proto/timestamp.pb.h b/proto/timestamp.pb.h
index b0e4ccd1..09314e2b 100644
--- a/proto/timestamp.pb.h
+++ b/proto/timestamp.pb.h
@@ -1,5 +1,5 @@
/* Automatically generated nanopb header */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#ifndef PB_HEDERA_PROTO_TIMESTAMP_PB_H_INCLUDED
#define PB_HEDERA_PROTO_TIMESTAMP_PB_H_INCLUDED
@@ -15,21 +15,21 @@
Timestamp.proto (see the comments in
https://github.com/google/protobuf
/blob/master/src/google/protobuf/timestamp.proto) */
-typedef struct _Hedera_Timestamp {
+typedef struct _Hedera_Timestamp {
/* *
Number of complete seconds since the start of the epoch */
- int64_t seconds;
+ int64_t seconds;
/* *
Number of nanoseconds since the start of the last second */
- int32_t nanos;
+ int32_t nanos;
} Hedera_Timestamp;
/* *
An exact date and time, with a resolution of one second (no nanoseconds). */
-typedef struct _Hedera_TimestampSeconds {
+typedef struct _Hedera_TimestampSeconds {
/* *
Number of complete seconds since the start of the epoch */
- int64_t seconds;
+ int64_t seconds;
} Hedera_TimestampSeconds;
@@ -68,6 +68,7 @@ extern const pb_msgdesc_t Hedera_TimestampSeconds_msg;
#define Hedera_TimestampSeconds_fields &Hedera_TimestampSeconds_msg
/* Maximum encoded size of messages (where known) */
+#define HEDERA_PROTO_TIMESTAMP_PB_H_MAX_SIZE Hedera_Timestamp_size
#define Hedera_TimestampSeconds_size 11
#define Hedera_Timestamp_size 22
diff --git a/proto/timestamp_pb2.py b/proto/timestamp_pb2.py
deleted file mode 100644
index ee8d3d1e..00000000
--- a/proto/timestamp_pb2.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/timestamp.proto
-"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-import nanopb_pb2 as nanopb__pb2
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15proto/timestamp.proto\x12\x06Hedera\x1a\x0cnanopb.proto\"+\n\tTimestamp\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\"#\n\x10TimestampSeconds\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x62\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.timestamp_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
-
- DESCRIPTOR._options = None
- _TIMESTAMP._serialized_start=47
- _TIMESTAMP._serialized_end=90
- _TIMESTAMPSECONDS._serialized_start=92
- _TIMESTAMPSECONDS._serialized_end=127
-# @@protoc_insertion_point(module_scope)
diff --git a/proto/token_associate.pb.c b/proto/token_associate.pb.c
index eacbb599..c0268924 100644
--- a/proto/token_associate.pb.c
+++ b/proto/token_associate.pb.c
@@ -1,5 +1,5 @@
/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#include "proto/token_associate.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
diff --git a/proto/token_associate.pb.h b/proto/token_associate.pb.h
index 0ce4ea21..caea82be 100644
--- a/proto/token_associate.pb.h
+++ b/proto/token_associate.pb.h
@@ -1,5 +1,5 @@
/* Automatically generated nanopb header */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#ifndef PB_HEDERA_PROTO_TOKEN_ASSOCIATE_PB_H_INCLUDED
#define PB_HEDERA_PROTO_TOKEN_ASSOCIATE_PB_H_INCLUDED
@@ -25,18 +25,18 @@
account, the transaction will resolve to TOKENS_PER_ACCOUNT_LIMIT_EXCEEDED.
On success, associations between the provided account and tokens are made and
the account is ready to interact with the tokens. */
-typedef struct _Hedera_TokenAssociateTransactionBody {
+typedef struct _Hedera_TokenAssociateTransactionBody {
/* *
The account to be associated with the provided tokens */
bool has_account;
- Hedera_AccountID account;
+ Hedera_AccountID account;
/* *
The tokens to be associated with the provided account. In the case of
NON_FUNGIBLE_UNIQUE Type, once an account is associated, it can hold any
number of NFTs (serial numbers) of that token type
Limited to 1 here (no access to malloc for dynamic decode!) */
pb_size_t tokens_count;
- Hedera_TokenID tokens[1];
+ Hedera_TokenID tokens[1];
} Hedera_TokenAssociateTransactionBody;
@@ -67,6 +67,7 @@ extern const pb_msgdesc_t Hedera_TokenAssociateTransactionBody_msg;
#define Hedera_TokenAssociateTransactionBody_fields &Hedera_TokenAssociateTransactionBody_msg
/* Maximum encoded size of messages (where known) */
+#define HEDERA_PROTO_TOKEN_ASSOCIATE_PB_H_MAX_SIZE Hedera_TokenAssociateTransactionBody_size
#define Hedera_TokenAssociateTransactionBody_size 94
#ifdef __cplusplus
diff --git a/proto/token_associate_pb2.py b/proto/token_associate_pb2.py
deleted file mode 100644
index 21ee64af..00000000
--- a/proto/token_associate_pb2.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/token_associate.proto
-"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-import nanopb_pb2 as nanopb__pb2
-from proto import basic_types_pb2 as proto_dot_basic__types__pb2
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1bproto/token_associate.proto\x12\x06Hedera\x1a\x0cnanopb.proto\x1a\x17proto/basic_types.proto\"k\n\x1dTokenAssociateTransactionBody\x12\"\n\x07\x61\x63\x63ount\x18\x01 \x01(\x0b\x32\x11.Hedera.AccountID\x12&\n\x06tokens\x18\x02 \x03(\x0b\x32\x0f.Hedera.TokenIDB\x05\x92?\x02\x10\x01\x62\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.token_associate_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
-
- DESCRIPTOR._options = None
- _TOKENASSOCIATETRANSACTIONBODY.fields_by_name['tokens']._options = None
- _TOKENASSOCIATETRANSACTIONBODY.fields_by_name['tokens']._serialized_options = b'\222?\002\020\001'
- _TOKENASSOCIATETRANSACTIONBODY._serialized_start=78
- _TOKENASSOCIATETRANSACTIONBODY._serialized_end=185
-# @@protoc_insertion_point(module_scope)
diff --git a/proto/token_burn.pb.c b/proto/token_burn.pb.c
index 7bc9314b..84d76f4c 100644
--- a/proto/token_burn.pb.c
+++ b/proto/token_burn.pb.c
@@ -1,5 +1,5 @@
/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#include "proto/token_burn.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
diff --git a/proto/token_burn.pb.h b/proto/token_burn.pb.h
index bddcacf5..7150444f 100644
--- a/proto/token_burn.pb.h
+++ b/proto/token_burn.pb.h
@@ -1,5 +1,5 @@
/* Automatically generated nanopb header */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#ifndef PB_HEDERA_PROTO_TOKEN_BURN_PB_H_INCLUDED
#define PB_HEDERA_PROTO_TOKEN_BURN_PB_H_INCLUDED
@@ -29,24 +29,24 @@
dynamic property, a BATCH_SIZE_LIMIT_EXCEEDED response code will be returned.
If the serialNumbers list contains a non-positive integer as a serial number,
a INVALID_NFT_ID response code will be returned. */
-typedef struct _Hedera_TokenBurnTransactionBody {
+typedef struct _Hedera_TokenBurnTransactionBody {
/* *
The token for which to burn tokens. If token does not exist, transaction
results in INVALID_TOKEN_ID */
bool has_token;
- Hedera_TokenID token;
+ Hedera_TokenID token;
/* *
Applicable to tokens of type FUNGIBLE_COMMON. The amount to burn from the
Treasury Account. Amount must be a positive non-zero number, not bigger
than the token balance of the treasury account (0; balance], represented in
the lowest denomination. */
- uint64_t amount;
+ uint64_t amount;
/* *
Applicable to tokens of type NON_FUNGIBLE_UNIQUE. The list of serial
numbers to be burned.
Limited to 1 here */
pb_size_t serialNumbers_count;
- int64_t serialNumbers[1];
+ int64_t serialNumbers[1];
} Hedera_TokenBurnTransactionBody;
@@ -78,6 +78,7 @@ extern const pb_msgdesc_t Hedera_TokenBurnTransactionBody_msg;
#define Hedera_TokenBurnTransactionBody_fields &Hedera_TokenBurnTransactionBody_msg
/* Maximum encoded size of messages (where known) */
+#define HEDERA_PROTO_TOKEN_BURN_PB_H_MAX_SIZE Hedera_TokenBurnTransactionBody_size
#define Hedera_TokenBurnTransactionBody_size 58
#ifdef __cplusplus
diff --git a/proto/token_burn_pb2.py b/proto/token_burn_pb2.py
deleted file mode 100644
index 05efdf3f..00000000
--- a/proto/token_burn_pb2.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/token_burn.proto
-"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-import nanopb_pb2 as nanopb__pb2
-from proto import basic_types_pb2 as proto_dot_basic__types__pb2
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16proto/token_burn.proto\x12\x06Hedera\x1a\x0cnanopb.proto\x1a\x17proto/basic_types.proto\"h\n\x18TokenBurnTransactionBody\x12\x1e\n\x05token\x18\x01 \x01(\x0b\x32\x0f.Hedera.TokenID\x12\x0e\n\x06\x61mount\x18\x02 \x01(\x04\x12\x1c\n\rserialNumbers\x18\x03 \x03(\x03\x42\x05\x92?\x02\x10\x01\x62\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.token_burn_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
-
- DESCRIPTOR._options = None
- _TOKENBURNTRANSACTIONBODY.fields_by_name['serialNumbers']._options = None
- _TOKENBURNTRANSACTIONBODY.fields_by_name['serialNumbers']._serialized_options = b'\222?\002\020\001'
- _TOKENBURNTRANSACTIONBODY._serialized_start=73
- _TOKENBURNTRANSACTIONBODY._serialized_end=177
-# @@protoc_insertion_point(module_scope)
diff --git a/proto/token_dissociate.pb.c b/proto/token_dissociate.pb.c
index a231ea03..b04f8bb8 100644
--- a/proto/token_dissociate.pb.c
+++ b/proto/token_dissociate.pb.c
@@ -1,5 +1,5 @@
/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#include "proto/token_dissociate.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
diff --git a/proto/token_dissociate.pb.h b/proto/token_dissociate.pb.h
index 0466e158..83c38847 100644
--- a/proto/token_dissociate.pb.h
+++ b/proto/token_dissociate.pb.h
@@ -1,5 +1,5 @@
/* Automatically generated nanopb header */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#ifndef PB_HEDERA_PROTO_TOKEN_DISSOCIATE_PB_H_INCLUDED
#define PB_HEDERA_PROTO_TOKEN_DISSOCIATE_PB_H_INCLUDED
@@ -28,16 +28,16 @@
not disassociate if their token balance is not zero. The transaction
will resolve to TRANSACTION_REQUIRED_ZERO_TOKEN_BALANCES. On success,
associations between the provided account and tokens are removed. */
-typedef struct _Hedera_TokenDissociateTransactionBody {
+typedef struct _Hedera_TokenDissociateTransactionBody {
/* *
The account to be dissociated with the provided tokens */
bool has_account;
- Hedera_AccountID account;
+ Hedera_AccountID account;
/* *
The tokens to be dissociated with the provided account
Limited to 1 here */
pb_size_t tokens_count;
- Hedera_TokenID tokens[1];
+ Hedera_TokenID tokens[1];
} Hedera_TokenDissociateTransactionBody;
@@ -68,6 +68,7 @@ extern const pb_msgdesc_t Hedera_TokenDissociateTransactionBody_msg;
#define Hedera_TokenDissociateTransactionBody_fields &Hedera_TokenDissociateTransactionBody_msg
/* Maximum encoded size of messages (where known) */
+#define HEDERA_PROTO_TOKEN_DISSOCIATE_PB_H_MAX_SIZE Hedera_TokenDissociateTransactionBody_size
#define Hedera_TokenDissociateTransactionBody_size 94
#ifdef __cplusplus
diff --git a/proto/token_dissociate_pb2.py b/proto/token_dissociate_pb2.py
deleted file mode 100644
index b5809df2..00000000
--- a/proto/token_dissociate_pb2.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/token_dissociate.proto
-"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-import nanopb_pb2 as nanopb__pb2
-from proto import basic_types_pb2 as proto_dot_basic__types__pb2
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1cproto/token_dissociate.proto\x12\x06Hedera\x1a\x0cnanopb.proto\x1a\x17proto/basic_types.proto\"l\n\x1eTokenDissociateTransactionBody\x12\"\n\x07\x61\x63\x63ount\x18\x01 \x01(\x0b\x32\x11.Hedera.AccountID\x12&\n\x06tokens\x18\x02 \x03(\x0b\x32\x0f.Hedera.TokenIDB\x05\x92?\x02\x10\x01\x62\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.token_dissociate_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
-
- DESCRIPTOR._options = None
- _TOKENDISSOCIATETRANSACTIONBODY.fields_by_name['tokens']._options = None
- _TOKENDISSOCIATETRANSACTIONBODY.fields_by_name['tokens']._serialized_options = b'\222?\002\020\001'
- _TOKENDISSOCIATETRANSACTIONBODY._serialized_start=79
- _TOKENDISSOCIATETRANSACTIONBODY._serialized_end=187
-# @@protoc_insertion_point(module_scope)
diff --git a/proto/token_mint.pb.c b/proto/token_mint.pb.c
index d729ec09..928d5613 100644
--- a/proto/token_mint.pb.c
+++ b/proto/token_mint.pb.c
@@ -1,5 +1,5 @@
/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#include "proto/token_mint.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
diff --git a/proto/token_mint.pb.h b/proto/token_mint.pb.h
index a0bb6e9a..2e17894e 100644
--- a/proto/token_mint.pb.h
+++ b/proto/token_mint.pb.h
@@ -1,5 +1,5 @@
/* Automatically generated nanopb header */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#ifndef PB_HEDERA_PROTO_TOKEN_MINT_PB_H_INCLUDED
#define PB_HEDERA_PROTO_TOKEN_MINT_PB_H_INCLUDED
@@ -27,24 +27,24 @@ typedef PB_BYTES_ARRAY_T(100) Hedera_TokenMintTransactionBody_metadata_t;
INVALID_TOKEN_MINT_AMOUNT response code will be returned. If the metadata
list count is greater than the batch size limit global dynamic property, a
BATCH_SIZE_LIMIT_EXCEEDED response code will be returned. */
-typedef struct _Hedera_TokenMintTransactionBody {
+typedef struct _Hedera_TokenMintTransactionBody {
/* *
The token for which to mint tokens. If token does not exist, transaction
results in INVALID_TOKEN_ID */
bool has_token;
- Hedera_TokenID token;
+ Hedera_TokenID token;
/* *
Applicable to tokens of type FUNGIBLE_COMMON. The amount to mint to the
Treasury Account. Amount must be a positive non-zero number represented in
the lowest denomination of the token. The new supply must be lower than
2^63. */
- uint64_t amount;
+ uint64_t amount;
/* *
Applicable to tokens of type NON_FUNGIBLE_UNIQUE. A list of metadata that
are being created. Maximum allowed size of each metadata is 100 bytes
Limited to 1 metadata chunk (no access to malloc) */
pb_size_t metadata_count;
- Hedera_TokenMintTransactionBody_metadata_t metadata[1];
+ Hedera_TokenMintTransactionBody_metadata_t metadata[1];
} Hedera_TokenMintTransactionBody;
@@ -76,6 +76,7 @@ extern const pb_msgdesc_t Hedera_TokenMintTransactionBody_msg;
#define Hedera_TokenMintTransactionBody_fields &Hedera_TokenMintTransactionBody_msg
/* Maximum encoded size of messages (where known) */
+#define HEDERA_PROTO_TOKEN_MINT_PB_H_MAX_SIZE Hedera_TokenMintTransactionBody_size
#define Hedera_TokenMintTransactionBody_size 149
#ifdef __cplusplus
diff --git a/proto/token_mint_pb2.py b/proto/token_mint_pb2.py
deleted file mode 100644
index 0ea6629c..00000000
--- a/proto/token_mint_pb2.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/token_mint.proto
-"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-import nanopb_pb2 as nanopb__pb2
-from proto import basic_types_pb2 as proto_dot_basic__types__pb2
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16proto/token_mint.proto\x12\x06Hedera\x1a\x0cnanopb.proto\x1a\x17proto/basic_types.proto\"h\n\x18TokenMintTransactionBody\x12\x1e\n\x05token\x18\x01 \x01(\x0b\x32\x0f.Hedera.TokenID\x12\x0e\n\x06\x61mount\x18\x02 \x01(\x04\x12\x1c\n\x08metadata\x18\x03 \x03(\x0c\x42\n\x92?\x02\x08\x64\x92?\x02\x10\x01\x62\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.token_mint_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
-
- DESCRIPTOR._options = None
- _TOKENMINTTRANSACTIONBODY.fields_by_name['metadata']._options = None
- _TOKENMINTTRANSACTIONBODY.fields_by_name['metadata']._serialized_options = b'\222?\002\010d\222?\002\020\001'
- _TOKENMINTTRANSACTIONBODY._serialized_start=73
- _TOKENMINTTRANSACTIONBODY._serialized_end=177
-# @@protoc_insertion_point(module_scope)
diff --git a/proto/transaction_body.pb.c b/proto/transaction_body.pb.c
index 0774cbeb..8aa0d971 100644
--- a/proto/transaction_body.pb.c
+++ b/proto/transaction_body.pb.c
@@ -1,5 +1,5 @@
/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#include "proto/transaction_body.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
diff --git a/proto/transaction_body.pb.h b/proto/transaction_body.pb.h
index 52ce431f..5deebf4d 100644
--- a/proto/transaction_body.pb.h
+++ b/proto/transaction_body.pb.h
@@ -1,5 +1,5 @@
/* Automatically generated nanopb header */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#ifndef PB_HEDERA_PROTO_TRANSACTION_BODY_PB_H_INCLUDED
#define PB_HEDERA_PROTO_TRANSACTION_BODY_PB_H_INCLUDED
@@ -22,47 +22,61 @@
/* Struct definitions */
/* *
A single transaction. All transaction types are possible here. */
-typedef struct _Hedera_TransactionBody {
+typedef struct _Hedera_TransactionBody {
/* *
The ID for this transaction, which includes the payer's account (the
account paying the transaction fee). If two transactions have the same
transactionID, they won't both have an effect */
bool has_transactionID;
- Hedera_TransactionID transactionID;
+ Hedera_TransactionID transactionID;
/* *
The account of the node that submits the client's transaction to the
network */
bool has_nodeAccountID;
- Hedera_AccountID nodeAccountID;
+ Hedera_AccountID nodeAccountID;
/* *
The maximum transaction fee the client is willing to pay */
- uint64_t transactionFee;
+ uint64_t transactionFee;
/* *
The transaction is invalid if consensusTimestamp >
transactionID.transactionValidStart + transactionValidDuration */
bool has_transactionValidDuration;
- Hedera_Duration transactionValidDuration;
+ Hedera_Duration transactionValidDuration;
/* *
Should a record of this transaction be generated? (A receipt is always
generated, but the record is optional) */
- bool generateRecord;
+ bool generateRecord;
/* *
Any notes or descriptions that should be put into the record (max length
100) */
- char memo[100];
- /* *
- Call a contract */
+ char memo[100];
pb_size_t which_data;
union {
+ /* *
+ Call a contract */
Hedera_ContractCallTransactionBody contractCall;
+ /* *
+ Create a new cryptocurrency account */
Hedera_CryptoCreateTransactionBody cryptoCreateAccount;
+ /* *
+ Transfer amount between accounts */
Hedera_CryptoTransferTransactionBody cryptoTransfer;
+ /* *
+ Modify information such as the expiration date for an account */
Hedera_CryptoUpdateTransactionBody cryptoUpdateAccount;
+ /* *
+ Mints new tokens to a token's treasury account */
Hedera_TokenMintTransactionBody tokenMint;
+ /* *
+ Burns tokens from a token's treasury account */
Hedera_TokenBurnTransactionBody tokenBurn;
+ /* *
+ Associate tokens to an account */
Hedera_TokenAssociateTransactionBody tokenAssociate;
+ /* *
+ Dissociate tokens from an account */
Hedera_TokenDissociateTransactionBody tokenDissociate;
- } data;
+ } data;
} Hedera_TransactionBody;
@@ -127,9 +141,12 @@ extern const pb_msgdesc_t Hedera_TransactionBody_msg;
/* Maximum encoded size of messages (where known) */
#if defined(Hedera_CryptoUpdateTransactionBody_size)
-#define Hedera_TransactionBody_size (282 + sizeof(union Hedera_TransactionBody_data_size_union))
union Hedera_TransactionBody_data_size_union {char f15[(6 + Hedera_CryptoUpdateTransactionBody_size)]; char f0[586];};
#endif
+#if defined(Hedera_CryptoUpdateTransactionBody_size)
+#define HEDERA_PROTO_TRANSACTION_BODY_PB_H_MAX_SIZE Hedera_TransactionBody_size
+#define Hedera_TransactionBody_size (282 + sizeof(union Hedera_TransactionBody_data_size_union))
+#endif
#ifdef __cplusplus
} /* extern "C" */
diff --git a/proto/transaction_body_pb2.py b/proto/transaction_body_pb2.py
deleted file mode 100644
index f852f30f..00000000
--- a/proto/transaction_body_pb2.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/transaction_body.proto
-"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-import nanopb_pb2 as nanopb__pb2
-from proto import basic_types_pb2 as proto_dot_basic__types__pb2
-from proto import crypto_create_pb2 as proto_dot_crypto__create__pb2
-from proto import crypto_transfer_pb2 as proto_dot_crypto__transfer__pb2
-from proto import crypto_update_pb2 as proto_dot_crypto__update__pb2
-from proto import duration_pb2 as proto_dot_duration__pb2
-from proto import token_associate_pb2 as proto_dot_token__associate__pb2
-from proto import token_burn_pb2 as proto_dot_token__burn__pb2
-from proto import token_dissociate_pb2 as proto_dot_token__dissociate__pb2
-from proto import token_mint_pb2 as proto_dot_token__mint__pb2
-from proto import contract_call_pb2 as proto_dot_contract__call__pb2
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1cproto/transaction_body.proto\x12\x06Hedera\x1a\x0cnanopb.proto\x1a\x17proto/basic_types.proto\x1a\x19proto/crypto_create.proto\x1a\x1bproto/crypto_transfer.proto\x1a\x19proto/crypto_update.proto\x1a\x14proto/duration.proto\x1a\x1bproto/token_associate.proto\x1a\x16proto/token_burn.proto\x1a\x1cproto/token_dissociate.proto\x1a\x16proto/token_mint.proto\x1a\x19proto/contract_call.proto\"\xe6\x05\n\x0fTransactionBody\x12,\n\rtransactionID\x18\x01 \x01(\x0b\x32\x15.Hedera.TransactionID\x12(\n\rnodeAccountID\x18\x02 \x01(\x0b\x32\x11.Hedera.AccountID\x12\x16\n\x0etransactionFee\x18\x03 \x01(\x04\x12\x32\n\x18transactionValidDuration\x18\x04 \x01(\x0b\x32\x10.Hedera.Duration\x12\x1a\n\x0egenerateRecord\x18\x05 \x01(\x08\x42\x02\x18\x01\x12\x13\n\x04memo\x18\x06 \x01(\tB\x05\x92?\x02\x08\x64\x12;\n\x0c\x63ontractCall\x18\x07 \x01(\x0b\x32#.Hedera.ContractCallTransactionBodyH\x00\x12\x42\n\x13\x63ryptoCreateAccount\x18\x0b \x01(\x0b\x32#.Hedera.CryptoCreateTransactionBodyH\x00\x12?\n\x0e\x63ryptoTransfer\x18\x0e \x01(\x0b\x32%.Hedera.CryptoTransferTransactionBodyH\x00\x12\x42\n\x13\x63ryptoUpdateAccount\x18\x0f \x01(\x0b\x32#.Hedera.CryptoUpdateTransactionBodyH\x00\x12\x35\n\ttokenMint\x18% \x01(\x0b\x32 .Hedera.TokenMintTransactionBodyH\x00\x12\x35\n\ttokenBurn\x18& \x01(\x0b\x32 .Hedera.TokenBurnTransactionBodyH\x00\x12?\n\x0etokenAssociate\x18( \x01(\x0b\x32%.Hedera.TokenAssociateTransactionBodyH\x00\x12\x41\n\x0ftokenDissociate\x18) \x01(\x0b\x32&.Hedera.TokenDissociateTransactionBodyH\x00\x42\x06\n\x04\x64\x61tab\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.transaction_body_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
-
- DESCRIPTOR._options = None
- _TRANSACTIONBODY.fields_by_name['generateRecord']._options = None
- _TRANSACTIONBODY.fields_by_name['generateRecord']._serialized_options = b'\030\001'
- _TRANSACTIONBODY.fields_by_name['memo']._options = None
- _TRANSACTIONBODY.fields_by_name['memo']._serialized_options = b'\222?\002\010d'
- _TRANSACTIONBODY._serialized_start=319
- _TRANSACTIONBODY._serialized_end=1061
-# @@protoc_insertion_point(module_scope)
diff --git a/proto/wrappers.pb.c b/proto/wrappers.pb.c
index 9843989e..489bc6ad 100644
--- a/proto/wrappers.pb.c
+++ b/proto/wrappers.pb.c
@@ -1,5 +1,5 @@
/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#include "proto/wrappers.pb.h"
#if PB_PROTO_HEADER_VERSION != 40
diff --git a/proto/wrappers.pb.h b/proto/wrappers.pb.h
index 562a4441..71108f98 100644
--- a/proto/wrappers.pb.h
+++ b/proto/wrappers.pb.h
@@ -1,5 +1,5 @@
/* Automatically generated nanopb header */
-/* Generated by nanopb-0.4.5 */
+/* Generated by nanopb-0.4.9.1 */
#ifndef PB_HEDERA_PROTO_WRAPPERS_PB_H_INCLUDED
#define PB_HEDERA_PROTO_WRAPPERS_PB_H_INCLUDED
@@ -10,69 +10,69 @@
#endif
/* Struct definitions */
-/* Wrapper message for `string`.
-
- The JSON representation for `StringValue` is JSON string. */
-typedef struct _Hedera_StringValue {
- /* The string value. */
- pb_callback_t value;
-} Hedera_StringValue;
-
-/* Wrapper message for `bool`.
-
- The JSON representation for `BoolValue` is JSON `true` and `false`. */
-typedef struct _Hedera_BoolValue {
- /* The bool value. */
- bool value;
-} Hedera_BoolValue;
-
/* Wrapper message for `double`.
The JSON representation for `DoubleValue` is JSON number. */
-typedef struct _Hedera_DoubleValue {
+typedef struct _Hedera_DoubleValue {
/* The double value. */
- double value;
+ double value;
} Hedera_DoubleValue;
/* Wrapper message for `float`.
The JSON representation for `FloatValue` is JSON number. */
-typedef struct _Hedera_FloatValue {
+typedef struct _Hedera_FloatValue {
/* The float value. */
- float value;
+ float value;
} Hedera_FloatValue;
-/* Wrapper message for `int32`.
-
- The JSON representation for `Int32Value` is JSON number. */
-typedef struct _Hedera_Int32Value {
- /* The int32 value. */
- int32_t value;
-} Hedera_Int32Value;
-
/* Wrapper message for `int64`.
The JSON representation for `Int64Value` is JSON string. */
-typedef struct _Hedera_Int64Value {
+typedef struct _Hedera_Int64Value {
/* The int64 value. */
- int64_t value;
+ int64_t value;
} Hedera_Int64Value;
+/* Wrapper message for `uint64`.
+
+ The JSON representation for `UInt64Value` is JSON string. */
+typedef struct _Hedera_UInt64Value {
+ /* The uint64 value. */
+ uint64_t value;
+} Hedera_UInt64Value;
+
+/* Wrapper message for `int32`.
+
+ The JSON representation for `Int32Value` is JSON number. */
+typedef struct _Hedera_Int32Value {
+ /* The int32 value. */
+ int32_t value;
+} Hedera_Int32Value;
+
/* Wrapper message for `uint32`.
The JSON representation for `UInt32Value` is JSON number. */
-typedef struct _Hedera_UInt32Value {
+typedef struct _Hedera_UInt32Value {
/* The uint32 value. */
- uint32_t value;
+ uint32_t value;
} Hedera_UInt32Value;
-/* Wrapper message for `uint64`.
+/* Wrapper message for `bool`.
- The JSON representation for `UInt64Value` is JSON string. */
-typedef struct _Hedera_UInt64Value {
- /* The uint64 value. */
- uint64_t value;
-} Hedera_UInt64Value;
+ The JSON representation for `BoolValue` is JSON `true` and `false`. */
+typedef struct _Hedera_BoolValue {
+ /* The bool value. */
+ bool value;
+} Hedera_BoolValue;
+
+/* Wrapper message for `string`.
+
+ The JSON representation for `StringValue` is JSON string. */
+typedef struct _Hedera_StringValue {
+ /* The string value. */
+ pb_callback_t value;
+} Hedera_StringValue;
#ifdef __cplusplus
@@ -98,14 +98,14 @@ extern "C" {
#define Hedera_StringValue_init_zero {{{NULL}, NULL}}
/* Field tags (for use in manual encoding/decoding) */
-#define Hedera_StringValue_value_tag 1
-#define Hedera_BoolValue_value_tag 1
#define Hedera_DoubleValue_value_tag 1
#define Hedera_FloatValue_value_tag 1
-#define Hedera_Int32Value_value_tag 1
#define Hedera_Int64Value_value_tag 1
-#define Hedera_UInt32Value_value_tag 1
#define Hedera_UInt64Value_value_tag 1
+#define Hedera_Int32Value_value_tag 1
+#define Hedera_UInt32Value_value_tag 1
+#define Hedera_BoolValue_value_tag 1
+#define Hedera_StringValue_value_tag 1
/* Struct field encoding specification for nanopb */
#define Hedera_DoubleValue_FIELDLIST(X, a) \
@@ -169,6 +169,7 @@ extern const pb_msgdesc_t Hedera_StringValue_msg;
/* Maximum encoded size of messages (where known) */
/* Hedera_StringValue_size depends on runtime parameters */
+#define HEDERA_PROTO_WRAPPERS_PB_H_MAX_SIZE Hedera_Int64Value_size
#define Hedera_BoolValue_size 2
#define Hedera_DoubleValue_size 9
#define Hedera_FloatValue_size 5
diff --git a/proto/wrappers_pb2.py b/proto/wrappers_pb2.py
deleted file mode 100644
index 7961d1ed..00000000
--- a/proto/wrappers_pb2.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/wrappers.proto
-"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-import nanopb_pb2 as nanopb__pb2
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14proto/wrappers.proto\x12\x06Hedera\x1a\x0cnanopb.proto\"\x1c\n\x0b\x44oubleValue\x12\r\n\x05value\x18\x01 \x01(\x01\"\x1b\n\nFloatValue\x12\r\n\x05value\x18\x01 \x01(\x02\"\x1b\n\nInt64Value\x12\r\n\x05value\x18\x01 \x01(\x03\"\x1c\n\x0bUInt64Value\x12\r\n\x05value\x18\x01 \x01(\x04\"\x1b\n\nInt32Value\x12\r\n\x05value\x18\x01 \x01(\x05\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bStringValue\x12\r\n\x05value\x18\x01 \x01(\tb\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.wrappers_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
-
- DESCRIPTOR._options = None
- _DOUBLEVALUE._serialized_start=46
- _DOUBLEVALUE._serialized_end=74
- _FLOATVALUE._serialized_start=76
- _FLOATVALUE._serialized_end=103
- _INT64VALUE._serialized_start=105
- _INT64VALUE._serialized_end=132
- _UINT64VALUE._serialized_start=134
- _UINT64VALUE._serialized_end=162
- _INT32VALUE._serialized_start=164
- _INT32VALUE._serialized_end=191
- _UINT32VALUE._serialized_start=193
- _UINT32VALUE._serialized_end=221
- _BOOLVALUE._serialized_start=223
- _BOOLVALUE._serialized_end=249
- _STRINGVALUE._serialized_start=251
- _STRINGVALUE._serialized_end=279
-# @@protoc_insertion_point(module_scope)
diff --git a/vendor/nanopb/.bazelignore b/vendor/nanopb/.bazelignore
new file mode 100644
index 00000000..db6d7e4a
--- /dev/null
+++ b/vendor/nanopb/.bazelignore
@@ -0,0 +1 @@
+tests/bazel_workspace_support
\ No newline at end of file
diff --git a/vendor/nanopb/.gitattributes b/vendor/nanopb/.gitattributes
new file mode 100644
index 00000000..985abfe1
--- /dev/null
+++ b/vendor/nanopb/.gitattributes
@@ -0,0 +1,3 @@
+# Auto-generated files that don't need human review.
+MODULE.bazel.lock linguist-generated=true
+extra/requirements_lock.txt linguist-generated=true
diff --git a/vendor/nanopb/.github/workflows/bazel.yml b/vendor/nanopb/.github/workflows/bazel.yml
new file mode 100644
index 00000000..ee4c974d
--- /dev/null
+++ b/vendor/nanopb/.github/workflows/bazel.yml
@@ -0,0 +1,35 @@
+name: Bazel
+
+on:
+ workflow_dispatch:
+ workflow_call:
+ push:
+ paths:
+ - '**bazel**'
+ pull_request:
+ paths:
+ - '**bazel**'
+
+jobs:
+ build_embedded:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ubuntu-20.04, macos-14]
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Mount bazel cache
+ uses: actions/cache@v2
+ with:
+ path: "/home/runner/.cache/bazel"
+ key: ${{ runner.os }}-bazel
+
+ - name: Test
+ run: |
+ bazelisk test //...
+
+ - name: Build
+ run: |
+ bazelisk build //...
diff --git a/vendor/nanopb/.github/workflows/binary_packages.yml b/vendor/nanopb/.github/workflows/binary_packages.yml
new file mode 100644
index 00000000..7e4b419f
--- /dev/null
+++ b/vendor/nanopb/.github/workflows/binary_packages.yml
@@ -0,0 +1,157 @@
+name: Build binary packages
+
+on:
+ workflow_dispatch:
+ workflow_call:
+ push:
+ branches:
+ - 'master'
+ tags:
+ - '*'
+
+jobs:
+ build_linux:
+ name: Build binary on Ubuntu 20.04
+ runs-on: ubuntu-20.04
+
+ steps:
+ - name: Check out code from GitHub
+ uses: actions/checkout@v4
+ with:
+ path: nanopb
+ fetch-depth: "0"
+
+ - name: Setup Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.8'
+
+ - name: Install dependencies
+ run: |
+ python3 -m pip install --user --upgrade scons protobuf grpcio-tools pyinstaller
+ python3 -c 'import google.protobuf; print(google.protobuf.__file__)'
+
+ - name: Build binary package
+ run: |
+ cd nanopb
+ git clean -dxf
+ tools/make_linux_package.sh
+
+ - name: Fingerprint binary
+ run: |
+ openssl sha256 nanopb/dist/*.tar.gz
+
+ - name: Upload binary
+ uses: actions/upload-artifact@v4
+ with:
+ path: nanopb/dist/*.tar.gz
+ name: nanopb-binary-linux
+
+ - name: Test binary package
+ run: |
+ tar xzf nanopb/dist/*.tar.gz
+ cd nanopb-*/tests
+ python3 -m SCons
+
+ - name: Test examples
+ run: |
+ cd nanopb-*/examples
+ (cd simple; make; ./simple)
+ (cd network_server; make)
+ (cd using_union_messages; make)
+ (cd cmake_simple; mkdir build; cd build; cmake ..; make)
+ (cd cmake_relpath; mkdir build; cd build; cmake ..; make)
+
+ - name: Run build tests
+ run: |
+ cd nanopb-*/build-tests
+ (cd cmake_with_components; mkdir build; cd build; cmake ..; make)
+ (cd legacy_cmake_relpath; mkdir build; cd build; cmake ..; make)
+ (cd legacy_cmake_simple; mkdir build; cd build; cmake ..; make)
+
+ build_windows:
+ name: Build binary on Windows 2019
+ runs-on: windows-2019
+
+ steps:
+ - name: Check out code from GitHub
+ uses: actions/checkout@v4
+ with:
+ path: nanopb
+ fetch-depth: "0"
+
+ - name: Install dependencies
+ shell: bash
+ run: |
+ python3 -m pip install --user --upgrade scons protobuf grpcio-tools pyinstaller
+ python3 -c 'import google.protobuf; print(google.protobuf.__file__)'
+
+ - name: Build binary package
+ shell: bash
+ run: |
+ cd nanopb
+ git clean -dxf
+ tools/make_windows_package.sh
+
+ - name: Fingerprint binary
+ run: |
+ openssl sha256 nanopb/dist/*.zip
+
+ - name: Upload binary
+ uses: actions/upload-artifact@v4
+ with:
+ path: nanopb/dist/*.zip
+ name: nanopb-binary-windows
+
+ - name: Test binary package
+ shell: bash
+ run: |
+ powershell "Expand-Archive nanopb/dist/*.zip"
+ ls
+ cd nanopb-*/nanopb-*/tests
+ python3 -m SCons
+
+ build_macos:
+ name: Build binary on Mac OS X 14
+ runs-on: macos-14
+
+ steps:
+ - name: Check out code from GitHub
+ uses: actions/checkout@v4
+ with:
+ path: nanopb
+ fetch-depth: "0"
+
+ - name: Install dependencies
+ run: |
+ python3 -m venv venv
+ venv/bin/python3 -m pip install --upgrade scons protobuf grpcio-tools pyinstaller
+ venv/bin/python3 -c 'import google.protobuf; print(google.protobuf.__file__)'
+
+ - name: Build binary package
+ run: |
+ source venv/bin/activate
+ cd nanopb
+ git clean -dxf
+ tools/make_mac_package.sh
+
+ - name: Fingerprint binary
+ run: |
+ openssl sha256 nanopb/dist/*.tar.gz
+
+ - name: Upload binary
+ uses: actions/upload-artifact@v4
+ with:
+ path: nanopb/dist/*.tar.gz
+ name: nanopb-binary-macos
+
+ - name: Test binary package
+ run: |
+ tar xzf nanopb/dist/*.tar.gz
+ cd nanopb-*/tests
+ ../../venv/bin/python3 -m SCons
+ cd ../examples/simple
+ make
+ ./simple
+
+
diff --git a/vendor/nanopb/.github/workflows/cifuzz.yml b/vendor/nanopb/.github/workflows/cifuzz.yml
index e67e68fc..de6fbbbb 100644
--- a/vendor/nanopb/.github/workflows/cifuzz.yml
+++ b/vendor/nanopb/.github/workflows/cifuzz.yml
@@ -1,17 +1,7 @@
-name: CIFuzz
+name: Run CIFuzz fuzz test for 10 minutes
on:
- push:
- branches:
- - master
- paths:
- - '**.c'
- - '**.h'
- pull_request:
- branches:
- - master
- paths:
- - '**.c'
- - '**.h'
+ workflow_dispatch:
+ workflow_call:
jobs:
Fuzzing:
@@ -32,7 +22,7 @@ jobs:
dry-run: false
sanitizer: undefined
- name: Upload Crash
- uses: actions/upload-artifact@v1
+ uses: actions/upload-artifact@v4
if: failure() && steps.build.outcome == 'success'
with:
name: artifacts
diff --git a/vendor/nanopb/.github/workflows/cmake.yml b/vendor/nanopb/.github/workflows/cmake.yml
new file mode 100644
index 00000000..3489686a
--- /dev/null
+++ b/vendor/nanopb/.github/workflows/cmake.yml
@@ -0,0 +1,71 @@
+name: Test CMake-based installation and compilation
+
+on:
+ workflow_dispatch:
+ workflow_call:
+ push:
+ paths:
+ - '**CMakeLists**'
+ - '**cmake**'
+ pull_request:
+ paths:
+ - '**CMakeLists**'
+ - '**cmake**'
+
+jobs:
+ build_cmake_linux:
+ name: CMake on Ubuntu 22.04
+ runs-on: ubuntu-22.04
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install dependencies
+ run: |
+ python3 -m pip install protobuf grpcio-tools
+
+ - name: Build with CMake
+ run: |
+ mkdir build
+ cd build
+ cmake ..
+ cmake --build .
+ sudo cmake --install .
+
+ - name: Compile example against installed library
+ run: |
+ cd examples/simple
+ nanopb_generator simple.proto
+ gcc -Wall -Werror -osimple simple.pb.c simple.c -lprotobuf-nanopb -I/usr/local/include/nanopb
+ ./simple
+
+ build_cmake_windows:
+ name: CMake on Windows 2022
+ runs-on: windows-2022
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: actions/setup-python@v4
+ with:
+ python-version: '3.12'
+
+ - name: Install dependencies
+ run: |
+ pip install protobuf grpcio-tools
+
+ - name: Build with CMake
+ run: |
+ mkdir build
+ cd build
+ cmake ..
+ cmake --build . --config Release
+ cmake --install . --config Release --prefix C:/nanopb-test
+
+ - name: Compile example against installed library
+ shell: cmd
+ run: |
+ call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
+ cd examples/simple
+ call C:\nanopb-test\bin\nanopb_generator simple.proto
+ cl simple.pb.c simple.c /IC:\nanopb-test\include\nanopb C:\nanopb-test\lib\protobuf-nanopb.lib /link /out:simple.exe
+ simple.exe
+
diff --git a/vendor/nanopb/.github/workflows/codeql-buildscript.sh b/vendor/nanopb/.github/workflows/codeql-buildscript.sh
new file mode 100644
index 00000000..0804d646
--- /dev/null
+++ b/vendor/nanopb/.github/workflows/codeql-buildscript.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+sudo apt-get -y update
+sudo apt-get -y install python3-protobuf protobuf-compiler #scons splint valgrind
+cd examples/simple
+make
diff --git a/vendor/nanopb/.github/workflows/codeql.yml b/vendor/nanopb/.github/workflows/codeql.yml
new file mode 100644
index 00000000..c985b2ea
--- /dev/null
+++ b/vendor/nanopb/.github/workflows/codeql.yml
@@ -0,0 +1,126 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+ push:
+ branches: [ "main", "master" ]
+ schedule:
+ - cron: '0 0 * * *'
+ pull_request:
+ branches: '*'
+
+jobs:
+ analyze:
+ name: Analyze
+ # Runner size impacts CodeQL analysis time. To learn more, please see:
+ # - https://gh.io/recommended-hardware-resources-for-running-codeql
+ # - https://gh.io/supported-runners-and-hardware-resources
+ # - https://gh.io/using-larger-runners
+ # Consider using larger runners for possible analysis time improvements.
+ runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-20.04' }}
+ timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }}
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'cpp' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby', 'swift' ]
+ # Use only 'java' to analyze code written in Java, Kotlin or both
+ # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both
+ # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ submodules: recursive
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v2
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+
+ # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
+ # queries: security-extended,security-and-quality
+ queries: security-and-quality
+
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ #- name: Autobuild
+ # uses: github/codeql-action/autobuild@v2
+
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
+
+ # If the Autobuild fails above, remove it and uncomment the following three lines.
+ # Modify them (or add more) to build your code for your project; please refer to the EXAMPLE below for guidance.
+
+ - run: |
+ ./.github/workflows/codeql-buildscript.sh
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v2
+ with:
+ category: "/language:${{matrix.language}}"
+ upload: false
+ id: step1
+
+ # Filter out rules with low severity or high false positive rate
+ # Also filter out warnings in third-party code
+ - name: Filter out unwanted errors and warnings
+ uses: advanced-security/filter-sarif@v1
+ with:
+ patterns: |
+ -**:cpp/path-injection
+ -**:cpp/world-writable-file-creation
+ -**:cpp/poorly-documented-function
+ -**:cpp/potentially-dangerous-function
+ -**:cpp/use-of-goto
+ -**:cpp/integer-multiplication-cast-to-long
+ -**:cpp/comparison-with-wider-type
+ -**:cpp/leap-year/*
+ -**:cpp/ambiguously-signed-bit-field
+ -**:cpp/suspicious-pointer-scaling
+ -**:cpp/suspicious-pointer-scaling-void
+ -**:cpp/unsigned-comparison-zero
+ -**/cmake*/Modules/**
+ input: ${{ steps.step1.outputs.sarif-output }}/cpp.sarif
+ output: ${{ steps.step1.outputs.sarif-output }}/cpp.sarif
+
+ - name: Upload CodeQL results to code scanning
+ uses: github/codeql-action/upload-sarif@v2
+ with:
+ sarif_file: ${{ steps.step1.outputs.sarif-output }}
+ category: "/language:${{matrix.language}}"
+
+ - name: Upload CodeQL results as an artifact
+ if: success() || failure()
+ uses: actions/upload-artifact@v4
+ with:
+ name: codeql-results
+ path: ${{ steps.step1.outputs.sarif-output }}
+ retention-days: 5
+
+ - name: Fail if an error is found
+ run: |
+ ./.github/workflows/fail_on_error.py \
+ ${{ steps.step1.outputs.sarif-output }}/cpp.sarif
diff --git a/vendor/nanopb/.github/workflows/compiler_tests.yml b/vendor/nanopb/.github/workflows/compiler_tests.yml
new file mode 100644
index 00000000..47b77b28
--- /dev/null
+++ b/vendor/nanopb/.github/workflows/compiler_tests.yml
@@ -0,0 +1,62 @@
+name: Run tests with various compilers
+
+on:
+ workflow_dispatch:
+ workflow_call:
+
+jobs:
+ test_linux:
+ name: Test with gcc & clang on Ubuntu
+ runs-on: ubuntu-20.04
+
+ steps:
+ - name: Check out code from GitHub
+ uses: actions/checkout@v4
+ with:
+ path: nanopb
+ fetch-depth: "0"
+
+ - name: Install dependencies
+ run: |
+ sudo dpkg --add-architecture i386
+ sudo apt-get update
+ sudo apt-get install python3-protobuf protobuf-compiler scons
+ sudo apt-get install clang valgrind splint cmake
+ sudo apt-get install libstdc++6:i386 libgcc-s1:i386 lib32gcc-9-dev lib32stdc++-9-dev libncurses5:i386 libc6-dbg:i386 libc6-dev:i386 binutils-multiarch
+
+ - name: Test with 64-bit GCC
+ run: |
+ cd nanopb/tests
+ rm -rf build
+ scons CC=gcc CXX=g++ CCFLAGS="-O3"
+ rm -rf build
+ scons CC=gcc CXX=g++ CCFLAGS="-Os -flto"
+ rm -rf build
+ scons CC=gcc CXX=g++ CCFLAGS="-O1 -DPB_FIELD_32BIT=1"
+
+ - name: Test with 32-bit GCC
+ run: |
+ cd nanopb/tests
+ rm -rf build
+ scons CC=gcc CXX=g++ CCFLAGS="-O3 -m32" LINKFLAGS="-m32"
+ rm -rf build
+ scons CC=gcc CXX=g++ CCFLAGS="-Os -flto -m32" LINKFLAGS="-m32"
+ rm -rf build
+ scons CC=gcc CXX=g++ CCFLAGS="-O1 -DPB_FIELD_32BIT=1 -m32" LINKFLAGS="-m32"
+
+ - name: Test with 64-bit clang
+ run: |
+ cd nanopb/tests
+ rm -rf build
+ scons CC=clang CXX=clang++ CCFLAGS="-O3"
+ rm -rf build
+ scons CC=clang CXX=clang++ CCFLAGS="-Os -DPB_FIELD_32BIT=1"
+
+ - name: Test with 32-bit clang
+ run: |
+ cd nanopb/tests
+ rm -rf build
+ scons CC=clang CXX=clang++ CCFLAGS="-O3 -m32" LINKFLAGS="-m32"
+ rm -rf build
+ scons CC=clang CXX=clang++ CCFLAGS="-Os -DPB_FIELD_32BIT=1 -m32" LINKFLAGS="-m32"
+
diff --git a/vendor/nanopb/.github/workflows/fail_on_error.py b/vendor/nanopb/.github/workflows/fail_on_error.py
new file mode 100644
index 00000000..29791742
--- /dev/null
+++ b/vendor/nanopb/.github/workflows/fail_on_error.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python3
+
+import json
+import sys
+
+# Return whether SARIF file contains error-level results
+def codeql_sarif_contain_error(filename):
+ with open(filename, 'r') as f:
+ s = json.load(f)
+
+ for run in s.get('runs', []):
+ rules_metadata = run['tool']['driver']['rules']
+ if not rules_metadata:
+ rules_metadata = run['tool']['extensions'][0]['rules']
+
+ for res in run.get('results', []):
+ if 'ruleIndex' in res:
+ rule_index = res['ruleIndex']
+ elif 'rule' in res and 'index' in res['rule']:
+ rule_index = res['rule']['index']
+ else:
+ continue
+ try:
+ rule_level = rules_metadata[rule_index]['defaultConfiguration']['level']
+ except IndexError as e:
+ print(e, rule_index, len(rules_metadata))
+ else:
+ if rule_level == 'error':
+ return True
+ return False
+
+if __name__ == "__main__":
+ if codeql_sarif_contain_error(sys.argv[1]):
+ sys.exit(1)
diff --git a/vendor/nanopb/.github/workflows/ios_swift_tests.yml b/vendor/nanopb/.github/workflows/ios_swift_tests.yml
new file mode 100644
index 00000000..c68ebfbf
--- /dev/null
+++ b/vendor/nanopb/.github/workflows/ios_swift_tests.yml
@@ -0,0 +1,23 @@
+name: Build and test for Apple iOS swift
+
+on:
+ workflow_dispatch:
+ workflow_call:
+ push:
+ paths:
+ - '**spm**'
+ - '**swift**'
+ pull_request:
+ paths:
+ - '**spm**'
+ - '**swift**'
+
+jobs:
+ swift-build-run:
+ runs-on: macOS-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Build
+ run: swift build
+ - name: Run
+ run: swift test
diff --git a/vendor/nanopb/.github/workflows/platformio_tests.yml b/vendor/nanopb/.github/workflows/platformio_tests.yml
new file mode 100644
index 00000000..4448c0ce
--- /dev/null
+++ b/vendor/nanopb/.github/workflows/platformio_tests.yml
@@ -0,0 +1,82 @@
+name: Build and test using platformio
+
+on:
+ workflow_dispatch:
+ workflow_call:
+ push:
+ paths:
+ - '**platformio**'
+ pull_request:
+ paths:
+ - '**platformio**'
+
+jobs:
+ platformio:
+ name: Build and run PlatformIO example
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out code from GitHub
+ uses: actions/checkout@v4
+ with:
+ path: nanopb
+
+ - name: Installing dependencies for local act
+ if: ${{ env.ACT }}
+ run: |
+ sudo apt update
+
+ - name: Installing common dependencies
+ run: |
+ sudo apt install -y python3-pip
+
+ - name: Install and setup PlatformIO
+ run: |
+ python3 -m venv venv
+ venv/bin/pip3 install -U platformio
+
+ - name: Build PlatformIO package
+ run: |
+ source venv/bin/activate
+ cd nanopb
+ pio package pack
+
+ - name: Example - Extract PlatformIO package to example dir
+ run: |
+ source venv/bin/activate
+ cp -R nanopb/examples/platformio example
+ mkdir -p example/lib/nanopb
+ tar -xzf nanopb/Nanopb-*.tar.gz -C example/lib/nanopb
+
+ - name: Example - Build
+ run: |
+ source venv/bin/activate
+ cd example
+ pio run
+
+ - name: Example - Run test without options
+ run: example/.pio/build/pio_without_options/program
+
+ - name: Example - Run test with options
+ run: example/.pio/build/pio_with_options/program
+
+ - name: Build in subdirectory with space characters
+ run: |
+ source venv/bin/activate
+ cp -R nanopb/examples/platformio "example with spaces"
+ mkdir -p "example with spaces/lib/nanopb"
+ tar -xzf nanopb/Nanopb-*.tar.gz -C "example with spaces/lib/nanopb"
+ cd "example with spaces"
+ pio run -e pio_with_options # ESP32 platform doesn't support spaces currently
+
+ - name: Build with default platformio.ini
+ run: |
+ source venv/bin/activate
+ mkdir -p test_default_pio_conf
+ cd test_default_pio_conf
+ pio project init
+ ln -s ../nanopb lib/nanopb
+ echo "[env:native]" >> platformio.ini
+ echo "platform = native" >> platformio.ini
+ echo "lib_deps = Nanopb" >> platformio.ini
+ echo "int main(int argc, char *argv[]){}" > src/main.cpp
+ pio run
diff --git a/vendor/nanopb/.github/workflows/pypi_publish.yml b/vendor/nanopb/.github/workflows/pypi_publish.yml
new file mode 100644
index 00000000..8042b967
--- /dev/null
+++ b/vendor/nanopb/.github/workflows/pypi_publish.yml
@@ -0,0 +1,78 @@
+name: Publish generator package to PyPI / pip
+
+on:
+ workflow_dispatch:
+ workflow_call:
+ secrets:
+ PYPI_API_KEY:
+ required: true
+
+jobs:
+ publish_pypi:
+ name: Build and publish pypi package on Ubuntu 20.04
+ runs-on: ubuntu-20.04
+
+ steps:
+ - name: Check out code from GitHub
+ uses: actions/checkout@v4
+ with:
+ path: nanopb
+ fetch-depth: "0"
+
+ - name: Install dependencies
+ run: |
+ python3 -m pip install --user --upgrade pyinstaller poetry protobuf grpcio-tools
+
+ - name: Build PyPI package
+ run: |
+ cd nanopb/extra/poetry
+ ./poetry_build.sh
+
+ - name: Fingerprint package
+ run: |
+ openssl sha256 nanopb/extra/poetry/dist/*.whl
+
+ - name: Check for existence of PyPI package
+ run: |
+ VERSION=$(grep "^version =" nanopb/extra/poetry/build/pyproject.toml | cut -d '"' -f 2)
+ if curl --head --silent --fail https://pypi.org/project/nanopb/$VERSION/; then
+ echo "pypi_exists=true" >> $GITHUB_ENV
+ else
+ echo "pypi_exists=false" >> $GITHUB_ENV
+ fi
+
+ - name: Publish PyPI package
+ if: env.pypi_exists == 'false'
+ env:
+ POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_KEY }}
+ POETRY_HTTP_BASIC_PYPI_USERNAME: __token__
+ POETRY_HTTP_BASIC_PYPI_PASSWORD: ${{ secrets.PYPI_API_KEY }}
+ run: |
+ cd nanopb/extra/poetry/build
+ poetry publish -n -v -u __token__ -p "$POETRY_PYPI_TOKEN_PYPI"
+
+ test_pypi:
+ name: Test pypi package
+ runs-on: ubuntu-20.04
+ needs: publish_pypi
+
+ steps:
+ - name: Check out code from GitHub
+ uses: actions/checkout@v4
+ with:
+ path: nanopb
+
+ - name: Wait for package to become visible
+ run: |
+ sleep 60
+
+ - name: Install PyPI package
+ run: |
+ python3 -m pip install --user --upgrade protobuf grpcio-tools scons
+ python3 -m pip install --user --upgrade --pre nanopb
+
+ - name: Test PyPI package
+ run: |
+ cd nanopb/tests/alltypes/
+ nanopb_generator alltypes.proto
+ gcc -Wall -I ../../ -c alltypes.pb.c
diff --git a/vendor/nanopb/.github/workflows/python2_tests.yml b/vendor/nanopb/.github/workflows/python2_tests.yml
new file mode 100644
index 00000000..3573ea22
--- /dev/null
+++ b/vendor/nanopb/.github/workflows/python2_tests.yml
@@ -0,0 +1,36 @@
+name: Run tests on Python 2
+
+on:
+ workflow_dispatch:
+ workflow_call:
+ push:
+ paths:
+ - '**.py'
+ pull_request:
+ paths:
+ - '**.py'
+
+jobs:
+ test_python2:
+ name: Test with Python 2 on Ubuntu 20.04
+ runs-on: ubuntu-20.04
+
+ steps:
+ - name: Check out code from GitHub
+ uses: actions/checkout@v4
+ with:
+ path: nanopb
+ fetch-depth: "0"
+
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install python-protobuf protobuf-compiler scons python-is-python2
+
+ - name: Run tests with Python 2
+ run: |
+ cd nanopb
+ export GENPATH=$(bash -c 'printf %q "$(pwd)/generator/protoc-gen-nanopb-py2"')
+ cd tests
+ scons PYTHON="/usr/bin/python2" PROTOC="/usr/bin/protoc" PROTOCFLAGS="--plugin=protoc-gen-nanopb=$GENPATH"
+
diff --git a/vendor/nanopb/.github/workflows/simulator_tests.yml b/vendor/nanopb/.github/workflows/simulator_tests.yml
new file mode 100644
index 00000000..15366b67
--- /dev/null
+++ b/vendor/nanopb/.github/workflows/simulator_tests.yml
@@ -0,0 +1,89 @@
+name: Run tests in simulator
+
+on:
+ workflow_dispatch:
+ workflow_call:
+
+jobs:
+ test_avr:
+ name: Test in simavr for ATMega1284
+ runs-on: ubuntu-20.04
+
+ steps:
+ - name: Check out code from GitHub
+ uses: actions/checkout@v4
+ with:
+ path: nanopb
+ fetch-depth: "0"
+
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install python3-protobuf protobuf-compiler scons
+ sudo apt-get install libelf-dev gcc-avr gdb-avr avr-libc
+
+ - name: Install simavr
+ run: |
+ git clone https://github.com/buserror/simavr.git
+ cd simavr
+ make build-simavr
+ sudo make install-simavr
+ sudo ldconfig
+
+ - name: Run tests in AVR simulator
+ run: |
+ cd nanopb/tests
+ scons PLATFORM=AVR
+
+ test_mips:
+ name: Test in qemu for MIPS
+ runs-on: ubuntu-20.04
+
+ steps:
+ - name: Check out code from GitHub
+ uses: actions/checkout@v4
+ with:
+ path: nanopb
+ fetch-depth: "0"
+
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install python3-protobuf protobuf-compiler scons
+ sudo apt-get install gcc-mipsel-linux-gnu g++-mipsel-linux-gnu gcc-mips-linux-gnu g++-mips-linux-gnu qemu-user
+
+ - name: Run tests for big-endian MIPS
+ run: |
+ cd nanopb/tests
+ rm -rf build
+ scons PLATFORM=MIPS
+
+ - name: Run tests for little-endian MIPS
+ run: |
+ cd nanopb/tests
+ rm -rf build
+ scons PLATFORM=MIPSEL
+
+ test_riscv:
+ name: Test in qemu for RISCV64
+ runs-on: ubuntu-20.04
+
+ steps:
+ - name: Check out code from GitHub
+ uses: actions/checkout@v4
+ with:
+ path: nanopb
+ fetch-depth: "0"
+
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install python3-protobuf protobuf-compiler scons
+ sudo apt-get install gcc-riscv64-linux-gnu g++-riscv64-linux-gnu qemu-user
+
+ - name: Run tests for RISCV64
+ run: |
+ cd nanopb/tests
+ rm -rf build
+ scons PLATFORM=RISCV64
+
diff --git a/vendor/nanopb/.github/workflows/spm.yml b/vendor/nanopb/.github/workflows/spm.yml
deleted file mode 100644
index 999851a0..00000000
--- a/vendor/nanopb/.github/workflows/spm.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-name: spm
-
-on:
- push:
- pull_request:
-
-jobs:
- swift-build-run:
- runs-on: macOS-latest
- steps:
- - uses: actions/checkout@v2
- - name: Build
- run: swift build
- - name: Run
- run: swift test
diff --git a/vendor/nanopb/.github/workflows/trigger_on_code_change.yml b/vendor/nanopb/.github/workflows/trigger_on_code_change.yml
new file mode 100644
index 00000000..471df55a
--- /dev/null
+++ b/vendor/nanopb/.github/workflows/trigger_on_code_change.yml
@@ -0,0 +1,64 @@
+name: Test after code changes
+on:
+ workflow_dispatch:
+ push:
+ paths:
+ - '**.c'
+ - '**.h'
+ - '**.py'
+ - '**.sh'
+ - '**.yml'
+ - '**.proto'
+ - '**.mk'
+ - '**.cmake'
+ pull_request:
+ paths:
+ - '**.c'
+ - '**.h'
+ - '**.py'
+ - '**.sh'
+ - '**.yml'
+ - '**.proto'
+ - '**.mk'
+ - '**.cmake'
+
+jobs:
+ smoke_test:
+ name: Run test suite on Ubuntu 20.04
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ python-version: ['3.8', '3.x']
+ os: ['ubuntu-20.04', 'ubuntu-24.04']
+
+ steps:
+ - name: Check out code from GitHub
+ uses: actions/checkout@v4
+ with:
+ path: nanopb
+
+ - name: Setup Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install protobuf-compiler splint valgrind
+ python3 -m pip install --user --upgrade scons protobuf grpcio-tools pyinstaller
+ python3 -c 'import google.protobuf; print(google.protobuf.__file__)'
+
+ - name: Run tests
+ run: |
+ cd nanopb/tests
+ scons
+
+ fuzz_tests:
+ needs: smoke_test
+ uses: ./.github/workflows/cifuzz.yml
+
+ binary_builds:
+ needs: smoke_test
+ uses: ./.github/workflows/binary_packages.yml
+
diff --git a/vendor/nanopb/.github/workflows/trigger_on_schedule.yml b/vendor/nanopb/.github/workflows/trigger_on_schedule.yml
new file mode 100644
index 00000000..eb40f20f
--- /dev/null
+++ b/vendor/nanopb/.github/workflows/trigger_on_schedule.yml
@@ -0,0 +1,38 @@
+name: Test, build and pre-release weekly
+on:
+ workflow_dispatch:
+ schedule:
+ - cron: '32 1 * * 1'
+
+jobs:
+ compiler_tests:
+ uses: ./.github/workflows/compiler_tests.yml
+
+ simulator_tests:
+ uses: ./.github/workflows/simulator_tests.yml
+
+ python2_tests:
+ uses: ./.github/workflows/python2_tests.yml
+
+ ios_swift_tests:
+ uses: ./.github/workflows/ios_swift_tests.yml
+
+ platformio_tests:
+ uses: ./.github/workflows/platformio_tests.yml
+
+ bazel_tests:
+ uses: ./.github/workflows/bazel.yml
+
+ cifuzz:
+ uses: ./.github/workflows/cifuzz.yml
+
+ binary_packages:
+ needs: [compiler_tests, simulator_tests, python2_tests, ios_swift_tests, platformio_tests, cifuzz]
+ uses: ./.github/workflows/binary_packages.yml
+
+ pypi_publish:
+ needs: binary_packages
+ uses: ./.github/workflows/pypi_publish.yml
+ secrets:
+ PYPI_API_KEY: ${{ secrets.PYPI_API_KEY }}
+
diff --git a/vendor/nanopb/.gitignore b/vendor/nanopb/.gitignore
index 25cbf9a4..4f4d38dc 100644
--- a/vendor/nanopb/.gitignore
+++ b/vendor/nanopb/.gitignore
@@ -9,6 +9,7 @@
*_pb2.py
*~
*.tar.gz
+*.swp
.sconsign.dblite
config.log
.sconf_temp
@@ -29,3 +30,4 @@ generator/nanopb_pb2.pyc
!generator-bin/**/*
bazel-*
extra/poetry/build
+build/
diff --git a/vendor/nanopb/.travis.yml b/vendor/nanopb/.travis.yml
deleted file mode 100644
index 0f9775c2..00000000
--- a/vendor/nanopb/.travis.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-language: c
-dist: focal
-
-# Test using both gcc and clang
-env:
- - CC=gcc CXX=g++
- - CC=clang CXX=clang++
-
-addons:
- apt:
- packages:
- - scons
- - python3
- - python3-protobuf
- - protobuf-compiler
-
-
-before_install:
- - export PATH=$HOME/.local/bin:$HOME/protobuf/bin:$PATH
- - export MAKEFLAGS=-j$(nproc)
- - $CC --version
- - $CXX --version
- - python --version
- - lsb_release -a
-
-script:
- - pushd tests && scons CC=$CC CXX=$CXX && popd
-
-notifications:
- email:
- recipients:
- - jpa@travis.mail.kapsi.fi
diff --git a/vendor/nanopb/AUTHORS.txt b/vendor/nanopb/AUTHORS.txt
index 465f1071..94efa86d 100644
--- a/vendor/nanopb/AUTHORS.txt
+++ b/vendor/nanopb/AUTHORS.txt
@@ -63,9 +63,7 @@ Joshua Salzedo
Adam Klama
Anton Matosov
berni155
-bolind
David Lin
-dch
devjoa
Evan Fisher
Fay
@@ -84,7 +82,7 @@ Vitali Lovich
Vojtěch Boček
Wael Nasreddine
wangli28
-Zukaitis
+Gediminas Žukaitis
Alex Pacini
Cong
kurddt
@@ -98,3 +96,56 @@ leabut
Angel ILIEV
Jakub Tymejczyk
Matthew Simmons
+Anthony Pesch
+Avik De
+Conrad Wood
+David Sabatie
+Sebastian Stockhammer
+Gil Shapira
+Ian Frosst
+Ingo Kresse
+Ivan Zrno
+Jonathan Seilkopf
+Karl Ljungkvist
+Mathis Logemann
+Oleg Dolgy <60554929+odolgy@users.noreply.github.com>
+Pavel Sokolov
+Slavey Karadzhov
+Tobias Nießen
+Christian Balcom
+Christopher Hughes <67643395+chughes-pika@users.noreply.github.com>
+Greg Balke
+Jussi Keränen
+Krzysztof Rosinski
+Nathaniel Brough
+Sean Kahler
+Valerii Koval
+Armando Montanez
+Brian Yuan
+Hans Binderup
+Isaac Torres
+Jerry Chen
+jheaff1
+Joseph Duchesne
+Josh Strohminger
+Krishna Ersson <2743723+kersson@users.noreply.github.com>
+Mark
+Nick St.Pierre <120039901+nicholas-stpierre-simplisafe@users.noreply.github.com>
+philippe44
+Richard Patel
+Tilen Majerle
+Tobias Thiel
+Adrian Böckenkamp
+Antony Male
+Chi-Ju Wu
+Georgii Surkov
+Jan Dorniak
+Joseph Chen
+Marco Nilsson
+Nick Cooke
+Pieter De Gendt
+Reinis Veips
+Robin Kastberg
+Ted Pudlik
+Zach Deibert
+Mark Schulte
diff --git a/vendor/nanopb/BUILD.bazel b/vendor/nanopb/BUILD.bazel
index becdc574..b98441a3 100644
--- a/vendor/nanopb/BUILD.bazel
+++ b/vendor/nanopb/BUILD.bazel
@@ -1,9 +1,20 @@
-licenses(["notice"])
+load("@bazel_skylib//rules:copy_file.bzl", "copy_file")
-exports_files(["LICENSE.txt"])
+# Note: if you are still using WORKSPACE, you will need to patch this file to use the following instead
+# load("@python_3_11//:defs.bzl", "py_binary")
+load("@python_versions//3.11:defs.bzl", "py_binary")
+load("@rules_proto//proto:defs.bzl", "proto_library")
+load("@rules_proto_grpc//:defs.bzl", "proto_plugin")
+load("@rules_python//python:proto.bzl", "py_proto_library")
+load("@rules_python//python/pip_install:requirements.bzl", "compile_pip_requirements")
+load("//extra/bazel:nanopb_cc_proto_library.bzl", "cc_nanopb_proto_library")
package(default_visibility = ["//visibility:public"])
+licenses(["notice"])
+
+exports_files(["LICENSE.txt"])
+
cc_library(
name = "nanopb",
srcs = [
@@ -19,3 +30,95 @@ cc_library(
],
visibility = ["//visibility:public"],
)
+
+copy_file(
+ name = "protoc-gen-nanopb.py",
+ src = "generator/protoc-gen-nanopb",
+ out = "generator/protoc-gen-nanopb.py",
+ allow_symlink = True,
+)
+
+py_binary(
+ name = "protoc-gen-nanopb",
+ srcs = glob([
+ "generator/**/*.py",
+ ]) + [
+ ":protoc-gen-nanopb.py",
+ ],
+ data = glob([
+ "generator/**/*.proto",
+ ]),
+ env = {
+ "NANOPB_PB2_NO_REBUILD": "1",
+ },
+ imports = [
+ "generator",
+ ],
+ deps = [
+ ":nanopb_py_proto",
+ ],
+)
+
+proto_plugin(
+ name = "nanopb_plugin",
+ env = {
+ "NANOPB_PB2_NO_REBUILD": "1",
+ },
+ options = [
+ "--library-include-format=quote",
+ ],
+ outputs = [
+ "{protopath}.pb.h",
+ "{protopath}.pb.c",
+ ],
+ separate_options_flag = True,
+ tool = ":protoc-gen-nanopb",
+ use_built_in_shell_environment = False,
+ visibility = ["//visibility:public"],
+)
+
+proto_library(
+ name = "nanopb_proto",
+ srcs = [
+ "generator/proto/nanopb.proto",
+ ],
+ strip_import_prefix = "generator/proto/",
+ deps = ["@protobuf//:descriptor_proto"],
+)
+
+py_proto_library(
+ name = "nanopb_py_proto",
+ deps = [":nanopb_proto"],
+)
+
+cc_nanopb_proto_library(
+ name = "test_compilation",
+ protos = ["@protobuf//:descriptor_proto"],
+ visibility = ["//visibility:private"],
+)
+
+proto_library(
+ name = "all_types_proto",
+ srcs = ["tests/alltypes/alltypes.proto"],
+)
+
+cc_nanopb_proto_library(
+ name = "all_types_nanopb",
+ nanopb_options_files = ["tests/alltypes/alltypes.options"],
+ protos = [":all_types_proto"],
+ visibility = ["//visibility:private"],
+)
+
+cc_test(
+ name = "bazel_options_support",
+ srcs = ["tests/bazel_options_support/bazel_options_support.cc"],
+ deps = [":all_types_nanopb"],
+)
+
+# Run `bazel run //:requirements.update` if you want to update the requirements.
+compile_pip_requirements(
+ name = "requirements",
+ extra_args = ["--allow-unsafe"],
+ requirements_in = "extra/requirements.txt",
+ requirements_txt = "extra/requirements_lock.txt",
+)
diff --git a/vendor/nanopb/CHANGELOG.txt b/vendor/nanopb/CHANGELOG.txt
index 05596461..3641ea75 100644
--- a/vendor/nanopb/CHANGELOG.txt
+++ b/vendor/nanopb/CHANGELOG.txt
@@ -1,3 +1,115 @@
+nanopb-0.4.9.1 (2024-12-01)
+ Fix memory not released on error return from pb_decode_ex() (GHSA-xwqq-qxmw-hj5r)
+ Fix deprecated MakeClass() call in generator (#1015)
+ Fix compiler error with enums and --c-style (#1014)
+ Fix version conflict with bazel build rules (#1034)
+
+nanopb-0.4.9 (2024-09-19)
+ Fix problems with enum intsize introduced in 0.4.8 (#961)
+ Fix Python pkg_resources deprecation (#887)
+ Add option to discard deprecated fields (#997)
+ Use fallback_type when breaking circular dependencies (#996)
+ Discard autogenerated map entry types if the field is skipped (#956)
+ Use pb_byte_t = uint8_t when available (#916)
+ Add enum_validate option (#988, #991)
+ Add check_return attribute for IAR compiler (#965)
+ Add label_override field option (#962, #963)
+ Add Zephyr module specification (#946)
+ Add Apple privacy manifest (#938)
+ Bazel rule improvements, including bzlmod migration (#927, #950)
+ CMake build rule improvements (#936, #939, #942, #958, #968, #971, #974, #978, #1003)
+ Improve C++ descriptors (#931, #933)
+ Test case improvements (#928)
+ Update dependency package versions (#924, #959, #986, #998, #1010)
+ Documentation improvements (#319, #710, #999)
+
+nanopb-0.4.8 (2023-11-11)
+ Fix name mangling with dependent proto files (#892, #899)
+ Fix initializer macros for custom callback datatype (#806)
+ Use UTF-8 encoding for generated files irrespective of locale (#868)
+ Add integer size overrides for fixed width types (#897)
+ Add ability to specify size of generated enums (#867)
+ Provide define for largest message in file (#893)
+ Automatically break circular dependencies (#881)
+ Remove pkg_resources import to avoid deprecation warning (#887)
+ Use static_assert keyword on MSVC in C89 mode (#843)
+ platformio: Allow substitutions in custom_nanopb_protos (#882)
+ CMakeLists: Add missing __init__.py to generator install (#841)
+ CMakeLists: Add nanopb_PYTHON_INSTDIR_OVERRIDE (#875)
+ CMakeLists: Rename installation directories (#845, #907)
+ FindNanopb.cmake: Fix regression bug with relative include path (#888)
+ FindNanopb.cmake: Ensure generator files are updated when cached (#847)
+ FindNanopb.cmake: Fix generator call on Windows (#874)
+ Bazel: multiple build rule fixes and updates (#855, #891, #904, #906, #919)
+
+nanopb-0.4.7 (2022-12-11)
+ Fix comments generation for submessages (#788)
+ Fix handling of spaces in arguments passed through protoc (#810)
+ Fix problems with multiple files and mangle_names option (#783, #820)
+ Fix generator error when using M_STRIP_PACKAGE without package name (#795)
+ Fix compilation error with fixed size array and FT_POINTER (#630)
+ Fix wrong format in Python Poetry project file (#811)
+ Fix unnecessary generator message when using --quiet (#831)
+ Fix enum_to_string with C++ (#838)
+ Fix /* */ inside .proto file comment
+ Workaround python-protobuf version issues (#787)
+ Safeguard substraction in pb_read() with custom streams (#697)
+ Always include pb_release() as function, instead of macro. (#802)
+ Allow using = instead of : with generator option -s
+ Allow specifying include format without special characters (#810)
+ Allow including headers from inside of extern C (#814)
+ Add option NANOPB_PB2_TEMP_DIR to store nanopb_pb2.py in a temporary dir (#601)
+ Add compile-time error message for when PB_FIELD_32BIT is needed (#680, #827)
+ Add --c-style command line option for naming style (#199, #533, #791)
+ Add --protoc-opt to nanopb_generator.py (#628)
+ Add ENUMTYPE convenience macros (#803)
+ Add Bazel build rules (#360, #500)
+ Generator: keep order of messages when possible
+ Test case improvements (#792)
+ PlatformIO build rule improvements (#808, #809, #819, #834, #839, #840)
+ CMake build rule improvements (#822)
+ CMakeLists: use protoc wrapper script by default (#769)
+
+nanopb-0.4.6 (2022-05-30)
+ Fix passing of error message from substream callback (#703)
+ Fix comments going to wrong member variables (#701)
+ Fix regression in 0.4.3 where generator did not find all dependencies (#720)
+ Fix FindNanopb.cmake not finding options file (#659)
+ Fix double-definition errors with size_union (#692)
+ Fix generator error with same inner message name (#746)
+ Fix infinite recursion in generator/protoc script (#762)
+ Fix unicode comment handling for Python 2 (#740)
+ Fix compiler warnings with PB_BUFFER_ONLY (#717)
+ Fix options dependency in nanopb.mk (#666)
+ Fix handling of filenames with dot in them in FindNanopb.cmake (#756)
+ Add fallback_type option (#772, #773)
+ Use C11 static assert mechanism by default (#761, #766)
+ Use 'static_assert' keyword for iar (#679)
+ Explicitly check for pItem == NULL to satisfy Xcode analyzer (#667, #674)
+ Support --proto-path as alias to -I (#749)
+ Refactor name mangling to separate class, improve error messages (#735)
+ Move PB_WT_PACKED definition to the header to fix compiler warnings (#671)
+ FindNanopb.cmake: use --nanopb_opt for option passing by default (#752)
+ FindNanopb.cmake: Add option NANOPB_GENERATE_CPP_STANDALONE (#741)
+ FindNanopb.cmake: Add PROTOC_OPTIONS variable (#768, #771)
+ CMakeLists: add build interface for using as a submodule (#669)
+ CMakeLists: fix error with nanopb_BUILD_GENERATOR=OFF (#764)
+ CMakeLists: make more uniform (#676)
+ CMakeLists: Fix uninitialized PYTHON_INSTDIR (#652)
+ Clean up CMake examples (#741)
+ Rebuild nanopb_pb2.py and print version numbers on import failure (#733, #742)
+ Use memcpy instead of iterating on buf_read/write (#751)
+ Add generator support for PlatformIO (#718)
+ Add clean target to generator/proto/Makefile (#681)
+ Windows .bats: use standard python invocation instead of py.exe launcher (#657)
+ Fix problems running tests with newer SCons version
+ Improve handling of varint overflows
+ Improve optimization for little-endian platforms
+
+NOTE: During development, prereleases were published on PlatformIO registry
+as versions 0.4.6 - 0.4.6.3. The version 0.4.6.4 on PlatformIO corresponds
+to the real final 0.4.6 release.
+
nanopb-0.4.5 (2021-03-22)
Fix invalid free() with oneof (#647, GHSA-7mv5-5mxh-qg88)
Fix unordered field numbers inside oneof causing fields to be ignored (#617)
@@ -149,6 +261,13 @@ nanopb-0.4.0 (2019-12-20)
CMake: Split nanopb_out command (#454)
CMake: install created shared library(dll) in windows to the binary folder (#447)
+nanopb-0.3.9.9 (2022-04-23)
+ Fix Xcode analyzer warnings (#667, #674)
+ Fix clang sanitizer warnings
+
+Note: there are no known functional differences between 0.3.9.8 and 0.3.9.9.
+The changes are merely to fix warnings introduced by new compiler versions.
+
nanopb-0.3.9.8 (2021-03-22)
Fix invalid free() with oneof (#647, GHSA-7mv5-5mxh-qg88)
Don't generate lines with trailing spaces (#622)
@@ -241,7 +360,7 @@ nanopb-0.3.8 (2017-03-05)
Allow overriding proto3 mode (#228)
Add optional enum->string mapping function (#223)
Add transitional options.proto file (#241)
- Add better error message on Python library version imcompatibility (#240)
+ Add better error message on Python library version incompatibility (#240)
Include version number in PlatformIO library.json (#222)
CMake build script changes (#236, #237)
Change download links to https
diff --git a/vendor/nanopb/CMakeLists.txt b/vendor/nanopb/CMakeLists.txt
index d9c5e5e1..7d15e22f 100644
--- a/vendor/nanopb/CMakeLists.txt
+++ b/vendor/nanopb/CMakeLists.txt
@@ -1,8 +1,8 @@
-cmake_minimum_required(VERSION 2.8.12)
+cmake_minimum_required(VERSION 3.14.0)
-project(nanopb C)
+project(nanopb VERSION 0.4.9.1 LANGUAGES C)
-set(nanopb_VERSION_STRING nanopb-0.4.5)
+set(nanopb_VERSION_STRING ${PROJECT_NAME}-${${PROJECT_NAME}_VERSION}-dev)
set(nanopb_SOVERSION 0)
string(REPLACE "nanopb-" "" nanopb_VERSION ${nanopb_VERSION_STRING})
@@ -14,8 +14,12 @@ option(nanopb_BUILD_RUNTIME "Build the headers and libraries needed at runtime"
option(nanopb_BUILD_GENERATOR "Build the protoc plugin for code generation" ON)
option(nanopb_MSVC_STATIC_RUNTIME "Link static runtime libraries" ON)
-if(NOT DEFINED nanopb_PROTOC_PATH)
- set(nanopb_PROTOC_PATH "protoc")
+set(nanopb_PYTHON_INSTDIR_OVERRIDE "" CACHE PATH "Override the default python installation directory with the given path")
+
+find_program(nanopb_PROTOC_PATH protoc PATHS generator-bin generator NO_DEFAULT_PATH)
+find_program(nanopb_PROTOC_PATH protoc)
+if(NOT EXISTS ${nanopb_PROTOC_PATH})
+ message(FATAL_ERROR "protoc compiler not found")
endif()
if(NOT DEFINED CMAKE_DEBUG_POSTFIX)
@@ -38,49 +42,92 @@ if(NOT DEFINED CMAKE_INSTALL_CMAKEDIR)
set(CMAKE_INSTALL_CMAKEDIR "${CMAKE_INSTALL_LIBDIR}/cmake/nanopb")
endif()
+# Determine Python module installation path
+if (NOT nanopb_PYTHON_INSTDIR_OVERRIDE)
+ find_package(Python REQUIRED COMPONENTS Interpreter)
+ file(TO_CMAKE_PATH "${Python_SITELIB}" PYTHON_INSTDIR)
+else()
+ set(PYTHON_INSTDIR ${nanopb_PYTHON_INSTDIR_OVERRIDE})
+endif()
+message(STATUS "Python install dir: ${PYTHON_INSTDIR}")
+
+# Package nanopb generator as Python module 'nanopb'
if(nanopb_BUILD_GENERATOR)
- set(generator_protos nanopb)
-
- find_package(Python REQUIRED)
- execute_process(
- COMMAND ${Python_EXECUTABLE} -c
- "from distutils import sysconfig; print(sysconfig.get_python_lib(prefix=''))"
- OUTPUT_VARIABLE PYTHON_INSTDIR
- OUTPUT_STRIP_TRAILING_WHITESPACE
+ # Copy Python code files related to the generator
+ add_custom_target(nanopb_generator ALL
+ COMMAND ${CMAKE_COMMAND} -E make_directory
+ ${PROJECT_BINARY_DIR}/nanopb/generator/proto
+
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different
+ ${PROJECT_SOURCE_DIR}/generator/proto/_utils.py
+ ${PROJECT_SOURCE_DIR}/generator/proto/__init__.py
+ ${PROJECT_SOURCE_DIR}/generator/proto/nanopb.proto
+ ${PROJECT_BINARY_DIR}/nanopb/generator/proto
+
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different
+ ${PROJECT_SOURCE_DIR}/generator/nanopb_generator.py
+ ${PROJECT_SOURCE_DIR}/generator/__init__.py
+ ${PROJECT_BINARY_DIR}/nanopb/generator
+
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different
+ ${PROJECT_SOURCE_DIR}/generator/__init__.py
+ ${PROJECT_BINARY_DIR}/nanopb
+
+ COMMAND ${nanopb_PROTOC_PATH}
+ --python_out=${PROJECT_BINARY_DIR}/nanopb/generator/proto
+ -I${PROJECT_SOURCE_DIR}/generator/proto
+ ${PROJECT_SOURCE_DIR}/generator/proto/nanopb.proto
)
- foreach(generator_proto IN LISTS generator_protos)
- string(REGEX REPLACE "([^;]+)" "${PROJECT_SOURCE_DIR}/generator/proto/\\1.proto" generator_proto_file "${generator_proto}")
- string(REGEX REPLACE "([^;]+)" "\\1_pb2.py" generator_proto_py_file "${generator_proto}")
- add_custom_command(
- OUTPUT ${generator_proto_py_file}
- COMMAND ${nanopb_PROTOC_PATH} --python_out=${PROJECT_BINARY_DIR} -I${PROJECT_SOURCE_DIR}/generator/proto ${generator_proto_file}
- DEPENDS ${generator_proto_file}
- )
- add_custom_target("generate_${generator_proto_py_file}" ALL DEPENDS ${generator_proto_py_file})
- install(
- FILES ${PROJECT_BINARY_DIR}/${generator_proto_py_file}
- ${generator_proto_file}
- DESTINATION ${PYTHON_INSTDIR}/proto/
- )
- endforeach()
-endif()
-install( FILES generator/proto/_utils.py
- DESTINATION ${PYTHON_INSTDIR}/proto/ )
-if( WIN32 )
+ # Install Python module files
+ install(
+ DIRECTORY ${PROJECT_BINARY_DIR}/nanopb
+ DESTINATION ${PYTHON_INSTDIR}
+ FILES_MATCHING
+ PATTERN *.py
+ PATTERN *.proto
+ PATTERN __pycache__ EXCLUDE
+ )
+
+ # Generate a wrapper script that calls nanopb.generator Python module when invoked
+ configure_file(
+ extra/script_wrappers/nanopb_generator.py.in
+ ${PROJECT_BINARY_DIR}/nanopb_generator.py
+ )
+ install(
+ PROGRAMS ${PROJECT_BINARY_DIR}/nanopb_generator.py
+ DESTINATION ${CMAKE_INSTALL_BINDIR}
+ )
+
+ # Install shell/bat script wrappers for invoking nanopb_generator.py.
+ # protoc-gen-nanopb is automatically used by protoc when --nanopb_out= option is used.
+ if(WIN32)
+ # Include the full path to Python executable in Windows .bat scripts, as it is not in PATH on all systems
+ file(READ generator/protoc-gen-nanopb.bat FILE_CONTENTS)
+ string(REPLACE "python" ${Python_EXECUTABLE} FILE_CONTENTS "${FILE_CONTENTS}")
+ file(WRITE ${PROJECT_BINARY_DIR}/protoc-gen-nanopb.bat "${FILE_CONTENTS}")
+
+ file(READ generator/nanopb_generator.bat FILE_CONTENTS)
+ string(REPLACE "python" ${Python_EXECUTABLE} FILE_CONTENTS "${FILE_CONTENTS}")
+ file(WRITE ${PROJECT_BINARY_DIR}/nanopb_generator.bat "${FILE_CONTENTS}")
+
install(
- PROGRAMS generator/nanopb_generator.py
- generator/protoc-gen-nanopb.bat
+ PROGRAMS
+ ${PROJECT_BINARY_DIR}/protoc-gen-nanopb.bat
+ ${PROJECT_BINARY_DIR}/nanopb_generator.bat
DESTINATION ${CMAKE_INSTALL_BINDIR}
)
-else()
+ else()
+ # Linux/Mac scripts currently use python3 from PATH
install(
- PROGRAMS generator/nanopb_generator.py
- generator/protoc-gen-nanopb
+ PROGRAMS
+ generator/protoc-gen-nanopb
+ generator/nanopb_generator
DESTINATION ${CMAKE_INSTALL_BINDIR}
)
+ endif()
endif()
if(nanopb_BUILD_RUNTIME)
@@ -97,10 +144,11 @@ if(nanopb_BUILD_RUNTIME)
SOVERSION ${nanopb_SOVERSION})
install(TARGETS protobuf-nanopb EXPORT nanopb-targets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
- LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
target_include_directories(protobuf-nanopb INTERFACE
- $
+ $
+ $
)
endif()
@@ -118,7 +166,8 @@ if(nanopb_BUILD_RUNTIME)
install(TARGETS protobuf-nanopb-static EXPORT nanopb-targets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
target_include_directories(protobuf-nanopb-static INTERFACE
- $
+ $
+ $
)
endif()
@@ -134,5 +183,5 @@ if(nanopb_BUILD_RUNTIME)
DESTINATION ${CMAKE_INSTALL_CMAKEDIR})
install(FILES pb.h pb_common.h pb_encode.h pb_decode.h
- DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
+ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/nanopb)
endif()
diff --git a/vendor/nanopb/MODULE.bazel b/vendor/nanopb/MODULE.bazel
new file mode 100644
index 00000000..8aaa68e8
--- /dev/null
+++ b/vendor/nanopb/MODULE.bazel
@@ -0,0 +1,28 @@
+module(
+ name = "nanopb",
+ version = "0.4.9",
+)
+
+bazel_dep(name = "bazel_skylib", version = "1.7.1")
+bazel_dep(name = "rules_cc", version = "0.0.10")
+bazel_dep(name = "rules_python", version = "0.35.0")
+bazel_dep(name = "rules_proto", version = "6.0.2")
+bazel_dep(name = "protobuf", version = "28.2")
+bazel_dep(name = "rules_proto_grpc", version = "5.0.0")
+
+pip = use_extension("@rules_python//python/extensions:pip.bzl", "pip")
+
+python = use_extension("@rules_python//python/extensions:python.bzl", "python")
+
+PYTHON_VERSION = "3.11"
+
+# No need for this since rules_python already creates our version.
+# python.toolchain(python_version = PYTHON_VERSION)
+use_repo(python, "python_versions")
+
+use_repo(pip, "nanopb_pypi")
+pip.parse(
+ hub_name = "nanopb_pypi",
+ python_version = PYTHON_VERSION,
+ requirements_lock = "@nanopb//:extra/requirements_lock.txt",
+)
diff --git a/vendor/nanopb/MODULE.bazel.lock b/vendor/nanopb/MODULE.bazel.lock
new file mode 100644
index 00000000..b018b559
--- /dev/null
+++ b/vendor/nanopb/MODULE.bazel.lock
@@ -0,0 +1,166 @@
+{
+ "lockFileVersion": 11,
+ "registryFileHashes": {
+ "https://bcr.bazel.build/bazel_registry.json": "8a28e4aff06ee60aed2a8c281907fb8bcbf3b753c91fb5a5c57da3215d5b3497",
+ "https://bcr.bazel.build/modules/abseil-cpp/20210324.2/MODULE.bazel": "7cd0312e064fde87c8d1cd79ba06c876bd23630c83466e9500321be55c96ace2",
+ "https://bcr.bazel.build/modules/abseil-cpp/20211102.0/MODULE.bazel": "70390338f7a5106231d20620712f7cccb659cd0e9d073d1991c038eb9fc57589",
+ "https://bcr.bazel.build/modules/abseil-cpp/20230125.1/MODULE.bazel": "89047429cb0207707b2dface14ba7f8df85273d484c2572755be4bab7ce9c3a0",
+ "https://bcr.bazel.build/modules/abseil-cpp/20230802.0.bcr.1/MODULE.bazel": "1c8cec495288dccd14fdae6e3f95f772c1c91857047a098fad772034264cc8cb",
+ "https://bcr.bazel.build/modules/abseil-cpp/20230802.0.bcr.1/source.json": "14892cc698e02ffedf4967546e6bedb7245015906888d3465fcf27c90a26da10",
+ "https://bcr.bazel.build/modules/apple_support/1.5.0/MODULE.bazel": "50341a62efbc483e8a2a6aec30994a58749bd7b885e18dd96aa8c33031e558ef",
+ "https://bcr.bazel.build/modules/apple_support/1.5.0/source.json": "eb98a7627c0bc486b57f598ad8da50f6625d974c8f723e9ea71bd39f709c9862",
+ "https://bcr.bazel.build/modules/bazel_features/1.11.0/MODULE.bazel": "f9382337dd5a474c3b7d334c2f83e50b6eaedc284253334cf823044a26de03e8",
+ "https://bcr.bazel.build/modules/bazel_features/1.11.0/source.json": "c9320aa53cd1c441d24bd6b716da087ad7e4ff0d9742a9884587596edfe53015",
+ "https://bcr.bazel.build/modules/bazel_features/1.4.1/MODULE.bazel": "e45b6bb2350aff3e442ae1111c555e27eac1d915e77775f6fdc4b351b758b5d7",
+ "https://bcr.bazel.build/modules/bazel_features/1.9.0/MODULE.bazel": "885151d58d90d8d9c811eb75e3288c11f850e1d6b481a8c9f766adee4712358b",
+ "https://bcr.bazel.build/modules/bazel_features/1.9.1/MODULE.bazel": "8f679097876a9b609ad1f60249c49d68bfab783dd9be012faf9d82547b14815a",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.0.3/MODULE.bazel": "bcb0fd896384802d1ad283b4e4eb4d718eebd8cb820b0a2c3a347fb971afd9d8",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.2.0/MODULE.bazel": "44fe84260e454ed94ad326352a698422dbe372b21a1ac9f3eab76eb531223686",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.2.1/MODULE.bazel": "f35baf9da0efe45fa3da1696ae906eea3d615ad41e2e3def4aeb4e8bc0ef9a7a",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.3.0/MODULE.bazel": "20228b92868bf5cfc41bda7afc8a8ba2a543201851de39d990ec957b513579c5",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.4.1/MODULE.bazel": "a0dcb779424be33100dcae821e9e27e4f2901d9dfd5333efe5ac6a8d7ab75e1d",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.5.0/MODULE.bazel": "32880f5e2945ce6a03d1fbd588e9198c0a959bb42297b2cfaf1685b7bc32e138",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.6.1/MODULE.bazel": "8fdee2dbaace6c252131c00e1de4b165dc65af02ea278476187765e1a617b917",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.7.1/MODULE.bazel": "3120d80c5861aa616222ec015332e5f8d3171e062e3e804a2a0253e1be26e59b",
+ "https://bcr.bazel.build/modules/bazel_skylib/1.7.1/source.json": "f121b43eeefc7c29efbd51b83d08631e2347297c95aac9764a701f2a6a2bb953",
+ "https://bcr.bazel.build/modules/buildozer/7.1.2/MODULE.bazel": "2e8dd40ede9c454042645fd8d8d0cd1527966aa5c919de86661e62953cd73d84",
+ "https://bcr.bazel.build/modules/buildozer/7.1.2/source.json": "c9028a501d2db85793a6996205c8de120944f50a0d570438fcae0457a5f9d1f8",
+ "https://bcr.bazel.build/modules/googletest/1.11.0/MODULE.bazel": "3a83f095183f66345ca86aa13c58b59f9f94a2f81999c093d4eeaa2d262d12f4",
+ "https://bcr.bazel.build/modules/googletest/1.14.0/MODULE.bazel": "cfbcbf3e6eac06ef9d85900f64424708cc08687d1b527f0ef65aa7517af8118f",
+ "https://bcr.bazel.build/modules/googletest/1.14.0/source.json": "2478949479000fdd7de9a3d0107ba2c85bb5f961c3ecb1aa448f52549ce310b5",
+ "https://bcr.bazel.build/modules/platforms/0.0.10/MODULE.bazel": "8cb8efaf200bdeb2150d93e162c40f388529a25852b332cec879373771e48ed5",
+ "https://bcr.bazel.build/modules/platforms/0.0.10/source.json": "f22828ff4cf021a6b577f1bf6341cb9dcd7965092a439f64fc1bb3b7a5ae4bd5",
+ "https://bcr.bazel.build/modules/platforms/0.0.4/MODULE.bazel": "9b328e31ee156f53f3c416a64f8491f7eb731742655a47c9eec4703a71644aee",
+ "https://bcr.bazel.build/modules/platforms/0.0.5/MODULE.bazel": "5733b54ea419d5eaf7997054bb55f6a1d0b5ff8aedf0176fef9eea44f3acda37",
+ "https://bcr.bazel.build/modules/platforms/0.0.6/MODULE.bazel": "ad6eeef431dc52aefd2d77ed20a4b353f8ebf0f4ecdd26a807d2da5aa8cd0615",
+ "https://bcr.bazel.build/modules/platforms/0.0.7/MODULE.bazel": "72fd4a0ede9ee5c021f6a8dd92b503e089f46c227ba2813ff183b71616034814",
+ "https://bcr.bazel.build/modules/platforms/0.0.8/MODULE.bazel": "9f142c03e348f6d263719f5074b21ef3adf0b139ee4c5133e2aa35664da9eb2d",
+ "https://bcr.bazel.build/modules/platforms/0.0.9/MODULE.bazel": "4a87a60c927b56ddd67db50c89acaa62f4ce2a1d2149ccb63ffd871d5ce29ebc",
+ "https://bcr.bazel.build/modules/protobuf/21.7/MODULE.bazel": "a5a29bb89544f9b97edce05642fac225a808b5b7be74038ea3640fae2f8e66a7",
+ "https://bcr.bazel.build/modules/protobuf/23.1/MODULE.bazel": "88b393b3eb4101d18129e5db51847cd40a5517a53e81216144a8c32dfeeca52a",
+ "https://bcr.bazel.build/modules/protobuf/24.4/MODULE.bazel": "7bc7ce5f2abf36b3b7b7c8218d3acdebb9426aeb35c2257c96445756f970eb12",
+ "https://bcr.bazel.build/modules/protobuf/24.4/source.json": "ace4b8c65d4cfe64efe544f09fc5e5df77faf3a67fbb29c5341e0d755d9b15d6",
+ "https://bcr.bazel.build/modules/protobuf/3.19.0/MODULE.bazel": "6b5fbb433f760a99a22b18b6850ed5784ef0e9928a72668b66e4d7ccd47db9b0",
+ "https://bcr.bazel.build/modules/protobuf/3.19.6/MODULE.bazel": "9233edc5e1f2ee276a60de3eaa47ac4132302ef9643238f23128fea53ea12858",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.1/MODULE.bazel": "cb2aa0747f84c6c3a78dad4e2049c154f08ab9d166b1273835a8174940365647",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.2/MODULE.bazel": "6915987c90970493ab97393024c156ea8fb9f3bea953b2f3ec05c34f19b5695c",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.6/MODULE.bazel": "abf360251023dfe3efcef65ab9d56beefa8394d4176dd29529750e1c57eaa33f",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.8/MODULE.bazel": "964c85c82cfeb6f3855e6a07054fdb159aced38e99a5eecf7bce9d53990afa3e",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.9/MODULE.bazel": "836e76439f354b89afe6a911a7adf59a6b2518fafb174483ad78a2a2fde7b1c5",
+ "https://bcr.bazel.build/modules/rules_cc/0.0.9/source.json": "1f1ba6fea244b616de4a554a0f4983c91a9301640c8fe0dd1d410254115c8430",
+ "https://bcr.bazel.build/modules/rules_java/4.0.0/MODULE.bazel": "5a78a7ae82cd1a33cef56dc578c7d2a46ed0dca12643ee45edbb8417899e6f74",
+ "https://bcr.bazel.build/modules/rules_java/7.1.0/MODULE.bazel": "30d9135a2b6561c761bd67bd4990da591e6bdc128790ce3e7afd6a3558b2fb64",
+ "https://bcr.bazel.build/modules/rules_java/7.6.1/MODULE.bazel": "2f14b7e8a1aa2f67ae92bc69d1ec0fa8d9f827c4e17ff5e5f02e91caa3b2d0fe",
+ "https://bcr.bazel.build/modules/rules_java/7.6.1/source.json": "8f3f3076554e1558e8e468b2232991c510ecbcbed9e6f8c06ac31c93bcf38362",
+ "https://bcr.bazel.build/modules/rules_jvm_external/4.4.2/MODULE.bazel": "a56b85e418c83eb1839819f0b515c431010160383306d13ec21959ac412d2fe7",
+ "https://bcr.bazel.build/modules/rules_jvm_external/5.1/MODULE.bazel": "33f6f999e03183f7d088c9be518a63467dfd0be94a11d0055fe2d210f89aa909",
+ "https://bcr.bazel.build/modules/rules_jvm_external/5.1/source.json": "5abb45cc9beb27b77aec6a65a11855ef2b55d95dfdc358e9f312b78ae0ba32d5",
+ "https://bcr.bazel.build/modules/rules_license/0.0.3/MODULE.bazel": "627e9ab0247f7d1e05736b59dbb1b6871373de5ad31c3011880b4133cafd4bd0",
+ "https://bcr.bazel.build/modules/rules_license/0.0.7/MODULE.bazel": "088fbeb0b6a419005b89cf93fe62d9517c0a2b8bb56af3244af65ecfe37e7d5d",
+ "https://bcr.bazel.build/modules/rules_license/0.0.7/source.json": "355cc5737a0f294e560d52b1b7a6492d4fff2caf0bef1a315df5a298fca2d34a",
+ "https://bcr.bazel.build/modules/rules_pkg/0.7.0/MODULE.bazel": "df99f03fc7934a4737122518bb87e667e62d780b610910f0447665a7e2be62dc",
+ "https://bcr.bazel.build/modules/rules_pkg/0.7.0/source.json": "c2557066e0c0342223ba592510ad3d812d4963b9024831f7f66fd0584dd8c66c",
+ "https://bcr.bazel.build/modules/rules_proto/4.0.0/MODULE.bazel": "a7a7b6ce9bee418c1a760b3d84f83a299ad6952f9903c67f19e4edd964894e06",
+ "https://bcr.bazel.build/modules/rules_proto/5.3.0-21.7/MODULE.bazel": "e8dff86b0971688790ae75528fe1813f71809b5afd57facb44dad9e8eca631b7",
+ "https://bcr.bazel.build/modules/rules_proto/6.0.0-rc1/MODULE.bazel": "1e5b502e2e1a9e825eef74476a5a1ee524a92297085015a052510b09a1a09483",
+ "https://bcr.bazel.build/modules/rules_proto/6.0.0/MODULE.bazel": "b531d7f09f58dce456cd61b4579ce8c86b38544da75184eadaf0a7cb7966453f",
+ "https://bcr.bazel.build/modules/rules_proto/6.0.2/MODULE.bazel": "ce916b775a62b90b61888052a416ccdda405212b6aaeb39522f7dc53431a5e73",
+ "https://bcr.bazel.build/modules/rules_proto/6.0.2/source.json": "17a2e195f56cb28d6bbf763e49973d13890487c6945311ed141e196fb660426d",
+ "https://bcr.bazel.build/modules/rules_proto_grpc/5.0.0/MODULE.bazel": "aad0151be788911f9736f413c423342d781a9dc19f35d3373c8581c828a387f4",
+ "https://bcr.bazel.build/modules/rules_proto_grpc/5.0.0/source.json": "f35378dee74074450e6bb0755febb17879bddb705fe57270a213c8ee9c658a46",
+ "https://bcr.bazel.build/modules/rules_python/0.10.2/MODULE.bazel": "cc82bc96f2997baa545ab3ce73f196d040ffb8756fd2d66125a530031cd90e5f",
+ "https://bcr.bazel.build/modules/rules_python/0.22.1/MODULE.bazel": "26114f0c0b5e93018c0c066d6673f1a2c3737c7e90af95eff30cfee38d0bbac7",
+ "https://bcr.bazel.build/modules/rules_python/0.34.0/MODULE.bazel": "1d623d026e075b78c9fde483a889cda7996f5da4f36dffb24c246ab30f06513a",
+ "https://bcr.bazel.build/modules/rules_python/0.34.0/source.json": "113116e287eec64a7d005a9db44865d810499fdc4f621e352aff58214f5ea2d8",
+ "https://bcr.bazel.build/modules/rules_python/0.4.0/MODULE.bazel": "9208ee05fd48bf09ac60ed269791cf17fb343db56c8226a720fbb1cdf467166c",
+ "https://bcr.bazel.build/modules/stardoc/0.5.1/MODULE.bazel": "1a05d92974d0c122f5ccf09291442580317cdd859f07a8655f1db9a60374f9f8",
+ "https://bcr.bazel.build/modules/stardoc/0.5.3/MODULE.bazel": "c7f6948dae6999bf0db32c1858ae345f112cacf98f174c7a8bb707e41b974f1c",
+ "https://bcr.bazel.build/modules/stardoc/0.5.3/source.json": "cd53fe968dc8cd98197c052db3db6d82562960c87b61e7a90ee96f8e4e0dda97",
+ "https://bcr.bazel.build/modules/toolchains_protoc/0.3.1/MODULE.bazel": "b6574a2a314cbd40cafb5ed87b03d1996e015315f80a7e33116c8b2e209cb5cf",
+ "https://bcr.bazel.build/modules/toolchains_protoc/0.3.1/source.json": "b589ee1faec4c789c680afa9d500b5ccbea25422560b8b9dc4e0e6b26471f13b",
+ "https://bcr.bazel.build/modules/upb/0.0.0-20220923-a547704/MODULE.bazel": "7298990c00040a0e2f121f6c32544bab27d4452f80d9ce51349b1a28f3005c43",
+ "https://bcr.bazel.build/modules/upb/0.0.0-20230516-61a97ef/MODULE.bazel": "c0df5e35ad55e264160417fd0875932ee3c9dda63d9fccace35ac62f45e1b6f9",
+ "https://bcr.bazel.build/modules/upb/0.0.0-20230516-61a97ef/source.json": "b2150404947339e8b947c6b16baa39fa75657f4ddec5e37272c7b11c7ab533bc",
+ "https://bcr.bazel.build/modules/zlib/1.2.11/MODULE.bazel": "07b389abc85fdbca459b69e2ec656ae5622873af3f845e1c9d80fe179f3effa0",
+ "https://bcr.bazel.build/modules/zlib/1.2.12/MODULE.bazel": "3b1a8834ada2a883674be8cbd36ede1b6ec481477ada359cd2d3ddc562340b27",
+ "https://bcr.bazel.build/modules/zlib/1.3/MODULE.bazel": "6a9c02f19a24dcedb05572b2381446e27c272cd383aed11d41d99da9e3167a72",
+ "https://bcr.bazel.build/modules/zlib/1.3/source.json": "b6b43d0737af846022636e6e255fd4a96fee0d34f08f3830e6e0bac51465c37c"
+ },
+ "selectedYankedVersions": {},
+ "moduleExtensions": {
+ "@@apple_support~//crosstool:setup.bzl%apple_cc_configure_extension": {
+ "general": {
+ "bzlTransitiveDigest": "PjIds3feoYE8SGbbIq2SFTZy3zmxeO2tQevJZNDo7iY=",
+ "usagesDigest": "aLmqbvowmHkkBPve05yyDNGN7oh7QE9kBADr3QIZTZs=",
+ "recordedFileInputs": {},
+ "recordedDirentsInputs": {},
+ "envVariables": {},
+ "generatedRepoSpecs": {
+ "local_config_apple_cc": {
+ "bzlFile": "@@apple_support~//crosstool:setup.bzl",
+ "ruleClassName": "_apple_cc_autoconf",
+ "attributes": {}
+ },
+ "local_config_apple_cc_toolchains": {
+ "bzlFile": "@@apple_support~//crosstool:setup.bzl",
+ "ruleClassName": "_apple_cc_autoconf_toolchains",
+ "attributes": {}
+ }
+ },
+ "recordedRepoMappingEntries": [
+ [
+ "apple_support~",
+ "bazel_tools",
+ "bazel_tools"
+ ]
+ ]
+ }
+ },
+ "@@platforms//host:extension.bzl%host_platform": {
+ "general": {
+ "bzlTransitiveDigest": "xelQcPZH8+tmuOHVjL9vDxMnnQNMlwj0SlvgoqBkm4U=",
+ "usagesDigest": "V1R2Y2oMxKNfx2WCWpSCaUV1WefW1o8HZGm3v1vHgY4=",
+ "recordedFileInputs": {},
+ "recordedDirentsInputs": {},
+ "envVariables": {},
+ "generatedRepoSpecs": {
+ "host_platform": {
+ "bzlFile": "@@platforms//host:extension.bzl",
+ "ruleClassName": "host_platform_repo",
+ "attributes": {}
+ }
+ },
+ "recordedRepoMappingEntries": []
+ }
+ },
+ "@@protobuf~//:non_module_deps.bzl%non_module_deps": {
+ "general": {
+ "bzlTransitiveDigest": "jsbfONl9OksDWiAs7KDFK5chH/tYI3DngdM30NKdk5Y=",
+ "usagesDigest": "eVrT3hFCIZNRuTKpfWDzSIwTi2p6U6PWbt+tNWl/Tqk=",
+ "recordedFileInputs": {},
+ "recordedDirentsInputs": {},
+ "envVariables": {},
+ "generatedRepoSpecs": {
+ "utf8_range": {
+ "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl",
+ "ruleClassName": "http_archive",
+ "attributes": {
+ "urls": [
+ "https://github.com/protocolbuffers/utf8_range/archive/de0b4a8ff9b5d4c98108bdfe723291a33c52c54f.zip"
+ ],
+ "strip_prefix": "utf8_range-de0b4a8ff9b5d4c98108bdfe723291a33c52c54f",
+ "sha256": "5da960e5e5d92394c809629a03af3c7709d2d3d0ca731dacb3a9fb4bf28f7702"
+ }
+ }
+ },
+ "recordedRepoMappingEntries": [
+ [
+ "protobuf~",
+ "bazel_tools",
+ "bazel_tools"
+ ]
+ ]
+ }
+ }
+ }
+}
diff --git a/vendor/nanopb/Package.swift b/vendor/nanopb/Package.swift
index 1c62f986..f53b7c4f 100644
--- a/vendor/nanopb/Package.swift
+++ b/vendor/nanopb/Package.swift
@@ -1,4 +1,4 @@
-// swift-tools-version:5.0
+// swift-tools-version:5.3
// The swift-tools-version declares the minimum version of Swift required to build this package.
import PackageDescription
@@ -25,6 +25,7 @@ let package = Package(
"pb_encode.h",
"pb_encode.c"
],
+ resources: [.process("spm_resources/PrivacyInfo.xcprivacy")],
publicHeadersPath: "spm_headers",
cSettings: [
.define("PB_FIELD_32BIT", to: "1"),
diff --git a/vendor/nanopb/README.md b/vendor/nanopb/README.md
index 44aa3e41..38e94a37 100644
--- a/vendor/nanopb/README.md
+++ b/vendor/nanopb/README.md
@@ -1,17 +1,19 @@
Nanopb - Protocol Buffers for Embedded Systems
==============================================
-[](https://travis-ci.com/nanopb/nanopb)
+
+
Nanopb is a small code-size Protocol Buffers implementation in ansi C. It is
especially suitable for use in microcontrollers, but fits any memory
restricted system.
* **Homepage:** https://jpa.kapsi.fi/nanopb/
+* **Git repository:** https://github.com/nanopb/nanopb/
* **Documentation:** https://jpa.kapsi.fi/nanopb/docs/
-* **Downloads:** https://jpa.kapsi.fi/nanopb/download/
* **Forum:** https://groups.google.com/forum/#!forum/nanopb
-* **Nightly builds:** https://jpa.kapsi.fi/jenkins/job/nanopb/
+* **Stable version downloads:** https://jpa.kapsi.fi/nanopb/download/
+* **Pre-release binary packages:** https://github.com/nanopb/nanopb/actions/workflows/binary_packages.yml
Using the nanopb library
@@ -43,7 +45,7 @@ The binary packages for Windows, Linux and Mac OS X should contain all necessary
dependencies, including Python, python-protobuf library and protoc. If you are
using a git checkout or a plain source distribution, you will need to install
Python separately. Once you have Python, you can install the other dependencies
-with `pip install protobuf grpcio-tools`.
+with `pip install --upgrade protobuf grpcio-tools`.
You can further customize the header generation by creating an `.options` file.
See [documentation](https://jpa.kapsi.fi/nanopb/docs/concepts.html#modifying-generator-behaviour) for details.
@@ -65,7 +67,7 @@ end in an error, the test cases were successful.
Note: Mac OS X by default aliases 'clang' as 'gcc', while not actually
supporting the same command line options as gcc does. To run tests on
-Mac OS X, use: `scons CC=clang CXX=clang`. Same way can be used to run
+Mac OS X, use: `scons CC=clang CXX=clang++`. Same way can be used to run
tests with different compilers on any platform.
For embedded platforms, there is currently support for running the tests
@@ -85,24 +87,14 @@ There exist build rules for several systems:
* **Makefiles**: `extra/nanopb.mk`, see `examples/simple`
* **CMake**: `extra/FindNanopb.cmake`, see `examples/cmake`
* **SCons**: `tests/site_scons` (generator only)
-* **Bazel**: `BUILD` in source root
+* **Bazel**: `BUILD.bazel` in source root
* **Conan**: `conanfile.py` in source root
* **PlatformIO**: https://platformio.org/lib/show/431/Nanopb
* **PyPI/pip**: https://pypi.org/project/nanopb/
+* **vcpkg**: https://vcpkg.info/port/nanopb
And also integration to platform interfaces:
* **Arduino**: http://platformio.org/lib/show/1385/nanopb-arduino
+* **Zephyr**: https://docs.zephyrproject.org/latest/services/serialization/nanopb.html
-Building nanopb - Using vcpkg
------------------------------
-
-You can download and install nanopb using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:
-
- git clone https://github.com/Microsoft/vcpkg.git
- cd vcpkg
- ./bootstrap-vcpkg.sh
- ./vcpkg integrate install
- ./vcpkg install nanopb
-
-The nanopb port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository.
diff --git a/vendor/nanopb/WORKSPACE b/vendor/nanopb/WORKSPACE
index e23271b0..e69de29b 100644
--- a/vendor/nanopb/WORKSPACE
+++ b/vendor/nanopb/WORKSPACE
@@ -1 +0,0 @@
-workspace(name = "com_github_nanopb_nanopb")
diff --git a/vendor/nanopb/build-tests/cmake_with_components/CMakeLists.txt b/vendor/nanopb/build-tests/cmake_with_components/CMakeLists.txt
new file mode 100644
index 00000000..6fe7f6cf
--- /dev/null
+++ b/vendor/nanopb/build-tests/cmake_with_components/CMakeLists.txt
@@ -0,0 +1,12 @@
+cmake_minimum_required(VERSION 2.8)
+project(NANOPB_CMAKE_SIMPLE C CXX)
+
+set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/../../extra)
+find_package(Nanopb REQUIRED COMPONENTS cpp-descriptors)
+
+nanopb_generate_cpp(TARGET proto simple.proto)
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Werror -g -O0")
+
+add_executable(simple simple.cpp)
+target_link_libraries(simple proto)
diff --git a/vendor/nanopb/build-tests/cmake_with_components/simple.cpp b/vendor/nanopb/build-tests/cmake_with_components/simple.cpp
new file mode 100644
index 00000000..7135ac28
--- /dev/null
+++ b/vendor/nanopb/build-tests/cmake_with_components/simple.cpp
@@ -0,0 +1,70 @@
+#include
+#include
+#include
+#include "simple.pb.h"
+
+int main()
+{
+ /* This is the buffer where we will store our message. */
+ uint8_t buffer[128];
+ size_t message_length;
+ bool status;
+
+ /* Encode our message */
+ {
+ /* Allocate space on the stack to store the message data.
+ *
+ * Nanopb generates simple struct definitions for all the messages.
+ * - check out the contents of simple.pb.h!
+ * It is a good idea to always initialize your structures
+ * so that you do not have garbage data from RAM in there.
+ */
+ SimpleMessage message = SimpleMessage_init_zero;
+
+ /* Create a stream that will write to our buffer. */
+ pb_ostream_t stream = pb_ostream_from_buffer(buffer, sizeof(buffer));
+
+ /* Fill in the lucky number */
+ message.lucky_number = 13;
+
+ /* Now we are ready to encode the message! */
+ status = pb_encode(&stream, nanopb::MessageDescriptor::fields(), &message);
+ message_length = stream.bytes_written;
+
+ /* Then just check for any errors.. */
+ if (!status)
+ {
+ printf("Encoding failed: %s\n", PB_GET_ERROR(&stream));
+ return 1;
+ }
+ }
+
+ /* Now we could transmit the message over network, store it in a file or
+ * wrap it to a pigeon's leg.
+ */
+
+ /* But because we are lazy, we will just decode it immediately. */
+
+ {
+ /* Allocate space for the decoded message. */
+ SimpleMessage message = SimpleMessage_init_zero;
+
+ /* Create a stream that reads from the buffer. */
+ pb_istream_t stream = pb_istream_from_buffer(buffer, message_length);
+
+ /* Now we are ready to decode the message. */
+ status = pb_decode(&stream, nanopb::MessageDescriptor::fields(), &message);
+
+ /* Check for errors... */
+ if (!status)
+ {
+ printf("Decoding failed: %s\n", PB_GET_ERROR(&stream));
+ return 1;
+ }
+
+ /* Print the data contained in the message. */
+ printf("Your lucky number was %d!\n", message.lucky_number);
+ }
+
+ return 0;
+}
diff --git a/vendor/nanopb/build-tests/cmake_with_components/simple.proto b/vendor/nanopb/build-tests/cmake_with_components/simple.proto
new file mode 100644
index 00000000..5c73a3b2
--- /dev/null
+++ b/vendor/nanopb/build-tests/cmake_with_components/simple.proto
@@ -0,0 +1,9 @@
+// A very simple protocol definition, consisting of only
+// one message.
+
+syntax = "proto2";
+
+message SimpleMessage {
+ required int32 lucky_number = 1;
+}
+
diff --git a/vendor/nanopb/build-tests/legacy_cmake_relpath/CMakeLists.txt b/vendor/nanopb/build-tests/legacy_cmake_relpath/CMakeLists.txt
new file mode 100644
index 00000000..a8abeb4f
--- /dev/null
+++ b/vendor/nanopb/build-tests/legacy_cmake_relpath/CMakeLists.txt
@@ -0,0 +1,15 @@
+cmake_minimum_required(VERSION 2.8)
+project(NANOPB_CMAKE_SIMPLE C)
+
+set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/../../extra)
+find_package(Nanopb REQUIRED)
+include_directories(${NANOPB_INCLUDE_DIRS})
+
+nanopb_generate_cpp(PROTO_SRCS PROTO_HDRS RELPATH proto
+ proto/simple.proto proto/sub/unlucky.proto)
+
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Werror -g -O0")
+
+add_executable(simple simple.c ${PROTO_SRCS} ${PROTO_HDRS})
diff --git a/vendor/nanopb/build-tests/legacy_cmake_relpath/proto/simple.proto b/vendor/nanopb/build-tests/legacy_cmake_relpath/proto/simple.proto
new file mode 100644
index 00000000..3bf4ad1d
--- /dev/null
+++ b/vendor/nanopb/build-tests/legacy_cmake_relpath/proto/simple.proto
@@ -0,0 +1,11 @@
+// A very simple protocol definition, consisting of only
+// one message.
+syntax = "proto2";
+
+import "sub/unlucky.proto";
+
+message SimpleMessage {
+ required int32 lucky_number = 1;
+ required UnluckyNumber unlucky = 2;
+}
+
diff --git a/vendor/nanopb/build-tests/legacy_cmake_relpath/proto/sub/unlucky.proto b/vendor/nanopb/build-tests/legacy_cmake_relpath/proto/sub/unlucky.proto
new file mode 100644
index 00000000..97a42c9c
--- /dev/null
+++ b/vendor/nanopb/build-tests/legacy_cmake_relpath/proto/sub/unlucky.proto
@@ -0,0 +1,5 @@
+syntax = "proto2";
+
+message UnluckyNumber {
+ required uint32 number = 1;
+}
diff --git a/vendor/nanopb/build-tests/legacy_cmake_relpath/simple.c b/vendor/nanopb/build-tests/legacy_cmake_relpath/simple.c
new file mode 100644
index 00000000..231886c2
--- /dev/null
+++ b/vendor/nanopb/build-tests/legacy_cmake_relpath/simple.c
@@ -0,0 +1,73 @@
+#include
+#include
+#include
+#include "simple.pb.h"
+
+int main()
+{
+ /* This is the buffer where we will store our message. */
+ uint8_t buffer[128];
+ size_t message_length;
+ bool status;
+
+ /* Encode our message */
+ {
+ /* Allocate space on the stack to store the message data.
+ *
+ * Nanopb generates simple struct definitions for all the messages.
+ * - check out the contents of simple.pb.h!
+ * It is a good idea to always initialize your structures
+ * so that you do not have garbage data from RAM in there.
+ */
+ SimpleMessage message = SimpleMessage_init_zero;
+
+ /* Create a stream that will write to our buffer. */
+ pb_ostream_t stream = pb_ostream_from_buffer(buffer, sizeof(buffer));
+
+ /* Fill in the lucky number */
+ message.lucky_number = 13;
+ message.unlucky.number = 42;
+
+ /* Now we are ready to encode the message! */
+ status = pb_encode(&stream, SimpleMessage_fields, &message);
+ message_length = stream.bytes_written;
+
+ /* Then just check for any errors.. */
+ if (!status)
+ {
+ printf("Encoding failed: %s\n", PB_GET_ERROR(&stream));
+ return 1;
+ }
+ }
+
+ /* Now we could transmit the message over network, store it in a file or
+ * wrap it to a pigeon's leg.
+ */
+
+ /* But because we are lazy, we will just decode it immediately. */
+
+ {
+ /* Allocate space for the decoded message. */
+ SimpleMessage message = SimpleMessage_init_zero;
+
+ /* Create a stream that reads from the buffer. */
+ pb_istream_t stream = pb_istream_from_buffer(buffer, message_length);
+
+ /* Now we are ready to decode the message. */
+ status = pb_decode(&stream, SimpleMessage_fields, &message);
+
+ /* Check for errors... */
+ if (!status)
+ {
+ printf("Decoding failed: %s\n", PB_GET_ERROR(&stream));
+ return 1;
+ }
+
+ /* Print the data contained in the message. */
+ printf("Your lucky number was %d!\n", message.lucky_number);
+ printf("Your unlucky number was %u!\n", message.unlucky.number);
+ }
+
+ return 0;
+}
+
diff --git a/vendor/nanopb/build-tests/legacy_cmake_simple/CMakeLists.txt b/vendor/nanopb/build-tests/legacy_cmake_simple/CMakeLists.txt
new file mode 100644
index 00000000..d9b96be0
--- /dev/null
+++ b/vendor/nanopb/build-tests/legacy_cmake_simple/CMakeLists.txt
@@ -0,0 +1,13 @@
+cmake_minimum_required(VERSION 2.8)
+project(NANOPB_CMAKE_SIMPLE C)
+
+set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/../../extra)
+find_package(Nanopb REQUIRED)
+include_directories(${NANOPB_INCLUDE_DIRS})
+
+nanopb_generate_cpp(PROTO_SRCS PROTO_HDRS simple.proto)
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Werror -g -O0")
+
+add_executable(simple simple.c ${PROTO_SRCS} ${PROTO_HDRS})
diff --git a/vendor/nanopb/build-tests/legacy_cmake_simple/simple.c b/vendor/nanopb/build-tests/legacy_cmake_simple/simple.c
new file mode 100644
index 00000000..1f6b1373
--- /dev/null
+++ b/vendor/nanopb/build-tests/legacy_cmake_simple/simple.c
@@ -0,0 +1,71 @@
+#include
+#include
+#include
+#include "simple.pb.h"
+
+int main()
+{
+ /* This is the buffer where we will store our message. */
+ uint8_t buffer[128];
+ size_t message_length;
+ bool status;
+
+ /* Encode our message */
+ {
+ /* Allocate space on the stack to store the message data.
+ *
+ * Nanopb generates simple struct definitions for all the messages.
+ * - check out the contents of simple.pb.h!
+ * It is a good idea to always initialize your structures
+ * so that you do not have garbage data from RAM in there.
+ */
+ SimpleMessage message = SimpleMessage_init_zero;
+
+ /* Create a stream that will write to our buffer. */
+ pb_ostream_t stream = pb_ostream_from_buffer(buffer, sizeof(buffer));
+
+ /* Fill in the lucky number */
+ message.lucky_number = 13;
+
+ /* Now we are ready to encode the message! */
+ status = pb_encode(&stream, SimpleMessage_fields, &message);
+ message_length = stream.bytes_written;
+
+ /* Then just check for any errors.. */
+ if (!status)
+ {
+ printf("Encoding failed: %s\n", PB_GET_ERROR(&stream));
+ return 1;
+ }
+ }
+
+ /* Now we could transmit the message over network, store it in a file or
+ * wrap it to a pigeon's leg.
+ */
+
+ /* But because we are lazy, we will just decode it immediately. */
+
+ {
+ /* Allocate space for the decoded message. */
+ SimpleMessage message = SimpleMessage_init_zero;
+
+ /* Create a stream that reads from the buffer. */
+ pb_istream_t stream = pb_istream_from_buffer(buffer, message_length);
+
+ /* Now we are ready to decode the message. */
+ status = pb_decode(&stream, SimpleMessage_fields, &message);
+
+ /* Check for errors... */
+ if (!status)
+ {
+ printf("Decoding failed: %s\n", PB_GET_ERROR(&stream));
+ return 1;
+ }
+
+ /* Print the data contained in the message. */
+ printf("Your lucky number was %d!\n", message.lucky_number);
+ }
+
+ return 0;
+}
+
diff --git a/vendor/nanopb/build-tests/legacy_cmake_simple/simple.proto b/vendor/nanopb/build-tests/legacy_cmake_simple/simple.proto
new file mode 100644
index 00000000..5c73a3b2
--- /dev/null
+++ b/vendor/nanopb/build-tests/legacy_cmake_simple/simple.proto
@@ -0,0 +1,9 @@
+// A very simple protocol definition, consisting of only
+// one message.
+
+syntax = "proto2";
+
+message SimpleMessage {
+ required int32 lucky_number = 1;
+}
+
diff --git a/vendor/nanopb/conanfile.py b/vendor/nanopb/conanfile.py
index fb57883b..59dcd55d 100644
--- a/vendor/nanopb/conanfile.py
+++ b/vendor/nanopb/conanfile.py
@@ -3,7 +3,7 @@
class NanoPbConan(ConanFile):
name = "nanopb"
- version = "0.4.5"
+ version = "0.4.9.1"
license = "zlib"
url = "https://jpa.kapsi.fi/nanopb/"
description = "Protocol Buffers with small code size"
diff --git a/vendor/nanopb/docs/bazel_build.md b/vendor/nanopb/docs/bazel_build.md
new file mode 100644
index 00000000..4aa0c533
--- /dev/null
+++ b/vendor/nanopb/docs/bazel_build.md
@@ -0,0 +1,57 @@
+# Nanopb: Bazel build
+The Bazel build system, is designed to be fast and correct. Nanopb provides a
+set of plugins for the Bazel build system allowing Nanopb to be integrated
+into the build.
+
+## Getting started
+Add the following to your MODULE.bazel file.
+``` py
+# MODULE.bazel
+bazel_dep(name = "nanopb", version = "0.4.9")
+git_override(
+ module_name = "nanopb",
+ remote = "https://github.com/nanopb/nanopb.git",
+ commit = "",
+)
+```
+
+To use the Nanopb rules with in your build you can use the
+`cc_nanopb_proto_library` which works in a similar way to the native
+`cc_proto_library` rule.
+``` py
+# BUILD.bazel
+load("@nanopb//extra/bazel:nanopb_cc_proto_library.bzl", "cc_nanopb_proto_library")
+
+# Your native proto_library.
+proto_library(
+ name = "descriptor",
+ srcs = [
+ "generator/proto/google/protobuf/descriptor.proto",
+ ],
+)
+
+# Generated library.
+cc_nanopb_proto_library(
+ name = "descriptor_nanopb",
+ protos = [":descriptor"],
+ visibility = ["//visibility:private"],
+)
+
+# Depend directly on the generated code using a cc_library.
+cc_library(
+ name = "uses_generated_descriptors",
+ deps = [":descriptor_nanopb"],
+ hdrs = ["my_header.h"],
+)
+```
+
+If you have a custom nanopb options file, use the `nanopb_options_files` argument shown below.
+``` py
+# Generated library with options.
+cc_nanopb_proto_library(
+ name = "descriptor_nanopb",
+ protos = [":descriptor"],
+ nanopb_options_files = ["descriptor.options"],
+ visibility = ["//visibility:private"],
+)
+```
diff --git a/vendor/nanopb/docs/concepts.md b/vendor/nanopb/docs/concepts.md
index 74e1ca32..33f610c9 100644
--- a/vendor/nanopb/docs/concepts.md
+++ b/vendor/nanopb/docs/concepts.md
@@ -161,7 +161,7 @@ variable-length datatypes are more complex:
1) Strings, bytes and repeated fields of any type map to callback
functions by default.
-2) If there is a special option `(nanopb).max_size` specified in the
+2) If there is a special option `(nanopb).max_length` or `(nanopb).max_size` specified in the
.proto file, string maps to null-terminated char array and bytes map
to a structure containing a char array and a size field.
3) If `(nanopb).fixed_length` is set to `true` and
@@ -230,9 +230,15 @@ bytes there. For the `string` field type, the length limit is exact.
## Field callbacks
-When a field has dynamic length, nanopb cannot statically allocate
-storage for it. Instead, it allows you to handle the field in whatever
-way you want, using a callback function.
+The easiest way to handle repeated fields is to specify a maximum size for
+them, as shown in the previous section. However, sometimes you need to be
+able to handle arrays with unlimited length, possibly larger than available
+RAM memory.
+
+For these cases, nanopb provides a callback interface. Nanopb core invokes
+the callback function when it gets to the specific field in the message.
+Your code can then handle the field in custom ways, for example decode
+the data piece-by-piece and store to filesystem.
The [pb_callback_t](reference.html#pb-callback-t) structure contains a
function pointer and a `void` pointer called `arg` you can use for
@@ -335,14 +341,9 @@ alternative, the generator options `callback_function` and
`callback_datatype` can be used to bind a callback function
based on its name.
-Typically this feature is used by setting
-`callback_datatype` to e.g. `void\*` or other
-data type used for callback state. Then the generator will automatically
-set `callback_function` to
-`MessageName_callback` and produce a prototype for it in
-generated `.pb.h`. By implementing this function in your own
-code, you will receive callbacks for fields without having to separately
-set function pointers.
+Typically this feature is used by setting `callback_datatype` to e.g. `void\*` or even a struct type used to store encoded or decoded data.
+The generator will automatically set `callback_function` to `MessageName_callback` and produce a prototype for it in generated `.pb.h`.
+By implementing this function in your own code, you will receive callbacks for fields without having to separately set function pointers.
If you want to use function name bound callbacks for some fields and
`pb_callback_t` for other fields, you can call
@@ -544,7 +545,7 @@ framing format are to:
3. Perform any synchronization and error checking that may be needed
depending on application.
-For example UDP packets already fullfill all the requirements, and TCP
+For example UDP packets already fulfill all the requirements, and TCP
streams typically only need a way to identify the message length and
type. Lower level interfaces such as serial ports may need a more robust
frame format, such as HDLC (high-level data link control).
@@ -577,3 +578,45 @@ error. The most common error conditions are:
7) Errors that happen in your callback functions.
8) Running out of memory, i.e. stack overflow.
9) Invalid field descriptors (would usually mean a bug in the generator).
+
+## Static assertions
+
+Nanopb code uses static assertions to check size of structures at the compile
+time. The `PB_STATIC_ASSERT` macro is defined in `pb.h`. If ISO C11 standard
+is available, the C standard `_Static_assert` keyword is used, otherwise a
+negative sized array definition trick is used.
+
+Common reasons for static assertion errors are:
+
+1. `FIELDINFO_DOES_NOT_FIT_width2` with `width1` or `width2`:
+ Message that is larger than 256 bytes, but nanopb generator does not detect
+ it for some reason. Often resolved by giving all `.proto` files as argument
+ to `nanopb_generator.py` at the same time, to ensure submessage definitions
+ are found. Alternatively `(nanopb).descriptorsize = DS_4` option can be
+ given manually.
+
+2. `FIELDINFO_DOES_NOT_FIT_width4` with `width4`:
+ Message that is larger than 64 kilobytes. There will be a better error
+ message for this in a future nanopb version, but currently it asserts here.
+ The compile time option `PB_FIELD_32BIT` should be specified either on
+ C compiler command line or by editing `pb.h`. This will increase the sizes
+ of integer types used internally in nanopb code.
+
+3. `DOUBLE_MUST_BE_8_BYTES`:
+ Some platforms, most notably AVR, do not support the 64-bit `double` type,
+ only 32-bit `float`. The compile time option `PB_CONVERT_DOUBLE_FLOAT` can
+ be defined to convert between the types automatically. The conversion
+ results in small rounding errors and takes unnecessary space in transmission,
+ so changing the `.proto` to use `float` type is often better.
+
+4. `INT64_T_WRONG_SIZE`:
+ The `stdint.h` system header is incorrect for the C compiler being used.
+ This can result from erroneous compiler include path.
+ If the compiler actually does not support 64-bit types, the compile time
+ option `PB_WITHOUT_64BIT` can be used.
+
+5. `variably modified array size`:
+ The compiler used has problems resolving the array-based static assert at
+ compile time. Try setting the compiler to C11 standard mode if possible.
+ If static assertions cannot be made to work on the compiler used, the
+ compile-time option `PB_NO_STATIC_ASSERT` can be specified to turn them off.
diff --git a/vendor/nanopb/docs/index.md b/vendor/nanopb/docs/index.md
index d1ee2342..85ca878d 100644
--- a/vendor/nanopb/docs/index.md
+++ b/vendor/nanopb/docs/index.md
@@ -46,8 +46,8 @@ Features and limitations
**Features**
1) Pure C runtime
-2) Small code size (5--10 kB depending on processor and compilation options, plus any message definitions)
-3) Small ram usage (typically \~300 bytes stack, plus any message structs)
+2) Small code size (5--20 kB depending on processor and compilation options, plus any message definitions)
+3) Small ram usage (typically \~1 kB stack, plus any message structs)
4) Allows specifying maximum size for strings and arrays, so that they can be allocated statically.
5) No malloc needed: everything can be allocated statically or on the stack. Optional malloc support available.
6) You can use either encoder or decoder alone to cut the code size in half.
@@ -62,7 +62,7 @@ Features and limitations
1) Some speed has been sacrificed for code size.
2) Encoding is focused on writing to streams. For memory buffers only it could be made more efficient.
3) The deprecated Protocol Buffers feature called "groups" is not supported.
-4) Fields in the generated structs are ordered by the tag number, instead of the natural ordering in .proto file.
+4) Fields in the generated structs are ordered by the tag number, instead of the natural ordering in .proto file. (Since nanopb-0.4.2 this can be configured with `sort_by_tag` setting.)
5) Unknown fields are not preserved when decoding and re-encoding a message.
6) Reflection (runtime introspection) is not supported. E.g. you can't request a field by giving its name in a string.
7) Numeric arrays are always encoded as packed, even if not marked as packed in .proto.
diff --git a/vendor/nanopb/docs/migration.md b/vendor/nanopb/docs/migration.md
index 5a0a9842..1f5ed0f1 100644
--- a/vendor/nanopb/docs/migration.md
+++ b/vendor/nanopb/docs/migration.md
@@ -6,6 +6,172 @@ required modifications of user applications are explained. Also any
error indications are included, in order to make it easier to find this
document.
+Nanopb-0.4.9 (2024-09-19)
+-------------------------
+
+### CMake rules now default to grpcio_tools protoc
+
+**Rationale:** Previously CMake rules primarily looked for `protoc` in system
+path. This was often an outdated version installed from package manager, and
+not necessarily compatible with `python-protobuf` version installed from `pip`.
+
+**Changes:** CMake rules now default to using `generator/protoc`, which in
+turn uses `grpc_tools` Python package if available. If it is not available,
+system path is searched for `protoc`.
+
+**Required actions:** For most users, no actions are needed. In case of
+version incompatibilities, `pip install --user --upgrade grpcio-tools protobuf`
+is recommended. If needed, `PROTOBUF_PROTOC_EXECUTABLE` can be set to override
+the default.
+
+**Error indications:** `Failed to import generator/proto/nanopb_pb2.py` if
+the version of `protoc` selected by CMake differs from the installed `python-protobuf`.
+
+### Use uint8_t for pb_byte_t when UINT8_MAX is defined
+
+**Rationale:** Previously `pb_byte_t` was always defined as `uint_least8_t`.
+This could be annoying on some platforms without this define, or when some
+compilers might warn on conversion from `uint8_t`. However not all platforms
+support `uint8_t` sized access.
+
+**Changes:** The `stdint.h` header will define `UINT8_MAX` exactly if `uint8_t`
+is available. Use it to select which type to typedef.
+
+**Required actions:** Usually none. If any compiler warnings are generated,
+they can either be fixed or `PB_BYTE_T_OVERRIDE` can be defined to `uint_least8_t`
+to restore old behavior.
+
+**Error indications:** Implicit conversion from `uint_least8_t` to `uint8_t`.
+
+### Migrate to bzlmod
+
+**Rationale:** Due to the [shortcomings of the WORKSPACE system](https://bazel.build/external/overview#workspace-shortcomings),
+Bzlmod is going to replace the legacy WORKSPACE system in future Bazel releases.
+Therefore, nanopb has been migrated to use bzlmod to better support newer bazel versions.
+
+**Changes**
+* upgrade bazel deps
+ * bazel_skylib: 1.7.1
+ * rules_python: 0.34.0
+ * rules_proto: 6.0.2
+ * protobuf: 24.4
+ * rules_proto_grpc: 5.0.0
+* Start using bzlmod (MODULE.bazel)
+
+**Required actions:** bazel build using WORKSPACE has been deprecated. To use bzlmod, add the content below to your MODULE.bazel
+```py
+bazel_dep(name = "nanopb", version = "0.4.9")
+git_override(
+ module_name = "nanopb",
+ remote = "https://github.com/nanopb/nanopb.git",
+ commit = "",
+)
+```
+Note that the name of the module has been changed to `nanopb`, to better fit the convention of bzlmod.
+If the old name `com_github_nanopb_nanopb` is preferred, you can add the `repo_name` parameter to indicate the repo name.
+```py
+bazel_dep(name = "nanopb", version = "0.4.9", repo_name="com_github_nanopb_nanopb")
+```
+
+### Separate enum_intsize setting
+
+**Rationale:** Nanopb-0.4.7 extended `int_size` option to affect enums.
+This is only supported by C++11 and C23 compilers.
+The generation used `#ifdef` to limit size option to use on C++ compilers.
+This caused binary incompatibility when project mixed C and C++ files.
+
+**Changes**: `enum_intsize` is now a separate option, and does not use `#ifdef`.
+If compiler does not support the setting, compilation will fail.
+
+**Required actions:** If using the recently introduced `int_size` option on enums, update to use `enum_intsize` instead.
+
+**Error indications:** Enum integer sizes use defaults as the old setting is ignored.
+
+Nanopb-0.4.8 (2023-11-11)
+-------------------------
+
+### Fix naming conflicts with CMake installation
+
+**Rationale:** Previously `CMakeLists.txt` installed nanopb Python module under name `proto` and include file directly as `/usr/include/pb.h`. These names have potential to conflict with other libraries.
+
+**Changes:** Python module is installed as `nanopb` and include files under `/usr/include/nanopb`.
+
+**Required actions:** Only affects users who install nanopb using the `cmake` build system.
+Does not affect use of `FindNanopb.cmake`.
+Calling nanopb generator should work as before.
+Include path may need adjustment if not using `nanopb-targets.cmake` to determine it.
+
+**Error indications:** Include file `pb.h` not found when compiling against a system-wide installation done with CMake.
+
+Nanopb-0.4.7 (2022-12-11)
+-------------------------
+
+### Add int_size option to enum fields
+
+**This option was separated to `enum_intsize` in nanopb-0.4.9. This migration notice has been updated to match.**
+
+**Rationale:** The `packed_enum` option does not work with MSVC due to `#pragma pack` not supporting enums with MSVC. To workaround this, enum sizes can be specified with the new `int_size` option. Note that this is only supported when generating C++.
+
+**Changes:** The ~~`int_size`~~ `enum_intsize` option can be specified for enums.
+
+**Required actions:** ~~Any users concerned about the size of the generated C++ enums and are setting the int_size of enums via a wildcard (e.g. `MyMessage.* int_size=IS_8`) will need to instead set the `int_size` option for individual fields.~~
+
+**Error indications:** ~~The size of generated C++ enums has changed.~~
+
+### Updated include path order in FindNanopb.cmake
+
+**Changes:** The include path passed to `protoc` by the CMake rules was updated.
+
+**Required actions:** No changes needed for most users.
+In some specific cases it could change the directory hierarchy generated by `protoc`.
+More details in
+[pull request #822](https://github.com/nanopb/nanopb/pull/822).
+
+**Error indications:** Generated `.pb.c` or `.pb.h` file not found when building
+with CMake rules.
+
+Nanopb-0.4.6 (2022-05-30)
+-------------------------
+
+### NANOPB_VERSION define is now a string
+
+**Changes:** To ease `NANOPB_VERSION` macro usage, the value is directly a string.
+
+**Required actions:** Most nanopb users probably never used that macro. If you
+did, you likely used the `#` preprocessor operator to convert it to a string.
+Now you can use the value directly, for example:
+`strcpy(myvar, NANOPB_VERSION);`
+
+### FindNanopb.cmake now requires protoc 3.6.0 or newer by default
+
+**Changes:** The default options passing method now uses `--plugin-opt` which
+is supported by protoc 3.6.0 and newer (released in 2018).
+
+**Required actions:** Update `protoc` if needed, or alternatively install
+`grpcio-tools` package from `pip`. If neither is possible, the
+`NANOPB_PROTOC_OLDER_THAN_3_6_0` cmake option can be used to restore the old
+style option passing. Note that it has problems with special characters such
+as `:`.
+
+**Error indications:** "`protoc: Unknown flag: --nanopb_opt`"
+
+### pb.h uses C11 _Static_assert keyword by default
+
+**Rationale:** The nanopb generated headers use static assertions to catch
+errors at compile time. There are several mechanisms to implement this.
+The most widely supported is C11 `_Static_assert` keyword.
+Previously the code used negative size array definition trick, which is
+supported already in C99 but does not work with every compiler and can
+produce confusing error messages.
+
+**Changes:** Now `_Static_assert` is used by default.
+
+**Required actions:** If the keyword is not recognized, set the compiler to
+C11 standard mode if available. If it is not available, define either `PB_C99_STATIC_ASSERT`
+or `PB_NO_STATIC_ASSERT` in `pb.h` or on compiler command line.
+
+**Error indications:** `Undefined identifier _Static_assert`
+
Nanopb-0.4.4 (2020-11-25)
-------------------------
@@ -511,7 +677,7 @@ Nanopb-0.2.1 (2013-04-14)
### Callback function signature
-**Rationale:** Previously the auxilary data to field callbacks was
+**Rationale:** Previously the auxiliary data to field callbacks was
passed as `void*`. This allowed passing of any data, but made it
unnecessarily complex to return a pointer from callback.
diff --git a/vendor/nanopb/docs/reference.md b/vendor/nanopb/docs/reference.md
index 84e90247..890be377 100644
--- a/vendor/nanopb/docs/reference.md
+++ b/vendor/nanopb/docs/reference.md
@@ -2,12 +2,13 @@
## Compilation options
-The following options can be specified in one of two ways:
+Compilation options affect the functionality included in the nanopb core C code.
+The options can be specified in one of two ways:
1. Using the -D switch on the C compiler command line.
2. Using a `#define` at the top of pb.h.
-> **NOTE:** You must have the same settings for the nanopb library and all code that
+> **NOTE:** You must have the same compilation options for the nanopb library and all code that
includes nanopb headers.
* `PB_ENABLE_MALLOC`: Enable dynamic allocation support in the decoder.
@@ -18,8 +19,10 @@ includes nanopb headers.
* `PB_SYSTEM_HEADER`: Replace the standards header files with a single system-specific header file. Value must include quotes, for example `#define PB_SYSTEM_HEADER "foo.h"`. See [extra/pb_syshdr.h](https://github.com/nanopb/nanopb/blob/master/extra/pb_syshdr.h) for an example.
* `PB_WITHOUT_64BIT`: Disable support of 64-bit integer fields, for old compilers or for a slight speedup on 8-bit platforms.
* `PB_ENCODE_ARRAYS_UNPACKED`: Encode scalar arrays in the unpacked format, which takes up more space. Only to be used when the decoder on the receiving side cannot process packed arrays, such as [protobuf.js versions before 2020](https://github.com/protocolbuffers/protobuf/issues/1701).
-* `PB_CONVERT_DOBULE_FLOAT`: Convert doubles to floats for platforms that do not support 64-bit `double` datatype. Mainly `AVR` processors.
+* `PB_CONVERT_DOUBLE_FLOAT`: Convert doubles to floats for platforms that do not support 64-bit `double` datatype. Mainly `AVR` processors.
* `PB_VALIDATE_UTF8`: Check whether incoming strings are valid UTF-8 sequences. Adds a small performance and code size penalty.
+* `PB_C99_STATIC_ASSERT`: Use C99 style negative array trick for static assertions. For compilers that do not support C11 standard.
+* `PB_NO_STATIC_ASSERT`: Disable static assertions at compile time. Only for compilers with limited support of C standards.
The `PB_MAX_REQUIRED_FIELDS` and `PB_FIELD_32BIT` settings allow
raising some datatype limits to suit larger messages. Their need is
@@ -27,10 +30,12 @@ recognized automatically by C-preprocessor `#if`-directives in the
generated `.pb.c` files. The default setting is to use the smallest
datatypes (least resources used).
-## Proto file options
+## Generator options
-The generator behaviour can be adjusted using several options, defined
-in the [nanopb.proto](https://github.com/nanopb/nanopb/blob/master/generator/proto/nanopb.proto) file in the generator folder. Here is a list of the most common options, but see the file for a full list:
+Generator options affect how the `.proto` files get converted to `.pb.c` and `.pb.h` files.
+
+Most options are related to specific message or field in `.proto` file.
+The full set of available options is defined in [nanopb.proto](https://github.com/nanopb/nanopb/blob/master/generator/proto/nanopb.proto). Here is a list of the most common options, but see the file for a full list:
* `max_size`: Allocated maximum size for `bytes` and `string` fields. For strings, this includes the terminating zero.
* `max_length`: Maximum length for `string` fields. Setting this is equivalent to setting `max_size` to a value of length + 1.
@@ -45,10 +50,10 @@ in the [nanopb.proto](https://github.com/nanopb/nanopb/blob/master/generator/pro
* `fixed_length`: Generate `bytes` fields with a constant length defined by `max_size`. A separate `.size` field will then not be generated.
* `fixed_count`: Generate arrays with constant length defined by `max_count`.
* `package`: Package name that applies only for nanopb generator. Defaults to name defined by `package` keyword in .proto file, which applies for all languages.
-* `int_size`: Override the integer type of a field. For example, specify `int_size = IS_8` to convert `int32` from protocol definition into `int8_t` in the structure.
+* `int_size`: Override the integer type of a field. For example, specify `int_size = IS_8` to convert `int32` from protocol definition into `int8_t` in the structure. When used with enum types, the size of the generated enum can be specified (C++ only)
These options can be defined for the .proto files before they are
-converted using the nanopb-generatory.py. There are three ways to define
+converted using the nanopb-generator.py. There are three ways to define
the options:
1. Using a separate .options file. This allows using wildcards for
@@ -120,11 +125,6 @@ separately to the nanopb plugin, like:
If preferred, the name of the options file can be set using generator
argument `-f`.
-### Defining the options on command line
-
-The nanopb_generator.py has a simple command line option `-s OPTION:VALUE`.
-The setting applies to the whole file that is being processed.
-
### Defining the options in the .proto file
The .proto file format allows defining custom options for the fields.
@@ -157,6 +157,77 @@ message Message
}
~~~~
+### Defining the options on command line
+
+The nanopb_generator.py has a simple command line option `-s OPTION:VALUE`.
+The setting applies to the whole file that is being processed.
+
+There are also a few command line options that cannot be applied using the
+other mechanisms, as they affect the whole generation:
+
+* `--c-style`: Modify symbol names to better match C naming conventions.
+* `--no-timestamp`: Do not add timestamp to generated files.
+* `--strip-path`: Remove relative path from generated `#include` directives.
+* `--cpp-descriptors`: Generate extra convenience definitions for use from C++
+
+For a full list of generator command line options, use `nanopb_generator.py --help`:
+
+ Usage: nanopb_generator.py [options] file.pb ...
+
+ Options:
+ -h, --help show this help message and exit
+ -V, --version Show version info and exit (add -v for protoc version
+ info)
+ -x FILE Exclude file from generated #include list.
+ -e EXTENSION, --extension=EXTENSION
+ Set extension to use instead of '.pb' for generated
+ files. [default: .pb]
+ -H EXTENSION, --header-extension=EXTENSION
+ Set extension to use for generated header files.
+ [default: .h]
+ -S EXTENSION, --source-extension=EXTENSION
+ Set extension to use for generated source files.
+ [default: .c]
+ -f FILE, --options-file=FILE
+ Set name of a separate generator options file.
+ -I DIR, --options-path=DIR, --proto-path=DIR
+ Search path for .options and .proto files. Also
+ determines relative paths for output directory
+ structure.
+ --error-on-unmatched Stop generation if there are unmatched fields in
+ options file
+ --no-error-on-unmatched
+ Continue generation if there are unmatched fields in
+ options file (default)
+ -D OUTPUTDIR, --output-dir=OUTPUTDIR
+ Output directory of .pb.h and .pb.c files
+ -Q FORMAT, --generated-include-format=FORMAT
+ Set format string to use for including other .pb.h
+ files. Value can be 'quote', 'bracket' or a format
+ string. [default: #include "%s"]
+ -L FORMAT, --library-include-format=FORMAT
+ Set format string to use for including the nanopb pb.h
+ header. Value can be 'quote', 'bracket' or a format
+ string. [default: #include <%s>]
+ --strip-path Strip directory path from #included .pb.h file name
+ --no-strip-path Opposite of --strip-path (default since 0.4.0)
+ --cpp-descriptors Generate C++ descriptors to lookup by type (e.g.
+ pb_field_t for a message)
+ -T, --no-timestamp Don't add timestamp to .pb.h and .pb.c preambles
+ (default since 0.4.0)
+ -t, --timestamp Add timestamp to .pb.h and .pb.c preambles
+ -q, --quiet Don't print anything except errors.
+ -v, --verbose Print more information.
+ -s OPTION:VALUE Set generator option (max_size, max_count etc.).
+ --protoc-opt=OPTION Pass an option to protoc when compiling .proto files
+ --protoc-insertion-points
+ Include insertion point comments in output for use by
+ custom protoc plugins
+ -C, --c-style Use C naming convention.
+
+ Compile file.pb from file.proto by: 'protoc -ofile.pb file.proto'. Output will
+ be written to file.pb.h and file.pb.c.
+
## pb.h
### pb_byte_t
@@ -718,7 +789,7 @@ memory buffer.
| | |
|----------------------|--------------------------------------------------------|
| buf | Pointer to byte array to read from.
-| bufsize | Size of the byte array.
+| bufsize | Size of the byte array. Typically length of the message to be decoded.
| returns | An input stream ready to use.
### pb_read
diff --git a/vendor/nanopb/examples/cmake_relpath/CMakeLists.txt b/vendor/nanopb/examples/cmake_relpath/CMakeLists.txt
index e7727d85..1fb97fbc 100644
--- a/vendor/nanopb/examples/cmake_relpath/CMakeLists.txt
+++ b/vendor/nanopb/examples/cmake_relpath/CMakeLists.txt
@@ -3,16 +3,11 @@ project(NANOPB_CMAKE_SIMPLE C)
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/../../extra)
find_package(Nanopb REQUIRED)
-include_directories(${NANOPB_INCLUDE_DIRS})
-nanopb_generate_cpp(PROTO_SRCS PROTO_HDRS RELPATH proto
+nanopb_generate_cpp(TARGET proto RELPATH proto
proto/simple.proto proto/sub/unlucky.proto)
-include_directories(${CMAKE_CURRENT_BINARY_DIR})
-#add_custom_target(generate_proto_sources DEPENDS ${PROTO_SRCS} ${PROTO_HDRS})
-set_source_files_properties(${PROTO_SRCS} ${PROTO_HDRS}
- PROPERTIES GENERATED TRUE)
-
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Werror -g -O0")
-add_executable(simple simple.c ${PROTO_SRCS} ${PROTO_HDRS})
+add_executable(simple simple.c)
+target_link_libraries(simple proto)
diff --git a/vendor/nanopb/examples/cmake_simple/CMakeLists.txt b/vendor/nanopb/examples/cmake_simple/CMakeLists.txt
index e5f33a02..d6b26f0d 100644
--- a/vendor/nanopb/examples/cmake_simple/CMakeLists.txt
+++ b/vendor/nanopb/examples/cmake_simple/CMakeLists.txt
@@ -3,14 +3,10 @@ project(NANOPB_CMAKE_SIMPLE C)
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/../../extra)
find_package(Nanopb REQUIRED)
-include_directories(${NANOPB_INCLUDE_DIRS})
-nanopb_generate_cpp(PROTO_SRCS PROTO_HDRS simple.proto)
-include_directories(${CMAKE_CURRENT_BINARY_DIR})
-#add_custom_target(generate_proto_sources DEPENDS ${PROTO_SRCS} ${PROTO_HDRS})
-set_source_files_properties(${PROTO_SRCS} ${PROTO_HDRS}
- PROPERTIES GENERATED TRUE)
+nanopb_generate_cpp(TARGET proto simple.proto)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Werror -g -O0")
-add_executable(simple simple.c ${PROTO_SRCS} ${PROTO_HDRS})
+add_executable(simple simple.c)
+target_link_libraries(simple proto)
diff --git a/vendor/nanopb/examples/conan_dependency/.gitignore b/vendor/nanopb/examples/conan_dependency/.gitignore
new file mode 100644
index 00000000..567609b1
--- /dev/null
+++ b/vendor/nanopb/examples/conan_dependency/.gitignore
@@ -0,0 +1 @@
+build/
diff --git a/vendor/nanopb/examples/conan_dependency/CMakeLists.txt b/vendor/nanopb/examples/conan_dependency/CMakeLists.txt
new file mode 100644
index 00000000..f5df573a
--- /dev/null
+++ b/vendor/nanopb/examples/conan_dependency/CMakeLists.txt
@@ -0,0 +1,13 @@
+cmake_minimum_required(VERSION 3.20)
+project(simple C)
+
+include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
+conan_basic_setup()
+
+add_library(simple-protos STATIC
+ ${CMAKE_BINARY_DIR}/src/simple.pb.c
+)
+
+add_executable(simple ${CMAKE_BINARY_DIR}/src/simple.c)
+
+target_link_libraries(simple ${CONAN_LIBS} simple-protos)
diff --git a/vendor/nanopb/examples/conan_dependency/README.md b/vendor/nanopb/examples/conan_dependency/README.md
new file mode 100644
index 00000000..00b86c4f
--- /dev/null
+++ b/vendor/nanopb/examples/conan_dependency/README.md
@@ -0,0 +1,28 @@
+# About
+This example shows how to use Conan to pull in the header files and static libraries
+for `nanopb` and incorporate them into a very simple CMake application.
+
+## How To Run
+
+### Before using this example
+The `conanfile.py` here imports version `0.4.6` of `nanopb` and uses the packaged artifacts
+to build a simple application. You'll likely need to build this yourself, so
+check out the tagged version and run `conan create .` in the base of this repository.
+
+### Running line by line
+To run through the build one step at a time, use the following commands.
+```sh
+mkdir build
+cd build
+conan install ..
+conan source ..
+conan build ..
+conan package ..
+```
+The `conanfile.py` has been commented to explain the workflow
+
+### Installing to cache
+To have everything build at once and install to your local Conan cache
+```sh
+conan create .
+```
diff --git a/vendor/nanopb/examples/conan_dependency/conanfile.py b/vendor/nanopb/examples/conan_dependency/conanfile.py
new file mode 100644
index 00000000..e762bd53
--- /dev/null
+++ b/vendor/nanopb/examples/conan_dependency/conanfile.py
@@ -0,0 +1,42 @@
+from conans import ConanFile, CMake
+
+class SimpleProtosConan(ConanFile):
+ name = "simple_protos"
+ version = "1.0.0"
+ description = "An example of importing nanopb as a conan artifact"
+ settings = "os", "compiler", "build_type", "arch"
+ generators = "cmake"
+ exports = "*"
+
+ def requirements(self):
+ self.requires("nanopb/0.4.6")
+
+ def imports(self):
+ # Includes the nanopb headers
+ self.copy("*.h")
+ # Includes the compiled nanopb libraries
+ self.copy("*", src="lib", dst="lib")
+ # Includes the protoc plugin
+ self.copy("*", src="bin", dst="bin")
+ # Includes the python libraries that `bin` reaches out to
+ self.copy("*", src="local", dst="local")
+
+ def source(self):
+ # To include the packages from nanopb, we need to get their path in cache
+ nanopb_package_root = self.deps_cpp_info["nanopb"].rootpath
+ python_path=f"PYTHONPATH={nanopb_package_root}/local/lib/python3.10/dist-packages"
+ plugin=f"--plugin={nanopb_package_root}/bin/protoc-gen-nanopb"
+ # These next values grab this environment's source and proto directories
+ output=f"--nanopb_out={self.source_folder}/src"
+ proto_flags=f"-I {self.source_folder}/protos simple.proto"
+
+ self.run(f"{python_path} protoc {plugin} {output} {proto_flags}")
+
+ def build(self):
+ cmake = CMake(self)
+ cmake.configure()
+ cmake.build()
+
+ def package(self):
+ self.copy("simple", dst="bin", src="bin")
+
diff --git a/vendor/nanopb/examples/conan_dependency/protos/simple.proto b/vendor/nanopb/examples/conan_dependency/protos/simple.proto
new file mode 100644
index 00000000..5c73a3b2
--- /dev/null
+++ b/vendor/nanopb/examples/conan_dependency/protos/simple.proto
@@ -0,0 +1,9 @@
+// A very simple protocol definition, consisting of only
+// one message.
+
+syntax = "proto2";
+
+message SimpleMessage {
+ required int32 lucky_number = 1;
+}
+
diff --git a/vendor/nanopb/examples/conan_dependency/src/simple.c b/vendor/nanopb/examples/conan_dependency/src/simple.c
new file mode 100644
index 00000000..1f6b1373
--- /dev/null
+++ b/vendor/nanopb/examples/conan_dependency/src/simple.c
@@ -0,0 +1,71 @@
+#include <stdio.h>
+#include <pb_encode.h>
+#include <pb_decode.h>
+#include "simple.pb.h"
+
+int main()
+{
+ /* This is the buffer where we will store our message. */
+ uint8_t buffer[128];
+ size_t message_length;
+ bool status;
+
+ /* Encode our message */
+ {
+ /* Allocate space on the stack to store the message data.
+ *
+ * Nanopb generates simple struct definitions for all the messages.
+ * - check out the contents of simple.pb.h!
+ * It is a good idea to always initialize your structures
+ * so that you do not have garbage data from RAM in there.
+ */
+ SimpleMessage message = SimpleMessage_init_zero;
+
+ /* Create a stream that will write to our buffer. */
+ pb_ostream_t stream = pb_ostream_from_buffer(buffer, sizeof(buffer));
+
+ /* Fill in the lucky number */
+ message.lucky_number = 13;
+
+ /* Now we are ready to encode the message! */
+ status = pb_encode(&stream, SimpleMessage_fields, &message);
+ message_length = stream.bytes_written;
+
+ /* Then just check for any errors.. */
+ if (!status)
+ {
+ printf("Encoding failed: %s\n", PB_GET_ERROR(&stream));
+ return 1;
+ }
+ }
+
+ /* Now we could transmit the message over network, store it in a file or
+ * wrap it to a pigeon's leg.
+ */
+
+ /* But because we are lazy, we will just decode it immediately. */
+
+ {
+ /* Allocate space for the decoded message. */
+ SimpleMessage message = SimpleMessage_init_zero;
+
+ /* Create a stream that reads from the buffer. */
+ pb_istream_t stream = pb_istream_from_buffer(buffer, message_length);
+
+ /* Now we are ready to decode the message. */
+ status = pb_decode(&stream, SimpleMessage_fields, &message);
+
+ /* Check for errors... */
+ if (!status)
+ {
+ printf("Decoding failed: %s\n", PB_GET_ERROR(&stream));
+ return 1;
+ }
+
+ /* Print the data contained in the message. */
+ printf("Your lucky number was %d!\n", message.lucky_number);
+ }
+
+ return 0;
+}
+
diff --git a/vendor/nanopb/examples/platformio/.gitignore b/vendor/nanopb/examples/platformio/.gitignore
new file mode 100644
index 00000000..6093aef6
--- /dev/null
+++ b/vendor/nanopb/examples/platformio/.gitignore
@@ -0,0 +1,5 @@
+.pio/
+.idea/
+cmake-build-*/
+/CMakeLists.txt
+CMakeListsPrivate.txt
diff --git a/vendor/nanopb/examples/platformio/platformio.ini b/vendor/nanopb/examples/platformio/platformio.ini
new file mode 100644
index 00000000..7e119b84
--- /dev/null
+++ b/vendor/nanopb/examples/platformio/platformio.ini
@@ -0,0 +1,48 @@
+;
+; You can setup `custom_nanopb_protos` `nanopb_options` vars to generate code from proto files
+;
+; Generator will use the following folders:
+;
+; `$BUILD_DIR/nanopb/generated-src` - `*.pb.h` and `*.pb.c` files
+; `$BUILD_DIR/nanopb/md5` - MD5 files to track changes in source .proto/.options
+;
+; Compiled `.pb.o` files will be located under `$BUILD_DIR/nanopb/generated-build`
+;
+; Example:
+
+[env:pio_with_options]
+platform = native
+lib_deps = Nanopb
+
+src_filter =
+    +<pio_with_options.c>
+
+; All paths are relative to the `$PROJECT_DIR`
+custom_nanopb_protos =
+    +<proto/pio_with_options.proto>
+custom_nanopb_options =
+    --error-on-unmatched
+
+[env:pio_without_options]
+platform = native
+lib_deps = Nanopb
+
+src_filter =
+    +<pio_without_options.c>
+
+; All paths are relative to the `$PROJECT_DIR`
+custom_nanopb_protos =
+    +<proto/pio_without_options.proto>
+
+
+[env:pio_esp32_idf]
+platform = espressif32
+board = firebeetle32
+framework = espidf
+lib_deps = Nanopb
+
+; Warning: the 'src_filter' option cannot be used with ESP-IDF. Select source files to build in the project CMakeLists.txt file.
+; So, we specified source files in src/CMakeLists.txt
+
+custom_nanopb_protos =
+    +<proto/pio_esp32_idf.proto>
diff --git a/vendor/nanopb/examples/platformio/proto/pio_with_options.options b/vendor/nanopb/examples/platformio/proto/pio_with_options.options
new file mode 100644
index 00000000..fe2dbee6
--- /dev/null
+++ b/vendor/nanopb/examples/platformio/proto/pio_with_options.options
@@ -0,0 +1 @@
+TestMessageWithOptions.str max_size:16
diff --git a/vendor/nanopb/examples/platformio/proto/pio_with_options.proto b/vendor/nanopb/examples/platformio/proto/pio_with_options.proto
new file mode 100644
index 00000000..58e00ed3
--- /dev/null
+++ b/vendor/nanopb/examples/platformio/proto/pio_with_options.proto
@@ -0,0 +1,5 @@
+syntax = "proto3";
+
+message TestMessageWithOptions {
+ string str = 1;
+}
diff --git a/vendor/nanopb/examples/platformio/proto/pio_without_options.proto b/vendor/nanopb/examples/platformio/proto/pio_without_options.proto
new file mode 100644
index 00000000..2284488b
--- /dev/null
+++ b/vendor/nanopb/examples/platformio/proto/pio_without_options.proto
@@ -0,0 +1,5 @@
+syntax = "proto3";
+
+message TestMessageWithoutOptions {
+ int32 number = 1;
+}
diff --git a/vendor/nanopb/examples/platformio/src/CMakeLists.txt b/vendor/nanopb/examples/platformio/src/CMakeLists.txt
new file mode 100644
index 00000000..40ab4b7e
--- /dev/null
+++ b/vendor/nanopb/examples/platformio/src/CMakeLists.txt
@@ -0,0 +1,3 @@
+idf_component_register(
+ SRCS
+ pio_esp32_idf.c)
diff --git a/vendor/nanopb/examples/platformio/src/pio_esp32_idf.c b/vendor/nanopb/examples/platformio/src/pio_esp32_idf.c
new file mode 100644
index 00000000..6f1e2067
--- /dev/null
+++ b/vendor/nanopb/examples/platformio/src/pio_esp32_idf.c
@@ -0,0 +1,32 @@
+#include "pb_encode.h"
+#include "pb_decode.h"
+
+#include "test.h"
+
+#include "pio_without_options.pb.h"
+
+void app_main() {
+ int status = 0;
+
+ uint8_t buffer[256];
+ pb_ostream_t ostream;
+ pb_istream_t istream;
+ size_t written;
+
+ TestMessageWithoutOptions original = TestMessageWithoutOptions_init_zero;
+ original.number = 45;
+
+ ostream = pb_ostream_from_buffer(buffer, sizeof(buffer));
+
+ TEST(pb_encode(&ostream, &TestMessageWithoutOptions_msg, &original));
+
+ written = ostream.bytes_written;
+
+ istream = pb_istream_from_buffer(buffer, written);
+
+ TestMessageWithoutOptions decoded = TestMessageWithoutOptions_init_zero;
+
+ TEST(pb_decode(&istream, &TestMessageWithoutOptions_msg, &decoded));
+
+ TEST(decoded.number == 45);
+}
diff --git a/vendor/nanopb/examples/platformio/src/pio_with_options.c b/vendor/nanopb/examples/platformio/src/pio_with_options.c
new file mode 100644
index 00000000..f558c61d
--- /dev/null
+++ b/vendor/nanopb/examples/platformio/src/pio_with_options.c
@@ -0,0 +1,35 @@
+#include "pb_encode.h"
+#include "pb_decode.h"
+
+#include "test.h"
+
+#include "pio_with_options.pb.h"
+
+int main(int argc, char *argv[]) {
+
+ int status = 0;
+
+ uint8_t buffer[256];
+ pb_ostream_t ostream;
+ pb_istream_t istream;
+ size_t written;
+
+ TestMessageWithOptions original = TestMessageWithOptions_init_zero;
+ strcpy(original.str,"Hello");
+
+ ostream = pb_ostream_from_buffer(buffer, sizeof(buffer));
+
+ TEST(pb_encode(&ostream, &TestMessageWithOptions_msg, &original));
+
+ written = ostream.bytes_written;
+
+ istream = pb_istream_from_buffer(buffer, written);
+
+ TestMessageWithOptions decoded = TestMessageWithOptions_init_zero;
+
+ TEST(pb_decode(&istream, &TestMessageWithOptions_msg, &decoded));
+
+ TEST(strcmp(decoded.str,"Hello") == 0);
+
+ return status;
+}
diff --git a/vendor/nanopb/examples/platformio/src/pio_without_options.c b/vendor/nanopb/examples/platformio/src/pio_without_options.c
new file mode 100644
index 00000000..1ab59f94
--- /dev/null
+++ b/vendor/nanopb/examples/platformio/src/pio_without_options.c
@@ -0,0 +1,35 @@
+#include "pb_encode.h"
+#include "pb_decode.h"
+
+#include "test.h"
+
+#include "pio_without_options.pb.h"
+
+int main(int argc, char *argv[]) {
+
+ int status = 0;
+
+ uint8_t buffer[256];
+ pb_ostream_t ostream;
+ pb_istream_t istream;
+ size_t written;
+
+ TestMessageWithoutOptions original = TestMessageWithoutOptions_init_zero;
+ original.number = 45;
+
+ ostream = pb_ostream_from_buffer(buffer, sizeof(buffer));
+
+ TEST(pb_encode(&ostream, &TestMessageWithoutOptions_msg, &original));
+
+ written = ostream.bytes_written;
+
+ istream = pb_istream_from_buffer(buffer, written);
+
+ TestMessageWithoutOptions decoded = TestMessageWithoutOptions_init_zero;
+
+ TEST(pb_decode(&istream, &TestMessageWithoutOptions_msg, &decoded));
+
+ TEST(decoded.number == 45);
+
+ return status;
+}
diff --git a/vendor/nanopb/examples/platformio/src/test.h b/vendor/nanopb/examples/platformio/src/test.h
new file mode 100644
index 00000000..63895dac
--- /dev/null
+++ b/vendor/nanopb/examples/platformio/src/test.h
@@ -0,0 +1,9 @@
+#include <stdio.h>
+
+#define TEST(x) \
+ if (!(x)) { \
+ fprintf(stderr, "\033[31;1mFAILED:\033[22;39m %s:%d %s\n", __FILE__, __LINE__, #x); \
+ status = 1; \
+ } else { \
+ printf("\033[32;1mOK:\033[22;39m %s\n", #x); \
+ }
diff --git a/vendor/nanopb/extra/FindNanopb.cmake b/vendor/nanopb/extra/FindNanopb.cmake
index a6c34fa9..8bc6939f 100644
--- a/vendor/nanopb/extra/FindNanopb.cmake
+++ b/vendor/nanopb/extra/FindNanopb.cmake
@@ -14,6 +14,12 @@
#
# NANOPB_OPTIONS - List of options passed to nanopb.
#
+# Nanopb_FIND_COMPONENTS - List of options to append to NANOPB_OPTIONS without the
+# leading '--'. This should not manually be set, but allows
+# passing options to nanopb via find_package. For example,
+# 'find_package(Nanopb REQUIRED COMPONENTS cpp-descriptors)'
+# is equivalent to setting NANOPB_OPTIONS to --cpp-descriptors.
+#
# NANOPB_DEPENDS - List of files to be used as dependencies
# for the generated source and header files. These
# files are not directly passed as options to
@@ -25,6 +31,7 @@
# under build directory, instead of mirroring
# relative paths of source directories.
# Set to FALSE if you want to disable this behaviour.
+# PROTOC_OPTIONS - Pass options to protoc executable
#
# Defines the following variables:
#
@@ -42,6 +49,10 @@
# ...)
# SRCS = Variable to define with autogenerated source files
# HDRS = Variable to define with autogenerated header files
+# NANOPB_GENERATE_CPP(TARGET <TGT> [RELPATH <ROOT_FOLDER>]
+#                     <PROTO_FILES> ...)
+# TGT = Name of the static library to create with the autogenerated files
+#
# If you want to use relative paths in your import statements use the RELPATH
# option. The argument to RELPATH should be the directory that all the
# imports will be relative to.
@@ -50,17 +61,16 @@
#
#
# ====================================================================
-# Example:
+# Example using modern targets:
#
# set(NANOPB_SRC_ROOT_FOLDER "/path/to/nanopb")
# set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${NANOPB_SRC_ROOT_FOLDER}/extra)
# find_package( Nanopb REQUIRED )
-# include_directories(${NANOPB_INCLUDE_DIRS})
#
-# NANOPB_GENERATE_CPP(PROTO_SRCS PROTO_HDRS foo.proto)
+# NANOPB_GENERATE_CPP(TARGET proto foo.proto)
#
-# include_directories(${CMAKE_CURRENT_BINARY_DIR})
-# add_executable(bar bar.cc ${PROTO_SRCS} ${PROTO_HDRS})
+# add_executable(bar bar.cc)
+# target_link_libraries(bar proto)
#
# Example with RELPATH:
# Assume we have a layout like:
@@ -72,9 +82,21 @@
# Everything would be the same as the previous example, but the call to
# NANOPB_GENERATE_CPP would change to:
#
-# NANOPB_GENERATE_CPP(PROTO_SRCS PROTO_HDRS RELPATH proto
+# NANOPB_GENERATE_CPP(TARGET proto RELPATH proto
# proto/foo.proto proto/sub/bar.proto)
#
+# Example using traditional variables:
+#
+# set(NANOPB_SRC_ROOT_FOLDER "/path/to/nanopb")
+# set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${NANOPB_SRC_ROOT_FOLDER}/extra)
+# find_package( Nanopb REQUIRED )
+# include_directories(${NANOPB_INCLUDE_DIRS})
+#
+# NANOPB_GENERATE_CPP(PROTO_SRCS PROTO_HDRS foo.proto)
+#
+# include_directories(${CMAKE_CURRENT_BINARY_DIR})
+# add_executable(bar bar.cc ${PROTO_SRCS} ${PROTO_HDRS})
+#
# ====================================================================
#=============================================================================
@@ -119,11 +141,30 @@
#=============================================================================
-function(NANOPB_GENERATE_CPP SRCS HDRS)
- cmake_parse_arguments(NANOPB_GENERATE_CPP "" "RELPATH" "" ${ARGN})
+function(NANOPB_GENERATE_CPP)
+ cmake_parse_arguments(NANOPB_GENERATE_CPP "" "RELPATH;TARGET" "" ${ARGN})
+ if(NANOPB_GENERATE_CPP_TARGET)
+ set(SRCS NANOPB_TARGET_SRCS)
+ set(HDRS NANOPB_TARGET_HDRS)
+ else()
+ list(GET NANOPB_GENERATE_CPP_UNPARSED_ARGUMENTS 0 SRCS)
+ list(GET NANOPB_GENERATE_CPP_UNPARSED_ARGUMENTS 1 HDRS)
+ list(REMOVE_AT NANOPB_GENERATE_CPP_UNPARSED_ARGUMENTS 0 1)
+ endif()
if(NOT NANOPB_GENERATE_CPP_UNPARSED_ARGUMENTS)
return()
endif()
+ set(NANOPB_OPTIONS_DIRS)
+
+ if(MSVC)
+ set(CUSTOM_COMMAND_PREFIX call)
+ endif()
+
+ if(NANOPB_GENERATE_CPP_RELPATH)
+ get_filename_component(NANOPB_GENERATE_CPP_RELPATH ${NANOPB_GENERATE_CPP_RELPATH} ABSOLUTE)
+ list(APPEND _nanopb_include_path "-I${NANOPB_GENERATE_CPP_RELPATH}")
+ list(APPEND NANOPB_OPTIONS_DIRS ${NANOPB_GENERATE_CPP_RELPATH})
+ endif()
if(NANOPB_GENERATE_CPP_APPEND_PATH)
# Create an include path for each file specified
@@ -133,11 +174,7 @@ function(NANOPB_GENERATE_CPP SRCS HDRS)
list(APPEND _nanopb_include_path "-I${ABS_PATH}")
endforeach()
else()
- set(_nanopb_include_path "-I${CMAKE_CURRENT_SOURCE_DIR}")
- endif()
-
- if(NANOPB_GENERATE_CPP_RELPATH)
- list(APPEND _nanopb_include_path "-I${NANOPB_GENERATE_CPP_RELPATH}")
+ list(APPEND _nanopb_include_path "-I${CMAKE_CURRENT_SOURCE_DIR}")
endif()
if(DEFINED NANOPB_IMPORT_DIRS)
@@ -162,17 +199,20 @@ function(NANOPB_GENERATE_CPP SRCS HDRS)
set(GENERATOR_CORE_SRC
${GENERATOR_CORE_DIR}/nanopb.proto)
- # Treat the source diretory as immutable.
+ # Treat the source directory as immutable.
#
# Copy the generator directory to the build directory before
# compiling python and proto files. Fixes issues when using the
# same build directory with different python/protobuf versions
# as the binary build directory is discarded across builds.
#
+ # Notice: copy_directory does not copy the content if the directory already exists.
+ # We therefore append '/' to specify that we want to copy the content of the folder. See #847
+ #
add_custom_command(
OUTPUT ${NANOPB_GENERATOR_EXECUTABLE} ${GENERATOR_CORE_SRC}
COMMAND ${CMAKE_COMMAND} -E copy_directory
- ARGS ${NANOPB_GENERATOR_SOURCE_DIR} ${GENERATOR_PATH}
+ ARGS ${NANOPB_GENERATOR_SOURCE_DIR}/ ${GENERATOR_PATH}
VERBATIM)
set(GENERATOR_CORE_PYTHON_SRC)
@@ -184,26 +224,23 @@ function(NANOPB_GENERATE_CPP SRCS HDRS)
set(GENERATOR_CORE_PYTHON_SRC ${GENERATOR_CORE_PYTHON_SRC} ${output})
add_custom_command(
OUTPUT ${output}
- COMMAND ${PROTOBUF_PROTOC_EXECUTABLE}
+ COMMAND ${CUSTOM_COMMAND_PREFIX} ${PROTOBUF_PROTOC_EXECUTABLE}
ARGS -I${GENERATOR_PATH}/proto
--python_out=${GENERATOR_CORE_DIR} ${ABS_FIL}
DEPENDS ${ABS_FIL}
VERBATIM)
endforeach()
- if(NANOPB_GENERATE_CPP_RELPATH)
- get_filename_component(ABS_ROOT ${NANOPB_GENERATE_CPP_RELPATH} ABSOLUTE)
- endif()
foreach(FIL ${NANOPB_GENERATE_CPP_UNPARSED_ARGUMENTS})
get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
- get_filename_component(FIL_WE ${FIL} NAME_WE)
- get_filename_component(FIL_DIR ${FIL} PATH)
+ get_filename_component(FIL_WE ${FIL} NAME_WLE)
+ get_filename_component(FIL_DIR ${ABS_FIL} PATH)
set(FIL_PATH_REL)
- if(ABS_ROOT)
+ if(NANOPB_GENERATE_CPP_RELPATH)
# Check that the file is under the given "RELPATH"
- string(FIND ${ABS_FIL} ${ABS_ROOT} LOC)
+ string(FIND ${ABS_FIL} ${NANOPB_GENERATE_CPP_RELPATH} LOC)
if (${LOC} EQUAL 0)
- string(REPLACE "${ABS_ROOT}/" "" FIL_REL ${ABS_FIL})
+ string(REPLACE "${NANOPB_GENERATE_CPP_RELPATH}/" "" FIL_REL ${ABS_FIL})
get_filename_component(FIL_PATH_REL ${FIL_REL} PATH)
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${FIL_PATH_REL})
endif()
@@ -215,11 +252,15 @@ function(NANOPB_GENERATE_CPP SRCS HDRS)
list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_PATH_REL}/${FIL_WE}.pb.c")
list(APPEND ${HDRS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_PATH_REL}/${FIL_WE}.pb.h")
- set(NANOPB_PLUGIN_OPTIONS)
- set(NANOPB_OPTIONS_DIRS)
+ get_filename_component(ABS_OPT_IN_FIL ${FIL_DIR}/${FIL_WE}.options.in ABSOLUTE)
+ if(EXISTS ${ABS_OPT_IN_FIL})
+ set(ABS_OPT_FIL "${CMAKE_CURRENT_BINARY_DIR}/${FIL_PATH_REL}/${FIL_WE}.options")
+ configure_file(${ABS_OPT_IN_FIL} ${ABS_OPT_FIL})
+ else()
+ get_filename_component(ABS_OPT_FIL ${FIL_DIR}/${FIL_WE}.options ABSOLUTE)
+ endif()
# If there an options file in the same working directory, set it as a dependency
- get_filename_component(ABS_OPT_FIL ${FIL_DIR}/${FIL_WE}.options ABSOLUTE)
if(EXISTS ${ABS_OPT_FIL})
# Get directory as lookups for dependency options fail if an options
# file is used. The options is still set as a dependency of the
@@ -244,10 +285,14 @@ function(NANOPB_GENERATE_CPP SRCS HDRS)
list(REMOVE_DUPLICATES NANOPB_OPTIONS_DIRS)
endif()
+ set(NANOPB_PLUGIN_OPTIONS)
foreach(options_path ${NANOPB_OPTIONS_DIRS})
set(NANOPB_PLUGIN_OPTIONS "${NANOPB_PLUGIN_OPTIONS} -I${options_path}")
endforeach()
+ # Remove leading space before the first -I directive
+ string(STRIP "${NANOPB_PLUGIN_OPTIONS}" NANOPB_PLUGIN_OPTIONS)
+
if(NANOPB_OPTIONS)
set(NANOPB_PLUGIN_OPTIONS "${NANOPB_PLUGIN_OPTIONS} ${NANOPB_OPTIONS}")
endif()
@@ -258,17 +303,9 @@ function(NANOPB_GENERATE_CPP SRCS HDRS)
# We need to pass the path to the option files to the nanopb plugin. There are two ways to do it.
# - An older hacky one using ':' as option separator in protoc args preventing the ':' to be used in path.
# - Or a newer one, using --nanopb_opt which requires a version of protoc >= 3.6
- # So we will determine which version of protoc we have available and choose accordingly.
- execute_process(COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --version OUTPUT_VARIABLE PROTOC_VERSION_STRING OUTPUT_STRIP_TRAILING_WHITESPACE)
- string(REGEX MATCH "[(0-9)].*.[(0-9)].*.[(0-9)].*" PROTOC_VERSION ${PROTOC_VERSION_STRING})
-
- if(PROTOC_VERSION VERSION_LESS "3.6.0")
- #try to use the older way
- string(REGEX MATCH ":" HAS_COLON_IN_PATH ${NANOPB_PLUGIN_OPTIONS} ${NANOPB_OUT})
- if(HAS_COLON_IN_PATH)
- message(FATAL_ERROR "Your path includes a ':' character used as an option separator for nanopb. Upgrade to protoc version >= 3.6.0 or use a different path.")
- endif()
- set(NANOPB_OPT_STRING "--nanopb_out=${NANOPB_PLUGIN_OPTIONS}:${NANOPB_OUT}")
+ # Since nanopb 0.4.6, --nanopb_opt is the default.
+ if(DEFINED NANOPB_PROTOC_OLDER_THAN_3_6_0)
+ set(NANOPB_OPT_STRING "--nanopb_out=${NANOPB_PLUGIN_OPTIONS}:${NANOPB_OUT}")
else()
set(NANOPB_OPT_STRING "--nanopb_opt=${NANOPB_PLUGIN_OPTIONS}" "--nanopb_out=${NANOPB_OUT}")
endif()
@@ -276,11 +313,12 @@ function(NANOPB_GENERATE_CPP SRCS HDRS)
add_custom_command(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_PATH_REL}/${FIL_WE}.pb.c"
"${CMAKE_CURRENT_BINARY_DIR}/${FIL_PATH_REL}/${FIL_WE}.pb.h"
- COMMAND ${PROTOBUF_PROTOC_EXECUTABLE}
- ARGS -I${GENERATOR_PATH} -I${GENERATOR_CORE_DIR}
- -I${CMAKE_CURRENT_BINARY_DIR} ${_nanopb_include_path}
+ COMMAND ${CUSTOM_COMMAND_PREFIX} ${PROTOBUF_PROTOC_EXECUTABLE}
+ ARGS ${_nanopb_include_path} -I${GENERATOR_PATH}
+ -I${GENERATOR_CORE_DIR} -I${CMAKE_CURRENT_BINARY_DIR}
--plugin=protoc-gen-nanopb=${NANOPB_GENERATOR_PLUGIN}
${NANOPB_OPT_STRING}
+ ${PROTOC_OPTIONS}
${ABS_FIL}
DEPENDS ${ABS_FIL} ${GENERATOR_CORE_PYTHON_SRC}
${ABS_OPT_FIL} ${NANOPB_DEPENDS}
@@ -290,9 +328,30 @@ function(NANOPB_GENERATE_CPP SRCS HDRS)
endforeach()
set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
- set(${SRCS} ${${SRCS}} ${NANOPB_SRCS} PARENT_SCOPE)
- set(${HDRS} ${${HDRS}} ${NANOPB_HDRS} PARENT_SCOPE)
+ if(NANOPB_GENERATE_CPP_TARGET)
+ add_library(${NANOPB_GENERATE_CPP_TARGET} STATIC EXCLUDE_FROM_ALL ${${SRCS}} ${${HDRS}})
+ target_include_directories(${NANOPB_GENERATE_CPP_TARGET} PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
+ target_link_libraries(${NANOPB_GENERATE_CPP_TARGET} nanopb)
+ endif()
+
+ if(NOT DEFINED NANOPB_GENERATE_CPP_STANDALONE)
+ set(NANOPB_GENERATE_CPP_STANDALONE TRUE)
+ endif()
+
+ if(MSVC)
+ unset(CUSTOM_COMMAND_PREFIX)
+ endif()
+
+ if(NOT NANOPB_GENERATE_CPP_TARGET)
+ if (NANOPB_GENERATE_CPP_STANDALONE)
+ set(${SRCS} ${${SRCS}} ${NANOPB_SRCS} PARENT_SCOPE)
+ set(${HDRS} ${${HDRS}} ${NANOPB_HDRS} PARENT_SCOPE)
+ else()
+ set(${SRCS} ${${SRCS}} PARENT_SCOPE)
+ set(${HDRS} ${${HDRS}} PARENT_SCOPE)
+ endif()
+ endif()
endfunction()
@@ -313,6 +372,11 @@ if(NOT DEFINED NANOPB_SRC_ROOT_FOLDER)
${CMAKE_CURRENT_LIST_DIR}/.. ABSOLUTE)
endif()
+# Parse any options given to find_package(... COMPONENTS ...)
+foreach(component ${Nanopb_FIND_COMPONENTS})
+ list(APPEND NANOPB_OPTIONS "--${component}")
+endforeach()
+
# Find the include directory
find_path(NANOPB_INCLUDE_DIRS
pb.h
@@ -339,7 +403,12 @@ foreach(FIL ${_nanopb_hdrs})
list(APPEND NANOPB_HDRS "${${FIL}__nano_pb_file}")
endforeach()
-# Find the protoc Executable
+# Create the library target
+add_library(nanopb STATIC EXCLUDE_FROM_ALL ${NANOPB_SRCS})
+target_compile_features(nanopb PUBLIC c_std_11)
+target_include_directories(nanopb PUBLIC ${NANOPB_INCLUDE_DIRS})
+
+# Find the local protoc Executable
find_program(PROTOBUF_PROTOC_EXECUTABLE
NAMES protoc
DOC "The Google Protocol Buffers Compiler"
@@ -348,7 +417,25 @@ find_program(PROTOBUF_PROTOC_EXECUTABLE
${PROTOBUF_SRC_ROOT_FOLDER}/vsprojects/Debug
${NANOPB_SRC_ROOT_FOLDER}/generator-bin
${NANOPB_SRC_ROOT_FOLDER}/generator
+ NO_DEFAULT_PATH
+)
+
+# Test protoc, try to get version
+execute_process(
+ COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --version
+ OUTPUT_QUIET
+ ERROR_QUIET
+ RESULT_VARIABLE ret
)
+if(NOT ret EQUAL 0)
+ # Fallback to system protoc
+ unset(PROTOBUF_PROTOC_EXECUTABLE)
+ find_program(PROTOBUF_PROTOC_EXECUTABLE
+ NAMES protoc
+ DOC "The Google Protocol Buffers Compiler"
+ )
+endif()
+
mark_as_advanced(PROTOBUF_PROTOC_EXECUTABLE)
# Find nanopb generator source dir
@@ -357,6 +444,7 @@ find_path(NANOPB_GENERATOR_SOURCE_DIR
DOC "nanopb generator source"
PATHS
${NANOPB_SRC_ROOT_FOLDER}/generator
+ NO_DEFAULT_PATH
NO_CMAKE_FIND_ROOT_PATH
)
mark_as_advanced(NANOPB_GENERATOR_SOURCE_DIR)
diff --git a/vendor/nanopb/extra/bazel/BUILD.bazel b/vendor/nanopb/extra/bazel/BUILD.bazel
new file mode 100644
index 00000000..e69de29b
diff --git a/vendor/nanopb/extra/bazel/nanopb_cc_proto_library.bzl b/vendor/nanopb/extra/bazel/nanopb_cc_proto_library.bzl
new file mode 100644
index 00000000..aae59654
--- /dev/null
+++ b/vendor/nanopb/extra/bazel/nanopb_cc_proto_library.bzl
@@ -0,0 +1,98 @@
+# Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/
+# Adapted from: https://github.com/rules-proto-grpc/rules_proto_grpc/
+
+load("@rules_proto_grpc//internal:filter_files.bzl", "filter_files")
+load("@rules_cc//cc:defs.bzl", "cc_library")
+load(
+ "@rules_proto_grpc//:defs.bzl",
+ "ProtoPluginInfo",
+ "proto_compile_attrs",
+ "proto_compile",
+)
+
+def cc_nanopb_proto_compile_impl(ctx):
+ """Nanopb proto compile implementation to add options files."""
+ extra_protoc_args = getattr(ctx.attr, "extra_protoc_args", [])
+ extra_protoc_files = getattr(ctx.files, "extra_protoc_files", [])
+ for options_target in ctx.attr.nanopb_options_files:
+ for options_file in options_target.files.to_list():
+ extra_protoc_args = extra_protoc_args + [
+ "--nanopb_plugin_opt=-f{}".format(options_file.path)]
+ extra_protoc_files = extra_protoc_files + [options_file]
+ return proto_compile(ctx, ctx.attr.options, extra_protoc_args, extra_protoc_files)
+
+
+nanopb_proto_compile_attrs = dict(
+ nanopb_options_files = attr.label_list(
+ allow_files = [".options"],
+ doc = "An optional list of additional nanopb options files to apply",
+ ),
+ **proto_compile_attrs,
+)
+
+
+# Create compile rule
+cc_nanopb_proto_compile = rule(
+ implementation = cc_nanopb_proto_compile_impl,
+ attrs = dict(
+ nanopb_proto_compile_attrs,
+ _plugins = attr.label_list(
+ providers = [ProtoPluginInfo],
+ default = [
+ Label("@nanopb//:nanopb_plugin"),
+ ],
+ doc = "List of protoc plugins to apply",
+ ),
+ ),
+ toolchains = [str(Label("@rules_proto//proto:toolchain_type"))],
+)
+
+
+def cc_nanopb_proto_library(name, **kwargs): # buildifier: disable=function-docstring
+ # Compile protos
+ name_pb = name + "_pb"
+ cc_nanopb_proto_compile(
+ name = name_pb,
+ **{
+ k: v
+ for (k, v) in kwargs.items()
+ if k in nanopb_proto_compile_attrs.keys()
+ } # Forward args
+ )
+
+ # Filter files to sources and headers
+ filter_files(
+ name = name_pb + "_srcs",
+ target = name_pb,
+ extensions = ["c"],
+ )
+
+ filter_files(
+ name = name_pb + "_hdrs",
+ target = name_pb,
+ extensions = ["h"],
+ )
+
+ # Create c library
+ cc_library(
+ name = name,
+ srcs = [name_pb + "_srcs"],
+ deps = PROTO_DEPS + kwargs.get("deps", []),
+ hdrs = [name_pb + "_hdrs"],
+ includes = [name_pb],
+ alwayslink = kwargs.get("alwayslink"),
+ copts = kwargs.get("copts"),
+ defines = kwargs.get("defines"),
+ features = kwargs.get("features"),
+ include_prefix = kwargs.get("include_prefix"),
+ linkopts = kwargs.get("linkopts"),
+ linkstatic = kwargs.get("linkstatic"),
+ local_defines = kwargs.get("local_defines"),
+ strip_include_prefix = kwargs.get("strip_include_prefix"),
+ visibility = kwargs.get("visibility"),
+ tags = kwargs.get("tags"),
+ )
+
+PROTO_DEPS = [
+ "@nanopb//:nanopb",
+]
diff --git a/vendor/nanopb/extra/bazel/nanopb_deps.bzl b/vendor/nanopb/extra/bazel/nanopb_deps.bzl
new file mode 100644
index 00000000..a6353bd6
--- /dev/null
+++ b/vendor/nanopb/extra/bazel/nanopb_deps.bzl
@@ -0,0 +1,51 @@
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+def nanopb_deps():
+ # Required for rule `copy_file`.
+ # Used by: nanopb.
+ # Used in modules: generator.
+ if "bazel_skylib" not in native.existing_rules():
+ http_archive(
+ name = "bazel_skylib",
+ sha256 = "cd55a062e763b9349921f0f5db8c3933288dc8ba4f76dd9416aac68acee3cb94",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.5.0/bazel-skylib-1.5.0.tar.gz",
+ "https://github.com/bazelbuild/bazel-skylib/releases/download/1.5.0/bazel-skylib-1.5.0.tar.gz",
+ ],
+ )
+
+ # Setup proto rules.
+ # Used by: com_github_nanopb_nanopb, rules_proto_grpc.
+ # Used in modules: root.
+ if "rules_proto" not in native.existing_rules():
+ http_archive(
+ name = "rules_proto",
+ sha256 = "dc3fb206a2cb3441b485eb1e423165b231235a1ea9b031b4433cf7bc1fa460dd",
+ strip_prefix = "rules_proto-5.3.0-21.7",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_proto/archive/refs/tags/5.3.0-21.7.tar.gz",
+ "https://github.com/bazelbuild/rules_proto/archive/refs/tags/5.3.0-21.7.tar.gz",
+ ],
+ )
+
+ # Required for plugin rules.
+ # Used by: com_github_nanopb_nanopb.
+ # Used in modules: generator.
+ if "rules_python" not in native.existing_rules():
+ http_archive(
+ name = "rules_python",
+ sha256 = "0a8003b044294d7840ac7d9d73eef05d6ceb682d7516781a4ec62eeb34702578",
+ strip_prefix = "rules_python-0.24.0",
+ url = "https://github.com/bazelbuild/rules_python/archive/refs/tags/0.24.0.tar.gz",
+ )
+
+ # Setup grpc tools.
+ # Used by: nanopb.
+ # Used in modules: generator/proto.
+ if "rules_proto_grpc" not in native.existing_rules():
+ http_archive(
+ name = "rules_proto_grpc",
+ sha256 = "2a0860a336ae836b54671cbbe0710eec17c64ef70c4c5a88ccfd47ea6e3739bd",
+ strip_prefix = "rules_proto_grpc-4.6.0",
+ urls = ["https://github.com/rules-proto-grpc/rules_proto_grpc/releases/download/4.6.0/rules_proto_grpc-4.6.0.tar.gz"],
+ )
diff --git a/vendor/nanopb/extra/bazel/nanopb_workspace.bzl b/vendor/nanopb/extra/bazel/nanopb_workspace.bzl
new file mode 100644
index 00000000..19556072
--- /dev/null
+++ b/vendor/nanopb/extra/bazel/nanopb_workspace.bzl
@@ -0,0 +1,10 @@
+load("@nanopb_pypi//:requirements.bzl", "install_deps")
+load("@rules_proto_grpc//:repositories.bzl", "rules_proto_grpc_repos", "rules_proto_grpc_toolchains")
+load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains")
+
+def nanopb_workspace():
+ install_deps()
+ rules_proto_grpc_toolchains()
+ rules_proto_grpc_repos()
+ rules_proto_dependencies()
+ rules_proto_toolchains()
diff --git a/vendor/nanopb/extra/bazel/python_deps.bzl b/vendor/nanopb/extra/bazel/python_deps.bzl
new file mode 100644
index 00000000..a66791e6
--- /dev/null
+++ b/vendor/nanopb/extra/bazel/python_deps.bzl
@@ -0,0 +1,12 @@
+load("@rules_python//python:pip.bzl", "pip_parse")
+
+def nanopb_python_deps(interpreter=None):
+ # Required for python deps for generator plugin.
+ # Used by: nanopb.
+ # Used in modules: generator.
+ if "nanopb_pypi" not in native.existing_rules():
+ pip_parse(
+ name = "nanopb_pypi",
+ requirements_lock = "@nanopb//:extra/requirements_lock.txt",
+ python_interpreter_target = interpreter,
+ )
diff --git a/vendor/nanopb/extra/nanopb.mk b/vendor/nanopb/extra/nanopb.mk
index fc085f45..81db04ad 100644
--- a/vendor/nanopb/extra/nanopb.mk
+++ b/vendor/nanopb/extra/nanopb.mk
@@ -7,6 +7,9 @@ NANOPB_DIR := $(patsubst %/,%,$(dir $(patsubst %/,%,$(dir $(lastword $(MAKEFILE_
# Files for the nanopb core
NANOPB_CORE = $(NANOPB_DIR)/pb_encode.c $(NANOPB_DIR)/pb_decode.c $(NANOPB_DIR)/pb_common.c
+ifndef BOLOS_SDK
+CFLAGS += -fPIC
+endif
# Check if we are running on Windows
ifdef windir
WINDOWS = 1
@@ -31,15 +34,10 @@ else
endif
endif
-########################################
-# Protobuf files regeneration #
-########################################
-.PHONY: proto
-proto:
- @echo "Generating protobuf files..."
- @for proto_file in $(PB_FILES) ; do \
- echo "Processing $$proto_file..." ; \
- $(PROTOC) $(PROTOC_OPTS) --nanopb_out=. $$proto_file ; \
- $(PROTOC) $(PROTOC_OPTS) --python_out=. $$proto_file ; \
- done
- @echo "Protobuf generation complete."
+# Rule for building .pb.c and .pb.h
+%.pb.c %.pb.h: %.proto %.options
+ $(PROTOC) $(PROTOC_OPTS) --nanopb_out=. $<
+
+%.pb.c %.pb.h: %.proto
+ $(PROTOC) $(PROTOC_OPTS) --nanopb_out=. $<
+
diff --git a/vendor/nanopb/extra/poetry/pyproject.toml b/vendor/nanopb/extra/poetry/pyproject.toml
index b03b482d..b8f0d827 100644
--- a/vendor/nanopb/extra/poetry/pyproject.toml
+++ b/vendor/nanopb/extra/poetry/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "nanopb"
-version = "0.4.5"
+version = "0.4.9.1"
description = "Nanopb is a small code-size Protocol Buffers implementation in ansi C. It is especially suitable for use in microcontrollers, but fits any memory restricted system."
authors = ["Petteri Aimonen "]
license = "Zlib"
@@ -18,8 +18,8 @@ protoc-gen-nanopb = "nanopb.generator.nanopb_generator:main_plugin"
[tool.poetry.dependencies]
python = ">=2.7"
-protobuf = ">=3.6"
-grpcio-tools = {version = ">=1.26.0rc1", allow-prereleases = true, optional=true}
+protobuf = ">=3.19"
+grpcio-tools = {version=">=1.46.0", optional=true}
[tool.poetry.dev-dependencies]
diff --git a/vendor/nanopb/extra/requirements.txt b/vendor/nanopb/extra/requirements.txt
new file mode 100644
index 00000000..a403af56
--- /dev/null
+++ b/vendor/nanopb/extra/requirements.txt
@@ -0,0 +1,2 @@
+grpcio-tools==1.68.0
+setuptools >= 66.1.0
diff --git a/vendor/nanopb/extra/requirements_lock.txt b/vendor/nanopb/extra/requirements_lock.txt
new file mode 100644
index 00000000..c523bb90
--- /dev/null
+++ b/vendor/nanopb/extra/requirements_lock.txt
@@ -0,0 +1,141 @@
+#
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
+#
+# bazel run //:requirements.update
+#
+grpcio==1.68.0 \
+ --hash=sha256:0d230852ba97654453d290e98d6aa61cb48fa5fafb474fb4c4298d8721809354 \
+ --hash=sha256:0efbbd849867e0e569af09e165363ade75cf84f5229b2698d53cf22c7a4f9e21 \
+ --hash=sha256:14331e5c27ed3545360464a139ed279aa09db088f6e9502e95ad4bfa852bb116 \
+ --hash=sha256:15327ab81131ef9b94cb9f45b5bd98803a179c7c61205c8c0ac9aff9d6c4e82a \
+ --hash=sha256:15377bce516b1c861c35e18eaa1c280692bf563264836cece693c0f169b48829 \
+ --hash=sha256:15fa1fe25d365a13bc6d52fcac0e3ee1f9baebdde2c9b3b2425f8a4979fccea1 \
+ --hash=sha256:18668e36e7f4045820f069997834e94e8275910b1f03e078a6020bd464cb2363 \
+ --hash=sha256:2af76ab7c427aaa26aa9187c3e3c42f38d3771f91a20f99657d992afada2294a \
+ --hash=sha256:2bddd04a790b69f7a7385f6a112f46ea0b34c4746f361ebafe9ca0be567c78e9 \
+ --hash=sha256:32a9cb4686eb2e89d97022ecb9e1606d132f85c444354c17a7dbde4a455e4a3b \
+ --hash=sha256:3ac7f10850fd0487fcce169c3c55509101c3bde2a3b454869639df2176b60a03 \
+ --hash=sha256:3b2b559beb2d433129441783e5f42e3be40a9e1a89ec906efabf26591c5cd415 \
+ --hash=sha256:4028b8e9a3bff6f377698587d642e24bd221810c06579a18420a17688e421af7 \
+ --hash=sha256:44bcbebb24363d587472089b89e2ea0ab2e2b4df0e4856ba4c0b087c82412121 \
+ --hash=sha256:46a2d74d4dd8993151c6cd585594c082abe74112c8e4175ddda4106f2ceb022f \
+ --hash=sha256:4df81d78fd1646bf94ced4fb4cd0a7fe2e91608089c522ef17bc7db26e64effd \
+ --hash=sha256:4e300e6978df0b65cc2d100c54e097c10dfc7018b9bd890bbbf08022d47f766d \
+ --hash=sha256:4f1931c7aa85be0fa6cea6af388e576f3bf6baee9e5d481c586980c774debcb4 \
+ --hash=sha256:50992f214264e207e07222703c17d9cfdcc2c46ed5a1ea86843d440148ebbe10 \
+ --hash=sha256:55d3b52fd41ec5772a953612db4e70ae741a6d6ed640c4c89a64f017a1ac02b5 \
+ --hash=sha256:5a180328e92b9a0050958ced34dddcb86fec5a8b332f5a229e353dafc16cd332 \
+ --hash=sha256:619b5d0f29f4f5351440e9343224c3e19912c21aeda44e0c49d0d147a8d01544 \
+ --hash=sha256:6b2f98165ea2790ea159393a2246b56f580d24d7da0d0342c18a085299c40a75 \
+ --hash=sha256:6f9c7ad1a23e1047f827385f4713b5b8c6c7d325705be1dd3e31fb00dcb2f665 \
+ --hash=sha256:79f81b7fbfb136247b70465bd836fa1733043fdee539cd6031cb499e9608a110 \
+ --hash=sha256:7e0a3e72c0e9a1acab77bef14a73a416630b7fd2cbd893c0a873edc47c42c8cd \
+ --hash=sha256:7e7483d39b4a4fddb9906671e9ea21aaad4f031cdfc349fec76bdfa1e404543a \
+ --hash=sha256:88fb2925789cfe6daa20900260ef0a1d0a61283dfb2d2fffe6194396a354c618 \
+ --hash=sha256:8af6137cc4ae8e421690d276e7627cfc726d4293f6607acf9ea7260bd8fc3d7d \
+ --hash=sha256:8b0ff09c81e3aded7a183bc6473639b46b6caa9c1901d6f5e2cba24b95e59e30 \
+ --hash=sha256:8c73f9fbbaee1a132487e31585aa83987ddf626426d703ebcb9a528cf231c9b1 \
+ --hash=sha256:99f06232b5c9138593ae6f2e355054318717d32a9c09cdc5a2885540835067a1 \
+ --hash=sha256:9fe1b141cda52f2ca73e17d2d3c6a9f3f3a0c255c216b50ce616e9dca7e3441d \
+ --hash=sha256:a17278d977746472698460c63abf333e1d806bd41f2224f90dbe9460101c9796 \
+ --hash=sha256:a59f5822f9459bed098ffbceb2713abbf7c6fd13f2b9243461da5c338d0cd6c3 \
+ --hash=sha256:a6213d2f7a22c3c30a479fb5e249b6b7e648e17f364598ff64d08a5136fe488b \
+ --hash=sha256:a831dcc343440969aaa812004685ed322cdb526cd197112d0db303b0da1e8659 \
+ --hash=sha256:afbf45a62ba85a720491bfe9b2642f8761ff348006f5ef67e4622621f116b04a \
+ --hash=sha256:b0cf343c6f4f6aa44863e13ec9ddfe299e0be68f87d68e777328bff785897b05 \
+ --hash=sha256:c03d89df516128febc5a7e760d675b478ba25802447624edf7aa13b1e7b11e2a \
+ --hash=sha256:c1245651f3c9ea92a2db4f95d37b7597db6b246d5892bca6ee8c0e90d76fb73c \
+ --hash=sha256:cc5f0a4f5904b8c25729a0498886b797feb817d1fd3812554ffa39551112c161 \
+ --hash=sha256:dba037ff8d284c8e7ea9a510c8ae0f5b016004f13c3648f72411c464b67ff2fb \
+ --hash=sha256:def1a60a111d24376e4b753db39705adbe9483ef4ca4761f825639d884d5da78 \
+ --hash=sha256:e0d2f68eaa0a755edd9a47d40e50dba6df2bceda66960dee1218da81a2834d27 \
+ --hash=sha256:e0d30f3fee9372796f54d3100b31ee70972eaadcc87314be369360248a3dcffe \
+ --hash=sha256:e18589e747c1e70b60fab6767ff99b2d0c359ea1db8a2cb524477f93cdbedf5b \
+ --hash=sha256:e1e7ed311afb351ff0d0e583a66fcb39675be112d61e7cfd6c8269884a98afbc \
+ --hash=sha256:e46541de8425a4d6829ac6c5d9b16c03c292105fe9ebf78cb1c31e8d242f9155 \
+ --hash=sha256:e694b5928b7b33ca2d3b4d5f9bf8b5888906f181daff6b406f4938f3a997a490 \
+ --hash=sha256:f60fa2adf281fd73ae3a50677572521edca34ba373a45b457b5ebe87c2d01e1d \
+ --hash=sha256:f84890b205692ea813653ece4ac9afa2139eae136e419231b0eec7c39fdbe4c2 \
+ --hash=sha256:f8f695d9576ce836eab27ba7401c60acaf9ef6cf2f70dfe5462055ba3df02cc3 \
+ --hash=sha256:fc05759ffbd7875e0ff2bd877be1438dfe97c9312bbc558c8284a9afa1d0f40e \
+ --hash=sha256:fd2c2d47969daa0e27eadaf15c13b5e92605c5e5953d23c06d0b5239a2f176d3
+ # via grpcio-tools
+grpcio-tools==1.68.0 \
+ --hash=sha256:01ace351a51d7ee120963a4612b1f00e964462ec548db20d17f8902e238592c8 \
+ --hash=sha256:061345c0079b9471f32230186ab01acb908ea0e577bc1699a8cf47acef8be4af \
+ --hash=sha256:0f77957e3a0916a0dd18d57ce6b49d95fc9a5cfed92310f226339c0fda5394f6 \
+ --hash=sha256:10d03e3ad4af6284fd27cb14f5a3d52045913c1253e3e24a384ed91bc8adbfcd \
+ --hash=sha256:1117a81592542f0c36575082daa6413c57ca39188b18a4c50ec7332616f4b97e \
+ --hash=sha256:1769d7f529de1cc102f7fb900611e3c0b69bdb244fca1075b24d6e5b49024586 \
+ --hash=sha256:17d0c9004ea82b4213955a585401e80c30d4b37a1d4ace32ccdea8db4d3b7d43 \
+ --hash=sha256:196cd8a3a5963a4c9e424314df9eb573b305e6f958fe6508d26580ce01e7aa56 \
+ --hash=sha256:19bafb80948eda979b1b3a63c1567162d06249f43068a0e46a028a448e6f72d4 \
+ --hash=sha256:261d98fd635595de42aadee848f9af46da6654d63791c888891e94f66c5d0682 \
+ --hash=sha256:26335eea976dfc1ff5d90b19c309a9425bd53868112a0507ad20f297f2c21d3e \
+ --hash=sha256:28ebdbad2ef16699d07400b65260240851049a75502eff69a59b127d3ab960f1 \
+ --hash=sha256:2919faae04fe47bad57fc9b578aeaab527da260e851f321a253b6b11862254a8 \
+ --hash=sha256:2ec3a2e0afa4866ccc5ba33c071aebaa619245dfdd840cbb74f2b0591868d085 \
+ --hash=sha256:3aa40958355920ae2846c6fb5cadac4f2c8e33234a2982fef8101da0990e3968 \
+ --hash=sha256:453ee3193d59c974c678d91f08786f43c25ef753651b0825dc3d008c31baf68d \
+ --hash=sha256:46b537480b8fd2195d988120a28467601a2a3de2e504043b89fb90318e1eb754 \
+ --hash=sha256:4fe611d89a1836df8936f066d39c7eb03d4241806449ec45d4b8e1c843ae8011 \
+ --hash=sha256:511224a99726eb84db9ddb84dc8a75377c3eae797d835f99e80128ec618376d5 \
+ --hash=sha256:51e5a090849b30c99a2396d42140b8a3e558eff6cdfa12603f9582e2cd07724e \
+ --hash=sha256:533ce6791a5ba21e35d74c6c25caf4776f5692785a170c01ea1153783ad5af31 \
+ --hash=sha256:56842a0ce74b4b92eb62cd5ee00181b2d3acc58ba0c4fd20d15a5db51f891ba6 \
+ --hash=sha256:57e29e78c33fb1b1d557fbe7650d722d1f2b0a9f53ea73beb8ea47e627b6000b \
+ --hash=sha256:59a885091bf29700ba0e14a954d156a18714caaa2006a7f328b18e1ac4b1e721 \
+ --hash=sha256:5afd2f3f7257b52228a7808a2b4a765893d4d802d7a2377d9284853e67d045c6 \
+ --hash=sha256:5d3150d784d8050b10dcf5eb06e04fb90747a1547fed3a062a608d940fe57066 \
+ --hash=sha256:66b70b37184d40806844f51c2757c6b852511d4ea46a3bf2c7e931a47b455bc6 \
+ --hash=sha256:6950725bf7a496f81d3ec3324334ffc9dbec743b510dd0e897f51f8627eeb6ac \
+ --hash=sha256:6dd69c9f3ff85eee8d1f71adf7023c638ca8d465633244ac1b7f19bc3668612d \
+ --hash=sha256:700f171cd3293ee8d50cd43171562ff07b14fa8e49ee471cd91c6924c7da8644 \
+ --hash=sha256:737804ec2225dd4cc27e633b4ca0e963b0795161bf678285fab6586e917fd867 \
+ --hash=sha256:766c2cd2e365e0fc0e559af56f2c2d144d95fd7cb8668a34d533e66d6435eb34 \
+ --hash=sha256:795f2cd76f68a12b0b5541b98187ba367dd69b49d359cf98b781ead742961370 \
+ --hash=sha256:7dc5195dc02057668cc22da1ff1aea1811f6fa0deb801b3194dec1fe0bab1cf0 \
+ --hash=sha256:80b733014eb40d920d836d782e5cdea0dcc90d251a2ffb35ab378ef4f8a42c14 \
+ --hash=sha256:849b12bec2320e49e988df104c92217d533e01febac172a4495caab36d9f0edc \
+ --hash=sha256:88640d95ee41921ac7352fa5fadca52a06d7e21fbe53e6a706a9a494f756be7d \
+ --hash=sha256:8fefc6d000e169a97336feded23ce614df3fb9926fc48c7a9ff8ea459d93b5b0 \
+ --hash=sha256:92a09afe64fe26696595de2036e10967876d26b12c894cc9160f00152cacebe7 \
+ --hash=sha256:9509a5c3ed3d54fa7ac20748d501cb86668f764605a0a68f275339ee0f1dc1a6 \
+ --hash=sha256:ab93fab49fa1e699e577ff5fbb99aba660164d710d4c33cfe0aa9d06f585539f \
+ --hash=sha256:b094b22919b786ad73c20372ef5e546330e7cd2c6dc12293b7ed586975f35d38 \
+ --hash=sha256:b47ae076ffb29a68e517bc03552bef0d9c973f8e18adadff180b123e973a26ea \
+ --hash=sha256:b4ca81770cd729a9ea536d871aacedbde2b732bb9bb83c9d993d63f58502153d \
+ --hash=sha256:c10f3faa0cc4d89eb546f53b623837af23e86dc495d3b89510bcc0e0a6c0b8b2 \
+ --hash=sha256:c77ecc5164bb413a613bdac9091dcc29d26834a2ac42fcd1afdfcda9e3003e68 \
+ --hash=sha256:cad40c3164ee9cef62524dea509449ea581b17ea493178beef051bf79b5103ca \
+ --hash=sha256:d0470ffc6a93c86cdda48edd428d22e2fef17d854788d60d0d5f291038873157 \
+ --hash=sha256:d3e678162e1d7a8720dc05fdd537fc8df082a50831791f7bb1c6f90095f8368b \
+ --hash=sha256:dd9a654af8536b3de8525bff72a245fef62d572eabf96ac946fe850e707cb27d \
+ --hash=sha256:e31be6dc61496a59c1079b0a669f93dfcc2cdc4b1dbdc4374247cd09cee1329b \
+ --hash=sha256:e903d07bc65232aa9e7704c829aec263e1e139442608e473d7912417a9908e29 \
+ --hash=sha256:ee86157ef899f58ba2fe1055cce0d33bd703e99aa6d5a0895581ac3969f06bfa \
+ --hash=sha256:f65942fab440e99113ce14436deace7554d5aa554ea18358e3a5f3fc47efe322 \
+ --hash=sha256:f95103e3e4e7fee7c6123bc9e4e925e07ad24d8d09d7c1c916fb6c8d1cb9e726
+ # via -r extra/requirements.txt
+protobuf==5.28.3 \
+ --hash=sha256:0c4eec6f987338617072592b97943fdbe30d019c56126493111cf24344c1cc24 \
+ --hash=sha256:135658402f71bbd49500322c0f736145731b16fc79dc8f367ab544a17eab4535 \
+ --hash=sha256:27b246b3723692bf1068d5734ddaf2fccc2cdd6e0c9b47fe099244d80200593b \
+ --hash=sha256:3e6101d095dfd119513cde7259aa703d16c6bbdfae2554dfe5cfdbe94e32d548 \
+ --hash=sha256:3fa2de6b8b29d12c61911505d893afe7320ce7ccba4df913e2971461fa36d584 \
+ --hash=sha256:64badbc49180a5e401f373f9ce7ab1d18b63f7dd4a9cdc43c92b9f0b481cef7b \
+ --hash=sha256:70585a70fc2dd4818c51287ceef5bdba6387f88a578c86d47bb34669b5552c36 \
+ --hash=sha256:712319fbdddb46f21abb66cd33cb9e491a5763b2febd8f228251add221981135 \
+ --hash=sha256:91fba8f445723fcf400fdbe9ca796b19d3b1242cd873907979b9ed71e4afe868 \
+ --hash=sha256:a3f6857551e53ce35e60b403b8a27b0295f7d6eb63d10484f12bc6879c715687 \
+ --hash=sha256:cee1757663fa32a1ee673434fcf3bf24dd54763c79690201208bafec62f19eed
+ # via grpcio-tools
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==70.0.0 \
+ --hash=sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4 \
+ --hash=sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0
+ # via
+ # -r extra/requirements.txt
+ # grpcio-tools
diff --git a/vendor/nanopb/extra/script_wrappers/nanopb_generator.py.in b/vendor/nanopb/extra/script_wrappers/nanopb_generator.py.in
new file mode 100644
index 00000000..d44a9db5
--- /dev/null
+++ b/vendor/nanopb/extra/script_wrappers/nanopb_generator.py.in
@@ -0,0 +1,24 @@
+#!/usr/bin/env python3
+# This script is a wrapper to invoke nanopb_generator from an installed Python module.
+import sys
+import os.path
+
+# CMakeLists.txt can provide this file the path to Python module installation
+# location. It is used as a relative path. By default only system path is used.
+python_instdir = r"@PYTHON_INSTDIR@"
+cmake_bindir = r"@CMAKE_INSTALL_BINDIR@"
+cmake_install_prefix = r"@CMAKE_INSTALL_PREFIX@"
+if python_instdir[0] != '@':
+ python_instdir = os.path.join(cmake_install_prefix, python_instdir)
+ cmake_bindir = os.path.join(cmake_install_prefix, cmake_bindir)
+ relpath = os.path.relpath(python_instdir, cmake_bindir)
+ bindir = os.path.dirname(os.path.realpath(__file__))
+ libdir = os.path.abspath(os.path.join(bindir, relpath))
+ if os.path.isdir(libdir):
+ sys.path.insert(0, libdir) # Path after make install
+ else:
+ sys.path.insert(0, bindir) # Path before make install
+
+from nanopb.generator.nanopb_generator import main_cli, main_plugin
+if __name__ == '__main__':
+ sys.exit(main_cli())
diff --git a/vendor/nanopb/generator/__init__.py b/vendor/nanopb/generator/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/vendor/nanopb/generator/nanopb_generator b/vendor/nanopb/generator/nanopb_generator
new file mode 100644
index 00000000..429aa13e
--- /dev/null
+++ b/vendor/nanopb/generator/nanopb_generator
@@ -0,0 +1,7 @@
+#!/usr/bin/env python3
+# Allow calling nanopb_generator.py as simply nanopb_generator.
+# This provides consistency with packages installed through CMake or pip.
+
+from nanopb_generator import *
+if __name__ == '__main__':
+ main_cli()
diff --git a/vendor/nanopb/generator/nanopb_generator.bat b/vendor/nanopb/generator/nanopb_generator.bat
new file mode 100644
index 00000000..d2083164
--- /dev/null
+++ b/vendor/nanopb/generator/nanopb_generator.bat
@@ -0,0 +1,5 @@
+@echo off
+:: Allow calling nanopb_generator.py as simply nanopb_generator.
+:: This provides consistency with packages installed through CMake or pip.
+set mydir=%~dp0
+python "%mydir%\nanopb_generator.py" %*
diff --git a/vendor/nanopb/generator/nanopb_generator.py b/vendor/nanopb/generator/nanopb_generator.py
index 48792ef5..16c6fd71 100755
--- a/vendor/nanopb/generator/nanopb_generator.py
+++ b/vendor/nanopb/generator/nanopb_generator.py
@@ -4,7 +4,7 @@
from __future__ import unicode_literals
'''Generate header file for nanopb from a ProtoBuf FileDescriptorSet.'''
-nanopb_version = "nanopb-0.4.5"
+nanopb_version = "nanopb-0.4.9.1"
import sys
import re
@@ -13,18 +13,16 @@
import itertools
import tempfile
import shutil
+import shlex
import os
from functools import reduce
-try:
- # Add some dummy imports to keep packaging tools happy.
- import google, distutils.util # bbfreeze seems to need these
- import pkg_resources # pyinstaller / protobuf 2.5 seem to need these
- import proto.nanopb_pb2 as nanopb_pb2 # pyinstaller seems to need this
- import pkg_resources.py2_warn
-except:
- # Don't care, we will error out later if it is actually important.
- pass
+# Python-protobuf breaks easily with protoc version differences if
+# using the cpp or upb implementation. Force it to use pure Python
+# implementation. Performance is not very important in the generator.
+if not os.getenv("PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"):
+ os.putenv("PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION", "python")
+ os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
try:
# Make sure grpc_tools gets included in binary package if it is available
@@ -36,61 +34,54 @@
import google.protobuf.text_format as text_format
import google.protobuf.descriptor_pb2 as descriptor
import google.protobuf.compiler.plugin_pb2 as plugin_pb2
- import google.protobuf.reflection as reflection
import google.protobuf.descriptor
+ import google.protobuf.message_factory as message_factory
except:
sys.stderr.write('''
- *************************************************************
- *** Could not import the Google protobuf Python libraries ***
- *** Try installing package 'python3-protobuf' or similar. ***
- *************************************************************
+ **********************************************************************
+ *** Could not import the Google protobuf Python libraries ***
+ *** ***
+ *** Easiest solution is often to install the dependencies via pip: ***
+ *** pip install protobuf grpcio-tools ***
+ **********************************************************************
''' + '\n')
raise
+# GetMessageClass() is used by modern python-protobuf (around 5.x onwards)
+# Retain compatibility with older python-protobuf versions.
try:
- from .proto import nanopb_pb2
- from .proto._utils import invoke_protoc
-except TypeError:
- sys.stderr.write('''
- ****************************************************************************
- *** Got TypeError when importing the protocol definitions for generator. ***
- *** This usually means that the protoc in your path doesn't match the ***
- *** Python protobuf library version. ***
- *** ***
- *** Please check the output of the following commands: ***
- *** which protoc ***
- *** protoc --version ***
- *** python3 -c 'import google.protobuf; print(google.protobuf.__file__)' ***
- *** If you are not able to find the python protobuf version using the ***
- *** above command, use this command. ***
- *** pip freeze | grep -i protobuf ***
- ****************************************************************************
- ''' + '\n')
- raise
-except (ValueError, SystemError, ImportError):
- # Probably invoked directly instead of via installed scripts.
- import proto.nanopb_pb2 as nanopb_pb2
+ import google.protobuf.message_factory as message_factory
+ GetMessageClass = message_factory.GetMessageClass
+except AttributeError:
+ import google.protobuf.reflection as reflection
+ GetMessageClass = reflection.MakeClass
+
+# Depending on how this script is run, we may or may not have PEP366 package name
+# available for relative imports.
+if not __package__:
+ import proto
from proto._utils import invoke_protoc
-except:
- sys.stderr.write('''
- ********************************************************************
- *** Failed to import the protocol definitions for generator. ***
- *** You have to run 'make' in the nanopb/generator/proto folder. ***
- ********************************************************************
- ''' + '\n')
- raise
+ from proto import TemporaryDirectory
+else:
+ from . import proto
+ from .proto._utils import invoke_protoc
+ from .proto import TemporaryDirectory
-try:
- from tempfile import TemporaryDirectory
-except ImportError:
- class TemporaryDirectory:
- '''TemporaryDirectory fallback for Python 2'''
- def __enter__(self):
- self.dir = tempfile.mkdtemp()
- return self.dir
+if getattr(sys, 'frozen', False):
+ # Binary package, just import the file
+ from proto import nanopb_pb2
+else:
+ # Import nanopb_pb2.py, rebuilds if necessary and not disabled
+ # by env variable NANOPB_PB2_NO_REBUILD
+ nanopb_pb2 = proto.load_nanopb_pb2()
- def __exit__(self, *args):
- shutil.rmtree(self.dir)
+try:
+ # Add some dummy imports to keep packaging tools happy.
+ import google # bbfreeze seems to need these
+ from proto import nanopb_pb2 # pyinstaller seems to need this
+except:
+ # Don't care, we will error out later if it is actually important.
+ pass
# ---------------------------------------------------------------------------
# Generation of single fields
@@ -116,7 +107,11 @@ def __exit__(self, *args):
FieldD.TYPE_UINT32: ('uint32_t', 'UINT32', 5, 4),
FieldD.TYPE_UINT64: ('uint64_t', 'UINT64', 10, 8),
- # Integer size override options
+ # Integer size override option
+ (FieldD.TYPE_ENUM, nanopb_pb2.IS_8): ('uint8_t', 'ENUM', 4, 1),
+ (FieldD.TYPE_ENUM, nanopb_pb2.IS_16): ('uint16_t', 'ENUM', 4, 2),
+ (FieldD.TYPE_ENUM, nanopb_pb2.IS_32): ('uint32_t', 'ENUM', 4, 4),
+ (FieldD.TYPE_ENUM, nanopb_pb2.IS_64): ('uint64_t', 'ENUM', 4, 8),
(FieldD.TYPE_INT32, nanopb_pb2.IS_8): ('int8_t', 'INT32', 10, 1),
(FieldD.TYPE_INT32, nanopb_pb2.IS_16): ('int16_t', 'INT32', 10, 2),
(FieldD.TYPE_INT32, nanopb_pb2.IS_32): ('int32_t', 'INT32', 10, 4),
@@ -143,20 +138,84 @@ def __exit__(self, *args):
(FieldD.TYPE_UINT64, nanopb_pb2.IS_64): ('uint64_t','UINT64', 10, 8),
}
+class NamingStyle:
+ def enum_name(self, name):
+ return "_%s" % (name)
+
+ def struct_name(self, name):
+ return "_%s" % (name)
+
+ def type_name(self, name):
+ return "%s" % (name)
+
+ def define_name(self, name):
+ return "%s" % (name)
+
+ def var_name(self, name):
+ return "%s" % (name)
+
+ def enum_entry(self, name):
+ return "%s" % (name)
+
+ def func_name(self, name):
+ return "%s" % (name)
+
+ def bytes_type(self, struct_name, name):
+ return "%s_%s_t" % (struct_name, name)
+
+class NamingStyleC(NamingStyle):
+ def enum_name(self, name):
+ return self.underscore(name)
+
+ def struct_name(self, name):
+ return self.underscore(name)
+
+ def type_name(self, name):
+ return "%s_t" % self.underscore(name)
+
+ def define_name(self, name):
+ return self.underscore(name).upper()
+
+ def var_name(self, name):
+ return self.underscore(name)
+
+ def enum_entry(self, name):
+ return self.underscore(name).upper()
+
+ def func_name(self, name):
+ return self.underscore(name)
+
+ def bytes_type(self, struct_name, name):
+ return "%s_%s_t" % (self.underscore(struct_name), self.underscore(name))
+
+ def underscore(self, word):
+ word = str(word)
+ word = re.sub(r"([A-Z]+)([A-Z][a-z])", r'\1_\2', word)
+ word = re.sub(r"([a-z\d])([A-Z])", r'\1_\2', word)
+ word = word.replace("-", "_")
+ return word.lower()
+
class Globals:
'''Ugly global variables, should find a good way to pass these.'''
verbose_options = False
separate_options = []
matched_namemasks = set()
protoc_insertion_points = False
+ naming_style = NamingStyle()
-# String types (for python 2 / python 3 compatibility)
-try:
+# String types and file encoding for Python2 UTF-8 support
+if sys.version_info.major == 2:
+ import codecs
+ open = codecs.open
strtypes = (unicode, str)
- openmode_unicode = 'rU'
-except NameError:
+
+ def str(x):
+ try:
+ return strtypes[1](x)
+ except UnicodeEncodeError:
+ return strtypes[0](x)
+else:
strtypes = (str, )
- openmode_unicode = 'r'
class Names:
@@ -168,9 +227,15 @@ def __init__(self, parts = ()):
parts = (parts,)
self.parts = tuple(parts)
+ if self.parts == ('',):
+ self.parts = ()
+
def __str__(self):
return '_'.join(self.parts)
+ def __repr__(self):
+ return 'Names(%s)' % ','.join("'%s'" % x for x in self.parts)
+
def __add__(self, other):
if isinstance(other, strtypes):
return Names(self.parts + (other,))
@@ -253,6 +318,9 @@ def __str__(self):
else:
return '(' + str(self.value) + ' + ' + ' + '.join(self.symbols) + ')'
+ def __repr__(self):
+ return 'EncodedSize(%s, %s, %s, %s)' % (self.value, self.symbols, self.declarations, self.required_defines)
+
def get_declarations(self):
'''Get any declarations that must appear alongside this encoded size definition,
such as helper union {} types.'''
@@ -272,56 +340,49 @@ def upperlimit(self):
else:
return 2**32 - 1
-
-'''
-Constants regarding path of proto elements in file descriptor.
-They are used to connect proto elements with source code information (comments)
-These values come from:
- https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto
-'''
-MESSAGE_PATH = 4
-ENUM_PATH = 5
-FIELD_PATH = 2
-
-
class ProtoElement(object):
- def __init__(self, path, index, comments):
+ # Constants regarding path of proto elements in file descriptor.
+ # They are used to connect proto elements with source code information (comments)
+ # These values come from:
+ # https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto
+ FIELD = 2
+ MESSAGE = 4
+ ENUM = 5
+ NESTED_TYPE = 3
+ NESTED_ENUM = 4
+
+ def __init__(self, path, comments = None):
'''
- path is a predefined value for each element type in proto file.
- For example, message == 4, enum == 5, service == 6
- index is the N-th occurance of the `path` in the proto file.
- For example, 4-th message in the proto file or 2-nd enum etc ...
+ path is a tuple containing integers (type, index, ...)
comments is a dictionary mapping between element path & SourceCodeInfo.Location
(contains information about source comments).
'''
- self.path = path
- self.index = index
- self.comments = comments
-
- def element_path(self):
- '''Get path to proto element.'''
- return [self.path, self.index]
-
- def member_path(self, member_index):
- '''Get path to member of proto element.
- Example paths:
- [4, m] - message comments, m: msgIdx in proto from 0
- [4, m, 2, f] - field comments in message, f: fieldIdx in message from 0
- [6, s] - service comments, s: svcIdx in proto from 0
- [6, s, 2, r] - rpc comments in service, r: rpc method def in service from 0
- '''
- return self.element_path() + [FIELD_PATH, member_index]
+ assert(isinstance(path, tuple))
+ self.element_path = path
+ self.comments = comments or {}
+
+ def get_member_comments(self, index):
+ '''Get comments for a member of enum or message.'''
+ return self.get_comments((ProtoElement.FIELD, index), leading_indent = True)
- def get_comments(self, path, leading_indent=True):
- '''Get leading & trailing comments for enum member based on path.
+ def format_comment(self, comment):
+ '''Put comment inside /* */ and sanitize comment contents'''
+ comment = comment.strip()
+ comment = comment.replace('/*', '/ *')
+ comment = comment.replace('*/', '* /')
+ return "/* %s */" % comment
- path is the proto path of an element or member (ex. [5 0] or [4 1 2 0])
+ def get_comments(self, member_path = (), leading_indent = False):
+ '''Get leading & trailing comments for a protobuf element.
+
+ member_path is the proto path of an element or member (ex. [5 0] or [4 1 2 0])
leading_indent is a flag that indicates if leading comments should be indented
'''
# Obtain SourceCodeInfo.Location object containing comment
# information (based on the member path)
- comment = self.comments.get(str(path))
+ path = self.element_path + member_path
+ comment = self.comments.get(path)
leading_comment = ""
trailing_comment = ""
@@ -331,23 +392,23 @@ def get_comments(self, path, leading_indent=True):
if comment.leading_comments:
leading_comment = " " if leading_indent else ""
- leading_comment += "/* %s */" % comment.leading_comments.strip()
+ leading_comment += self.format_comment(comment.leading_comments)
if comment.trailing_comments:
- trailing_comment = "/* %s */" % comment.trailing_comments.strip()
+ trailing_comment = self.format_comment(comment.trailing_comments)
return leading_comment, trailing_comment
class Enum(ProtoElement):
- def __init__(self, names, desc, enum_options, index, comments):
+ def __init__(self, names, desc, enum_options, element_path, comments):
'''
desc is EnumDescriptorProto
index is the index of this enum element inside the file
comments is a dictionary mapping between element path & SourceCodeInfo.Location
(contains information about source comments)
'''
- super(Enum, self).__init__(ENUM_PATH, index, comments)
+ super(Enum, self).__init__(element_path, comments)
self.options = enum_options
self.names = names
@@ -372,21 +433,34 @@ def has_negative(self):
def encoded_size(self):
return max([varint_max_size(v) for n,v in self.values])
+ def __repr__(self):
+ return 'Enum(%s)' % self.names
+
def __str__(self):
- enum_path = self.element_path()
- leading_comment, trailing_comment = self.get_comments(enum_path, leading_indent=False)
+ leading_comment, trailing_comment = self.get_comments()
result = ''
if leading_comment:
result = '%s\n' % leading_comment
- result += 'typedef enum _%s { %s\n' % (self.names, trailing_comment)
+ result += 'typedef enum %s' % Globals.naming_style.enum_name(self.names)
+
+ # Override the enum size if user wants to use smaller integers
+ if (FieldD.TYPE_ENUM, self.options.enum_intsize) in datatypes:
+ self.ctype, self.pbtype, self.enc_size, self.data_item_size = datatypes[(FieldD.TYPE_ENUM, self.options.enum_intsize)]
+ result += ': ' + self.ctype
+
+ result += ' {'
+
+ if trailing_comment:
+ result += " " + trailing_comment
+
+ result += "\n"
enum_length = len(self.values)
enum_values = []
for index, (name, value) in enumerate(self.values):
- member_path = self.member_path(index)
- leading_comment, trailing_comment = self.get_comments(member_path)
+ leading_comment, trailing_comment = self.get_member_comments(index)
if leading_comment:
enum_values.append(leading_comment)
@@ -396,7 +470,11 @@ def __str__(self):
# last enum member should not end with a comma
comma = ""
- enum_values.append(" %s = %d%s %s" % (name, value, comma, trailing_comment))
+ enum_value = " %s = %d%s" % (Globals.naming_style.enum_entry(name), value, comma)
+ if trailing_comment:
+ enum_value += " " + trailing_comment
+
+ enum_values.append(enum_value)
result += '\n'.join(enum_values)
result += '\n}'
@@ -404,24 +482,54 @@ def __str__(self):
if self.packed:
result += ' pb_packed'
- result += ' %s;' % self.names
+ result += ' %s;' % Globals.naming_style.type_name(self.names)
return result
def auxiliary_defines(self):
# sort the enum by value
sorted_values = sorted(self.values, key = lambda x: (x[1], x[0]))
- result = '#define _%s_MIN %s\n' % (self.names, sorted_values[0][0])
- result += '#define _%s_MAX %s\n' % (self.names, sorted_values[-1][0])
- result += '#define _%s_ARRAYSIZE ((%s)(%s+1))\n' % (self.names, self.names, sorted_values[-1][0])
+
+ unmangledName = self.protofile.manglenames.unmangle(self.names)
+ identifier = Globals.naming_style.define_name('_%s_MIN' % self.names)
+ result = '#define %s %s\n' % (
+ identifier,
+ Globals.naming_style.enum_entry(sorted_values[0][0]))
+ if unmangledName:
+ unmangledIdentifier = Globals.naming_style.define_name('_%s_MIN' % unmangledName)
+ self.protofile.manglenames.reverse_name_mapping[identifier] = unmangledIdentifier
+
+ identifier = Globals.naming_style.define_name('_%s_MAX' % self.names)
+ result += '#define %s %s\n' % (
+ identifier,
+ Globals.naming_style.enum_entry(sorted_values[-1][0]))
+ if unmangledName:
+ unmangledIdentifier = Globals.naming_style.define_name('_%s_MAX' % unmangledName)
+ self.protofile.manglenames.reverse_name_mapping[identifier] = unmangledIdentifier
+
+ identifier = Globals.naming_style.define_name('_%s_ARRAYSIZE' % self.names)
+ result += '#define %s ((%s)(%s+1))\n' % (
+ identifier,
+ Globals.naming_style.type_name(self.names),
+ Globals.naming_style.enum_entry(sorted_values[-1][0]))
+ if unmangledName:
+ unmangledIdentifier = Globals.naming_style.define_name('_%s_ARRAYSIZE' % unmangledName)
+ self.protofile.manglenames.reverse_name_mapping[identifier] = unmangledIdentifier
if not self.options.long_names:
# Define the long names always so that enum value references
# from other files work properly.
for i, x in enumerate(self.values):
- result += '#define %s %s\n' % (self.value_longnames[i], x[0])
+ result += '#define %s %s\n' % (Globals.naming_style.define_name(self.value_longnames[i]), Globals.naming_style.enum_entry(x[0]))
if self.options.enum_to_string:
- result += 'const char *%s_name(%s v);\n' % (self.names, self.names)
+ result += 'const char *%s(%s v);\n' % (
+ Globals.naming_style.func_name('%s_name' % self.names),
+ Globals.naming_style.type_name(self.names))
+
+ if self.options.enum_validate:
+ result += 'bool %s(%s v);\n' % (
+ Globals.naming_style.func_name('%s_valid' % self.names),
+ Globals.naming_style.type_name(self.names))
return result
@@ -429,13 +537,18 @@ def enum_to_string_definition(self):
if not self.options.enum_to_string:
return ""
- result = 'const char *%s_name(%s v) {\n' % (self.names, self.names)
+ result = 'const char *%s(%s v) {\n' % (
+ Globals.naming_style.func_name('%s_name' % self.names),
+ Globals.naming_style.type_name(self.names))
+
result += ' switch (v) {\n'
for ((enumname, _), strname) in zip(self.values, self.value_longnames):
# Strip off the leading type name from the string value.
strval = str(strname)[len(str(self.names)) + 1:]
- result += ' case %s: return "%s";\n' % (enumname, strval)
+ result += ' case %s: return "%s";\n' % (
+ Globals.naming_style.enum_entry(enumname),
+ Globals.naming_style.enum_entry(strval))
result += ' }\n'
result += ' return "unknown";\n'
@@ -443,6 +556,28 @@ def enum_to_string_definition(self):
return result
+ def enum_validate(self):
+ if not self.options.enum_validate:
+ return ""
+
+ result = 'bool %s(%s v) {\n' % (
+ Globals.naming_style.func_name('%s_valid' % self.names),
+ Globals.naming_style.type_name(self.names))
+
+ result += ' switch (v) {\n'
+
+ for (enumname, _) in self.values:
+ result += ' case %s: return true;\n' % (
+ Globals.naming_style.enum_entry(enumname)
+ )
+
+ result += ' }\n'
+ result += ' return false;\n'
+ result += '}\n'
+
+ return result
+
+
class FieldMaxSize:
def __init__(self, worst = 0, checks = [], field_name = 'undefined'):
if isinstance(worst, list):
@@ -461,12 +596,13 @@ def extend(self, extend, field_name = None):
self.checks.extend(extend.checks)
-class Field:
+class Field(ProtoElement):
macro_x_param = 'X'
macro_a_param = 'a'
- def __init__(self, struct_name, desc, field_options):
+ def __init__(self, struct_name, desc, field_options, element_path = (), comments = None):
'''desc is FieldDescriptorProto'''
+ ProtoElement.__init__(self, element_path, comments)
self.tag = desc.number
self.struct_name = struct_name
self.union_name = None
@@ -494,6 +630,11 @@ def __init__(self, struct_name, desc, field_options):
if field_options.HasField("max_size"):
self.max_size = field_options.max_size
+ if field_options.HasField("initializer"):
+ self.initializer = field_options.initializer
+ else:
+ self.initializer = None
+
self.default_has = field_options.default_has
if desc.type == FieldD.TYPE_STRING and field_options.HasField("max_length"):
@@ -507,6 +648,9 @@ def __init__(self, struct_name, desc, field_options):
self.default = desc.default_value
# Check field rules, i.e. required/optional/repeated.
+ if field_options.HasField("label_override"):
+ desc.label = field_options.label_override
+
can_be_static = True
if desc.label == FieldD.LABEL_REPEATED:
self.rules = 'REPEATED'
@@ -517,6 +661,9 @@ def __init__(self, struct_name, desc, field_options):
if field_options.fixed_count:
self.rules = 'FIXARRAY'
+ elif desc.label == FieldD.LABEL_REQUIRED:
+ # We allow LABEL_REQUIRED using label_override even for proto3 (see #962)
+ self.rules = 'REQUIRED'
elif field_options.proto3:
if desc.type == FieldD.TYPE_MESSAGE and not field_options.proto3_singular_msgs:
# In most other protobuf libraries proto3 submessages have
@@ -528,8 +675,6 @@ def __init__(self, struct_name, desc, field_options):
else:
# Proto3 singular fields (without has_field)
self.rules = 'SINGULAR'
- elif desc.label == FieldD.LABEL_REQUIRED:
- self.rules = 'REQUIRED'
elif desc.label == FieldD.LABEL_OPTIONAL:
self.rules = 'OPTIONAL'
else:
@@ -548,7 +693,7 @@ def __init__(self, struct_name, desc, field_options):
if can_be_static:
field_options.type = nanopb_pb2.FT_STATIC
else:
- field_options.type = nanopb_pb2.FT_CALLBACK
+ field_options.type = field_options.fallback_type
if field_options.type == nanopb_pb2.FT_STATIC and not can_be_static:
raise Exception("Field '%s' is defined as static, but max_size or "
@@ -608,7 +753,7 @@ def __init__(self, struct_name, desc, field_options):
self.pbtype = 'BYTES'
self.ctype = 'pb_bytes_array_t'
if self.allocation == 'STATIC':
- self.ctype = self.struct_name + self.name + 't'
+ self.ctype = Globals.naming_style.bytes_type(self.struct_name, self.name)
self.enc_size = varint_max_size(self.max_size) + self.max_size
elif desc.type == FieldD.TYPE_MESSAGE:
self.pbtype = 'MESSAGE'
@@ -626,42 +771,59 @@ def __init__(self, struct_name, desc, field_options):
def __lt__(self, other):
return self.tag < other.tag
+ def __repr__(self):
+ return 'Field(%s)' % self.name
+
def __str__(self):
result = ''
+
+ var_name = Globals.naming_style.var_name(self.name)
+ type_name = Globals.naming_style.type_name(self.ctype) if isinstance(self.ctype, Names) else self.ctype
+
if self.allocation == 'POINTER':
if self.rules == 'REPEATED':
if self.pbtype == 'MSG_W_CB':
- result += ' pb_callback_t cb_' + self.name + ';\n'
- result += ' pb_size_t ' + self.name + '_count;\n'
+ result += ' pb_callback_t cb_' + var_name + ';\n'
+ result += ' pb_size_t ' + var_name + '_count;\n'
- if self.pbtype in ['MESSAGE', 'MSG_W_CB']:
- # Use struct definition, so recursive submessages are possible
- result += ' struct _%s *%s;' % (self.ctype, self.name)
+ if self.rules == 'FIXARRAY' and self.pbtype in ['STRING', 'BYTES']:
+ # Pointer to fixed size array of pointers
+ result += ' %s* (*%s)%s;' % (type_name, var_name, self.array_decl)
elif self.pbtype == 'FIXED_LENGTH_BYTES' or self.rules == 'FIXARRAY':
- # Pointer to fixed size array
- result += ' %s (*%s)%s;' % (self.ctype, self.name, self.array_decl)
- elif self.rules in ['REPEATED', 'FIXARRAY'] and self.pbtype in ['STRING', 'BYTES']:
+ # Pointer to fixed size array of items
+ result += ' %s (*%s)%s;' % (type_name, var_name, self.array_decl)
+ elif self.rules == 'REPEATED' and self.pbtype in ['STRING', 'BYTES']:
# String/bytes arrays need to be defined as pointers to pointers
- result += ' %s **%s;' % (self.ctype, self.name)
+ result += ' %s **%s;' % (type_name, var_name)
+ elif self.pbtype in ['MESSAGE', 'MSG_W_CB']:
+ # Use struct definition, so recursive submessages are possible
+ result += ' struct %s *%s;' % (Globals.naming_style.struct_name(self.ctype), var_name)
else:
- result += ' %s *%s;' % (self.ctype, self.name)
+ # Normal case, just a pointer to single item
+ result += ' %s *%s;' % (type_name, var_name)
elif self.allocation == 'CALLBACK':
- result += ' %s %s;' % (self.callback_datatype, self.name)
+ result += ' %s %s;' % (self.callback_datatype, var_name)
else:
if self.pbtype == 'MSG_W_CB' and self.rules in ['OPTIONAL', 'REPEATED']:
- result += ' pb_callback_t cb_' + self.name + ';\n'
+ result += ' pb_callback_t cb_' + var_name + ';\n'
if self.rules == 'OPTIONAL':
- result += ' bool has_' + self.name + ';\n'
+ result += ' bool has_' + var_name + ';\n'
elif self.rules == 'REPEATED':
- result += ' pb_size_t ' + self.name + '_count;\n'
- result += ' %s %s%s;' % (self.ctype, self.name, self.array_decl)
+ result += ' pb_size_t ' + var_name + '_count;\n'
+
+ result += ' %s %s%s;' % (type_name, var_name, self.array_decl)
+
+ leading_comment, trailing_comment = self.get_comments(leading_indent = True)
+ if leading_comment: result = leading_comment + "\n" + result
+ if trailing_comment: result = result + " " + trailing_comment
+
return result
def types(self):
'''Return definitions for any special types this field might need.'''
if self.pbtype == 'BYTES' and self.allocation == 'STATIC':
- result = 'typedef PB_BYTES_ARRAY_T(%d) %s;\n' % (self.max_size, self.ctype)
+ result = 'typedef PB_BYTES_ARRAY_T(%d) %s;\n' % (self.max_size, Globals.naming_style.var_name(self.ctype))
else:
result = ''
return result
@@ -670,6 +832,8 @@ def get_dependencies(self):
'''Get list of type names used by this field.'''
if self.allocation == 'STATIC':
return [str(self.ctype)]
+ elif self.allocation == 'POINTER' and self.rules == 'FIXARRAY':
+ return [str(self.ctype)]
else:
return []
@@ -680,11 +844,13 @@ def get_initializer(self, null_init, inner_init_only = False):
'''
inner_init = None
- if self.pbtype in ['MESSAGE', 'MSG_W_CB']:
+ if self.initializer is not None:
+ inner_init = self.initializer
+ elif self.pbtype in ['MESSAGE', 'MSG_W_CB']:
if null_init:
- inner_init = '%s_init_zero' % self.ctype
+ inner_init = Globals.naming_style.define_name('%s_init_zero' % self.ctype)
else:
- inner_init = '%s_init_default' % self.ctype
+ inner_init = Globals.naming_style.define_name('%s_init_default' % self.ctype)
elif self.default is None or null_init:
if self.pbtype == 'STRING':
inner_init = '""'
@@ -693,7 +859,7 @@ def get_initializer(self, null_init, inner_init_only = False):
elif self.pbtype == 'FIXED_LENGTH_BYTES':
inner_init = '{0}'
elif self.pbtype in ('ENUM', 'UENUM'):
- inner_init = '_%s_MIN' % self.ctype
+ inner_init = '_%s_MIN' % Globals.naming_style.define_name(self.ctype)
else:
inner_init = '0'
else:
@@ -730,6 +896,8 @@ def get_initializer(self, null_init, inner_init_only = False):
inner_init += '.0f'
elif self.pbtype == 'FLOAT':
inner_init += 'f'
+ elif self.pbtype in ('ENUM', 'UENUM'):
+ inner_init = Globals.naming_style.enum_entry(self.default)
else:
inner_init = str(self.default)
@@ -757,8 +925,14 @@ def get_initializer(self, null_init, inner_init_only = False):
elif self.allocation == 'CALLBACK':
if self.pbtype == 'EXTENSION':
outer_init = 'NULL'
- else:
+ elif self.callback_datatype == 'pb_callback_t':
outer_init = '{{NULL}, NULL}'
+ elif self.initializer is not None:
+ outer_init = inner_init
+ elif self.callback_datatype.strip().endswith('*'):
+ outer_init = 'NULL'
+ else:
+ outer_init = '{0}'
if self.pbtype == 'MSG_W_CB' and self.rules in ['REPEATED', 'OPTIONAL']:
outer_init = '{{NULL}, NULL}, ' + outer_init
@@ -767,22 +941,29 @@ def get_initializer(self, null_init, inner_init_only = False):
def tags(self):
'''Return the #define for the tag number of this field.'''
- identifier = '%s_%s_tag' % (self.struct_name, self.name)
+ identifier = Globals.naming_style.define_name('%s_%s_tag' % (self.struct_name, self.name))
return '#define %-40s %d\n' % (identifier, self.tag)
def fieldlist(self):
'''Return the FIELDLIST macro entry for this field.
Format is: X(a, ATYPE, HTYPE, LTYPE, field_name, tag)
'''
- name = self.name
+ name = Globals.naming_style.var_name(self.name)
if self.rules == "ONEOF":
# For oneofs, make a tuple of the union name, union member name,
# and the name inside the parent struct.
if not self.anonymous:
- name = '(%s,%s,%s)' % (self.union_name, self.name, self.union_name + '.' + self.name)
+ name = '(%s,%s,%s)' % (
+ Globals.naming_style.var_name(self.union_name),
+ Globals.naming_style.var_name(self.name),
+ Globals.naming_style.var_name(self.union_name) + '.' +
+ Globals.naming_style.var_name(self.name))
else:
- name = '(%s,%s,%s)' % (self.union_name, self.name, self.name)
+ name = '(%s,%s,%s)' % (
+ Globals.naming_style.var_name(self.union_name),
+ Globals.naming_style.var_name(self.name),
+ Globals.naming_style.var_name(self.name))
return '%s(%s, %-9s %-9s %-9s %-16s %3d)' % (self.macro_x_param,
self.macro_a_param,
@@ -811,6 +992,7 @@ def data_size(self, dependencies):
size = dependencies[str(self.submsgname)].data_size(other_dependencies)
else:
size = 256 # Message is in other file, this is reasonable guess for most cases
+ sys.stderr.write('Could not determine size for submessage %s, using default %d\n' % (self.submsgname, size))
if self.pbtype == 'MSG_W_CB':
size += 16
@@ -941,6 +1123,7 @@ def __init__(self, struct_name, range_start, field_options):
self.data_item_size = 0
self.fixed_count = False
self.callback_datatype = 'pb_extension_t*'
+ self.initializer = None
def requires_custom_field_callback(self):
return False
@@ -971,13 +1154,13 @@ def __init__(self, fullname, desc, field_options):
else:
self.skip = False
self.rules = 'REQUIRED' # We don't really want the has_field for extensions
- # currently no support for comments for extension fields => provide 0, {}
- self.msg = Message(self.fullname + "extmsg", None, field_options, 0, {})
+ # currently no support for comments for extension fields => provide (), {}
+ self.msg = Message(self.fullname + "extmsg", None, field_options, (), {})
self.msg.fields.append(self)
def tags(self):
'''Return the #define for the tag number of this field.'''
- identifier = '%s_tag' % self.fullname
+ identifier = Globals.naming_style.define_name('%s_tag' % (self.fullname))
return '#define %-40s %d\n' % (identifier, self.tag)
def extension_decl(self):
@@ -988,7 +1171,7 @@ def extension_decl(self):
return msg
return ('extern const pb_extension_type_t %s; /* field type: %s */\n' %
- (self.fullname, str(self).strip()))
+ (Globals.naming_style.var_name(self.fullname), str(self).strip()))
def extension_def(self, dependencies):
'''Definition of the extension type in the .pb.c file'''
@@ -1001,10 +1184,10 @@ def extension_def(self, dependencies):
result += self.msg.fields_declaration(dependencies)
result += 'pb_byte_t %s_default[] = {0x00};\n' % self.msg.name
result += self.msg.fields_definition(dependencies)
- result += 'const pb_extension_type_t %s = {\n' % self.fullname
+ result += 'const pb_extension_type_t %s = {\n' % Globals.naming_style.var_name(self.fullname)
result += ' NULL,\n'
result += ' NULL,\n'
- result += ' &%s_msg\n' % self.msg.name
+ result += ' &%s_msg\n' % Globals.naming_style.type_name(self.msg.name)
result += '};\n'
return result
@@ -1046,16 +1229,16 @@ def __str__(self):
result = ''
if self.fields:
if self.has_msg_cb:
- result += ' pb_callback_t cb_' + self.name + ';\n'
+ result += ' pb_callback_t cb_' + Globals.naming_style.var_name(self.name) + ';\n'
- result += ' pb_size_t which_' + self.name + ";\n"
+ result += ' pb_size_t which_' + Globals.naming_style.var_name(self.name) + ";\n"
result += ' union {\n'
for f in self.fields:
result += ' ' + str(f).replace('\n', '\n ') + '\n'
if self.anonymous:
result += ' };'
else:
- result += ' } ' + self.name + ';'
+ result += ' } ' + Globals.naming_style.var_name(self.name) + ';'
return result
def types(self):
@@ -1125,8 +1308,8 @@ def requires_custom_field_callback(self):
class Message(ProtoElement):
- def __init__(self, names, desc, message_options, index, comments):
- super(Message, self).__init__(MESSAGE_PATH, index, comments)
+ def __init__(self, names, desc, message_options, element_path, comments):
+ super(Message, self).__init__(element_path, comments)
self.name = names
self.fields = []
self.oneofs = {}
@@ -1168,15 +1351,19 @@ def load_fields(self, desc, message_options):
else:
sys.stderr.write('Note: This Python protobuf library has no OneOf support\n')
- for f in desc.field:
+ for index, f in enumerate(desc.field):
field_options = get_nanopb_suboptions(f, message_options, self.name + f.name)
+
if field_options.type == nanopb_pb2.FT_IGNORE:
continue
+ if field_options.discard_deprecated and f.options.deprecated:
+ continue
+
if field_options.descriptorsize > self.descriptorsize:
self.descriptorsize = field_options.descriptorsize
- field = Field(self.name, f, field_options)
+ field = Field(self.name, f, field_options, self.element_path + (ProtoElement.FIELD, index), self.comments)
if hasattr(f, 'oneof_index') and f.HasField('oneof_index'):
if hasattr(f, 'proto3_optional') and f.proto3_optional:
no_unions.append(f.oneof_index)
@@ -1210,32 +1397,28 @@ def get_dependencies(self):
deps += f.get_dependencies()
return deps
+ def __repr__(self):
+ return 'Message(%s)' % self.name
+
def __str__(self):
- message_path = self.element_path()
- leading_comment, trailing_comment = self.get_comments(message_path, leading_indent=False)
+ leading_comment, trailing_comment = self.get_comments()
result = ''
if leading_comment:
result = '%s\n' % leading_comment
- result += 'typedef struct _%s { %s\n' % (self.name, trailing_comment)
+ result += 'typedef struct %s {' % Globals.naming_style.struct_name(self.name)
+ if trailing_comment:
+ result += " " + trailing_comment
+
+ result += '\n'
if not self.fields:
# Empty structs are not allowed in C standard.
# Therefore add a dummy field if an empty message occurs.
result += ' char dummy_field;'
- msg_fields = []
- for index, field in enumerate(self.fields):
- member_path = self.member_path(index)
- leading_comment, trailing_comment = self.get_comments(member_path)
-
- if leading_comment:
- msg_fields.append(leading_comment)
-
- msg_fields.append("%s %s" % (str(field), trailing_comment))
-
- result += '\n'.join(msg_fields)
+ result += '\n'.join([str(f) for f in self.fields])
if Globals.protoc_insertion_points:
result += '\n/* @@protoc_insertion_point(struct:%s) */' % self.name
@@ -1245,7 +1428,7 @@ def __str__(self):
if self.packed:
result += ' pb_packed'
- result += ' %s;' % self.name
+ result += ' %s;' % Globals.naming_style.type_name(self.name)
if self.packed:
result = 'PB_PACKED_STRUCT_START\n' + result
@@ -1314,9 +1497,10 @@ def fields_declaration(self, dependencies):
sorted_fields = list(self.all_fields())
sorted_fields.sort(key = lambda x: x.tag)
- result = '#define %s_FIELDLIST(%s, %s) \\\n' % (self.name,
- Field.macro_x_param,
- Field.macro_a_param)
+ result = '#define %s_FIELDLIST(%s, %s) \\\n' % (
+ Globals.naming_style.define_name(self.name),
+ Field.macro_x_param,
+ Field.macro_a_param)
result += ' \\\n'.join(x.fieldlist() for x in sorted_fields)
result += '\n'
@@ -1324,33 +1508,78 @@ def fields_declaration(self, dependencies):
if has_callbacks:
if self.callback_function != 'pb_default_field_callback':
result += "extern bool %s(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_t *field);\n" % self.callback_function
- result += "#define %s_CALLBACK %s\n" % (self.name, self.callback_function)
+ result += "#define %s_CALLBACK %s\n" % (
+ Globals.naming_style.define_name(self.name),
+ self.callback_function)
else:
- result += "#define %s_CALLBACK NULL\n" % self.name
+ result += "#define %s_CALLBACK NULL\n" % Globals.naming_style.define_name(self.name)
defval = self.default_value(dependencies)
if defval:
hexcoded = ''.join("\\x%02x" % ord(defval[i:i+1]) for i in range(len(defval)))
- result += '#define %s_DEFAULT (const pb_byte_t*)"%s\\x00"\n' % (self.name, hexcoded)
+ result += '#define %s_DEFAULT (const pb_byte_t*)"%s\\x00"\n' % (
+ Globals.naming_style.define_name(self.name),
+ hexcoded)
else:
- result += '#define %s_DEFAULT NULL\n' % self.name
+ result += '#define %s_DEFAULT NULL\n' % Globals.naming_style.define_name(self.name)
for field in sorted_fields:
if field.pbtype in ['MESSAGE', 'MSG_W_CB']:
if field.rules == 'ONEOF':
- result += "#define %s_%s_%s_MSGTYPE %s\n" % (self.name, field.union_name, field.name, field.ctype)
+ result += "#define %s_%s_%s_MSGTYPE %s\n" % (
+ Globals.naming_style.type_name(self.name),
+ Globals.naming_style.var_name(field.union_name),
+ Globals.naming_style.var_name(field.name),
+ Globals.naming_style.type_name(field.ctype)
+ )
+ else:
+ result += "#define %s_%s_MSGTYPE %s\n" % (
+ Globals.naming_style.type_name(self.name),
+ Globals.naming_style.var_name(field.name),
+ Globals.naming_style.type_name(field.ctype)
+ )
+
+ return result
+
+ def enumtype_defines(self):
+ '''Defines to allow user code to refer to enum type of a specific field'''
+ result = ''
+ for field in self.all_fields():
+ if field.pbtype in ['ENUM', "UENUM"]:
+ if field.rules == 'ONEOF':
+ result += "#define %s_%s_%s_ENUMTYPE %s\n" % (
+ Globals.naming_style.type_name(self.name),
+ Globals.naming_style.var_name(field.union_name),
+ Globals.naming_style.var_name(field.name),
+ Globals.naming_style.type_name(field.ctype)
+ )
else:
- result += "#define %s_%s_MSGTYPE %s\n" % (self.name, field.name, field.ctype)
+ result += "#define %s_%s_ENUMTYPE %s\n" % (
+ Globals.naming_style.type_name(self.name),
+ Globals.naming_style.var_name(field.name),
+ Globals.naming_style.type_name(field.ctype)
+ )
return result
- def fields_declaration_cpp_lookup(self):
+ def fields_declaration_cpp_lookup(self, local_defines):
result = 'template <>\n'
result += 'struct MessageDescriptor<%s> {\n' % (self.name)
result += ' static PB_INLINE_CONSTEXPR const pb_size_t fields_array_length = %d;\n' % (self.count_all_fields())
+
+ size_define = "%s_size" % (self.name)
+ if size_define in local_defines:
+ result += ' static PB_INLINE_CONSTEXPR const pb_size_t size = %s;\n' % (size_define)
+
result += ' static inline const pb_msgdesc_t* fields() {\n'
result += ' return &%s_msg;\n' % (self.name)
result += ' }\n'
+ result += ' static inline bool has_msgid() {\n'
+ result += ' return %s;\n' % ("true" if hasattr(self, "msgid") else "false", )
+ result += ' }\n'
+ result += ' static inline uint32_t msgid() {\n'
+ result += ' return %d;\n' % (getattr(self, "msgid", 0), )
+ result += ' }\n'
result += '};'
return result
@@ -1360,7 +1589,10 @@ def fields_definition(self, dependencies):
if width == 1:
width = 'AUTO'
- result = 'PB_BIND(%s, %s, %s)\n' % (self.name, self.name, width)
+ result = 'PB_BIND(%s, %s, %s)\n' % (
+ Globals.naming_style.define_name(self.name),
+ Globals.naming_style.type_name(self.name),
+ width)
return result
def required_descriptor_width(self, dependencies):
@@ -1441,6 +1673,9 @@ def default_value(self, dependencies):
raise Exception("Could not find enum type %s while generating default values for %s.\n" % (enumname, self.name)
+ "Try passing all source files to generator at once, or use -I option.")
+ if not isinstance(enumtype, Enum):
+ raise Exception("Expected enum type as %s, got %s" % (enumname, repr(enumtype)))
+
if field.HasField('default_value'):
defvals = [v for n,v in enumtype.values if n.parts[-1] == field.default_value]
else:
@@ -1462,8 +1697,10 @@ def default_value(self, dependencies):
optional_only.ClearField(str('nested_type'))
optional_only.ClearField(str('extension'))
optional_only.ClearField(str('enum_type'))
+ optional_only.name += str(id(self))
+
desc = google.protobuf.descriptor.MakeDescriptor(optional_only)
- msg = reflection.MakeClass(desc)()
+ msg = GetMessageClass(desc)()
for field in optional_only.field:
if field.type == FieldD.TYPE_STRING:
@@ -1484,21 +1721,24 @@ def default_value(self, dependencies):
# Processing of entire .proto files
# ---------------------------------------------------------------------------
-def iterate_messages(desc, flatten = False, names = Names()):
- '''Recursively find all messages. For each, yield name, DescriptorProto.'''
+def iterate_messages(desc, flatten = False, names = Names(), comment_path = ()):
+ '''Recursively find all messages. For each, yield name, DescriptorProto, comment_path.'''
if hasattr(desc, 'message_type'):
submsgs = desc.message_type
+ comment_path += (ProtoElement.MESSAGE,)
else:
submsgs = desc.nested_type
+ comment_path += (ProtoElement.NESTED_TYPE,)
- for submsg in submsgs:
+ for idx, submsg in enumerate(submsgs):
sub_names = names + submsg.name
+ sub_path = comment_path + (idx,)
if flatten:
- yield Names(submsg.name), submsg
+ yield Names(submsg.name), submsg, sub_path
else:
- yield sub_names, submsg
+ yield sub_names, submsg, sub_path
- for x in iterate_messages(submsg, flatten, sub_names):
+ for x in iterate_messages(submsg, flatten, sub_names, sub_path):
yield x
def iterate_extensions(desc, flatten = False, names = Names()):
@@ -1508,40 +1748,51 @@ def iterate_extensions(desc, flatten = False, names = Names()):
for extension in desc.extension:
yield names, extension
- for subname, subdesc in iterate_messages(desc, flatten, names):
+ for subname, subdesc, comment_path in iterate_messages(desc, flatten, names):
for extension in subdesc.extension:
yield subname, extension
-def toposort2(data):
- '''Topological sort.
- From http://code.activestate.com/recipes/577413-topological-sort/
- This function is under the MIT license.
- '''
- for k, v in list(data.items()):
- v.discard(k) # Ignore self dependencies
- extra_items_in_deps = reduce(set.union, list(data.values()), set()) - set(data.keys())
- data.update(dict([(item, set()) for item in extra_items_in_deps]))
- while True:
- ordered = set(item for item,dep in list(data.items()) if not dep)
- if not ordered:
- break
- for item in sorted(ordered):
- yield item
- data = dict([(item, (dep - ordered)) for item,dep in list(data.items())
- if item not in ordered])
- assert not data, "A cyclic dependency exists amongst %r" % data
+def check_recursive_dependencies(message, all_messages, root = None):
+ '''Returns True if message has a recursive dependency on root (or itself if root is None).'''
+
+ if not isinstance(all_messages, dict):
+ all_messages = dict((str(m.name), m) for m in all_messages)
+
+ if not root:
+ root = message
+
+ for dep in message.get_dependencies():
+ if dep == str(root.name):
+ return True
+ elif dep in all_messages:
+ if check_recursive_dependencies(all_messages[dep], all_messages, root):
+ return True
+
+ return False
def sort_dependencies(messages):
'''Sort a list of Messages based on dependencies.'''
+
+ # Construct first level list of dependencies
dependencies = {}
- message_by_name = {}
for message in messages:
dependencies[str(message.name)] = set(message.get_dependencies())
- message_by_name[str(message.name)] = message
- for msgname in toposort2(dependencies):
- if msgname in message_by_name:
- yield message_by_name[msgname]
+ # Emit messages after all their dependencies have been processed
+ remaining = list(messages)
+ remainset = set(str(m.name) for m in remaining)
+ while remaining:
+ for candidate in remaining:
+ if not remainset.intersection(dependencies[str(candidate.name)]):
+ remaining.remove(candidate)
+ remainset.remove(str(candidate.name))
+ yield candidate
+ break
+ else:
+ sys.stderr.write("Circular dependency in messages: " + ', '.join(remainset) + " (consider changing to FT_POINTER or FT_CALLBACK)\n")
+ candidate = remaining.pop(0)
+ remainset.remove(str(candidate.name))
+ yield candidate
def make_identifier(headername):
'''Make #ifndef identifier that contains uppercase A-Z and digits 0-9'''
@@ -1553,6 +1804,92 @@ def make_identifier(headername):
result += '_'
return result
+class MangleNames:
+ '''Handles conversion of type names according to mangle_names option:
+ M_NONE = 0; // Default, no typename mangling
+ M_STRIP_PACKAGE = 1; // Strip current package name
+ M_FLATTEN = 2; // Only use last path component
+ M_PACKAGE_INITIALS = 3; // Replace the package name by the initials
+ '''
+ def __init__(self, fdesc, file_options):
+ self.file_options = file_options
+ self.mangle_names = file_options.mangle_names
+ self.flatten = (self.mangle_names == nanopb_pb2.M_FLATTEN)
+ self.strip_prefix = None
+ self.replacement_prefix = None
+ self.name_mapping = {}
+ self.reverse_name_mapping = {}
+ self.canonical_base = Names(fdesc.package.split('.'))
+
+ if self.mangle_names == nanopb_pb2.M_STRIP_PACKAGE:
+ self.strip_prefix = "." + fdesc.package
+ elif self.mangle_names == nanopb_pb2.M_PACKAGE_INITIALS:
+ self.strip_prefix = "." + fdesc.package
+ self.replacement_prefix = ""
+ for part in fdesc.package.split("."):
+ self.replacement_prefix += part[0]
+ elif file_options.package:
+ self.strip_prefix = "." + fdesc.package
+ self.replacement_prefix = file_options.package
+
+ if self.strip_prefix == '.':
+ self.strip_prefix = ''
+
+ if self.replacement_prefix is not None:
+ self.base_name = Names(self.replacement_prefix.split('.'))
+ elif fdesc.package:
+ self.base_name = Names(fdesc.package.split('.'))
+ else:
+ self.base_name = Names()
+
+ def create_name(self, names):
+ '''Create name for a new message / enum.
+ Argument can be either string or Names instance.
+ '''
+ if str(names) not in self.name_mapping:
+ if self.mangle_names in (nanopb_pb2.M_NONE, nanopb_pb2.M_PACKAGE_INITIALS):
+ new_name = self.base_name + names
+ elif self.mangle_names == nanopb_pb2.M_STRIP_PACKAGE:
+ new_name = Names(names)
+ elif isinstance(names, Names):
+ new_name = Names(names.parts[-1])
+ else:
+ new_name = Names(names)
+
+ if str(new_name) in self.reverse_name_mapping:
+ sys.stderr.write("Warning: Duplicate name with mangle_names=%s: %s and %s map to %s\n" %
+ (self.mangle_names, self.reverse_name_mapping[str(new_name)], names, new_name))
+
+ self.name_mapping[str(names)] = new_name
+ self.reverse_name_mapping[str(new_name)] = self.canonical_base + names
+
+ return self.name_mapping[str(names)]
+
+ def mangle_field_typename(self, typename):
+ '''Mangle type name for a submessage / enum crossreference.
+ Argument is a string.
+ '''
+ if self.mangle_names == nanopb_pb2.M_FLATTEN:
+ return "." + typename.split(".")[-1]
+
+ canonical_mangled_typename = str(Names(typename.strip(".").split(".")))
+ if not canonical_mangled_typename.startswith(str(self.canonical_base) + "_"):
+ return typename
+
+ if self.strip_prefix is not None and typename.startswith(self.strip_prefix):
+ if self.replacement_prefix is not None:
+ return "." + self.replacement_prefix + typename[len(self.strip_prefix):]
+ else:
+ return typename[len(self.strip_prefix):]
+
+ if self.file_options.package:
+ return "." + self.replacement_prefix + typename
+
+ return typename
+
+ def unmangle(self, names):
+ return self.reverse_name_mapping.get(str(names), names)
+
class ProtoFile:
def __init__(self, fdesc, file_options):
'''Takes a FileDescriptorProto and parses it.'''
@@ -1561,6 +1898,7 @@ def __init__(self, fdesc, file_options):
self.dependencies = {}
self.math_include_required = False
self.parse()
+ self.discard_unused_automatic_types()
for message in self.messages:
if message.math_include_required:
self.math_include_required = True
@@ -1574,103 +1912,105 @@ def parse(self):
self.enums = []
self.messages = []
self.extensions = []
-
- mangle_names = self.file_options.mangle_names
- flatten = mangle_names == nanopb_pb2.M_FLATTEN
- strip_prefix = None
- replacement_prefix = None
- if mangle_names == nanopb_pb2.M_STRIP_PACKAGE:
- strip_prefix = "." + self.fdesc.package
- elif mangle_names == nanopb_pb2.M_PACKAGE_INITIALS:
- strip_prefix = "." + self.fdesc.package
- replacement_prefix = ""
- for part in self.fdesc.package.split("."):
- replacement_prefix += part[0]
- elif self.file_options.package:
- strip_prefix = "." + self.fdesc.package
- replacement_prefix = self.file_options.package
-
-
- def create_name(names):
- if mangle_names in (nanopb_pb2.M_NONE, nanopb_pb2.M_PACKAGE_INITIALS):
- return base_name + names
- if mangle_names == nanopb_pb2.M_STRIP_PACKAGE:
- return Names(names)
- single_name = names
- if isinstance(names, Names):
- single_name = names.parts[-1]
- return Names(single_name)
-
- def mangle_field_typename(typename):
- if mangle_names == nanopb_pb2.M_FLATTEN:
- return "." + typename.split(".")[-1]
- if strip_prefix is not None and typename.startswith(strip_prefix):
- if replacement_prefix is not None:
- return "." + replacement_prefix + typename[len(strip_prefix):]
- else:
- return typename[len(strip_prefix):]
- if self.file_options.package:
- return "." + replacement_prefix + typename
- return typename
-
- if replacement_prefix is not None:
- base_name = Names(replacement_prefix.split('.'))
- elif self.fdesc.package:
- base_name = Names(self.fdesc.package.split('.'))
- else:
- base_name = Names()
+ self.manglenames = MangleNames(self.fdesc, self.file_options)
# process source code comment locations
# ignores any locations that do not contain any comment information
self.comment_locations = {
- str(list(location.path)): location
+ tuple(location.path): location
for location in self.fdesc.source_code_info.location
if location.leading_comments or location.leading_detached_comments or location.trailing_comments
}
for index, enum in enumerate(self.fdesc.enum_type):
- name = create_name(enum.name)
+ name = self.manglenames.create_name(enum.name)
enum_options = get_nanopb_suboptions(enum, self.file_options, name)
- self.enums.append(Enum(name, enum, enum_options, index, self.comment_locations))
+ enum_path = (ProtoElement.ENUM, index)
+ self.enums.append(Enum(name, enum, enum_options, enum_path, self.comment_locations))
- for index, (names, message) in enumerate(iterate_messages(self.fdesc, flatten)):
- name = create_name(names)
+ for names, message, comment_path in iterate_messages(self.fdesc, self.manglenames.flatten):
+ name = self.manglenames.create_name(names)
message_options = get_nanopb_suboptions(message, self.file_options, name)
if message_options.skip_message:
continue
+ if message_options.discard_deprecated and message.options.deprecated:
+ continue
+
+ # Apply any configured typename mangling options
message = copy.deepcopy(message)
for field in message.field:
if field.type in (FieldD.TYPE_MESSAGE, FieldD.TYPE_ENUM):
- field.type_name = mangle_field_typename(field.type_name)
-
- self.messages.append(Message(name, message, message_options, index, self.comment_locations))
+ field.type_name = self.manglenames.mangle_field_typename(field.type_name)
+
+ # Check for circular dependencies
+ msgobject = Message(name, message, message_options, comment_path, self.comment_locations)
+ if check_recursive_dependencies(msgobject, self.messages):
+ message_options.type = message_options.fallback_type
+ sys.stderr.write('Breaking circular dependency at message %s by converting to %s\n'
+ % (msgobject.name, nanopb_pb2.FieldType.Name(message_options.type)))
+ msgobject = Message(name, message, message_options, comment_path, self.comment_locations)
+ self.messages.append(msgobject)
+
+ # Process any nested enums
for index, enum in enumerate(message.enum_type):
- name = create_name(names + enum.name)
+ name = self.manglenames.create_name(names + enum.name)
enum_options = get_nanopb_suboptions(enum, message_options, name)
- self.enums.append(Enum(name, enum, enum_options, index, self.comment_locations))
+ enum_path = comment_path + (ProtoElement.NESTED_ENUM, index)
+ self.enums.append(Enum(name, enum, enum_options, enum_path, self.comment_locations))
- for names, extension in iterate_extensions(self.fdesc, flatten):
- name = create_name(names + extension.name)
+ for names, extension in iterate_extensions(self.fdesc, self.manglenames.flatten):
+ name = self.manglenames.create_name(names + extension.name)
field_options = get_nanopb_suboptions(extension, self.file_options, name)
extension = copy.deepcopy(extension)
if extension.type in (FieldD.TYPE_MESSAGE, FieldD.TYPE_ENUM):
- extension.type_name = mangle_field_typename(extension.type_name)
+ extension.type_name = self.manglenames.mangle_field_typename(extension.type_name)
if field_options.type != nanopb_pb2.FT_IGNORE:
self.extensions.append(ExtensionField(name, extension, field_options))
+ def discard_unused_automatic_types(self):
+ '''Discard unused types that are automatically generated by protoc if they are not actually
+ needed. Currently this applies to map< > types when the field is ignored by options.
+ '''
+
+ if not self.file_options.discard_unused_automatic_types:
+ return
+
+ map_entries = {}
+ types_used = set()
+ for msg in self.messages:
+ if msg.desc.options.map_entry:
+ map_entries[str(msg.name)] = msg
+
+ for field in msg.all_fields():
+ if field.pbtype == 'MESSAGE':
+ types_used.add(str(field.submsgname))
+
+ for name, msg in map_entries.items():
+ if name not in types_used:
+ self.messages.remove(msg)
+
def add_dependency(self, other):
for enum in other.enums:
self.dependencies[str(enum.names)] = enum
+ self.dependencies[str(other.manglenames.unmangle(enum.names))] = enum
enum.protofile = other
for msg in other.messages:
+ canonical_mangled_typename = str(other.manglenames.unmangle(msg.name))
self.dependencies[str(msg.name)] = msg
+ self.dependencies[canonical_mangled_typename] = msg
msg.protofile = other
+ # Fix references to submessages with different mangling rules
+ for message in self.messages:
+ for field in message.all_fields():
+ if field.ctype == canonical_mangled_typename:
+ field.ctype = msg.name
+
# Fix field default values where enum short names are used.
for enum in other.enums:
if not enum.options.long_names:
@@ -1755,24 +2095,35 @@ def generate_header(self, includes, headername, options):
yield extension.extension_decl()
yield '\n'
+ yield '#ifdef __cplusplus\n'
+ yield 'extern "C" {\n'
+ yield '#endif\n\n'
+
if self.enums:
yield '/* Helper constants for enums */\n'
for enum in self.enums:
yield enum.auxiliary_defines() + '\n'
- yield '\n'
- yield '#ifdef __cplusplus\n'
- yield 'extern "C" {\n'
- yield '#endif\n\n'
+ for msg in self.messages:
+ yield msg.enumtype_defines() + '\n'
+ yield '\n'
if self.messages:
yield '/* Initializer values for message structs */\n'
for msg in self.messages:
- identifier = '%s_init_default' % msg.name
+ identifier = Globals.naming_style.define_name('%s_init_default' % msg.name)
yield '#define %-40s %s\n' % (identifier, msg.get_initializer(False))
+ unmangledName = self.manglenames.unmangle(msg.name)
+ if unmangledName:
+ unmangledIdentifier = Globals.naming_style.define_name('%s_init_default' % unmangledName)
+ self.manglenames.reverse_name_mapping[identifier] = unmangledIdentifier
for msg in self.messages:
- identifier = '%s_init_zero' % msg.name
+ identifier = Globals.naming_style.define_name('%s_init_zero' % msg.name)
yield '#define %-40s %s\n' % (identifier, msg.get_initializer(True))
+ unmangledName = self.manglenames.unmangle(msg.name)
+ if unmangledName:
+ unmangledIdentifier = Globals.naming_style.define_name('%s_init_zero' % unmangledName)
+ self.manglenames.reverse_name_mapping[identifier] = unmangledIdentifier
yield '\n'
yield '/* Field tags (for use in manual encoding/decoding) */\n'
@@ -1787,12 +2138,14 @@ def generate_header(self, includes, headername, options):
for msg in self.messages:
yield msg.fields_declaration(self.dependencies) + '\n'
for msg in self.messages:
- yield 'extern const pb_msgdesc_t %s_msg;\n' % msg.name
+ yield 'extern const pb_msgdesc_t %s_msg;\n' % Globals.naming_style.type_name(msg.name)
yield '\n'
yield '/* Defines for backwards compatibility with code written before nanopb-0.4.0 */\n'
for msg in self.messages:
- yield '#define %s_fields &%s_msg\n' % (msg.name, msg.name)
+ yield '#define %s &%s_msg\n' % (
+ Globals.naming_style.define_name('%s_fields' % msg.name),
+ Globals.naming_style.type_name(msg.name))
yield '\n'
yield '/* Maximum encoded size of messages (where known) */\n'
@@ -1804,15 +2157,38 @@ def generate_header(self, includes, headername, options):
# If we require a symbol from another file, put a preprocessor if statement
# around it to prevent compilation errors if the symbol is not actually available.
local_defines = [identifier for identifier, msize in messagesizes if msize is not None]
+
+ # emit size_unions, if any
+ oneof_sizes = []
+ for msg in self.messages:
+ for f in msg.fields:
+ if isinstance(f, OneOf):
+ msize = f.encoded_size(self.dependencies)
+ if msize is not None:
+ oneof_sizes.append(msize)
+ for msize in oneof_sizes:
+ guard = msize.get_cpp_guard(local_defines)
+ if guard:
+ yield guard
+ yield msize.get_declarations()
+ if guard:
+ yield '#endif\n'
+
guards = {}
+    # Provide a #define of the maximum message size, which facilitates setting the size of static arrays to be the largest possible encoded message size
+ max_messagesize = max(messagesizes, key=lambda messagesize: messagesize[1].value if messagesize[1] else 0)
for identifier, msize in messagesizes:
if msize is not None:
cpp_guard = msize.get_cpp_guard(local_defines)
if cpp_guard not in guards:
guards[cpp_guard] = set()
- for decl in msize.get_declarations().splitlines():
- guards[cpp_guard].add(decl)
- guards[cpp_guard].add('#define %-40s %s' % (identifier, msize))
+ guards[cpp_guard].add('#define %-40s %s' % (
+ Globals.naming_style.define_name(identifier), msize))
+
+ if identifier == max_messagesize[0]:
+ guards[cpp_guard].add('#define %-40s %s' % (
+ Globals.naming_style.define_name(symbol + "_MAX_SIZE"), Globals.naming_style.define_name(identifier)))
+
else:
yield '/* %s depends on runtime parameters */\n' % identifier
for guard, values in guards.items():
@@ -1849,6 +2225,14 @@ def generate_header(self, includes, headername, options):
yield '#define %s_msgid %d\n' % (msg.name, msg.msgid)
yield '\n'
+ # Check if there is any name mangling active
+ pairs = [x for x in self.manglenames.reverse_name_mapping.items() if str(x[0]) != str(x[1])]
+ if pairs:
+ yield '/* Mapping from canonical names (mangle_names or overridden package name) */\n'
+ for shortname, longname in pairs:
+ yield '#define %s %s\n' % (longname, shortname)
+ yield '\n'
+
yield '#ifdef __cplusplus\n'
yield '} /* extern "C" */\n'
yield '#endif\n'
@@ -1859,7 +2243,7 @@ def generate_header(self, includes, headername, options):
yield '/* Message descriptors for nanopb */\n'
yield 'namespace nanopb {\n'
for msg in self.messages:
- yield msg.fields_declaration_cpp_lookup() + '\n'
+ yield msg.fields_declaration_cpp_lookup(local_defines) + '\n'
yield '} // namespace nanopb\n'
yield '\n'
yield '#endif /* __cplusplus */\n'
@@ -1890,15 +2274,36 @@ def generate_source(self, headername, options):
yield '#endif\n'
yield '\n'
+ # Check if any messages exceed the 64 kB limit of 16-bit pb_size_t
+ exceeds_64kB = []
+ for msg in self.messages:
+ size = msg.data_size(self.dependencies)
+ if size >= 65536:
+ exceeds_64kB.append(str(msg.name))
+
+ if exceeds_64kB:
+ yield '\n/* The following messages exceed 64kB in size: ' + ', '.join(exceeds_64kB) + ' */\n'
+ yield '\n/* The PB_FIELD_32BIT compilation option must be defined to support messages that exceed 64 kB in size. */\n'
+ yield '#ifndef PB_FIELD_32BIT\n'
+ yield '#error Enable PB_FIELD_32BIT to support messages exceeding 64kB in size: ' + ', '.join(exceeds_64kB) + '\n'
+ yield '#endif\n'
+
+ # Generate the message field definitions (PB_BIND() call)
for msg in self.messages:
yield msg.fields_definition(self.dependencies) + '\n\n'
+ # Generate pb_extension_type_t definitions if extensions are used in proto file
for ext in self.extensions:
yield ext.extension_def(self.dependencies) + '\n'
+ # Generate enum_name function if enum_to_string option is defined
for enum in self.enums:
yield enum.enum_to_string_definition() + '\n'
+ # Generate enum_valid function if enum_valid option is defined
+ for enum in self.enums:
+ yield enum.enum_validate() + '\n'
+
# Add checks for numeric limits
if self.messages:
largest_msg = max(self.messages, key = lambda m: m.count_required_fields())
@@ -1966,7 +2371,7 @@ def read_options_file(infile):
text_format.Merge(parts[1], opts)
except Exception as e:
sys.stderr.write("%s:%d: " % (infile.name, i + 1) +
- "Unparseable option line: '%s'. " % line +
+ "Unparsable option line: '%s'. " % line +
"Error: %s\n" % str(e))
sys.exit(1)
results.append((parts[0], opts))
@@ -2023,8 +2428,8 @@ def get_nanopb_suboptions(subdesc, options, name):
usage = "Usage: nanopb_generator.py [options] file.pb ...",
epilog = "Compile file.pb from file.proto by: 'protoc -ofile.pb file.proto'. " +
"Output will be written to file.pb.h and file.pb.c.")
-optparser.add_option("--version", dest="version", action="store_true",
- help="Show version info and exit")
+optparser.add_option("-V", "--version", dest="version", action="store_true",
+ help="Show version info and exit (add -v for protoc version info)")
optparser.add_option("-x", dest="exclude", metavar="FILE", action="append", default=[],
help="Exclude file from generated #include list.")
optparser.add_option("-e", "--extension", dest="extension", metavar="EXTENSION", default=".pb",
@@ -2035,9 +2440,9 @@ def get_nanopb_suboptions(subdesc, options, name):
help="Set extension to use for generated source files. [default: %default]")
optparser.add_option("-f", "--options-file", dest="options_file", metavar="FILE", default="%s.options",
help="Set name of a separate generator options file.")
-optparser.add_option("-I", "--options-path", dest="options_path", metavar="DIR",
+optparser.add_option("-I", "--options-path", "--proto-path", dest="options_path", metavar="DIR",
action="append", default = [],
- help="Search for .options files additionally in this path")
+ help="Search path for .options and .proto files. Also determines relative paths for output directory structure.")
optparser.add_option("--error-on-unmatched", dest="error_on_unmatched", action="store_true", default=False,
help ="Stop generation if there are unmatched fields in options file")
optparser.add_option("--no-error-on-unmatched", dest="error_on_unmatched", action="store_false", default=False,
@@ -2047,10 +2452,10 @@ def get_nanopb_suboptions(subdesc, options, name):
help="Output directory of .pb.h and .pb.c files")
optparser.add_option("-Q", "--generated-include-format", dest="genformat",
metavar="FORMAT", default='#include "%s"',
- help="Set format string to use for including other .pb.h files. [default: %default]")
+ help="Set format string to use for including other .pb.h files. Value can be 'quote', 'bracket' or a format string. [default: %default]")
optparser.add_option("-L", "--library-include-format", dest="libformat",
metavar="FORMAT", default='#include <%s>',
- help="Set format string to use for including the nanopb pb.h header. [default: %default]")
+ help="Set format string to use for including the nanopb pb.h header. Value can be 'quote', 'bracket' or a format string. [default: %default]")
optparser.add_option("--strip-path", dest="strip_path", action="store_true", default=False,
help="Strip directory path from #included .pb.h file name")
optparser.add_option("--no-strip-path", dest="strip_path", action="store_false",
@@ -2067,13 +2472,59 @@ def get_nanopb_suboptions(subdesc, options, name):
help="Print more information.")
optparser.add_option("-s", dest="settings", metavar="OPTION:VALUE", action="append", default=[],
help="Set generator option (max_size, max_count etc.).")
+optparser.add_option("--protoc-opt", dest="protoc_opts", action="append", default = [], metavar="OPTION",
+ help="Pass an option to protoc when compiling .proto files")
optparser.add_option("--protoc-insertion-points", dest="protoc_insertion_points", action="store_true", default=False,
- help="Include insertion point comments in output for use by custom protoc plugins")
+ help="Include insertion point comments in output for use by custom protoc plugins")
+optparser.add_option("-C", "--c-style", dest="c_style", action="store_true", default=False,
+ help="Use C naming convention.")
+
+def process_cmdline(args, is_plugin):
+ '''Process command line options. Returns list of options, filenames.'''
+
+ options, filenames = optparser.parse_args(args)
+
+ if options.version:
+ if is_plugin:
+ sys.stderr.write('%s\n' % (nanopb_version))
+ else:
+ print(nanopb_version)
+
+ if options.verbose:
+ proto.print_versions()
+
+ sys.exit(0)
+
+ if not filenames and not is_plugin:
+ optparser.print_help()
+ sys.exit(1)
+
+ if options.quiet:
+ options.verbose = False
+
+ include_formats = {'quote': '#include "%s"', 'bracket': '#include <%s>'}
+ options.libformat = include_formats.get(options.libformat, options.libformat)
+ options.genformat = include_formats.get(options.genformat, options.genformat)
+
+ if options.c_style:
+ Globals.naming_style = NamingStyleC()
+
+ Globals.verbose_options = options.verbose
+
+ if options.verbose:
+ sys.stderr.write("Nanopb version %s\n" % nanopb_version)
+ sys.stderr.write('Google Python protobuf library imported from %s, version %s\n'
+ % (google.protobuf.__file__, google.protobuf.__version__))
+
+ return options, filenames
+
def parse_file(filename, fdesc, options):
'''Parse a single file. Returns a ProtoFile instance.'''
toplevel_options = nanopb_pb2.NanoPBOptions()
for s in options.settings:
+ if ':' not in s and '=' in s:
+ s = s.replace('=', ':')
text_format.Merge(s, toplevel_options)
if not fdesc:
@@ -2095,7 +2546,7 @@ def parse_file(filename, fdesc, options):
optfilename = os.path.join(p, optfilename)
if options.verbose:
sys.stderr.write('Reading options from ' + optfilename + '\n')
- Globals.separate_options = read_options_file(open(optfilename, openmode_unicode))
+ Globals.separate_options = read_options_file(open(optfilename, 'r', encoding = 'utf-8'))
break
else:
# If we are given a full filename and it does not exist, give an error.
@@ -2168,8 +2619,8 @@ def process_file(filename, fdesc, options, other_files = {}):
sys.stderr.write("Following patterns in " + f.optfilename + " did not match any fields: "
+ ', '.join(unmatched) + "\n")
- if not Globals.verbose_options:
- sys.stderr.write("Use protoc --nanopb-out=-v:. to see a list of the field names.\n")
+ if not Globals.verbose_options:
+ sys.stderr.write("Use protoc --nanopb-out=-v:. to see a list of the field names.\n")
return {'headername': headername, 'headerdata': headerdata,
'sourcename': sourcename, 'sourcedata': sourcedata}
@@ -2177,54 +2628,46 @@ def process_file(filename, fdesc, options, other_files = {}):
def main_cli():
'''Main function when invoked directly from the command line.'''
- options, filenames = optparser.parse_args()
-
- if options.version:
- print(nanopb_version)
- sys.exit(0)
-
- if not filenames:
- optparser.print_help()
- sys.exit(1)
-
- if options.quiet:
- options.verbose = False
+ options, filenames = process_cmdline(sys.argv[1:], is_plugin = False)
if options.output_dir and not os.path.exists(options.output_dir):
optparser.print_help()
sys.stderr.write("\noutput_dir does not exist: %s\n" % options.output_dir)
sys.exit(1)
- if options.verbose:
- sys.stderr.write("Nanopb version %s\n" % nanopb_version)
- sys.stderr.write('Google Python protobuf library imported from %s, version %s\n'
- % (google.protobuf.__file__, google.protobuf.__version__))
-
# Load .pb files into memory and compile any .proto files.
- fdescs = {}
include_path = ['-I%s' % p for p in options.options_path]
+ all_fdescs = {}
+ out_fdescs = {}
for filename in filenames:
if filename.endswith(".proto"):
with TemporaryDirectory() as tmpdir:
tmpname = os.path.join(tmpdir, os.path.basename(filename) + ".pb")
- status = invoke_protoc(["protoc"] + include_path + ['--include_imports', '--include_source_info', '-o' + tmpname, filename])
+ args = ["protoc"] + include_path
+ args += options.protoc_opts
+ args += ['--include_imports', '--include_source_info', '-o' + tmpname, filename]
+ status = invoke_protoc(args)
if status != 0: sys.exit(status)
data = open(tmpname, 'rb').read()
else:
data = open(filename, 'rb').read()
- fdesc = descriptor.FileDescriptorSet.FromString(data).file[-1]
- fdescs[fdesc.name] = fdesc
+ fdescs = descriptor.FileDescriptorSet.FromString(data).file
+ last_fdesc = fdescs[-1]
+
+ for fdesc in fdescs:
+ all_fdescs[fdesc.name] = fdesc
+
+ out_fdescs[last_fdesc.name] = last_fdesc
# Process any include files first, in order to have them
# available as dependencies
other_files = {}
- for fdesc in fdescs.values():
+ for fdesc in all_fdescs.values():
other_files[fdesc.name] = parse_file(fdesc.name, fdesc, options)
# Then generate the headers / sources
- Globals.verbose_options = options.verbose
- for fdesc in fdescs.values():
+ for fdesc in out_fdescs.values():
results = process_file(fdesc.name, fdesc, options, other_files)
base_dir = options.output_dir or ''
@@ -2242,7 +2685,7 @@ def main_cli():
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
- with open(path, 'w') as f:
+ with open(path, 'w', encoding='utf-8') as f:
f.write(data)
def main_plugin():
@@ -2266,19 +2709,19 @@ def main_plugin():
except UnicodeEncodeError:
params = request.parameter
- import shlex
- args = shlex.split(params)
-
- if len(args) == 1 and ',' in args[0]:
- # For compatibility with other protoc plugins, support options
- # separated by comma.
+ if ',' not in params and ' -' in params:
+ # Nanopb has traditionally supported space as separator in options
+ args = shlex.split(params)
+ else:
+ # Protoc separates options passed to plugins by comma
+ # This allows also giving --nanopb_opt option multiple times.
lex = shlex.shlex(params)
lex.whitespace_split = True
lex.whitespace = ','
lex.commenters = ''
args = list(lex)
- optparser.usage = "Usage: protoc --nanopb_out=[options][,more_options]:outdir file.proto"
+ optparser.usage = "protoc --nanopb_out=outdir [--nanopb_opt=option] ['--nanopb_opt=option with spaces'] file.proto"
optparser.epilog = "Output will be written to file.pb.h and file.pb.c."
if '-h' in args or '--help' in args:
@@ -2287,18 +2730,7 @@ def main_plugin():
optparser.print_help(sys.stderr)
sys.exit(1)
- options, dummy = optparser.parse_args(args)
-
- if options.version:
- sys.stderr.write('%s\n' % (nanopb_version))
- sys.exit(0)
-
- Globals.verbose_options = options.verbose
-
- if options.verbose:
- sys.stderr.write("Nanopb version %s\n" % nanopb_version)
- sys.stderr.write('Google Python protobuf library imported from %s, version %s\n'
- % (google.protobuf.__file__, google.protobuf.__version__))
+ options, dummy = process_cmdline(args, is_plugin = True)
response = plugin_pb2.CodeGeneratorResponse()
diff --git a/vendor/nanopb/generator/platformio_generator.py b/vendor/nanopb/generator/platformio_generator.py
new file mode 100644
index 00000000..0be4cfd1
--- /dev/null
+++ b/vendor/nanopb/generator/platformio_generator.py
@@ -0,0 +1,157 @@
+import os
+import hashlib
+import pathlib
+import shlex
+import subprocess
+
+import SCons.Action
+from platformio import fs
+
+Import("env")
+
+# We don't use `env.Execute` because it does not handle spaces in path
+# See https://github.com/nanopb/nanopb/pull/834
+# So, we resolve the path to the executable and then use `subprocess.run`
+python_exe = env.subst("$PYTHONEXE")
+
+try:
+ import google.protobuf
+except ImportError:
+ print("[nanopb] Installing Protocol Buffers dependencies");
+
+    # We need to specify the protobuf version, otherwise we may get the following (on Ubuntu 20.04):
+    # Requirement already satisfied: protobuf in /usr/lib/python3/dist-packages (3.6.1)
+ subprocess.run([python_exe, '-m', 'pip', 'install', "protobuf>=3.19.1"])
+
+try:
+ import grpc_tools.protoc
+except ImportError:
+ print("[nanopb] Installing gRPC dependencies");
+ subprocess.run([python_exe, '-m', 'pip', 'install', "grpcio-tools>=1.43.0"])
+
+
+nanopb_root = os.path.join(os.getcwd(), '..')
+
+project_dir = env.subst("$PROJECT_DIR")
+build_dir = env.subst("$BUILD_DIR")
+
+generated_src_dir = os.path.join(build_dir, 'nanopb', 'generated-src')
+generated_build_dir = os.path.join(build_dir, 'nanopb', 'generated-build')
+md5_dir = os.path.join(build_dir, 'nanopb', 'md5')
+
+nanopb_protos = env.subst(env.GetProjectOption("custom_nanopb_protos", ""))
+nanopb_plugin_options = env.GetProjectOption("custom_nanopb_options", "")
+
+if not nanopb_protos:
+ print("[nanopb] No generation needed.")
+else:
+ if isinstance(nanopb_plugin_options, (list, tuple)):
+ nanopb_plugin_options = " ".join(nanopb_plugin_options)
+
+ nanopb_plugin_options = shlex.split(nanopb_plugin_options)
+
+ protos_files = fs.match_src_files(project_dir, nanopb_protos)
+ if not len(protos_files):
+ print("[nanopb] ERROR: No files matched pattern:")
+ print(f"custom_nanopb_protos: {nanopb_protos}")
+ exit(1)
+
+ nanopb_generator = os.path.join(nanopb_root, 'generator', 'nanopb_generator.py')
+
+ nanopb_options = []
+ nanopb_options.extend(["--output-dir", generated_src_dir])
+ for opt in nanopb_plugin_options:
+ nanopb_options.append(opt)
+
+ try:
+ os.makedirs(generated_src_dir)
+ except FileExistsError:
+ pass
+
+ try:
+ os.makedirs(md5_dir)
+ except FileExistsError:
+ pass
+
+    # Collect include dirs based on the directories of the matched .proto files
+ proto_include_dirs = set()
+ for proto_file in protos_files:
+ proto_file_abs = os.path.join(project_dir, proto_file)
+ proto_dir = os.path.dirname(proto_file_abs)
+ proto_include_dirs.add(proto_dir)
+
+ for proto_include_dir in proto_include_dirs:
+ nanopb_options.extend(["--proto-path", proto_include_dir])
+
+ for proto_file in protos_files:
+ proto_file_abs = os.path.join(project_dir, proto_file)
+
+ proto_file_path_abs = os.path.dirname(proto_file_abs)
+ proto_file_basename = os.path.basename(proto_file_abs)
+ proto_file_without_ext = os.path.splitext(proto_file_basename)[0]
+
+ proto_file_md5_abs = os.path.join(md5_dir, proto_file_basename + '.md5')
+ proto_file_current_md5 = hashlib.md5(pathlib.Path(proto_file_abs).read_bytes()).hexdigest()
+
+ options_file = proto_file_without_ext + ".options"
+ options_file_abs = os.path.join(proto_file_path_abs, options_file)
+ options_file_md5_abs = None
+ options_file_current_md5 = None
+ if pathlib.Path(options_file_abs).exists():
+ options_file_md5_abs = os.path.join(md5_dir, options_file + '.md5')
+ options_file_current_md5 = hashlib.md5(pathlib.Path(options_file_abs).read_bytes()).hexdigest()
+ else:
+ options_file = None
+
+ header_file = proto_file_without_ext + ".pb.h"
+ source_file = proto_file_without_ext + ".pb.c"
+
+ header_file_abs = os.path.join(generated_src_dir, source_file)
+ source_file_abs = os.path.join(generated_src_dir, header_file)
+
+ need_generate = False
+
+ # Check proto file md5
+ try:
+ last_md5 = pathlib.Path(proto_file_md5_abs).read_text()
+ if last_md5 != proto_file_current_md5:
+ need_generate = True
+ except FileNotFoundError:
+ need_generate = True
+
+ if options_file:
+ # Check options file md5
+ try:
+ last_md5 = pathlib.Path(options_file_md5_abs).read_text()
+ if last_md5 != options_file_current_md5:
+ need_generate = True
+ except FileNotFoundError:
+ need_generate = True
+
+ options_info = f"{options_file}" if options_file else "no options"
+
+ if not need_generate:
+ print(f"[nanopb] Skipping '{proto_file}' ({options_info})")
+ else:
+ print(f"[nanopb] Processing '{proto_file}' ({options_info})")
+ cmd = [python_exe, nanopb_generator] + nanopb_options + [proto_file_basename]
+ action = SCons.Action.CommandAction(cmd)
+ result = env.Execute(action)
+ if result != 0:
+ print(f"[nanopb] ERROR: ({result}) processing cmd: '{cmd}'")
+ exit(1)
+ pathlib.Path(proto_file_md5_abs).write_text(proto_file_current_md5)
+ if options_file:
+ pathlib.Path(options_file_md5_abs).write_text(options_file_current_md5)
+
+ #
+ # Add generated includes and sources to build environment
+ #
+ env.Append(CPPPATH=[generated_src_dir])
+
+ # Fix for ESP32 ESP-IDF https://github.com/nanopb/nanopb/issues/734#issuecomment-1001544447
+ global_env = DefaultEnvironment()
+ already_called_env_name = "_PROTOBUF_GENERATOR_ALREADY_CALLED_" + env['PIOENV'].replace("-", "_")
+ if not global_env.get(already_called_env_name, False):
+ env.BuildSources(generated_build_dir, generated_src_dir)
+ global_env[already_called_env_name] = True
diff --git a/vendor/nanopb/generator/proto/Makefile b/vendor/nanopb/generator/proto/Makefile
index c17fb9c7..a93d88ff 100644
--- a/vendor/nanopb/generator/proto/Makefile
+++ b/vendor/nanopb/generator/proto/Makefile
@@ -1,6 +1,10 @@
-PROTOC?=protoc
+PROTOC?=../protoc
all: nanopb_pb2.py
%_pb2.py: %.proto
$(PROTOC) --python_out=. $<
+
+.PHONY: clean
+clean:
+ rm nanopb_pb2.py
diff --git a/vendor/nanopb/generator/proto/__init__.py b/vendor/nanopb/generator/proto/__init__.py
index 29153d40..b2b47b69 100644
--- a/vendor/nanopb/generator/proto/__init__.py
+++ b/vendor/nanopb/generator/proto/__init__.py
@@ -1,36 +1,126 @@
-'''This file automatically rebuilds the proto definitions for Python.'''
+'''This file dynamically builds the proto definitions for Python.'''
from __future__ import absolute_import
+import os
import os.path
import sys
+import tempfile
+import shutil
+import traceback
+from ._utils import has_grpcio_protoc, invoke_protoc, print_versions
-import pkg_resources
+# Compatibility layer to make TemporaryDirectory() available on Python 2.
+try:
+ from tempfile import TemporaryDirectory
+except ImportError:
+ class TemporaryDirectory:
+ '''TemporaryDirectory fallback for Python 2'''
+ def __init__(self, prefix = 'tmp', dir = None):
+ self.prefix = prefix
+ self.dir = dir
-from ._utils import has_grpcio_protoc, invoke_protoc
+ def __enter__(self):
+ self.dir = tempfile.mkdtemp(prefix = self.prefix, dir = self.dir)
+ return self.dir
-dirname = os.path.dirname(__file__)
-protosrc = os.path.join(dirname, "nanopb.proto")
-protodst = os.path.join(dirname, "nanopb_pb2.py")
+ def __exit__(self, *args):
+ shutil.rmtree(self.dir)
-if os.path.isfile(protosrc):
- src_date = os.path.getmtime(protosrc)
- if not os.path.isfile(protodst) or os.path.getmtime(protodst) < src_date:
+def build_nanopb_proto(protosrc, dirname):
+ '''Try to build a .proto file for python-protobuf.
+ Returns True if successful.
+ '''
- cmd = [
- "protoc",
- "--python_out={}".format(dirname),
- protosrc,
- "-I={}".format(dirname),
- ]
+ cmd = [
+ "protoc",
+ "--python_out={}".format(dirname),
+ protosrc,
+ "-I={}".format(dirname),
+ ]
- if has_grpcio_protoc():
- # grpcio-tools has an extra CLI argument
- # from grpc.tools.protoc __main__ invocation.
- _builtin_proto_include = pkg_resources.resource_filename('grpc_tools', '_proto')
+ if has_grpcio_protoc():
+ # grpcio-tools has an extra CLI argument
+ # from grpc.tools.protoc __main__ invocation.
+ cmd.append("-I={}".format(_utils.get_grpc_tools_proto_path()))
+
+ try:
+ invoke_protoc(argv=cmd)
+ except:
+ sys.stderr.write("Failed to build nanopb_pb2.py: " + ' '.join(cmd) + "\n")
+ sys.stderr.write(traceback.format_exc() + "\n")
+ return False
+
+ return True
+
+def load_nanopb_pb2():
+ # To work, the generator needs python-protobuf built version of nanopb.proto.
+ # There are three methods to provide this:
+ #
+ # 1) Load a previously generated generator/proto/nanopb_pb2.py
+ # 2) Use protoc to build it and store it permanently generator/proto/nanopb_pb2.py
+ # 3) Use protoc to build it, but store only temporarily in system-wide temp folder
+ #
+ # By default these are tried in numeric order.
+ # If NANOPB_PB2_TEMP_DIR environment variable is defined, the 2) is skipped.
+ # If the value of the $NANOPB_PB2_TEMP_DIR exists as a directory, it is used instead
+ # of system temp folder.
+
+ tmpdir = os.getenv("NANOPB_PB2_TEMP_DIR")
+ temporary_only = (tmpdir is not None)
+ dirname = os.path.dirname(__file__)
+ protosrc = os.path.join(dirname, "nanopb.proto")
+ protodst = os.path.join(dirname, "nanopb_pb2.py")
+
+ if tmpdir is not None and not os.path.isdir(tmpdir):
+ tmpdir = None # Use system-wide temp dir
+
+ no_rebuild = bool(int(os.getenv("NANOPB_PB2_NO_REBUILD", default = 0)))
+ if bool(no_rebuild):
+ # Don't attempt to autogenerate nanopb_pb2.py, external build rules
+ # should have already done so.
+ import nanopb_pb2 as nanopb_pb2_mod
+ return nanopb_pb2_mod
+
+ if os.path.isfile(protosrc):
+ src_date = os.path.getmtime(protosrc)
+ if os.path.isfile(protodst) and os.path.getmtime(protodst) >= src_date:
+ try:
+ from . import nanopb_pb2 as nanopb_pb2_mod
+ return nanopb_pb2_mod
+ except Exception as e:
+ sys.stderr.write("Failed to import nanopb_pb2.py: " + str(e) + "\n"
+ "Will automatically attempt to rebuild this.\n"
+ "Verify that python-protobuf and protoc versions match.\n")
+ print_versions()
+
+ # Try to rebuild into generator/proto directory
+ if not temporary_only:
+ build_nanopb_proto(protosrc, dirname)
- cmd.append("-I={}".format(_builtin_proto_include))
try:
- invoke_protoc(argv=cmd)
+ from . import nanopb_pb2 as nanopb_pb2_mod
+ return nanopb_pb2_mod
except:
- sys.stderr.write("Failed to build nanopb_pb2.py: " + ' '.join(cmd) + "\n")
- raise
+ sys.stderr.write("Failed to import generator/proto/nanopb_pb2.py:\n")
+ sys.stderr.write(traceback.format_exc() + "\n")
+
+ # Try to rebuild into temporary directory
+ with TemporaryDirectory(prefix = 'nanopb-', dir = tmpdir) as protodir:
+ build_nanopb_proto(protosrc, protodir)
+
+ if protodir not in sys.path:
+ sys.path.insert(0, protodir)
+
+ try:
+ import nanopb_pb2 as nanopb_pb2_mod
+ return nanopb_pb2_mod
+ except:
+ sys.stderr.write("Failed to import %s/nanopb_pb2.py:\n" % protodir)
+ sys.stderr.write(traceback.format_exc() + "\n")
+
+ # If everything fails
+ sys.stderr.write("\n\nGenerating nanopb_pb2.py failed.\n")
+ sys.stderr.write("Make sure that a protoc generator is available and matches python-protobuf version.\n")
+ print_versions()
+ sys.exit(1)
+
diff --git a/vendor/nanopb/generator/proto/_utils.py b/vendor/nanopb/generator/proto/_utils.py
index 7076e9d3..705629f3 100644
--- a/vendor/nanopb/generator/proto/_utils.py
+++ b/vendor/nanopb/generator/proto/_utils.py
@@ -1,16 +1,53 @@
+import sys
import subprocess
import os.path
-def has_grpcio_protoc():
+import traceback
+
+def has_grpcio_protoc(verbose = False):
# type: () -> bool
""" checks if grpcio-tools protoc is installed"""
try:
import grpc_tools.protoc
except ImportError:
+ if verbose:
+ sys.stderr.write("Failed to import grpc_tools: %s\n" % traceback.format_exc())
return False
+
return True
+def get_grpc_tools_proto_path():
+ if sys.hexversion > 0x03090000:
+ import importlib.resources as ir
+ with ir.as_file(ir.files('grpc_tools') / '_proto') as path:
+ return str(path)
+ else:
+ import pkg_resources
+ return pkg_resources.resource_filename('grpc_tools', '_proto')
+
+def get_proto_builtin_include_path():
+ """Find include path for standard google/protobuf includes and for
+ nanopb.proto.
+ """
+
+ if getattr(sys, 'frozen', False):
+ # Pyinstaller package
+ paths = [
+ os.path.join(os.path.dirname(os.path.abspath(sys.executable)), 'proto'),
+ os.path.join(os.path.dirname(os.path.abspath(sys.executable)), 'grpc_tools', '_proto')
+ ]
+
+ else:
+ # Stand-alone script
+ paths = [
+ os.path.dirname(os.path.abspath(__file__))
+ ]
+
+ if has_grpcio_protoc():
+ paths.append(get_grpc_tools_proto_path())
+
+ return paths
def invoke_protoc(argv):
# type: (list) -> typing.Any
@@ -29,15 +66,39 @@ def invoke_protoc(argv):
argv.append("-I.")
# Add default protoc include paths
- nanopb_include = os.path.dirname(os.path.abspath(__file__))
- argv.append('-I' + nanopb_include)
+ for incpath in get_proto_builtin_include_path():
+ argv.append('-I' + incpath)
if has_grpcio_protoc():
import grpc_tools.protoc as protoc
- import pkg_resources
- proto_include = pkg_resources.resource_filename('grpc_tools', '_proto')
- argv.append('-I' + proto_include)
-
return protoc.main(argv)
else:
return subprocess.call(argv)
+
+def print_versions():
+ try:
+ if has_grpcio_protoc(verbose = True):
+ import grpc_tools.protoc
+ sys.stderr.write("Using grpcio-tools protoc from " + grpc_tools.protoc.__file__ + "\n")
+ else:
+ sys.stderr.write("Using protoc from system path\n")
+
+ invoke_protoc(['protoc', '--version'])
+ except Exception as e:
+ sys.stderr.write("Failed to determine protoc version: " + str(e) + "\n")
+
+ try:
+ sys.stderr.write("protoc builtin include path: " + str(get_proto_builtin_include_path()) + "\n")
+ except Exception as e:
+ sys.stderr.write("Failed to construct protoc include path: " + str(e) + "\n")
+
+ try:
+ import google.protobuf
+ sys.stderr.write("Python version " + sys.version + "\n")
+ sys.stderr.write("Using python-protobuf from " + google.protobuf.__file__ + "\n")
+ sys.stderr.write("Python-protobuf version: " + google.protobuf.__version__ + "\n")
+ except Exception as e:
+ sys.stderr.write("Failed to determine python-protobuf version: " + str(e) + "\n")
+
+if __name__ == '__main__':
+ print_versions()
diff --git a/vendor/nanopb/generator/proto/google/protobuf/descriptor.proto b/vendor/nanopb/generator/proto/google/protobuf/descriptor.proto
index 8697a50d..4752e3d5 100644
--- a/vendor/nanopb/generator/proto/google/protobuf/descriptor.proto
+++ b/vendor/nanopb/generator/proto/google/protobuf/descriptor.proto
@@ -36,11 +36,11 @@
// A valid .proto file can be translated directly to a FileDescriptorProto
// without any other information (e.g. without reading its imports).
-
syntax = "proto2";
package google.protobuf;
-option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
+
+option go_package = "google.golang.org/protobuf/types/descriptorpb";
option java_package = "com.google.protobuf";
option java_outer_classname = "DescriptorProtos";
option csharp_namespace = "Google.Protobuf.Reflection";
@@ -57,10 +57,46 @@ message FileDescriptorSet {
repeated FileDescriptorProto file = 1;
}
+// The full set of known editions.
+enum Edition {
+ // A placeholder for an unknown edition value.
+ EDITION_UNKNOWN = 0;
+
+ // A placeholder edition for specifying default behaviors *before* a feature
+ // was first introduced. This is effectively an "infinite past".
+ EDITION_LEGACY = 900;
+
+ // Legacy syntax "editions". These pre-date editions, but behave much like
+ // distinct editions. These can't be used to specify the edition of proto
+ // files, but feature definitions must supply proto2/proto3 defaults for
+ // backwards compatibility.
+ EDITION_PROTO2 = 998;
+ EDITION_PROTO3 = 999;
+
+ // Editions that have been released. The specific values are arbitrary and
+ // should not be depended on, but they will always be time-ordered for easy
+ // comparison.
+ EDITION_2023 = 1000;
+ EDITION_2024 = 1001;
+
+ // Placeholder editions for testing feature resolution. These should not be
+ // used or relied on outside of tests.
+ EDITION_1_TEST_ONLY = 1;
+ EDITION_2_TEST_ONLY = 2;
+ EDITION_99997_TEST_ONLY = 99997;
+ EDITION_99998_TEST_ONLY = 99998;
+ EDITION_99999_TEST_ONLY = 99999;
+
+ // Placeholder for specifying unbounded edition support. This should only
+ // ever be used by plugins that can expect to never require any changes to
+ // support a new edition.
+ EDITION_MAX = 0x7FFFFFFF;
+}
+
// Describes a complete .proto file.
message FileDescriptorProto {
- optional string name = 1; // file name, relative to root of source tree
- optional string package = 2; // e.g. "foo", "foo.bar", etc.
+ optional string name = 1; // file name, relative to root of source tree
+ optional string package = 2; // e.g. "foo", "foo.bar", etc.
// Names of files imported by this file.
repeated string dependency = 3;
@@ -85,8 +121,13 @@ message FileDescriptorProto {
optional SourceCodeInfo source_code_info = 9;
// The syntax of the proto file.
- // The supported values are "proto2" and "proto3".
+ // The supported values are "proto2", "proto3", and "editions".
+ //
+ // If `edition` is present, this value must be "editions".
optional string syntax = 12;
+
+ // The edition of the proto file.
+ optional Edition edition = 14;
}
// Describes a message type.
@@ -100,8 +141,8 @@ message DescriptorProto {
repeated EnumDescriptorProto enum_type = 4;
message ExtensionRange {
- optional int32 start = 1;
- optional int32 end = 2;
+ optional int32 start = 1; // Inclusive.
+ optional int32 end = 2; // Exclusive.
optional ExtensionRangeOptions options = 3;
}
@@ -115,8 +156,8 @@ message DescriptorProto {
// fields or extension ranges in the same message. Reserved ranges may
// not overlap.
message ReservedRange {
- optional int32 start = 1; // Inclusive.
- optional int32 end = 2; // Exclusive.
+ optional int32 start = 1; // Inclusive.
+ optional int32 end = 2; // Exclusive.
}
repeated ReservedRange reserved_range = 9;
// Reserved field names, which may not be used by fields in the same message.
@@ -128,6 +169,52 @@ message ExtensionRangeOptions {
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
+ message Declaration {
+ // The extension number declared within the extension range.
+ optional int32 number = 1;
+
+ // The fully-qualified name of the extension field. There must be a leading
+ // dot in front of the full name.
+ optional string full_name = 2;
+
+ // The fully-qualified type name of the extension field. Unlike
+ // Metadata.type, Declaration.type must have a leading dot for messages
+ // and enums.
+ optional string type = 3;
+
+ // If true, indicates that the number is reserved in the extension range,
+ // and any extension field with the number will fail to compile. Set this
+ // when a declared extension field is deleted.
+ optional bool reserved = 5;
+
+ // If true, indicates that the extension must be defined as repeated.
+ // Otherwise the extension must be defined as optional.
+ optional bool repeated = 6;
+
+ reserved 4; // removed is_repeated
+ }
+
+ // For external users: DO NOT USE. We are in the process of open sourcing
+ // extension declaration and executing internal cleanups before it can be
+ // used externally.
+ repeated Declaration declaration = 2 [retention = RETENTION_SOURCE];
+
+ // Any features defined in the specific edition.
+ optional FeatureSet features = 50;
+
+ // The verification state of the extension range.
+ enum VerificationState {
+ // All the extensions of the range must be declared.
+ DECLARATION = 0;
+ UNVERIFIED = 1;
+ }
+
+ // The verification state of the range.
+ // TODO: flip the default to DECLARATION once all empty ranges
+ // are marked as UNVERIFIED.
+ optional VerificationState verification = 3
+ [default = UNVERIFIED, retention = RETENTION_SOURCE];
+
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
@@ -137,42 +224,46 @@ message FieldDescriptorProto {
enum Type {
// 0 is reserved for errors.
// Order is weird for historical reasons.
- TYPE_DOUBLE = 1;
- TYPE_FLOAT = 2;
+ TYPE_DOUBLE = 1;
+ TYPE_FLOAT = 2;
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
// negative values are likely.
- TYPE_INT64 = 3;
- TYPE_UINT64 = 4;
+ TYPE_INT64 = 3;
+ TYPE_UINT64 = 4;
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
// negative values are likely.
- TYPE_INT32 = 5;
- TYPE_FIXED64 = 6;
- TYPE_FIXED32 = 7;
- TYPE_BOOL = 8;
- TYPE_STRING = 9;
+ TYPE_INT32 = 5;
+ TYPE_FIXED64 = 6;
+ TYPE_FIXED32 = 7;
+ TYPE_BOOL = 8;
+ TYPE_STRING = 9;
// Tag-delimited aggregate.
- // Group type is deprecated and not supported in proto3. However, Proto3
+ // Group type is deprecated and not supported after google.protobuf. However, Proto3
// implementations should still be able to parse the group wire format and
- // treat group fields as unknown fields.
- TYPE_GROUP = 10;
- TYPE_MESSAGE = 11; // Length-delimited aggregate.
+ // treat group fields as unknown fields. In Editions, the group wire format
+ // can be enabled via the `message_encoding` feature.
+ TYPE_GROUP = 10;
+ TYPE_MESSAGE = 11; // Length-delimited aggregate.
// New in version 2.
- TYPE_BYTES = 12;
- TYPE_UINT32 = 13;
- TYPE_ENUM = 14;
- TYPE_SFIXED32 = 15;
- TYPE_SFIXED64 = 16;
- TYPE_SINT32 = 17; // Uses ZigZag encoding.
- TYPE_SINT64 = 18; // Uses ZigZag encoding.
- };
+ TYPE_BYTES = 12;
+ TYPE_UINT32 = 13;
+ TYPE_ENUM = 14;
+ TYPE_SFIXED32 = 15;
+ TYPE_SFIXED64 = 16;
+ TYPE_SINT32 = 17; // Uses ZigZag encoding.
+ TYPE_SINT64 = 18; // Uses ZigZag encoding.
+ }
enum Label {
// 0 is reserved for errors
- LABEL_OPTIONAL = 1;
- LABEL_REQUIRED = 2;
- LABEL_REPEATED = 3;
- };
+ LABEL_OPTIONAL = 1;
+ LABEL_REPEATED = 3;
+ // The required label is only allowed in google.protobuf. In proto3 and Editions
+ // it's explicitly prohibited. In Editions, the `field_presence` feature
+ // can be used to get this behavior.
+ LABEL_REQUIRED = 2;
+ }
optional string name = 1;
optional int32 number = 3;
@@ -197,7 +288,6 @@ message FieldDescriptorProto {
// For booleans, "true" or "false".
// For strings, contains the default text contents (not escaped in any way).
// For bytes, contains the C escaped value. All bytes >= 128 are escaped.
- // TODO(kenton): Base-64 encode?
optional string default_value = 7;
// If set, gives the index of a oneof in the containing type's oneof_decl
@@ -211,6 +301,29 @@ message FieldDescriptorProto {
optional string json_name = 10;
optional FieldOptions options = 8;
+
+ // If true, this is a proto3 "optional". When a proto3 field is optional, it
+ // tracks presence regardless of field type.
+ //
+ // When proto3_optional is true, this field must belong to a oneof to signal
+ // to old proto3 clients that presence is tracked for this field. This oneof
+ // is known as a "synthetic" oneof, and this field must be its sole member
+ // (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs
+ // exist in the descriptor only, and do not generate any API. Synthetic oneofs
+ // must be ordered after all "real" oneofs.
+ //
+ // For message fields, proto3_optional doesn't create any semantic change,
+ // since non-repeated message fields always track presence. However it still
+ // indicates the semantic detail of whether the user wrote "optional" or not.
+ // This can be useful for round-tripping the .proto file. For consistency we
+ // give message fields a synthetic oneof also, even though it is not required
+ // to track presence. This is especially important because the parser can't
+ // tell if a field is a message or an enum, so it must always create a
+ // synthetic oneof.
+ //
+ // Proto2 optional fields do not set this flag, because they already indicate
+ // optional with `LABEL_OPTIONAL`.
+ optional bool proto3_optional = 17;
}
// Describes a oneof.
@@ -234,8 +347,8 @@ message EnumDescriptorProto {
// is inclusive such that it can appropriately represent the entire int32
// domain.
message EnumReservedRange {
- optional int32 start = 1; // Inclusive.
- optional int32 end = 2; // Inclusive.
+ optional int32 start = 1; // Inclusive.
+ optional int32 end = 2; // Inclusive.
}
// Range of reserved numeric values. Reserved numeric values may not be used
@@ -276,12 +389,11 @@ message MethodDescriptorProto {
optional MethodOptions options = 4;
// Identifies if client streams multiple client messages
- optional bool client_streaming = 5 [default=false];
+ optional bool client_streaming = 5 [default = false];
// Identifies if server streams multiple server messages
- optional bool server_streaming = 6 [default=false];
+ optional bool server_streaming = 6 [default = false];
}
-
// ===================================================================
// Options
@@ -314,7 +426,6 @@ message MethodDescriptorProto {
// If this turns out to be popular, a web service will be set up
// to automatically assign option numbers.
-
message FileOptions {
// Sets the Java package where classes generated from this .proto will be
@@ -323,42 +434,44 @@ message FileOptions {
// domain names.
optional string java_package = 1;
-
- // If set, all the classes from the .proto file are wrapped in a single
- // outer class with the given name. This applies to both Proto1
- // (equivalent to the old "--one_java_file" option) and Proto2 (where
- // a .proto always translates to a single class, but you may want to
- // explicitly choose the class name).
+ // Controls the name of the wrapper Java class generated for the .proto file.
+ // That class will always contain the .proto file's getDescriptor() method as
+ // well as any top-level extensions defined in the .proto file.
+ // If java_multiple_files is disabled, then all the other classes from the
+ // .proto file will be nested inside the single wrapper outer class.
optional string java_outer_classname = 8;
- // If set true, then the Java code generator will generate a separate .java
+ // If enabled, then the Java code generator will generate a separate .java
// file for each top-level message, enum, and service defined in the .proto
- // file. Thus, these types will *not* be nested inside the outer class
- // named by java_outer_classname. However, the outer class will still be
+ // file. Thus, these types will *not* be nested inside the wrapper class
+ // named by java_outer_classname. However, the wrapper class will still be
// generated to contain the file's getDescriptor() method as well as any
// top-level extensions defined in the file.
- optional bool java_multiple_files = 10 [default=false];
+ optional bool java_multiple_files = 10 [default = false];
// This option does nothing.
optional bool java_generate_equals_and_hash = 20 [deprecated=true];
- // If set true, then the Java2 code generator will generate code that
- // throws an exception whenever an attempt is made to assign a non-UTF-8
- // byte sequence to a string field.
- // Message reflection will do the same.
- // However, an extension field still accepts non-UTF-8 byte sequences.
- // This option has no effect on when used with the lite runtime.
- optional bool java_string_check_utf8 = 27 [default=false];
-
+ // A proto2 file can set this to true to opt in to UTF-8 checking for Java,
+ // which will throw an exception if invalid UTF-8 is parsed from the wire or
+ // assigned to a string field.
+ //
+ // TODO: clarify exactly what kinds of field types this option
+ // applies to, and update these docs accordingly.
+ //
+ // Proto3 files already perform these checks. Setting the option explicitly to
+ // false has no effect: it cannot be used to opt proto3 files out of UTF-8
+ // checks.
+ optional bool java_string_check_utf8 = 27 [default = false];
// Generated classes can be optimized for speed or code size.
enum OptimizeMode {
- SPEED = 1; // Generate complete code for parsing, serialization,
- // etc.
- CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
- LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+ SPEED = 1; // Generate complete code for parsing, serialization,
+ // etc.
+ CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
+ LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
}
- optional OptimizeMode optimize_for = 9 [default=SPEED];
+ optional OptimizeMode optimize_for = 9 [default = SPEED];
// Sets the Go package where structs generated from this .proto will be
// placed. If omitted, the Go package will be derived from the following:
@@ -367,8 +480,6 @@ message FileOptions {
// - Otherwise, the basename of the .proto file, without extension.
optional string go_package = 11;
-
-
// Should generic services be generated in each language? "Generic" services
// are not specific to any particular RPC system. They are generated by the
// main code generators in each language (without additional plugins).
@@ -379,21 +490,21 @@ message FileOptions {
// that generate code specific to your particular RPC system. Therefore,
// these default to false. Old code which depends on generic services should
// explicitly set them to true.
- optional bool cc_generic_services = 16 [default=false];
- optional bool java_generic_services = 17 [default=false];
- optional bool py_generic_services = 18 [default=false];
- optional bool php_generic_services = 42 [default=false];
+ optional bool cc_generic_services = 16 [default = false];
+ optional bool java_generic_services = 17 [default = false];
+ optional bool py_generic_services = 18 [default = false];
+ reserved 42; // removed php_generic_services
+ reserved "php_generic_services";
// Is this file deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for everything in the file, or it will be completely ignored; in the very
// least, this is a formalization for deprecating files.
- optional bool deprecated = 23 [default=false];
+ optional bool deprecated = 23 [default = false];
// Enables the use of arenas for the proto messages in this file. This applies
// only to generated classes for C++.
- optional bool cc_enable_arenas = 31 [default=false];
-
+ optional bool cc_enable_arenas = 31 [default = true];
// Sets the objective c class prefix which is prepended to all objective c
// generated classes from this .proto. There is no default.
@@ -417,6 +528,19 @@ message FileOptions {
// determining the namespace.
optional string php_namespace = 41;
+ // Use this option to change the namespace of php generated metadata classes.
+ // Default is empty. When this option is empty, the proto file name will be
+ // used for determining the namespace.
+ optional string php_metadata_namespace = 44;
+
+ // Use this option to change the package of ruby generated classes. Default
+ // is empty. When this option is not set, the package name will be used for
+ // determining the ruby package.
+ optional string ruby_package = 45;
+
+ // Any features defined in the specific edition.
+ optional FeatureSet features = 50;
+
// The parser stores options it doesn't recognize here.
// See the documentation for the "Options" section above.
repeated UninterpretedOption uninterpreted_option = 999;
@@ -447,18 +571,20 @@ message MessageOptions {
//
// Because this is an option, the above two restrictions are not enforced by
// the protocol compiler.
- optional bool message_set_wire_format = 1 [default=false];
+ optional bool message_set_wire_format = 1 [default = false];
// Disables the generation of the standard "descriptor()" accessor, which can
// conflict with a field of the same name. This is meant to make migration
// from proto1 easier; new code should avoid fields named "descriptor".
- optional bool no_standard_descriptor_accessor = 2 [default=false];
+ optional bool no_standard_descriptor_accessor = 2 [default = false];
// Is this message deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the message, or it will be completely ignored; in the very least,
// this is a formalization for deprecating messages.
- optional bool deprecated = 3 [default=false];
+ optional bool deprecated = 3 [default = false];
+
+ reserved 4, 5, 6;
// Whether the message is an automatically generated map entry type for the
// maps field.
@@ -475,7 +601,7 @@ message MessageOptions {
//
// Implementations may choose not to generate the map_entry=true message, but
// use a native map in the target language to hold the keys and values.
- // The reflection APIs in such implementions still need to work as
+ // The reflection APIs in such implementations still need to work as
// if the field is a repeated message field.
//
// NOTE: Do not set the option in .proto files. Always use the maps syntax
@@ -486,6 +612,21 @@ message MessageOptions {
reserved 8; // javalite_serializable
reserved 9; // javanano_as_lite
+ // Enable the legacy handling of JSON field name conflicts. This lowercases
+ // and strips underscores from the fields before comparison in proto3 only.
+ // The new behavior takes `json_name` into account and applies to proto2 as
+ // well.
+ //
+ // This should only be used as a temporary measure against broken builds due
+ // to the change in behavior for JSON field name conflicts.
+ //
+ // TODO This is legacy behavior we plan to remove once downstream
+ // teams have had time to migrate.
+ optional bool deprecated_legacy_json_field_conflicts = 11 [deprecated = true];
+
+ // Any features defined in the specific edition.
+ optional FeatureSet features = 12;
+
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
@@ -494,15 +635,24 @@ message MessageOptions {
}
message FieldOptions {
+ // NOTE: ctype is deprecated. Use `features.(pb.cpp).string_type` instead.
// The ctype option instructs the C++ code generator to use a different
// representation of the field than it normally would. See the specific
- // options below. This option is not yet implemented in the open source
- // release -- sorry, we'll try to include it in a future version!
- optional CType ctype = 1 [default = STRING];
+ // options below. This option is only implemented to support use of
+ // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of
+ // type "bytes" in the open source release.
+ // TODO: make ctype actually deprecated.
+ optional CType ctype = 1 [/*deprecated = true,*/ default = STRING];
enum CType {
// Default mode.
STRING = 0;
+ // The option [ctype=CORD] may be applied to a non-repeated field of type
+ // "bytes". It indicates that in C++, the data should be stored in a Cord
+ // instead of a string. For very large strings, this may reduce memory
+ // fragmentation. It may also allow better performance when parsing from a
+ // Cord, or when parsing with aliasing enabled, as the parsed Cord may then
+ // alias the original buffer.
CORD = 1;
STRING_PIECE = 2;
@@ -511,7 +661,9 @@ message FieldOptions {
// a more efficient representation on the wire. Rather than repeatedly
// writing the tag and type for each element, the entire array is encoded as
// a single length-delimited blob. In proto3, only explicit setting it to
- // false will avoid using packed encoding.
+ // false will avoid using packed encoding. This option is prohibited in
+ // Editions, but the `repeated_field_encoding` feature can be used to control
+ // the behavior.
optional bool packed = 2;
// The jstype option determines the JavaScript type used for values of the
@@ -554,28 +706,88 @@ message FieldOptions {
// call from multiple threads concurrently, while non-const methods continue
// to require exclusive access.
//
- //
- // Note that implementations may choose not to check required fields within
- // a lazy sub-message. That is, calling IsInitialized() on the outer message
- // may return true even if the inner message has missing required fields.
- // This is necessary because otherwise the inner message would have to be
- // parsed in order to perform the check, defeating the purpose of lazy
- // parsing. An implementation which chooses not to check required fields
- // must be consistent about it. That is, for any particular sub-message, the
- // implementation must either *always* check its required fields, or *never*
- // check its required fields, regardless of whether or not the message has
- // been parsed.
- optional bool lazy = 5 [default=false];
+ // Note that lazy message fields are still eagerly verified to check
+ // ill-formed wireformat or missing required fields. Calling IsInitialized()
+ // on the outer message would fail if the inner message has missing required
+ // fields. Failed verification would result in parsing failure (except when
+ // uninitialized messages are acceptable).
+ optional bool lazy = 5 [default = false];
+
+ // unverified_lazy does no correctness checks on the byte stream. This should
+ // only be used where lazy with verification is prohibitive for performance
+ // reasons.
+ optional bool unverified_lazy = 15 [default = false];
// Is this field deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for accessors, or it will be completely ignored; in the very least, this
// is a formalization for deprecating fields.
- optional bool deprecated = 3 [default=false];
+ optional bool deprecated = 3 [default = false];
// For Google-internal migration only. Do not use.
- optional bool weak = 10 [default=false];
+ optional bool weak = 10 [default = false];
+
+ // Indicate that the field value should not be printed out when using debug
+ // formats, e.g. when the field contains sensitive credentials.
+ optional bool debug_redact = 16 [default = false];
+
+ // If set to RETENTION_SOURCE, the option will be omitted from the binary.
+ enum OptionRetention {
+ RETENTION_UNKNOWN = 0;
+ RETENTION_RUNTIME = 1;
+ RETENTION_SOURCE = 2;
+ }
+
+ optional OptionRetention retention = 17;
+
+ // This indicates the types of entities that the field may apply to when used
+ // as an option. If it is unset, then the field may be freely used as an
+ // option on any kind of entity.
+ enum OptionTargetType {
+ TARGET_TYPE_UNKNOWN = 0;
+ TARGET_TYPE_FILE = 1;
+ TARGET_TYPE_EXTENSION_RANGE = 2;
+ TARGET_TYPE_MESSAGE = 3;
+ TARGET_TYPE_FIELD = 4;
+ TARGET_TYPE_ONEOF = 5;
+ TARGET_TYPE_ENUM = 6;
+ TARGET_TYPE_ENUM_ENTRY = 7;
+ TARGET_TYPE_SERVICE = 8;
+ TARGET_TYPE_METHOD = 9;
+ }
+ repeated OptionTargetType targets = 19;
+
+ message EditionDefault {
+ optional Edition edition = 3;
+ optional string value = 2; // Textproto value.
+ }
+ repeated EditionDefault edition_defaults = 20;
+
+ // Any features defined in the specific edition.
+ optional FeatureSet features = 21;
+
+ // Information about the support window of a feature.
+ message FeatureSupport {
+ // The edition that this feature was first available in. In editions
+ // earlier than this one, the default assigned to EDITION_LEGACY will be
+ // used, and proto files will not be able to override it.
+ optional Edition edition_introduced = 1;
+
+ // The edition this feature becomes deprecated in. Using this after this
+ // edition may trigger warnings.
+ optional Edition edition_deprecated = 2;
+
+ // The deprecation warning text if this feature is used after the edition it
+ // was marked deprecated in.
+ optional string deprecation_warning = 3;
+
+ // The edition this feature is no longer available in. In editions after
+ // this one, the last default assigned will be used, and proto files will
+ // not be able to override it.
+ optional Edition edition_removed = 4;
+ }
+ optional FeatureSupport feature_support = 22;
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
@@ -583,10 +795,14 @@ message FieldOptions {
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
- reserved 4; // removed jtype
+ reserved 4; // removed jtype
+ reserved 18; // reserve target, target_obsolete_do_not_use
}
message OneofOptions {
+ // Any features defined in the specific edition.
+ optional FeatureSet features = 1;
+
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
@@ -604,10 +820,21 @@ message EnumOptions {
// Depending on the target platform, this can emit Deprecated annotations
// for the enum, or it will be completely ignored; in the very least, this
// is a formalization for deprecating enums.
- optional bool deprecated = 3 [default=false];
+ optional bool deprecated = 3 [default = false];
reserved 5; // javanano_as_lite
+ // Enable the legacy handling of JSON field name conflicts. This lowercases
+ // and strips underscores from the fields before comparison in proto3 only.
+ // The new behavior takes `json_name` into account and applies to proto2 as
+ // well.
+ // TODO Remove this legacy behavior once downstream teams have
+ // had time to migrate.
+ optional bool deprecated_legacy_json_field_conflicts = 6 [deprecated = true];
+
+ // Any features defined in the specific edition.
+ optional FeatureSet features = 7;
+
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
@@ -620,7 +847,18 @@ message EnumValueOptions {
// Depending on the target platform, this can emit Deprecated annotations
// for the enum value, or it will be completely ignored; in the very least,
// this is a formalization for deprecating enum values.
- optional bool deprecated = 1 [default=false];
+ optional bool deprecated = 1 [default = false];
+
+ // Any features defined in the specific edition.
+ optional FeatureSet features = 2;
+
+ // Indicate that fields annotated with this enum value should not be printed
+ // out when using debug formats, e.g. when the field contains sensitive
+ // credentials.
+ optional bool debug_redact = 3 [default = false];
+
+ // Information about the support window of a feature value.
+ optional FieldOptions.FeatureSupport feature_support = 4;
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
@@ -631,6 +869,9 @@ message EnumValueOptions {
message ServiceOptions {
+ // Any features defined in the specific edition.
+ optional FeatureSet features = 34;
+
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
// framework. We apologize for hoarding these numbers to ourselves, but
// we were already using them long before we decided to release Protocol
@@ -640,7 +881,7 @@ message ServiceOptions {
// Depending on the target platform, this can emit Deprecated annotations
// for the service, or it will be completely ignored; in the very least,
// this is a formalization for deprecating services.
- optional bool deprecated = 33 [default=false];
+ optional bool deprecated = 33 [default = false];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
@@ -660,18 +901,21 @@ message MethodOptions {
// Depending on the target platform, this can emit Deprecated annotations
// for the method, or it will be completely ignored; in the very least,
// this is a formalization for deprecating methods.
- optional bool deprecated = 33 [default=false];
+ optional bool deprecated = 33 [default = false];
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
// or neither? HTTP based RPC implementation may choose GET verb for safe
// methods, and PUT verb for idempotent methods instead of the default POST.
enum IdempotencyLevel {
IDEMPOTENCY_UNKNOWN = 0;
- NO_SIDE_EFFECTS = 1; // implies idempotent
- IDEMPOTENT = 2; // idempotent, but may have side effects
+ NO_SIDE_EFFECTS = 1; // implies idempotent
+ IDEMPOTENT = 2; // idempotent, but may have side effects
}
- optional IdempotencyLevel idempotency_level =
- 34 [default=IDEMPOTENCY_UNKNOWN];
+ optional IdempotencyLevel idempotency_level = 34
+ [default = IDEMPOTENCY_UNKNOWN];
+
+ // Any features defined in the specific edition.
+ optional FeatureSet features = 35;
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
@@ -680,7 +924,6 @@ message MethodOptions {
extensions 1000 to max;
}
-
// A message representing a option the parser does not recognize. This only
// appears in options protos created by the compiler::Parser class.
// DescriptorPool resolves these when building Descriptor objects. Therefore,
@@ -691,8 +934,8 @@ message UninterpretedOption {
// The name of the uninterpreted option. Each string represents a segment in
// a dot-separated name. is_extension is true iff a segment represents an
// extension (denoted with parentheses in options specs in .proto files).
- // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
- // "foo.(bar.baz).qux".
+ // E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents
+ // "foo.(bar.baz).moo".
message NamePart {
required string name_part = 1;
required bool is_extension = 2;
@@ -709,6 +952,172 @@ message UninterpretedOption {
optional string aggregate_value = 8;
}
+// ===================================================================
+// Features
+
+// TODO Enums in C++ gencode (and potentially other languages) are
+// not well scoped. This means that each of the feature enums below can clash
+// with each other. The short names we've chosen maximize call-site
+// readability, but leave us very open to this scenario. A future feature will
+// be designed and implemented to handle this, hopefully before we ever hit a
+// conflict here.
+message FeatureSet {
+ enum FieldPresence {
+ FIELD_PRESENCE_UNKNOWN = 0;
+ EXPLICIT = 1;
+ IMPLICIT = 2;
+ LEGACY_REQUIRED = 3;
+ }
+ optional FieldPresence field_presence = 1 [
+ retention = RETENTION_RUNTIME,
+ targets = TARGET_TYPE_FIELD,
+ targets = TARGET_TYPE_FILE,
+ feature_support = {
+ edition_introduced: EDITION_2023,
+ },
+ edition_defaults = { edition: EDITION_LEGACY, value: "EXPLICIT" },
+ edition_defaults = { edition: EDITION_PROTO3, value: "IMPLICIT" },
+ edition_defaults = { edition: EDITION_2023, value: "EXPLICIT" }
+ ];
+
+ enum EnumType {
+ ENUM_TYPE_UNKNOWN = 0;
+ OPEN = 1;
+ CLOSED = 2;
+ }
+ optional EnumType enum_type = 2 [
+ retention = RETENTION_RUNTIME,
+ targets = TARGET_TYPE_ENUM,
+ targets = TARGET_TYPE_FILE,
+ feature_support = {
+ edition_introduced: EDITION_2023,
+ },
+ edition_defaults = { edition: EDITION_LEGACY, value: "CLOSED" },
+ edition_defaults = { edition: EDITION_PROTO3, value: "OPEN" }
+ ];
+
+ enum RepeatedFieldEncoding {
+ REPEATED_FIELD_ENCODING_UNKNOWN = 0;
+ PACKED = 1;
+ EXPANDED = 2;
+ }
+ optional RepeatedFieldEncoding repeated_field_encoding = 3 [
+ retention = RETENTION_RUNTIME,
+ targets = TARGET_TYPE_FIELD,
+ targets = TARGET_TYPE_FILE,
+ feature_support = {
+ edition_introduced: EDITION_2023,
+ },
+ edition_defaults = { edition: EDITION_LEGACY, value: "EXPANDED" },
+ edition_defaults = { edition: EDITION_PROTO3, value: "PACKED" }
+ ];
+
+ enum Utf8Validation {
+ UTF8_VALIDATION_UNKNOWN = 0;
+ VERIFY = 2;
+ NONE = 3;
+ reserved 1;
+ }
+ optional Utf8Validation utf8_validation = 4 [
+ retention = RETENTION_RUNTIME,
+ targets = TARGET_TYPE_FIELD,
+ targets = TARGET_TYPE_FILE,
+ feature_support = {
+ edition_introduced: EDITION_2023,
+ },
+ edition_defaults = { edition: EDITION_LEGACY, value: "NONE" },
+ edition_defaults = { edition: EDITION_PROTO3, value: "VERIFY" }
+ ];
+
+ enum MessageEncoding {
+ MESSAGE_ENCODING_UNKNOWN = 0;
+ LENGTH_PREFIXED = 1;
+ DELIMITED = 2;
+ }
+ optional MessageEncoding message_encoding = 5 [
+ retention = RETENTION_RUNTIME,
+ targets = TARGET_TYPE_FIELD,
+ targets = TARGET_TYPE_FILE,
+ feature_support = {
+ edition_introduced: EDITION_2023,
+ },
+ edition_defaults = { edition: EDITION_LEGACY, value: "LENGTH_PREFIXED" }
+ ];
+
+ enum JsonFormat {
+ JSON_FORMAT_UNKNOWN = 0;
+ ALLOW = 1;
+ LEGACY_BEST_EFFORT = 2;
+ }
+ optional JsonFormat json_format = 6 [
+ retention = RETENTION_RUNTIME,
+ targets = TARGET_TYPE_MESSAGE,
+ targets = TARGET_TYPE_ENUM,
+ targets = TARGET_TYPE_FILE,
+ feature_support = {
+ edition_introduced: EDITION_2023,
+ },
+ edition_defaults = { edition: EDITION_LEGACY, value: "LEGACY_BEST_EFFORT" },
+ edition_defaults = { edition: EDITION_PROTO3, value: "ALLOW" }
+ ];
+
+ reserved 999;
+
+ extensions 1000 to 9994 [
+ declaration = {
+ number: 1000,
+ full_name: ".pb.cpp",
+ type: ".pb.CppFeatures"
+ },
+ declaration = {
+ number: 1001,
+ full_name: ".pb.java",
+ type: ".pb.JavaFeatures"
+ },
+ declaration = { number: 1002, full_name: ".pb.go", type: ".pb.GoFeatures" },
+ declaration = {
+ number: 9990,
+ full_name: ".pb.proto1",
+ type: ".pb.Proto1Features"
+ }
+ ];
+
+ extensions 9995 to 9999; // For internal testing
+ extensions 10000; // for https://github.com/bufbuild/protobuf-es
+}
+
+// A compiled specification for the defaults of a set of features. These
+// messages are generated from FeatureSet extensions and can be used to seed
+// feature resolution. The resolution with this object becomes a simple search
+// for the closest matching edition, followed by proto merges.
+message FeatureSetDefaults {
+ // A map from every known edition with a unique set of defaults to its
+ // defaults. Not all editions may be contained here. For a given edition,
+ // the defaults at the closest matching edition ordered at or before it should
+ // be used. This field must be in strict ascending order by edition.
+ message FeatureSetEditionDefault {
+ optional Edition edition = 3;
+
+ // Defaults of features that can be overridden in this edition.
+ optional FeatureSet overridable_features = 4;
+
+ // Defaults of features that can't be overridden in this edition.
+ optional FeatureSet fixed_features = 5;
+
+ reserved 1, 2;
+ reserved "features";
+ }
+ repeated FeatureSetEditionDefault defaults = 1;
+
+ // The minimum supported edition (inclusive) when this was constructed.
+ // Editions before this will not have defaults.
+ optional Edition minimum_edition = 4;
+
+ // The maximum known edition (inclusive) when this was constructed. Editions
+ // after this will not have reliable defaults.
+ optional Edition maximum_edition = 5;
+}
+
// ===================================================================
// Optional source code info
@@ -752,7 +1161,7 @@ message SourceCodeInfo {
// beginning of the "extend" block and is shared by all extensions within
// the block.
// - Just because a location's span is a subset of some other location's span
- // does not mean that it is a descendent. For example, a "group" defines
+ // does not mean that it is a descendant. For example, a "group" defines
// both a type and a field in a single declaration. Thus, the locations
// corresponding to the type and field and their components will overlap.
// - Code which tries to interpret locations should probably be designed to
@@ -764,8 +1173,8 @@ message SourceCodeInfo {
// location.
//
// Each element is a field number or an index. They form a path from
- // the root FileDescriptorProto to the place where the definition. For
- // example, this path:
+ // the root FileDescriptorProto to the place where the definition appears.
+ // For example, this path:
// [ 4, 3, 2, 7, 1 ]
// refers to:
// file.message_type(3) // 4, 3
@@ -783,14 +1192,14 @@ message SourceCodeInfo {
// [ 4, 3, 2, 7 ]
// this path refers to the whole field declaration (from the beginning
// of the label to the terminating semicolon).
- repeated int32 path = 1 [packed=true];
+ repeated int32 path = 1 [packed = true];
// Always has exactly three or four elements: start line, start column,
// end line (optional, otherwise assumed same as start line), end column.
// These are packed into a single field for efficiency. Note that line
// and column numbers are zero-based -- typically you will want to add
// 1 to each before displaying to a user.
- repeated int32 span = 2 [packed=true];
+ repeated int32 span = 2 [packed = true];
// If this SourceCodeInfo represents a complete declaration, these are any
// comments appearing before and after the declaration which appear to be
@@ -819,13 +1228,13 @@ message SourceCodeInfo {
// // Comment attached to baz.
// // Another line attached to baz.
//
- // // Comment attached to qux.
+ // // Comment attached to moo.
// //
- // // Another line attached to qux.
- // optional double qux = 4;
+ // // Another line attached to moo.
+ // optional double moo = 4;
//
// // Detached comment for corge. This is not leading or trailing comments
- // // to qux or corge because there are blank lines separating it from
+ // // to moo or corge because there are blank lines separating it from
// // both.
//
// // Detached comment for corge paragraph 2.
@@ -855,7 +1264,7 @@ message GeneratedCodeInfo {
message Annotation {
// Identifies the element in the original source .proto file. This field
// is formatted the same as SourceCodeInfo.Location.path.
- repeated int32 path = 1 [packed=true];
+ repeated int32 path = 1 [packed = true];
// Identifies the filesystem path to the original source .proto.
optional string source_file = 2;
@@ -865,8 +1274,20 @@ message GeneratedCodeInfo {
optional int32 begin = 3;
// Identifies the ending offset in bytes in the generated code that
- // relates to the identified offset. The end offset should be one past
+ // relates to the identified object. The end offset should be one past
// the last relevant byte (so the length of the text = end - begin).
optional int32 end = 4;
+
+ // Represents the identified object's effect on the element in the original
+ // .proto file.
+ enum Semantic {
+ // There is no effect or the effect is indescribable.
+ NONE = 0;
+ // The element is set or otherwise mutated.
+ SET = 1;
+ // An alias to the element is returned.
+ ALIAS = 2;
+ }
+ optional Semantic semantic = 5;
}
}
diff --git a/vendor/nanopb/generator/proto/nanopb.proto b/vendor/nanopb/generator/proto/nanopb.proto
index c8067e3a..1a1cc98f 100644
--- a/vendor/nanopb/generator/proto/nanopb.proto
+++ b/vendor/nanopb/generator/proto/nanopb.proto
@@ -1,9 +1,15 @@
-// Custom options for defining:
-// - Maximum size of string/bytes
-// - Maximum number of elements in array
+// This file contains definitions of custom options used to control the
+// code generator in nanopb protocol buffers library.
//
-// These are used by nanopb to generate statically allocable structures
-// for memory-limited environments.
+// Most commonly used options are max_count and max_size, which allow
+// the generator to allocate static arrays for repeated and string fields.
+//
+// There are three ways to use these options:
+// 1. Use a separate .options file
+// 2. Use command line switches to nanopb_generator.py
+// 3. Use [(nanopb).option = value] in your .proto file
+//
+// For detailed documentation, refer to "Generator options" in docs/reference.md
syntax = "proto2";
import "google/protobuf/descriptor.proto";
@@ -61,6 +67,9 @@ message NanoPBOptions {
// full 32 bits for the value.
optional IntSize int_size = 7 [default = IS_DEFAULT];
+ // Size for enum fields. Supported by C++11 and C23 standards.
+ optional IntSize enum_intsize = 34 [default = IS_DEFAULT];
+
// Force type of field (callback or static allocation)
optional FieldType type = 3 [default = FT_DEFAULT];
@@ -97,6 +106,9 @@ message NanoPBOptions {
// Generate an enum->string mapping function (can take up lots of space).
optional bool enum_to_string = 13 [default = false];
+ // Generate validation methods for enums
+ optional bool enum_validate = 32 [default = false];
+
// Generate bytes arrays with fixed length
optional bool fixed_length = 15 [default = false];
@@ -133,7 +145,7 @@ message NanoPBOptions {
// Extra files to include in generated `.pb.h`
repeated string include = 24;
- // Automatic includes to exlude from generated `.pb.h`
+ // Automatic includes to exclude from generated `.pb.h`
// Same as nanopb_generator.py command line flag -x.
repeated string exclude = 26;
@@ -143,10 +155,32 @@ message NanoPBOptions {
// Override type of the field in generated C code. Only to be used with related field types
optional google.protobuf.FieldDescriptorProto.Type type_override = 27;
+ // Override of the label of the field (see FieldDescriptorProto.Label). Can be used to create
+ // fields which nanopb considers required in proto3, or whether nanopb treats the field as
+ // optional/required/repeated.
+ optional google.protobuf.FieldDescriptorProto.Label label_override = 31;
+
// Due to historical reasons, nanopb orders fields in structs by their tag number
// instead of the order in .proto. Set this to false to keep the .proto order.
// The default value will probably change to false in nanopb-0.5.0.
optional bool sort_by_tag = 28 [default = true];
+
+ // Set the FT_DEFAULT field conversion strategy.
+ // A field that can become a static member of a c struct (e.g. int, bool, etc)
+ // will be a a static field.
+ // Fields with dynamic length are converted to either a pointer or a callback.
+ optional FieldType fallback_type = 29 [default = FT_CALLBACK];
+
+ // Override initializer used in generated MyMessage_init_zero and MyMessage_init_default macros
+ // By default decided automatically based on field default value and datatype.
+ optional string initializer = 30;
+
+ // Discard unused types that are automatically generated by protoc if they are not actually
+ // needed. Currently this applies to map< > types when the field is ignored by options.
+ optional bool discard_unused_automatic_types = 33 [default = true];
+
+ // Discard messages and fields marked with [deprecated = true] in the proto file.
+ optional bool discard_deprecated = 35 [default = false];
}
// Extensions to protoc 'Descriptor' type in order to define options
diff --git a/vendor/nanopb/generator/protoc b/vendor/nanopb/generator/protoc
index 4b1512d0..c259702f 100755
--- a/vendor/nanopb/generator/protoc
+++ b/vendor/nanopb/generator/protoc
@@ -1,17 +1,34 @@
#!/usr/bin/env python3
+# This file acts as a drop-in replacement of binary protoc.exe.
+# It will use either Python-based protoc from grpcio-tools package,
+# or if it is not available, protoc.exe from path if found.
import sys
import os
import os.path
-from nanopb_generator import invoke_protoc
+
+# Depending on how this script is run, we may or may not have PEP366 package name
+# available for relative imports.
+if not __package__:
+ from proto._utils import invoke_protoc
+else:
+ from .proto._utils import invoke_protoc
if __name__ == '__main__':
- # Add argument so that protoc-gen-nanopb gets found
+ # Get path of the directory where this script is stored.
if getattr(sys, 'frozen', False):
mypath = os.path.dirname(sys.executable) # For pyInstaller
else:
mypath = os.path.dirname(__file__)
+ # Avoid recursive calls to self
+ env_paths = os.environ["PATH"].split(os.pathsep)
+ if mypath in env_paths:
+ env_paths.remove(mypath)
+ os.environ["PATH"] = os.pathsep.join(env_paths)
+
+ # Add argument for finding the nanopb generator when using --nanopb_out=
+ # argument to protoc.
if os.path.isfile(os.path.join(mypath, "protoc-gen-nanopb.exe")):
protoc_gen_nanopb = os.path.join(mypath, "protoc-gen-nanopb.exe")
elif os.name == 'nt':
diff --git a/vendor/nanopb/generator/protoc-gen-nanopb b/vendor/nanopb/generator/protoc-gen-nanopb
index 471a620b..20a36c79 100755
--- a/vendor/nanopb/generator/protoc-gen-nanopb
+++ b/vendor/nanopb/generator/protoc-gen-nanopb
@@ -1,13 +1,11 @@
-#!/bin/sh
-
+#!/usr/bin/env python3
# This file is used to invoke nanopb_generator.py as a plugin
# to protoc on Linux and other *nix-style systems.
# Use it like this:
# protoc --plugin=protoc-gen-nanopb=..../protoc-gen-nanopb --nanopb_out=dir foo.proto
-#
-# Note that if you use the binary package of nanopb, the protoc
-# path is already set up properly and there is no need to give
-# --plugin= on the command line.
-MYPATH=$(dirname "$0")
-exec "$MYPATH/nanopb_generator.py" --protoc-plugin
+from nanopb_generator import *
+
+if __name__ == '__main__':
+ # Assume we are running as a plugin under protoc.
+ main_plugin()
diff --git a/vendor/nanopb/generator/protoc-gen-nanopb.bat b/vendor/nanopb/generator/protoc-gen-nanopb.bat
index 48a4aa92..fa5bdd2b 100644
--- a/vendor/nanopb/generator/protoc-gen-nanopb.bat
+++ b/vendor/nanopb/generator/protoc-gen-nanopb.bat
@@ -9,4 +9,4 @@
:: --plugin= on the command line.
set mydir=%~dp0
-py -3 "%mydir%\nanopb_generator.py" --protoc-plugin
+python "%mydir%\nanopb_generator.py" --protoc-plugin %*
diff --git a/vendor/nanopb/generator/protoc.bat b/vendor/nanopb/generator/protoc.bat
index cfd9e437..2538c94a 100644
--- a/vendor/nanopb/generator/protoc.bat
+++ b/vendor/nanopb/generator/protoc.bat
@@ -5,5 +5,5 @@
setLocal enableDelayedExpansion
set mydir=%~dp0
-py -3 "%mydir%\protoc" %*
+python "%mydir%\protoc" %*
exit /b %ERRORLEVEL%
diff --git a/vendor/nanopb/library.json b/vendor/nanopb/library.json
index 93364587..74cb4f9f 100644
--- a/vendor/nanopb/library.json
+++ b/vendor/nanopb/library.json
@@ -1,6 +1,6 @@
{
"name": "Nanopb",
- "version": "0.4.5",
+ "version": "0.4.9.1",
"keywords": "protocol buffers, protobuf, google",
"description": "Nanopb is a plain-C implementation of Google's Protocol Buffers data format. It is targeted at 32 bit microcontrollers, but is also fit for other embedded systems with tight (<10 kB ROM, <1 kB RAM) memory constraints.",
"repository": {
@@ -17,10 +17,27 @@
"*.c",
"*.cpp",
"*.h",
- "examples"
+ "examples",
+ "generator"
+ ],
+ "exclude": [
+ "generator/**/__pycache__",
+ "examples/platformio/.gitignore"
]
},
- "examples": "examples/*/*.c",
+ "build": {
+ "extraScript": "generator/platformio_generator.py",
+ "includeDir": "",
+ "srcDir": "",
+ "srcFilter": [
+ "+<*.c>"
+ ]
+ },
+ "examples": [
+ "examples/platformio/platformio.ini",
+ "examples/platformio/src/*.c",
+ "examples/*/*.c"
+ ],
"frameworks": "*",
"platforms": "*"
}
diff --git a/vendor/nanopb/pb.h b/vendor/nanopb/pb.h
index fbb9f1d1..10249bb6 100644
--- a/vendor/nanopb/pb.h
+++ b/vendor/nanopb/pb.h
@@ -14,7 +14,8 @@
/* #define PB_ENABLE_MALLOC 1 */
/* Define this if your CPU / compiler combination does not support
- * unaligned memory access to packed structures. */
+ * unaligned memory access to packed structures. Note that packed
+ * structures are only used when requested in .proto options. */
/* #define PB_NO_PACKED_STRUCTS 1 */
/* Increase the number of required fields that are tracked.
@@ -47,6 +48,15 @@
* the string processing slightly and slightly increases code size. */
/* #define PB_VALIDATE_UTF8 1 */
+/* This can be defined if the platform is little-endian and has 8-bit bytes.
+ * Normally it is automatically detected based on __BYTE_ORDER__ macro. */
+/* #define PB_LITTLE_ENDIAN_8BIT 1 */
+
+/* Configure static assert mechanism. Instead of changing these, set your
+ * compiler to C11 standard mode if possible. */
+/* #define PB_C99_STATIC_ASSERT 1 */
+/* #define PB_NO_STATIC_ASSERT 1 */
+
/******************************************************************
* You usually don't need to change anything below this line. *
* Feel free to look around and use the defined macros, though. *
@@ -55,7 +65,7 @@
/* Version of the nanopb library. Just in case you want to check it in
* your own program. */
-#define NANOPB_VERSION nanopb-0.4.5
+#define NANOPB_VERSION "nanopb-0.4.9.1"
/* Include all the system headers needed by nanopb. You will need the
* definitions of the following:
@@ -76,7 +86,6 @@
#include <stdbool.h>
#include <string.h>
#include <limits.h>
-#include "os.h"
#ifdef PB_ENABLE_MALLOC
#include <stdlib.h>
@@ -117,6 +126,18 @@ extern "C" {
# define pb_packed
#endif
+/* Detect endianness */
+#ifndef PB_LITTLE_ENDIAN_8BIT
+#if ((defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN) || \
+ (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
+ defined(__LITTLE_ENDIAN__) || defined(__ARMEL__) || \
+ defined(__THUMBEL__) || defined(__AARCH64EL__) || defined(_MIPSEL) || \
+ defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM)) \
+ && CHAR_BIT == 8
+#define PB_LITTLE_ENDIAN_8BIT 1
+#endif
+#endif
+
/* Handly macro for suppressing unreferenced-parameter compiler warnings. */
#ifndef PB_UNUSED
#define PB_UNUSED(x) (void)(x)
@@ -146,14 +167,23 @@ extern "C" {
*/
#ifndef PB_NO_STATIC_ASSERT
# ifndef PB_STATIC_ASSERT
-# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
- /* C11 standard _Static_assert mechanism */
-# define PB_STATIC_ASSERT(COND,MSG) _Static_assert(COND,#MSG);
-# else
+# if defined(__ICCARM__)
+ /* IAR has static_assert keyword but no _Static_assert */
+# define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
+# elif defined(_MSC_VER) && (!defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112)
+ /* MSVC in C89 mode supports static_assert() keyword anyway */
+# define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
+# elif defined(PB_C99_STATIC_ASSERT)
/* Classic negative-size-array static assert mechanism */
# define PB_STATIC_ASSERT(COND,MSG) typedef char PB_STATIC_ASSERT_MSG(MSG, __LINE__, __COUNTER__)[(COND)?1:-1];
# define PB_STATIC_ASSERT_MSG(MSG, LINE, COUNTER) PB_STATIC_ASSERT_MSG_(MSG, LINE, COUNTER)
# define PB_STATIC_ASSERT_MSG_(MSG, LINE, COUNTER) pb_static_assertion_##MSG##_##LINE##_##COUNTER
+# elif defined(__cplusplus)
+ /* C++11 standard static_assert mechanism */
+# define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
+# else
+ /* C11 standard _Static_assert mechanism */
+# define PB_STATIC_ASSERT(COND,MSG) _Static_assert(COND,#MSG);
# endif
# endif
#else
@@ -161,6 +191,14 @@ extern "C" {
# define PB_STATIC_ASSERT(COND,MSG)
#endif
+/* Test that PB_STATIC_ASSERT works
+ * If you get errors here, you may need to do one of these:
+ * - Enable C11 standard support in your compiler
+ * - Define PB_C99_STATIC_ASSERT to enable C99 standard support
+ * - Define PB_NO_STATIC_ASSERT to disable static asserts altogether
+ */
+PB_STATIC_ASSERT(1, STATIC_ASSERT_IS_NOT_WORKING)
+
/* Number of required fields to keep track of. */
#ifndef PB_MAX_REQUIRED_FIELDS
#define PB_MAX_REQUIRED_FIELDS 64
@@ -177,12 +215,23 @@ extern "C" {
#endif
#endif
+/* Data type for storing encoded data and other byte streams.
+ * This typedef exists to support platforms where uint8_t does not exist.
+ * You can regard it as equivalent on uint8_t on other platforms.
+ */
+#if defined(PB_BYTE_T_OVERRIDE)
+typedef PB_BYTE_T_OVERRIDE pb_byte_t;
+#elif defined(UINT8_MAX)
+typedef uint8_t pb_byte_t;
+#else
+typedef uint_least8_t pb_byte_t;
+#endif
+
/* List of possible field types. These are used in the autogenerated code.
* Least-significant 4 bits tell the scalar type
* Most-significant 4 bits specify repeated/required/packed etc.
*/
-
-typedef uint_least8_t pb_type_t;
+typedef pb_byte_t pb_type_t;
/**** Field data types ****/
@@ -239,7 +288,7 @@ typedef uint_least8_t pb_type_t;
#define PB_HTYPE_MASK 0x30U
/**** Field allocation types ****/
-
+
#define PB_ATYPE_STATIC 0x00U
#define PB_ATYPE_POINTER 0x80U
#define PB_ATYPE_CALLBACK 0x40U
@@ -263,12 +312,6 @@ typedef uint_least8_t pb_type_t;
#endif
#define PB_SIZE_MAX ((pb_size_t)-1)
-/* Data type for storing encoded data and other byte streams.
- * This typedef exists to support platforms where uint8_t does not exist.
- * You can regard it as equivalent on uint8_t on other platforms.
- */
-typedef uint_least8_t pb_byte_t;
-
/* Forward declaration of struct types */
typedef struct pb_istream_s pb_istream_t;
typedef struct pb_ostream_s pb_ostream_t;
@@ -366,7 +409,7 @@ struct pb_callback_s {
bool (*decode)(pb_istream_t *stream, const pb_field_t *field, void **arg);
bool (*encode)(pb_ostream_t *stream, const pb_field_t *field, void * const *arg);
} funcs;
-
+
/* Free arg for use by callback */
void *arg;
};
@@ -378,7 +421,8 @@ typedef enum {
PB_WT_VARINT = 0,
PB_WT_64BIT = 1,
PB_WT_STRING = 2,
- PB_WT_32BIT = 5
+ PB_WT_32BIT = 5,
+ PB_WT_PACKED = 255 /* PB_WT_PACKED is internal marker for packed arrays. */
} pb_wire_type_t;
/* Structure for defining the handling of unknown/extension fields.
@@ -398,7 +442,7 @@ struct pb_extension_type_s {
*/
bool (*decode)(pb_istream_t *stream, pb_extension_t *extension,
uint32_t tag, pb_wire_type_t wire_type);
-
+
/* Called once after all regular fields have been encoded.
* If you have something to write, do so and return true.
* If you do not have anything to write, just return true.
@@ -406,7 +450,7 @@ struct pb_extension_type_s {
* Set to NULL for default handler.
*/
bool (*encode)(pb_ostream_t *stream, const pb_extension_t *extension);
-
+
/* Free field for use by the callback. */
const void *arg;
};
@@ -415,11 +459,11 @@ struct pb_extension_s {
/* Type describing the extension field. Usually you'll initialize
* this to a pointer to the automatically generated structure. */
const pb_extension_type_t *type;
-
+
/* Destination for the decoded data. This must match the datatype
* of the extension field. */
void *dest;
-
+
/* Pointer to the next extension handler, or NULL.
* If this extension does not match a field, the next handler is
* automatically called. */
@@ -866,11 +910,13 @@ struct pb_extension_s {
#define PB_INLINE_CONSTEXPR PB_CONSTEXPR
#endif // __cplusplus >= 201703L
+extern "C++"
+{
namespace nanopb {
// Each type will be partially specialized by the generator.
template <typename T> struct MessageDescriptor;
} // namespace nanopb
+}
#endif /* __cplusplus */
#endif
-
diff --git a/vendor/nanopb/pb_common.c b/vendor/nanopb/pb_common.c
index af6583d0..6aee76b1 100644
--- a/vendor/nanopb/pb_common.c
+++ b/vendor/nanopb/pb_common.c
@@ -14,7 +14,7 @@ static bool load_descriptor_values(pb_field_iter_t *iter)
if (iter->index >= iter->descriptor->field_count)
return false;
- word0 = PB_PROGMEM_READU32(((uint32_t *)PIC(((pb_msgdesc_t *)PIC(iter->descriptor))->field_info))[iter->field_info_index]);
+ word0 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);
iter->type = (pb_type_t)((word0 >> 8) & 0xFF);
switch(word0 & 3)
@@ -31,7 +31,7 @@ static bool load_descriptor_values(pb_field_iter_t *iter)
case 1: {
/* 2-word format */
- uint32_t word1 = PB_PROGMEM_READU32(((uint32_t *)PIC(((pb_msgdesc_t *)PIC(iter->descriptor))->field_info))[iter->field_info_index + 1]);
+ uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);
iter->array_size = (pb_size_t)((word0 >> 16) & 0x0FFF);
iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 28) << 6));
@@ -43,9 +43,9 @@ static bool load_descriptor_values(pb_field_iter_t *iter)
case 2: {
/* 4-word format */
- uint32_t word1 = PB_PROGMEM_READU32(((uint32_t *)PIC(((pb_msgdesc_t *)PIC(iter->descriptor))->field_info))[iter->field_info_index + 1]);
- uint32_t word2 = PB_PROGMEM_READU32(((uint32_t *)PIC(((pb_msgdesc_t *)PIC(iter->descriptor))->field_info))[iter->field_info_index + 2]);
- uint32_t word3 = PB_PROGMEM_READU32(((uint32_t *)PIC(((pb_msgdesc_t *)PIC(iter->descriptor))->field_info))[iter->field_info_index + 3]);
+ uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);
+ uint32_t word2 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 2]);
+ uint32_t word3 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 3]);
iter->array_size = (pb_size_t)(word0 >> 16);
iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 8) << 6));
@@ -57,10 +57,10 @@ static bool load_descriptor_values(pb_field_iter_t *iter)
default: {
/* 8-word format */
- uint32_t word1 = PB_PROGMEM_READU32(((uint32_t *)PIC(((pb_msgdesc_t *)PIC(iter->descriptor))->field_info))[iter->field_info_index + 1]);
- uint32_t word2 = PB_PROGMEM_READU32(((uint32_t *)PIC(((pb_msgdesc_t *)PIC(iter->descriptor))->field_info))[iter->field_info_index + 2]);
- uint32_t word3 = PB_PROGMEM_READU32(((uint32_t *)PIC(((pb_msgdesc_t *)PIC(iter->descriptor))->field_info))[iter->field_info_index + 3]);
- uint32_t word4 = PB_PROGMEM_READU32(((uint32_t *)PIC(((pb_msgdesc_t *)PIC(iter->descriptor))->field_info))[iter->field_info_index + 4]);
+ uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);
+ uint32_t word2 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 2]);
+ uint32_t word3 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 3]);
+ uint32_t word4 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 4]);
iter->array_size = (pb_size_t)word4;
iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 8) << 6));
@@ -109,7 +109,7 @@ static bool load_descriptor_values(pb_field_iter_t *iter)
if (PB_LTYPE_IS_SUBMSG(iter->type))
{
- iter->submsg_desc = PIC(((const pb_msgdesc_t * const *)PIC(((pb_msgdesc_t *)PIC(iter->descriptor))->submsg_info))[iter->submessage_index]);
+ iter->submsg_desc = iter->descriptor->submsg_info[iter->submessage_index];
}
else
{
@@ -139,7 +139,7 @@ static void advance_iterator(pb_field_iter_t *iter)
* - bits 2..7 give the lowest bits of tag number.
* - bits 8..15 give the field type.
*/
- uint32_t prev_descriptor = PB_PROGMEM_READU32(((uint32_t *)PIC(((pb_msgdesc_t *)PIC(iter->descriptor))->field_info))[iter->field_info_index]);
+ uint32_t prev_descriptor = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);
pb_type_t prev_type = (prev_descriptor >> 8) & 0xFF;
pb_size_t descriptor_len = (pb_size_t)(1 << (prev_descriptor & 3));
@@ -157,7 +157,7 @@ bool pb_field_iter_begin(pb_field_iter_t *iter, const pb_msgdesc_t *desc, void *
{
memset(iter, 0, sizeof(*iter));
- iter->descriptor = PIC(desc);
+ iter->descriptor = desc;
iter->message = message;
return load_descriptor_values(iter);
@@ -221,7 +221,7 @@ bool pb_field_iter_find(pb_field_iter_t *iter, uint32_t tag)
advance_iterator(iter);
/* Do fast check for tag number match */
- fieldinfo = PB_PROGMEM_READU32(((uint32_t *)PIC(((pb_msgdesc_t *)PIC(iter->descriptor))->field_info))[iter->field_info_index]);
+ fieldinfo = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);
if (((fieldinfo >> 2) & 0x3F) == (tag & 0x3F))
{
@@ -260,7 +260,8 @@ bool pb_field_iter_find_extension(pb_field_iter_t *iter)
advance_iterator(iter);
/* Do fast check for field type */
- fieldinfo = PB_PROGMEM_READU32(((uint32_t *)PIC(((pb_msgdesc_t *)PIC(iter->descriptor))->field_info))[iter->field_info_index]);
+ fieldinfo = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);
+
if (PB_LTYPE((fieldinfo >> 8) & 0xFF) == PB_LTYPE_EXTENSION)
{
return load_descriptor_values(iter);
diff --git a/vendor/nanopb/pb_decode.c b/vendor/nanopb/pb_decode.c
index d9fcc2dd..b3f96fc7 100644
--- a/vendor/nanopb/pb_decode.c
+++ b/vendor/nanopb/pb_decode.c
@@ -4,13 +4,14 @@
*/
/* Use the GCC warn_unused_result attribute to check that all return values
- * are propagated correctly. On other compilers and gcc before 3.4.0 just
- * ignore the annotation.
+ * are propagated correctly. On other compilers, gcc before 3.4.0 and iar
+ * before 9.40.1 just ignore the annotation.
*/
-#if !defined(__GNUC__) || ( __GNUC__ < 3) || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
- #define checkreturn
-#else
+#if (defined(__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))) || \
+ (defined(__IAR_SYSTEMS_ICC__) && (__VER__ >= 9040001))
#define checkreturn __attribute__((warn_unused_result))
+#else
+ #define checkreturn
#endif
#include "pb.h"
@@ -57,8 +58,6 @@ static void pb_release_single_field(pb_field_iter_t *field);
#define pb_uint64_t uint64_t
#endif
-#define PB_WT_PACKED ((pb_wire_type_t)0xFF)
-
typedef struct {
uint32_t bitfield[(PB_MAX_REQUIRED_FIELDS + 31) / 32];
} pb_fields_seen_t;
@@ -69,14 +68,12 @@ typedef struct {
static bool checkreturn buf_read(pb_istream_t *stream, pb_byte_t *buf, size_t count)
{
- size_t i;
const pb_byte_t *source = (const pb_byte_t*)stream->state;
stream->state = (pb_byte_t*)stream->state + count;
if (buf != NULL)
{
- for (i = 0; i < count; i++)
- buf[i] = source[i];
+ memcpy(buf, source, count * sizeof(pb_byte_t));
}
return true;
@@ -115,7 +112,11 @@ bool checkreturn pb_read(pb_istream_t *stream, pb_byte_t *buf, size_t count)
return false;
#endif
- stream->bytes_left -= count;
+ if (stream->bytes_left < count)
+ stream->bytes_left = 0;
+ else
+ stream->bytes_left -= count;
+
return true;
}
@@ -213,18 +214,20 @@ static bool checkreturn pb_decode_varint32_eof(pb_istream_t *stream, uint32_t *d
PB_RETURN_ERROR(stream, "varint overflow");
}
}
+ else if (bitpos == 28)
+ {
+ if ((byte & 0x70) != 0 && (byte & 0x78) != 0x78)
+ {
+ PB_RETURN_ERROR(stream, "varint overflow");
+ }
+ result |= (uint32_t)(byte & 0x0F) << bitpos;
+ }
else
{
result |= (uint32_t)(byte & 0x7F) << bitpos;
}
bitpos = (uint_fast8_t)(bitpos + 7);
} while (byte & 0x80);
-
- if (bitpos == 35 && (byte & 0x70) != 0)
- {
- /* The last byte was at bitpos=28, so only bottom 4 bits fit. */
- PB_RETURN_ERROR(stream, "varint overflow");
- }
}
*dest = result;
@@ -245,12 +248,12 @@ bool checkreturn pb_decode_varint(pb_istream_t *stream, uint64_t *dest)
do
{
- if (bitpos >= 64)
- PB_RETURN_ERROR(stream, "varint overflow");
-
if (!pb_readbyte(stream, &byte))
return false;
+ if (bitpos >= 63 && (byte & 0xFE) != 0)
+ PB_RETURN_ERROR(stream, "varint overflow");
+
result |= (uint64_t)(byte & 0x7F) << bitpos;
bitpos = (uint_fast8_t)(bitpos + 7);
} while (byte & 0x80);
@@ -532,7 +535,7 @@ static bool checkreturn decode_static_field(pb_istream_t *stream, pb_wire_type_t
/* Set default values for the submessage fields. */
if (field->submsg_desc->default_value != NULL ||
field->submsg_desc->field_callback != NULL ||
- ((const pb_msgdesc_t * const *)PIC(field->submsg_desc->submsg_info))[0] != NULL)
+ field->submsg_desc->submsg_info[0] != NULL)
{
pb_field_iter_t submsg_iter;
if (pb_field_iter_begin(&submsg_iter, field->submsg_desc, field->pData))
@@ -703,6 +706,12 @@ static bool checkreturn decode_pointer_field(pb_istream_t *stream, pb_wire_type_
/* Decode the array entry */
field->pData = *(char**)field->pField + field->data_size * (*size);
+ if (field->pData == NULL)
+ {
+ /* Shouldn't happen, but satisfies static analyzers */
+ status = false;
+ break;
+ }
initialize_pointer_field(field->pData, field);
if (!decode_basic_field(&substream, PB_WT_PACKED, field))
{
@@ -756,8 +765,11 @@ static bool checkreturn decode_callback_field(pb_istream_t *stream, pb_wire_type
do
{
prev_bytes_left = substream.bytes_left;
- if (!((bool (*)(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_iter_t *field))(PIC(field->descriptor->field_callback)))(&substream, NULL, field))
- PB_RETURN_ERROR(stream, "callback failed");
+ if (!field->descriptor->field_callback(&substream, NULL, field))
+ {
+ PB_SET_ERROR(stream, substream.errmsg ? substream.errmsg : "callback failed");
+ return false;
+ }
} while (substream.bytes_left > 0 && substream.bytes_left < prev_bytes_left);
if (!pb_close_string_substream(stream, &substream))
@@ -897,7 +909,7 @@ static bool pb_field_set_to_default(pb_field_iter_t *field)
if (PB_LTYPE_IS_SUBMSG(field->type) &&
(field->submsg_desc->default_value != NULL ||
field->submsg_desc->field_callback != NULL ||
- ((const pb_msgdesc_t * const *)PIC(field->submsg_desc->submsg_info))[0] != NULL))
+ field->submsg_desc->submsg_info[0] != NULL))
{
/* Initialize submessage to defaults.
* Only needed if it has default values
@@ -1156,7 +1168,7 @@ bool checkreturn pb_decode_ex(pb_istream_t *stream, const pb_msgdesc_t *fields,
status = pb_decode_inner(&substream, fields, dest_struct, flags);
if (!pb_close_string_substream(stream, &substream))
- return false;
+ status = false;
}
#ifdef PB_ENABLE_MALLOC
@@ -1320,6 +1332,13 @@ void pb_release(const pb_msgdesc_t *fields, void *dest_struct)
pb_release_single_field(&iter);
} while (pb_field_iter_next(&iter));
}
+#else
+void pb_release(const pb_msgdesc_t *fields, void *dest_struct)
+{
+ /* Nothing to release without PB_ENABLE_MALLOC. */
+ PB_UNUSED(fields);
+ PB_UNUSED(dest_struct);
+}
#endif
/* Field decoders */
@@ -1358,7 +1377,7 @@ bool pb_decode_fixed32(pb_istream_t *stream, void *dest)
if (!pb_read(stream, u.bytes, 4))
return false;
-#if defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN && CHAR_BIT == 8
+#if defined(PB_LITTLE_ENDIAN_8BIT) && PB_LITTLE_ENDIAN_8BIT == 1
/* fast path - if we know that we're on little endian, assign directly */
*(uint32_t*)dest = u.fixed32;
#else
@@ -1381,7 +1400,7 @@ bool pb_decode_fixed64(pb_istream_t *stream, void *dest)
if (!pb_read(stream, u.bytes, 8))
return false;
-#if defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN && CHAR_BIT == 8
+#if defined(PB_LITTLE_ENDIAN_8BIT) && PB_LITTLE_ENDIAN_8BIT == 1
/* fast path - if we know that we're on little endian, assign directly */
*(uint64_t*)dest = u.fixed64;
#else
diff --git a/vendor/nanopb/pb_decode.h b/vendor/nanopb/pb_decode.h
index 824acd4e..3f392b29 100644
--- a/vendor/nanopb/pb_decode.h
+++ b/vendor/nanopb/pb_decode.h
@@ -37,10 +37,21 @@ struct pb_istream_s
bool (*callback)(pb_istream_t *stream, pb_byte_t *buf, size_t count);
#endif
- void *state; /* Free field for use by callback implementation */
+ /* state is a free field for use of the callback function defined above.
+ * Note that when pb_istream_from_buffer() is used, it reserves this field
+ * for its own use.
+ */
+ void *state;
+
+ /* Maximum number of bytes left in this stream. Callback can report
+ * EOF before this limit is reached. Setting a limit is recommended
+ * when decoding directly from file or network streams to avoid
+ * denial-of-service by excessively long messages.
+ */
size_t bytes_left;
#ifndef PB_NO_ERRMSG
+ /* Pointer to constant (ROM) string when decoding function returns error */
const char *errmsg;
#endif
};
@@ -107,17 +118,11 @@ bool pb_decode_ex(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_s
#define pb_decode_delimited_noinit(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_DELIMITED | PB_DECODE_NOINIT)
#define pb_decode_nullterminated(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_NULLTERMINATED)
-#ifdef PB_ENABLE_MALLOC
/* Release any allocated pointer fields. If you use dynamic allocation, you should
* call this for any successfully decoded message when you are done with it. If
* pb_decode() returns with an error, the message is already released.
*/
void pb_release(const pb_msgdesc_t *fields, void *dest_struct);
-#else
-/* Allocation is not supported, so release is no-op */
-#define pb_release(fields, dest_struct) PB_UNUSED(fields); PB_UNUSED(dest_struct);
-#endif
-
/**************************************
* Functions for manipulating streams *
diff --git a/vendor/nanopb/pb_encode.c b/vendor/nanopb/pb_encode.c
index de716f7a..f9034a54 100644
--- a/vendor/nanopb/pb_encode.c
+++ b/vendor/nanopb/pb_encode.c
@@ -8,13 +8,14 @@
#include "pb_common.h"
/* Use the GCC warn_unused_result attribute to check that all return values
- * are propagated correctly. On other compilers and gcc before 3.4.0 just
- * ignore the annotation.
+ * are propagated correctly. On other compilers, gcc before 3.4.0 and iar
+ * before 9.40.1 just ignore the annotation.
*/
-#if !defined(__GNUC__) || ( __GNUC__ < 3) || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
- #define checkreturn
-#else
+#if (defined(__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))) || \
+ (defined(__IAR_SYSTEMS_ICC__) && (__VER__ >= 9040001))
#define checkreturn __attribute__((warn_unused_result))
+#else
+ #define checkreturn
#endif
/**************************************
@@ -51,12 +52,10 @@ static bool checkreturn pb_enc_fixed_length_bytes(pb_ostream_t *stream, const pb
static bool checkreturn buf_write(pb_ostream_t *stream, const pb_byte_t *buf, size_t count)
{
- size_t i;
pb_byte_t *dest = (pb_byte_t*)stream->state;
stream->state = dest + count;
- for (i = 0; i < count; i++)
- dest[i] = buf[i];
+ memcpy(dest, buf, count * sizeof(pb_byte_t));
return true;
}
@@ -65,7 +64,11 @@ pb_ostream_t pb_ostream_from_buffer(pb_byte_t *buf, size_t bufsize)
{
pb_ostream_t stream;
#ifdef PB_BUFFER_ONLY
- stream.callback = (void*)1; /* Just a marker value */
+ /* In PB_BUFFER_ONLY configuration the callback pointer is just int*.
+ * NULL pointer marks a sizing field, so put a non-NULL value to mark a buffer stream.
+ */
+ static const int marker = 0;
+ stream.callback = &marker;
#else
stream.callback = &buf_write;
#endif
@@ -622,8 +625,9 @@ bool checkreturn pb_encode_varint(pb_ostream_t *stream, pb_uint64_t value)
bool checkreturn pb_encode_svarint(pb_ostream_t *stream, pb_int64_t value)
{
pb_uint64_t zigzagged;
+ pb_uint64_t mask = ((pb_uint64_t)-1) >> 1; /* Satisfy clang -fsanitize=integer */
if (value < 0)
- zigzagged = ~((pb_uint64_t)value << 1);
+ zigzagged = ~(((pb_uint64_t)value & mask) << 1);
else
zigzagged = (pb_uint64_t)value << 1;
@@ -632,6 +636,10 @@ bool checkreturn pb_encode_svarint(pb_ostream_t *stream, pb_int64_t value)
bool checkreturn pb_encode_fixed32(pb_ostream_t *stream, const void *value)
{
+#if defined(PB_LITTLE_ENDIAN_8BIT) && PB_LITTLE_ENDIAN_8BIT == 1
+ /* Fast path if we know that we're on little endian */
+ return pb_write(stream, (const pb_byte_t*)value, 4);
+#else
uint32_t val = *(const uint32_t*)value;
pb_byte_t bytes[4];
bytes[0] = (pb_byte_t)(val & 0xFF);
@@ -639,11 +647,16 @@ bool checkreturn pb_encode_fixed32(pb_ostream_t *stream, const void *value)
bytes[2] = (pb_byte_t)((val >> 16) & 0xFF);
bytes[3] = (pb_byte_t)((val >> 24) & 0xFF);
return pb_write(stream, bytes, 4);
+#endif
}
#ifndef PB_WITHOUT_64BIT
bool checkreturn pb_encode_fixed64(pb_ostream_t *stream, const void *value)
{
+#if defined(PB_LITTLE_ENDIAN_8BIT) && PB_LITTLE_ENDIAN_8BIT == 1
+ /* Fast path if we know that we're on little endian */
+ return pb_write(stream, (const pb_byte_t*)value, 8);
+#else
uint64_t val = *(const uint64_t*)value;
pb_byte_t bytes[8];
bytes[0] = (pb_byte_t)(val & 0xFF);
@@ -655,6 +668,7 @@ bool checkreturn pb_encode_fixed64(pb_ostream_t *stream, const void *value)
bytes[6] = (pb_byte_t)((val >> 48) & 0xFF);
bytes[7] = (pb_byte_t)((val >> 56) & 0xFF);
return pb_write(stream, bytes, 8);
+#endif
}
#endif
diff --git a/vendor/nanopb/pb_encode.h b/vendor/nanopb/pb_encode.h
index 9cff22a4..6dc089da 100644
--- a/vendor/nanopb/pb_encode.h
+++ b/vendor/nanopb/pb_encode.h
@@ -33,15 +33,25 @@ struct pb_ostream_s
* Also, NULL pointer marks a 'sizing stream' that does not
* write anything.
*/
- int *callback;
+ const int *callback;
#else
bool (*callback)(pb_ostream_t *stream, const pb_byte_t *buf, size_t count);
#endif
- void *state; /* Free field for use by callback implementation. */
- size_t max_size; /* Limit number of output bytes written (or use SIZE_MAX). */
- size_t bytes_written; /* Number of bytes written so far. */
+
+ /* state is a free field for use of the callback function defined above.
+ * Note that when pb_ostream_from_buffer() is used, it reserves this field
+ * for its own use.
+ */
+ void *state;
+
+ /* Limit number of output bytes written. Can be set to SIZE_MAX. */
+ size_t max_size;
+
+ /* Number of bytes written so far. */
+ size_t bytes_written;
#ifndef PB_NO_ERRMSG
+ /* Pointer to constant (ROM) string when decoding function returns error */
const char *errmsg;
#endif
};
diff --git a/vendor/nanopb/requirements.txt b/vendor/nanopb/requirements.txt
new file mode 100644
index 00000000..cc689df3
--- /dev/null
+++ b/vendor/nanopb/requirements.txt
@@ -0,0 +1,2 @@
+protobuf
+grpcio-tools
\ No newline at end of file
diff --git a/vendor/nanopb/spm_headers/nanopb/pb.h b/vendor/nanopb/spm_headers/nanopb/pb.h
index e2be14d9..10249bb6 100644
--- a/vendor/nanopb/spm_headers/nanopb/pb.h
+++ b/vendor/nanopb/spm_headers/nanopb/pb.h
@@ -1 +1,922 @@
-../../pb.h
\ No newline at end of file
+/* Common parts of the nanopb library. Most of these are quite low-level
+ * stuff. For the high-level interface, see pb_encode.h and pb_decode.h.
+ */
+
+#ifndef PB_H_INCLUDED
+#define PB_H_INCLUDED
+
+/*****************************************************************
+ * Nanopb compilation time options. You can change these here by *
+ * uncommenting the lines, or on the compiler command line. *
+ *****************************************************************/
+
+/* Enable support for dynamically allocated fields */
+/* #define PB_ENABLE_MALLOC 1 */
+
+/* Define this if your CPU / compiler combination does not support
+ * unaligned memory access to packed structures. Note that packed
+ * structures are only used when requested in .proto options. */
+/* #define PB_NO_PACKED_STRUCTS 1 */
+
+/* Increase the number of required fields that are tracked.
+ * A compiler warning will tell if you need this. */
+/* #define PB_MAX_REQUIRED_FIELDS 256 */
+
+/* Add support for tag numbers > 65536 and fields larger than 65536 bytes. */
+/* #define PB_FIELD_32BIT 1 */
+
+/* Disable support for error messages in order to save some code space. */
+/* #define PB_NO_ERRMSG 1 */
+
+/* Disable support for custom streams (support only memory buffers). */
+/* #define PB_BUFFER_ONLY 1 */
+
+/* Disable support for 64-bit datatypes, for compilers without int64_t
+ or to save some code space. */
+/* #define PB_WITHOUT_64BIT 1 */
+
+/* Don't encode scalar arrays as packed. This is only to be used when
+ * the decoder on the receiving side cannot process packed scalar arrays.
+ * Such example is older protobuf.js. */
+/* #define PB_ENCODE_ARRAYS_UNPACKED 1 */
+
+/* Enable conversion of doubles to floats for platforms that do not
+ * support 64-bit doubles. Most commonly AVR. */
+/* #define PB_CONVERT_DOUBLE_FLOAT 1 */
+
+/* Check whether incoming strings are valid UTF-8 sequences. Slows down
+ * the string processing slightly and slightly increases code size. */
+/* #define PB_VALIDATE_UTF8 1 */
+
+/* This can be defined if the platform is little-endian and has 8-bit bytes.
+ * Normally it is automatically detected based on __BYTE_ORDER__ macro. */
+/* #define PB_LITTLE_ENDIAN_8BIT 1 */
+
+/* Configure static assert mechanism. Instead of changing these, set your
+ * compiler to C11 standard mode if possible. */
+/* #define PB_C99_STATIC_ASSERT 1 */
+/* #define PB_NO_STATIC_ASSERT 1 */
+
+/******************************************************************
+ * You usually don't need to change anything below this line. *
+ * Feel free to look around and use the defined macros, though. *
+ ******************************************************************/
+
+
+/* Version of the nanopb library. Just in case you want to check it in
+ * your own program. */
+#define NANOPB_VERSION "nanopb-0.4.9.1"
+
+/* Include all the system headers needed by nanopb. You will need the
+ * definitions of the following:
+ * - strlen, memcpy, memset functions
+ * - [u]int_least8_t, uint_fast8_t, [u]int_least16_t, [u]int32_t, [u]int64_t
+ * - size_t
+ * - bool
+ *
+ * If you don't have the standard header files, you can instead provide
+ * a custom header that defines or includes all this. In that case,
+ * define PB_SYSTEM_HEADER to the path of this file.
+ */
+#ifdef PB_SYSTEM_HEADER
+#include PB_SYSTEM_HEADER
+#else
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <limits.h>
+
+#ifdef PB_ENABLE_MALLOC
+#include <stdlib.h>
+#endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Macro for defining packed structures (compiler dependent).
+ * This just reduces memory requirements, but is not required.
+ */
+#if defined(PB_NO_PACKED_STRUCTS)
+ /* Disable struct packing */
+# define PB_PACKED_STRUCT_START
+# define PB_PACKED_STRUCT_END
+# define pb_packed
+#elif defined(__GNUC__) || defined(__clang__)
+ /* For GCC and clang */
+# define PB_PACKED_STRUCT_START
+# define PB_PACKED_STRUCT_END
+# define pb_packed __attribute__((packed))
+#elif defined(__ICCARM__) || defined(__CC_ARM)
+ /* For IAR ARM and Keil MDK-ARM compilers */
+# define PB_PACKED_STRUCT_START _Pragma("pack(push, 1)")
+# define PB_PACKED_STRUCT_END _Pragma("pack(pop)")
+# define pb_packed
+#elif defined(_MSC_VER) && (_MSC_VER >= 1500)
+ /* For Microsoft Visual C++ */
+# define PB_PACKED_STRUCT_START __pragma(pack(push, 1))
+# define PB_PACKED_STRUCT_END __pragma(pack(pop))
+# define pb_packed
+#else
+ /* Unknown compiler */
+# define PB_PACKED_STRUCT_START
+# define PB_PACKED_STRUCT_END
+# define pb_packed
+#endif
+
+/* Detect endianness */
+#ifndef PB_LITTLE_ENDIAN_8BIT
+#if ((defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN) || \
+ (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
+ defined(__LITTLE_ENDIAN__) || defined(__ARMEL__) || \
+ defined(__THUMBEL__) || defined(__AARCH64EL__) || defined(_MIPSEL) || \
+ defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM)) \
+ && CHAR_BIT == 8
+#define PB_LITTLE_ENDIAN_8BIT 1
+#endif
+#endif
+
+/* Handy macro for suppressing unreferenced-parameter compiler warnings. */
+#ifndef PB_UNUSED
+#define PB_UNUSED(x) (void)(x)
+#endif
+
+/* Harvard-architecture processors may need special attributes for storing
+ * field information in program memory. */
+#ifndef PB_PROGMEM
+#ifdef __AVR__
+#include <avr/pgmspace.h>
+#define PB_PROGMEM PROGMEM
+#define PB_PROGMEM_READU32(x) pgm_read_dword(&x)
+#else
+#define PB_PROGMEM
+#define PB_PROGMEM_READU32(x) (x)
+#endif
+#endif
+
+/* Compile-time assertion, used for checking compatible compilation options.
+ * If this does not work properly on your compiler, use
+ * #define PB_NO_STATIC_ASSERT to disable it.
+ *
+ * But before doing that, check carefully the error message / place where it
+ * comes from to see if the error has a real cause. Unfortunately the error
+ * message is not always very clear to read, but you can see the reason better
+ * in the place where the PB_STATIC_ASSERT macro was called.
+ */
+#ifndef PB_NO_STATIC_ASSERT
+# ifndef PB_STATIC_ASSERT
+# if defined(__ICCARM__)
+ /* IAR has static_assert keyword but no _Static_assert */
+# define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
+# elif defined(_MSC_VER) && (!defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112)
+ /* MSVC in C89 mode supports static_assert() keyword anyway */
+# define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
+# elif defined(PB_C99_STATIC_ASSERT)
+ /* Classic negative-size-array static assert mechanism */
+# define PB_STATIC_ASSERT(COND,MSG) typedef char PB_STATIC_ASSERT_MSG(MSG, __LINE__, __COUNTER__)[(COND)?1:-1];
+# define PB_STATIC_ASSERT_MSG(MSG, LINE, COUNTER) PB_STATIC_ASSERT_MSG_(MSG, LINE, COUNTER)
+# define PB_STATIC_ASSERT_MSG_(MSG, LINE, COUNTER) pb_static_assertion_##MSG##_##LINE##_##COUNTER
+# elif defined(__cplusplus)
+ /* C++11 standard static_assert mechanism */
+# define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
+# else
+ /* C11 standard _Static_assert mechanism */
+# define PB_STATIC_ASSERT(COND,MSG) _Static_assert(COND,#MSG);
+# endif
+# endif
+#else
+ /* Static asserts disabled by PB_NO_STATIC_ASSERT */
+# define PB_STATIC_ASSERT(COND,MSG)
+#endif
+
+/* Test that PB_STATIC_ASSERT works
+ * If you get errors here, you may need to do one of these:
+ * - Enable C11 standard support in your compiler
+ * - Define PB_C99_STATIC_ASSERT to enable C99 standard support
+ * - Define PB_NO_STATIC_ASSERT to disable static asserts altogether
+ */
+PB_STATIC_ASSERT(1, STATIC_ASSERT_IS_NOT_WORKING)
+
+/* Number of required fields to keep track of. */
+#ifndef PB_MAX_REQUIRED_FIELDS
+#define PB_MAX_REQUIRED_FIELDS 64
+#endif
+
+#if PB_MAX_REQUIRED_FIELDS < 64
+#error You should not lower PB_MAX_REQUIRED_FIELDS from the default value (64).
+#endif
+
+#ifdef PB_WITHOUT_64BIT
+#ifdef PB_CONVERT_DOUBLE_FLOAT
+/* Cannot use doubles without 64-bit types */
+#undef PB_CONVERT_DOUBLE_FLOAT
+#endif
+#endif
+
+/* Data type for storing encoded data and other byte streams.
+ * This typedef exists to support platforms where uint8_t does not exist.
+ * You can regard it as equivalent on uint8_t on other platforms.
+ */
+#if defined(PB_BYTE_T_OVERRIDE)
+typedef PB_BYTE_T_OVERRIDE pb_byte_t;
+#elif defined(UINT8_MAX)
+typedef uint8_t pb_byte_t;
+#else
+typedef uint_least8_t pb_byte_t;
+#endif
+
+/* List of possible field types. These are used in the autogenerated code.
+ * Least-significant 4 bits tell the scalar type
+ * Most-significant 4 bits specify repeated/required/packed etc.
+ */
+typedef pb_byte_t pb_type_t;
+
+/**** Field data types ****/
+
+/* Numeric types */
+#define PB_LTYPE_BOOL 0x00U /* bool */
+#define PB_LTYPE_VARINT 0x01U /* int32, int64, enum, bool */
+#define PB_LTYPE_UVARINT 0x02U /* uint32, uint64 */
+#define PB_LTYPE_SVARINT 0x03U /* sint32, sint64 */
+#define PB_LTYPE_FIXED32 0x04U /* fixed32, sfixed32, float */
+#define PB_LTYPE_FIXED64 0x05U /* fixed64, sfixed64, double */
+
+/* Marker for last packable field type. */
+#define PB_LTYPE_LAST_PACKABLE 0x05U
+
+/* Byte array with pre-allocated buffer.
+ * data_size is the length of the allocated PB_BYTES_ARRAY structure. */
+#define PB_LTYPE_BYTES 0x06U
+
+/* String with pre-allocated buffer.
+ * data_size is the maximum length. */
+#define PB_LTYPE_STRING 0x07U
+
+/* Submessage
+ * submsg_fields is pointer to field descriptions */
+#define PB_LTYPE_SUBMESSAGE 0x08U
+
+/* Submessage with pre-decoding callback
+ * The pre-decoding callback is stored as pb_callback_t right before pSize.
+ * submsg_fields is pointer to field descriptions */
+#define PB_LTYPE_SUBMSG_W_CB 0x09U
+
+/* Extension pseudo-field
+ * The field contains a pointer to pb_extension_t */
+#define PB_LTYPE_EXTENSION 0x0AU
+
+/* Byte array with inline, pre-allocated buffer.
+ * data_size is the length of the inline, allocated buffer.
+ * This differs from PB_LTYPE_BYTES by defining the element as
+ * pb_byte_t[data_size] rather than pb_bytes_array_t. */
+#define PB_LTYPE_FIXED_LENGTH_BYTES 0x0BU
+
+/* Number of declared LTYPES */
+#define PB_LTYPES_COUNT 0x0CU
+#define PB_LTYPE_MASK 0x0FU
+
+/**** Field repetition rules ****/
+
+#define PB_HTYPE_REQUIRED 0x00U
+#define PB_HTYPE_OPTIONAL 0x10U
+#define PB_HTYPE_SINGULAR 0x10U
+#define PB_HTYPE_REPEATED 0x20U
+#define PB_HTYPE_FIXARRAY 0x20U
+#define PB_HTYPE_ONEOF 0x30U
+#define PB_HTYPE_MASK 0x30U
+
+/**** Field allocation types ****/
+
+#define PB_ATYPE_STATIC 0x00U
+#define PB_ATYPE_POINTER 0x80U
+#define PB_ATYPE_CALLBACK 0x40U
+#define PB_ATYPE_MASK 0xC0U
+
+#define PB_ATYPE(x) ((x) & PB_ATYPE_MASK)
+#define PB_HTYPE(x) ((x) & PB_HTYPE_MASK)
+#define PB_LTYPE(x) ((x) & PB_LTYPE_MASK)
+#define PB_LTYPE_IS_SUBMSG(x) (PB_LTYPE(x) == PB_LTYPE_SUBMESSAGE || \
+ PB_LTYPE(x) == PB_LTYPE_SUBMSG_W_CB)
+
+/* Data type used for storing sizes of struct fields
+ * and array counts.
+ */
+#if defined(PB_FIELD_32BIT)
+ typedef uint32_t pb_size_t;
+ typedef int32_t pb_ssize_t;
+#else
+ typedef uint_least16_t pb_size_t;
+ typedef int_least16_t pb_ssize_t;
+#endif
+#define PB_SIZE_MAX ((pb_size_t)-1)
+
+/* Forward declaration of struct types */
+typedef struct pb_istream_s pb_istream_t;
+typedef struct pb_ostream_s pb_ostream_t;
+typedef struct pb_field_iter_s pb_field_iter_t;
+
+/* This structure is used in auto-generated constants
+ * to specify struct fields.
+ */
+typedef struct pb_msgdesc_s pb_msgdesc_t;
+struct pb_msgdesc_s {
+ const uint32_t *field_info;
+ const pb_msgdesc_t * const * submsg_info;
+ const pb_byte_t *default_value;
+
+ bool (*field_callback)(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_iter_t *field);
+
+ pb_size_t field_count;
+ pb_size_t required_field_count;
+ pb_size_t largest_tag;
+};
+
+/* Iterator for message descriptor */
+struct pb_field_iter_s {
+ const pb_msgdesc_t *descriptor; /* Pointer to message descriptor constant */
+ void *message; /* Pointer to start of the structure */
+
+ pb_size_t index; /* Index of the field */
+ pb_size_t field_info_index; /* Index to descriptor->field_info array */
+ pb_size_t required_field_index; /* Index that counts only the required fields */
+ pb_size_t submessage_index; /* Index that counts only submessages */
+
+ pb_size_t tag; /* Tag of current field */
+ pb_size_t data_size; /* sizeof() of a single item */
+ pb_size_t array_size; /* Number of array entries */
+ pb_type_t type; /* Type of current field */
+
+ void *pField; /* Pointer to current field in struct */
+ void *pData; /* Pointer to current data contents. Different than pField for arrays and pointers. */
+ void *pSize; /* Pointer to count/has field */
+
+ const pb_msgdesc_t *submsg_desc; /* For submessage fields, pointer to field descriptor for the submessage. */
+};
+
+/* For compatibility with legacy code */
+typedef pb_field_iter_t pb_field_t;
+
+/* Make sure that the standard integer types are of the expected sizes.
+ * Otherwise fixed32/fixed64 fields can break.
+ *
+ * If you get errors here, it probably means that your stdint.h is not
+ * correct for your platform.
+ */
+#ifndef PB_WITHOUT_64BIT
+PB_STATIC_ASSERT(sizeof(int64_t) == 2 * sizeof(int32_t), INT64_T_WRONG_SIZE)
+PB_STATIC_ASSERT(sizeof(uint64_t) == 2 * sizeof(uint32_t), UINT64_T_WRONG_SIZE)
+#endif
+
+/* This structure is used for 'bytes' arrays.
+ * It has the number of bytes in the beginning, and after that an array.
+ * Note that actual structs used will have a different length of bytes array.
+ */
+#define PB_BYTES_ARRAY_T(n) struct { pb_size_t size; pb_byte_t bytes[n]; }
+#define PB_BYTES_ARRAY_T_ALLOCSIZE(n) ((size_t)n + offsetof(pb_bytes_array_t, bytes))
+
+struct pb_bytes_array_s {
+ pb_size_t size;
+ pb_byte_t bytes[1];
+};
+typedef struct pb_bytes_array_s pb_bytes_array_t;
+
+/* This structure is used for giving the callback function.
+ * It is stored in the message structure and filled in by the method that
+ * calls pb_decode.
+ *
+ * The decoding callback will be given a limited-length stream
+ * If the wire type was string, the length is the length of the string.
+ * If the wire type was a varint/fixed32/fixed64, the length is the length
+ * of the actual value.
+ * The function may be called multiple times (especially for repeated types,
+ * but also otherwise if the message happens to contain the field multiple
+ * times.)
+ *
+ * The encoding callback will receive the actual output stream.
+ * It should write all the data in one call, including the field tag and
+ * wire type. It can write multiple fields.
+ *
+ * The callback can be null if you want to skip a field.
+ */
+typedef struct pb_callback_s pb_callback_t;
+struct pb_callback_s {
+ /* Callback functions receive a pointer to the arg field.
+ * You can access the value of the field as *arg, and modify it if needed.
+ */
+ union {
+ bool (*decode)(pb_istream_t *stream, const pb_field_t *field, void **arg);
+ bool (*encode)(pb_ostream_t *stream, const pb_field_t *field, void * const *arg);
+ } funcs;
+
+ /* Free arg for use by callback */
+ void *arg;
+};
+
+extern bool pb_default_field_callback(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_t *field);
+
+/* Wire types. Library user needs these only in encoder callbacks. */
+typedef enum {
+ PB_WT_VARINT = 0,
+ PB_WT_64BIT = 1,
+ PB_WT_STRING = 2,
+ PB_WT_32BIT = 5,
+ PB_WT_PACKED = 255 /* PB_WT_PACKED is internal marker for packed arrays. */
+} pb_wire_type_t;
+
+/* Structure for defining the handling of unknown/extension fields.
+ * Usually the pb_extension_type_t structure is automatically generated,
+ * while the pb_extension_t structure is created by the user. However,
+ * if you want to catch all unknown fields, you can also create a custom
+ * pb_extension_type_t with your own callback.
+ */
+typedef struct pb_extension_type_s pb_extension_type_t;
+typedef struct pb_extension_s pb_extension_t;
+struct pb_extension_type_s {
+ /* Called for each unknown field in the message.
+ * If you handle the field, read off all of its data and return true.
+ * If you do not handle the field, do not read anything and return true.
+ * If you run into an error, return false.
+ * Set to NULL for default handler.
+ */
+ bool (*decode)(pb_istream_t *stream, pb_extension_t *extension,
+ uint32_t tag, pb_wire_type_t wire_type);
+
+ /* Called once after all regular fields have been encoded.
+ * If you have something to write, do so and return true.
+ * If you do not have anything to write, just return true.
+ * If you run into an error, return false.
+ * Set to NULL for default handler.
+ */
+ bool (*encode)(pb_ostream_t *stream, const pb_extension_t *extension);
+
+ /* Free field for use by the callback. */
+ const void *arg;
+};
+
+struct pb_extension_s {
+ /* Type describing the extension field. Usually you'll initialize
+ * this to a pointer to the automatically generated structure. */
+ const pb_extension_type_t *type;
+
+ /* Destination for the decoded data. This must match the datatype
+ * of the extension field. */
+ void *dest;
+
+ /* Pointer to the next extension handler, or NULL.
+ * If this extension does not match a field, the next handler is
+ * automatically called. */
+ pb_extension_t *next;
+
+ /* The decoder sets this to true if the extension was found.
+ * Ignored for encoding. */
+ bool found;
+};
+
+#define pb_extension_init_zero {NULL,NULL,NULL,false}
+
+/* Memory allocation functions to use. You can define pb_realloc and
+ * pb_free to custom functions if you want. */
+#ifdef PB_ENABLE_MALLOC
+# ifndef pb_realloc
+# define pb_realloc(ptr, size) realloc(ptr, size)
+# endif
+# ifndef pb_free
+# define pb_free(ptr) free(ptr)
+# endif
+#endif
+
+/* This is used to inform about need to regenerate .pb.h/.pb.c files. */
+#define PB_PROTO_HEADER_VERSION 40
+
+/* These macros are used to declare pb_field_t's in the constant array. */
+/* Size of a structure member, in bytes. */
+#define pb_membersize(st, m) (sizeof ((st*)0)->m)
+/* Number of entries in an array. */
+#define pb_arraysize(st, m) (pb_membersize(st, m) / pb_membersize(st, m[0]))
+/* Delta from start of one member to the start of another member. */
+#define pb_delta(st, m1, m2) ((int)offsetof(st, m1) - (int)offsetof(st, m2))
+
+/* Force expansion of macro value */
+#define PB_EXPAND(x) x
+
+/* Binding of a message field set into a specific structure */
+#define PB_BIND(msgname, structname, width) \
+ const uint32_t structname ## _field_info[] PB_PROGMEM = \
+ { \
+ msgname ## _FIELDLIST(PB_GEN_FIELD_INFO_ ## width, structname) \
+ 0 \
+ }; \
+ const pb_msgdesc_t* const structname ## _submsg_info[] = \
+ { \
+ msgname ## _FIELDLIST(PB_GEN_SUBMSG_INFO, structname) \
+ NULL \
+ }; \
+ const pb_msgdesc_t structname ## _msg = \
+ { \
+ structname ## _field_info, \
+ structname ## _submsg_info, \
+ msgname ## _DEFAULT, \
+ msgname ## _CALLBACK, \
+ 0 msgname ## _FIELDLIST(PB_GEN_FIELD_COUNT, structname), \
+ 0 msgname ## _FIELDLIST(PB_GEN_REQ_FIELD_COUNT, structname), \
+ 0 msgname ## _FIELDLIST(PB_GEN_LARGEST_TAG, structname), \
+ }; \
+ msgname ## _FIELDLIST(PB_GEN_FIELD_INFO_ASSERT_ ## width, structname)
+
+#define PB_GEN_FIELD_COUNT(structname, atype, htype, ltype, fieldname, tag) +1
+#define PB_GEN_REQ_FIELD_COUNT(structname, atype, htype, ltype, fieldname, tag) \
+ + (PB_HTYPE_ ## htype == PB_HTYPE_REQUIRED)
+#define PB_GEN_LARGEST_TAG(structname, atype, htype, ltype, fieldname, tag) \
+ * 0 + tag
+
+/* X-macro for generating the entries in struct_field_info[] array. */
+#define PB_GEN_FIELD_INFO_1(structname, atype, htype, ltype, fieldname, tag) \
+ PB_FIELDINFO_1(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+ PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_GEN_FIELD_INFO_2(structname, atype, htype, ltype, fieldname, tag) \
+ PB_FIELDINFO_2(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+ PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_GEN_FIELD_INFO_4(structname, atype, htype, ltype, fieldname, tag) \
+ PB_FIELDINFO_4(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+ PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_GEN_FIELD_INFO_8(structname, atype, htype, ltype, fieldname, tag) \
+ PB_FIELDINFO_8(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+ PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_GEN_FIELD_INFO_AUTO(structname, atype, htype, ltype, fieldname, tag) \
+ PB_FIELDINFO_AUTO2(PB_FIELDINFO_WIDTH_AUTO(_PB_ATYPE_ ## atype, _PB_HTYPE_ ## htype, _PB_LTYPE_ ## ltype), \
+ tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+ PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_FIELDINFO_AUTO2(width, tag, type, data_offset, data_size, size_offset, array_size) \
+ PB_FIELDINFO_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size)
+
+#define PB_FIELDINFO_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size) \
+ PB_FIELDINFO_ ## width(tag, type, data_offset, data_size, size_offset, array_size)
+
+/* X-macro for generating asserts that entries fit in struct_field_info[] array.
+ * The structure of macros here must match the structure above in PB_GEN_FIELD_INFO_x(),
+ * but it is not easily reused because of how macro substitutions work. */
+#define PB_GEN_FIELD_INFO_ASSERT_1(structname, atype, htype, ltype, fieldname, tag) \
+ PB_FIELDINFO_ASSERT_1(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+ PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_GEN_FIELD_INFO_ASSERT_2(structname, atype, htype, ltype, fieldname, tag) \
+ PB_FIELDINFO_ASSERT_2(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+ PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_GEN_FIELD_INFO_ASSERT_4(structname, atype, htype, ltype, fieldname, tag) \
+ PB_FIELDINFO_ASSERT_4(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+ PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_GEN_FIELD_INFO_ASSERT_8(structname, atype, htype, ltype, fieldname, tag) \
+ PB_FIELDINFO_ASSERT_8(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+ PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_GEN_FIELD_INFO_ASSERT_AUTO(structname, atype, htype, ltype, fieldname, tag) \
+ PB_FIELDINFO_ASSERT_AUTO2(PB_FIELDINFO_WIDTH_AUTO(_PB_ATYPE_ ## atype, _PB_HTYPE_ ## htype, _PB_LTYPE_ ## ltype), \
+ tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \
+ PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \
+ PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname))
+
+#define PB_FIELDINFO_ASSERT_AUTO2(width, tag, type, data_offset, data_size, size_offset, array_size) \
+ PB_FIELDINFO_ASSERT_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size)
+
+#define PB_FIELDINFO_ASSERT_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size) \
+ PB_FIELDINFO_ASSERT_ ## width(tag, type, data_offset, data_size, size_offset, array_size)
+
+#define PB_DATA_OFFSET_STATIC(htype, structname, fieldname) PB_DO ## htype(structname, fieldname)
+#define PB_DATA_OFFSET_POINTER(htype, structname, fieldname) PB_DO ## htype(structname, fieldname)
+#define PB_DATA_OFFSET_CALLBACK(htype, structname, fieldname) PB_DO ## htype(structname, fieldname)
+#define PB_DO_PB_HTYPE_REQUIRED(structname, fieldname) offsetof(structname, fieldname)
+#define PB_DO_PB_HTYPE_SINGULAR(structname, fieldname) offsetof(structname, fieldname)
+#define PB_DO_PB_HTYPE_ONEOF(structname, fieldname) offsetof(structname, PB_ONEOF_NAME(FULL, fieldname))
+#define PB_DO_PB_HTYPE_OPTIONAL(structname, fieldname) offsetof(structname, fieldname)
+#define PB_DO_PB_HTYPE_REPEATED(structname, fieldname) offsetof(structname, fieldname)
+#define PB_DO_PB_HTYPE_FIXARRAY(structname, fieldname) offsetof(structname, fieldname)
+
+#define PB_SIZE_OFFSET_STATIC(htype, structname, fieldname) PB_SO ## htype(structname, fieldname)
+#define PB_SIZE_OFFSET_POINTER(htype, structname, fieldname) PB_SO_PTR ## htype(structname, fieldname)
+#define PB_SIZE_OFFSET_CALLBACK(htype, structname, fieldname) PB_SO_CB ## htype(structname, fieldname)
+#define PB_SO_PB_HTYPE_REQUIRED(structname, fieldname) 0
+#define PB_SO_PB_HTYPE_SINGULAR(structname, fieldname) 0
+#define PB_SO_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF2(structname, PB_ONEOF_NAME(FULL, fieldname), PB_ONEOF_NAME(UNION, fieldname))
+#define PB_SO_PB_HTYPE_ONEOF2(structname, fullname, unionname) PB_SO_PB_HTYPE_ONEOF3(structname, fullname, unionname)
+#define PB_SO_PB_HTYPE_ONEOF3(structname, fullname, unionname) pb_delta(structname, fullname, which_ ## unionname)
+#define PB_SO_PB_HTYPE_OPTIONAL(structname, fieldname) pb_delta(structname, fieldname, has_ ## fieldname)
+#define PB_SO_PB_HTYPE_REPEATED(structname, fieldname) pb_delta(structname, fieldname, fieldname ## _count)
+#define PB_SO_PB_HTYPE_FIXARRAY(structname, fieldname) 0
+#define PB_SO_PTR_PB_HTYPE_REQUIRED(structname, fieldname) 0
+#define PB_SO_PTR_PB_HTYPE_SINGULAR(structname, fieldname) 0
+#define PB_SO_PTR_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF(structname, fieldname)
+#define PB_SO_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) 0
+#define PB_SO_PTR_PB_HTYPE_REPEATED(structname, fieldname) PB_SO_PB_HTYPE_REPEATED(structname, fieldname)
+#define PB_SO_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) 0
+#define PB_SO_CB_PB_HTYPE_REQUIRED(structname, fieldname) 0
+#define PB_SO_CB_PB_HTYPE_SINGULAR(structname, fieldname) 0
+#define PB_SO_CB_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF(structname, fieldname)
+#define PB_SO_CB_PB_HTYPE_OPTIONAL(structname, fieldname) 0
+#define PB_SO_CB_PB_HTYPE_REPEATED(structname, fieldname) 0
+#define PB_SO_CB_PB_HTYPE_FIXARRAY(structname, fieldname) 0
+
+#define PB_ARRAY_SIZE_STATIC(htype, structname, fieldname) PB_AS ## htype(structname, fieldname)
+#define PB_ARRAY_SIZE_POINTER(htype, structname, fieldname) PB_AS_PTR ## htype(structname, fieldname)
+#define PB_ARRAY_SIZE_CALLBACK(htype, structname, fieldname) 1
+#define PB_AS_PB_HTYPE_REQUIRED(structname, fieldname) 1
+#define PB_AS_PB_HTYPE_SINGULAR(structname, fieldname) 1
+#define PB_AS_PB_HTYPE_OPTIONAL(structname, fieldname) 1
+#define PB_AS_PB_HTYPE_ONEOF(structname, fieldname) 1
+#define PB_AS_PB_HTYPE_REPEATED(structname, fieldname) pb_arraysize(structname, fieldname)
+#define PB_AS_PB_HTYPE_FIXARRAY(structname, fieldname) pb_arraysize(structname, fieldname)
+#define PB_AS_PTR_PB_HTYPE_REQUIRED(structname, fieldname) 1
+#define PB_AS_PTR_PB_HTYPE_SINGULAR(structname, fieldname) 1
+#define PB_AS_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) 1
+#define PB_AS_PTR_PB_HTYPE_ONEOF(structname, fieldname) 1
+#define PB_AS_PTR_PB_HTYPE_REPEATED(structname, fieldname) 1
+#define PB_AS_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) pb_arraysize(structname, fieldname[0])
+
+#define PB_DATA_SIZE_STATIC(htype, structname, fieldname) PB_DS ## htype(structname, fieldname)
+#define PB_DATA_SIZE_POINTER(htype, structname, fieldname) PB_DS_PTR ## htype(structname, fieldname)
+#define PB_DATA_SIZE_CALLBACK(htype, structname, fieldname) PB_DS_CB ## htype(structname, fieldname)
+#define PB_DS_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname)
+#define PB_DS_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname)
+#define PB_DS_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname)
+#define PB_DS_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname))
+#define PB_DS_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname[0])
+#define PB_DS_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname[0])
+#define PB_DS_PTR_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname[0])
+#define PB_DS_PTR_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname[0])
+#define PB_DS_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname[0])
+#define PB_DS_PTR_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname)[0])
+#define PB_DS_PTR_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname[0])
+#define PB_DS_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname[0][0])
+#define PB_DS_CB_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname)
+#define PB_DS_CB_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname)
+#define PB_DS_CB_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname)
+#define PB_DS_CB_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname))
+#define PB_DS_CB_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname)
+#define PB_DS_CB_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname)
+
+#define PB_ONEOF_NAME(type, tuple) PB_EXPAND(PB_ONEOF_NAME_ ## type tuple)
+#define PB_ONEOF_NAME_UNION(unionname,membername,fullname) unionname
+#define PB_ONEOF_NAME_MEMBER(unionname,membername,fullname) membername
+#define PB_ONEOF_NAME_FULL(unionname,membername,fullname) fullname
+
+#define PB_GEN_SUBMSG_INFO(structname, atype, htype, ltype, fieldname, tag) \
+ PB_SUBMSG_INFO_ ## htype(_PB_LTYPE_ ## ltype, structname, fieldname)
+
+#define PB_SUBMSG_INFO_REQUIRED(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
+#define PB_SUBMSG_INFO_SINGULAR(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
+#define PB_SUBMSG_INFO_OPTIONAL(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
+#define PB_SUBMSG_INFO_ONEOF(ltype, structname, fieldname) PB_SUBMSG_INFO_ONEOF2(ltype, structname, PB_ONEOF_NAME(UNION, fieldname), PB_ONEOF_NAME(MEMBER, fieldname))
+#define PB_SUBMSG_INFO_ONEOF2(ltype, structname, unionname, membername) PB_SUBMSG_INFO_ONEOF3(ltype, structname, unionname, membername)
+#define PB_SUBMSG_INFO_ONEOF3(ltype, structname, unionname, membername) PB_SI ## ltype(structname ## _ ## unionname ## _ ## membername ## _MSGTYPE)
+#define PB_SUBMSG_INFO_REPEATED(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
+#define PB_SUBMSG_INFO_FIXARRAY(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE)
+#define PB_SI_PB_LTYPE_BOOL(t)
+#define PB_SI_PB_LTYPE_BYTES(t)
+#define PB_SI_PB_LTYPE_DOUBLE(t)
+#define PB_SI_PB_LTYPE_ENUM(t)
+#define PB_SI_PB_LTYPE_UENUM(t)
+#define PB_SI_PB_LTYPE_FIXED32(t)
+#define PB_SI_PB_LTYPE_FIXED64(t)
+#define PB_SI_PB_LTYPE_FLOAT(t)
+#define PB_SI_PB_LTYPE_INT32(t)
+#define PB_SI_PB_LTYPE_INT64(t)
+#define PB_SI_PB_LTYPE_MESSAGE(t) PB_SUBMSG_DESCRIPTOR(t)
+#define PB_SI_PB_LTYPE_MSG_W_CB(t) PB_SUBMSG_DESCRIPTOR(t)
+#define PB_SI_PB_LTYPE_SFIXED32(t)
+#define PB_SI_PB_LTYPE_SFIXED64(t)
+#define PB_SI_PB_LTYPE_SINT32(t)
+#define PB_SI_PB_LTYPE_SINT64(t)
+#define PB_SI_PB_LTYPE_STRING(t)
+#define PB_SI_PB_LTYPE_UINT32(t)
+#define PB_SI_PB_LTYPE_UINT64(t)
+#define PB_SI_PB_LTYPE_EXTENSION(t)
+#define PB_SI_PB_LTYPE_FIXED_LENGTH_BYTES(t)
+#define PB_SUBMSG_DESCRIPTOR(t) &(t ## _msg),
+
+/* The field descriptors use a variable width format, with width of either
+ * 1, 2, 4 or 8 of 32-bit words. The two lowest bytes of the first byte always
+ * encode the descriptor size, 6 lowest bits of field tag number, and 8 bits
+ * of the field type.
+ *
+ * Descriptor size is encoded as 0 = 1 word, 1 = 2 words, 2 = 4 words, 3 = 8 words.
+ *
+ * Formats, listed starting with the least significant bit of the first word.
+ * 1 word: [2-bit len] [6-bit tag] [8-bit type] [8-bit data_offset] [4-bit size_offset] [4-bit data_size]
+ *
+ * 2 words: [2-bit len] [6-bit tag] [8-bit type] [12-bit array_size] [4-bit size_offset]
+ * [16-bit data_offset] [12-bit data_size] [4-bit tag>>6]
+ *
+ * 4 words: [2-bit len] [6-bit tag] [8-bit type] [16-bit array_size]
+ * [8-bit size_offset] [24-bit tag>>6]
+ * [32-bit data_offset]
+ * [32-bit data_size]
+ *
+ * 8 words: [2-bit len] [6-bit tag] [8-bit type] [16-bit reserved]
+ * [8-bit size_offset] [24-bit tag>>6]
+ * [32-bit data_offset]
+ * [32-bit data_size]
+ * [32-bit array_size]
+ * [32-bit reserved]
+ * [32-bit reserved]
+ * [32-bit reserved]
+ */
+
+#define PB_FIELDINFO_1(tag, type, data_offset, data_size, size_offset, array_size) \
+ (0 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(data_offset) & 0xFF) << 16) | \
+ (((uint32_t)(size_offset) & 0x0F) << 24) | (((uint32_t)(data_size) & 0x0F) << 28)),
+
+#define PB_FIELDINFO_2(tag, type, data_offset, data_size, size_offset, array_size) \
+ (1 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(array_size) & 0xFFF) << 16) | (((uint32_t)(size_offset) & 0x0F) << 28)), \
+ (((uint32_t)(data_offset) & 0xFFFF) | (((uint32_t)(data_size) & 0xFFF) << 16) | (((uint32_t)(tag) & 0x3c0) << 22)),
+
+#define PB_FIELDINFO_4(tag, type, data_offset, data_size, size_offset, array_size) \
+ (2 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(array_size) & 0xFFFF) << 16)), \
+ ((uint32_t)(int_least8_t)(size_offset) | (((uint32_t)(tag) << 2) & 0xFFFFFF00)), \
+ (data_offset), (data_size),
+
+#define PB_FIELDINFO_8(tag, type, data_offset, data_size, size_offset, array_size) \
+ (3 | (((tag) << 2) & 0xFF) | ((type) << 8)), \
+ ((uint32_t)(int_least8_t)(size_offset) | (((uint32_t)(tag) << 2) & 0xFFFFFF00)), \
+ (data_offset), (data_size), (array_size), 0, 0, 0,
+
+/* These assertions verify that the field information fits in the allocated space.
+ * The generator tries to automatically determine the correct width that can fit all
+ * data associated with a message. These asserts will fail only if there has been a
+ * problem in the automatic logic - this may be worth reporting as a bug. As a workaround,
+ * you can increase the descriptor width by defining PB_FIELDINFO_WIDTH or by setting
+ * descriptorsize option in .options file.
+ */
+#define PB_FITS(value,bits) ((uint32_t)(value) < ((uint32_t)1<<bits))
+
+#define PB_FIELDINFO_ASSERT_1(tag, type, data_offset, data_size, size_offset, array_size) \
+    PB_STATIC_ASSERT(PB_FITS(tag,6) && PB_FITS(data_offset,8) && PB_FITS(size_offset,4) && PB_FITS(data_size,4) && PB_FITS(array_size,1), FIELDINFO_DOES_NOT_FIT_width1_field ## tag)
+
+#define PB_FIELDINFO_ASSERT_2(tag, type, data_offset, data_size, size_offset, array_size) \
+    PB_STATIC_ASSERT(PB_FITS(tag,10) && PB_FITS(data_offset,16) && PB_FITS(size_offset,4) && PB_FITS(data_size,12) && PB_FITS(array_size,12), FIELDINFO_DOES_NOT_FIT_width2_field ## tag)
+
+#ifndef PB_FIELD_32BIT
+/* Maximum field sizes are still 16-bit if pb_size_t is 16 bits wide */
+#define PB_FIELDINFO_ASSERT_4(tag, type, data_offset, data_size, size_offset, array_size) \
+    PB_STATIC_ASSERT(PB_FITS(tag,16) && PB_FITS(data_offset,16) && PB_FITS((int_least8_t)size_offset,8) && PB_FITS(data_size,16) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width4_field ## tag)
+
+#define PB_FIELDINFO_ASSERT_8(tag, type, data_offset, data_size, size_offset, array_size) \
+    PB_STATIC_ASSERT(PB_FITS(tag,16) && PB_FITS(data_offset,16) && PB_FITS((int_least8_t)size_offset,8) && PB_FITS(data_size,16) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width8_field ## tag)
+#else
+/* Up to 32-bit fields supported.
+ * Note that the checks are against 31 bits to avoid compiler warnings about shift wider than type in the test.
+ * I expect no one to have >2GB messages with nanopb anyway.
+ */
+#define PB_FIELDINFO_ASSERT_4(tag, type, data_offset, data_size, size_offset, array_size) \
+ PB_STATIC_ASSERT(PB_FITS(tag,30) && PB_FITS(data_offset,31) && PB_FITS(size_offset,8) && PB_FITS(data_size,31) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width4_field ## tag)
+
+#define PB_FIELDINFO_ASSERT_8(tag, type, data_offset, data_size, size_offset, array_size) \
+ PB_STATIC_ASSERT(PB_FITS(tag,30) && PB_FITS(data_offset,31) && PB_FITS(size_offset,8) && PB_FITS(data_size,31) && PB_FITS(array_size,31), FIELDINFO_DOES_NOT_FIT_width8_field ## tag)
+#endif
+
+
+/* Automatic picking of FIELDINFO width:
+ * Uses width 1 when possible, otherwise resorts to width 2.
+ * This is used when PB_BIND() is called with "AUTO" as the argument.
+ * The generator will give explicit size argument when it knows that a message
+ * structure grows beyond 1-word format limits.
+ */
+#define PB_FIELDINFO_WIDTH_AUTO(atype, htype, ltype) PB_FI_WIDTH ## atype(htype, ltype)
+#define PB_FI_WIDTH_PB_ATYPE_STATIC(htype, ltype) PB_FI_WIDTH ## htype(ltype)
+#define PB_FI_WIDTH_PB_ATYPE_POINTER(htype, ltype) PB_FI_WIDTH ## htype(ltype)
+#define PB_FI_WIDTH_PB_ATYPE_CALLBACK(htype, ltype) 2
+#define PB_FI_WIDTH_PB_HTYPE_REQUIRED(ltype) PB_FI_WIDTH ## ltype
+#define PB_FI_WIDTH_PB_HTYPE_SINGULAR(ltype) PB_FI_WIDTH ## ltype
+#define PB_FI_WIDTH_PB_HTYPE_OPTIONAL(ltype) PB_FI_WIDTH ## ltype
+#define PB_FI_WIDTH_PB_HTYPE_ONEOF(ltype) PB_FI_WIDTH ## ltype
+#define PB_FI_WIDTH_PB_HTYPE_REPEATED(ltype) 2
+#define PB_FI_WIDTH_PB_HTYPE_FIXARRAY(ltype) 2
+#define PB_FI_WIDTH_PB_LTYPE_BOOL 1
+#define PB_FI_WIDTH_PB_LTYPE_BYTES 2
+#define PB_FI_WIDTH_PB_LTYPE_DOUBLE 1
+#define PB_FI_WIDTH_PB_LTYPE_ENUM 1
+#define PB_FI_WIDTH_PB_LTYPE_UENUM 1
+#define PB_FI_WIDTH_PB_LTYPE_FIXED32 1
+#define PB_FI_WIDTH_PB_LTYPE_FIXED64 1
+#define PB_FI_WIDTH_PB_LTYPE_FLOAT 1
+#define PB_FI_WIDTH_PB_LTYPE_INT32 1
+#define PB_FI_WIDTH_PB_LTYPE_INT64 1
+#define PB_FI_WIDTH_PB_LTYPE_MESSAGE 2
+#define PB_FI_WIDTH_PB_LTYPE_MSG_W_CB 2
+#define PB_FI_WIDTH_PB_LTYPE_SFIXED32 1
+#define PB_FI_WIDTH_PB_LTYPE_SFIXED64 1
+#define PB_FI_WIDTH_PB_LTYPE_SINT32 1
+#define PB_FI_WIDTH_PB_LTYPE_SINT64 1
+#define PB_FI_WIDTH_PB_LTYPE_STRING 2
+#define PB_FI_WIDTH_PB_LTYPE_UINT32 1
+#define PB_FI_WIDTH_PB_LTYPE_UINT64 1
+#define PB_FI_WIDTH_PB_LTYPE_EXTENSION 1
+#define PB_FI_WIDTH_PB_LTYPE_FIXED_LENGTH_BYTES 2
+
+/* The mapping from protobuf types to LTYPEs is done using these macros. */
+#define PB_LTYPE_MAP_BOOL PB_LTYPE_BOOL
+#define PB_LTYPE_MAP_BYTES PB_LTYPE_BYTES
+#define PB_LTYPE_MAP_DOUBLE PB_LTYPE_FIXED64
+#define PB_LTYPE_MAP_ENUM PB_LTYPE_VARINT
+#define PB_LTYPE_MAP_UENUM PB_LTYPE_UVARINT
+#define PB_LTYPE_MAP_FIXED32 PB_LTYPE_FIXED32
+#define PB_LTYPE_MAP_FIXED64 PB_LTYPE_FIXED64
+#define PB_LTYPE_MAP_FLOAT PB_LTYPE_FIXED32
+#define PB_LTYPE_MAP_INT32 PB_LTYPE_VARINT
+#define PB_LTYPE_MAP_INT64 PB_LTYPE_VARINT
+#define PB_LTYPE_MAP_MESSAGE PB_LTYPE_SUBMESSAGE
+#define PB_LTYPE_MAP_MSG_W_CB PB_LTYPE_SUBMSG_W_CB
+#define PB_LTYPE_MAP_SFIXED32 PB_LTYPE_FIXED32
+#define PB_LTYPE_MAP_SFIXED64 PB_LTYPE_FIXED64
+#define PB_LTYPE_MAP_SINT32 PB_LTYPE_SVARINT
+#define PB_LTYPE_MAP_SINT64 PB_LTYPE_SVARINT
+#define PB_LTYPE_MAP_STRING PB_LTYPE_STRING
+#define PB_LTYPE_MAP_UINT32 PB_LTYPE_UVARINT
+#define PB_LTYPE_MAP_UINT64 PB_LTYPE_UVARINT
+#define PB_LTYPE_MAP_EXTENSION PB_LTYPE_EXTENSION
+#define PB_LTYPE_MAP_FIXED_LENGTH_BYTES PB_LTYPE_FIXED_LENGTH_BYTES
+
+/* These macros are used for giving out error messages.
+ * They are mostly a debugging aid; the main error information
+ * is the true/false return value from functions.
+ * Some code space can be saved by disabling the error
+ * messages if not used.
+ *
+ * PB_SET_ERROR() sets the error message if none has been set yet.
+ * msg must be a constant string literal.
+ * PB_GET_ERROR() always returns a pointer to a string.
+ * PB_RETURN_ERROR() sets the error and returns false from current
+ * function.
+ */
+#ifdef PB_NO_ERRMSG
+#define PB_SET_ERROR(stream, msg) PB_UNUSED(stream)
+#define PB_GET_ERROR(stream) "(errmsg disabled)"
+#else
+#define PB_SET_ERROR(stream, msg) (stream->errmsg = (stream)->errmsg ? (stream)->errmsg : (msg))
+#define PB_GET_ERROR(stream) ((stream)->errmsg ? (stream)->errmsg : "(none)")
+#endif
+
+#define PB_RETURN_ERROR(stream, msg) return PB_SET_ERROR(stream, msg), false
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#ifdef __cplusplus
+#if __cplusplus >= 201103L
+#define PB_CONSTEXPR constexpr
+#else // __cplusplus >= 201103L
+#define PB_CONSTEXPR
+#endif // __cplusplus >= 201103L
+
+#if __cplusplus >= 201703L
+#define PB_INLINE_CONSTEXPR inline constexpr
+#else // __cplusplus >= 201703L
+#define PB_INLINE_CONSTEXPR PB_CONSTEXPR
+#endif // __cplusplus >= 201703L
+
+extern "C++"
+{
+namespace nanopb {
+// Each type will be partially specialized by the generator.
+template struct MessageDescriptor;
+} // namespace nanopb
+}
+#endif /* __cplusplus */
+
+#endif
diff --git a/vendor/nanopb/spm_headers/nanopb/pb_common.h b/vendor/nanopb/spm_headers/nanopb/pb_common.h
index 9449ad84..58aa90f7 100644
--- a/vendor/nanopb/spm_headers/nanopb/pb_common.h
+++ b/vendor/nanopb/spm_headers/nanopb/pb_common.h
@@ -1 +1,49 @@
-../../pb_common.h
\ No newline at end of file
+/* pb_common.h: Common support functions for pb_encode.c and pb_decode.c.
+ * These functions are rarely needed by applications directly.
+ */
+
+#ifndef PB_COMMON_H_INCLUDED
+#define PB_COMMON_H_INCLUDED
+
+#include "pb.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Initialize the field iterator structure to beginning.
+ * Returns false if the message type is empty. */
+bool pb_field_iter_begin(pb_field_iter_t *iter, const pb_msgdesc_t *desc, void *message);
+
+/* Get a field iterator for extension field. */
+bool pb_field_iter_begin_extension(pb_field_iter_t *iter, pb_extension_t *extension);
+
+/* Same as pb_field_iter_begin(), but for const message pointer.
+ * Note that the pointers in pb_field_iter_t will be non-const but shouldn't
+ * be written to when using these functions. */
+bool pb_field_iter_begin_const(pb_field_iter_t *iter, const pb_msgdesc_t *desc, const void *message);
+bool pb_field_iter_begin_extension_const(pb_field_iter_t *iter, const pb_extension_t *extension);
+
+/* Advance the iterator to the next field.
+ * Returns false when the iterator wraps back to the first field. */
+bool pb_field_iter_next(pb_field_iter_t *iter);
+
+/* Advance the iterator until it points at a field with the given tag.
+ * Returns false if no such field exists. */
+bool pb_field_iter_find(pb_field_iter_t *iter, uint32_t tag);
+
+/* Find a field with type PB_LTYPE_EXTENSION, or return false if not found.
+ * There can be only one extension range field per message. */
+bool pb_field_iter_find_extension(pb_field_iter_t *iter);
+
+#ifdef PB_VALIDATE_UTF8
+/* Validate UTF-8 text string */
+bool pb_validate_utf8(const char *s);
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
+
diff --git a/vendor/nanopb/spm_headers/nanopb/pb_decode.h b/vendor/nanopb/spm_headers/nanopb/pb_decode.h
index d8288c70..3f392b29 100644
--- a/vendor/nanopb/spm_headers/nanopb/pb_decode.h
+++ b/vendor/nanopb/spm_headers/nanopb/pb_decode.h
@@ -1 +1,204 @@
-../../pb_decode.h
\ No newline at end of file
+/* pb_decode.h: Functions to decode protocol buffers. Depends on pb_decode.c.
+ * The main function is pb_decode. You also need an input stream, and the
+ * field descriptions created by nanopb_generator.py.
+ */
+
+#ifndef PB_DECODE_H_INCLUDED
+#define PB_DECODE_H_INCLUDED
+
+#include "pb.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Structure for defining custom input streams. You will need to provide
+ * a callback function to read the bytes from your storage, which can be
+ * for example a file or a network socket.
+ *
+ * The callback must conform to these rules:
+ *
+ * 1) Return false on IO errors. This will cause decoding to abort.
+ * 2) You can use state to store your own data (e.g. buffer pointer),
+ * and rely on pb_read to verify that no-body reads past bytes_left.
+ * 3) Your callback may be used with substreams, in which case bytes_left
+ * is different than from the main stream. Don't use bytes_left to compute
+ * any pointers.
+ */
+struct pb_istream_s
+{
+#ifdef PB_BUFFER_ONLY
+ /* Callback pointer is not used in buffer-only configuration.
+ * Having an int pointer here allows binary compatibility but
+ * gives an error if someone tries to assign callback function.
+ */
+ int *callback;
+#else
+ bool (*callback)(pb_istream_t *stream, pb_byte_t *buf, size_t count);
+#endif
+
+ /* state is a free field for use of the callback function defined above.
+ * Note that when pb_istream_from_buffer() is used, it reserves this field
+ * for its own use.
+ */
+ void *state;
+
+ /* Maximum number of bytes left in this stream. Callback can report
+ * EOF before this limit is reached. Setting a limit is recommended
+ * when decoding directly from file or network streams to avoid
+ * denial-of-service by excessively long messages.
+ */
+ size_t bytes_left;
+
+#ifndef PB_NO_ERRMSG
+ /* Pointer to constant (ROM) string when decoding function returns error */
+ const char *errmsg;
+#endif
+};
+
+#ifndef PB_NO_ERRMSG
+#define PB_ISTREAM_EMPTY {0,0,0,0}
+#else
+#define PB_ISTREAM_EMPTY {0,0,0}
+#endif
+
+/***************************
+ * Main decoding functions *
+ ***************************/
+
+/* Decode a single protocol buffers message from input stream into a C structure.
+ * Returns true on success, false on any failure.
+ * The actual struct pointed to by dest must match the description in fields.
+ * Callback fields of the destination structure must be initialized by caller.
+ * All other fields will be initialized by this function.
+ *
+ * Example usage:
+ * MyMessage msg = {};
+ * uint8_t buffer[64];
+ * pb_istream_t stream;
+ *
+ * // ... read some data into buffer ...
+ *
+ * stream = pb_istream_from_buffer(buffer, count);
+ * pb_decode(&stream, MyMessage_fields, &msg);
+ */
+bool pb_decode(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct);
+
+/* Extended version of pb_decode, with several options to control
+ * the decoding process:
+ *
+ * PB_DECODE_NOINIT: Do not initialize the fields to default values.
+ * This is slightly faster if you do not need the default
+ * values and instead initialize the structure to 0 using
+ * e.g. memset(). This can also be used for merging two
+ * messages, i.e. combine already existing data with new
+ * values.
+ *
+ * PB_DECODE_DELIMITED: Input message starts with the message size as varint.
+ * Corresponds to parseDelimitedFrom() in Google's
+ * protobuf API.
+ *
+ * PB_DECODE_NULLTERMINATED: Stop reading when field tag is read as 0. This allows
+ * reading null terminated messages.
+ * NOTE: Until nanopb-0.4.0, pb_decode() also allows
+ * null-termination. This behaviour is not supported in
+ * most other protobuf implementations, so PB_DECODE_DELIMITED
+ * is a better option for compatibility.
+ *
+ * Multiple flags can be combined with bitwise or (| operator)
+ */
+#define PB_DECODE_NOINIT 0x01U
+#define PB_DECODE_DELIMITED 0x02U
+#define PB_DECODE_NULLTERMINATED 0x04U
+bool pb_decode_ex(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct, unsigned int flags);
+
+/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
+#define pb_decode_noinit(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_NOINIT)
+#define pb_decode_delimited(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_DELIMITED)
+#define pb_decode_delimited_noinit(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_DELIMITED | PB_DECODE_NOINIT)
+#define pb_decode_nullterminated(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_NULLTERMINATED)
+
+/* Release any allocated pointer fields. If you use dynamic allocation, you should
+ * call this for any successfully decoded message when you are done with it. If
+ * pb_decode() returns with an error, the message is already released.
+ */
+void pb_release(const pb_msgdesc_t *fields, void *dest_struct);
+
+/**************************************
+ * Functions for manipulating streams *
+ **************************************/
+
+/* Create an input stream for reading from a memory buffer.
+ *
+ * msglen should be the actual length of the message, not the full size of
+ * allocated buffer.
+ *
+ * Alternatively, you can use a custom stream that reads directly from e.g.
+ * a file or a network socket.
+ */
+pb_istream_t pb_istream_from_buffer(const pb_byte_t *buf, size_t msglen);
+
+/* Function to read from a pb_istream_t. You can use this if you need to
+ * read some custom header data, or to read data in field callbacks.
+ */
+bool pb_read(pb_istream_t *stream, pb_byte_t *buf, size_t count);
+
+
+/************************************************
+ * Helper functions for writing field callbacks *
+ ************************************************/
+
+/* Decode the tag for the next field in the stream. Gives the wire type and
+ * field tag. At end of the message, returns false and sets eof to true. */
+bool pb_decode_tag(pb_istream_t *stream, pb_wire_type_t *wire_type, uint32_t *tag, bool *eof);
+
+/* Skip the field payload data, given the wire type. */
+bool pb_skip_field(pb_istream_t *stream, pb_wire_type_t wire_type);
+
+/* Decode an integer in the varint format. This works for enum, int32,
+ * int64, uint32 and uint64 field types. */
+#ifndef PB_WITHOUT_64BIT
+bool pb_decode_varint(pb_istream_t *stream, uint64_t *dest);
+#else
+#define pb_decode_varint pb_decode_varint32
+#endif
+
+/* Decode an integer in the varint format. This works for enum, int32,
+ * and uint32 field types. */
+bool pb_decode_varint32(pb_istream_t *stream, uint32_t *dest);
+
+/* Decode a bool value in varint format. */
+bool pb_decode_bool(pb_istream_t *stream, bool *dest);
+
+/* Decode an integer in the zig-zagged svarint format. This works for sint32
+ * and sint64. */
+#ifndef PB_WITHOUT_64BIT
+bool pb_decode_svarint(pb_istream_t *stream, int64_t *dest);
+#else
+bool pb_decode_svarint(pb_istream_t *stream, int32_t *dest);
+#endif
+
+/* Decode a fixed32, sfixed32 or float value. You need to pass a pointer to
+ * a 4-byte wide C variable. */
+bool pb_decode_fixed32(pb_istream_t *stream, void *dest);
+
+#ifndef PB_WITHOUT_64BIT
+/* Decode a fixed64, sfixed64 or double value. You need to pass a pointer to
+ * a 8-byte wide C variable. */
+bool pb_decode_fixed64(pb_istream_t *stream, void *dest);
+#endif
+
+#ifdef PB_CONVERT_DOUBLE_FLOAT
+/* Decode a double value into float variable. */
+bool pb_decode_double_as_float(pb_istream_t *stream, float *dest);
+#endif
+
+/* Make a limited-length substream for reading a PB_WT_STRING field. */
+bool pb_make_string_substream(pb_istream_t *stream, pb_istream_t *substream);
+bool pb_close_string_substream(pb_istream_t *stream, pb_istream_t *substream);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/vendor/nanopb/spm_headers/nanopb/pb_encode.h b/vendor/nanopb/spm_headers/nanopb/pb_encode.h
index 3236a5f5..6dc089da 100644
--- a/vendor/nanopb/spm_headers/nanopb/pb_encode.h
+++ b/vendor/nanopb/spm_headers/nanopb/pb_encode.h
@@ -1 +1,195 @@
-../../pb_encode.h
\ No newline at end of file
+/* pb_encode.h: Functions to encode protocol buffers. Depends on pb_encode.c.
+ * The main function is pb_encode. You also need an output stream, and the
+ * field descriptions created by nanopb_generator.py.
+ */
+
+#ifndef PB_ENCODE_H_INCLUDED
+#define PB_ENCODE_H_INCLUDED
+
+#include "pb.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Structure for defining custom output streams. You will need to provide
+ * a callback function to write the bytes to your storage, which can be
+ * for example a file or a network socket.
+ *
+ * The callback must conform to these rules:
+ *
+ * 1) Return false on IO errors. This will cause encoding to abort.
+ * 2) You can use state to store your own data (e.g. buffer pointer).
+ * 3) pb_write will update bytes_written after your callback runs.
+ * 4) Substreams will modify max_size and bytes_written. Don't use them
+ * to calculate any pointers.
+ */
+struct pb_ostream_s
+{
+#ifdef PB_BUFFER_ONLY
+ /* Callback pointer is not used in buffer-only configuration.
+ * Having an int pointer here allows binary compatibility but
+ * gives an error if someone tries to assign callback function.
+ * Also, NULL pointer marks a 'sizing stream' that does not
+ * write anything.
+ */
+ const int *callback;
+#else
+ bool (*callback)(pb_ostream_t *stream, const pb_byte_t *buf, size_t count);
+#endif
+
+ /* state is a free field for use of the callback function defined above.
+ * Note that when pb_ostream_from_buffer() is used, it reserves this field
+ * for its own use.
+ */
+ void *state;
+
+ /* Limit number of output bytes written. Can be set to SIZE_MAX. */
+ size_t max_size;
+
+ /* Number of bytes written so far. */
+ size_t bytes_written;
+
+#ifndef PB_NO_ERRMSG
+ /* Pointer to constant (ROM) string when decoding function returns error */
+ const char *errmsg;
+#endif
+};
+
+/***************************
+ * Main encoding functions *
+ ***************************/
+
+/* Encode a single protocol buffers message from C structure into a stream.
+ * Returns true on success, false on any failure.
+ * The actual struct pointed to by src_struct must match the description in fields.
+ * All required fields in the struct are assumed to have been filled in.
+ *
+ * Example usage:
+ * MyMessage msg = {};
+ * uint8_t buffer[64];
+ * pb_ostream_t stream;
+ *
+ * msg.field1 = 42;
+ * stream = pb_ostream_from_buffer(buffer, sizeof(buffer));
+ * pb_encode(&stream, MyMessage_fields, &msg);
+ */
+bool pb_encode(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct);
+
+/* Extended version of pb_encode, with several options to control the
+ * encoding process:
+ *
+ * PB_ENCODE_DELIMITED: Prepend the length of message as a varint.
+ * Corresponds to writeDelimitedTo() in Google's
+ * protobuf API.
+ *
+ * PB_ENCODE_NULLTERMINATED: Append a null byte to the message for termination.
+ * NOTE: This behaviour is not supported in most other
+ * protobuf implementations, so PB_ENCODE_DELIMITED
+ * is a better option for compatibility.
+ */
+#define PB_ENCODE_DELIMITED 0x02U
+#define PB_ENCODE_NULLTERMINATED 0x04U
+bool pb_encode_ex(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct, unsigned int flags);
+
+/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
+#define pb_encode_delimited(s,f,d) pb_encode_ex(s,f,d, PB_ENCODE_DELIMITED)
+#define pb_encode_nullterminated(s,f,d) pb_encode_ex(s,f,d, PB_ENCODE_NULLTERMINATED)
+
+/* Encode the message to get the size of the encoded data, but do not store
+ * the data. */
+bool pb_get_encoded_size(size_t *size, const pb_msgdesc_t *fields, const void *src_struct);
+
+/**************************************
+ * Functions for manipulating streams *
+ **************************************/
+
+/* Create an output stream for writing into a memory buffer.
+ * The number of bytes written can be found in stream.bytes_written after
+ * encoding the message.
+ *
+ * Alternatively, you can use a custom stream that writes directly to e.g.
+ * a file or a network socket.
+ */
+pb_ostream_t pb_ostream_from_buffer(pb_byte_t *buf, size_t bufsize);
+
+/* Pseudo-stream for measuring the size of a message without actually storing
+ * the encoded data.
+ *
+ * Example usage:
+ * MyMessage msg = {};
+ * pb_ostream_t stream = PB_OSTREAM_SIZING;
+ * pb_encode(&stream, MyMessage_fields, &msg);
+ * printf("Message size is %d\n", stream.bytes_written);
+ */
+#ifndef PB_NO_ERRMSG
+#define PB_OSTREAM_SIZING {0,0,0,0,0}
+#else
+#define PB_OSTREAM_SIZING {0,0,0,0}
+#endif
+
+/* Function to write into a pb_ostream_t stream. You can use this if you need
+ * to append or prepend some custom headers to the message.
+ */
+bool pb_write(pb_ostream_t *stream, const pb_byte_t *buf, size_t count);
+
+
+/************************************************
+ * Helper functions for writing field callbacks *
+ ************************************************/
+
+/* Encode field header based on type and field number defined in the field
+ * structure. Call this from the callback before writing out field contents. */
+bool pb_encode_tag_for_field(pb_ostream_t *stream, const pb_field_iter_t *field);
+
+/* Encode field header by manually specifying wire type. You need to use this
+ * if you want to write out packed arrays from a callback field. */
+bool pb_encode_tag(pb_ostream_t *stream, pb_wire_type_t wiretype, uint32_t field_number);
+
+/* Encode an integer in the varint format.
+ * This works for bool, enum, int32, int64, uint32 and uint64 field types. */
+#ifndef PB_WITHOUT_64BIT
+bool pb_encode_varint(pb_ostream_t *stream, uint64_t value);
+#else
+bool pb_encode_varint(pb_ostream_t *stream, uint32_t value);
+#endif
+
+/* Encode an integer in the zig-zagged svarint format.
+ * This works for sint32 and sint64. */
+#ifndef PB_WITHOUT_64BIT
+bool pb_encode_svarint(pb_ostream_t *stream, int64_t value);
+#else
+bool pb_encode_svarint(pb_ostream_t *stream, int32_t value);
+#endif
+
+/* Encode a string or bytes type field. For strings, pass strlen(s) as size. */
+bool pb_encode_string(pb_ostream_t *stream, const pb_byte_t *buffer, size_t size);
+
+/* Encode a fixed32, sfixed32 or float value.
+ * You need to pass a pointer to a 4-byte wide C variable. */
+bool pb_encode_fixed32(pb_ostream_t *stream, const void *value);
+
+#ifndef PB_WITHOUT_64BIT
+/* Encode a fixed64, sfixed64 or double value.
+ * You need to pass a pointer to a 8-byte wide C variable. */
+bool pb_encode_fixed64(pb_ostream_t *stream, const void *value);
+#endif
+
+#ifdef PB_CONVERT_DOUBLE_FLOAT
+/* Encode a float value so that it appears like a double in the encoded
+ * message. */
+bool pb_encode_float_as_double(pb_ostream_t *stream, float value);
+#endif
+
+/* Encode a submessage field.
+ * You need to pass the pb_field_t array and pointer to struct, just like
+ * with pb_encode(). This internally encodes the submessage twice, first to
+ * calculate message size and then to actually write it out.
+ */
+bool pb_encode_submessage(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/vendor/nanopb/spm_resources/PrivacyInfo.xcprivacy b/vendor/nanopb/spm_resources/PrivacyInfo.xcprivacy
new file mode 100644
index 00000000..72e00ac3
--- /dev/null
+++ b/vendor/nanopb/spm_resources/PrivacyInfo.xcprivacy
@@ -0,0 +1,15 @@
+
+
+
+
+ NSPrivacyAccessedAPITypes
+
+ NSPrivacyCollectedDataTypes
+
+ NSPrivacyTracking
+
+ NSPrivacyTrackingDomains
+
+
+
+
diff --git a/vendor/nanopb/tests/SConstruct b/vendor/nanopb/tests/SConstruct
index 17550125..a719e354 100644
--- a/vendor/nanopb/tests/SConstruct
+++ b/vendor/nanopb/tests/SConstruct
@@ -3,7 +3,7 @@ Type 'scons' to build and run all the available test cases.
It will automatically detect your platform and C compiler and
build appropriately.
-You can modify the behavious using following options:
+You can modify the behaviour using following options:
BUILDDIR Directory to build into (default "build")
CC Name of C compiler
CXX Name of C++ compiler
@@ -76,12 +76,12 @@ env.Append(PROTOCPATH = '#../generator')
if not env.GetOption('clean'):
def check_ccflags(context, flags, linkflags = ''):
'''Check if given CCFLAGS are supported'''
- context.Message('Checking support for CCFLAGS="%s"... ' % flags)
+ context.Message('Checking support for CCFLAGS="%s" LINKFLAGS="%s"... ' % (flags, linkflags))
oldflags = context.env['CCFLAGS']
oldlinkflags = context.env['LINKFLAGS']
context.env.Append(CCFLAGS = flags)
context.env.Append(LINKFLAGS = linkflags)
- result = context.TryCompile("int main() {return 0;}", '.c')
+ result = context.TryLink("int main() {return 0;}", '.c')
context.env.Replace(CCFLAGS = oldflags)
context.env.Replace(LINKFLAGS = oldlinkflags)
context.Result(result)
@@ -122,6 +122,10 @@ if not env.GetOption('clean'):
if stdlib: conf.env.Append(CPPDEFINES = {'HAVE_STDLIB_H': 1})
if limits: conf.env.Append(CPPDEFINES = {'HAVE_LIMITS_H': 1})
+ # Some platforms need libm for isnan()
+ if conf.CheckCCFLAGS('', linkflags = '-lm'):
+ conf.env.Append(LINKFLAGS = '-lm')
+
# Check protoc version
conf.env['PROTOC_VERSION'] = conf.CheckProtocVersion()
@@ -162,12 +166,13 @@ if not env.get('NODEFARGS'):
# Debug info, warnings as errors
env.Append(CFLAGS = '-g -Wall -Werror ')
- env.Append(CORECFLAGS = '-Wextra')
+ env.Append(CORECFLAGS = '-Wextra ')
# Pedantic ANSI C. On AVR this doesn't work because we use large
# enums in some of the tests.
if env.get("EMBEDDED") != "AVR":
- env.Append(CFLAGS = '-ansi -pedantic')
+ env.Append(CFLAGS = '-ansi ')
+ env.Append(CORECFLAGS = '-pedantic ')
# Profiling and coverage
if not env.get("EMBEDDED"):
@@ -191,6 +196,9 @@ if not env.get('NODEFARGS'):
# More strict checks on the nanopb core
env.Append(CORECFLAGS = '/W4')
+ # Enable C11 standard
+ env.Append(CFLAGS = ' /std:c11 ')
+
# Disable warning about sizeof(union{}) construct that is used in
# message size macros, in e.g. multiple_files testcase. The C construct
# itself is valid, but quite rare, which causes Visual C++ to give a warning
@@ -203,7 +211,7 @@ if not env.get('NODEFARGS'):
if 'clang' in env['CXX']:
env.Append(CXXFLAGS = '-g -Wall -Werror -Wextra -Wno-missing-field-initializers')
elif 'g++' in env['CXX'] or 'gcc' in env['CXX']:
- env.Append(CXXFLAGS = '-g -Wall -Werror -Wextra -Wno-missing-field-initializers')
+ env.Append(CXXFLAGS = '-g -Wall -Werror -Wextra -Wno-missing-field-initializers -std=gnu++11')
elif 'cl' in env['CXX']:
env.Append(CXXFLAGS = '/Zi /W2 /WX /wd4116 /wd4127')
diff --git a/vendor/nanopb/tests/any_type/anytest.proto b/vendor/nanopb/tests/any_type/anytest.proto
index 0b86957f..891bfaab 100644
--- a/vendor/nanopb/tests/any_type/anytest.proto
+++ b/vendor/nanopb/tests/any_type/anytest.proto
@@ -13,7 +13,7 @@
// the type_url and value fields, and then call pb_decode() again on the value.
//
// This does result in unnecessarily copying the data around, so for larger
-// values it is preferrable to use callbacks on the fields instead.
+// values it is preferable to use callbacks on the fields instead.
syntax = "proto3";
import "google/protobuf/any.proto";
diff --git a/vendor/nanopb/tests/bazel_options_support/bazel_options_support.cc b/vendor/nanopb/tests/bazel_options_support/bazel_options_support.cc
new file mode 100644
index 00000000..92620a8f
--- /dev/null
+++ b/vendor/nanopb/tests/bazel_options_support/bazel_options_support.cc
@@ -0,0 +1,10 @@
+#include "tests/alltypes/alltypes.pb.h"
+
+int main(int argc, char* argv[]) {
+ IntSizes intSizes;
+ if (sizeof(intSizes.req_int8) == 1) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
diff --git a/vendor/nanopb/tests/comments/comments.expected b/vendor/nanopb/tests/comments/comments.expected
index 0017cda1..ae22a924 100644
--- a/vendor/nanopb/tests/comments/comments.expected
+++ b/vendor/nanopb/tests/comments/comments.expected
@@ -3,4 +3,12 @@ LeadingEnumComment
ENUMVAL2.*TrailingEnumComment
Message1Comment
member2.*TrailingMemberComment
-
+m2member1.*m2comment1
+m2member50.*m2comment50
+m2member4.*m2comment4
+m2oneof10.*m2oneof10_comment
+m2oneof5.*m2oneof5_comment
+A.*A_comment
+B.*B_comment
+C.*C_comment
+subfield.*subfield_comment
diff --git a/vendor/nanopb/tests/comments/comments.proto b/vendor/nanopb/tests/comments/comments.proto
index e4825c62..0f952ce5 100644
--- a/vendor/nanopb/tests/comments/comments.proto
+++ b/vendor/nanopb/tests/comments/comments.proto
@@ -15,3 +15,29 @@ enum Enum1
ENUMVAL1 = 1;
ENUMVAL2 = 2; // TrailingEnumComment
}
+
+message Message2
+{
+ required string m2member1 = 1; // m2comment1
+ required string m2member50 = 50; // m2comment50
+ required string m2member4 = 4; // m2comment4
+
+ oneof m2oneof {
+ int32 m2oneof10 = 10; // m2oneof10_comment
+ int32 m2oneof5 = 5; // m2oneof5_comment
+ }
+}
+
+message Message3
+{
+ message SubMessage {
+ required int32 subfield = 1; // subfield_comment
+
+ enum SubEnum
+ {
+ A = 0; /// A_comment
+ B = 1; /// B_comment
+ C = 2; /// C_comment
+ }
+ }
+}
diff --git a/vendor/nanopb/tests/cxx_callback_datatype/SConscript b/vendor/nanopb/tests/cxx_callback_datatype/SConscript
index ecdab0b7..e3021ead 100644
--- a/vendor/nanopb/tests/cxx_callback_datatype/SConscript
+++ b/vendor/nanopb/tests/cxx_callback_datatype/SConscript
@@ -1,3 +1,4 @@
+# Test wrapping of a C++ class inside struct using callback_datatype option.
Import('env')
import os
diff --git a/vendor/nanopb/tests/cxx_callback_datatype/cxx_callback_datatype.cpp b/vendor/nanopb/tests/cxx_callback_datatype/cxx_callback_datatype.cpp
index f95b4f61..2b06d379 100644
--- a/vendor/nanopb/tests/cxx_callback_datatype/cxx_callback_datatype.cpp
+++ b/vendor/nanopb/tests/cxx_callback_datatype/cxx_callback_datatype.cpp
@@ -1,3 +1,4 @@
+// Test wrapping of a C++ class inside struct using callback_datatype option.
#include "message.pb.hpp"
#include
@@ -7,9 +8,10 @@
#include
// See tests/alltypes_callback, tests/oneoff_callback and examples/network_server for more...
-bool TestMessage_submessages_callback(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_t *field)
+bool TestMessage_values_callback(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_t *field)
{
if (ostream != NULL) {
+ // Encoding callback, serialize items from vector
const std::vector &v = *(const std::vector *)field->pData;
for (std::vector::const_iterator i = v.begin(); i != v.end(); ++i) {
if (!pb_encode_tag_for_field(ostream, field)) {
@@ -22,6 +24,7 @@ bool TestMessage_submessages_callback(pb_istream_t *istream, pb_ostream_t *ostre
}
}
} else if (istream != NULL) {
+ // Decoding callback, add items to vector
std::vector &v = *(std::vector *)field->pData;
SubMessage tmp;
if (!pb_decode(istream, SubMessage_fields, &tmp)) {
@@ -35,26 +38,27 @@ bool TestMessage_submessages_callback(pb_istream_t *istream, pb_ostream_t *ostre
extern "C"
bool TestMessage_callback(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_t *field)
{
- if (field->tag == TestMessage_submessages_tag) {
- return TestMessage_submessages_callback(istream, ostream, field);
+ if (field->tag == TestMessage_values_tag) {
+ return TestMessage_values_callback(istream, ostream, field);
}
return true;
}
extern "C"
int main() {
- std::vector source;
- source.push_back(5);
- source.push_back(4);
- source.push_back(3);
- source.push_back(2);
- source.push_back(1);
+ TestMessage source = TestMessage_init_zero; // Not strictly necessary to initialize, just using it to test the initializer.
+ source.values.push_back(5);
+ source.values.push_back(4);
+ source.values.push_back(3);
+ source.values.push_back(2);
+ source.values.push_back(1);
std::vector serialized;
- pb_ostream_t sizestream = {0};
- pb_encode(&sizestream, TestMessage_fields, &source);
- serialized.resize(sizestream.bytes_written);
+ size_t size = 0;
+ pb_get_encoded_size(&size, TestMessage_fields, &source);
+ serialized.resize(size);
+
pb_ostream_t outstream = pb_ostream_from_buffer(&serialized.front(), serialized.size());
if (!pb_encode(&outstream, TestMessage_fields, &source)) {
fprintf(stderr, "Failed to encode: %s\n", PB_GET_ERROR(&outstream));
@@ -62,16 +66,16 @@ int main() {
}
- std::vector destination;
+ TestMessage destination;
pb_istream_t instream = pb_istream_from_buffer(&serialized.front(), outstream.bytes_written);
if (!pb_decode(&instream, TestMessage_fields, &destination)) {
fprintf(stderr, "Failed to decode: %s\n", PB_GET_ERROR(&instream));
return 2;
}
- if (source != destination) {
+ if (source.values != destination.values) {
fprintf(stderr, "Result does not match\n");
- fprintf(stderr, "source(%d): ", (int)source.size());
- for (std::vector::iterator i = source.begin(); i != source.end(); ++i)
+ fprintf(stderr, "source(%d): ", (int)source.values.size());
+ for (std::vector::iterator i = source.values.begin(); i != source.values.end(); ++i)
{
fprintf(stderr, "%d, ", *i);
}
@@ -79,8 +83,8 @@ int main() {
for (unsigned i = 0; i != std::min(serialized.size(), outstream.bytes_written); ++i) {
fprintf(stderr, "0x%02x ", serialized[i]);
}
- fprintf(stderr, "\ndestination(%d): ", (int)destination.size());
- for (std::vector::iterator i = destination.begin(); i != destination.end(); ++i)
+ fprintf(stderr, "\ndestination(%d): ", (int)destination.values.size());
+ for (std::vector::iterator i = destination.values.begin(); i != destination.values.end(); ++i)
{
fprintf(stderr, "%d, ", *i);
}
diff --git a/vendor/nanopb/tests/cxx_callback_datatype/message.proto b/vendor/nanopb/tests/cxx_callback_datatype/message.proto
index 605f289e..092d35d2 100644
--- a/vendor/nanopb/tests/cxx_callback_datatype/message.proto
+++ b/vendor/nanopb/tests/cxx_callback_datatype/message.proto
@@ -10,5 +10,5 @@ message SubMessage {
message TestMessage {
// Instead of std::vector callback handles wrapping/unwrapping of the int.
- repeated SubMessage submessages = 1 [(nanopb).callback_datatype = "std::vector"];
+ repeated SubMessage values = 1 [(nanopb).callback_datatype = "std::vector"];
}
diff --git a/vendor/nanopb/tests/cxx_descriptor/SConscript b/vendor/nanopb/tests/cxx_descriptor/SConscript
index a11aff03..3a798aec 100644
--- a/vendor/nanopb/tests/cxx_descriptor/SConscript
+++ b/vendor/nanopb/tests/cxx_descriptor/SConscript
@@ -19,7 +19,10 @@ for std in ["c++03", "c++11", "c++14", "c++17", "c++20"]:
print("Skipping {} test - compiler doesn't support it".format(std))
continue
+ if std == 'c++03':
+ e.Append(CPPDEFINES = {'PB_C99_STATIC_ASSERT': 1})
+
o1 = e.Object('message_descriptor_{}'.format(std), 'message_descriptor.cc')
o2 = e.Object('message.pb_{}'.format(std), 'message.pb.c')
- p = e.Program([o1, o2])
+ p = e.Program([o1, o2, "$COMMON/pb_common.o"])
e.RunTest(p)
diff --git a/vendor/nanopb/tests/cxx_descriptor/message.proto b/vendor/nanopb/tests/cxx_descriptor/message.proto
index be4334de..bf074e80 100644
--- a/vendor/nanopb/tests/cxx_descriptor/message.proto
+++ b/vendor/nanopb/tests/cxx_descriptor/message.proto
@@ -10,3 +10,25 @@ message MyEmptyMessage {
message MyNonEmptyMessage {
optional uint32 field = 1;
}
+
+message MyMessageWithMsgid {
+ option (nanopb_msgopt).msgid = 42;
+ optional uint32 field = 1;
+}
+
+message MyMessageWithoutMsgid {
+ optional uint32 field = 1;
+}
+
+// This message is not used in the tests but is rather a sentry message that
+// will trigger a build failure if the generator decides to create a size
+// variable for fields which are not size bound. Note that this only works as
+// long as the C++ interface for sizing wraps the C interface.
+message MyMessageWithoutSize {
+ repeated uint32 field = 1;
+}
+
+message MyMessageWithSizeBoundRepeatedFields {
+ option (nanopb_msgopt).max_count = 100;
+ repeated uint32 field = 1;
+}
diff --git a/vendor/nanopb/tests/cxx_descriptor/message_descriptor.cc b/vendor/nanopb/tests/cxx_descriptor/message_descriptor.cc
index 44373011..149000ba 100644
--- a/vendor/nanopb/tests/cxx_descriptor/message_descriptor.cc
+++ b/vendor/nanopb/tests/cxx_descriptor/message_descriptor.cc
@@ -19,10 +19,21 @@ extern "C" int main() {
TEST(MessageDescriptor::fields_array_length ==
MyNonEmptyMessage_msg.field_count);
+ TEST(MessageDescriptor::size ==
+ MyNonEmptyMessage_size);
+ TEST(MessageDescriptor::size ==
+ MyMessageWithSizeBoundRepeatedFields_size);
+
TEST(MessageDescriptor::fields() == MyEmptyMessage_fields);
TEST(MessageDescriptor::fields() ==
MyNonEmptyMessage_fields);
+ TEST(MessageDescriptor::has_msgid() == true);
+ TEST(MessageDescriptor::msgid() == 42);
+
+ TEST(MessageDescriptor::has_msgid() == false);
+
+
if (status != 0) fprintf(stdout, "\n\nSome tests FAILED!\n");
return status;
diff --git a/vendor/nanopb/tests/enum_mapping/SConscript b/vendor/nanopb/tests/enum_mapping/SConscript
new file mode 100644
index 00000000..92c7eb5e
--- /dev/null
+++ b/vendor/nanopb/tests/enum_mapping/SConscript
@@ -0,0 +1,8 @@
+# Test generated ENUMTYPE defines
+
+Import('env')
+
+env.NanopbProto('enum_mapping')
+env.Object("enum_mapping.pb.c")
+env.Match(["enum_mapping.pb.h", "enum_mapping.expected"])
+
diff --git a/vendor/nanopb/tests/enum_mapping/enum_mapping.expected b/vendor/nanopb/tests/enum_mapping/enum_mapping.expected
new file mode 100644
index 00000000..fd19de75
--- /dev/null
+++ b/vendor/nanopb/tests/enum_mapping/enum_mapping.expected
@@ -0,0 +1,3 @@
+define TestMsg_test1_ENUMTYPE TestEnum1
+define TestMsg_oneof1_test2_ENUMTYPE TestEnum2
+
diff --git a/vendor/nanopb/tests/enum_mapping/enum_mapping.proto b/vendor/nanopb/tests/enum_mapping/enum_mapping.proto
new file mode 100644
index 00000000..0e26096e
--- /dev/null
+++ b/vendor/nanopb/tests/enum_mapping/enum_mapping.proto
@@ -0,0 +1,26 @@
+syntax = "proto3";
+
+enum TestEnum1
+{
+ A = 0;
+ B = 2;
+ C = -3;
+}
+
+enum TestEnum2
+{
+ X = 0;
+ Y = 5;
+}
+
+message TestMsg
+{
+ TestEnum1 test1 = 1;
+
+ oneof oneof1
+ {
+ TestEnum2 test2 = 2;
+ }
+}
+
+
diff --git a/vendor/nanopb/tests/enum_sizes/SConscript b/vendor/nanopb/tests/enum_sizes/SConscript
index 048592ed..479632ed 100644
--- a/vendor/nanopb/tests/enum_sizes/SConscript
+++ b/vendor/nanopb/tests/enum_sizes/SConscript
@@ -2,11 +2,21 @@
Import('env')
-env.NanopbProto('enumsizes')
-
-p = env.Program(["enumsizes_unittests.c",
- "enumsizes.pb.c",
+env.NanopbProto('packed_enum')
+p = env.Program(["packed_enum_unittests.c",
+ "packed_enum.pb.c",
"$COMMON/pb_encode.o",
"$COMMON/pb_decode.o",
"$COMMON/pb_common.o"])
env.RunTest(p)
+
+# Enum size specification is supported by C++11 and newer
+env2 = env.Clone()
+env2.Append(CXXFLAGS = "-std=c++11")
+env2.NanopbProtoCpp('enum_intsize')
+p2 = env2.Program(["enum_intsize_unittests.cc",
+ "enum_intsize.pb.cpp",
+ "$COMMON/pb_encode.o",
+ "$COMMON/pb_decode.o",
+ "$COMMON/pb_common.o"])
+env2.RunTest(p2)
diff --git a/vendor/nanopb/tests/enum_sizes/enum_intsize.proto b/vendor/nanopb/tests/enum_sizes/enum_intsize.proto
new file mode 100644
index 00000000..4e6f1bb8
--- /dev/null
+++ b/vendor/nanopb/tests/enum_sizes/enum_intsize.proto
@@ -0,0 +1,41 @@
+syntax = "proto2";
+
+import 'nanopb.proto';
+
+option (nanopb_fileopt).long_names = false;
+
+
+enum IntSizeInt8
+{
+ option (nanopb_enumopt).enum_intsize = IS_8;
+ I8_A = 0;
+ I8_B = 2;
+ I8_C = 3;
+}
+
+
+enum IntSizeInt16
+{
+ option (nanopb_enumopt).enum_intsize = IS_16;
+ I16_A = 0;
+ I16_B = 2;
+ I16_C = 3;
+}
+
+
+enum IntSizeInt32
+{
+ option (nanopb_enumopt).enum_intsize = IS_32;
+ I32_A = 0;
+ I32_B = 2;
+ I32_C = 3;
+}
+
+
+enum IntSizeInt64
+{
+ option (nanopb_enumopt).enum_intsize = IS_64;
+ I64_A = 0;
+ I64_B = 2;
+ I64_C = 3;
+}
diff --git a/vendor/nanopb/tests/enum_sizes/enum_intsize_unittests.cc b/vendor/nanopb/tests/enum_sizes/enum_intsize_unittests.cc
new file mode 100644
index 00000000..dae87371
--- /dev/null
+++ b/vendor/nanopb/tests/enum_sizes/enum_intsize_unittests.cc
@@ -0,0 +1,17 @@
+#include
+#include "enum_intsize.pb.hpp"
+#include "unittests.h"
+
+extern "C" int main()
+{
+ int status = 0;
+
+ TEST(sizeof(IntSizeInt8) == sizeof(uint8_t));
+ TEST(sizeof(IntSizeInt16) == sizeof(uint16_t));
+ TEST(sizeof(IntSizeInt32) == sizeof(uint32_t));
+ TEST(sizeof(IntSizeInt64) == sizeof(uint64_t));
+
+ if (status != 0) fprintf(stdout, "\n\nSome tests FAILED!\n");
+
+ return status;
+}
diff --git a/vendor/nanopb/tests/enum_sizes/enumsizes.proto b/vendor/nanopb/tests/enum_sizes/packed_enum.proto
similarity index 100%
rename from vendor/nanopb/tests/enum_sizes/enumsizes.proto
rename to vendor/nanopb/tests/enum_sizes/packed_enum.proto
diff --git a/vendor/nanopb/tests/enum_sizes/enumsizes_unittests.c b/vendor/nanopb/tests/enum_sizes/packed_enum_unittests.c
similarity index 98%
rename from vendor/nanopb/tests/enum_sizes/enumsizes_unittests.c
rename to vendor/nanopb/tests/enum_sizes/packed_enum_unittests.c
index 5606895a..4f0016af 100644
--- a/vendor/nanopb/tests/enum_sizes/enumsizes_unittests.c
+++ b/vendor/nanopb/tests/enum_sizes/packed_enum_unittests.c
@@ -3,7 +3,7 @@
#include
#include
#include "unittests.h"
-#include "enumsizes.pb.h"
+#include "packed_enum.pb.h"
int main()
{
diff --git a/vendor/nanopb/tests/enum_validate/SConscript b/vendor/nanopb/tests/enum_validate/SConscript
new file mode 100644
index 00000000..a1dd009d
--- /dev/null
+++ b/vendor/nanopb/tests/enum_validate/SConscript
@@ -0,0 +1,7 @@
+# Test enum to string functionality
+
+Import('env')
+env.NanopbProto("enum.proto")
+p = env.Program(["enum_validate.c", "enum.pb.c"])
+env.RunTest(p)
+
diff --git a/vendor/nanopb/tests/enum_validate/enum.proto b/vendor/nanopb/tests/enum_validate/enum.proto
new file mode 100644
index 00000000..b57b5b79
--- /dev/null
+++ b/vendor/nanopb/tests/enum_validate/enum.proto
@@ -0,0 +1,18 @@
+/* Test enum to string function generation */
+
+syntax = "proto2";
+
+import "nanopb.proto";
+
+option (nanopb_fileopt).enum_validate = true;
+
+enum MyEnum {
+ VALUE1 = 1;
+ VALUE2 = 2;
+ VALUE15 = 15;
+}
+
+enum MyShortNameEnum {
+ option (nanopb_enumopt).long_names = false;
+ MSNE_VALUE256 = 256;
+}
diff --git a/vendor/nanopb/tests/enum_validate/enum_validate.c b/vendor/nanopb/tests/enum_validate/enum_validate.c
new file mode 100644
index 00000000..2af83cc9
--- /dev/null
+++ b/vendor/nanopb/tests/enum_validate/enum_validate.c
@@ -0,0 +1,19 @@
+#include
+#include "unittests.h"
+#include "enum.pb.h"
+
+int main()
+{
+ int status = 0;
+ TEST(MyEnum_valid(MyEnum_VALUE1) == true);
+ TEST(MyEnum_valid(MyEnum_VALUE2) == true);
+ TEST(MyEnum_valid(MyEnum_VALUE15) == true);
+ TEST(MyShortNameEnum_valid(MSNE_VALUE256) == true);
+ TEST(MyShortNameEnum_valid(9999) == false);
+
+ if (status != 0)
+ fprintf(stdout, "\n\nSome tests FAILED!\n");
+
+ return status;
+}
+
diff --git a/vendor/nanopb/tests/fallback_type/SConscript b/vendor/nanopb/tests/fallback_type/SConscript
new file mode 100644
index 00000000..98dfd745
--- /dev/null
+++ b/vendor/nanopb/tests/fallback_type/SConscript
@@ -0,0 +1,7 @@
+# Test fallback_type option
+
+Import("env")
+
+env.NanopbProto(["fallback_type","fallback_type.options"])
+env.Object("fallback_type.pb.c")
+env.Match(['fallback_type.pb.h', 'fallback_type.expected'])
diff --git a/vendor/nanopb/tests/fallback_type/fallback_type.expected b/vendor/nanopb/tests/fallback_type/fallback_type.expected
new file mode 100644
index 00000000..05d72d09
--- /dev/null
+++ b/vendor/nanopb/tests/fallback_type/fallback_type.expected
@@ -0,0 +1,3 @@
+int32_t member1;
+char \*member2
+pb_callback_t member3
diff --git a/vendor/nanopb/tests/fallback_type/fallback_type.options b/vendor/nanopb/tests/fallback_type/fallback_type.options
new file mode 100644
index 00000000..8db3189f
--- /dev/null
+++ b/vendor/nanopb/tests/fallback_type/fallback_type.options
@@ -0,0 +1 @@
+Message1.member2 fallback_type:FT_POINTER
diff --git a/vendor/nanopb/tests/fallback_type/fallback_type.proto b/vendor/nanopb/tests/fallback_type/fallback_type.proto
new file mode 100644
index 00000000..2e1dc0a6
--- /dev/null
+++ b/vendor/nanopb/tests/fallback_type/fallback_type.proto
@@ -0,0 +1,9 @@
+syntax = "proto2";
+
+message Message1
+{
+ required int32 member1 = 1; // must remain as int
+ required string member2 = 2; // must become a pointer
+ required string member3 = 3; // must become pb_callback_t
+}
+
diff --git a/vendor/nanopb/tests/fixed_count/SConscript b/vendor/nanopb/tests/fixed_count/SConscript
index 3cecb12a..31380ad7 100644
--- a/vendor/nanopb/tests/fixed_count/SConscript
+++ b/vendor/nanopb/tests/fixed_count/SConscript
@@ -1,14 +1,15 @@
# Test that fixed count option works.
-Import("env")
+Import("malloc_env")
-env.NanopbProto("fixed_count")
-env.Object("fixed_count.pb.c")
+malloc_env.NanopbProto("fixed_count")
+malloc_env.Object("fixed_count.pb.c")
-p = env.Program(["fixed_count_unittests.c",
+p = malloc_env.Program(["fixed_count_unittests.c",
"fixed_count.pb.c",
- "$COMMON/pb_encode.o",
- "$COMMON/pb_decode.o",
- "$COMMON/pb_common.o"])
+ "$COMMON/pb_encode_with_malloc.o",
+ "$COMMON/pb_decode_with_malloc.o",
+ "$COMMON/pb_common_with_malloc.o",
+ "$COMMON/malloc_wrappers.o"])
-env.RunTest(p)
+malloc_env.RunTest(p)
diff --git a/vendor/nanopb/tests/fixed_count/fixed_count.proto b/vendor/nanopb/tests/fixed_count/fixed_count.proto
index e96d1ec4..29fadd24 100644
--- a/vendor/nanopb/tests/fixed_count/fixed_count.proto
+++ b/vendor/nanopb/tests/fixed_count/fixed_count.proto
@@ -19,3 +19,12 @@ message Message3
repeated Message2 data1 = 1 [(nanopb).max_count = 2, (nanopb).fixed_count = true];
repeated Message2 data2 = 2 [(nanopb).max_count = 2, (nanopb).fixed_count = true];
}
+
+message SubMessage {
+ required int32 a = 1;
+}
+
+message Message4 {
+ repeated SubMessage submsgs = 1 [(nanopb).type = FT_POINTER, (nanopb).max_count = 5, (nanopb).fixed_count = true];
+ repeated string strings = 2 [(nanopb).type = FT_POINTER, (nanopb).max_count = 4, (nanopb).fixed_count = true];
+}
diff --git a/vendor/nanopb/tests/fixed_count/fixed_count_unittests.c b/vendor/nanopb/tests/fixed_count/fixed_count_unittests.c
index b039c9b6..f1c7c518 100644
--- a/vendor/nanopb/tests/fixed_count/fixed_count_unittests.c
+++ b/vendor/nanopb/tests/fixed_count/fixed_count_unittests.c
@@ -2,6 +2,7 @@
#include
#include
#include
+#include
#include "unittests.h"
#include "fixed_count.pb.h"
@@ -134,6 +135,54 @@ int main()
TEST(memcmp(&msg_b, &msg_a, sizeof(msg_a)) == 0);
}
+ {
+ pb_byte_t buffer[256];
+ Message4 msg_a = Message4_init_zero;
+ Message4 msg_b = Message4_init_zero;
+
+ pb_ostream_t ostream;
+ pb_istream_t istream;
+ size_t message_length;
+
+ COMMENT("Test encode and decode with pointer type fixarray");
+
+ SubMessage submsgs[pb_arraysize(Message4, submsgs[0])] = {SubMessage_init_zero};
+ submsgs[0].a = 1;
+ submsgs[1].a = 5;
+ submsgs[2].a = 999;
+
+ char a[5] = "a";
+ char b[5] = "b";
+ char abc[5] = "abc";
+ char *strings[pb_arraysize(Message4, strings[0])] = {a, b, abc};
+
+ msg_a.submsgs = &submsgs;
+ msg_a.strings = &strings;
+
+ ostream = pb_ostream_from_buffer(buffer, Message3_size);
+ TEST(pb_encode(&ostream, Message4_fields, &msg_a));
+ message_length = ostream.bytes_written;
+
+ TEST(get_alloc_count() == 0);
+
+ istream = pb_istream_from_buffer(buffer, message_length);
+ TEST(pb_decode(&istream, Message4_fields, &msg_b));
+
+ TEST(istream.bytes_left == 0);
+
+ TEST((*msg_b.submsgs)[0].a == 1);
+ TEST((*msg_b.submsgs)[1].a == 5);
+ TEST((*msg_b.submsgs)[2].a == 999);
+
+ TEST(strcmp((*msg_b.strings)[0], "a") == 0);
+ TEST(strcmp((*msg_b.strings)[1], "b") == 0);
+ TEST(strcmp((*msg_b.strings)[2], "abc") == 0);
+
+ pb_release(Message4_fields, &msg_b);
+
+ TEST(get_alloc_count() == 0);
+ }
+
if (status != 0)
fprintf(stdout, "\n\nSome tests FAILED!\n");
diff --git a/vendor/nanopb/tests/fuzztest/SConscript b/vendor/nanopb/tests/fuzztest/SConscript
index 7c0d6e27..a5df9050 100644
--- a/vendor/nanopb/tests/fuzztest/SConscript
+++ b/vendor/nanopb/tests/fuzztest/SConscript
@@ -80,12 +80,12 @@ def run_against_corpus(target, source, env):
count = 0
args = [str(source[0])]
- if env.has_key("TEST_RUNNER"):
+ if "TEST_RUNNER" in env:
args = [env["TEST_RUNNER"]] + args
- if env.has_key("FUZZTEST_CORPUS_SAMPLESIZE"):
+ if "FUZZTEST_CORPUS_SAMPLESIZE" in env:
samplesize = int(env["FUZZTEST_CORPUS_SAMPLESIZE"])
- elif env.has_key("EMBEDDED"):
+ elif env.get('EMBEDDED'):
samplesize = 100
else:
samplesize = 4096
diff --git a/vendor/nanopb/tests/fuzztest/fuzztest.c b/vendor/nanopb/tests/fuzztest/fuzztest.c
index dbec6365..b59b7a1e 100644
--- a/vendor/nanopb/tests/fuzztest/fuzztest.c
+++ b/vendor/nanopb/tests/fuzztest/fuzztest.c
@@ -101,7 +101,12 @@ static bool do_decode(const uint8_t *buffer, size_t msglen, size_t structsize, c
assert(status);
}
- pb_release(msgtype, msg);
+ if (status)
+ {
+ /* On error return, pb_release() should be called automatically. */
+ pb_release(msgtype, msg);
+ }
+
free_with_check(msg);
free_with_check(buf2);
assert(get_alloc_count() == initial_alloc_count);
@@ -109,7 +114,7 @@ static bool do_decode(const uint8_t *buffer, size_t msglen, size_t structsize, c
return status;
}
-static bool do_stream_decode(const uint8_t *buffer, size_t msglen, size_t fail_after, size_t structsize, const pb_msgdesc_t *msgtype, bool assert_success)
+static bool do_stream_decode(const uint8_t *buffer, size_t msglen, size_t fail_after, size_t structsize, const pb_msgdesc_t *msgtype, unsigned flags, bool assert_success)
{
bool status;
flakystream_t stream;
@@ -119,7 +124,7 @@ static bool do_stream_decode(const uint8_t *buffer, size_t msglen, size_t fail_a
memset(msg, 0, structsize);
flakystream_init(&stream, buffer, msglen, fail_after);
- status = pb_decode(&stream.stream, msgtype, msg);
+ status = pb_decode_ex(&stream.stream, msgtype, msg, flags);
if (status)
{
@@ -132,7 +137,12 @@ static bool do_stream_decode(const uint8_t *buffer, size_t msglen, size_t fail_a
assert(status);
}
- pb_release(msgtype, msg);
+ if (status)
+ {
+ /* On error return, pb_release() should be called automatically. */
+ pb_release(msgtype, msg);
+ }
+
free_with_check(msg);
assert(get_alloc_count() == initial_alloc_count);
@@ -295,7 +305,7 @@ void do_roundtrips(const uint8_t *data, size_t size, bool expect_valid)
if (do_decode(data, size, sizeof(alltypes_static_AllTypes), alltypes_static_AllTypes_fields, 0, expect_valid))
{
do_roundtrip(data, size, sizeof(alltypes_static_AllTypes), alltypes_static_AllTypes_fields);
- do_stream_decode(data, size, SIZE_MAX, sizeof(alltypes_static_AllTypes), alltypes_static_AllTypes_fields, true);
+ do_stream_decode(data, size, SIZE_MAX, sizeof(alltypes_static_AllTypes), alltypes_static_AllTypes_fields, 0, true);
do_callback_decode(data, size, true);
}
#endif
@@ -304,7 +314,7 @@ void do_roundtrips(const uint8_t *data, size_t size, bool expect_valid)
if (do_decode(data, size, sizeof(alltypes_proto3_static_AllTypes), alltypes_proto3_static_AllTypes_fields, 0, expect_valid))
{
do_roundtrip(data, size, sizeof(alltypes_proto3_static_AllTypes), alltypes_proto3_static_AllTypes_fields);
- do_stream_decode(data, size, SIZE_MAX, sizeof(alltypes_proto3_static_AllTypes), alltypes_proto3_static_AllTypes_fields, true);
+ do_stream_decode(data, size, SIZE_MAX, sizeof(alltypes_proto3_static_AllTypes), alltypes_proto3_static_AllTypes_fields, 0, true);
}
#endif
@@ -312,7 +322,7 @@ void do_roundtrips(const uint8_t *data, size_t size, bool expect_valid)
if (do_decode(data, size, sizeof(alltypes_pointer_AllTypes), alltypes_pointer_AllTypes_fields, 0, expect_valid))
{
do_roundtrip(data, size, sizeof(alltypes_pointer_AllTypes), alltypes_pointer_AllTypes_fields);
- do_stream_decode(data, size, SIZE_MAX, sizeof(alltypes_pointer_AllTypes), alltypes_pointer_AllTypes_fields, true);
+ do_stream_decode(data, size, SIZE_MAX, sizeof(alltypes_pointer_AllTypes), alltypes_pointer_AllTypes_fields, 0, true);
}
#endif
@@ -320,7 +330,7 @@ void do_roundtrips(const uint8_t *data, size_t size, bool expect_valid)
if (do_decode(data, size, sizeof(alltypes_proto3_pointer_AllTypes), alltypes_proto3_pointer_AllTypes_fields, 0, expect_valid))
{
do_roundtrip(data, size, sizeof(alltypes_proto3_pointer_AllTypes), alltypes_proto3_pointer_AllTypes_fields);
- do_stream_decode(data, size, SIZE_MAX, sizeof(alltypes_proto3_pointer_AllTypes), alltypes_proto3_pointer_AllTypes_fields, true);
+ do_stream_decode(data, size, SIZE_MAX, sizeof(alltypes_proto3_pointer_AllTypes), alltypes_proto3_pointer_AllTypes_fields, 0, true);
}
#endif
@@ -332,8 +342,9 @@ void do_roundtrips(const uint8_t *data, size_t size, bool expect_valid)
* Testing proto2 is enough for good coverage here, as it has a superset of the field types of proto3.
*/
set_max_alloc_bytes(get_alloc_bytes() + 4096);
- do_stream_decode(data, size, size - 16, sizeof(alltypes_static_AllTypes), alltypes_static_AllTypes_fields, false);
- do_stream_decode(data, size, size - 16, sizeof(alltypes_pointer_AllTypes), alltypes_pointer_AllTypes_fields, false);
+ do_stream_decode(data, size, size - 16, sizeof(alltypes_static_AllTypes), alltypes_static_AllTypes_fields, 0, false);
+ do_stream_decode(data, size, size - 16, sizeof(alltypes_pointer_AllTypes), alltypes_pointer_AllTypes_fields, 0, false);
+ do_stream_decode(data, size, size - 16, sizeof(alltypes_pointer_AllTypes), alltypes_pointer_AllTypes_fields, PB_DECODE_DELIMITED, false);
set_max_alloc_bytes(orig_max_alloc_bytes);
}
diff --git a/vendor/nanopb/tests/fuzztest/random_data.c b/vendor/nanopb/tests/fuzztest/random_data.c
index 6fbc8128..46eeb448 100644
--- a/vendor/nanopb/tests/fuzztest/random_data.c
+++ b/vendor/nanopb/tests/fuzztest/random_data.c
@@ -55,7 +55,7 @@ uint8_t rand_byte()
}
/* Get a random length, with skewed distribution.
- * Favors the shorter lengths, but always atleast 1. */
+ * Favors the shorter lengths, but always at least 1. */
size_t rand_len(size_t max)
{
uint32_t w = rand_word();
diff --git a/vendor/nanopb/tests/fuzztest/random_data.h b/vendor/nanopb/tests/fuzztest/random_data.h
index 292b77c8..57923eab 100644
--- a/vendor/nanopb/tests/fuzztest/random_data.h
+++ b/vendor/nanopb/tests/fuzztest/random_data.h
@@ -23,7 +23,7 @@ bool rand_bool();
uint8_t rand_byte();
/* Get a random length, with skewed distribution.
- * Favors the shorter lengths, but always atleast 1. */
+ * Favors the shorter lengths, but always at least 1. */
size_t rand_len(size_t max);
/* Fills a buffer with random bytes with skewed distribution. */
diff --git a/vendor/nanopb/tests/initializers/SConscript b/vendor/nanopb/tests/initializers/SConscript
new file mode 100644
index 00000000..059af390
--- /dev/null
+++ b/vendor/nanopb/tests/initializers/SConscript
@@ -0,0 +1,6 @@
+# Test initializers when overriding callback datatype
+
+Import('env')
+
+env.NanopbProto("initializertest.proto")
+env.Object("test_initializer.c")
diff --git a/vendor/nanopb/tests/initializers/initializertest.proto b/vendor/nanopb/tests/initializers/initializertest.proto
new file mode 100644
index 00000000..2d192f98
--- /dev/null
+++ b/vendor/nanopb/tests/initializers/initializertest.proto
@@ -0,0 +1,10 @@
+syntax = "proto2";
+import "nanopb.proto";
+
+message TestMessage {
+ required int32 field1 = 1;
+ repeated string field2 = 2 [(nanopb).callback_datatype = "struct MyType*"];
+ repeated string field3 = 3 [(nanopb).callback_datatype = "struct { int a; int b; }"];
+ repeated string field4 = 4 [(nanopb).callback_datatype = "struct { struct { int x; } y; }",
+ (nanopb).initializer = "{{5}}"];
+}
diff --git a/vendor/nanopb/tests/initializers/test_initializer.c b/vendor/nanopb/tests/initializers/test_initializer.c
new file mode 100644
index 00000000..930e5e8b
--- /dev/null
+++ b/vendor/nanopb/tests/initializers/test_initializer.c
@@ -0,0 +1,8 @@
+#include "initializertest.pb.h"
+
+int main()
+{
+ TestMessage msg1 = TestMessage_init_zero;
+ TestMessage msg2 = TestMessage_init_default;
+ return msg1.field1 + msg2.field1; /* Mark variables as used for compiler */
+}
diff --git a/vendor/nanopb/tests/message_sizes/dummy.c b/vendor/nanopb/tests/message_sizes/dummy.c
index 767ad463..560fd09c 100644
--- a/vendor/nanopb/tests/message_sizes/dummy.c
+++ b/vendor/nanopb/tests/message_sizes/dummy.c
@@ -4,6 +4,7 @@
int main()
{
+ PB_STATIC_ASSERT(MESSAGES2_PB_H_MAX_SIZE == xmit_size, INCORRECT_MAX_SIZE);
return xmit_size;
}
diff --git a/vendor/nanopb/tests/namingstyle/SConscript b/vendor/nanopb/tests/namingstyle/SConscript
new file mode 100644
index 00000000..3922340f
--- /dev/null
+++ b/vendor/nanopb/tests/namingstyle/SConscript
@@ -0,0 +1,11 @@
+# Test namingstyle option
+
+Import('env')
+
+env = env.Clone()
+env.Replace(NANOPBFLAGS = "-C")
+
+env.NanopbProto(["naming_style", "naming_style.options"])
+
+test = env.Program(["test_naming_style_c.c", "naming_style.pb.c", "$COMMON/pb_decode.o", "$COMMON/pb_encode.o", '$COMMON/pb_common.o'])
+env.RunTest(test)
diff --git a/vendor/nanopb/tests/namingstyle/naming_style.options b/vendor/nanopb/tests/namingstyle/naming_style.options
new file mode 100644
index 00000000..e7bf8ca3
--- /dev/null
+++ b/vendor/nanopb/tests/namingstyle/naming_style.options
@@ -0,0 +1,16 @@
+* long_names:true
+* enum_to_string:true
+* enum_validate:true
+
+MainMessage.repeatedNumber max_count:4, fixed_count:true
+MainMessage.string_Values1 type:FT_POINTER
+MainMessage.stringValues2 max_length:40, max_count:5
+MainMessage.requiredString max_length:10
+MainMessage.repeatedFixed32 max_count:10
+MainMessage.requiredBytes1 max_size:10, fixed_length:true
+MainMessage.requiredBytes2 max_size:10
+MainMessage.repeatedBytes1 type:FT_POINTER
+MainMessage.repeatedBytes2 type:FT_POINTER, fixed_count:true, max_count:5
+MainMessage.repeatedInts type:FT_POINTER
+MainMessage.SUB_MESSAGE2 type:FT_CALLBACK
+MainMessage.oneOfName2 anonymous_oneof:true
diff --git a/vendor/nanopb/tests/namingstyle/naming_style.proto b/vendor/nanopb/tests/namingstyle/naming_style.proto
new file mode 100644
index 00000000..1e6dcb97
--- /dev/null
+++ b/vendor/nanopb/tests/namingstyle/naming_style.proto
@@ -0,0 +1,63 @@
+syntax = "proto2";
+
+enum MyEnum1 {
+ ENTRY_FIRST = 0;
+ ENTRY_Second = 1;
+ EnumThird = 2;
+}
+
+enum MY_ENUM2 {
+ ENUM2_ENTRY = 0;
+}
+
+message SubMessage {
+ optional int32 test_value = 1;
+}
+
+message MainMessage {
+ optional int32 LUCKY_number = 1;
+ required int32 REQUIRED_NUMBER = 2;
+ repeated int32 repeatedNumber = 3;
+ repeated int32 repeatedInts = 4;
+
+ optional MyEnum1 MyEnum1 = 5;
+ optional MY_ENUM2 My_Enum2 = 6;
+ required MY_ENUM2 MY_ENUM3 = 7;
+ repeated MY_ENUM2 MY_ENUM4 = 8;
+ required MyEnum1 MyEnum5 = 25 [default = ENTRY_Second];
+
+ repeated string string_Values1 = 9;
+ repeated string stringValues2 = 10;
+ optional string OPTIONAL_String = 11;
+ required string requiredString = 12;
+
+ repeated fixed32 repeatedFixed32 = 13;
+
+ required bytes requiredBytes1 = 14;
+ required bytes requiredBytes2 = 15;
+ repeated bytes repeatedBytes1 = 16;
+ repeated bytes repeatedBytes2 = 17;
+
+ optional SubMessage subMessage1 = 18;
+ repeated SubMessage SUB_MESSAGE2 = 19;
+ required SubMessage sub_message3 = 20;
+
+ oneof oneOfName {
+ SubMessage testMessage1 = 21;
+ SubMessage testMessage2 = 22;
+ }
+
+ oneof oneOfName2 {
+ SubMessage testMessage4 = 23;
+ SubMessage testMessage5 = 24;
+ }
+
+ extensions 200 to 255;
+}
+
+message TestExtension {
+ extend MainMessage {
+ optional TestExtension testExtension = 250;
+ }
+ optional string stringValue = 1;
+}
diff --git a/vendor/nanopb/tests/namingstyle/test_naming_style_c.c b/vendor/nanopb/tests/namingstyle/test_naming_style_c.c
new file mode 100644
index 00000000..6cc2e9dc
--- /dev/null
+++ b/vendor/nanopb/tests/namingstyle/test_naming_style_c.c
@@ -0,0 +1,83 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include "unittests.h"
+#include "naming_style.pb.h"
+
+int main()
+{
+ int status = 0;
+ main_message_t defaultMessage = MAIN_MESSAGE_INIT_DEFAULT;
+ main_message_t message = MAIN_MESSAGE_INIT_ZERO;
+
+ /* Verify the default value was initialized */
+ TEST(defaultMessage.my_enum5 == MY_ENUM1_ENTRY_SECOND);
+
+ /* Verify that all members have the expected names */
+ message.lucky_number = 13;
+ message.required_number = 1;
+ message.repeated_number[0] = 1;
+ message.repeated_ints = NULL;
+
+ message.my_enum1 = MY_ENUM1_ENUM_THIRD;
+ message.my_enum2 = MY_ENUM2_ENUM2_ENTRY;
+ message.my_enum3 = MY_ENUM2_ENUM2_ENTRY;
+ message.my_enum4.arg = NULL;
+
+ message.string_values1 = NULL;
+ message.string_values2[0][0] = 'a';
+ message.optional_string.arg = NULL;
+ message.required_string[0] = 'a';
+
+ message.repeated_fixed32[0] = 1;
+
+ message.required_bytes1[0] = 0;
+ message.required_bytes2.size = 0;
+ message.repeated_bytes1_count = 0;
+ message.repeated_bytes2 = NULL;
+
+ message.has_sub_message1 = true;
+ message.sub_message1.has_test_value = true;
+ message.sub_message1.test_value = 0;
+ message.sub_message2.arg = NULL;
+ message.sub_message3.test_value = 0;
+
+ message.which_one_of_name = MAIN_MESSAGE_TEST_MESSAGE2_TAG;
+ message.one_of_name.test_message2.has_test_value = true;
+ message.one_of_name.test_message2.test_value = 5;
+
+ message.which_one_of_name2 = MAIN_MESSAGE_TEST_MESSAGE5_TAG;
+ message.test_message5.test_value = 5;
+
+ TEST(strcmp("ENTRY_FIRST", my_enum1_name(MY_ENUM1_ENTRY_FIRST)) == 0);
+ TEST(my_enum1_valid(MY_ENUM1_ENTRY_FIRST) == true);
+ TEST(my_enum2_valid(MY_ENUM2_ENUM2_ENTRY) == true);
+
+ /* Verify that the descriptor structure is at least mostly correct
+ * by doing a round-trip encoding test.
+ */
+ {
+ uint8_t buffer1[256];
+ uint8_t buffer2[256];
+ pb_ostream_t ostream1 = pb_ostream_from_buffer(buffer1, sizeof(buffer1));
+ pb_ostream_t ostream2 = pb_ostream_from_buffer(buffer2, sizeof(buffer2));
+ pb_istream_t istream;
+ main_message_t message2 = MAIN_MESSAGE_INIT_ZERO;
+
+ TEST(pb_encode(&ostream1, &main_message_t_msg, &message));
+
+ istream = pb_istream_from_buffer(buffer1, ostream1.bytes_written);
+ TEST(pb_decode(&istream, &main_message_t_msg, &message2));
+
+ /* Encoding a second time should produce same output */
+ TEST(pb_encode(&ostream2, &main_message_t_msg, &message2));
+
+ TEST(ostream2.bytes_written == ostream1.bytes_written);
+ TEST(memcmp(buffer1, buffer2, ostream1.bytes_written) == 0);
+ }
+
+ return status;
+}
diff --git a/vendor/nanopb/tests/options/options.proto b/vendor/nanopb/tests/options/options.proto
index 10acca6e..4355ceda 100644
--- a/vendor/nanopb/tests/options/options.proto
+++ b/vendor/nanopb/tests/options/options.proto
@@ -119,9 +119,26 @@ message HasFieldMessage
optional int32 normal = 3;
}
-// Overriden type in generated C code
+// Overridden type in generated C code
message TypeOverrideMessage
{
required Enum1 normal = 1;
- required Enum1 overriden = 2 [(nanopb).type_override = TYPE_UINT32];
+ required Enum1 overridden = 2 [(nanopb).type_override = TYPE_UINT32];
+}
+
+// Deprecated field
+message DeprecatedFieldMessage
+{
+ option (nanopb_msgopt).discard_deprecated = true;
+ required int32 normal = 1;
+ required int32 discarded_deprecatedfield = 2 [deprecated = true];
+ required int32 kept_deprecatedfield = 3 [deprecated = true, (nanopb).discard_deprecated = false];
+}
+
+// Deprecated message
+message DeprecatedMessage
+{
+ option (nanopb_msgopt).discard_deprecated = true;
+ option deprecated = true;
+ required int32 deprecatedmessagefield = 1;
}
diff --git a/vendor/nanopb/tests/options/options_h.expected b/vendor/nanopb/tests/options/options_h.expected
index 9e3be16a..062b6430 100644
--- a/vendor/nanopb/tests/options/options_h.expected
+++ b/vendor/nanopb/tests/options/options_h.expected
@@ -18,7 +18,10 @@ Message5_EnumValue1
#define Message5_msgid 105
! has_proto3field
Enum1 normal
-uint32_t overriden
+uint32_t overridden
#define TypeOverrideMessage_init_default[ ]*\{_Enum1_MIN, 0\}
#define TypeOverrideMessage_init_zero[ ]*\{_Enum1_MIN, 0\}
+! discarded_deprecatedfield
+kept_deprecatedfield
+! deprecatedmessagefield
diff --git a/vendor/nanopb/tests/options/proto3_options.expected b/vendor/nanopb/tests/options/proto3_options.expected
index 26985b10..1dbcbb00 100644
--- a/vendor/nanopb/tests/options/proto3_options.expected
+++ b/vendor/nanopb/tests/options/proto3_options.expected
@@ -3,3 +3,5 @@ bool has_proto3_off
! bool has_proto3_on
bool has_normal_submsg
! bool has_sng_submsg
+! bool has_required_submsg
+X\(a,\s*STATIC,\s*REQUIRED,\s*MESSAGE,\s*required_submsg,
diff --git a/vendor/nanopb/tests/options/proto3_options.proto b/vendor/nanopb/tests/options/proto3_options.proto
index b73d03ce..12ec9ca8 100644
--- a/vendor/nanopb/tests/options/proto3_options.proto
+++ b/vendor/nanopb/tests/options/proto3_options.proto
@@ -14,5 +14,6 @@ message Message1
int32 proto3_on = 3 [(nanopb).proto3 = true];
SubMsg normal_submsg = 4;
SubMsg sng_submsg = 5 [(nanopb).proto3_singular_msgs = true];
+ SubMsg required_submsg = 6 [(nanopb).label_override = LABEL_REQUIRED];
}
diff --git a/vendor/nanopb/tests/proto3_optional/SConscript b/vendor/nanopb/tests/proto3_optional/SConscript
index f8a5b5c2..cdaa2a07 100644
--- a/vendor/nanopb/tests/proto3_optional/SConscript
+++ b/vendor/nanopb/tests/proto3_optional/SConscript
@@ -6,8 +6,8 @@ import re
version = None
if 'PROTOC_VERSION' in env:
- match = re.search('([0-9]+).([0-9]+).([0-9]+)', env['PROTOC_VERSION'])
- version = (int(match.group(1)), int(match.group(2)), int(match.group(3)))
+ match = re.search('(?:([0-9]+).)?([0-9]+).([0-9]+)', env['PROTOC_VERSION'])
+ version = (int(match.group(1) or 3), int(match.group(2)), int(match.group(3)))
# Oneof is supported by protoc >= 3.12.0
if env.GetOption('clean') or (version and (version[0] > 3 or (version[0] == 3 and version[1] >= 12))):
diff --git a/vendor/nanopb/tests/recursive_proto/SConscript b/vendor/nanopb/tests/recursive_proto/SConscript
new file mode 100644
index 00000000..8205851e
--- /dev/null
+++ b/vendor/nanopb/tests/recursive_proto/SConscript
@@ -0,0 +1,6 @@
+# Test building of a recursive protobuf definition
+
+Import('env')
+
+env.NanopbProto("recursive.proto")
+env.Object("recursive.pb.c")
diff --git a/vendor/nanopb/tests/recursive_proto/recursive.proto b/vendor/nanopb/tests/recursive_proto/recursive.proto
new file mode 100644
index 00000000..b4eee223
--- /dev/null
+++ b/vendor/nanopb/tests/recursive_proto/recursive.proto
@@ -0,0 +1,12 @@
+message SingleRecursion {
+ optional SingleRecursion msg = 1;
+}
+
+message Recurse1 {
+ optional Recurse2 msg = 1;
+}
+
+message Recurse2 {
+ optional Recurse1 msg = 1;
+ optional SingleRecursion msg2 = 2;
+}
diff --git a/vendor/nanopb/tests/regression/issue_485/uint8.expected b/vendor/nanopb/tests/regression/issue_485/uint8.expected
index 291ab817..c110c2df 100644
--- a/vendor/nanopb/tests/regression/issue_485/uint8.expected
+++ b/vendor/nanopb/tests/regression/issue_485/uint8.expected
@@ -1,3 +1,3 @@
-! ^\s*[^/* ].*uint8_t
-! ^\s*[^/* ].*int8_t
+! ^(?!.*\*)(?!.*typedef).*uint8_t.*
+! ^(?!.*\*)(?!.*typedef).*int8_t.*
diff --git a/vendor/nanopb/tests/regression/issue_692/SConscript b/vendor/nanopb/tests/regression/issue_692/SConscript
new file mode 100644
index 00000000..11f07ca2
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_692/SConscript
@@ -0,0 +1,9 @@
+# Regression test for #692:
+# Duplicate declarations of size_unions with repeated fields inside a oneof
+
+Import("env")
+
+env.NanopbProto("other.proto")
+env.NanopbProto(["oneof.proto", "other.proto"])
+env.Object("oneof.pb.c")
+env.Object("test.c")
diff --git a/vendor/nanopb/tests/regression/issue_692/oneof.proto b/vendor/nanopb/tests/regression/issue_692/oneof.proto
new file mode 100644
index 00000000..7f7baafe
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_692/oneof.proto
@@ -0,0 +1,19 @@
+syntax = "proto3";
+
+import "other.proto";
+
+message FirstOneof {}
+
+message Bar {
+ oneof content {
+ FirstOneof first = 1;
+ SecondOneof second = 2; // unknown size if no options are considered
+ }
+}
+
+message Foo {
+ AnotherList foo = 1; // again, unknown size
+ Bar bar = 2; // no duplicate size_union shall be generated anymore
+}
+
+
diff --git a/vendor/nanopb/tests/regression/issue_692/other.proto b/vendor/nanopb/tests/regression/issue_692/other.proto
new file mode 100644
index 00000000..733103cb
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_692/other.proto
@@ -0,0 +1,9 @@
+syntax = "proto3";
+
+message SecondOneof {
+ repeated int32 foo = 1;
+}
+
+message AnotherList {
+ repeated int32 bar = 1;
+}
\ No newline at end of file
diff --git a/vendor/nanopb/tests/regression/issue_692/test.c b/vendor/nanopb/tests/regression/issue_692/test.c
new file mode 100644
index 00000000..95281b7d
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_692/test.c
@@ -0,0 +1,7 @@
+/* This fakes the situation where other.proto was not found at generation time,
+ so size_union declarations are generated. */
+
+#define SecondOneof_size 88
+#define AnotherList_size 88
+
+#include "oneof.pb.h"
diff --git a/vendor/nanopb/tests/regression/issue_746/SConscript b/vendor/nanopb/tests/regression/issue_746/SConscript
new file mode 100644
index 00000000..5a59d8a3
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_746/SConscript
@@ -0,0 +1,8 @@
+# Regression test for #746:
+# Name conflict when generating default values for message
+
+Import("env")
+
+env.NanopbProto("test.proto")
+env.Object("test.pb.c")
+
diff --git a/vendor/nanopb/tests/regression/issue_746/test.proto b/vendor/nanopb/tests/regression/issue_746/test.proto
new file mode 100644
index 00000000..fe768632
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_746/test.proto
@@ -0,0 +1,25 @@
+syntax = "proto2"; // must be "proto2", proto3 requires enums starting with 0.
+
+enum Enum
+{
+ enumerand = 1; // must be non-zero
+}
+
+message Outer1
+{
+ message Inner
+ {
+ optional Enum enum1 = 1; // can also be "required"
+ }
+}
+
+message Outer2
+{
+ message Inner
+ {
+ // further trouble if this is also named "enum1"
+ optional Enum enum2 = 1; // can also be "required"
+
+ }
+}
+
diff --git a/vendor/nanopb/tests/regression/issue_783/SConscript b/vendor/nanopb/tests/regression/issue_783/SConscript
new file mode 100644
index 00000000..7041ae27
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_783/SConscript
@@ -0,0 +1,19 @@
+# Regression test for issues #783 and #820:
+# Could not find enum type when using mangle_names:M_STRIP_PACKAGE
+# when using multiple packages. Same when overriding package name
+# with "(nanopb_fileopt).package".
+
+Import('env')
+
+incpath = env.Clone()
+incpath.Append(CPPPATH="$BUILD/regression/issue_783")
+
+a = incpath.NanopbProto(['folder_A/file_A', 'folder_A/file_A.options'])
+b = incpath.NanopbProto(['folder_C/file_C'])
+c = incpath.NanopbProto(['folder_B/file_B'])
+
+incpath.Depends(c, [a, b])
+
+incpath.Object('folder_A/file_A.pb.c')
+incpath.Object('folder_B/file_B.pb.c')
+incpath.Object('folder_C/file_C.pb.c')
diff --git a/vendor/nanopb/tests/regression/issue_783/folder_A/file_A.options b/vendor/nanopb/tests/regression/issue_783/folder_A/file_A.options
new file mode 100644
index 00000000..d9b4e415
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_783/folder_A/file_A.options
@@ -0,0 +1 @@
+* mangle_names:M_STRIP_PACKAGE
diff --git a/vendor/nanopb/tests/regression/issue_783/folder_A/file_A.proto b/vendor/nanopb/tests/regression/issue_783/folder_A/file_A.proto
new file mode 100644
index 00000000..f9d8fcd5
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_783/folder_A/file_A.proto
@@ -0,0 +1,7 @@
+syntax = "proto2";
+package folder_A;
+enum my_enum {
+ ENUM_0 = 0;
+ ENUM_1 = 1;
+ ENUM_2 = 2;
+}
diff --git a/vendor/nanopb/tests/regression/issue_783/folder_B/file_B.proto b/vendor/nanopb/tests/regression/issue_783/folder_B/file_B.proto
new file mode 100644
index 00000000..f4c2a85a
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_783/folder_B/file_B.proto
@@ -0,0 +1,10 @@
+syntax = "proto2";
+package folder_B;
+
+import "folder_A/file_A.proto";
+import "folder_C/file_C.proto";
+
+message dummy {
+ required folder_A.my_enum value = 1;
+ required folder_C.messageC msg = 2;
+}
diff --git a/vendor/nanopb/tests/regression/issue_783/folder_C/file_C.proto b/vendor/nanopb/tests/regression/issue_783/folder_C/file_C.proto
new file mode 100644
index 00000000..1be20161
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_783/folder_C/file_C.proto
@@ -0,0 +1,11 @@
+syntax = "proto2";
+
+import "nanopb.proto";
+
+package folder_C;
+option (nanopb_fileopt).package = "dir_C";
+
+message messageC {
+ required uint32 a = 1;
+}
+
diff --git a/vendor/nanopb/tests/regression/issue_795/SConscript b/vendor/nanopb/tests/regression/issue_795/SConscript
new file mode 100644
index 00000000..e7295e3a
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_795/SConscript
@@ -0,0 +1,16 @@
+# Regression test for issue #795:
+# "Lookup of non-absolute type names is not supported" when using
+# mangle_names:M_STRIP_PACKAGE
+
+Import('env')
+
+opts = env.Clone()
+
+opts.Append(NANOPBFLAGS = "-s mangle_names=M_STRIP_PACKAGE")
+
+opts.NanopbProto("test.proto")
+opts.NanopbProto("test2.proto")
+opts.NanopbProto("test3.proto")
+opts.Object("test.pb.c")
+opts.Object("test2.pb.c")
+opts.Object("test3.pb.c")
diff --git a/vendor/nanopb/tests/regression/issue_795/test.proto b/vendor/nanopb/tests/regression/issue_795/test.proto
new file mode 100644
index 00000000..53f2edb3
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_795/test.proto
@@ -0,0 +1,32 @@
+syntax = "proto3";
+
+import "nanopb.proto";
+
+package test.package;
+
+message GenericResponse
+{
+ bool success = 1;
+ string detail = 2 [(nanopb).max_length = 127];
+ int32 int_code = 3;
+}
+
+message TrippleInt {
+ int32 v1 = 1;
+ int32 v2 = 2;
+ int32 v3 = 3;
+}
+
+message TrippleDouble {
+ double v1 = 1;
+ double v2 = 2;
+ double v3 = 3;
+}
+
+enum CircuitID {
+ CIRCUIT_ID_DRIVE_MOTORS = 0;
+ CIRCUIT_ID_GENERATOR = 1;
+ CIRCUIT_ID_PAYLOAD = 2;
+ CIRCUIT_ID_MID = 3;
+ CIRCUIT_ID_SPARE = 4;
+}
diff --git a/vendor/nanopb/tests/regression/issue_795/test2.proto b/vendor/nanopb/tests/regression/issue_795/test2.proto
new file mode 100644
index 00000000..a0dca405
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_795/test2.proto
@@ -0,0 +1,9 @@
+syntax = "proto3";
+
+enum TestEnum {
+ TEST_0 = 0;
+}
+
+message TestMsg {
+ TestEnum a = 1;
+}
diff --git a/vendor/nanopb/tests/regression/issue_795/test3.proto b/vendor/nanopb/tests/regression/issue_795/test3.proto
new file mode 100644
index 00000000..7d53065f
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_795/test3.proto
@@ -0,0 +1,9 @@
+syntax = "proto3";
+
+import "test2.proto";
+
+package test3.package;
+
+message TestMessage {
+ TestEnum a = 1;
+}
diff --git a/vendor/nanopb/tests/regression/issue_838/SConscript b/vendor/nanopb/tests/regression/issue_838/SConscript
new file mode 100644
index 00000000..5646deb6
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_838/SConscript
@@ -0,0 +1,8 @@
+# Regression test for issue #838:
+# Test enum to string functionality with C++ main program
+
+Import('env')
+env.NanopbProto("enum.proto")
+p = env.Program(["enum_to_string.cxx", "enum.pb.c"])
+env.RunTest(p)
+
diff --git a/vendor/nanopb/tests/regression/issue_838/enum.proto b/vendor/nanopb/tests/regression/issue_838/enum.proto
new file mode 100644
index 00000000..07c67363
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_838/enum.proto
@@ -0,0 +1,19 @@
+/* Test enum to string function generation */
+
+syntax = "proto2";
+
+import "nanopb.proto";
+
+option (nanopb_fileopt).enum_to_string = true;
+
+enum MyEnum {
+ VALUE1 = 1;
+ VALUE2 = 2;
+ VALUE15 = 15;
+}
+
+enum MyShortNameEnum {
+ option (nanopb_enumopt).long_names = false;
+ MSNE_VALUE256 = 256;
+}
+
diff --git a/vendor/nanopb/tests/regression/issue_838/enum_to_string.cxx b/vendor/nanopb/tests/regression/issue_838/enum_to_string.cxx
new file mode 100644
index 00000000..69c4b40d
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_838/enum_to_string.cxx
@@ -0,0 +1,19 @@
+#include
+#include "unittests.h"
+#include "enum.pb.h"
+
+extern "C" int main()
+{
+ int status = 0;
+ TEST(strcmp(MyEnum_name(MyEnum_VALUE1), "VALUE1") == 0);
+ TEST(strcmp(MyEnum_name(MyEnum_VALUE2), "VALUE2") == 0);
+ TEST(strcmp(MyEnum_name(MyEnum_VALUE15), "VALUE15") == 0);
+ TEST(strcmp(MyShortNameEnum_name(MSNE_VALUE256), "MSNE_VALUE256") == 0);
+ TEST(strcmp(MyShortNameEnum_name((MyShortNameEnum)100), "unknown") == 0);
+
+ if (status != 0)
+ fprintf(stdout, "\n\nSome tests FAILED!\n");
+
+ return status;
+}
+
diff --git a/vendor/nanopb/tests/regression/issue_869/SConscript b/vendor/nanopb/tests/regression/issue_869/SConscript
new file mode 100644
index 00000000..4adda3b7
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_869/SConscript
@@ -0,0 +1,7 @@
+# Regression test for #869:
+# Generation of PB_STATIC_ASSERT can lead to an identifier that exceeds compiler maximum length
+
+Import("env")
+
+env.NanopbProto("bigfile.proto")
+env.Object("bigfile.pb.c")
diff --git a/vendor/nanopb/tests/regression/issue_869/bigfile.proto b/vendor/nanopb/tests/regression/issue_869/bigfile.proto
new file mode 100644
index 00000000..99a87618
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_869/bigfile.proto
@@ -0,0 +1,623 @@
+// Test generation and compilation with a file that contains a lot of messages.
+// This tests for symbol name size exceeding limits.
+
+syntax = "proto3";
+
+message LongMessage {
+ int32 field1 = 1;
+ int32 field2 = 2;
+ int32 field3 = 3;
+ int32 field4 = 4;
+ int32 field5 = 5;
+ int32 field6 = 6;
+ int32 field7 = 7;
+ int32 field8 = 8;
+ int32 field9 = 9;
+ int32 field10 = 10;
+ int32 field11 = 11;
+ int32 field12 = 12;
+ int32 field13 = 13;
+ int32 field14 = 14;
+ int32 field15 = 15;
+ int32 field16 = 16;
+ int32 field17 = 17;
+ int32 field18 = 18;
+ int32 field19 = 19;
+ int32 field20 = 20;
+ int32 field21 = 21;
+ int32 field22 = 22;
+ int32 field23 = 23;
+ int32 field24 = 24;
+ int32 field25 = 25;
+ int32 field26 = 26;
+ int32 field27 = 27;
+ int32 field28 = 28;
+ int32 field29 = 29;
+ int32 field30 = 30;
+ int32 field31 = 31;
+ int32 field32 = 32;
+ int32 field33 = 33;
+ int32 field34 = 34;
+ int32 field35 = 35;
+ int32 field36 = 36;
+ int32 field37 = 37;
+ int32 field38 = 38;
+ int32 field39 = 39;
+ int32 field40 = 40;
+ int32 field41 = 41;
+ int32 field42 = 42;
+ int32 field43 = 43;
+ int32 field44 = 44;
+ int32 field45 = 45;
+ int32 field46 = 46;
+ int32 field47 = 47;
+ int32 field48 = 48;
+ int32 field49 = 49;
+ int32 field50 = 50;
+ int32 field51 = 51;
+ int32 field52 = 52;
+ int32 field53 = 53;
+ int32 field54 = 54;
+ int32 field55 = 55;
+ int32 field56 = 56;
+ int32 field57 = 57;
+ int32 field58 = 58;
+ int32 field59 = 59;
+ int32 field60 = 60;
+ int32 field61 = 61;
+ int32 field62 = 62;
+ int32 field63 = 63;
+ int32 field64 = 64;
+ int32 field65 = 65;
+ int32 field66 = 66;
+ int32 field67 = 67;
+ int32 field68 = 68;
+ int32 field69 = 69;
+ int32 field70 = 70;
+ int32 field71 = 71;
+ int32 field72 = 72;
+ int32 field73 = 73;
+ int32 field74 = 74;
+ int32 field75 = 75;
+ int32 field76 = 76;
+ int32 field77 = 77;
+ int32 field78 = 78;
+ int32 field79 = 79;
+ int32 field80 = 80;
+ int32 field81 = 81;
+ int32 field82 = 82;
+ int32 field83 = 83;
+ int32 field84 = 84;
+ int32 field85 = 85;
+ int32 field86 = 86;
+ int32 field87 = 87;
+ int32 field88 = 88;
+ int32 field89 = 89;
+ int32 field90 = 90;
+ int32 field91 = 91;
+ int32 field92 = 92;
+ int32 field93 = 93;
+ int32 field94 = 94;
+ int32 field95 = 95;
+ int32 field96 = 96;
+ int32 field97 = 97;
+ int32 field98 = 98;
+ int32 field99 = 99;
+ int32 field100 = 100;
+ int32 field101 = 101;
+ int32 field102 = 102;
+ int32 field103 = 103;
+ int32 field104 = 104;
+ int32 field105 = 105;
+ int32 field106 = 106;
+ int32 field107 = 107;
+ int32 field108 = 108;
+ int32 field109 = 109;
+ int32 field110 = 110;
+ int32 field111 = 111;
+ int32 field112 = 112;
+ int32 field113 = 113;
+ int32 field114 = 114;
+ int32 field115 = 115;
+ int32 field116 = 116;
+ int32 field117 = 117;
+ int32 field118 = 118;
+ int32 field119 = 119;
+ int32 field120 = 120;
+ int32 field121 = 121;
+ int32 field122 = 122;
+ int32 field123 = 123;
+ int32 field124 = 124;
+ int32 field125 = 125;
+ int32 field126 = 126;
+ int32 field127 = 127;
+ int32 field128 = 128;
+ int32 field129 = 129;
+ int32 field130 = 130;
+ int32 field131 = 131;
+ int32 field132 = 132;
+ int32 field133 = 133;
+ int32 field134 = 134;
+ int32 field135 = 135;
+ int32 field136 = 136;
+ int32 field137 = 137;
+ int32 field138 = 138;
+ int32 field139 = 139;
+ int32 field140 = 140;
+ int32 field141 = 141;
+ int32 field142 = 142;
+ int32 field143 = 143;
+ int32 field144 = 144;
+ int32 field145 = 145;
+ int32 field146 = 146;
+ int32 field147 = 147;
+ int32 field148 = 148;
+ int32 field149 = 149;
+ int32 field150 = 150;
+ int32 field151 = 151;
+ int32 field152 = 152;
+ int32 field153 = 153;
+ int32 field154 = 154;
+ int32 field155 = 155;
+ int32 field156 = 156;
+ int32 field157 = 157;
+ int32 field158 = 158;
+ int32 field159 = 159;
+ int32 field160 = 160;
+ int32 field161 = 161;
+ int32 field162 = 162;
+ int32 field163 = 163;
+ int32 field164 = 164;
+ int32 field165 = 165;
+ int32 field166 = 166;
+ int32 field167 = 167;
+ int32 field168 = 168;
+ int32 field169 = 169;
+ int32 field170 = 170;
+ int32 field171 = 171;
+ int32 field172 = 172;
+ int32 field173 = 173;
+ int32 field174 = 174;
+ int32 field175 = 175;
+ int32 field176 = 176;
+ int32 field177 = 177;
+ int32 field178 = 178;
+ int32 field179 = 179;
+ int32 field180 = 180;
+ int32 field181 = 181;
+ int32 field182 = 182;
+ int32 field183 = 183;
+ int32 field184 = 184;
+ int32 field185 = 185;
+ int32 field186 = 186;
+ int32 field187 = 187;
+ int32 field188 = 188;
+ int32 field189 = 189;
+ int32 field190 = 190;
+ int32 field191 = 191;
+ int32 field192 = 192;
+ int32 field193 = 193;
+ int32 field194 = 194;
+ int32 field195 = 195;
+ int32 field196 = 196;
+ int32 field197 = 197;
+ int32 field198 = 198;
+ int32 field199 = 199;
+ int32 field200 = 200;
+ int32 field201 = 201;
+ int32 field202 = 202;
+ int32 field203 = 203;
+ int32 field204 = 204;
+ int32 field205 = 205;
+ int32 field206 = 206;
+ int32 field207 = 207;
+ int32 field208 = 208;
+ int32 field209 = 209;
+ int32 field210 = 210;
+ int32 field211 = 211;
+ int32 field212 = 212;
+ int32 field213 = 213;
+ int32 field214 = 214;
+ int32 field215 = 215;
+ int32 field216 = 216;
+ int32 field217 = 217;
+ int32 field218 = 218;
+ int32 field219 = 219;
+ int32 field220 = 220;
+ int32 field221 = 221;
+ int32 field222 = 222;
+ int32 field223 = 223;
+ int32 field224 = 224;
+ int32 field225 = 225;
+ int32 field226 = 226;
+ int32 field227 = 227;
+ int32 field228 = 228;
+ int32 field229 = 229;
+ int32 field230 = 230;
+ int32 field231 = 231;
+ int32 field232 = 232;
+ int32 field233 = 233;
+ int32 field234 = 234;
+ int32 field235 = 235;
+ int32 field236 = 236;
+ int32 field237 = 237;
+ int32 field238 = 238;
+ int32 field239 = 239;
+ int32 field240 = 240;
+ int32 field241 = 241;
+ int32 field242 = 242;
+ int32 field243 = 243;
+ int32 field244 = 244;
+ int32 field245 = 245;
+ int32 field246 = 246;
+ int32 field247 = 247;
+ int32 field248 = 248;
+ int32 field249 = 249;
+ int32 field250 = 250;
+ int32 field251 = 251;
+ int32 field252 = 252;
+ int32 field253 = 253;
+ int32 field254 = 254;
+ int32 field255 = 255;
+ int32 field256 = 256;
+ int32 field257 = 257;
+ int32 field258 = 258;
+ int32 field259 = 259;
+ int32 field260 = 260;
+ int32 field261 = 261;
+ int32 field262 = 262;
+ int32 field263 = 263;
+ int32 field264 = 264;
+ int32 field265 = 265;
+ int32 field266 = 266;
+ int32 field267 = 267;
+ int32 field268 = 268;
+ int32 field269 = 269;
+ int32 field270 = 270;
+ int32 field271 = 271;
+ int32 field272 = 272;
+ int32 field273 = 273;
+ int32 field274 = 274;
+ int32 field275 = 275;
+ int32 field276 = 276;
+ int32 field277 = 277;
+ int32 field278 = 278;
+ int32 field279 = 279;
+ int32 field280 = 280;
+ int32 field281 = 281;
+ int32 field282 = 282;
+ int32 field283 = 283;
+ int32 field284 = 284;
+ int32 field285 = 285;
+ int32 field286 = 286;
+ int32 field287 = 287;
+ int32 field288 = 288;
+ int32 field289 = 289;
+ int32 field290 = 290;
+ int32 field291 = 291;
+ int32 field292 = 292;
+ int32 field293 = 293;
+ int32 field294 = 294;
+ int32 field295 = 295;
+ int32 field296 = 296;
+ int32 field297 = 297;
+ int32 field298 = 298;
+ int32 field299 = 299;
+ int32 field300 = 300;
+ int32 field301 = 301;
+ int32 field302 = 302;
+ int32 field303 = 303;
+ int32 field304 = 304;
+ int32 field305 = 305;
+ int32 field306 = 306;
+ int32 field307 = 307;
+ int32 field308 = 308;
+ int32 field309 = 309;
+ int32 field310 = 310;
+ int32 field311 = 311;
+ int32 field312 = 312;
+ int32 field313 = 313;
+ int32 field314 = 314;
+ int32 field315 = 315;
+ int32 field316 = 316;
+ int32 field317 = 317;
+ int32 field318 = 318;
+ int32 field319 = 319;
+ int32 field320 = 320;
+ int32 field321 = 321;
+ int32 field322 = 322;
+ int32 field323 = 323;
+ int32 field324 = 324;
+ int32 field325 = 325;
+ int32 field326 = 326;
+ int32 field327 = 327;
+ int32 field328 = 328;
+ int32 field329 = 329;
+ int32 field330 = 330;
+ int32 field331 = 331;
+ int32 field332 = 332;
+ int32 field333 = 333;
+ int32 field334 = 334;
+ int32 field335 = 335;
+ int32 field336 = 336;
+ int32 field337 = 337;
+ int32 field338 = 338;
+ int32 field339 = 339;
+ int32 field340 = 340;
+ int32 field341 = 341;
+ int32 field342 = 342;
+ int32 field343 = 343;
+ int32 field344 = 344;
+ int32 field345 = 345;
+ int32 field346 = 346;
+ int32 field347 = 347;
+ int32 field348 = 348;
+ int32 field349 = 349;
+ int32 field350 = 350;
+ int32 field351 = 351;
+ int32 field352 = 352;
+ int32 field353 = 353;
+ int32 field354 = 354;
+ int32 field355 = 355;
+ int32 field356 = 356;
+ int32 field357 = 357;
+ int32 field358 = 358;
+ int32 field359 = 359;
+ int32 field360 = 360;
+ int32 field361 = 361;
+ int32 field362 = 362;
+ int32 field363 = 363;
+ int32 field364 = 364;
+ int32 field365 = 365;
+ int32 field366 = 366;
+ int32 field367 = 367;
+ int32 field368 = 368;
+ int32 field369 = 369;
+ int32 field370 = 370;
+ int32 field371 = 371;
+ int32 field372 = 372;
+ int32 field373 = 373;
+ int32 field374 = 374;
+ int32 field375 = 375;
+ int32 field376 = 376;
+ int32 field377 = 377;
+ int32 field378 = 378;
+ int32 field379 = 379;
+ int32 field380 = 380;
+ int32 field381 = 381;
+ int32 field382 = 382;
+ int32 field383 = 383;
+ int32 field384 = 384;
+ int32 field385 = 385;
+ int32 field386 = 386;
+ int32 field387 = 387;
+ int32 field388 = 388;
+ int32 field389 = 389;
+ int32 field390 = 390;
+ int32 field391 = 391;
+ int32 field392 = 392;
+ int32 field393 = 393;
+ int32 field394 = 394;
+ int32 field395 = 395;
+ int32 field396 = 396;
+ int32 field397 = 397;
+ int32 field398 = 398;
+ int32 field399 = 399;
+ int32 field400 = 400;
+ int32 field401 = 401;
+ int32 field402 = 402;
+ int32 field403 = 403;
+ int32 field404 = 404;
+ int32 field405 = 405;
+ int32 field406 = 406;
+ int32 field407 = 407;
+ int32 field408 = 408;
+ int32 field409 = 409;
+ int32 field410 = 410;
+ int32 field411 = 411;
+ int32 field412 = 412;
+ int32 field413 = 413;
+ int32 field414 = 414;
+ int32 field415 = 415;
+ int32 field416 = 416;
+ int32 field417 = 417;
+ int32 field418 = 418;
+ int32 field419 = 419;
+ int32 field420 = 420;
+ int32 field421 = 421;
+ int32 field422 = 422;
+ int32 field423 = 423;
+ int32 field424 = 424;
+ int32 field425 = 425;
+ int32 field426 = 426;
+ int32 field427 = 427;
+ int32 field428 = 428;
+ int32 field429 = 429;
+ int32 field430 = 430;
+ int32 field431 = 431;
+ int32 field432 = 432;
+ int32 field433 = 433;
+ int32 field434 = 434;
+ int32 field435 = 435;
+ int32 field436 = 436;
+ int32 field437 = 437;
+ int32 field438 = 438;
+ int32 field439 = 439;
+ int32 field440 = 440;
+ int32 field441 = 441;
+ int32 field442 = 442;
+ int32 field443 = 443;
+ int32 field444 = 444;
+ int32 field445 = 445;
+ int32 field446 = 446;
+ int32 field447 = 447;
+ int32 field448 = 448;
+ int32 field449 = 449;
+ int32 field450 = 450;
+ int32 field451 = 451;
+ int32 field452 = 452;
+ int32 field453 = 453;
+ int32 field454 = 454;
+ int32 field455 = 455;
+ int32 field456 = 456;
+ int32 field457 = 457;
+ int32 field458 = 458;
+ int32 field459 = 459;
+ int32 field460 = 460;
+ int32 field461 = 461;
+ int32 field462 = 462;
+ int32 field463 = 463;
+ int32 field464 = 464;
+ int32 field465 = 465;
+ int32 field466 = 466;
+ int32 field467 = 467;
+ int32 field468 = 468;
+ int32 field469 = 469;
+ int32 field470 = 470;
+ int32 field471 = 471;
+ int32 field472 = 472;
+ int32 field473 = 473;
+ int32 field474 = 474;
+ int32 field475 = 475;
+ int32 field476 = 476;
+ int32 field477 = 477;
+ int32 field478 = 478;
+ int32 field479 = 479;
+ int32 field480 = 480;
+ int32 field481 = 481;
+ int32 field482 = 482;
+ int32 field483 = 483;
+ int32 field484 = 484;
+ int32 field485 = 485;
+ int32 field486 = 486;
+ int32 field487 = 487;
+ int32 field488 = 488;
+ int32 field489 = 489;
+ int32 field490 = 490;
+ int32 field491 = 491;
+ int32 field492 = 492;
+ int32 field493 = 493;
+ int32 field494 = 494;
+ int32 field495 = 495;
+ int32 field496 = 496;
+ int32 field497 = 497;
+ int32 field498 = 498;
+ int32 field499 = 499;
+ int32 field500 = 500;
+ int32 field501 = 501;
+ int32 field502 = 502;
+ int32 field503 = 503;
+ int32 field504 = 504;
+ int32 field505 = 505;
+ int32 field506 = 506;
+ int32 field507 = 507;
+ int32 field508 = 508;
+ int32 field509 = 509;
+ int32 field510 = 510;
+ int32 field511 = 511;
+ int32 field512 = 512;
+}
+
+message Message0 { LongMessage field1 = 1; }
+message Message1 { Message0 submsg = 1; }
+message Message2 { Message1 submsg = 2; }
+message Message3 { Message2 submsg = 3; }
+message Message4 { Message3 submsg = 4; }
+message Message5 { Message4 submsg = 5; }
+message Message6 { Message5 submsg = 6; }
+message Message7 { Message6 submsg = 7; }
+message Message8 { Message7 submsg = 8; }
+message Message9 { Message8 submsg = 9; }
+message Message10 { Message9 submsg = 10; }
+message Message11 { Message10 submsg = 11; }
+message Message12 { Message11 submsg = 12; }
+message Message13 { Message12 submsg = 13; }
+message Message14 { Message13 submsg = 14; }
+message Message15 { Message14 submsg = 15; }
+message Message16 { Message15 submsg = 16; }
+
+// Avoid excessive recursion
+
+message Message17 { Message16 submsg = 17; }
+message Message18 { Message16 submsg = 18; }
+message Message19 { Message16 submsg = 19; }
+message Message20 { Message16 submsg = 20; }
+message Message21 { Message16 submsg = 21; }
+message Message22 { Message16 submsg = 22; }
+message Message23 { Message16 submsg = 23; }
+message Message24 { Message16 submsg = 24; }
+message Message25 { Message16 submsg = 25; }
+message Message26 { Message16 submsg = 26; }
+message Message27 { Message16 submsg = 27; }
+message Message28 { Message16 submsg = 28; }
+message Message29 { Message16 submsg = 29; }
+message Message30 { Message16 submsg = 30; }
+message Message31 { Message16 submsg = 31; }
+message Message32 { Message16 submsg = 32; }
+message Message33 { Message16 submsg = 33; }
+message Message34 { Message16 submsg = 34; }
+message Message35 { Message16 submsg = 35; }
+message Message36 { Message16 submsg = 36; }
+message Message37 { Message16 submsg = 37; }
+message Message38 { Message16 submsg = 38; }
+message Message39 { Message16 submsg = 39; }
+message Message40 { Message16 submsg = 40; }
+message Message41 { Message16 submsg = 41; }
+message Message42 { Message16 submsg = 42; }
+message Message43 { Message16 submsg = 43; }
+message Message44 { Message16 submsg = 44; }
+message Message45 { Message16 submsg = 45; }
+message Message46 { Message16 submsg = 46; }
+message Message47 { Message16 submsg = 47; }
+message Message48 { Message16 submsg = 48; }
+message Message49 { Message16 submsg = 49; }
+message Message50 { Message16 submsg = 50; }
+message Message51 { Message16 submsg = 51; }
+message Message52 { Message16 submsg = 52; }
+message Message53 { Message16 submsg = 53; }
+message Message54 { Message16 submsg = 54; }
+message Message55 { Message16 submsg = 55; }
+message Message56 { Message16 submsg = 56; }
+message Message57 { Message16 submsg = 57; }
+message Message58 { Message16 submsg = 58; }
+message Message59 { Message16 submsg = 59; }
+message Message60 { Message16 submsg = 60; }
+message Message61 { Message16 submsg = 61; }
+message Message62 { Message16 submsg = 62; }
+message Message63 { Message16 submsg = 63; }
+message Message64 { Message16 submsg = 64; }
+message Message65 { Message16 submsg = 65; }
+message Message66 { Message16 submsg = 66; }
+message Message67 { Message16 submsg = 67; }
+message Message68 { Message16 submsg = 68; }
+message Message69 { Message16 submsg = 69; }
+message Message70 { Message16 submsg = 70; }
+message Message71 { Message16 submsg = 71; }
+message Message72 { Message16 submsg = 72; }
+message Message73 { Message16 submsg = 73; }
+message Message74 { Message16 submsg = 74; }
+message Message75 { Message16 submsg = 75; }
+message Message76 { Message16 submsg = 76; }
+message Message77 { Message16 submsg = 77; }
+message Message78 { Message16 submsg = 78; }
+message Message79 { Message16 submsg = 79; }
+message Message80 { Message16 submsg = 80; }
+message Message81 { Message16 submsg = 81; }
+message Message82 { Message16 submsg = 82; }
+message Message83 { Message16 submsg = 83; }
+message Message84 { Message16 submsg = 84; }
+message Message85 { Message16 submsg = 85; }
+message Message86 { Message16 submsg = 86; }
+message Message87 { Message16 submsg = 87; }
+message Message88 { Message16 submsg = 88; }
+message Message89 { Message16 submsg = 89; }
+message Message90 { Message16 submsg = 90; }
+message Message91 { Message16 submsg = 91; }
+message Message92 { Message16 submsg = 92; }
+message Message93 { Message16 submsg = 93; }
+message Message94 { Message16 submsg = 94; }
+message Message95 { Message16 submsg = 95; }
+message Message96 { Message16 submsg = 96; }
+message Message97 { Message16 submsg = 97; }
+message Message98 { Message16 submsg = 98; }
+message Message99 { Message16 submsg = 99; }
diff --git a/vendor/nanopb/tests/regression/issue_956/SConscript b/vendor/nanopb/tests/regression/issue_956/SConscript
new file mode 100644
index 00000000..2114cf12
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_956/SConscript
@@ -0,0 +1,8 @@
+# Regression test for #956:
+# (nanopb_msgopt).skip_message does not skip generation of map types
+
+Import("env")
+
+env.NanopbProto("skipmap.proto")
+env.Match(["skipmap.pb.h", "skipmap.expected"])
+env.Object("skipmap.pb.c")
diff --git a/vendor/nanopb/tests/regression/issue_956/skipmap.expected b/vendor/nanopb/tests/regression/issue_956/skipmap.expected
new file mode 100644
index 00000000..b0709895
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_956/skipmap.expected
@@ -0,0 +1,4 @@
+Message1_Mymap1Entry mymap1
+! Message2_Mymap2Entry
+! Message3_Mymap3Entry
+! Message4_Mymap4Entry
diff --git a/vendor/nanopb/tests/regression/issue_956/skipmap.proto b/vendor/nanopb/tests/regression/issue_956/skipmap.proto
new file mode 100644
index 00000000..3e603b4e
--- /dev/null
+++ b/vendor/nanopb/tests/regression/issue_956/skipmap.proto
@@ -0,0 +1,28 @@
+syntax = "proto3";
+import "nanopb.proto";
+
+option (nanopb_fileopt).max_count = 4;
+
+message Message1 {
+ map<uint32, uint32> mymap1 = 1;
+}
+
+message Message2 {
+ option (nanopb_msgopt).skip_message = true;
+ map<uint32, uint32> mymap2 = 1;
+}
+
+message Message3 {
+ map<uint32, uint32> mymap3 = 1 [(nanopb).type = FT_IGNORE];
+}
+
+message Message4 {
+ option (nanopb_msgopt).skip_message = true;
+
+ message SubMessage4 {
+ option (nanopb_msgopt).skip_message = true;
+ uint32 dummy = 1;
+ }
+
+ map<uint32, SubMessage4> mymap4 = 1;
+}
diff --git a/vendor/nanopb/tests/site_scons/platforms/avr/run_test.c b/vendor/nanopb/tests/site_scons/platforms/avr/run_test.c
index 8e4679e5..eca0d328 100644
--- a/vendor/nanopb/tests/site_scons/platforms/avr/run_test.c
+++ b/vendor/nanopb/tests/site_scons/platforms/avr/run_test.c
@@ -146,7 +146,7 @@ int main(int argc, char *argv[])
filename = argv[2];
}
- elf_firmware_t firmware;
+ elf_firmware_t firmware = {};
elf_read_firmware(filename, &firmware);
avr_init(g_avr);
avr_load_firmware(g_avr, &firmware);
diff --git a/vendor/nanopb/tests/site_scons/site_init.py b/vendor/nanopb/tests/site_scons/site_init.py
index ce8f7e11..1f52b834 100644
--- a/vendor/nanopb/tests/site_scons/site_init.py
+++ b/vendor/nanopb/tests/site_scons/site_init.py
@@ -22,25 +22,30 @@
except ImportError:
pass
+# UTF-8 support on Python 2
+if sys.version_info.major == 2:
+ import codecs
+ open = codecs.open
+
def add_nanopb_builders(env):
'''Add the necessary builder commands for nanopb tests.'''
# Build command that runs a test program and saves the output
def run_test(target, source, env):
if len(source) > 1:
- infile = open(str(source[1]))
+ infile = open(str(source[1]), 'rb')
else:
infile = None
- if env.has_key("COMMAND"):
+ if "COMMAND" in env:
args = [env["COMMAND"]]
else:
args = [str(source[0])]
- if env.has_key('ARGS'):
+ if 'ARGS' in env:
args.extend(env['ARGS'])
- if env.has_key("TEST_RUNNER"):
+ if "TEST_RUNNER" in env:
args = [env["TEST_RUNNER"]] + args
print('Command line: ' + str(args))
@@ -98,8 +103,8 @@ def compare_files(target, source, env):
# Build command that checks that each pattern in source2 is found in source1.
def match_files(target, source, env):
- data = open(str(source[0]), 'rU').read()
- patterns = open(str(source[1]))
+ data = open(str(source[0]), 'r', encoding = 'utf-8').read()
+ patterns = open(str(source[1]), 'r', encoding = 'utf-8')
for pattern in patterns:
if pattern.strip():
invert = False
diff --git a/vendor/nanopb/tests/site_scons/site_tools/nanopb.py b/vendor/nanopb/tests/site_scons/site_tools/nanopb.py
index 1a258088..0b46c09f 100644
--- a/vendor/nanopb/tests/site_scons/site_tools/nanopb.py
+++ b/vendor/nanopb/tests/site_scons/site_tools/nanopb.py
@@ -34,6 +34,7 @@
from SCons.Script import Dir, File
import os.path
import platform
+import sys
try:
warningbase = SCons.Warnings.SConsWarning
@@ -46,7 +47,7 @@ class NanopbWarning(warningbase):
def _detect_nanopb(env):
'''Find the path to nanopb root directory.'''
- if env.has_key('NANOPB'):
+ if 'NANOPB' in env:
# Use nanopb dir given by user
return env['NANOPB']
@@ -60,7 +61,7 @@ def _detect_nanopb(env):
def _detect_python(env):
'''Find Python executable to use.'''
- if env.has_key('PYTHON'):
+ if 'PYTHON' in env:
return env['PYTHON']
p = env.WhereIs('python3')
@@ -85,7 +86,7 @@ def _detect_nanopb_generator(env):
def _detect_protoc(env):
'''Find the path to the protoc compiler.'''
- if env.has_key('PROTOC'):
+ if 'PROTOC' in env:
# Use protoc defined by user
return env['PROTOC']
@@ -113,7 +114,7 @@ def _detect_protoc(env):
def _detect_protocflags(env):
'''Find the options to use for protoc.'''
- if env.has_key('PROTOCFLAGS'):
+ if 'PROTOCFLAGS' in env:
return env['PROTOCFLAGS']
p = _detect_protoc(env)
@@ -151,7 +152,7 @@ def _nanopb_proto_actions(source, target, env, for_signature):
else:
nanopb_flags = '--source-extension=%s,--header-extension=%s:.' % (source_extension, header_extension)
- return SCons.Action.CommandAction('$PROTOC $PROTOCFLAGS %s --nanopb_out=%s %s' % (include_dirs, nanopb_flags, srcfile),
+ return SCons.Action.CommandAction('$PROTOC $PROTOCFLAGS %s "--nanopb_out=%s" %s' % (include_dirs, nanopb_flags, srcfile),
chdir = prefix)
def _nanopb_proto_emitter(target, source, env):
diff --git a/vendor/nanopb/tests/special_characters/SConscript b/vendor/nanopb/tests/special_characters/SConscript
index 2309cf2e..d3bd33b3 100644
--- a/vendor/nanopb/tests/special_characters/SConscript
+++ b/vendor/nanopb/tests/special_characters/SConscript
@@ -2,5 +2,7 @@
Import('env')
-env.NanopbProto("funny-proto+name has.characters.proto")
+env.NanopbProto(["funny-proto+name has.characters.proto", "funny-proto+name has.characters.options"])
env.Object("funny-proto+name has.characters.pb.c")
+env.Match(['funny-proto+name has.characters.pb.h', 'specchars.expected'])
+
diff --git a/vendor/nanopb/tests/special_characters/funny-proto+name has.characters.options b/vendor/nanopb/tests/special_characters/funny-proto+name has.characters.options
new file mode 100644
index 00000000..b9e25751
--- /dev/null
+++ b/vendor/nanopb/tests/special_characters/funny-proto+name has.characters.options
@@ -0,0 +1,3 @@
+// Unicode comment эмйÅÄÖ
+* max_count:10
+
diff --git a/vendor/nanopb/tests/special_characters/funny-proto+name has.characters.proto b/vendor/nanopb/tests/special_characters/funny-proto+name has.characters.proto
index f5170c8a..879b7968 100644
--- a/vendor/nanopb/tests/special_characters/funny-proto+name has.characters.proto
+++ b/vendor/nanopb/tests/special_characters/funny-proto+name has.characters.proto
@@ -1,8 +1,10 @@
syntax="proto2";
+// Unicode comment эмйÅÄÖ
message WorkingMessage {
required int32 b = 1;
required int32 Z = 2;
+ repeated int32 x = 3;
}
message FailingMessageBecauseMembersAreMacroParameter {
@@ -19,3 +21,5 @@ message TestMacroParametersAndUnderscores {
required int32 X___ = 6;
required int32 X____ = 7;
}
+
+
diff --git a/vendor/nanopb/tests/special_characters/specchars.expected b/vendor/nanopb/tests/special_characters/specchars.expected
new file mode 100644
index 00000000..dc90cc4d
--- /dev/null
+++ b/vendor/nanopb/tests/special_characters/specchars.expected
@@ -0,0 +1,3 @@
+int32_t x\[10\];
+Unicode comment эмйÅÄÖ
+
diff --git a/vendor/nanopb/tests/splint/splint.rc b/vendor/nanopb/tests/splint/splint.rc
index 0cf43761..c01e2fca 100644
--- a/vendor/nanopb/tests/splint/splint.rc
+++ b/vendor/nanopb/tests/splint/splint.rc
@@ -36,3 +36,5 @@
-noeffect
-usedef
+# Splint doesn't support C11
+-DPB_C99_STATIC_ASSERT
diff --git a/vendor/nanopb/tests/typename_mangling/SConscript b/vendor/nanopb/tests/typename_mangling/SConscript
index a3994f43..9e140d8c 100644
--- a/vendor/nanopb/tests/typename_mangling/SConscript
+++ b/vendor/nanopb/tests/typename_mangling/SConscript
@@ -1,24 +1,47 @@
-# Test mangle_names option
+# Test mangle_names option in various configurations.
+# Each sub testcase makes a modified copy of the files in the build directory.
Import('env')
-def set_mangling(type):
+def set_options(setting, value):
+ '''Create options file that sets a nanopb generator option'''
def command(target, source, env):
with open(str(source[0])) as src, open(str(target[0]), "w") as dst:
- dst.write("* mangle_names:{}\n".format(type))
+ dst.write("* {}:{}\n".format(setting, value))
dst.write(src.read())
return command
+def set_mangling(type):
+ return set_options('mangle_names', type)
+
+# Test type names when M_STRIP_PACKAGE option is used in a single file
env.Command("strip_package.options", "with_package.options", set_mangling("M_STRIP_PACKAGE"))
env.Command("strip_package.proto", "with_package.proto", Copy("$TARGET", "$SOURCE"))
env.NanopbProto(["strip_package", "strip_package.options"])
env.Program(["test_strip_package.c", "strip_package.pb.c", '$COMMON/pb_common.o'])
+# Test type names with M_STRIP_PACKAGE used in both files
+env.Command("strip_package_a.options", "with_package.options", set_mangling("M_STRIP_PACKAGE"))
+env.Command("strip_package_b.options", "with_package.options", set_mangling("M_STRIP_PACKAGE"))
+env.Command("strip_package_a.proto", "with_package_a.proto", Copy("$TARGET", "$SOURCE"))
+env.Command("strip_package_b.proto", "with_package_b.proto", Copy("$TARGET", "$SOURCE"))
+env.NanopbProto(["strip_package_a", "strip_package_a.options"])
+env.NanopbProto(["strip_package_b", "strip_package_b.options"])
+env.Program(["test_strip_package_dependencies.c", "strip_package_a.pb.c", "strip_package_b.pb.c", '$COMMON/pb_common.o'])
+
+# Test type names with M_STRIP_PACKAGE used in file A and (nanopb).package overrides package name in B
+env.Command("replace_package_b.options", "with_package.options", set_options("package", '"ReplacedName"'))
+env.Command("replace_package_b.proto", "with_package_b.proto", Copy("$TARGET", "$SOURCE"))
+env.NanopbProto(["replace_package_b", "replace_package_b.options", "strip_package_a.proto"])
+env.Program(["test_replace_package.c", "strip_package_a.pb.c", "replace_package_b.pb.c", '$COMMON/pb_common.o'])
+
+# Test M_FLATTEN with a single file
env.Command("flatten.options", "with_package.options", set_mangling("M_FLATTEN"))
env.Command("flatten.proto", "with_package.proto", Copy("$TARGET", "$SOURCE"))
env.NanopbProto(["flatten", "flatten.options"])
env.Program(["test_flatten.c", "flatten.pb.c", '$COMMON/pb_common.o'])
+# Test M_PACKAGE_INITIALS with a single file
env.Command("package_initials.options", "with_package.options", set_mangling("M_PACKAGE_INITIALS"))
env.Command("package_initials.proto", "with_package.proto", Copy("$TARGET", "$SOURCE"))
env.NanopbProto(["package_initials", "package_initials.options"])
diff --git a/vendor/nanopb/tests/typename_mangling/test_replace_package.c b/vendor/nanopb/tests/typename_mangling/test_replace_package.c
new file mode 100644
index 00000000..a1d86bcc
--- /dev/null
+++ b/vendor/nanopb/tests/typename_mangling/test_replace_package.c
@@ -0,0 +1,27 @@
+/*
+ * Tests if expected names are generated when M_STRIP_PACKAGE is used in one of the files.
+ */
+
+#include <stdio.h>
+#include "unittests.h"
+#include "replace_package_b.pb.h"
+
+int main()
+{
+ MessageA msgA1 = package_a_MessageA_init_default;
+ package_a_MessageA msgA2 = MessageA_init_default;
+
+ package_b_MessageB msgB1 = ReplacedName_MessageB_init_zero;
+ ReplacedName_MessageB msgB2 = package_b_MessageB_init_zero;
+
+ package_a_EnumA e1 = EnumA_VALUE_A_0;
+ EnumA e2 = EnumA_VALUE_A_1;
+ e2 = _package_a_EnumA_MIN;
+ e2 = _EnumA_MIN;
+ e2 = _package_a_EnumA_MAX;
+ e2 = _EnumA_MAX;
+ e2 = _package_a_EnumA_ARRAYSIZE;
+ e2 = _EnumA_ARRAYSIZE;
+
+ return msgA1.enum_a_field + msgA2.enum_a_field + msgB1.nested_enum + msgB2.nested_enum + e1 + e2; /* marks variables as used */
+}
diff --git a/vendor/nanopb/tests/typename_mangling/test_strip_package_dependencies.c b/vendor/nanopb/tests/typename_mangling/test_strip_package_dependencies.c
new file mode 100644
index 00000000..0e2ceca1
--- /dev/null
+++ b/vendor/nanopb/tests/typename_mangling/test_strip_package_dependencies.c
@@ -0,0 +1,27 @@
+/*
+ * Tests if expected names are generated when M_STRIP_PACKAGE is used.
+ */
+
+#include <stdio.h>
+#include "unittests.h"
+#include "strip_package_b.pb.h"
+
+int main()
+{
+ MessageA msgA1 = package_a_MessageA_init_default;
+ package_a_MessageA msgA2 = MessageA_init_default;
+
+ MessageB msgB1 = package_b_MessageB_init_zero;
+ package_b_MessageB msgB2 = MessageB_init_zero;
+
+ package_a_EnumA e1 = EnumA_VALUE_A_0;
+ EnumA e2 = EnumA_VALUE_A_1;
+ e2 = _package_a_EnumA_MIN;
+ e2 = _EnumA_MIN;
+ e2 = _package_a_EnumA_MAX;
+ e2 = _EnumA_MAX;
+ e2 = _package_a_EnumA_ARRAYSIZE;
+ e2 = _EnumA_ARRAYSIZE;
+
+ return msgA1.enum_a_field + msgA2.enum_a_field + msgB1.nested_enum + msgB2.nested_enum + e1 + e2; /* marks variables as used */
+}
diff --git a/vendor/nanopb/tests/typename_mangling/with_package_a.proto b/vendor/nanopb/tests/typename_mangling/with_package_a.proto
new file mode 100644
index 00000000..4d683415
--- /dev/null
+++ b/vendor/nanopb/tests/typename_mangling/with_package_a.proto
@@ -0,0 +1,13 @@
+syntax = "proto3";
+
+package package.a;
+
+enum EnumA {
+ VALUE_A_0 = 0;
+ VALUE_A_1 = 1;
+ VALUE_A_2 = 2;
+}
+
+message MessageA {
+ EnumA enum_a_field = 1;
+}
\ No newline at end of file
diff --git a/vendor/nanopb/tests/typename_mangling/with_package_b.proto b/vendor/nanopb/tests/typename_mangling/with_package_b.proto
new file mode 100644
index 00000000..5cd2dfa1
--- /dev/null
+++ b/vendor/nanopb/tests/typename_mangling/with_package_b.proto
@@ -0,0 +1,10 @@
+syntax = "proto3";
+
+import "strip_package_a.proto";
+
+package package.b;
+
+message MessageB {
+ package.a.EnumA nested_enum = 1;
+ package.a.MessageA nested_message = 2;
+}
diff --git a/vendor/nanopb/tools/make_linux_package.sh b/vendor/nanopb/tools/make_linux_package.sh
index 5509cbbd..42d720bf 100644
--- a/vendor/nanopb/tools/make_linux_package.sh
+++ b/vendor/nanopb/tools/make_linux_package.sh
@@ -21,12 +21,13 @@ git archive HEAD | tar x -C $DEST
( cd $DEST/generator; python3 nanopb_generator.py ||: )
# Package the Python libraries
-( cd $DEST/generator; python3 -m PyInstaller nanopb_generator.py )
-( cd $DEST/generator; python3 -m PyInstaller protoc )
+( cd $DEST/generator; python3 -m PyInstaller --collect-all grpc_tools.grpc_version --strip nanopb_generator.py )
+( cd $DEST/generator; python3 -m PyInstaller --collect-all grpc_tools.grpc_version --strip protoc )
mv $DEST/generator/dist/nanopb_generator $DEST/generator-bin
cp $DEST/generator/dist/protoc/protoc $DEST/generator-bin
# Include Google's descriptor.proto and nanopb.proto
+mkdir -p $DEST/generator-bin/grpc_tools/
cp -pr $(python3 -c 'import grpc_tools, os.path; print(os.path.dirname(grpc_tools.__file__))')/_proto $DEST/generator-bin/grpc_tools/
cp -pr $DEST/generator/proto $DEST/generator-bin/proto
@@ -36,9 +37,6 @@ rm -rf $DEST/generator/dist $DEST/generator/build $DEST/generator/nanopb_generat
# Make the nanopb generator available as a protoc plugin
cp $DEST/generator-bin/nanopb_generator $DEST/generator-bin/protoc-gen-nanopb
-# Remove debugging symbols to reduce size of package
-( cd $DEST/generator-bin; strip *.so *.so.* )
-
# Tar it all up
( cd dist; tar -czf $VERSION.tar.gz $VERSION )
diff --git a/vendor/nanopb/tools/make_mac_package.sh b/vendor/nanopb/tools/make_mac_package.sh
index a9806143..08dc19c6 100644
--- a/vendor/nanopb/tools/make_mac_package.sh
+++ b/vendor/nanopb/tools/make_mac_package.sh
@@ -21,8 +21,8 @@ git archive HEAD | tar x -C $DEST
( cd $DEST/generator; python3 nanopb_generator.py ||: )
# Package the Python libraries
-( cd $DEST/generator; python3 -m PyInstaller nanopb_generator.py )
-( cd $DEST/generator; python3 -m PyInstaller protoc )
+( cd $DEST/generator; python3 -m PyInstaller --collect-all grpc_tools.grpc_version nanopb_generator.py )
+( cd $DEST/generator; python3 -m PyInstaller --collect-all grpc_tools.grpc_version protoc )
mv $DEST/generator/dist/nanopb_generator $DEST/generator-bin
cp $DEST/generator/dist/protoc/protoc $DEST/generator-bin
diff --git a/vendor/nanopb/tools/make_windows_package.sh b/vendor/nanopb/tools/make_windows_package.sh
index 1d1acc12..c4577f26 100644
--- a/vendor/nanopb/tools/make_windows_package.sh
+++ b/vendor/nanopb/tools/make_windows_package.sh
@@ -17,16 +17,16 @@ mkdir -p $DEST
git archive HEAD | tar x -C $DEST
# Rebuild the Python .proto files and .pyc
-( cd $DEST/generator; py -3 nanopb_generator.py ||: )
+( cd $DEST/generator; python3 nanopb_generator.py ||: )
# Package the Python libraries
-( cd $DEST/generator; py -3 -m PyInstaller nanopb_generator.py )
-( cd $DEST/generator; py -3 -m PyInstaller protoc )
+( cd $DEST/generator; python3 -m PyInstaller nanopb_generator.py )
+( cd $DEST/generator; python3 -m PyInstaller protoc )
mv $DEST/generator/dist/nanopb_generator $DEST/generator-bin
cp $DEST/generator/dist/protoc/protoc.exe $DEST/generator-bin
# Include Google's descriptor.proto and nanopb.proto
-cp -pr $(py -3 -c 'import grpc_tools, os.path; print(os.path.dirname(grpc_tools.__file__))')/_proto $DEST/generator-bin/grpc_tools/
+cp -pr $(python3 -c 'import grpc_tools, os.path; print(os.path.dirname(grpc_tools.__file__))')/_proto $DEST/generator-bin/grpc_tools/
cp -pr $DEST/generator/proto $DEST/generator-bin/proto
# Remove temp files
diff --git a/vendor/nanopb/tools/set_version.sh b/vendor/nanopb/tools/set_version.sh
index 60006190..966678d9 100644
--- a/vendor/nanopb/tools/set_version.sh
+++ b/vendor/nanopb/tools/set_version.sh
@@ -4,9 +4,10 @@
# e.g. user@localhost:~/nanopb$ tools/set_version.sh nanopb-0.1.9-dev
# It sets the version number in pb.h and generator/nanopb_generator.py.
+VERSION_NUMBER_ONLY=$(echo $1 | cut -d '-' -f 2)
sed -i -e 's/nanopb_version\s*=\s*"[^"]*"/nanopb_version = "'$1'"/' generator/nanopb_generator.py
-sed -i -e 's/#define\s*NANOPB_VERSION\s*.*/#define NANOPB_VERSION '$1'/' pb.h
-sed -i -e 's/set(\s*nanopb_VERSION_STRING\s*[^)]*)/set(nanopb_VERSION_STRING '$1')/' CMakeLists.txt
+sed -i -e 's/#define\s*NANOPB_VERSION\s*.*/#define NANOPB_VERSION "'$1'"/' pb.h
+sed -i -e 's/project(\s*nanopb\s*VERSION\s*[^)]*\s*LANGUAGES\s*C\s*)/project(nanopb VERSION '$VERSION_NUMBER_ONLY' LANGUAGES C)/' CMakeLists.txt
VERSION_ONLY=$(echo $1 | sed 's/nanopb-//')
if [[ $1 != *dev ]]
diff --git a/vendor/nanopb/zephyr/module.yml b/vendor/nanopb/zephyr/module.yml
new file mode 100644
index 00000000..207809a7
--- /dev/null
+++ b/vendor/nanopb/zephyr/module.yml
@@ -0,0 +1,4 @@
+name: nanopb
+build:
+ cmake-ext: True
+ kconfig-ext: True